worker.js 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898
  1. import * as fs from 'fs';
  2. import { URL } from 'url';
  3. import * as path from 'path';
  4. import { v4 } from 'uuid';
  5. import { AbortController } from './abort-controller';
  6. import { delay, DELAY_TIME_1, isNotConnectionError, isRedisInstance, } from '../utils';
  7. import { QueueBase } from './queue-base';
  8. import { Repeat } from './repeat';
  9. import { ChildPool } from './child-pool';
  10. import { RedisConnection } from './redis-connection';
  11. import sandbox from './sandbox';
  12. import { AsyncFifoQueue } from './async-fifo-queue';
  13. import { DelayedError, RateLimitError, RATE_LIMIT_ERROR, WaitingChildrenError, WaitingError, UnrecoverableError, } from './errors';
  14. import { SpanKind, TelemetryAttributes } from '../enums';
  15. import { JobScheduler } from './job-scheduler';
  16. import { LockManager } from './lock-manager';
// 10 seconds is the maximum time a BZPOPMIN can block.
// Capping the block time also bounds how long the blocking connection can sit
// unresponsive before the reconnect watchdog (see waitForJob) forces a disconnect.
const maximumBlockTimeout = 10;
  19. /**
  20. *
  21. * This class represents a worker that is able to process jobs from the queue.
  22. * As soon as the class is instantiated and a connection to Redis is established
  23. * it will start processing jobs.
  24. *
  25. */
  26. export class Worker extends QueueBase {
  27. static RateLimitError() {
  28. return new RateLimitError();
  29. }
/**
 * Builds a worker bound to the given queue name.
 *
 * @param name - queue name to process jobs from
 * @param processor - processor function, or a file path / URL for a sandboxed processor
 * @param opts - worker options; `connection` is mandatory
 * @param Connection - optional Redis connection class override
 */
constructor(name, processor, opts, Connection) {
    var _a;
    // Merge user options over the worker defaults; `blockingConnection` is
    // forced on because this worker needs a dedicated blocking connection.
    super(name, Object.assign(Object.assign({ drainDelay: 5, concurrency: 1, lockDuration: 30000, maximumRateLimitDelay: 30000, maxStalledCount: 1, stalledInterval: 30000, autorun: true, runRetryDelay: 15000 }, opts), { blockingConnection: true }), Connection);
    this.abortDelayController = null;
    this.blockUntil = 0; // timestamp of the next known delayed job (0 = none)
    this.drained = false;
    this.limitUntil = 0; // timestamp until which the queue is rate limited (0 = not limited)
    this.processorAcceptsSignal = false;
    this.stalledCheckerRunning = false;
    this.waiting = null; // pending waitForJob promise while blocked on the marker key
    this.running = false;
    this.mainLoopRunning = null;
    if (!opts || !opts.connection) {
        throw new Error('Worker requires a connection');
    }
    // Validate numeric options up-front so misconfiguration fails fast.
    if (typeof this.opts.maxStalledCount !== 'number' ||
        this.opts.maxStalledCount < 0) {
        throw new Error('maxStalledCount must be greater or equal than 0');
    }
    if (typeof this.opts.maxStartedAttempts === 'number' &&
        this.opts.maxStartedAttempts < 0) {
        throw new Error('maxStartedAttempts must be greater or equal than 0');
    }
    if (typeof this.opts.stalledInterval !== 'number' ||
        this.opts.stalledInterval <= 0) {
        throw new Error('stalledInterval must be greater than 0');
    }
    if (typeof this.opts.drainDelay !== 'number' || this.opts.drainDelay <= 0) {
        throw new Error('drainDelay must be greater than 0');
    }
    this.concurrency = this.opts.concurrency;
    // By default locks are renewed at half the lock duration.
    this.opts.lockRenewTime =
        this.opts.lockRenewTime || this.opts.lockDuration / 2;
    this.id = v4();
    this.createLockManager();
    if (processor) {
        if (typeof processor === 'function') {
            this.processFn = processor;
            // Check if processor accepts signal parameter (3rd parameter)
            this.processorAcceptsSignal = processor.length >= 3;
        }
        else {
            // SANDBOXED: processor is a file path or URL executed in a child
            // process / worker thread via the child pool.
            if (processor instanceof URL) {
                if (!fs.existsSync(processor)) {
                    throw new Error(`URL ${processor} does not exist in the local file system`);
                }
                processor = processor.href;
            }
            else {
                const supportedFileTypes = ['.js', '.ts', '.flow', '.cjs', '.mjs'];
                const processorFile = processor +
                    (supportedFileTypes.includes(path.extname(processor)) ? '' : '.js');
                if (!fs.existsSync(processorFile)) {
                    throw new Error(`File ${processorFile} does not exist`);
                }
            }
            // Separate paths so that bundling tools can resolve dependencies easier
            const dirname = path.dirname(module.filename || __filename);
            const workerThreadsMainFile = path.join(dirname, 'main-worker.js');
            const spawnProcessMainFile = path.join(dirname, 'main.js');
            let mainFilePath = this.opts.useWorkerThreads
                ? workerThreadsMainFile
                : spawnProcessMainFile;
            try {
                fs.statSync(mainFilePath); // would throw if file not exists
            }
            catch (_) {
                // Fallback location used when running from a bundled build.
                const mainFile = this.opts.useWorkerThreads
                    ? 'main-worker.js'
                    : 'main.js';
                mainFilePath = path.join(process.cwd(), `dist/cjs/classes/${mainFile}`);
                fs.statSync(mainFilePath);
            }
            this.childPool = new ChildPool({
                mainFile: mainFilePath,
                useWorkerThreads: this.opts.useWorkerThreads,
                workerForkOptions: this.opts.workerForkOptions,
                workerThreadsOptions: this.opts.workerThreadsOptions,
            });
            this.createSandbox(processor);
            // Sandboxed processors are always given an abort signal.
            this.processorAcceptsSignal = true;
        }
        if (this.opts.autorun) {
            this.run().catch(error => this.emit('error', error));
        }
    }
    const connectionName = this.clientName() + (this.opts.name ? `:w:${this.opts.name}` : '');
    // Dedicated, non-shared blocking connection used for BZPOPMIN waits
    // (see waitForJob); duplicated from the user's connection when one is given.
    this.blockingConnection = new RedisConnection(isRedisInstance(opts.connection)
        ? opts.connection.isCluster
            ? opts.connection.duplicate(undefined, {
                redisOptions: Object.assign(Object.assign({}, (((_a = opts.connection.options) === null || _a === void 0 ? void 0 : _a.redisOptions) || {})), { connectionName }),
            })
            : opts.connection.duplicate({ connectionName })
        : Object.assign(Object.assign({}, opts.connection), { connectionName }), {
        shared: false,
        blocking: true,
        skipVersionCheck: opts.skipVersionCheck,
    });
    this.blockingConnection.on('error', error => this.emit('error', error));
    this.blockingConnection.on('ready', () => setTimeout(() => this.emit('ready'), 0));
}
  132. /**
  133. * Creates and configures the lock manager for processing jobs.
  134. * This method can be overridden in subclasses to customize lock manager behavior.
  135. */
  136. createLockManager() {
  137. this.lockManager = new LockManager(this, {
  138. lockRenewTime: this.opts.lockRenewTime,
  139. lockDuration: this.opts.lockDuration,
  140. workerId: this.id,
  141. workerName: this.opts.name,
  142. });
  143. }
  144. /**
  145. * Creates and configures the sandbox for processing jobs.
  146. * This method can be overridden in subclasses to customize sandbox behavior.
  147. *
  148. * @param processor - The processor file path, URL, or function to be sandboxed
  149. */
  150. createSandbox(processor) {
  151. this.processFn = sandbox(processor, this.childPool).bind(this);
  152. }
  153. /**
  154. * Public accessor method for LockManager to extend locks.
  155. * This delegates to the protected scripts object.
  156. */
  157. async extendJobLocks(jobIds, tokens, duration) {
  158. return this.scripts.extendLocks(jobIds, tokens, duration);
  159. }
/**
 * Emits a worker event, delegating to the QueueBase emitter.
 *
 * @param event - event name
 * @param args - event payload forwarded to listeners
 * @returns true if the event had listeners
 */
emit(event, ...args) {
    return super.emit(event, ...args);
}
/**
 * Removes a previously registered listener.
 *
 * @returns this worker, for chaining
 */
off(eventName, listener) {
    super.off(eventName, listener);
    return this;
}
/**
 * Registers a listener for a worker event.
 *
 * @returns this worker, for chaining
 */
on(event, listener) {
    super.on(event, listener);
    return this;
}
/**
 * Registers a one-shot listener for a worker event.
 *
 * @returns this worker, for chaining
 */
once(event, listener) {
    super.once(event, listener);
    return this;
}
/**
 * Invokes the configured processor function for a job.
 *
 * @param job - job being processed
 * @param token - lock token assigned to the job
 * @param signal - abort signal passed through to the processor (may be undefined)
 */
callProcessJob(job, token, signal) {
    return this.processFn(job, token, signal);
}
/**
 * Materializes a Job instance from its raw Redis hash data.
 *
 * @param data - raw job data as returned by the scripts
 * @param jobId - id of the job
 */
createJob(data, jobId) {
    return this.Job.fromJSON(this, data, jobId);
}
  181. /**
  182. *
  183. * Waits until the worker is ready to start processing jobs.
  184. * In general only useful when writing tests.
  185. *
  186. */
  187. async waitUntilReady() {
  188. await super.waitUntilReady();
  189. return this.blockingConnection.client;
  190. }
/**
 * Cancels a specific job currently being processed by this worker.
 * The job's processor function will receive an abort signal.
 *
 * Delegates to the lock manager, which tracks the jobs in flight.
 *
 * @param jobId - The ID of the job to cancel
 * @param reason - Optional reason for the cancellation
 * @returns true if the job was found and cancelled, false otherwise
 */
cancelJob(jobId, reason) {
    return this.lockManager.cancelJob(jobId, reason);
}
/**
 * Cancels all jobs currently being processed by this worker.
 * All active job processor functions will receive abort signals.
 *
 * Delegates to the lock manager, which tracks the jobs in flight.
 *
 * @param reason - Optional reason for the cancellation
 */
cancelAllJobs(reason) {
    this.lockManager.cancelAllJobs(reason);
}
  211. set concurrency(concurrency) {
  212. if (typeof concurrency !== 'number' ||
  213. concurrency < 1 ||
  214. !isFinite(concurrency)) {
  215. throw new Error('concurrency must be a finite number greater than 0');
  216. }
  217. this._concurrency = concurrency;
  218. }
/**
 * Current maximum number of jobs processed in parallel.
 */
get concurrency() {
    return this._concurrency;
}
  222. get repeat() {
  223. return new Promise(async (resolve) => {
  224. if (!this._repeat) {
  225. const connection = await this.client;
  226. this._repeat = new Repeat(this.name, Object.assign(Object.assign({}, this.opts), { connection }));
  227. this._repeat.on('error', this.emit.bind(this, 'error'));
  228. }
  229. resolve(this._repeat);
  230. });
  231. }
  232. get jobScheduler() {
  233. return new Promise(async (resolve) => {
  234. if (!this._jobScheduler) {
  235. const connection = await this.client;
  236. this._jobScheduler = new JobScheduler(this.name, Object.assign(Object.assign({}, this.opts), { connection }));
  237. this._jobScheduler.on('error', this.emit.bind(this, 'error'));
  238. }
  239. resolve(this._jobScheduler);
  240. });
  241. }
/**
 * Starts processing jobs. Called automatically from the constructor unless
 * `autorun` is disabled.
 *
 * @throws if no processor was defined or the worker is already running.
 * @returns a promise that resolves when the main loop exits.
 */
async run() {
    if (!this.processFn) {
        throw new Error('No process function is defined.');
    }
    if (this.running) {
        throw new Error('Worker is already running.');
    }
    try {
        this.running = true;
        if (this.closing || this.paused) {
            return;
        }
        await this.startStalledCheckTimer();
        if (!this.opts.skipLockRenewal) {
            // Begin periodic lock renewal for jobs in flight.
            this.lockManager.start();
        }
        const client = await this.client;
        const bclient = await this.blockingConnection.client;
        this.mainLoopRunning = this.mainLoop(client, bclient);
        // We must await here or finally will be called too early.
        await this.mainLoopRunning;
    }
    finally {
        // Always clear the running flag, even if startup or the loop failed.
        this.running = false;
    }
}
/**
 * If the queue is currently rate limited, waits (abortably) until the limit
 * is expected to expire — capped by getRateLimitDelay — then clears the
 * drained/limit flags so the main loop resumes fetching.
 */
async waitForRateLimit() {
    var _a;
    const limitUntil = this.limitUntil;
    if (limitUntil > Date.now()) {
        // Abort any previous in-flight delay before starting a new one.
        (_a = this.abortDelayController) === null || _a === void 0 ? void 0 : _a.abort();
        this.abortDelayController = new AbortController();
        const delay = this.getRateLimitDelay(limitUntil - Date.now());
        await this.delay(delay, this.abortDelayController);
        this.drained = false;
        this.limitUntil = 0;
    }
}
/**
 * This is the main loop in BullMQ. Its goals are to fetch jobs from the queue
 * as efficiently as possible, providing concurrency and minimal unnecessary calls
 * to Redis.
 *
 * @param client - the regular Redis client
 * @param bclient - the dedicated blocking Redis client
 */
async mainLoop(client, bclient) {
    const asyncFifoQueue = new AsyncFifoQueue();
    let tokenPostfix = 0;
    // Keep looping while the worker is active, or until all in-flight
    // fetches/processing promises in the FIFO queue have drained.
    while ((!this.closing && !this.paused) || asyncFifoQueue.numTotal() > 0) {
        /**
         * This inner loop tries to fetch jobs concurrently, but if we are waiting for a job
         * to arrive at the queue we should not try to fetch more jobs (as it would be pointless)
         */
        while (!this.closing &&
            !this.paused &&
            !this.waiting &&
            asyncFifoQueue.numTotal() < this._concurrency &&
            !this.isRateLimited()) {
            // Each fetch gets a unique lock token: workerId + incrementing postfix.
            const token = `${this.id}:${tokenPostfix++}`;
            const fetchedJob = this.retryIfFailed(() => this._getNextJob(client, bclient, token, { block: true }), {
                delayInMs: this.opts.runRetryDelay,
                onlyEmitError: true,
            });
            asyncFifoQueue.add(fetchedJob);
            if (this.waiting && asyncFifoQueue.numTotal() > 1) {
                // We are waiting for jobs but we have others that we could start processing already
                break;
            }
            // We await here so that we fetch jobs in sequence, this is important to avoid unnecessary calls
            // to Redis in high concurrency scenarios.
            const job = await fetchedJob;
            // No more jobs waiting but we have others that could start processing already
            if (!job && asyncFifoQueue.numTotal() > 1) {
                break;
            }
            // If there are potential jobs to be processed and blockUntil is set, we should exit to avoid waiting
            // for processing this job.
            if (this.blockUntil) {
                break;
            }
        }
        // Since there can be undefined jobs in the queue (when a job fails or queue is empty)
        // we iterate until we find a job.
        let job;
        do {
            job = await asyncFifoQueue.fetch();
        } while (!job && asyncFifoQueue.numQueued() > 0);
        if (job) {
            const token = job.token;
            // Queue the processing promise; the callback tells processJob whether
            // it may atomically fetch the next job when this one finishes.
            asyncFifoQueue.add(this.processJob(job, token, () => asyncFifoQueue.numTotal() <= this._concurrency));
        }
        else if (asyncFifoQueue.numQueued() === 0) {
            // Nothing fetched and nothing pending: if rate limited, wait it out
            // before the next iteration.
            await this.waitForRateLimit();
        }
    }
}
/**
 * Returns a promise that resolves to the next job in queue.
 * @param token - worker token to be assigned to retrieved job
 * @returns a Job or undefined if no job was available in the queue.
 */
async getNextJob(token, { block = true } = {}) {
    var _a, _b;
    // The job is fetched before the span is created so that the job's own
    // telemetry metadata can be passed to `trace` as the final argument.
    const nextJob = await this._getNextJob(await this.client, await this.blockingConnection.client, token, { block });
    return this.trace(SpanKind.INTERNAL, 'getNextJob', this.name, async (span) => {
        span === null || span === void 0 ? void 0 : span.setAttributes({
            [TelemetryAttributes.WorkerId]: this.id,
            [TelemetryAttributes.QueueName]: this.name,
            [TelemetryAttributes.WorkerName]: this.opts.name,
            [TelemetryAttributes.WorkerOptions]: JSON.stringify({ block }),
            [TelemetryAttributes.JobId]: nextJob === null || nextJob === void 0 ? void 0 : nextJob.id,
        });
        return nextJob;
    }, (_b = (_a = nextJob === null || nextJob === void 0 ? void 0 : nextJob.opts) === null || _a === void 0 ? void 0 : _a.telemetry) === null || _b === void 0 ? void 0 : _b.metadata);
}
/**
 * Internal fetch: either blocks on the marker key until work may be available
 * (when the queue was drained) or directly tries to move a job to active.
 *
 * @param client - regular Redis client
 * @param bclient - blocking Redis client
 * @param token - lock token to assign to the retrieved job
 * @returns the next Job, or undefined when none is available.
 */
async _getNextJob(client, bclient, token, { block = true } = {}) {
    if (this.paused) {
        return;
    }
    if (this.closing) {
        return;
    }
    let job;
    // Only block waiting for a marker when the last fetch found the queue
    // drained and there is no rate limit or pending wait already in place.
    if (this.drained && block && !this.limitUntil && !this.waiting) {
        this.waiting = this.waitForJob(bclient, this.blockUntil);
        try {
            this.blockUntil = await this.waiting;
            // A due (or zero) blockUntil means a job may be ready right now.
            if (this.blockUntil <= 0 || this.blockUntil - Date.now() < 1) {
                job = await this.moveToActive(client, token, this.opts.name);
            }
        }
        finally {
            // Always clear the pending-wait flag so the main loop can fetch again.
            this.waiting = null;
        }
    }
    else {
        if (!this.isRateLimited()) {
            job = await this.moveToActive(client, token, this.opts.name);
        }
    }
    if (job) {
        this.emit('active', job, 'waiting');
    }
    return job;
}
  385. /**
  386. * Overrides the rate limit to be active for the next jobs.
  387. * @deprecated This method is deprecated and will be removed in v6. Use queue.rateLimit method instead.
  388. * @param expireTimeMs - expire time in ms of this rate limit.
  389. */
  390. async rateLimit(expireTimeMs) {
  391. await this.trace(SpanKind.INTERNAL, 'rateLimit', this.name, async (span) => {
  392. span === null || span === void 0 ? void 0 : span.setAttributes({
  393. [TelemetryAttributes.WorkerId]: this.id,
  394. [TelemetryAttributes.WorkerRateLimit]: expireTimeMs,
  395. });
  396. await this.client.then(client => client.set(this.keys.limiter, Number.MAX_SAFE_INTEGER, 'PX', expireTimeMs));
  397. });
  398. }
  399. get minimumBlockTimeout() {
  400. return this.blockingConnection.capabilities.canBlockFor1Ms
  401. ? /* 1 millisecond is chosen because the granularity of our timestamps are milliseconds.
  402. Obviously we can still process much faster than 1 job per millisecond but delays and rate limits
  403. will never work with more accuracy than 1ms. */
  404. 0.001
  405. : 0.002;
  406. }
  407. isRateLimited() {
  408. return this.limitUntil > Date.now();
  409. }
  410. async moveToActive(client, token, name) {
  411. const [jobData, id, rateLimitDelay, delayUntil] = await this.scripts.moveToActive(client, token, name);
  412. this.updateDelays(rateLimitDelay, delayUntil);
  413. return this.nextJobFromJobData(jobData, id, token);
  414. }
/**
 * Blocks on the marker key with BZPOPMIN until a marker arrives or the
 * computed block timeout elapses.
 *
 * @param bclient - dedicated blocking Redis connection
 * @param blockUntil - known timestamp of the next delayed job, or 0
 * @returns the new blockUntil derived from the marker score, 0 when a fetch
 * should happen immediately, or Infinity when paused or after an error.
 */
async waitForJob(bclient, blockUntil) {
    if (this.paused) {
        return Infinity;
    }
    let timeout;
    try {
        if (!this.closing && !this.isRateLimited()) {
            let blockTimeout = this.getBlockTimeout(blockUntil);
            if (blockTimeout > 0) {
                // Round up to whole seconds when the server cannot take
                // fractional (double) timeouts.
                blockTimeout = this.blockingConnection.capabilities.canDoubleTimeout
                    ? blockTimeout
                    : Math.ceil(blockTimeout);
                // We cannot trust that the blocking connection stays blocking forever
                // due to issues in Redis and IORedis, so we will reconnect if we
                // don't get a response in the expected time.
                timeout = setTimeout(async () => {
                    bclient.disconnect(!this.closing);
                }, blockTimeout * 1000 + 1000);
                this.updateDelays(); // reset delays to avoid reusing same values in next iteration
                // Markers should only be used for un-blocking, so we will handle them in this
                // function only.
                const result = await bclient.bzpopmin(this.keys.marker, blockTimeout);
                if (result) {
                    const [_key, member, score] = result;
                    if (member) {
                        const newBlockUntil = parseInt(score);
                        // Use by pro version as rate limited groups could generate lower blockUntil values
                        // markers only return delays for delayed jobs
                        if (blockUntil && newBlockUntil > blockUntil) {
                            return blockUntil;
                        }
                        return newBlockUntil;
                    }
                }
            }
            // Non-positive timeout or no marker: fetch immediately.
            return 0;
        }
    }
    catch (error) {
        // Connection errors are expected on the forced disconnect above; only
        // surface other errors.
        if (isNotConnectionError(error)) {
            this.emit('error', error);
        }
        if (!this.closing) {
            await this.delay();
        }
    }
    finally {
        // Always cancel the reconnect watchdog.
        clearTimeout(timeout);
    }
    return Infinity;
}
  466. getBlockTimeout(blockUntil) {
  467. const opts = this.opts;
  468. // when there are delayed jobs
  469. if (blockUntil) {
  470. const blockDelay = blockUntil - Date.now();
  471. // when we reach the time to get new jobs
  472. if (blockDelay <= 0) {
  473. return blockDelay;
  474. }
  475. else if (blockDelay < this.minimumBlockTimeout * 1000) {
  476. return this.minimumBlockTimeout;
  477. }
  478. else {
  479. // We restrict the maximum block timeout to 10 second to avoid
  480. // blocking the connection for too long in the case of reconnections
  481. // reference: https://github.com/taskforcesh/bullmq/issues/1658
  482. return Math.min(blockDelay / 1000, maximumBlockTimeout);
  483. }
  484. }
  485. else {
  486. return Math.max(opts.drainDelay, this.minimumBlockTimeout);
  487. }
  488. }
  489. getRateLimitDelay(delay) {
  490. // We restrict the maximum limit delay to the configured maximumRateLimitDelay
  491. // to be able to promote delayed jobs while the queue is rate limited
  492. return Math.min(delay, this.opts.maximumRateLimitDelay);
  493. }
  494. /**
  495. *
  496. * This function is exposed only for testing purposes.
  497. */
  498. async delay(milliseconds, abortController) {
  499. await delay(milliseconds || DELAY_TIME_1, abortController);
  500. }
  501. updateDelays(limitDelay = 0, delayUntil = 0) {
  502. const clampedLimit = Math.max(limitDelay, 0);
  503. if (clampedLimit > 0) {
  504. this.limitUntil = Date.now() + clampedLimit;
  505. }
  506. else {
  507. this.limitUntil = 0;
  508. }
  509. this.blockUntil = Math.max(delayUntil, 0) || 0;
  510. }
/**
 * Turns raw job data from a script result into a Job instance, scheduling the
 * next iteration of repeatable jobs as a side effect. Emits 'drained' once
 * when no data is returned.
 *
 * @param jobData - raw job hash, or undefined when the queue is empty
 * @param jobId - id of the job
 * @param token - lock token assigned to the job
 * @returns the Job, or undefined when the queue was drained or scheduling failed.
 */
async nextJobFromJobData(jobData, jobId, token) {
    if (!jobData) {
        if (!this.drained) {
            this.emit('drained');
            this.drained = true;
        }
    }
    else {
        this.drained = false;
        const job = this.createJob(jobData, jobId);
        job.token = token;
        try {
            await this.retryIfFailed(async () => {
                // New-style job scheduler keys have 5+ segments; legacy
                // repeatables take the Repeat path below.
                if (job.repeatJobKey && job.repeatJobKey.split(':').length < 5) {
                    const jobScheduler = await this.jobScheduler;
                    await jobScheduler.upsertJobScheduler(
                    // Most of these arguments are not really needed
                    // anymore as we read them from the job scheduler itself
                    job.repeatJobKey, job.opts.repeat, job.name, job.data, job.opts, { override: false, producerId: job.id });
                }
                else if (job.opts.repeat) {
                    const repeat = await this.repeat;
                    await repeat.updateRepeatableJob(job.name, job.data, job.opts, {
                        override: false,
                    });
                }
            }, { delayInMs: this.opts.runRetryDelay });
        }
        catch (err) {
            // Emit error but don't throw to avoid breaking current job completion
            // Note: This means the next repeatable job will not be scheduled
            const errorMessage = err instanceof Error ? err.message : String(err);
            const schedulingError = new Error(`Failed to add repeatable job for next iteration: ${errorMessage}`);
            this.emit('error', schedulingError);
            // Return undefined to indicate no next job is available
            return undefined;
        }
        return job;
    }
}
/**
 * Runs the processor for a job and moves it to completed or failed,
 * optionally returning the next job fetched atomically by the move script.
 *
 * @param job - job to process
 * @param token - lock token held for the job
 * @param fetchNextCallback - whether the move may also fetch the next job
 * @returns the next Job to process, or undefined.
 */
async processJob(job, token, fetchNextCallback = () => true) {
    var _a, _b;
    const srcPropagationMetadata = (_b = (_a = job.opts) === null || _a === void 0 ? void 0 : _a.telemetry) === null || _b === void 0 ? void 0 : _b.metadata;
    return this.trace(SpanKind.CONSUMER, 'process', this.name, async (span) => {
        span === null || span === void 0 ? void 0 : span.setAttributes({
            [TelemetryAttributes.WorkerId]: this.id,
            [TelemetryAttributes.WorkerName]: this.opts.name,
            [TelemetryAttributes.JobId]: job.id,
            [TelemetryAttributes.JobName]: job.name,
        });
        // Track the job so its lock gets renewed and it can be cancelled;
        // returns an AbortController when the processor accepts a signal.
        const abortController = this.lockManager.trackJob(job.id, token, job.processedOn, this.processorAcceptsSignal);
        try {
            // Jobs flagged unrecoverable are failed without running the processor.
            const unrecoverableErrorMessage = this.getUnrecoverableErrorMessage(job);
            if (unrecoverableErrorMessage) {
                const failed = await this.retryIfFailed(() => {
                    this.lockManager.untrackJob(job.id);
                    return this.handleFailed(new UnrecoverableError(unrecoverableErrorMessage), job, token, fetchNextCallback, span);
                }, { delayInMs: this.opts.runRetryDelay, span });
                return failed;
            }
            const result = await this.callProcessJob(job, token, abortController
                ? abortController.signal
                : undefined);
            return await this.retryIfFailed(() => {
                this.lockManager.untrackJob(job.id);
                return this.handleCompleted(result, job, token, fetchNextCallback, span);
            }, { delayInMs: this.opts.runRetryDelay, span });
        }
        catch (err) {
            const failed = await this.retryIfFailed(() => {
                this.lockManager.untrackJob(job.id);
                return this.handleFailed(err, job, token, fetchNextCallback, span);
            }, { delayInMs: this.opts.runRetryDelay, span, onlyEmitError: true });
            return failed;
        }
        finally {
            // Ensure the job is untracked even on unexpected errors (it is also
            // untracked in the handler callbacks above before the move).
            this.lockManager.untrackJob(job.id);
            const now = Date.now();
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.JobFinishedTimestamp]: now,
                [TelemetryAttributes.JobAttemptFinishedTimestamp]: job.finishedOn || now,
                [TelemetryAttributes.JobProcessedTimestamp]: job.processedOn,
            });
        }
    }, srcPropagationMetadata);
}
  597. getUnrecoverableErrorMessage(job) {
  598. if (job.deferredFailure) {
  599. return job.deferredFailure;
  600. }
  601. if (this.opts.maxStartedAttempts &&
  602. this.opts.maxStartedAttempts < job.attemptsStarted) {
  603. return 'job started more than allowable limit';
  604. }
  605. }
/**
 * Moves a successfully processed job to the completed set.
 *
 * @param result - value returned by the processor
 * @param job - the processed job
 * @param token - lock token held for the job
 * @param fetchNextCallback - whether the move may atomically fetch the next job
 * @param span - optional telemetry span to annotate
 * @returns the next Job when one was fetched atomically, otherwise undefined.
 */
async handleCompleted(result, job, token, fetchNextCallback = () => true, span) {
    if (!this.connection.closing) {
        const completed = await job.moveToCompleted(result, token, fetchNextCallback() && !(this.closing || this.paused));
        this.emit('completed', job, result, 'active');
        span === null || span === void 0 ? void 0 : span.addEvent('job completed', {
            [TelemetryAttributes.JobResult]: JSON.stringify(result),
        });
        span === null || span === void 0 ? void 0 : span.setAttributes({
            [TelemetryAttributes.JobAttemptsMade]: job.attemptsMade,
        });
        // moveToCompleted returns an array only when it fetched the next job.
        if (Array.isArray(completed)) {
            const [jobData, jobId, rateLimitDelay, delayUntil] = completed;
            this.updateDelays(rateLimitDelay, delayUntil);
            return this.nextJobFromJobData(jobData, jobId, token);
        }
    }
}
/**
 * Handles a processor error: applies rate-limit signalling, re-fetch for
 * delayed/waiting control-flow errors, or moves the job to failed.
 *
 * @param err - error thrown by the processor
 * @param job - the job being processed
 * @param token - lock token held for the job
 * @param fetchNextCallback - whether the move may atomically fetch the next job
 * @param span - optional telemetry span to annotate
 * @returns the next Job when one was fetched atomically, otherwise undefined.
 */
async handleFailed(err, job, token, fetchNextCallback = () => true, span) {
    if (!this.connection.closing) {
        // Check if the job was manually rate-limited
        if (err.message === RATE_LIMIT_ERROR) {
            const rateLimitTtl = await this.moveLimitedBackToWait(job, token);
            this.limitUntil = rateLimitTtl > 0 ? Date.now() + rateLimitTtl : 0;
            return;
        }
        // Control-flow "errors" thrown by job.moveToDelayed / moveToWait /
        // moveToWaitingChildren: the job is not failed, just fetch the next one.
        // Name checks cover errors crossing bundler/realm boundaries.
        if (err instanceof DelayedError ||
            err.name == 'DelayedError' ||
            err instanceof WaitingError ||
            err.name == 'WaitingError' ||
            err instanceof WaitingChildrenError ||
            err.name == 'WaitingChildrenError') {
            const client = await this.client;
            return this.moveToActive(client, token, this.opts.name);
        }
        const result = await job.moveToFailed(err, token, fetchNextCallback() && !(this.closing || this.paused));
        this.emit('failed', job, err, 'active');
        span === null || span === void 0 ? void 0 : span.addEvent('job failed', {
            [TelemetryAttributes.JobFailedReason]: err.message,
        });
        span === null || span === void 0 ? void 0 : span.setAttributes({
            [TelemetryAttributes.JobAttemptsMade]: job.attemptsMade,
        });
        // Note: result can be undefined if moveToFailed fails (e.g., lock was lost)
        if (Array.isArray(result)) {
            const [jobData, jobId, rateLimitDelay, delayUntil] = result;
            this.updateDelays(rateLimitDelay, delayUntil);
            return this.nextJobFromJobData(jobData, jobId, token);
        }
    }
}
/**
 *
 * Pauses the processing of this queue only for this worker.
 *
 * @param doNotWaitActive - when true, returns without waiting for currently
 * active jobs to finish.
 */
async pause(doNotWaitActive) {
    await this.trace(SpanKind.INTERNAL, 'pause', this.name, async (span) => {
        var _a;
        span === null || span === void 0 ? void 0 : span.setAttributes({
            [TelemetryAttributes.WorkerId]: this.id,
            [TelemetryAttributes.WorkerName]: this.opts.name,
            [TelemetryAttributes.WorkerDoNotWaitActive]: doNotWaitActive,
        });
        if (!this.paused) {
            this.paused = true;
            if (!doNotWaitActive) {
                await this.whenCurrentJobsFinished();
            }
            // Stop the stalled checker while paused (restarted by resume()).
            (_a = this.stalledCheckStopper) === null || _a === void 0 ? void 0 : _a.call(this);
            this.emit('paused');
        }
    });
}
/**
 *
 * Resumes processing of this worker (if paused).
 *
 * Restarts the main loop when it had fully stopped, or just the stalled
 * checker when the loop kept running (pause with doNotWaitActive=true).
 */
resume() {
    if (!this.running || this.paused) {
        this.trace(SpanKind.INTERNAL, 'resume', this.name, span => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.WorkerId]: this.id,
                [TelemetryAttributes.WorkerName]: this.opts.name,
            });
            this.paused = false;
            if (!this.running) {
                if (this.processFn) {
                    this.run();
                }
            }
            else {
                // TODO: await for startStalledCheckTimer in next breaking change, that will convert resume method to async
                // Main loop is still running (pause was called with doNotWaitActive=true).
                // Restart the stalled checker since pause() stopped it.
                void this.startStalledCheckTimer().catch(err => {
                    this.emit('error', err);
                });
            }
            this.emit('resumed');
        }).catch(err => {
            this.emit('error', err);
        });
    }
}
  709. /**
  710. *
  711. * Checks if worker is paused.
  712. *
  713. * @returns true if worker is paused, false otherwise.
  714. */
  715. isPaused() {
  716. return !!this.paused;
  717. }
/**
 *
 * Checks if worker is currently running.
 *
 * Reflects the flag set/cleared by run(); it stays true while the main loop
 * is executing.
 *
 * @returns true if worker is running, false otherwise.
 */
isRunning() {
    return this.running;
}
/**
 *
 * Closes the worker and related redis connections.
 *
 * This method waits for current jobs to finalize before returning.
 *
 * Idempotent: repeated calls return the same in-flight closing promise.
 *
 * @param force - Use force boolean parameter if you do not want to wait for
 * current jobs to be processed. When using telemetry, be mindful that it can
 * interfere with the proper closure of spans, potentially preventing them from being exported.
 *
 * @returns Promise that resolves when the worker has been closed.
 */
async close(force = false) {
    if (this.closing) {
        return this.closing;
    }
    this.closing = (async () => {
        await this.trace(SpanKind.INTERNAL, 'close', this.name, async (span) => {
            var _a, _b;
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [TelemetryAttributes.WorkerId]: this.id,
                [TelemetryAttributes.WorkerName]: this.opts.name,
                [TelemetryAttributes.WorkerForceClose]: force,
            });
            this.emit('closing', 'closing queue');
            // Cut short any pending rate-limit delay.
            (_a = this.abortDelayController) === null || _a === void 0 ? void 0 : _a.abort();
            // Define the async cleanup functions
            const asyncCleanups = [
                () => {
                    return force || this.whenCurrentJobsFinished(false);
                },
                () => this.lockManager.close(),
                () => { var _a; return (_a = this.childPool) === null || _a === void 0 ? void 0 : _a.clean(); },
                () => this.blockingConnection.close(force),
                () => this.connection.close(force),
            ];
            // Run cleanup functions sequentially and make sure all are run despite any errors
            for (const cleanup of asyncCleanups) {
                try {
                    await cleanup();
                }
                catch (err) {
                    this.emit('error', err);
                }
            }
            (_b = this.stalledCheckStopper) === null || _b === void 0 ? void 0 : _b.call(this);
            this.closed = true;
            this.emit('closed');
        });
    })();
    return await this.closing;
}
  779. /**
  780. *
  781. * Manually starts the stalled checker.
  782. * The check will run once as soon as this method is called, and
  783. * then every opts.stalledInterval milliseconds until the worker is closed.
  784. * Note: Normally you do not need to call this method, since the stalled checker
  785. * is automatically started when the worker starts processing jobs after
  786. * calling run. However if you want to process the jobs manually you need
  787. * to call this method to start the stalled checker.
  788. *
  789. * @see {@link https://docs.bullmq.io/patterns/manually-fetching-jobs}
  790. */
  791. async startStalledCheckTimer() {
  792. if (!this.opts.skipStalledCheck) {
  793. if (!this.closing && !this.stalledCheckerRunning) {
  794. await this.trace(SpanKind.INTERNAL, 'startStalledCheckTimer', this.name, async (span) => {
  795. span === null || span === void 0 ? void 0 : span.setAttributes({
  796. [TelemetryAttributes.WorkerId]: this.id,
  797. [TelemetryAttributes.WorkerName]: this.opts.name,
  798. });
  799. this.stalledCheckerRunning = true;
  800. this.stalledChecker()
  801. .catch(err => {
  802. this.emit('error', err);
  803. })
  804. .finally(() => {
  805. this.stalledCheckerRunning = false;
  806. });
  807. });
  808. }
  809. }
  810. }
  811. async stalledChecker() {
  812. while (!(this.closing || this.paused)) {
  813. await this.checkConnectionError(() => this.moveStalledJobsToWait());
  814. await new Promise(resolve => {
  815. const timeout = setTimeout(resolve, this.opts.stalledInterval);
  816. this.stalledCheckStopper = () => {
  817. clearTimeout(timeout);
  818. resolve();
  819. };
  820. });
  821. }
  822. }
  823. /**
  824. * Returns a promise that resolves when active jobs are cleared
  825. *
  826. * @returns
  827. */
  828. async whenCurrentJobsFinished(reconnect = true) {
  829. //
  830. // Force reconnection of blocking connection to abort blocking redis call immediately.
  831. //
  832. if (this.waiting) {
  833. // If we are not going to reconnect, we will not wait for the disconnection.
  834. await this.blockingConnection.disconnect(reconnect);
  835. }
  836. else {
  837. reconnect = false;
  838. }
  839. if (this.mainLoopRunning) {
  840. await this.mainLoopRunning;
  841. }
  842. reconnect && (await this.blockingConnection.reconnect());
  843. }
  844. async retryIfFailed(fn, opts) {
  845. var _a;
  846. let retry = 0;
  847. const maxRetries = opts.maxRetries || Infinity;
  848. do {
  849. try {
  850. return await fn();
  851. }
  852. catch (err) {
  853. (_a = opts.span) === null || _a === void 0 ? void 0 : _a.recordException(err.message);
  854. if (isNotConnectionError(err)) {
  855. // Emit error when not paused or closing; optionally swallow (no throw) when opts.onlyEmitError is set.
  856. if (!this.paused && !this.closing) {
  857. this.emit('error', err);
  858. }
  859. if (opts.onlyEmitError) {
  860. return;
  861. }
  862. else {
  863. throw err;
  864. }
  865. }
  866. else {
  867. if (opts.delayInMs && !this.closing && !this.closed) {
  868. await this.delay(opts.delayInMs, this.abortDelayController);
  869. }
  870. if (retry + 1 >= maxRetries) {
  871. // If we've reached max retries, throw the last error
  872. throw err;
  873. }
  874. }
  875. }
  876. } while (++retry < maxRetries);
  877. }
  878. async moveStalledJobsToWait() {
  879. await this.trace(SpanKind.INTERNAL, 'moveStalledJobsToWait', this.name, async (span) => {
  880. const stalled = await this.scripts.moveStalledJobsToWait();
  881. span === null || span === void 0 ? void 0 : span.setAttributes({
  882. [TelemetryAttributes.WorkerId]: this.id,
  883. [TelemetryAttributes.WorkerName]: this.opts.name,
  884. [TelemetryAttributes.WorkerStalledJobs]: stalled,
  885. });
  886. stalled.forEach((jobId) => {
  887. span === null || span === void 0 ? void 0 : span.addEvent('job stalled', {
  888. [TelemetryAttributes.JobId]: jobId,
  889. });
  890. this.emit('stalled', jobId, 'active');
  891. });
  892. });
  893. }
  894. moveLimitedBackToWait(job, token) {
  895. return job.moveToWait(token);
  896. }
  897. }
  898. //# sourceMappingURL=worker.js.map