worker.js 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902
  1. "use strict";
  2. Object.defineProperty(exports, "__esModule", { value: true });
  3. exports.Worker = void 0;
  4. const fs = require("fs");
  5. const url_1 = require("url");
  6. const path = require("path");
  7. const uuid_1 = require("uuid");
  8. const abort_controller_1 = require("./abort-controller");
  9. const utils_1 = require("../utils");
  10. const queue_base_1 = require("./queue-base");
  11. const repeat_1 = require("./repeat");
  12. const child_pool_1 = require("./child-pool");
  13. const redis_connection_1 = require("./redis-connection");
  14. const sandbox_1 = require("./sandbox");
  15. const async_fifo_queue_1 = require("./async-fifo-queue");
  16. const errors_1 = require("./errors");
  17. const enums_1 = require("../enums");
  18. const job_scheduler_1 = require("./job-scheduler");
  19. const lock_manager_1 = require("./lock-manager");
// 10 seconds is the maximum time a BZPOPMIN can block.
// Used by getBlockTimeout to cap how long the blocking connection may wait,
// so reconnections are never stalled for too long
// (see https://github.com/taskforcesh/bullmq/issues/1658 referenced below).
const maximumBlockTimeout = 10;
  22. /**
  23. *
  24. * This class represents a worker that is able to process jobs from the queue.
  25. * As soon as the class is instantiated and a connection to Redis is established
  26. * it will start processing jobs.
  27. *
  28. */
  29. class Worker extends queue_base_1.QueueBase {
  30. static RateLimitError() {
  31. return new errors_1.RateLimitError();
  32. }
    /**
     * Creates a new Worker and, unless `autorun` is disabled, immediately
     * starts processing jobs from the given queue.
     *
     * @param name - name of the queue this worker consumes from.
     * @param processor - either a processing function, or a file path / URL to
     * a script that will be executed sandboxed (child process or worker thread).
     * @param opts - worker options; `connection` is mandatory.
     * @param Connection - optional Redis connection class override.
     */
    constructor(name, processor, opts, Connection) {
        var _a;
        // Merge caller options over the defaults; blockingConnection is forced
        // on because workers need a dedicated connection for BZPOPMIN.
        super(name, Object.assign(Object.assign({ drainDelay: 5, concurrency: 1, lockDuration: 30000, maximumRateLimitDelay: 30000, maxStalledCount: 1, stalledInterval: 30000, autorun: true, runRetryDelay: 15000 }, opts), { blockingConnection: true }), Connection);
        this.abortDelayController = null;
        this.blockUntil = 0;
        this.drained = false;
        this.limitUntil = 0;
        this.processorAcceptsSignal = false;
        this.stalledCheckerRunning = false;
        this.waiting = null;
        this.running = false;
        this.mainLoopRunning = null;
        // Validate mandatory and numeric options up-front so misconfiguration
        // fails fast instead of misbehaving at runtime.
        if (!opts || !opts.connection) {
            throw new Error('Worker requires a connection');
        }
        if (typeof this.opts.maxStalledCount !== 'number' ||
            this.opts.maxStalledCount < 0) {
            throw new Error('maxStalledCount must be greater or equal than 0');
        }
        if (typeof this.opts.maxStartedAttempts === 'number' &&
            this.opts.maxStartedAttempts < 0) {
            throw new Error('maxStartedAttempts must be greater or equal than 0');
        }
        if (typeof this.opts.stalledInterval !== 'number' ||
            this.opts.stalledInterval <= 0) {
            throw new Error('stalledInterval must be greater than 0');
        }
        if (typeof this.opts.drainDelay !== 'number' || this.opts.drainDelay <= 0) {
            throw new Error('drainDelay must be greater than 0');
        }
        this.concurrency = this.opts.concurrency;
        // Locks are renewed at half their duration unless configured otherwise.
        this.opts.lockRenewTime =
            this.opts.lockRenewTime || this.opts.lockDuration / 2;
        this.id = (0, uuid_1.v4)();
        this.createLockManager();
        if (processor) {
            if (typeof processor === 'function') {
                this.processFn = processor;
                // Check if processor accepts signal parameter (3rd parameter)
                this.processorAcceptsSignal = processor.length >= 3;
            }
            else {
                // SANDBOXED: processor is a file path or URL run out of process.
                if (processor instanceof url_1.URL) {
                    if (!fs.existsSync(processor)) {
                        throw new Error(`URL ${processor} does not exist in the local file system`);
                    }
                    processor = processor.href;
                }
                else {
                    // Append a .js extension when none of the supported ones is present.
                    const supportedFileTypes = ['.js', '.ts', '.flow', '.cjs', '.mjs'];
                    const processorFile = processor +
                        (supportedFileTypes.includes(path.extname(processor)) ? '' : '.js');
                    if (!fs.existsSync(processorFile)) {
                        throw new Error(`File ${processorFile} does not exist`);
                    }
                }
                // Separate paths so that bundling tools can resolve dependencies easier
                const dirname = path.dirname(module.filename || __filename);
                const workerThreadsMainFile = path.join(dirname, 'main-worker.js');
                const spawnProcessMainFile = path.join(dirname, 'main.js');
                let mainFilePath = this.opts.useWorkerThreads
                    ? workerThreadsMainFile
                    : spawnProcessMainFile;
                try {
                    fs.statSync(mainFilePath); // would throw if file not exists
                }
                catch (_) {
                    // Fallback for bundled builds where the main file lives under dist/.
                    const mainFile = this.opts.useWorkerThreads
                        ? 'main-worker.js'
                        : 'main.js';
                    mainFilePath = path.join(process.cwd(), `dist/cjs/classes/${mainFile}`);
                    fs.statSync(mainFilePath);
                }
                this.childPool = new child_pool_1.ChildPool({
                    mainFile: mainFilePath,
                    useWorkerThreads: this.opts.useWorkerThreads,
                    workerForkOptions: this.opts.workerForkOptions,
                    workerThreadsOptions: this.opts.workerThreadsOptions,
                });
                this.createSandbox(processor);
                // Sandboxed processors always receive an abort signal.
                this.processorAcceptsSignal = true;
            }
            if (this.opts.autorun) {
                this.run().catch(error => this.emit('error', error));
            }
        }
        const connectionName = this.clientName() + (this.opts.name ? `:w:${this.opts.name}` : '');
        // Dedicated (never shared) blocking connection used for BZPOPMIN waits.
        // An existing Redis instance is duplicated so blocking calls cannot
        // starve the main connection.
        this.blockingConnection = new redis_connection_1.RedisConnection((0, utils_1.isRedisInstance)(opts.connection)
            ? opts.connection.isCluster
                ? opts.connection.duplicate(undefined, {
                    redisOptions: Object.assign(Object.assign({}, (((_a = opts.connection.options) === null || _a === void 0 ? void 0 : _a.redisOptions) || {})), { connectionName }),
                })
                : opts.connection.duplicate({ connectionName })
            : Object.assign(Object.assign({}, opts.connection), { connectionName }), {
            shared: false,
            blocking: true,
            skipVersionCheck: opts.skipVersionCheck,
        });
        this.blockingConnection.on('error', error => this.emit('error', error));
        // 'ready' is deferred a tick so listeners attached right after
        // construction still observe it.
        this.blockingConnection.on('ready', () => setTimeout(() => this.emit('ready'), 0));
    }
  135. /**
  136. * Creates and configures the lock manager for processing jobs.
  137. * This method can be overridden in subclasses to customize lock manager behavior.
  138. */
  139. createLockManager() {
  140. this.lockManager = new lock_manager_1.LockManager(this, {
  141. lockRenewTime: this.opts.lockRenewTime,
  142. lockDuration: this.opts.lockDuration,
  143. workerId: this.id,
  144. workerName: this.opts.name,
  145. });
  146. }
  147. /**
  148. * Creates and configures the sandbox for processing jobs.
  149. * This method can be overridden in subclasses to customize sandbox behavior.
  150. *
  151. * @param processor - The processor file path, URL, or function to be sandboxed
  152. */
  153. createSandbox(processor) {
  154. this.processFn = (0, sandbox_1.default)(processor, this.childPool).bind(this);
  155. }
  156. /**
  157. * Public accessor method for LockManager to extend locks.
  158. * This delegates to the protected scripts object.
  159. */
  160. async extendJobLocks(jobIds, tokens, duration) {
  161. return this.scripts.extendLocks(jobIds, tokens, duration);
  162. }
    // Typed EventEmitter pass-throughs: `off`/`on`/`once` return `this` so the
    // worker keeps a fluent chaining interface.
    emit(event, ...args) {
        return super.emit(event, ...args);
    }
    off(eventName, listener) {
        super.off(eventName, listener);
        return this;
    }
    on(event, listener) {
        super.on(event, listener);
        return this;
    }
    once(event, listener) {
        super.once(event, listener);
        return this;
    }
    // Invokes the configured processor; `signal` is only meaningful when the
    // processor accepts a third (abort signal) parameter.
    callProcessJob(job, token, signal) {
        return this.processFn(job, token, signal);
    }
    // Re-hydrates a Job instance from its raw Redis representation.
    createJob(data, jobId) {
        return this.Job.fromJSON(this, data, jobId);
    }
  184. /**
  185. *
  186. * Waits until the worker is ready to start processing jobs.
  187. * In general only useful when writing tests.
  188. *
  189. */
  190. async waitUntilReady() {
  191. await super.waitUntilReady();
  192. return this.blockingConnection.client;
  193. }
    /**
     * Cancels a specific job currently being processed by this worker.
     * The job's processor function will receive an abort signal.
     *
     * @param jobId - The ID of the job to cancel
     * @param reason - Optional reason for the cancellation
     * @returns true if the job was found and cancelled, false otherwise
     */
    cancelJob(jobId, reason) {
        // Delegates to the LockManager, which owns the per-job AbortControllers.
        return this.lockManager.cancelJob(jobId, reason);
    }
    /**
     * Cancels all jobs currently being processed by this worker.
     * All active job processor functions will receive abort signals.
     *
     * @param reason - Optional reason for the cancellation
     */
    cancelAllJobs(reason) {
        this.lockManager.cancelAllJobs(reason);
    }
  214. set concurrency(concurrency) {
  215. if (typeof concurrency !== 'number' ||
  216. concurrency < 1 ||
  217. !isFinite(concurrency)) {
  218. throw new Error('concurrency must be a finite number greater than 0');
  219. }
  220. this._concurrency = concurrency;
  221. }
  222. get concurrency() {
  223. return this._concurrency;
  224. }
  225. get repeat() {
  226. return new Promise(async (resolve) => {
  227. if (!this._repeat) {
  228. const connection = await this.client;
  229. this._repeat = new repeat_1.Repeat(this.name, Object.assign(Object.assign({}, this.opts), { connection }));
  230. this._repeat.on('error', this.emit.bind(this, 'error'));
  231. }
  232. resolve(this._repeat);
  233. });
  234. }
  235. get jobScheduler() {
  236. return new Promise(async (resolve) => {
  237. if (!this._jobScheduler) {
  238. const connection = await this.client;
  239. this._jobScheduler = new job_scheduler_1.JobScheduler(this.name, Object.assign(Object.assign({}, this.opts), { connection }));
  240. this._jobScheduler.on('error', this.emit.bind(this, 'error'));
  241. }
  242. resolve(this._jobScheduler);
  243. });
  244. }
    /**
     * Starts the worker's main processing loop. The returned promise resolves
     * when the loop exits (close or pause).
     *
     * @throws if no processor was configured or the worker is already running.
     */
    async run() {
        if (!this.processFn) {
            throw new Error('No process function is defined.');
        }
        if (this.running) {
            throw new Error('Worker is already running.');
        }
        try {
            this.running = true;
            if (this.closing || this.paused) {
                return;
            }
            await this.startStalledCheckTimer();
            // Lock renewal keeps active jobs from being considered stalled.
            if (!this.opts.skipLockRenewal) {
                this.lockManager.start();
            }
            const client = await this.client;
            const bclient = await this.blockingConnection.client;
            this.mainLoopRunning = this.mainLoop(client, bclient);
            // We must await here or finally will be called too early.
            await this.mainLoopRunning;
        }
        finally {
            this.running = false;
        }
    }
    /**
     * If a rate limit is currently active, sleeps until it expires (the delay
     * is abortable so new work can interrupt it), then clears the limit state.
     */
    async waitForRateLimit() {
        var _a;
        const limitUntil = this.limitUntil;
        if (limitUntil > Date.now()) {
            // Abort any previous pending delay before starting a new one.
            (_a = this.abortDelayController) === null || _a === void 0 ? void 0 : _a.abort();
            this.abortDelayController = new abort_controller_1.AbortController();
            const delay = this.getRateLimitDelay(limitUntil - Date.now());
            await this.delay(delay, this.abortDelayController);
            // Reset state only after the wait completes so fetching resumes.
            this.drained = false;
            this.limitUntil = 0;
        }
    }
    /**
     * This is the main loop in BullMQ. Its goals are to fetch jobs from the queue
     * as efficiently as possible, providing concurrency and minimal unnecessary calls
     * to Redis.
     *
     * @param client - main Redis client.
     * @param bclient - dedicated blocking Redis client.
     */
    async mainLoop(client, bclient) {
        const asyncFifoQueue = new async_fifo_queue_1.AsyncFifoQueue();
        let tokenPostfix = 0;
        // Keep looping while active; once closing/paused, drain in-flight work.
        while ((!this.closing && !this.paused) || asyncFifoQueue.numTotal() > 0) {
            /**
             * This inner loop tries to fetch jobs concurrently, but if we are waiting for a job
             * to arrive at the queue we should not try to fetch more jobs (as it would be pointless)
             */
            while (!this.closing &&
                !this.paused &&
                !this.waiting &&
                asyncFifoQueue.numTotal() < this._concurrency &&
                !this.isRateLimited()) {
                // Each fetch gets a unique lock token: "<workerId>:<counter>".
                const token = `${this.id}:${tokenPostfix++}`;
                const fetchedJob = this.retryIfFailed(() => this._getNextJob(client, bclient, token, { block: true }), {
                    delayInMs: this.opts.runRetryDelay,
                    onlyEmitError: true,
                });
                asyncFifoQueue.add(fetchedJob);
                if (this.waiting && asyncFifoQueue.numTotal() > 1) {
                    // We are waiting for jobs but we have others that we could start processing already
                    break;
                }
                // We await here so that we fetch jobs in sequence, this is important to avoid unnecessary calls
                // to Redis in high concurrency scenarios.
                const job = await fetchedJob;
                // No more jobs waiting but we have others that could start processing already
                if (!job && asyncFifoQueue.numTotal() > 1) {
                    break;
                }
                // If there are potential jobs to be processed and blockUntil is set, we should exit to avoid waiting
                // for processing this job.
                if (this.blockUntil) {
                    break;
                }
            }
            // Since there can be undefined jobs in the queue (when a job fails or queue is empty)
            // we iterate until we find a job.
            let job;
            do {
                job = await asyncFifoQueue.fetch();
            } while (!job && asyncFifoQueue.numQueued() > 0);
            if (job) {
                const token = job.token;
                // processJob may fetch the next job atomically while capacity remains.
                asyncFifoQueue.add(this.processJob(job, token, () => asyncFifoQueue.numTotal() <= this._concurrency));
            }
            else if (asyncFifoQueue.numQueued() === 0) {
                // Nothing fetched and nothing pending: honor any active rate limit.
                await this.waitForRateLimit();
            }
        }
    }
    /**
     * Returns a promise that resolves to the next job in queue.
     * @param token - worker token to be assigned to retrieved job
     * @returns a Job or undefined if no job was available in the queue.
     */
    async getNextJob(token, { block = true } = {}) {
        var _a, _b;
        // NOTE(review): the job is fetched before the trace span opens, so the
        // span wraps only attribute recording, not the fetch itself — confirm
        // this ordering is intentional.
        const nextJob = await this._getNextJob(await this.client, await this.blockingConnection.client, token, { block });
        return this.trace(enums_1.SpanKind.INTERNAL, 'getNextJob', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.WorkerId]: this.id,
                [enums_1.TelemetryAttributes.QueueName]: this.name,
                [enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
                [enums_1.TelemetryAttributes.WorkerOptions]: JSON.stringify({ block }),
                [enums_1.TelemetryAttributes.JobId]: nextJob === null || nextJob === void 0 ? void 0 : nextJob.id,
            });
            return nextJob;
        }, (_b = (_a = nextJob === null || nextJob === void 0 ? void 0 : nextJob.opts) === null || _a === void 0 ? void 0 : _a.telemetry) === null || _b === void 0 ? void 0 : _b.metadata);
    }
    /**
     * Fetches the next job, blocking on the marker key when the queue has been
     * drained. Returns undefined when paused/closing or when nothing is ready.
     *
     * @param client - main Redis client used for moveToActive.
     * @param bclient - blocking Redis client used for the BZPOPMIN wait.
     * @param token - lock token to assign to the fetched job.
     */
    async _getNextJob(client, bclient, token, { block = true } = {}) {
        if (this.paused) {
            return;
        }
        if (this.closing) {
            return;
        }
        let job;
        // Only block when drained, not rate limited, and no other fetch is
        // already blocking on this connection.
        if (this.drained && block && !this.limitUntil && !this.waiting) {
            this.waiting = this.waitForJob(bclient, this.blockUntil);
            try {
                this.blockUntil = await this.waiting;
                // A marker with a past (or zero) timestamp means a job is ready now.
                if (this.blockUntil <= 0 || this.blockUntil - Date.now() < 1) {
                    job = await this.moveToActive(client, token, this.opts.name);
                }
            }
            finally {
                // Always release the waiting slot, even if the wait failed.
                this.waiting = null;
            }
        }
        else {
            if (!this.isRateLimited()) {
                job = await this.moveToActive(client, token, this.opts.name);
            }
        }
        if (job) {
            this.emit('active', job, 'waiting');
        }
        return job;
    }
  388. /**
  389. * Overrides the rate limit to be active for the next jobs.
  390. * @deprecated This method is deprecated and will be removed in v6. Use queue.rateLimit method instead.
  391. * @param expireTimeMs - expire time in ms of this rate limit.
  392. */
  393. async rateLimit(expireTimeMs) {
  394. await this.trace(enums_1.SpanKind.INTERNAL, 'rateLimit', this.name, async (span) => {
  395. span === null || span === void 0 ? void 0 : span.setAttributes({
  396. [enums_1.TelemetryAttributes.WorkerId]: this.id,
  397. [enums_1.TelemetryAttributes.WorkerRateLimit]: expireTimeMs,
  398. });
  399. await this.client.then(client => client.set(this.keys.limiter, Number.MAX_SAFE_INTEGER, 'PX', expireTimeMs));
  400. });
  401. }
  402. get minimumBlockTimeout() {
  403. return this.blockingConnection.capabilities.canBlockFor1Ms
  404. ? /* 1 millisecond is chosen because the granularity of our timestamps are milliseconds.
  405. Obviously we can still process much faster than 1 job per millisecond but delays and rate limits
  406. will never work with more accuracy than 1ms. */
  407. 0.001
  408. : 0.002;
  409. }
  410. isRateLimited() {
  411. return this.limitUntil > Date.now();
  412. }
  413. async moveToActive(client, token, name) {
  414. const [jobData, id, rateLimitDelay, delayUntil] = await this.scripts.moveToActive(client, token, name);
  415. this.updateDelays(rateLimitDelay, delayUntil);
  416. return this.nextJobFromJobData(jobData, id, token);
  417. }
  418. async waitForJob(bclient, blockUntil) {
  419. if (this.paused) {
  420. return Infinity;
  421. }
  422. let timeout;
  423. try {
  424. if (!this.closing && !this.isRateLimited()) {
  425. let blockTimeout = this.getBlockTimeout(blockUntil);
  426. if (blockTimeout > 0) {
  427. blockTimeout = this.blockingConnection.capabilities.canDoubleTimeout
  428. ? blockTimeout
  429. : Math.ceil(blockTimeout);
  430. // We cannot trust that the blocking connection stays blocking forever
  431. // due to issues in Redis and IORedis, so we will reconnect if we
  432. // don't get a response in the expected time.
  433. timeout = setTimeout(async () => {
  434. bclient.disconnect(!this.closing);
  435. }, blockTimeout * 1000 + 1000);
  436. this.updateDelays(); // reset delays to avoid reusing same values in next iteration
  437. // Markers should only be used for un-blocking, so we will handle them in this
  438. // function only.
  439. const result = await bclient.bzpopmin(this.keys.marker, blockTimeout);
  440. if (result) {
  441. const [_key, member, score] = result;
  442. if (member) {
  443. const newBlockUntil = parseInt(score);
  444. // Use by pro version as rate limited groups could generate lower blockUntil values
  445. // markers only return delays for delayed jobs
  446. if (blockUntil && newBlockUntil > blockUntil) {
  447. return blockUntil;
  448. }
  449. return newBlockUntil;
  450. }
  451. }
  452. }
  453. return 0;
  454. }
  455. }
  456. catch (error) {
  457. if ((0, utils_1.isNotConnectionError)(error)) {
  458. this.emit('error', error);
  459. }
  460. if (!this.closing) {
  461. await this.delay();
  462. }
  463. }
  464. finally {
  465. clearTimeout(timeout);
  466. }
  467. return Infinity;
  468. }
  469. getBlockTimeout(blockUntil) {
  470. const opts = this.opts;
  471. // when there are delayed jobs
  472. if (blockUntil) {
  473. const blockDelay = blockUntil - Date.now();
  474. // when we reach the time to get new jobs
  475. if (blockDelay <= 0) {
  476. return blockDelay;
  477. }
  478. else if (blockDelay < this.minimumBlockTimeout * 1000) {
  479. return this.minimumBlockTimeout;
  480. }
  481. else {
  482. // We restrict the maximum block timeout to 10 second to avoid
  483. // blocking the connection for too long in the case of reconnections
  484. // reference: https://github.com/taskforcesh/bullmq/issues/1658
  485. return Math.min(blockDelay / 1000, maximumBlockTimeout);
  486. }
  487. }
  488. else {
  489. return Math.max(opts.drainDelay, this.minimumBlockTimeout);
  490. }
  491. }
  492. getRateLimitDelay(delay) {
  493. // We restrict the maximum limit delay to the configured maximumRateLimitDelay
  494. // to be able to promote delayed jobs while the queue is rate limited
  495. return Math.min(delay, this.opts.maximumRateLimitDelay);
  496. }
  497. /**
  498. *
  499. * This function is exposed only for testing purposes.
  500. */
  501. async delay(milliseconds, abortController) {
  502. await (0, utils_1.delay)(milliseconds || utils_1.DELAY_TIME_1, abortController);
  503. }
  504. updateDelays(limitDelay = 0, delayUntil = 0) {
  505. const clampedLimit = Math.max(limitDelay, 0);
  506. if (clampedLimit > 0) {
  507. this.limitUntil = Date.now() + clampedLimit;
  508. }
  509. else {
  510. this.limitUntil = 0;
  511. }
  512. this.blockUntil = Math.max(delayUntil, 0) || 0;
  513. }
    /**
     * Converts a raw script reply into a Job instance; for repeatable jobs it
     * also schedules the next iteration. Emits 'drained' exactly once when the
     * queue becomes empty. Returns undefined when there is no job (or when
     * scheduling the next repeat iteration failed).
     */
    async nextJobFromJobData(jobData, jobId, token) {
        if (!jobData) {
            if (!this.drained) {
                this.emit('drained');
                this.drained = true;
            }
        }
        else {
            this.drained = false;
            const job = this.createJob(jobData, jobId);
            job.token = token;
            try {
                await this.retryIfFailed(async () => {
                    // Keys with fewer than 5 ':'-separated segments belong to the
                    // job scheduler; other repeatable jobs use the legacy Repeat path.
                    if (job.repeatJobKey && job.repeatJobKey.split(':').length < 5) {
                        const jobScheduler = await this.jobScheduler;
                        await jobScheduler.upsertJobScheduler(
                        // Most of these arguments are not really needed
                        // anymore as we read them from the job scheduler itself
                        job.repeatJobKey, job.opts.repeat, job.name, job.data, job.opts, { override: false, producerId: job.id });
                    }
                    else if (job.opts.repeat) {
                        const repeat = await this.repeat;
                        await repeat.updateRepeatableJob(job.name, job.data, job.opts, {
                            override: false,
                        });
                    }
                }, { delayInMs: this.opts.runRetryDelay });
            }
            catch (err) {
                // Emit error but don't throw to avoid breaking current job completion
                // Note: This means the next repeatable job will not be scheduled
                const errorMessage = err instanceof Error ? err.message : String(err);
                const schedulingError = new Error(`Failed to add repeatable job for next iteration: ${errorMessage}`);
                this.emit('error', schedulingError);
                // Return undefined to indicate no next job is available
                return undefined;
            }
            return job;
        }
    }
    /**
     * Processes a single job inside a telemetry span: registers it with the
     * lock manager (for lock renewal and cancellation), runs the processor,
     * and finalizes via handleCompleted/handleFailed, which may return the
     * next job fetched atomically.
     *
     * @param job - the job to process.
     * @param token - lock token owned by this worker for the job.
     * @param fetchNextCallback - gate deciding whether the finalize step may
     * also fetch the next job.
     */
    async processJob(job, token, fetchNextCallback = () => true) {
        var _a, _b;
        const srcPropagationMetadata = (_b = (_a = job.opts) === null || _a === void 0 ? void 0 : _a.telemetry) === null || _b === void 0 ? void 0 : _b.metadata;
        return this.trace(enums_1.SpanKind.CONSUMER, 'process', this.name, async (span) => {
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.WorkerId]: this.id,
                [enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
                [enums_1.TelemetryAttributes.JobId]: job.id,
                [enums_1.TelemetryAttributes.JobName]: job.name,
            });
            // trackJob returns an AbortController only when the processor
            // accepts a signal (processorAcceptsSignal).
            const abortController = this.lockManager.trackJob(job.id, token, job.processedOn, this.processorAcceptsSignal);
            try {
                // Jobs flagged unrecoverable are failed without running the processor.
                const unrecoverableErrorMessage = this.getUnrecoverableErrorMessage(job);
                if (unrecoverableErrorMessage) {
                    const failed = await this.retryIfFailed(() => {
                        this.lockManager.untrackJob(job.id);
                        return this.handleFailed(new errors_1.UnrecoverableError(unrecoverableErrorMessage), job, token, fetchNextCallback, span);
                    }, { delayInMs: this.opts.runRetryDelay, span });
                    return failed;
                }
                const result = await this.callProcessJob(job, token, abortController
                    ? abortController.signal
                    : undefined);
                return await this.retryIfFailed(() => {
                    this.lockManager.untrackJob(job.id);
                    return this.handleCompleted(result, job, token, fetchNextCallback, span);
                }, { delayInMs: this.opts.runRetryDelay, span });
            }
            catch (err) {
                const failed = await this.retryIfFailed(() => {
                    this.lockManager.untrackJob(job.id);
                    return this.handleFailed(err, job, token, fetchNextCallback, span);
                }, { delayInMs: this.opts.runRetryDelay, span, onlyEmitError: true });
                return failed;
            }
            finally {
                // Untrack again defensively and record finish timestamps.
                this.lockManager.untrackJob(job.id);
                const now = Date.now();
                span === null || span === void 0 ? void 0 : span.setAttributes({
                    [enums_1.TelemetryAttributes.JobFinishedTimestamp]: now,
                    [enums_1.TelemetryAttributes.JobAttemptFinishedTimestamp]: job.finishedOn || now,
                    [enums_1.TelemetryAttributes.JobProcessedTimestamp]: job.processedOn,
                });
            }
        }, srcPropagationMetadata);
    }
  600. getUnrecoverableErrorMessage(job) {
  601. if (job.deferredFailure) {
  602. return job.deferredFailure;
  603. }
  604. if (this.opts.maxStartedAttempts &&
  605. this.opts.maxStartedAttempts < job.attemptsStarted) {
  606. return 'job started more than allowable limit';
  607. }
  608. }
    /**
     * Moves a successfully processed job to completed, emits events and
     * telemetry and, when the atomic script also returned the next job,
     * feeds it back into the processing loop.
     */
    async handleCompleted(result, job, token, fetchNextCallback = () => true, span) {
        if (!this.connection.closing) {
            // The third argument asks the script to also fetch the next job,
            // unless closing/paused or the caller's callback vetoes it.
            const completed = await job.moveToCompleted(result, token, fetchNextCallback() && !(this.closing || this.paused));
            this.emit('completed', job, result, 'active');
            span === null || span === void 0 ? void 0 : span.addEvent('job completed', {
                [enums_1.TelemetryAttributes.JobResult]: JSON.stringify(result),
            });
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.JobAttemptsMade]: job.attemptsMade,
            });
            // An array reply carries the next job fetched atomically.
            if (Array.isArray(completed)) {
                const [jobData, jobId, rateLimitDelay, delayUntil] = completed;
                this.updateDelays(rateLimitDelay, delayUntil);
                return this.nextJobFromJobData(jobData, jobId, token);
            }
        }
    }
  626. async handleFailed(err, job, token, fetchNextCallback = () => true, span) {
  627. if (!this.connection.closing) {
  628. // Check if the job was manually rate-limited
  629. if (err.message === errors_1.RATE_LIMIT_ERROR) {
  630. const rateLimitTtl = await this.moveLimitedBackToWait(job, token);
  631. this.limitUntil = rateLimitTtl > 0 ? Date.now() + rateLimitTtl : 0;
  632. return;
  633. }
  634. if (err instanceof errors_1.DelayedError ||
  635. err.name == 'DelayedError' ||
  636. err instanceof errors_1.WaitingError ||
  637. err.name == 'WaitingError' ||
  638. err instanceof errors_1.WaitingChildrenError ||
  639. err.name == 'WaitingChildrenError') {
  640. const client = await this.client;
  641. return this.moveToActive(client, token, this.opts.name);
  642. }
  643. const result = await job.moveToFailed(err, token, fetchNextCallback() && !(this.closing || this.paused));
  644. this.emit('failed', job, err, 'active');
  645. span === null || span === void 0 ? void 0 : span.addEvent('job failed', {
  646. [enums_1.TelemetryAttributes.JobFailedReason]: err.message,
  647. });
  648. span === null || span === void 0 ? void 0 : span.setAttributes({
  649. [enums_1.TelemetryAttributes.JobAttemptsMade]: job.attemptsMade,
  650. });
  651. // Note: result can be undefined if moveToFailed fails (e.g., lock was lost)
  652. if (Array.isArray(result)) {
  653. const [jobData, jobId, rateLimitDelay, delayUntil] = result;
  654. this.updateDelays(rateLimitDelay, delayUntil);
  655. return this.nextJobFromJobData(jobData, jobId, token);
  656. }
  657. }
  658. }
    /**
     * Pauses the processing of this queue only for this worker.
     *
     * @param doNotWaitActive - when true, marks the worker paused without
     * waiting for currently active jobs to finish.
     */
    async pause(doNotWaitActive) {
        await this.trace(enums_1.SpanKind.INTERNAL, 'pause', this.name, async (span) => {
            var _a;
            span === null || span === void 0 ? void 0 : span.setAttributes({
                [enums_1.TelemetryAttributes.WorkerId]: this.id,
                [enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
                [enums_1.TelemetryAttributes.WorkerDoNotWaitActive]: doNotWaitActive,
            });
            if (!this.paused) {
                this.paused = true;
                if (!doNotWaitActive) {
                    await this.whenCurrentJobsFinished();
                }
                // Stop the stalled-jobs checker while paused (resume restarts it).
                (_a = this.stalledCheckStopper) === null || _a === void 0 ? void 0 : _a.call(this);
                this.emit('paused');
            }
        });
    }
    /**
     * Resumes processing of this worker (if paused).
     *
     * Note: this method is synchronous; the restarted main loop and stalled
     * checker run in the background and report failures via the 'error' event.
     */
    resume() {
        if (!this.running || this.paused) {
            this.trace(enums_1.SpanKind.INTERNAL, 'resume', this.name, span => {
                span === null || span === void 0 ? void 0 : span.setAttributes({
                    [enums_1.TelemetryAttributes.WorkerId]: this.id,
                    [enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
                });
                this.paused = false;
                if (!this.running) {
                    if (this.processFn) {
                        // NOTE(review): run() is intentionally not awaited here;
                        // its own autorun path attaches error handling — confirm.
                        this.run();
                    }
                }
                else {
                    // TODO: await for startStalledCheckTimer in next breaking change, that will convert resume method to async
                    // Main loop is still running (pause was called with doNotWaitActive=true).
                    // Restart the stalled checker since pause() stopped it.
                    void this.startStalledCheckTimer().catch(err => {
                        this.emit('error', err);
                    });
                }
                this.emit('resumed');
            }).catch(err => {
                this.emit('error', err);
            });
        }
    }
  712. /**
  713. *
  714. * Checks if worker is paused.
  715. *
  716. * @returns true if worker is paused, false otherwise.
  717. */
  718. isPaused() {
  719. return !!this.paused;
  720. }
  721. /**
  722. *
  723. * Checks if worker is currently running.
  724. *
  725. * @returns true if worker is running, false otherwise.
  726. */
  727. isRunning() {
  728. return this.running;
  729. }
    /**
     * Closes the worker and related redis connections.
     *
     * This method waits for current jobs to finalize before returning.
     *
     * @param force - Use force boolean parameter if you do not want to wait for
     * current jobs to be processed. When using telemetry, be mindful that it can
     * interfere with the proper closure of spans, potentially preventing them from being exported.
     *
     * @returns Promise that resolves when the worker has been closed.
     */
    async close(force = false) {
        // Idempotent: a second call returns the in-flight close promise.
        if (this.closing) {
            return this.closing;
        }
        this.closing = (async () => {
            await this.trace(enums_1.SpanKind.INTERNAL, 'close', this.name, async (span) => {
                var _a, _b;
                span === null || span === void 0 ? void 0 : span.setAttributes({
                    [enums_1.TelemetryAttributes.WorkerId]: this.id,
                    [enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
                    [enums_1.TelemetryAttributes.WorkerForceClose]: force,
                });
                this.emit('closing', 'closing queue');
                // Interrupt any pending rate-limit delay so the loop can exit.
                (_a = this.abortDelayController) === null || _a === void 0 ? void 0 : _a.abort();
                // Define the async cleanup functions
                const asyncCleanups = [
                    () => {
                        return force || this.whenCurrentJobsFinished(false);
                    },
                    () => this.lockManager.close(),
                    () => { var _a; return (_a = this.childPool) === null || _a === void 0 ? void 0 : _a.clean(); },
                    () => this.blockingConnection.close(force),
                    () => this.connection.close(force),
                ];
                // Run cleanup functions sequentially and make sure all are run despite any errors
                for (const cleanup of asyncCleanups) {
                    try {
                        await cleanup();
                    }
                    catch (err) {
                        this.emit('error', err);
                    }
                }
                (_b = this.stalledCheckStopper) === null || _b === void 0 ? void 0 : _b.call(this);
                this.closed = true;
                this.emit('closed');
            });
        })();
        return await this.closing;
    }
  782. /**
  783. *
  784. * Manually starts the stalled checker.
  785. * The check will run once as soon as this method is called, and
  786. * then every opts.stalledInterval milliseconds until the worker is closed.
  787. * Note: Normally you do not need to call this method, since the stalled checker
  788. * is automatically started when the worker starts processing jobs after
  789. * calling run. However if you want to process the jobs manually you need
  790. * to call this method to start the stalled checker.
  791. *
  792. * @see {@link https://docs.bullmq.io/patterns/manually-fetching-jobs}
  793. */
  794. async startStalledCheckTimer() {
  795. if (!this.opts.skipStalledCheck) {
  796. if (!this.closing && !this.stalledCheckerRunning) {
  797. await this.trace(enums_1.SpanKind.INTERNAL, 'startStalledCheckTimer', this.name, async (span) => {
  798. span === null || span === void 0 ? void 0 : span.setAttributes({
  799. [enums_1.TelemetryAttributes.WorkerId]: this.id,
  800. [enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
  801. });
  802. this.stalledCheckerRunning = true;
  803. this.stalledChecker()
  804. .catch(err => {
  805. this.emit('error', err);
  806. })
  807. .finally(() => {
  808. this.stalledCheckerRunning = false;
  809. });
  810. });
  811. }
  812. }
  813. }
  814. async stalledChecker() {
  815. while (!(this.closing || this.paused)) {
  816. await this.checkConnectionError(() => this.moveStalledJobsToWait());
  817. await new Promise(resolve => {
  818. const timeout = setTimeout(resolve, this.opts.stalledInterval);
  819. this.stalledCheckStopper = () => {
  820. clearTimeout(timeout);
  821. resolve();
  822. };
  823. });
  824. }
  825. }
  826. /**
  827. * Returns a promise that resolves when active jobs are cleared
  828. *
  829. * @returns
  830. */
  831. async whenCurrentJobsFinished(reconnect = true) {
  832. //
  833. // Force reconnection of blocking connection to abort blocking redis call immediately.
  834. //
  835. if (this.waiting) {
  836. // If we are not going to reconnect, we will not wait for the disconnection.
  837. await this.blockingConnection.disconnect(reconnect);
  838. }
  839. else {
  840. reconnect = false;
  841. }
  842. if (this.mainLoopRunning) {
  843. await this.mainLoopRunning;
  844. }
  845. reconnect && (await this.blockingConnection.reconnect());
  846. }
  847. async retryIfFailed(fn, opts) {
  848. var _a;
  849. let retry = 0;
  850. const maxRetries = opts.maxRetries || Infinity;
  851. do {
  852. try {
  853. return await fn();
  854. }
  855. catch (err) {
  856. (_a = opts.span) === null || _a === void 0 ? void 0 : _a.recordException(err.message);
  857. if ((0, utils_1.isNotConnectionError)(err)) {
  858. // Emit error when not paused or closing; optionally swallow (no throw) when opts.onlyEmitError is set.
  859. if (!this.paused && !this.closing) {
  860. this.emit('error', err);
  861. }
  862. if (opts.onlyEmitError) {
  863. return;
  864. }
  865. else {
  866. throw err;
  867. }
  868. }
  869. else {
  870. if (opts.delayInMs && !this.closing && !this.closed) {
  871. await this.delay(opts.delayInMs, this.abortDelayController);
  872. }
  873. if (retry + 1 >= maxRetries) {
  874. // If we've reached max retries, throw the last error
  875. throw err;
  876. }
  877. }
  878. }
  879. } while (++retry < maxRetries);
  880. }
  881. async moveStalledJobsToWait() {
  882. await this.trace(enums_1.SpanKind.INTERNAL, 'moveStalledJobsToWait', this.name, async (span) => {
  883. const stalled = await this.scripts.moveStalledJobsToWait();
  884. span === null || span === void 0 ? void 0 : span.setAttributes({
  885. [enums_1.TelemetryAttributes.WorkerId]: this.id,
  886. [enums_1.TelemetryAttributes.WorkerName]: this.opts.name,
  887. [enums_1.TelemetryAttributes.WorkerStalledJobs]: stalled,
  888. });
  889. stalled.forEach((jobId) => {
  890. span === null || span === void 0 ? void 0 : span.addEvent('job stalled', {
  891. [enums_1.TelemetryAttributes.JobId]: jobId,
  892. });
  893. this.emit('stalled', jobId, 'active');
  894. });
  895. });
  896. }
  897. moveLimitedBackToWait(job, token) {
  898. return job.moveToWait(token);
  899. }
  900. }
  901. exports.Worker = Worker;
  902. //# sourceMappingURL=worker.js.map