sidekiq-ts 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86) hide show
  1. package/README.md +686 -0
  2. package/dist/api.d.ts +172 -0
  3. package/dist/api.d.ts.map +1 -0
  4. package/dist/api.js +679 -0
  5. package/dist/backtrace.d.ts +3 -0
  6. package/dist/backtrace.d.ts.map +1 -0
  7. package/dist/backtrace.js +16 -0
  8. package/dist/cli-helpers.d.ts +22 -0
  9. package/dist/cli-helpers.d.ts.map +1 -0
  10. package/dist/cli-helpers.js +152 -0
  11. package/dist/cli.d.ts +3 -0
  12. package/dist/cli.d.ts.map +1 -0
  13. package/dist/cli.js +143 -0
  14. package/dist/client.d.ts +25 -0
  15. package/dist/client.d.ts.map +1 -0
  16. package/dist/client.js +212 -0
  17. package/dist/config-loader.d.ts +16 -0
  18. package/dist/config-loader.d.ts.map +1 -0
  19. package/dist/config-loader.js +37 -0
  20. package/dist/config.d.ts +59 -0
  21. package/dist/config.d.ts.map +1 -0
  22. package/dist/config.js +155 -0
  23. package/dist/context.d.ts +10 -0
  24. package/dist/context.d.ts.map +1 -0
  25. package/dist/context.js +29 -0
  26. package/dist/cron.d.ts +44 -0
  27. package/dist/cron.d.ts.map +1 -0
  28. package/dist/cron.js +173 -0
  29. package/dist/index.d.ts +16 -0
  30. package/dist/index.d.ts.map +1 -0
  31. package/dist/index.js +14 -0
  32. package/dist/interrupt-handler.d.ts +8 -0
  33. package/dist/interrupt-handler.d.ts.map +1 -0
  34. package/dist/interrupt-handler.js +24 -0
  35. package/dist/iterable-constants.d.ts +3 -0
  36. package/dist/iterable-constants.d.ts.map +1 -0
  37. package/dist/iterable-constants.js +2 -0
  38. package/dist/iterable-errors.d.ts +10 -0
  39. package/dist/iterable-errors.d.ts.map +1 -0
  40. package/dist/iterable-errors.js +18 -0
  41. package/dist/iterable.d.ts +44 -0
  42. package/dist/iterable.d.ts.map +1 -0
  43. package/dist/iterable.js +298 -0
  44. package/dist/job-logger.d.ts +12 -0
  45. package/dist/job-logger.d.ts.map +1 -0
  46. package/dist/job-logger.js +64 -0
  47. package/dist/job-util.d.ts +8 -0
  48. package/dist/job-util.d.ts.map +1 -0
  49. package/dist/job-util.js +158 -0
  50. package/dist/job.d.ts +73 -0
  51. package/dist/job.d.ts.map +1 -0
  52. package/dist/job.js +200 -0
  53. package/dist/json.d.ts +3 -0
  54. package/dist/json.d.ts.map +1 -0
  55. package/dist/json.js +2 -0
  56. package/dist/leader.d.ts +63 -0
  57. package/dist/leader.d.ts.map +1 -0
  58. package/dist/leader.js +193 -0
  59. package/dist/logger.d.ts +53 -0
  60. package/dist/logger.d.ts.map +1 -0
  61. package/dist/logger.js +143 -0
  62. package/dist/middleware.d.ts +23 -0
  63. package/dist/middleware.d.ts.map +1 -0
  64. package/dist/middleware.js +92 -0
  65. package/dist/periodic.d.ts +80 -0
  66. package/dist/periodic.d.ts.map +1 -0
  67. package/dist/periodic.js +205 -0
  68. package/dist/redis.d.ts +3 -0
  69. package/dist/redis.d.ts.map +1 -0
  70. package/dist/redis.js +1 -0
  71. package/dist/registry.d.ts +11 -0
  72. package/dist/registry.d.ts.map +1 -0
  73. package/dist/registry.js +8 -0
  74. package/dist/runner.d.ts +81 -0
  75. package/dist/runner.d.ts.map +1 -0
  76. package/dist/runner.js +791 -0
  77. package/dist/sidekiq.d.ts +43 -0
  78. package/dist/sidekiq.d.ts.map +1 -0
  79. package/dist/sidekiq.js +189 -0
  80. package/dist/testing.d.ts +32 -0
  81. package/dist/testing.d.ts.map +1 -0
  82. package/dist/testing.js +112 -0
  83. package/dist/types.d.ts +116 -0
  84. package/dist/types.d.ts.map +1 -0
  85. package/dist/types.js +1 -0
  86. package/package.json +42 -0
package/dist/runner.js ADDED
@@ -0,0 +1,791 @@
1
+ import { hostname } from "node:os";
2
+ import { compressBacktrace, extractBacktrace } from "./backtrace.js";
3
+ import { Client } from "./client.js";
4
+ import { ensureInterruptHandler } from "./interrupt-handler.js";
5
+ import { JobSkipError } from "./iterable-errors.js";
6
+ import { dumpJson, loadJson } from "./json.js";
7
+ import { LeaderElector } from "./leader.js";
8
+ import { PeriodicScheduler } from "./periodic.js";
9
+ import { resolveJob } from "./registry.js";
10
// How long a blocking BRPOP fetch waits before returning empty-handed.
const FETCH_TIMEOUT_SECONDS = 2;
// Daily stat counters expire after ~5 years.
const STATS_TTL_SECONDS = 5 * 365 * 24 * 60 * 60;
// Extra delay before the first scheduled-set poll so heartbeats can register.
const INITIAL_WAIT_SECONDS = 10;
// Atomically pop at most one due job (score <= now) from a sorted set.
// NOTE(review): ZRANGE ... BYSCORE LIMIT requires Redis >= 6.2 — confirm
// the minimum supported Redis version.
const LUA_ZPOPBYSCORE = `
local key, now = KEYS[1], ARGV[1]
local jobs = redis.call("zrange", key, "-inf", now, "byscore", "limit", 0, 1)
if jobs[1] then
redis.call("zrem", key, jobs[1])
return jobs[1]
end
`;
21
// Resolve after `ms` milliseconds.
function sleep(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}
22
/**
 * Normalize queue configuration into a weighted fetch list.
 *
 * Accepts entries as ["name", weight] tuples or "name,weight" strings; a
 * missing weight counts as 0. Each queue is repeated once per weight unit
 * (minimum once) so shuffling the list yields weighted selection.
 *
 * Modes: all weights 0 -> "strict" (poll in declared order); all 1 ->
 * "random"; anything else -> "weighted".
 *
 * The original duplicated the weight/list bookkeeping verbatim in both
 * branches; the two entry shapes now reduce to one (name, weight) pair.
 */
const parseQueues = (queues) => {
  const weights = new Map();
  const list = [];
  for (const entry of queues) {
    let name;
    let weightValue;
    if (Array.isArray(entry)) {
      name = entry[0];
      weightValue = Number(entry[1] ?? 0);
    } else {
      const [queueName, weightString] = entry.split(",", 2);
      name = queueName;
      weightValue = Number(weightString ?? 0);
    }
    weights.set(name, weightValue);
    const count = Math.max(weightValue, 1);
    for (let i = 0; i < count; i += 1) {
      list.push(name);
    }
  }
  const allWeights = Array.from(weights.values());
  let mode;
  if (allWeights.every((weight) => weight === 0)) {
    mode = "strict";
  } else if (allWeights.every((weight) => weight === 1)) {
    mode = "random";
  } else {
    mode = "weighted";
  }
  return { list, mode };
};
58
// Fisher–Yates shuffle; returns a new array and leaves the input untouched.
const shuffle = (values) => {
  const result = values.slice();
  for (let index = result.length - 1; index > 0; index -= 1) {
    const swapWith = Math.floor(Math.random() * (index + 1));
    const tmp = result[index];
    result[index] = result[swapWith];
    result[swapWith] = tmp;
  }
  return result;
};
66
// De-duplicate while preserving first-seen order. Set iteration order is
// insertion order, so spreading a Set replaces the hand-rolled seen-set loop.
const unique = (values) => [...new Set(values)];
77
/**
 * Decides, per fetch, which queue keys to hand to BRPOP and in what order.
 * "strict" mode keeps the declared order; otherwise the weighted list is
 * shuffled then de-duplicated so higher-weight queues tend to come first.
 */
class QueueStrategy {
  list;
  mode;
  constructor(queues) {
    const { list, mode } = parseQueues(queues);
    this.list = list;
    this.mode = mode;
  }
  queueKeys() {
    if (this.list.length === 0) {
      return [];
    }
    const ordered = this.mode === "strict" ? this.list : unique(shuffle(this.list));
    return ordered.map((queue) => `queue:${queue}`);
  }
}
96
+ export class Runner {
97
// --- runtime state -------------------------------------------------------
config;
quieting = false; // stop fetching new work; in-flight jobs keep running
stopping = false; // full shutdown in progress
workers = []; // one workLoop() promise per concurrency slot
schedulerHandle; // NOTE(review): never assigned in this file; see stopScheduler()
schedulerRunning = false;
heartbeatHandle; // setInterval handle for periodic heartbeats
queueStrategy;
baseRedis; // shared connection for heartbeats/stats/scheduling
workerRedis = []; // dedicated connection per worker (BRPOP blocks its connection)
identity; // "<hostname>:<pid>" — this process's key prefix in Redis
startedAt; // epoch seconds
workState = new Map(); // workerId -> { queue, payload, runAt } for reporting
inProgress = new Map(); // workerId -> { queue, payload } for requeue on shutdown
lastCleanupAt = 0; // ms timestamp of the last dead-process sweep
rttReadings = []; // rolling window of Redis RTT samples (microseconds)
jobLogger;
leaderElector;
_periodicScheduler;
/**
 * @param config - fully-resolved runner configuration (queues, concurrency,
 *   Redis factory, middleware, lifecycle/error handlers).
 */
constructor(config) {
  this.config = config;
  this.queueStrategy = new QueueStrategy(config.queues);
  this.startedAt = Date.now() / 1000;
  this.identity = `${hostname()}:${process.pid}`;
  this.jobLogger = config.jobLogger;
}
123
/**
 * Boot the runner: connect Redis, start leader election, the periodic
 * (cron) scheduler, heartbeats, the scheduled/retry poller, and one worker
 * loop per concurrency slot. Fires the "startup" lifecycle event last.
 */
async start() {
  this.baseRedis = await this.config.getRedisClient();
  ensureInterruptHandler(this.config);
  // Start leader election
  this.leaderElector = new LeaderElector(this.config, {
    identity: this.identity,
    ...this.config.leaderElection,
  });
  await this.leaderElector.start();
  // Start periodic scheduler
  this._periodicScheduler = new PeriodicScheduler(this.config, this.leaderElector);
  await this._periodicScheduler.start();
  // Publish an immediate heartbeat so this process is visible right away,
  // then keep beating on the configured interval.
  await this.heartbeat();
  this.startHeartbeat();
  this.startScheduler();
  // Each worker gets its own duplicated connection because the blocking
  // BRPOP fetch ties up the connection it runs on.
  for (let i = 0; i < this.config.concurrency; i += 1) {
    const client = this.baseRedis.duplicate();
    await client.connect();
    this.workerRedis[i] = client;
    this.workers.push(this.workLoop(i));
  }
  await this.config.fireEvent("startup");
}
146
/**
 * Enter "quiet" mode: worker loops stop fetching new jobs but in-flight
 * jobs run to completion. Fires the "quiet" lifecycle event.
 */
async quiet() {
  this.quieting = true;
  await this.config.fireEvent("quiet");
}
150
/**
 * Graceful shutdown: quiet first, fire "shutdown", stop background tasks,
 * give in-flight jobs up to config.timeout seconds to drain, requeue any
 * stragglers, deregister from Redis, close worker connections, then fire
 * "exit". Statement order matters: requeue must happen before connections
 * are closed.
 */
async stop() {
  if (!this.quieting) {
    await this.quiet();
  }
  this.stopping = true;
  await this.config.fireEvent("shutdown", { reverse: true });
  // Stop periodic scheduler and leader election
  await this._periodicScheduler?.stop();
  await this.leaderElector?.stop();
  this.stopHeartbeat();
  this.stopScheduler();
  const deadline = Date.now() + this.config.timeout * 1000;
  await this.waitForDrain(deadline);
  // Anything still running past the deadline goes back onto its queue so
  // another process can pick it up (at-least-once delivery).
  if (this.inProgress.size > 0) {
    await this.requeueInProgress();
  }
  await this.waitForWorkers(deadline);
  await this.clearHeartbeat();
  await Promise.all(this.workerRedis.map(async (client) => {
    if (client.isOpen) {
      await client.quit();
    }
  }));
  await this.config.fireEvent("exit", { reverse: true });
}
175
+ /**
176
+ * Returns true if this process is currently the leader.
177
+ */
178
+ leader() {
179
+ return this.leaderElector?.leader() ?? false;
180
+ }
181
/**
 * Get the periodic scheduler for registering cron jobs.
 * Undefined until start() has been called.
 */
get periodicScheduler() {
  return this._periodicScheduler;
}
187
+ snapshotWork() {
188
+ const now = Date.now();
189
+ const entries = [];
190
+ for (const [workerId, value] of this.workState.entries()) {
191
+ let payload;
192
+ try {
193
+ payload = loadJson(value.payload);
194
+ }
195
+ catch {
196
+ payload = undefined;
197
+ }
198
+ entries.push({
199
+ workerId,
200
+ queue: value.queue,
201
+ payloadRaw: value.payload,
202
+ payload,
203
+ runAt: value.runAt,
204
+ elapsed: (now - value.runAt) / 1000,
205
+ });
206
+ }
207
+ return entries;
208
+ }
209
+ startScheduler() {
210
+ this.schedulerRunning = true;
211
+ this.runSchedulerLoop().catch(() => undefined);
212
+ }
213
/**
 * Poll the "schedule" and "retry" sorted sets for due jobs until stopped.
 * A failing poll is swallowed so one bad cycle cannot kill the loop.
 */
async runSchedulerLoop() {
  // Initial wait: give time for heartbeats to register so process count is accurate
  await this.initialWait();
  while (this.schedulerRunning && !this.stopping) {
    await this.enqueueScheduled().catch(() => undefined);
    const intervalMs = await this.randomPollInterval();
    await sleep(intervalMs);
  }
}
222
+ async initialWait() {
223
+ // Wait 10-15 seconds before first poll to let heartbeats register
224
+ // and to avoid thundering herd on restart
225
+ const useDynamicInterval = this.config.pollIntervalAverage === null;
226
+ let waitSeconds = 0;
227
+ if (useDynamicInterval) {
228
+ waitSeconds += INITIAL_WAIT_SECONDS;
229
+ }
230
+ waitSeconds += Math.random() * 5; // 0-5 seconds jitter
231
+ if (waitSeconds > 0) {
232
+ await sleep(waitSeconds * 1000);
233
+ }
234
+ // Run cleanup after initial wait
235
+ const redis = this.baseRedis ?? (await this.config.getRedisClient());
236
+ await this.cleanupProcesses(redis);
237
+ }
238
+ async processCount() {
239
+ const redis = this.baseRedis ?? (await this.config.getRedisClient());
240
+ const count = await redis.sCard("processes");
241
+ return count === 0 ? 1 : count;
242
+ }
243
/**
 * Desired poll interval (seconds) scaled by cluster size: with N processes
 * each polling N times less often, the cluster-wide poll rate stays constant.
 */
scaledPollInterval(processCount) {
  return processCount * this.config.averageScheduledPollInterval;
}
246
+ async randomPollInterval() {
247
+ // If user set a fixed poll interval, use it directly
248
+ if (this.config.pollIntervalAverage !== null) {
249
+ return this.config.pollIntervalAverage * 1000;
250
+ }
251
+ const count = await this.processCount();
252
+ const interval = this.scaledPollInterval(count);
253
+ let intervalSeconds;
254
+ if (count < 10) {
255
+ // For small clusters, calculate a random interval that is ±50% the desired average
256
+ intervalSeconds = interval * Math.random() + interval / 2;
257
+ }
258
+ else {
259
+ // With 10+ processes, we have enough randomness for decent polling spread
260
+ intervalSeconds = interval * Math.random() * 2;
261
+ }
262
+ return intervalSeconds * 1000;
263
+ }
264
/** Stop the scheduled/retry poller loop. */
stopScheduler() {
  this.schedulerRunning = false;
  // NOTE(review): schedulerHandle is never assigned anywhere in this file —
  // the scheduler runs as an async loop, not a setInterval — so this branch
  // appears to be dead code; confirm before removing.
  if (this.schedulerHandle) {
    clearInterval(this.schedulerHandle);
    this.schedulerHandle = undefined;
  }
}
271
/**
 * Publish this process's liveness and work state to Redis and poll for
 * remote control signals. Writes a "processes" set member, a per-process
 * hash (info/busy/beat/quiet/rtt_us/rss) and a "<identity>:work" hash of
 * in-flight jobs, each with a 60s TTL so a crashed process ages out on its
 * own. Errors are logged and swallowed — a failed beat must not kill the
 * caller.
 */
async heartbeat() {
  if (this.stopping) {
    return;
  }
  const redis = this.baseRedis ?? (await this.config.getRedisClient());
  const now = Date.now() / 1000;
  const info = this.processInfo();
  const rssKb = Math.round(process.memoryUsage().rss / 1024);
  const workKey = `${this.identity}:work`;
  const workEntries = {};
  for (const [key, value] of this.workState.entries()) {
    workEntries[key] = dumpJson({
      queue: value.queue,
      payload: value.payload,
      run_at: Math.floor(value.runAt / 1000), // epoch seconds; runAt is ms
    });
  }
  try {
    await this.cleanupProcesses(redis);
    const rttUs = await this.checkRtt(redis);
    const pipeline = redis.multi();
    // Replace (not merge) the work hash so finished jobs drop out.
    pipeline.unlink(workKey);
    if (Object.keys(workEntries).length > 0) {
      pipeline.hSet(workKey, workEntries);
      pipeline.expire(workKey, 60);
    }
    pipeline.sAdd("processes", [this.identity]);
    pipeline.hSet(this.identity, {
      info: dumpJson(info),
      busy: String(this.workState.size),
      beat: String(now),
      quiet: String(this.quieting),
      rtt_us: String(rttUs),
      rss: String(rssKb),
    });
    pipeline.expire(this.identity, 60);
    pipeline.rPop(`${this.identity}-signals`);
    const result = await pipeline.exec();
    // Check for remote signals (last result from pipeline)
    const signalResult = result?.[result.length - 1];
    if (signalResult && typeof signalResult === "string") {
      await this.handleSignal(signalResult);
    }
    // Fire heartbeat/beat events (not oneshot - they fire repeatedly)
    await this.config.fireEvent("heartbeat", { oneshot: false });
    await this.config.fireEvent("beat", { oneshot: false });
  }
  catch (error) {
    this.config.logger.error(() => `heartbeat: ${error instanceof Error ? error.message : String(error)}`);
  }
}
322
+ async handleSignal(signal) {
323
+ this.config.logger.info(() => `Received remote signal: ${signal}`);
324
+ switch (signal) {
325
+ case "TSTP":
326
+ await this.quiet();
327
+ break;
328
+ case "TERM":
329
+ // Trigger stop - this will be picked up by the main loop
330
+ this.stopping = true;
331
+ this.quieting = true;
332
+ await this.config.fireEvent("shutdown", { reverse: true });
333
+ break;
334
+ case "TTIN":
335
+ this.dumpWorkState();
336
+ break;
337
+ default:
338
+ this.config.logger.warn(() => `Unknown signal: ${signal}`);
339
+ }
340
+ }
341
+ dumpWorkState() {
342
+ const snapshot = this.snapshotWork();
343
+ if (snapshot.length === 0) {
344
+ this.config.logger.info(() => "No active workers");
345
+ return;
346
+ }
347
+ this.config.logger.info(() => `Active workers: ${snapshot.length}`);
348
+ for (const work of snapshot) {
349
+ this.config.logger.info(() => ` ${work.workerId}: queue=${work.queue} ` +
350
+ `jid=${work.payload?.jid ?? "unknown"} ` +
351
+ `class=${work.payload?.class ?? "unknown"} ` +
352
+ `elapsed=${work.elapsed.toFixed(1)}s`);
353
+ }
354
+ }
355
+ async clearHeartbeat() {
356
+ const redis = this.baseRedis ?? (await this.config.getRedisClient());
357
+ try {
358
+ const pipeline = redis.multi();
359
+ pipeline.sRem("processes", [this.identity]);
360
+ pipeline.unlink(`${this.identity}:work`);
361
+ pipeline.unlink(this.identity);
362
+ await pipeline.exec();
363
+ }
364
+ catch {
365
+ // best effort
366
+ }
367
+ }
368
/**
 * Static process metadata serialized into the per-process hash's "info"
 * field. snake_case keys presumably mirror Ruby Sidekiq's process schema —
 * do not rename or reorder (key order affects the serialized JSON).
 */
processInfo() {
  return {
    hostname: hostname(),
    started_at: this.startedAt, // epoch seconds
    pid: process.pid,
    tag: this.config.tag,
    concurrency: this.config.concurrency,
    queues: this.config.queueNames(),
    labels: this.config.labels,
    identity: this.identity,
    embedded: false, // this runner always executes as a standalone process
  };
}
381
+ async checkRtt(redis) {
382
+ const start = process.hrtime.bigint();
383
+ await redis.ping();
384
+ const end = process.hrtime.bigint();
385
+ const rtt = Number((end - start) / 1000n);
386
+ this.recordRtt(rtt);
387
+ return rtt;
388
+ }
389
/**
 * Keep a rolling window of RTT samples and warn only when EVERY sample in a
 * full window exceeds the threshold — sustained latency, not a one-off
 * spike.
 */
recordRtt(rtt) {
  const MAX_READINGS = 5;
  const WARNING_LEVEL = 50_000; // microseconds (50ms)
  this.rttReadings.push(rtt);
  if (this.rttReadings.length > MAX_READINGS) {
    this.rttReadings.shift();
  }
  if (this.rttReadings.length === MAX_READINGS &&
    this.rttReadings.every((value) => value > WARNING_LEVEL)) {
    this.config.logger.warn(() => `Redis RTT is high (${this.rttReadings.join(", ")} us). ` +
      "Consider lowering concurrency or colocating Redis.");
    // Reset the window so the warning isn't repeated on every heartbeat.
    this.rttReadings.length = 0;
  }
}
403
+ async waitForDrain(deadline) {
404
+ while (this.inProgress.size > 0 && Date.now() < deadline) {
405
+ await sleep(50);
406
+ }
407
+ }
408
+ async waitForWorkers(deadline) {
409
+ const remaining = deadline - Date.now();
410
+ if (remaining <= 0) {
411
+ return;
412
+ }
413
+ await Promise.race([Promise.all(this.workers), sleep(remaining)]);
414
+ }
415
+ async requeueInProgress() {
416
+ const redis = this.baseRedis ?? (await this.config.getRedisClient());
417
+ const grouped = new Map();
418
+ for (const entry of this.inProgress.values()) {
419
+ const list = grouped.get(entry.queue) ?? [];
420
+ list.push(entry.payload);
421
+ grouped.set(entry.queue, list);
422
+ }
423
+ if (grouped.size === 0) {
424
+ return;
425
+ }
426
+ const pipeline = redis.multi();
427
+ for (const [queue, payloads] of grouped.entries()) {
428
+ pipeline.rPush(`queue:${queue}`, payloads);
429
+ }
430
+ await pipeline.exec();
431
+ }
432
/**
 * Move an unparseable raw payload string straight to the "dead" sorted set,
 * trimming the set by age (deadTimeoutInSeconds) and size (deadMaxJobs).
 * NOTE(review): duplicates sendToMorgue() except for taking a raw string —
 * consider sharing the trim logic.
 */
async sendRawToMorgue(payload) {
  const redis = this.baseRedis ?? (await this.config.getRedisClient());
  const now = Date.now() / 1000;
  const cutoff = now - this.config.deadTimeoutInSeconds;
  const pipeline = redis.multi();
  pipeline.zAdd("dead", [{ score: now, value: payload }]);
  pipeline.zRemRangeByScore("dead", 0, cutoff);
  pipeline.zRemRangeByRank("dead", 0, -this.config.deadMaxJobs);
  await pipeline.exec();
}
442
+ async runWithProfiler(payload, fn) {
443
+ if (payload.profile && this.config.profiler) {
444
+ await this.config.profiler(payload, fn);
445
+ return;
446
+ }
447
+ await fn();
448
+ }
449
/**
 * Prune "processes" members whose per-process hash has expired (i.e. dead
 * processes that stopped heartbeating). Rate-limited locally to once per
 * minute, and cluster-wide via a 60s NX lock so only one process sweeps.
 */
async cleanupProcesses(redis) {
  const now = Date.now();
  if (now - this.lastCleanupAt < 60_000) {
    return;
  }
  const lock = await redis.set("process_cleanup", "1", {
    NX: true,
    EX: 60,
  });
  if (lock !== "OK") {
    return;
  }
  this.lastCleanupAt = now;
  const processes = await redis.sMembers("processes");
  if (processes.length === 0) {
    return;
  }
  const pipeline = redis.multi();
  for (const key of processes) {
    pipeline.hGet(key, "info");
  }
  const result = await pipeline.exec();
  // A missing "info" hash means the 60s TTL lapsed without a heartbeat —
  // treat that process as dead.
  const toPrune = processes.filter((_, index) => !result?.[index]);
  if (toPrune.length > 0) {
    await redis.sRem("processes", toPrune);
  }
}
476
+ startHeartbeat() {
477
+ const intervalMs = this.config.heartbeatInterval * 1000;
478
+ this.heartbeatHandle = setInterval(() => {
479
+ this.heartbeat().catch(() => undefined);
480
+ }, intervalMs);
481
+ }
482
+ stopHeartbeat() {
483
+ if (this.heartbeatHandle) {
484
+ clearInterval(this.heartbeatHandle);
485
+ this.heartbeatHandle = undefined;
486
+ }
487
+ }
488
/**
 * Drain every due job from the "schedule" and "retry" sorted sets onto its
 * queue. The Lua script atomically pops one job per round trip, so
 * concurrent pollers in other processes never enqueue the same job twice.
 */
async enqueueScheduled() {
  if (this.stopping) {
    return;
  }
  const redis = this.baseRedis ?? (await this.config.getRedisClient());
  const client = new Client({ config: this.config });
  const now = Date.now() / 1000;
  const sets = ["schedule", "retry"];
  for (const set of sets) {
    while (!this.stopping) {
      const job = (await redis.sendCommand([
        "EVAL",
        LUA_ZPOPBYSCORE,
        "1",
        set,
        String(now),
      ]));
      if (!job) {
        break; // nothing due in this set
      }
      const payload = loadJson(job);
      await client.push(payload);
    }
  }
}
513
+ async workLoop(index) {
514
+ while (!this.stopping) {
515
+ if (this.quieting) {
516
+ await sleep(50);
517
+ continue;
518
+ }
519
+ const unit = await this.fetchWork(index);
520
+ if (!unit) {
521
+ continue;
522
+ }
523
+ const workerId = `worker-${index}`;
524
+ this.inProgress.set(workerId, {
525
+ queue: unit.queue,
526
+ payload: unit.payload,
527
+ });
528
+ this.workState.set(workerId, {
529
+ queue: unit.queue,
530
+ payload: unit.payload,
531
+ runAt: Date.now(),
532
+ });
533
+ this.heartbeat().catch(() => undefined);
534
+ try {
535
+ await this.processJob(unit.queue, unit.payload);
536
+ }
537
+ finally {
538
+ this.inProgress.delete(workerId);
539
+ this.workState.delete(workerId);
540
+ this.heartbeat().catch(() => undefined);
541
+ }
542
+ }
543
+ }
544
/**
 * Block up to FETCH_TIMEOUT_SECONDS waiting for a job on any of this
 * worker's queue keys (order re-randomized per call by QueueStrategy).
 * Returns { queue, payload } or null on timeout.
 */
async fetchWork(index) {
  const queueKeys = this.queueStrategy.queueKeys();
  if (queueKeys.length === 0) {
    // No queues configured: sleep instead of busy-spinning.
    await sleep(FETCH_TIMEOUT_SECONDS * 1000);
    return null;
  }
  const redis = this.workerRedis[index] ?? (await this.config.getRedisClient());
  const result = (await redis.sendCommand([
    "BRPOP",
    ...queueKeys,
    String(FETCH_TIMEOUT_SECONDS),
  ]));
  if (!result) {
    return null;
  }
  // BRPOP replies [key, element]; strip the "queue:" prefix for callers.
  const [queueKey, job] = result;
  const queue = queueKey.startsWith("queue:") ? queueKey.slice(6) : queueKey;
  return { queue, payload: job };
}
563
/**
 * Decode and execute one job. Undecodable JSON is dead-lettered
 * immediately (it can never succeed); an unknown class counts as a
 * failure; JobSkipError aborts without counting as processed or failed;
 * any other error enters the retry pipeline via handleFailure().
 */
async processJob(queue, payloadRaw) {
  let payload;
  try {
    payload = loadJson(payloadRaw);
  }
  catch (error) {
    await this.sendRawToMorgue(payloadRaw);
    await this.updateStat("failed");
    this.config.logger.error(() => `Invalid JSON for job on ${queue}: ${String(error)}`);
    const err = error instanceof Error ? error : new Error(String(error));
    await this.runErrorHandlers(err, this.buildErrorContext("Invalid JSON for job", undefined, queue, payloadRaw));
    return;
  }
  const className = String(payload.class);
  const klass = resolveJob(className);
  if (!klass) {
    this.config.logger.error(() => `Unknown job class ${className} for ${queue}`);
    await this.updateStat("failed");
    await this.runErrorHandlers(new Error(`Unknown job class ${className}`), this.buildErrorContext("Unknown job class", payload, queue));
    return;
  }
  const job = new klass();
  job.jid = payload.jid;
  // Let long-running jobs observe shutdown and bail out cooperatively.
  job._context = { stopping: () => this.stopping };
  try {
    // "executed" distinguishes real completion from a middleware
    // short-circuit; only completed jobs increment the "processed" stat.
    let executed = false;
    await this.jobLogger.prepare(payload, async () => {
      await this.jobLogger.call(payload, queue, async () => {
        await this.runWithProfiler(payload, async () => {
          await this.config.serverMiddleware.invoke(job, payload, queue, async () => {
            executed = true;
            await job.perform(...(payload.args ?? []));
          });
        });
      });
    });
    if (executed) {
      await this.updateStat("processed");
    }
  }
  catch (error) {
    const err = error instanceof Error ? error : new Error(String(error));
    if (err instanceof JobSkipError) {
      return; // deliberate skip: neither processed nor failed
    }
    await this.handleFailure(queue, payload, klass, err);
  }
}
611
// biome-ignore lint/complexity/noExcessiveCognitiveComplexity: retry logic requires handling many edge cases
/**
 * Decide the fate of a failed job: record the error, then either drop it
 * (retry disabled / retryIn hook returned "discard"), kill it to the dead
 * set (retries or the retry_for window exhausted, or hook returned
 * "kill"), or schedule the next attempt with exponential backoff plus
 * jitter. Error handlers run for every outcome except an explicit discard.
 */
async handleFailure(queue, payload, klass, error) {
  const redis = await this.config.getRedisClient();
  const className = String(payload.class);
  const message = this.safeErrorMessage(error);
  this.config.logger.error(() => `Job ${className} failed on ${queue}: ${message}`);
  await this.updateStat("failed");
  // Per-payload retry setting overrides the class-level sidekiq option.
  const retryOption = payload.retry !== undefined
    ? payload.retry
    : klass.getSidekiqOptions().retry;
  const retryDisabled = retryOption === false ||
    retryOption === null ||
    retryOption === undefined;
  if (retryDisabled) {
    await this.runDeathHandlers(payload, error);
    await this.runErrorHandlers(error, this.buildErrorContext("Job raised exception", payload, queue));
    return;
  }
  const maxRetries = typeof retryOption === "number" ? retryOption : this.config.maxRetries;
  const nowMs = Date.now();
  const nowSeconds = nowMs / 1000;
  // Retries may target a dedicated retry_queue instead of the origin queue.
  payload.queue = payload.retry_queue ?? payload.queue ?? queue;
  payload.error_message = message.slice(0, 10_000); // cap stored message size
  payload.error_class = error.name;
  if (payload.retry_count !== undefined) {
    payload.retry_count += 1;
    // NOTE(review): stored in milliseconds; Ruby Sidekiq stores epoch
    // seconds — confirm anything reading retried_at/failed_at expects ms.
    payload.retried_at = nowMs;
  }
  else {
    // First failure: retry_count starts at 0; failed_at anchors retry_for.
    payload.retry_count = 0;
    payload.failed_at = nowMs;
  }
  if (payload.backtrace) {
    const rawLines = extractBacktrace(error);
    const cleaned = this.config.backtraceCleaner(rawLines);
    // backtrace: true keeps every cleaned line; a number keeps that many.
    const limit = payload.backtrace === true ? cleaned.length : Number(payload.backtrace);
    const lines = cleaned.slice(0, Math.max(limit, 0));
    payload.error_backtrace = compressBacktrace(lines);
  }
  // retry_for: stop retrying once a wall-clock window after the first
  // failure has elapsed, regardless of retry_count.
  const retryFor = payload.retry_for;
  if (typeof retryFor === "number" && payload.failed_at !== undefined) {
    const deadline = payload.failed_at / 1000 + retryFor;
    if (deadline < nowSeconds) {
      await this.retriesExhausted(payload, error, klass.sidekiqRetriesExhausted);
      await this.runErrorHandlers(error, this.buildErrorContext("Job raised exception", payload, queue));
      return;
    }
  }
  else if (payload.retry_count >= maxRetries) {
    await this.retriesExhausted(payload, error, klass.sidekiqRetriesExhausted);
    await this.runErrorHandlers(error, this.buildErrorContext("Job raised exception", payload, queue));
    return;
  }
  // Optional class-level hook may return a delay in seconds, "discard",
  // "kill", or "default".
  const retryIn = klass.sidekiqRetryIn;
  const delayResult = retryIn
    ? this.safeRetryIn(retryIn, payload.retry_count, error, payload)
    : "default";
  if (delayResult === "discard") {
    payload.discarded_at = nowMs;
    await this.runDeathHandlers(payload, error);
    return;
  }
  if (delayResult === "kill") {
    await this.retriesExhausted(payload, error, klass.sidekiqRetriesExhausted);
    await this.runErrorHandlers(error, this.buildErrorContext("Job raised exception", payload, queue));
    return;
  }
  // Default backoff: count^4 + 15 seconds, plus jitter that grows with the
  // attempt number to spread out retry storms.
  const delaySeconds = typeof delayResult === "number"
    ? delayResult
    : payload.retry_count ** 4 + 15;
  const jitter = Math.random() * 10 * (payload.retry_count + 1);
  const retryAt = nowSeconds + delaySeconds + jitter;
  await redis.zAdd("retry", [{ score: retryAt, value: dumpJson(payload) }]);
  await this.runErrorHandlers(error, this.buildErrorContext("Job raised exception", payload, queue));
}
686
+ safeRetryIn(handler, count, error, payload) {
687
+ try {
688
+ return handler(count, error, payload) ?? "default";
689
+ }
690
+ catch (handlerError) {
691
+ const err = handlerError instanceof Error
692
+ ? handlerError
693
+ : new Error(String(handlerError));
694
+ this.config.logger.error(() => `Error in retryIn handler: ${err.message}`);
695
+ return "default";
696
+ }
697
+ }
698
/**
 * A job is out of retries: run the class's retriesExhausted hook (which may
 * return "discard" to skip the morgue), then either mark the payload
 * discarded or move it to the "dead" set, and finally run all death
 * handlers. Hook errors are logged and do not change the outcome.
 */
async retriesExhausted(payload, error, handler) {
  let handlerResult;
  if (handler) {
    try {
      handlerResult = handler(payload, error);
    }
    catch (handlerError) {
      const err = handlerError instanceof Error
        ? handlerError
        : new Error(String(handlerError));
      this.config.logger.error(() => `Error calling retriesExhausted handler: ${err.message}`);
    }
  }
  // dead: false on the payload also opts the job out of the morgue.
  const discard = payload.dead === false || handlerResult === "discard";
  if (discard) {
    payload.discarded_at = Date.now();
  }
  else {
    await this.sendToMorgue(payload);
  }
  await this.runDeathHandlers(payload, error);
}
720
+ async sendToMorgue(payload) {
721
+ const redis = await this.config.getRedisClient();
722
+ const nowSeconds = Date.now() / 1000;
723
+ const cutoff = nowSeconds - this.config.deadTimeoutInSeconds;
724
+ const pipeline = redis.multi();
725
+ pipeline.zAdd("dead", [{ score: nowSeconds, value: dumpJson(payload) }]);
726
+ pipeline.zRemRangeByScore("dead", 0, cutoff);
727
+ pipeline.zRemRangeByRank("dead", 0, -this.config.deadMaxJobs);
728
+ await pipeline.exec();
729
+ }
730
+ async runDeathHandlers(payload, error) {
731
+ for (const handler of this.config.deathHandlers) {
732
+ try {
733
+ await handler(payload, error);
734
+ }
735
+ catch (handlerError) {
736
+ const err = handlerError instanceof Error
737
+ ? handlerError
738
+ : new Error(String(handlerError));
739
+ this.config.logger.error(() => `Error calling death handler: ${err.message}`);
740
+ }
741
+ }
742
+ }
743
+ async runErrorHandlers(error, context) {
744
+ if (this.config.errorHandlers.length === 0) {
745
+ return;
746
+ }
747
+ for (const handler of this.config.errorHandlers) {
748
+ try {
749
+ await handler(error, context, this.config);
750
+ }
751
+ catch (handlerError) {
752
+ const err = handlerError instanceof Error
753
+ ? handlerError
754
+ : new Error(String(handlerError));
755
+ this.config.logger.error(() => `Error calling error handler: ${err.message}`);
756
+ }
757
+ }
758
+ }
759
+ buildErrorContext(message, payload, queue, jobstr) {
760
+ const context = { context: message };
761
+ if (payload) {
762
+ context.job = payload;
763
+ }
764
+ if (queue) {
765
+ context.queue = queue;
766
+ }
767
+ if (jobstr) {
768
+ context.jobstr = jobstr;
769
+ }
770
+ return context;
771
+ }
772
+ safeErrorMessage(error) {
773
+ try {
774
+ return String(error.message ?? "Unknown error");
775
+ }
776
+ catch {
777
+ return "!!! ERROR MESSAGE THREW AN ERROR !!!";
778
+ }
779
+ }
780
+ async updateStat(stat, count = 1) {
781
+ const redis = this.baseRedis ?? (await this.config.getRedisClient());
782
+ const date = new Date().toISOString().slice(0, 10);
783
+ const key = `stat:${stat}`;
784
+ const dailyKey = `${key}:${date}`;
785
+ const pipeline = redis.multi();
786
+ pipeline.incrBy(key, count);
787
+ pipeline.incrBy(dailyKey, count);
788
+ pipeline.expire(dailyKey, STATS_TTL_SECONDS);
789
+ await pipeline.exec();
790
+ }
791
+ }