queasy 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/queasy.lua ADDED
@@ -0,0 +1,397 @@
+ #!lua name=queasy
+
+ --[[
+ Queasy: Redis Lua functions for job queue management
+
+ Key structure:
+ - {queue} - sorted set of waiting job IDs (score = run_at or -run_at if blocked)
+ - {queue}:expiry - sorted set of client heartbeat expiries (member = client_id, score = expiry)
+ - {queue}:checkouts:{client_id} - set of job IDs checked out by this client
+ - {queue}:waiting_job:{id} - hash with job data for waiting jobs
+ - {queue}:active_job:{id} - hash with job data for active jobs
+ ]]
+
+ -- Key helpers
+ local function get_waiting_job_key(queue_key, id)
+     return queue_key .. ':waiting_job:' .. id
+ end
+
+ local function get_active_job_key(queue_key, id)
+     return queue_key .. ':active_job:' .. id
+ end
+
+ local function get_expiry_key(queue_key)
+     return queue_key .. ':expiry'
+ end
+
+ local function get_checkouts_key(queue_key, client_id)
+     return queue_key .. ':checkouts:' .. client_id
+ end
+
+ -- Helper: Add job to waiting queue with appropriate flags
+ local function add_to_waiting(queue_key, id, score, update_run_at)
+     local flag = nil
+
+     if update_run_at == 'false' then
+         flag = 'NX'
+     elseif update_run_at == 'if_later' then
+         flag = score >= 0 and 'GT' or 'LT'
+     elseif update_run_at == 'if_earlier' then
+         flag = score >= 0 and 'LT' or 'GT'
+     end
+
+     if flag then
+         redis.call('ZADD', queue_key, flag, score, id)
+     else
+         redis.call('ZADD', queue_key, score, id)
+     end
+ end
+
+ -- Helper: Upsert job to waiting queue
+ local function dispatch(
+     queue_key,
+     id, run_at, data,
+     update_data, update_run_at, reset_counts
+ )
+     local waiting_job_key = get_waiting_job_key(queue_key, id)
+     local active_job_key = get_active_job_key(queue_key, id)
+
+     -- id is always stored so that HGETALL (e.g. during dequeue) includes it
+     redis.call('HSET', waiting_job_key, 'id', id)
+
+     -- If reset_counts is true, reset counters to 0, otherwise initialize them
+     redis.call(reset_counts and 'HSET' or 'HSETNX', waiting_job_key, 'retry_count', '0')
+     redis.call(reset_counts and 'HSET' or 'HSETNX', waiting_job_key, 'stall_count', '0')
+
+     -- Handle data
+     redis.call(update_data and 'HSET' or 'HSETNX', waiting_job_key, 'data', data)
+
+     -- Check if there's an active job with this ID
+     local is_blocked = redis.call('EXISTS', active_job_key) == 1
+     local score = is_blocked and -tonumber(run_at) or tonumber(run_at)
+
+     if is_blocked then
+         -- save these flags in case they need to be applied later
+         redis.call('HSET', waiting_job_key,
+             'reset_counts', tostring(reset_counts),
+             'update_data', tostring(update_data),
+             'update_run_at', update_run_at)
+     end
+
+     -- Add to waiting queue
+     add_to_waiting(queue_key, id, score, update_run_at)
+
+     return { ok = 'OK' }
+ end
+
+ -- Helper: Move job back to waiting for retry
+ local function do_retry(queue_key, id, retry_at)
+     local waiting_job_key = get_waiting_job_key(queue_key, id)
+     local active_job_key = get_active_job_key(queue_key, id)
+
+     local existing_score = redis.call('ZSCORE', queue_key, id)
+
+     if existing_score then
+         local run_at = -existing_score.double
+         local job = redis.call('HGETALL', waiting_job_key)['map']
+
+         redis.call('RENAME', active_job_key, waiting_job_key)
+         redis.call('ZADD', queue_key, retry_at, id)
+
+         if next(job) then
+             dispatch(
+                 queue_key,
+                 id, run_at, job.data,
+                 job.update_data == 'true', job.update_run_at, job.reset_counts == 'true'
+             )
+         end
+     else
+         redis.call('RENAME', active_job_key, waiting_job_key)
+         redis.call('ZADD', queue_key, retry_at, id)
+     end
+
+     return { ok = 'OK' }
+ end
+
+ -- Forward declaration
+ local sweep
+
+ -- Helper: Clear active job and unblock waiting job
+ local function finish(queue_key, id, client_id, now)
+     local waiting_job_key = get_waiting_job_key(queue_key, id)
+     local active_job_key = get_active_job_key(queue_key, id)
+     local checkouts_key = get_checkouts_key(queue_key, client_id)
+
+     redis.call('SREM', checkouts_key, id)
+     redis.call('DEL', active_job_key)
+
+     local score = redis.call('ZSCORE', queue_key, id)
+
+     if score then
+         score = tonumber(score.double)
+         if score < 0 then
+             score = -score
+         end
+
+         local update_run_at = redis.call('HGET', waiting_job_key, 'update_run_at') or 'true'
+         add_to_waiting(queue_key, id, score, update_run_at)
+     end
+
+     return { ok = 'OK' }
+ end
+
+ -- Helper: Handle permanent failure
+ -- Creates a fail job and finishes the original job
+ local function fail(queue_key, fail_queue_key, id, client_id, fail_job_id, fail_job_data, now)
+     -- Dispatch the fail job
+     dispatch(fail_queue_key,
+         fail_job_id, 0, fail_job_data,
+         -- Booleans, not the string 'false' (which is truthy in Lua and would
+         -- make dispatch treat update_data/reset_counts as enabled)
+         false, 'false', false)
+
+     -- Finish the original job
+     finish(queue_key, id, client_id, now)
+
+     return { ok = 'OK' }
+ end
+
+ -- Helper: Handle retriable failure
+ local function retry(queue_key, id, client_id, retry_at, now)
+     local active_job_key = get_active_job_key(queue_key, id)
+     local checkouts_key = get_checkouts_key(queue_key, client_id)
+
+     redis.call('SREM', checkouts_key, id)
+
+     local retry_count = tonumber(redis.call('HGET', active_job_key, 'retry_count'))
+
+     retry_count = retry_count + 1
+     redis.call('HSET', active_job_key, 'retry_count', retry_count)
+
+     local result = do_retry(queue_key, id, retry_at)
+
+     return result
+ end
+
+ -- Helper: Handle stalled job
+ local function handle_stall(queue_key, id, retry_at)
+     local active_job_key = get_active_job_key(queue_key, id)
+
+     local stall_count = tonumber(redis.call('HGET', active_job_key, 'stall_count'))
+
+     stall_count = stall_count + 1
+     redis.call('HSET', active_job_key, 'stall_count', stall_count)
+
+     return do_retry(queue_key, id, retry_at)
+ end
+
+ -- Dequeue jobs from waiting queue
+ local function dequeue(queue_key, client_id, now, expiry, limit)
+     local expiry_key = get_expiry_key(queue_key)
+     local checkouts_key = get_checkouts_key(queue_key, client_id)
+     local jobs = redis.call('ZRANGEBYSCORE', queue_key, 0, now, 'LIMIT', 0, limit)
+     local result = {}
+
+     for _, id in ipairs(jobs) do
+         redis.call('ZREM', queue_key, id)
+         local waiting_job_key = get_waiting_job_key(queue_key, id)
+         local active_job_key = get_active_job_key(queue_key, id)
+
+         redis.call('RENAME', waiting_job_key, active_job_key)
+         local job = redis.call('HGETALL', active_job_key)
+
+         redis.call('SADD', checkouts_key, id)
+         table.insert(result, job)
+     end
+
+     if #result > 0 then
+         redis.call('ZADD', expiry_key, expiry, client_id)
+     end
+
+     -- Sweep stalled clients
+     sweep(queue_key, now)
+
+     return result
+ end
+
+ -- Cancel a waiting job
+ local function cancel(queue_key, id)
+     local waiting_job_key = get_waiting_job_key(queue_key, id)
+     local removed = redis.call('ZREM', queue_key, id)
+     if removed == 1 then
+         redis.call('DEL', waiting_job_key)
+     end
+     return removed
+ end
+
+ -- Bump heartbeat for client and sweep stalled clients
+ local function bump(queue_key, client_id, now, expiry)
+     local expiry_key = get_expiry_key(queue_key)
+
+     -- Check if this client exists in expiry set
+     local existing = redis.call('ZSCORE', expiry_key, client_id)
+     if not existing then
+         return 0
+     end
+
+     -- Update expiry
+     redis.call('ZADD', expiry_key, 'XX', expiry, client_id)
+
+     -- Sweep stalled clients
+     sweep(queue_key, now)
+
+     return 1
+ end
+
+ -- Sweep stalled clients
+ sweep = function(queue_key, now)
+     local expiry_key = get_expiry_key(queue_key)
+
+     -- Find first stalled client
+     local stalled = redis.call('ZRANGEBYSCORE', expiry_key, 0, now, 'LIMIT', 0, 1)
+
+     if #stalled == 0 then
+         return {}
+     end
+
+     local stalled_client_id = stalled[1]
+     local checkouts_key = get_checkouts_key(queue_key, stalled_client_id)
+
+     -- Get all job IDs checked out by this client
+     -- RESP3 returns SMEMBERS as { set = { id1 = true, id2 = true, ... } }
+     local members_resp = redis.call('SMEMBERS', checkouts_key)
+     local processed_jobs = {}
+
+     for id, _ in pairs(members_resp['set']) do
+         handle_stall(queue_key, id, 0)
+         table.insert(processed_jobs, id)
+     end
+
+     -- Clean up the stalled client
+     redis.call('ZREM', expiry_key, stalled_client_id)
+     redis.call('DEL', checkouts_key)
+
+     return processed_jobs
+ end
+
+ -- Register: queasy_dispatch
+ redis.register_function {
+     function_name = 'queasy_dispatch',
+     callback = function(keys, args)
+         local queue_key = keys[1]
+         local id = args[1]
+         local run_at = tonumber(args[2])
+         local data = args[3]
+         local update_data = args[4] == 'true'
+         local update_run_at = args[5]
+         local reset_counts = args[6] == 'true'
+
+         redis.setresp(3)
+         return dispatch(
+             queue_key,
+             id, run_at, data,
+             update_data, update_run_at, reset_counts
+         )
+     end,
+     flags = {}
+ }
+
+ -- Register: queasy_dequeue
+ redis.register_function {
+     function_name = 'queasy_dequeue',
+     callback = function(keys, args)
+         local queue_key = keys[1]
+         local client_id = args[1]
+         local now = tonumber(args[2])
+         local expiry = tonumber(args[3])
+         local limit = tonumber(args[4])
+
+         redis.setresp(3)
+         return dequeue(queue_key, client_id, now, expiry, limit)
+     end,
+     flags = {}
+ }
+
+ -- Register: queasy_cancel
+ redis.register_function {
+     function_name = 'queasy_cancel',
+     callback = function(keys, args)
+         local queue_key = keys[1]
+         local id = args[1]
+
+         redis.setresp(3)
+         return cancel(queue_key, id)
+     end,
+     flags = {}
+ }
+
+ -- Register: queasy_bump
+ redis.register_function {
+     function_name = 'queasy_bump',
+     callback = function(keys, args)
+         local queue_key = keys[1]
+         local client_id = args[1]
+         local now = tonumber(args[2])
+         local expiry = tonumber(args[3])
+
+         redis.setresp(3)
+         return bump(queue_key, client_id, now, expiry)
+     end,
+     flags = {}
+ }
+
+ -- Register: queasy_finish
+ redis.register_function {
+     function_name = 'queasy_finish',
+     callback = function(keys, args)
+         local queue_key = keys[1]
+         local id = args[1]
+         local client_id = args[2]
+         local now = tonumber(args[3])
+
+         redis.setresp(3)
+         return finish(queue_key, id, client_id, now)
+     end,
+     flags = {}
+ }
+
+ -- Register: queasy_retry
+ redis.register_function {
+     function_name = 'queasy_retry',
+     callback = function(keys, args)
+         local queue_key = keys[1]
+         local id = args[1]
+         local client_id = args[2]
+         local retry_at = tonumber(args[3])
+         -- args[4], not args[5]: the argument list is (id, client_id, retry_at, now)
+         local now = tonumber(args[4])
+
+         redis.setresp(3)
+         return retry(queue_key, id, client_id, retry_at, now)
+     end,
+     flags = {}
+ }
+
+ -- Register: queasy_fail
+ redis.register_function {
+     function_name = 'queasy_fail',
+     callback = function(keys, args)
+         local queue_key = keys[1]
+         local fail_queue_key = keys[2]
+         local id = args[1]
+         local client_id = args[2]
+         local fail_job_id = args[3]
+         local fail_job_data = args[4]
+         local now = tonumber(args[5])
+
+         redis.setresp(3)
+         return fail(queue_key, fail_queue_key, id, client_id, fail_job_id, fail_job_data, now)
+     end,
+     flags = {}
+ }
+
+ -- Register: queasy_version
+ redis.register_function {
+     function_name = 'queasy_version',
+     callback = function(keys, args)
+         return 1
+     end,
+     flags = {}
+ }
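
For reference, each registered function follows the standard `FCALL <name> <numkeys> <key...> <arg...>` calling convention. A minimal sketch of invoking them directly (not part of the package; assumes a node-redis v4 client, and the queue name and job values are illustrative):

```js
import { createClient } from 'redis';

const client = createClient();
await client.connect();

// Enqueue a job: keys = [queue], args = [id, run_at, data, update_data, update_run_at, reset_counts]
await client.sendCommand([
  'FCALL', 'queasy_dispatch', '1', 'emails',
  'job-1', String(Date.now()), JSON.stringify({ to: 'a@example.com' }),
  'false', 'false', 'false',
]);

// Check out up to 10 due jobs: args = [client_id, now, expiry, limit]
const jobs = await client.sendCommand([
  'FCALL', 'queasy_dequeue', '1', 'emails',
  'client-1', String(Date.now()), String(Date.now() + 30_000), '10',
]);

// Acknowledge completion: args = [id, client_id, now]
await client.sendCommand([
  'FCALL', 'queasy_finish', '1', 'emails',
  'job-1', 'client-1', String(Date.now()),
]);
```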
package/src/queue.js ADDED
@@ -0,0 +1,161 @@
+ import { DEFAULT_RETRY_OPTIONS, FAILJOB_RETRY_OPTIONS } from './constants.js';
+ import { generateId } from './utils.js';
+
+ // Import types:
+ /** @typedef {import('redis').RedisClientType} RedisClient */
+ /** @typedef {import('./types').HandlerOptions} HandlerOptions */
+ /** @typedef {import('./types').ListenOptions} ListenOptions */
+ /** @typedef {import('./types').JobOptions} JobOptions */
+ /** @typedef {import('./types').Job} Job */
+ /** @typedef {import('./types').DoneMessage} DoneMessage */
+ /** @typedef {import('./client').Client} Client */
+ /** @typedef {import('./pool').Pool} Pool */
+ /** @typedef {import('./manager').Manager} Manager */
+
+ /** @typedef {Required<Partial<Pick<Queue, keyof Queue>>>} ProcessingQueue */
+
+ /**
+  * Queue instance for managing a named job queue
+  */
+ export class Queue {
+   /**
+    * @param {string} key - Queue key
+    * @param {Client} client - Redis client wrapper
+    * @param {Pool | undefined} pool - Worker pool
+    * @param {Manager | undefined} manager - Capacity allocation manager
+    */
+   constructor(key, client, pool, manager) {
+     this.key = key;
+     this.client = client;
+     this.pool = pool;
+     this.manager = manager;
+
+     /** @type {NodeJS.Timeout | undefined} */
+     // this.dequeueInterval = undefined;
+
+     /** @type {Required<HandlerOptions> | undefined} */
+     this.handlerOptions = undefined;
+
+     /** @type {string | undefined} */
+     this.handlerPath = undefined;
+
+     /** @type {string | undefined} */
+     this.failKey = undefined;
+   }
+
+   /**
+    * Attach handlers to process jobs from this queue
+    * @param {string} handlerPath - Path to handler module
+    * @param {ListenOptions} [options] - Retry strategy options and failure handler
+    * @returns {Promise<void>}
+    */
+   async listen(handlerPath, { failHandler, failRetryOptions, ...retryOptions } = {}) {
+     if (!this.pool || !this.manager) throw new Error('Can’t listen on a non-processing client');
+
+     this.handlerPath = handlerPath;
+     this.handlerOptions = { ...DEFAULT_RETRY_OPTIONS, ...retryOptions };
+
+     // Initialize failure handler on all workers if provided
+     if (failHandler) {
+       this.failKey = `${this.key}-fail`;
+       const failQueue = this.client.queue(this.failKey, true);
+       // Awaited so that registration errors surface here instead of as unhandled rejections
+       await failQueue.listen(failHandler, { ...FAILJOB_RETRY_OPTIONS, ...failRetryOptions });
+     }
+
+     this.manager.addQueue(/** @type {ProcessingQueue} */ (this));
+
+     // if (!this.dequeueInterval) {
+     //   this.dequeueInterval = setInterval(() => this.dequeue(), DEQUEUE_INTERVAL);
+     // }
+   }
+
+   /**
+    * Add a job to the queue
+    * @param {any} data - Job data (any JSON-serializable value)
+    * @param {JobOptions} [options] - Job options
+    * @returns {Promise<string>} Job ID
+    */
+   async dispatch(data, options = {}) {
+     const {
+       id = generateId(),
+       runAt = 0,
+       updateData = false,
+       updateRunAt = false,
+       resetCounts = false,
+     } = options;
+
+     await this.client.dispatch(this.key, id, runAt, data, updateData, updateRunAt, resetCounts);
+     return id;
+   }
+
+   /**
+    * Cancel a waiting job
+    * @param {string} id - Job ID
+    * @returns {Promise<boolean>} True if job was cancelled
+    */
+   async cancel(id) {
+     return await this.client.cancel(this.key, id);
+   }
+
+   /**
+    * Picks jobs from the queue and processes them
+    * @param {number} count
+    * @returns {Promise<{count: number, promise: Promise<Array<unknown>>}>}
+    */
+   async dequeue(count) {
+     const { pool, handlerPath, handlerOptions } = /** @type {ProcessingQueue} */ (this);
+     const { maxRetries, maxStalls, maxBackoff, minBackoff, size, timeout } = handlerOptions;
+
+     // const capacity = pool.getCapacity(size);
+     // if (capacity <= 0) return;
+
+     const jobs = await this.client.dequeue(this.key, count);
+
+     const promise = Promise.all(
+       jobs.map(async (job) => {
+         // Check if job has exceeded stall limit
+         if (job.stallCount >= maxStalls) {
+           // Job has stalled too many times - fail it permanently
+           if (!this.failKey) return this.client.finish(this.key, job.id);
+
+           const failJobData = [job.id, job.data, { message: 'Max stalls exceeded' }];
+           return this.client.fail(this.key, this.failKey, job.id, failJobData);
+         }
+
+         try {
+           await pool.process(handlerPath, job, size, timeout);
+           await this.client.finish(this.key, job.id);
+         } catch (message) {
+           const { error } = /** @type {Required<DoneMessage>} */ (message);
+           const { retryAt = 0, kind } = error;
+
+           if (kind === 'permanent' || job.retryCount >= maxRetries) {
+             if (!this.failKey) return this.client.finish(this.key, job.id);
+
+             const failJobData = [job.id, job.data, error];
+             return this.client.fail(this.key, this.failKey, job.id, failJobData);
+           }
+
+           const backoffUntil =
+             Date.now() + Math.min(maxBackoff, minBackoff * 2 ** job.retryCount);
+
+           // Retriable error: call retry
+           await this.client.retry(this.key, job.id, Math.max(retryAt, backoffUntil));
+         }
+       })
+     );
+
+     return { count: jobs.length, promise };
+   }
+
+   /**
+    * Stop the dequeue interval and bump timer for this queue
+    */
+   close() {
+     // if (this.dequeueInterval) {
+     //   clearInterval(this.dequeueInterval);
+     //   this.dequeueInterval = undefined;
+     // }
+   }
+ }
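
Taken together, the class above gives the following shape of usage. A hedged sketch: how a `Queue` instance is constructed is not shown in this diff (the `Client`, `Pool`, and `Manager` wiring lives elsewhere), so `queue` below is assumed to already exist:

```js
// `queue` is an already-constructed Queue instance (construction not shown in this diff).
await queue.listen('./handlers/send-email.js', {
  maxRetries: 5,
  minBackoff: 1_000,
  maxBackoff: 60_000,
  failHandler: './handlers/email-failed.js', // optional dead-letter handler
});

// Dispatch a job; a stable ID dedupes against a waiting job with the same ID
const id = await queue.dispatch(
  { to: 'a@example.com' },
  { id: 'welcome:a@example.com', runAt: Date.now() + 5_000, updateRunAt: 'if_earlier' }
);

// A waiting job can still be cancelled before it is dequeued
await queue.cancel(id);
```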
package/src/types.ts ADDED
@@ -0,0 +1,92 @@
+ /**
+  * Core job identification and data
+  */
+ export interface JobCoreOptions {
+   /** Job ID (auto-generated if not provided) */
+   id?: string;
+   /** Job data (any JSON-serializable value) */
+   // biome-ignore lint/suspicious/noExplicitAny: Data is any serializable value
+   data?: any;
+   /** Wall clock timestamp (ms) before which job must not run */
+   runAt?: number;
+ }
+
+ /**
+  * Update behavior flags
+  */
+ export interface JobUpdateOptions {
+   /** Whether to replace data of waiting job with same ID */
+   updateData?: boolean;
+   /** How to update runAt */
+   updateRunAt?: boolean | 'if_later' | 'if_earlier';
+   /** Whether to reset retry_count and stall_count to 0 */
+   resetCounts?: boolean;
+ }
+
+ /**
+  * Complete options accepted by dispatch()
+  */
+ export type JobOptions = JobCoreOptions & JobUpdateOptions;
+
+ /**
+  * Job runtime state
+  */
+ export interface JobState {
+   /** Number of times this job has been retried */
+   retryCount: number;
+   /** Number of times this job has stalled */
+   stallCount: number;
+ }
+
+ /**
+  * Complete job representation passed to handlers
+  */
+ export type Job = Required<JobCoreOptions> & JobState;
+
+ /**
+  * Handler options
+  */
+ export interface HandlerOptions {
+   /** Maximum number of retries before permanent failure */
+   maxRetries?: number;
+   /** Maximum number of stalls before permanent failure */
+   maxStalls?: number;
+   /** Minimum backoff in milliseconds */
+   minBackoff?: number;
+   /** Maximum backoff in milliseconds */
+   maxBackoff?: number;
+   /** Size of the job (as a percent of total worker capacity) */
+   size?: number;
+   /** Maximum processing duration before considering stalled */
+   timeout?: number;
+   /** Priority of this queue (vs other queues) */
+   priority?: number;
+ }
+
+ /**
+  * Options for listen() - queue-level retry strategy
+  */
+ export interface ListenOptions extends HandlerOptions {
+   /** Path to failure handler module (optional) */
+   failHandler?: string;
+
+   /** Retry options of the fail job */
+   failRetryOptions?: HandlerOptions;
+ }
+
+ export type ExecMessage = {
+   op: 'exec';
+   queue: string;
+   job: Job;
+ };
+
+ export type DoneMessage = {
+   op: 'done';
+   jobId: string;
+   error?: {
+     name: string;
+     message: string;
+     retryAt?: number;
+     kind?: 'retriable' | 'permanent' | 'stall';
+   };
+ };
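
To make the option semantics concrete, here is an illustrative `JobOptions` value (the values are made up; the flag meanings come from the doc comments above and from `add_to_waiting` in `queasy.lua`):

```js
/** @type {import('./types').JobOptions} */
const options = {
  id: 'report:2024-06',       // stable ID so repeat dispatches dedupe
  data: { month: '2024-06' },
  runAt: Date.now() + 60_000, // don't run for another minute
  updateRunAt: 'if_earlier',  // only ever pull the run time forward
  resetCounts: true,          // restart retry/stall counters on re-dispatch
};
```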
package/src/utils.js ADDED
@@ -0,0 +1,13 @@
+ /**
+  * Generate a random alphanumeric ID
+  * @param {number} length - Length of the ID
+  * @returns {string}
+  */
+ export function generateId(length = 20) {
+   const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
+   let id = '';
+   for (let i = 0; i < length; i++) {
+     id += chars.charAt(Math.floor(Math.random() * chars.length));
+   }
+   return id;
+ }
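
`Math.random` is adequate for collision avoidance at this length (62^20 possible IDs) but is not cryptographically secure. If unpredictable IDs were required, a drop-in variant could use `node:crypto` — a sketch, not part of the package:

```js
import { randomInt } from 'node:crypto';

const CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';

/** Cryptographically random variant of generateId (illustrative). */
export function generateSecureId(length = 20) {
  let id = '';
  for (let i = 0; i < length; i++) {
    id += CHARS[randomInt(CHARS.length)]; // unbiased over [0, CHARS.length)
  }
  return id;
}
```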
package/src/worker.js ADDED
@@ -0,0 +1,44 @@
+ import { pathToFileURL } from 'node:url';
+ import { parentPort, setEnvironmentData } from 'node:worker_threads';
+ import { PermanentError } from './errors.js';
+
+ /** @typedef {import('./types.js').ExecMessage} ExecMessage */
+ /** @typedef {import('./types.js').DoneMessage} DoneMessage */
+
+ if (!parentPort) throw new Error('Worker cannot be executed directly.');
+ setEnvironmentData('queasy_worker_context', true);
+
+ /** @param {ExecMessage} msg */
+ parentPort.on('message', async (msg) => {
+   const { handlerPath, job } = msg;
+   try {
+     const mod = await import(pathToFileURL(handlerPath).href);
+     if (typeof mod.handle !== 'function') {
+       throw new Error(`Unable to load handler ${handlerPath}`);
+     }
+
+     await mod.handle(job.data, job);
+     send({ op: 'done', jobId: job.id });
+   } catch (err) {
+     const { message, name, retryAt } = /** @type {Error & { retryAt?: number }} */ (err);
+
+     send({
+       op: 'done',
+       jobId: job.id,
+       error: {
+         name,
+         message,
+         retryAt,
+         kind: err instanceof PermanentError ? 'permanent' : 'retriable',
+       },
+     });
+   }
+ });
+
+ /**
+  * Send a message to the parentPort
+  * @param {DoneMessage} message
+  */
+ function send(message) {
+   parentPort?.postMessage(message);
+ }
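
The worker expects each handler module to export a `handle(data, job)` function. Throwing a `PermanentError` marks the failure as permanent (no retries); a `retryAt` property on any thrown error requests a specific retry time. A minimal handler sketch (the HTTP call is illustrative, and the import path is assumed since `src/errors.js` itself is not shown in this diff):

```js
// handlers/send-email.js (illustrative)
import { PermanentError } from '../src/errors.js'; // import path assumed

export async function handle(data, job) {
  if (!data.to) {
    // No point retrying a malformed job: routed straight to the fail queue
    throw new PermanentError('missing recipient');
  }

  const res = await fetch('https://mail.example.com/send', {
    method: 'POST',
    body: JSON.stringify(data),
  });

  if (res.status === 429) {
    // Retriable with an explicit time: surfaced to the queue as error.retryAt
    const err = new Error('rate limited');
    /** @type {any} */ (err).retryAt = Date.now() + 60_000;
    throw err;
  }
}
```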