queasy 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/settings.local.json +27 -0
- package/.luarc.json +13 -0
- package/.zed/settings.json +39 -0
- package/AGENTS.md +102 -0
- package/CLAUDE.md +83 -0
- package/License.md +7 -0
- package/Readme.md +130 -0
- package/biome.json +28 -0
- package/doc/Implementation.md +70 -0
- package/docker-compose.yml +19 -0
- package/jsconfig.json +17 -0
- package/package.json +37 -0
- package/src/client.js +218 -0
- package/src/constants.js +34 -0
- package/src/errors.js +25 -0
- package/src/index.js +2 -0
- package/src/manager.js +94 -0
- package/src/pool.js +164 -0
- package/src/queasy.lua +397 -0
- package/src/queue.js +161 -0
- package/src/types.ts +92 -0
- package/src/utils.js +13 -0
- package/src/worker.js +44 -0
- package/test/client.test.js +49 -0
- package/test/errors.test.js +19 -0
- package/test/fixtures/always-fail-handler.js +8 -0
- package/test/fixtures/data-logger-handler.js +14 -0
- package/test/fixtures/failure-handler.js +9 -0
- package/test/fixtures/no-handle-handler.js +1 -0
- package/test/fixtures/permanent-error-handler.js +10 -0
- package/test/fixtures/slow-handler.js +9 -0
- package/test/fixtures/success-handler.js +9 -0
- package/test/fixtures/with-failure-handler.js +8 -0
- package/test/index.test.js +55 -0
- package/test/manager.test.js +87 -0
- package/test/pool.test.js +66 -0
- package/test/queue.test.js +438 -0
- package/test/redis-functions.test.js +683 -0
package/src/client.js
ADDED
@@ -0,0 +1,218 @@
import { readFileSync } from 'node:fs';
import { dirname, join } from 'node:path';
import { fileURLToPath } from 'node:url';
import { getEnvironmentData } from 'node:worker_threads';
import { HEARTBEAT_INTERVAL, HEARTBEAT_TIMEOUT } from './constants.js';
import { Manager } from './manager.js';
import { Pool } from './pool.js';
import { Queue } from './queue.js';
import { generateId } from './utils.js';

// Load Lua script
const __dirname = dirname(fileURLToPath(import.meta.url));
const luaScript = readFileSync(join(__dirname, 'queasy.lua'), 'utf8');

/** @typedef {import('redis').RedisClientType} RedisClient */
/** @typedef {import('./types').Job} Job */

/** @typedef {{ queue: Queue, bumpTimer?: NodeJS.Timeout }} QueueEntry */

/**
 * Parse job data from Redis response
 * @param {string[]} jobArray - Flat array from HGETALL
 * @returns {Job | null}
 */
export function parseJob(jobArray) {
  if (!jobArray || jobArray.length === 0) return null;

  /** @type {Record<string, string>} */
  const job = {};
  for (let i = 0; i < jobArray.length; i += 2) {
    const key = jobArray[i];
    const value = jobArray[i + 1];
    job[key] = value;
  }

  return {
    id: job.id,
    data: job.data ? JSON.parse(job.data) : undefined,
    runAt: job.run_at ? Number(job.run_at) : 0,
    retryCount: Number(job.retry_count || 0),
    stallCount: Number(job.stall_count || 0),
  };
}

export class Client {
  /**
   * @param {RedisClient} redis - Redis client
   * @param {number?} workerCount - Allow this client to dequeue jobs.
   */
  constructor(redis, workerCount) {
    this.redis = redis;
    this.clientId = generateId();

    /** @type {Record<string, QueueEntry>} */
    this.queues = {};

    const inWorker = getEnvironmentData('queasy_worker_context');
    this.pool = !inWorker && workerCount !== 0 ? new Pool(workerCount) : undefined;
    if (this.pool) this.manager = new Manager(this.pool);

    // We are not awaiting this; we rely on Redis’ single-threaded blocking
    // nature to ensure that this load completes before other Redis commands
    // are processed.
    this.redis.sendCommand(['FUNCTION', 'LOAD', 'REPLACE', luaScript]);
  }

  /**
   * Create a queue object for interacting with a named queue
   * @param {string} name - Queue name (without braces - they will be added automatically)
   * @returns {Queue} Queue object with dispatch, cancel, and listen methods
   */
  queue(name, isKey = false) {
    const key = isKey ? name : `{${name}}`;
    if (!this.queues[key]) {
      this.queues[key] = /** @type {QueueEntry} */ ({
        queue: new Queue(key, this, this.pool, this.manager),
      });
    }
    return this.queues[key].queue;
  }

  /**
   * This helps tests exit cleanly.
   */
  close() {
    for (const name in this.queues) {
      this.queues[name].queue.close();
      clearTimeout(this.queues[name].bumpTimer);
    }
    if (this.pool) this.pool.close();
    if (this.manager) this.manager.close();
    this.queues = {};
    this.pool = undefined;
  }

  /**
   * Schedule the next bump timer
   * @param {string} key
   */
  scheduleBump(key) {
    const queueEntry = this.queues[key];
    if (queueEntry.bumpTimer) clearTimeout(queueEntry.bumpTimer);
    queueEntry.bumpTimer = setTimeout(() => this.bump(key), HEARTBEAT_INTERVAL);
  }

  /**
   * @param {string} key
   */
  async bump(key) {
    // Set up the next bump first, in case this
    this.scheduleBump(key);
    const now = Date.now();
    const expiry = now + HEARTBEAT_TIMEOUT;
    await this.redis.fCall('queasy_bump', {
      keys: [key],
      arguments: [this.clientId, String(now), String(expiry)],
    });
  }

  /**
   * @param {string} key
   * @param {string} id
   * @param {number} runAt
   * @param {any} data
   * @param {boolean} updateData
   * @param {boolean | string} updateRunAt
   * @param {boolean} resetCounts
   */
  async dispatch(key, id, runAt, data, updateData, updateRunAt, resetCounts) {
    await this.redis.fCall('queasy_dispatch', {
      keys: [key],
      arguments: [
        id,
        String(runAt),
        JSON.stringify(data),
        String(updateData),
        String(updateRunAt),
        String(resetCounts),
      ],
    });
  }

  /**
   * @param {string} key
   * @param {string} id
   * @returns {Promise<boolean>}
   */
  async cancel(key, id) {
    const result = await this.redis.fCall('queasy_cancel', {
      keys: [key],
      arguments: [id],
    });
    return result === 1;
  }

  /**
   * @param {string} key
   * @param {number} count
   * @returns {Promise<Job[]>}
   */
  async dequeue(key, count) {
    const now = Date.now();
    const expiry = now + HEARTBEAT_TIMEOUT;
    const result = /** @type {string[][]} */ (
      await this.redis.fCall('queasy_dequeue', {
        keys: [key],
        arguments: [this.clientId, String(now), String(expiry), String(count)],
      })
    );

    // Heartbeats should start with the first dequeue.
    this.scheduleBump(key);

    return /** @type Job[] */ (result.map((jobArray) => parseJob(jobArray)).filter(Boolean));
  }

  /**
   * @param {string} key
   * @param {string} jobId
   */
  async finish(key, jobId) {
    await this.redis.fCall('queasy_finish', {
      keys: [key],
      arguments: [jobId, this.clientId, String(Date.now())],
    });
  }

  /**
   * @param {string} key
   * @param {string} failkey
   * @param {string} jobId
   * @param {any} failJobData
   */
  async fail(key, failkey, jobId, failJobData) {
    await this.redis.fCall('queasy_fail', {
      keys: [key, failkey],
      arguments: [
        jobId,
        this.clientId,
        generateId(),
        JSON.stringify(failJobData),
        String(Date.now()),
      ],
    });
  }

  /**
   * @param {string} key
   * @param {string} jobId
   * @param {number} retryAt
   */
  async retry(key, jobId, retryAt) {
    await this.redis.fCall('queasy_retry', {
      keys: [key],
      arguments: [jobId, this.clientId, String(retryAt), String(Date.now())],
    });
  }
}
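For reference, parseJob expects the flat [field, value, field, value, ...] shape that Redis returns for a hash. The call below is an illustrative sketch, not part of the package; the job values are made up, while the field names (id, data, run_at, retry_count, stall_count) come from parseJob itself.

import { parseJob } from './client.js';

const job = parseJob([
  'id', 'job-1',
  'data', '{"to":"user@example.com"}',
  'run_at', '1700000000000',
  'retry_count', '1',
]);
// => { id: 'job-1', data: { to: 'user@example.com' }, runAt: 1700000000000,
//      retryCount: 1, stallCount: 0 }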
package/src/constants.js
ADDED
@@ -0,0 +1,34 @@
/** @typedef {import('./types').HandlerOptions} HandlerOptions */
/** @typedef {import('./types').JobUpdateOptions} JobUpdateOptions */

/** @type {Required<HandlerOptions>} */
export const DEFAULT_RETRY_OPTIONS = {
  maxRetries: 10,
  maxStalls: 3,
  minBackoff: 2_000,
  maxBackoff: 300_000, // 5 minutes
  size: 10,
  timeout: 60_000, // 1 minute
};

/** @type {Required<JobUpdateOptions>} */
export const DEFAULT_UPDATE_OPTIONS = {
  updateData: true,
  updateRunAt: true,
  resetCounts: false,
};

/** @type {Required<HandlerOptions>} */
export const FAILJOB_RETRY_OPTIONS = {
  maxRetries: 100,
  maxStalls: 3,
  minBackoff: 10_000,
  maxBackoff: 900_000, // 15 minutes
  size: 2,
  timeout: 60_000,
};

export const HEARTBEAT_INTERVAL = 5000; // 5 seconds
export const HEARTBEAT_TIMEOUT = 10000; // 10 seconds
export const WORKER_CAPACITY = 10;
export const DEQUEUE_INTERVAL = 100; // ms
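A rough note on how these numbers relate, inferred from the Pool code later in this diff (each worker starts with WORKER_CAPACITY units of capacity and every running job debits its handler's size). This is a reading of the constants, not package documentation:

import { DEFAULT_RETRY_OPTIONS, FAILJOB_RETRY_OPTIONS, WORKER_CAPACITY } from './constants.js';

// With the default handler size, a worker runs one job at a time;
// fail-queue jobs are smaller, so up to five fit on one worker.
const defaultJobsPerWorker = Math.floor(WORKER_CAPACITY / DEFAULT_RETRY_OPTIONS.size); // 1
const failJobsPerWorker = Math.floor(WORKER_CAPACITY / FAILJOB_RETRY_OPTIONS.size); // 5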
package/src/errors.js
ADDED
@@ -0,0 +1,25 @@
/**
 * Error thrown to indicate a job should not be retried
 */
export class PermanentError extends Error {
  /**
   * @param {string} message - Error message
   */
  constructor(message) {
    super(message);
    this.name = 'PermanentError';
  }
}

/**
 * Error indicating a job stalled (worker stopped sending heartbeats)
 */
export class StallError extends Error {
  /**
   * @param {string} message - Error message
   */
  constructor(message) {
    super(message);
    this.name = 'StallError';
  }
}
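As an illustration of how these error classes are meant to be used, a handler could throw PermanentError to tell the queue not to retry a job. The handler shape below (a module exporting handle) is an assumption inferred from the test fixture names in this release, not something this file defines:

import { PermanentError } from './errors.js';

// Hypothetical handler: treat malformed jobs as permanent failures,
// and let any other thrown error go through the normal retry path.
export async function handle(job) {
  if (!job.data?.to) {
    throw new PermanentError('Missing recipient; do not retry');
  }
  // ...perform the actual work here
}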
package/src/index.js
ADDED
package/src/manager.js
ADDED
@@ -0,0 +1,94 @@
/**
 * This class manages resource allocation between
 * different queues based on the size of the queue
 */

import { DEQUEUE_INTERVAL } from './constants.js';

/** @typedef {import('./pool').Pool} Pool */
/** @typedef {import('./queue').ProcessingQueue} Queue */
/** @typedef {{ queue: Queue, lastDequeuedAt: number, isBusy: boolean }} QueueEntry */

export class Manager {
  /** @param {Pool} pool */
  constructor(pool) {
    this.pool = pool;

    /** @type {Array<QueueEntry>} */
    this.queues = [];

    /** @type {NodeJS.Timeout?} */
    this.timer = null;

    this.busyCount = 0;
  }

  /** @param {Queue} queue */
  addQueue(queue) {
    // Add this at the beginning so we dequeue it at the next available opportunity.
    this.queues.unshift({ queue, lastDequeuedAt: 0, isBusy: false });
    this.busyCount += 1;

    // This delay is required for queue listen tests
    // as they need to be able to control dequeueing
    this.next();
  }

  async next() {
    // If this function is called while the previous execution is in progress,
    // we do not want both executions to use the same queue.
    const entry = this.queues.shift();
    if (!entry) return;
    if (this.timer) {
      clearTimeout(this.timer);
      this.timer = null;
    }

    const size = entry.queue.handlerOptions.size;
    if (this.pool.capacity < size) return;

    const batchSize = Math.max(1, Math.floor(this.pool.capacity / this.busyCount / size));
    entry.lastDequeuedAt = Date.now(); // We store the time just before the call to dequeue.
    const { count } = await entry.queue.dequeue(batchSize);

    // Update
    const nowBusy = count >= batchSize;
    this.busyCount += Number(nowBusy) - Number(entry.isBusy);
    entry.isBusy = nowBusy;

    this.queues.push(entry);
    this.queues.sort(compareQueueEntries);

    if (!this.timer && this.queues.length) {
      const { isBusy, lastDequeuedAt } = this.queues[0];
      // If the current top queue is busy, retry now.
      const delay = isBusy ? 0 : Math.max(0, lastDequeuedAt - Date.now() + DEQUEUE_INTERVAL);
      this.timer = setTimeout(() => this.next(), delay);
    }
  }

  close() {
    if (this.timer) clearTimeout(this.timer);
  }
}

/**
 * @param {QueueEntry} a
 * @param {QueueEntry} b
 * @returns -1 | 0 | 1
 */
function compareQueueEntries(a, b) {
  if (a.isBusy > b.isBusy) return -1; // a busy, b not -> a first
  if (a.isBusy < b.isBusy) return 1; // a free, b busy -> b first

  if (a.queue.handlerOptions.priority > b.queue.handlerOptions.priority) return 1; // a higher -> a first
  if (a.queue.handlerOptions.priority < b.queue.handlerOptions.priority) return -1; // b higher -> b first

  if (a.lastDequeuedAt > b.lastDequeuedAt) return -1; // a newer -> b first
  if (a.lastDequeuedAt < b.lastDequeuedAt) return 1; // a older -> a first

  if (a.queue.handlerOptions.size > b.queue.handlerOptions.size) return 1; // a larger -> a first
  if (a.queue.handlerOptions.size < b.queue.handlerOptions.size) return -1; // b larger -> b first

  return 0;
}
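To make the batch-size arithmetic in next() concrete, here is a small worked example with hypothetical numbers (four workers at WORKER_CAPACITY 10, two busy queues, and the default handler size of 10):

// batchSize = Math.max(1, Math.floor(pool.capacity / busyCount / size))
const capacity = 4 * 10; // four workers × WORKER_CAPACITY
const busyCount = 2;     // queues currently counted as busy
const size = 10;         // DEFAULT_RETRY_OPTIONS.size
const batchSize = Math.max(1, Math.floor(capacity / busyCount / size)); // => 2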
package/src/pool.js
ADDED
@@ -0,0 +1,164 @@
import { cpus } from 'node:os';
import { Worker } from 'node:worker_threads';
import { WORKER_CAPACITY } from './constants.js';
import { generateId } from './utils.js';

/** @typedef {import('./types').DoneMessage} DoneMessage */
/** @typedef {import('./types').Job} Job */

/** @typedef {{
 * worker: Worker,
 * capacity: number,
 * id: string,
 * jobCount: number,
 * stalledJobs: Set<string>
 * }} WorkerEntry */

/** @typedef {{
 * resolve: (value: DoneMessage) => void,
 * reject: (reason: DoneMessage) => void,
 * size: number,
 * timer: NodeJS.Timeout
 * }} JobEntry */

export class Pool {
  /** @param {number?} targetCount - Number of desired workers */
  constructor(targetCount) {
    /** @type {Set<WorkerEntry>} */
    this.workers = new Set();
    /** @type {Map<string, JobEntry>} */
    this.activeJobs = new Map();

    this.capacity = 0;

    const count = targetCount ?? cpus().length;
    for (let i = 0; i < count; i++) this.createWorker();
  }

  createWorker() {
    const worker = new Worker(new URL('./worker.js', import.meta.url));
    const entry = {
      worker,
      capacity: WORKER_CAPACITY,
      id: generateId(),
      jobCount: 0,
      stalledJobs: new Set(),
    };
    this.capacity += WORKER_CAPACITY;
    worker.on('message', (message) => this.handleWorkerMessage(entry, message));
    this.workers.add(entry);
  }

  /**
   * @param {WorkerEntry} workerEntry
   * @param {DoneMessage} message
   */
  handleWorkerMessage(workerEntry, message) {
    const { jobId, error } = message;
    const jobEntry = this.activeJobs.get(jobId);
    if (!jobEntry) {
      console.warn('Worker message with unknown Job ID; Ignoring.');
      return;
    }
    clearTimeout(jobEntry.timer);
    workerEntry.capacity += jobEntry.size;
    this.capacity += jobEntry.size;
    workerEntry.jobCount -= 1;

    // If this job was previously marked as stalled, unmark it.
    if (workerEntry.stalledJobs.has(jobId)) workerEntry.stalledJobs.delete(jobId);

    this.activeJobs.delete(jobId);
    jobEntry[error ? 'reject' : 'resolve'](message);

    // If this worker is no longer in the the pool, check if it can be terminated.
    if (!this.workers.has(workerEntry)) this.terminateIfEmpty(workerEntry);
  }

  /**
   *
   * @param {WorkerEntry} workerEntry
   * @param {string} jobId
   */
  handleTimeout(workerEntry, jobId) {
    workerEntry.stalledJobs.add(jobId);

    // Remove and replace this worker in the pool (if it wasn’t already).
    if (this.workers.delete(workerEntry)) this.createWorker();
    this.capacity -= workerEntry.capacity;

    // If this is the last job in this worker, terminate it.
    this.terminateIfEmpty(workerEntry);
  }

  /**
   * Stops adding new jobs to a worker if it has stalled jobs.
   * Terminates workers if all remaining jobs are stalled.
   * @param {WorkerEntry} workerEntry
   * @returns
   */
  async terminateIfEmpty({ stalledJobs, jobCount, worker }) {
    // Don't destroy if there are still non-stalled jobs running.
    if (jobCount > stalledJobs.size) return;
    await worker.terminate();

    for (const jobId of stalledJobs) {
      const jobEntry = this.activeJobs.get(jobId);
      this.activeJobs.delete(jobId);
      jobEntry?.reject({
        op: 'done',
        jobId,
        error: { name: 'StallError', message: 'Job stalled', kind: 'stall' },
      });
    }
  }

  /**
   * Processes a job to the most free worker
   * @param {string} handlerPath
   * @param {Job} job
   * @param {number} size
   * @param {number} timeout - Maximum time in ms
   * @returns {Promise<DoneMessage>}
   */
  process(handlerPath, job, size, timeout) {
    // Find worker with most capacity
    let workerEntry = null;
    for (const entry of this.workers) {
      if (!workerEntry || entry.capacity > workerEntry.capacity) workerEntry = entry;
    }

    if (!workerEntry) throw Error('Can’t process job without workers');

    const timer = setTimeout(() => this.handleTimeout(workerEntry, job.id), timeout);

    return new Promise((resolve, reject) => {
      this.activeJobs.set(job.id, { resolve, reject, size, timer });
      workerEntry.capacity -= size;
      this.capacity -= size;
      workerEntry.jobCount += 1;
      workerEntry.worker.postMessage({ op: 'exec', handlerPath, job });
    });
  }

  /**
   * Terminates all workers
   */
  close() {
    for (const { worker } of this.workers) {
      worker.terminate();
    }

    for (const [jobId, { reject, timer }] of this.activeJobs.entries()) {
      clearTimeout(timer);
      reject({
        op: 'done',
        jobId,
        error: { name: 'StallError', message: 'Pool is closing', kind: 'stall' },
      });
    }

    this.workers = new Set();
    this.activeJobs.clear();
  }
}
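The package's worker.js is not shown in this section, so the sketch below only mirrors the message protocol visible in pool.js: the pool posts { op: 'exec', handlerPath, job } and settles the job's promise on a { op: 'done', jobId, error? } reply. Everything else here (the dynamic import, the exported handle function) is an assumption for illustration, not the package's actual worker:

import { parentPort } from 'node:worker_threads';

// Hypothetical worker loop matching the messages Pool sends and expects.
parentPort?.on('message', async ({ op, handlerPath, job }) => {
  if (op !== 'exec') return;
  try {
    const { handle } = await import(handlerPath);
    await handle(job);
    parentPort?.postMessage({ op: 'done', jobId: job.id });
  } catch (err) {
    // The real DoneMessage may carry more fields (e.g. kind); this is the minimum.
    parentPort?.postMessage({
      op: 'done',
      jobId: job.id,
      error: { name: err?.name, message: err?.message },
    });
  }
});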