@keetanetwork/anchor 0.0.33 → 0.0.34
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/http-server/index.d.ts +7 -1
- package/lib/http-server/index.d.ts.map +1 -1
- package/lib/http-server/index.js +2 -0
- package/lib/http-server/index.js.map +1 -1
- package/lib/queue/common.d.ts +26 -0
- package/lib/queue/common.d.ts.map +1 -0
- package/lib/queue/common.js +47 -0
- package/lib/queue/common.js.map +1 -0
- package/lib/queue/drivers/queue_file.d.ts +17 -0
- package/lib/queue/drivers/queue_file.d.ts.map +1 -0
- package/lib/queue/drivers/queue_file.js +100 -0
- package/lib/queue/drivers/queue_file.js.map +1 -0
- package/lib/queue/drivers/queue_postgres.d.ts +28 -0
- package/lib/queue/drivers/queue_postgres.d.ts.map +1 -0
- package/lib/queue/drivers/queue_postgres.js +360 -0
- package/lib/queue/drivers/queue_postgres.js.map +1 -0
- package/lib/queue/drivers/queue_redis.d.ts +27 -0
- package/lib/queue/drivers/queue_redis.d.ts.map +1 -0
- package/lib/queue/drivers/queue_redis.js +359 -0
- package/lib/queue/drivers/queue_redis.js.map +1 -0
- package/lib/queue/drivers/queue_sqlite3.d.ts +28 -0
- package/lib/queue/drivers/queue_sqlite3.d.ts.map +1 -0
- package/lib/queue/drivers/queue_sqlite3.js +378 -0
- package/lib/queue/drivers/queue_sqlite3.js.map +1 -0
- package/lib/queue/index.d.ts +341 -0
- package/lib/queue/index.d.ts.map +1 -0
- package/lib/queue/index.js +940 -0
- package/lib/queue/index.js.map +1 -0
- package/lib/queue/internal.d.ts +20 -0
- package/lib/queue/internal.d.ts.map +1 -0
- package/lib/queue/internal.js +66 -0
- package/lib/queue/internal.js.map +1 -0
- package/lib/queue/pipeline.d.ts +152 -0
- package/lib/queue/pipeline.d.ts.map +1 -0
- package/lib/queue/pipeline.js +296 -0
- package/lib/queue/pipeline.js.map +1 -0
- package/lib/resolver.d.ts +1 -1
- package/lib/resolver.d.ts.map +1 -1
- package/lib/resolver.js.map +1 -1
- package/lib/utils/asleep.d.ts +2 -0
- package/lib/utils/asleep.d.ts.map +1 -0
- package/lib/utils/asleep.js +3 -0
- package/lib/utils/asleep.js.map +1 -0
- package/lib/utils/defer.d.ts +4 -0
- package/lib/utils/defer.d.ts.map +1 -0
- package/lib/utils/defer.js +3 -0
- package/lib/utils/defer.js.map +1 -0
- package/npm-shrinkwrap.json +2 -2
- package/package.json +1 -1
- package/services/fx/client.d.ts +1 -1
- package/services/fx/client.d.ts.map +1 -1
- package/services/fx/client.js +2 -2
- package/services/fx/client.js.map +1 -1
- package/services/fx/common.d.ts +19 -4
- package/services/fx/common.d.ts.map +1 -1
- package/services/fx/common.js +8 -5
- package/services/fx/common.js.map +1 -1
- package/services/fx/server.d.ts +105 -8
- package/services/fx/server.d.ts.map +1 -1
- package/services/fx/server.js +609 -43
- package/services/fx/server.js.map +1 -1
|
@@ -0,0 +1,940 @@
|
|
|
1
|
+
import { __addDisposableResource, __disposeResources } from "tslib";
|
|
2
|
+
import { asleep } from '../utils/asleep.js';
|
|
3
|
+
import { Errors } from './common.js';
|
|
4
|
+
import { MethodLogger, ManageStatusUpdates, ConvertStringToRequestID } from './internal.js';
|
|
5
|
+
import { AsyncDisposableStack } from '../utils/defer.js';
|
|
6
|
+
/**
 * An in-memory implementation of the KeetaAnchorQueueStorageDriver
 *
 * Entries are kept in a plain object keyed by the partition path, so every
 * clone/partition created from one instance shares the same backing store.
 */
export class KeetaAnchorQueueStorageDriverMemory {
    /* Shared backing store: partition path key -> array of entries */
    queueStorage = {};
    logger;
    /* Monotonic counter used to derive unique IDs for clones/partitions */
    partitionCounter = 0;
    destroyed = false;
    name = 'KeetaAnchorQueueStorageDriverMemory';
    id;
    /* Partition path segments; frozen after construction */
    path = [];
    constructor(options) {
        this.id = options?.id ?? crypto.randomUUID();
        this.logger = options?.logger;
        this.path.push(...(options?.path ?? []));
        Object.freeze(this.path);
        this.methodLogger('new')?.debug('Created new in-memory queue storage driver');
    }
    /**
     * Create a copy of this driver which shares the same backing storage
     * (used by partition()); `options` may override logger/id/path.
     */
    clone(options) {
        const cloned = new KeetaAnchorQueueStorageDriverMemory({
            logger: this.logger,
            id: `${this.id}::${this.partitionCounter++}`,
            path: [...this.path],
            ...options
        });
        /* Share the storage so all partitions see a consistent view */
        cloned.queueStorage = this.queueStorage;
        return (cloned);
    }
    /**
     * The entry array for this driver's partition path, created lazily
     * on first access.
     */
    get queue() {
        const pathKey = ['root', ...this.path].join('.');
        let retval = this.queueStorage[pathKey];
        if (retval === undefined) {
            retval = this.queueStorage[pathKey] = [];
        }
        return (retval);
    }
    methodLogger(method) {
        return (MethodLogger(this.logger, {
            class: 'KeetaAnchorQueueStorageDriverMemory',
            file: 'src/lib/queue/index.ts',
            method: method,
            instanceID: this.id
        }));
    }
    /** Throw if destroy() has already been called on this driver. */
    checkDestroyed() {
        if (this.destroyed) {
            throw (new Error('Queue has been destroyed'));
        }
    }
    /**
     * Add a new entry to the queue.
     *
     * If `info.id` is supplied and already present the existing ID is
     * returned without modification; if any of `info.idempotentKeys` is
     * already present an IdempotentExistsError is thrown listing the
     * conflicting keys.
     *
     * @returns the (possibly newly generated) request ID
     */
    async add(request, info) {
        this.checkDestroyed();
        const logger = this.methodLogger('add');
        let id = ConvertStringToRequestID(info?.id);
        if (id) {
            const duplicateID = this.queue.some(function (checkEntry) {
                return (checkEntry.id === id);
            });
            if (duplicateID) {
                logger?.debug(`Request with id ${String(id)} already exists, ignoring`);
                return (id);
            }
        }
        const idempotentIDs = info?.idempotentKeys;
        if (idempotentIDs) {
            /*
             * Collect every idempotent key already present in the queue in
             * a single pass, then intersect with the requested keys (the
             * previous implementation rescanned the whole queue once per
             * requested key -- O(n*k)).
             */
            const existingKeys = new Set();
            for (const checkEntry of this.queue) {
                if (checkEntry.idempotentKeys) {
                    for (const existingKey of checkEntry.idempotentKeys) {
                        existingKeys.add(existingKey);
                    }
                }
            }
            const matchingIdempotentEntries = new Set();
            for (const idempotentID of idempotentIDs) {
                if (existingKeys.has(idempotentID)) {
                    matchingIdempotentEntries.add(idempotentID);
                }
            }
            if (matchingIdempotentEntries.size !== 0) {
                throw (new Errors.IdempotentExistsError('One or more idempotent entries already exist in the queue', matchingIdempotentEntries));
            }
        }
        /**
         * The status to use for the new entry
         */
        const status = info?.status ?? 'pending';
        /*
         * The ID is a branded string, so we must convert the generated UUID
         */
        id ??= ConvertStringToRequestID(crypto.randomUUID());
        logger?.debug(`Enqueuing request with id ${String(id)}`);
        /* Use one timestamp so created === updated on a fresh entry */
        const now = new Date();
        this.queue.push({
            id: id,
            request: request,
            output: null,
            lastError: null,
            status: status,
            failures: 0,
            created: now,
            updated: now,
            worker: null,
            idempotentKeys: idempotentIDs ? new Set(idempotentIDs) : undefined
        });
        return (id);
    }
    /**
     * Transition an entry to a new status; throws if the ID is unknown.
     * ManageStatusUpdates validates the transition and computes the field
     * changes which are then applied in place.
     */
    async setStatus(id, status, ancillary) {
        this.checkDestroyed();
        const logger = this.methodLogger('setStatus');
        const entry = this.queue.find(function (checkEntry) {
            return (checkEntry.id === id);
        });
        if (!entry) {
            throw (new Error(`Request with ID ${String(id)} not found`));
        }
        const changedFields = ManageStatusUpdates(id, entry, status, ancillary, logger);
        Object.assign(entry, changedFields);
    }
    /**
     * Fetch a deep copy of a single entry by ID, or null if not found.
     */
    async get(id) {
        this.checkDestroyed();
        const entry = this.queue.find(function (checkEntry) {
            return (checkEntry.id === id);
        });
        if (!entry) {
            return (null);
        }
        return (structuredClone(entry));
    }
    /**
     * Query entries, optionally filtered by status, last-update time, and
     * a result limit. Returned entries are deep copies.
     *
     * Fix: filter and limit against the live entries first and clone only
     * the matches (previously the whole queue was structuredClone()d up
     * front, an O(queue) copy even when the filter matched nothing).
     */
    async query(filter) {
        this.checkDestroyed();
        const logger = this.methodLogger('query');
        logger?.debug(`Querying queue with id ${this.id} with filter:`, filter);
        const filterStatus = filter?.status;
        const filterLastUpdateBefore = filter?.updatedBefore;
        let matches = this.queue;
        if (filterStatus || filterLastUpdateBefore) {
            matches = matches.filter(function (entry) {
                if (filterStatus) {
                    if (entry.status !== filterStatus) {
                        return (false);
                    }
                }
                if (filterLastUpdateBefore) {
                    if (entry.updated >= filterLastUpdateBefore) {
                        return (false);
                    }
                }
                return (true);
            });
        }
        if (filter?.limit !== undefined) {
            matches = matches.slice(0, filter.limit);
        }
        /* Clone only the entries we are actually returning */
        const retval = structuredClone(matches);
        logger?.debug(`Queried queue with id ${this.id} with filter:`, filter, '-- found', retval.length, 'entries');
        return (retval);
    }
    /**
     * Create a child driver scoped to a sub-path; it shares this driver's
     * backing store but sees a distinct entry array.
     */
    async partition(path) {
        this.checkDestroyed();
        const logger = this.methodLogger('partition');
        logger?.debug(`Creating partitioned queue storage driver for path "${path}"`);
        const partitioned = this.clone({
            path: [...this.path, path]
        });
        return (partitioned);
    }
    /** Mark this driver as destroyed; all further operations will throw. */
    async destroy() {
        this.destroyed = true;
        this.methodLogger('destroy')?.debug('Destroying in-memory queue');
    }
    async [Symbol.asyncDispose]() {
        return (await this.destroy());
    }
}
|
|
179
|
+
/**
|
|
180
|
+
* A Queue Runner and Request Translator for processing entries in a queue
|
|
181
|
+
*
|
|
182
|
+
* The queue runner is responsible for pulling entries from the queue,
|
|
183
|
+
* processing them, and updating their status in the queue. As well
|
|
184
|
+
* as moving jobs between queues by piping the output of one runner
|
|
185
|
+
* to another. Additionally, maintenance tasks such as re-queuing
|
|
186
|
+
* failed jobs and marking stuck jobs are also handled by the runner.
|
|
187
|
+
*
|
|
188
|
+
* This is an abstract base class that must be extended to provide
|
|
189
|
+
* the actual processing logic as well as the encoding and decoding
|
|
190
|
+
* for requests and responses.
|
|
191
|
+
*/
|
|
192
|
+
export class KeetaAnchorQueueRunner {
|
|
193
|
+
/**
|
|
194
|
+
* The queue this runner is responsible for running
|
|
195
|
+
*/
|
|
196
|
+
queue;
|
|
197
|
+
/**
|
|
198
|
+
* The logger we should use for logging anything
|
|
199
|
+
*/
|
|
200
|
+
logger;
|
|
201
|
+
/**
|
|
202
|
+
* Worker configuration
|
|
203
|
+
*/
|
|
204
|
+
workers;
|
|
205
|
+
workerID;
|
|
206
|
+
/**
|
|
207
|
+
* Pipes to other runners we have registered
|
|
208
|
+
*/
|
|
209
|
+
pipes = [];
|
|
210
|
+
/**
|
|
211
|
+
* Initialization promise
|
|
212
|
+
*/
|
|
213
|
+
initializePromise;
|
|
214
|
+
/**
|
|
215
|
+
* Configuration for this queue
|
|
216
|
+
*/
|
|
217
|
+
maxRetries = 5;
|
|
218
|
+
processTimeout = 300_000; /* 5 minutes */
|
|
219
|
+
batchSize = 100;
|
|
220
|
+
/**
|
|
221
|
+
* How many runners can process this queue in parallel
|
|
222
|
+
*/
|
|
223
|
+
maxRunners;
|
|
224
|
+
runnerLockKey;
|
|
225
|
+
/**
|
|
226
|
+
* The ID of this runner for diagnostic purposes
|
|
227
|
+
*/
|
|
228
|
+
id;
|
|
229
|
+
/**
 * @param config Runner configuration: `queue` (storage driver, required),
 *               optional `logger`, optional `workers` topology
 *               ({ count, id }), and optional diagnostic `id`.
 * @throws Error when the worker ID is negative or exceeds the runner limit
 */
constructor(config) {
    this.queue = config.queue;
    this.logger = config.logger;
    /* Default topology: a single worker with ID 0 */
    this.workers = config.workers ?? {
        count: 1,
        id: 0
    };
    if (this.workers.id < 0) {
        throw (new Error('Worker ID cannot be negative'));
    }
    /*
     * NOTE(review): `maxRunners` is declared without an initializer, so
     * this guard looks inert at construction time (it only fires after
     * _Testing().setParams sets maxRunners) -- confirm intent.
     */
    if (this.maxRunners) {
        if (this.workers.id > this.maxRunners - 1 || this.workers.count > this.maxRunners) {
            throw (new Error('Worker ID other than 0 or worker count other than 1 is not supported yet'));
        }
    }
    /*
     * The worker ID is just a branded version of the worker number
     */
    // eslint-disable-next-line @typescript-eslint/consistent-type-assertions
    this.workerID = this.workers.id;
    /*
     * The runner lock key, a unique key used to ensure only
     * one instance of a given runner is running at a time
     */
    this.runnerLockKey = ConvertStringToRequestID(`@runner-lock:9ba756f0-7aa2-41c7-a1ea-b010dc752ae8.worker.${this.workerID}`);
    /**
     * Instance ID
     */
    this.id = config.id ?? crypto.randomUUID();
    this.methodLogger('new')?.debug('Created new queue runner attached to queue', this.queue.id);
}
|
|
260
|
+
async initialize() {
|
|
261
|
+
if (this.initializePromise) {
|
|
262
|
+
return (await this.initializePromise);
|
|
263
|
+
}
|
|
264
|
+
/* Ensure the sequential lock entry exists */
|
|
265
|
+
this.initializePromise = (async () => {
|
|
266
|
+
/*
|
|
267
|
+
* We store `null` as the request value because we
|
|
268
|
+
* don't have anything better to store -- it's not
|
|
269
|
+
* always going to be compatible with the type
|
|
270
|
+
* QueueRequest but we know that we will never actually
|
|
271
|
+
* use the value.
|
|
272
|
+
*/
|
|
273
|
+
// eslint-disable-next-line @typescript-eslint/consistent-type-assertions
|
|
274
|
+
await this.queue.add(null, {
|
|
275
|
+
id: this.runnerLockKey,
|
|
276
|
+
status: '@internal'
|
|
277
|
+
});
|
|
278
|
+
})();
|
|
279
|
+
return (await this.initializePromise);
|
|
280
|
+
}
|
|
281
|
+
methodLogger(method) {
|
|
282
|
+
return (MethodLogger(this.logger, {
|
|
283
|
+
class: 'KeetaAnchorQueueRunner',
|
|
284
|
+
file: 'src/lib/queue/index.ts',
|
|
285
|
+
method: method,
|
|
286
|
+
instanceID: this.id
|
|
287
|
+
}));
|
|
288
|
+
}
|
|
289
|
+
/** @internal */
_Testing(key) {
    /* Backdoor for the test suite only; gated by a magic key */
    if (key !== 'bc81abf8-e43b-490b-b486-744fb49a5082') {
        throw (new Error('This is a testing only method'));
    }
    return ({
        /* Override the queue tuning parameters for deterministic tests */
        setParams: (maxBatchSize, processTimeout, maxRetries, maxWorkers) => {
            this.batchSize = maxBatchSize;
            this.processTimeout = processTimeout;
            this.maxRetries = maxRetries;
            if (maxWorkers !== undefined) {
                this.maxRunners = maxWorkers;
            }
        },
        /* Expose the underlying storage driver */
        queue: () => {
            return (this.queue);
        },
        /* Force-acquire the sequential worker lock ('@internal' -> 'processing') */
        markWorkerAsProcessing: async () => {
            await this.queue.setStatus(this.runnerLockKey, 'processing', {
                oldStatus: '@internal',
                by: this.workerID
            });
        }
    });
}
|
|
314
|
+
decodeEntry(entry) {
|
|
315
|
+
return ({
|
|
316
|
+
...entry,
|
|
317
|
+
request: this.decodeRequest(entry.request),
|
|
318
|
+
output: this.decodeResponse(entry.output)
|
|
319
|
+
});
|
|
320
|
+
}
|
|
321
|
+
/**
|
|
322
|
+
* Enqueue an item to be processed by the queue
|
|
323
|
+
*/
|
|
324
|
+
async add(request, info) {
|
|
325
|
+
await this.initialize();
|
|
326
|
+
const encodedRequest = this.encodeRequest(request);
|
|
327
|
+
const newID = await this.queue.add(encodedRequest, info);
|
|
328
|
+
return (newID);
|
|
329
|
+
}
|
|
330
|
+
/**
|
|
331
|
+
* Get a single entry from storage by ID
|
|
332
|
+
*/
|
|
333
|
+
async get(id) {
|
|
334
|
+
await this.initialize();
|
|
335
|
+
const entry = await this.queue.get(id);
|
|
336
|
+
if (!entry) {
|
|
337
|
+
return (null);
|
|
338
|
+
}
|
|
339
|
+
return (this.decodeEntry(entry));
|
|
340
|
+
}
|
|
341
|
+
/**
|
|
342
|
+
* Get entries from storage with an optional filter
|
|
343
|
+
*/
|
|
344
|
+
async query(filter) {
|
|
345
|
+
await this.initialize();
|
|
346
|
+
const entries = await this.queue.query(filter);
|
|
347
|
+
return (entries.map((entry) => {
|
|
348
|
+
return (this.decodeEntry(entry));
|
|
349
|
+
}));
|
|
350
|
+
}
|
|
351
|
+
/**
|
|
352
|
+
* Set the status of an entry in the queue
|
|
353
|
+
*/
|
|
354
|
+
async setStatus(id, status, ancillary) {
|
|
355
|
+
await this.initialize();
|
|
356
|
+
let encodedOutput = undefined;
|
|
357
|
+
if (ancillary?.output !== undefined) {
|
|
358
|
+
encodedOutput = this.encodeResponse(ancillary.output);
|
|
359
|
+
}
|
|
360
|
+
return (await this.queue.setStatus(id, status, {
|
|
361
|
+
...ancillary,
|
|
362
|
+
output: encodedOutput
|
|
363
|
+
}));
|
|
364
|
+
}
|
|
365
|
+
/**
|
|
366
|
+
* Checks to see if the queue is runnable
|
|
367
|
+
*/
|
|
368
|
+
async runnable() {
|
|
369
|
+
await this.initialize();
|
|
370
|
+
const pendingEntries = await this.queue.query({ status: 'pending', limit: 1 });
|
|
371
|
+
if (pendingEntries.length > 0) {
|
|
372
|
+
return (true);
|
|
373
|
+
}
|
|
374
|
+
for (const pipe of this.pipes) {
|
|
375
|
+
const pipeRunnable = await pipe.target.runnable();
|
|
376
|
+
if (pipeRunnable) {
|
|
377
|
+
return (true);
|
|
378
|
+
}
|
|
379
|
+
}
|
|
380
|
+
return (false);
|
|
381
|
+
}
|
|
382
|
+
/**
 * Attempt to acquire the sequential processing lock for this worker.
 *
 * The lock is a queue entry; acquiring it is a compare-and-set on that
 * entry's status ('@internal' -> 'processing'), which is what makes
 * acquisition mutually exclusive across runner instances.
 *
 * @param cleanup AsyncDisposableStack the release action is deferred onto
 * @returns true if the lock was acquired, false if another holds it
 */
async getRunnerLock(cleanup) {
    const logger = this.methodLogger('getRunnerLock');
    try {
        logger?.debug('Acquiring sequential processing lock for worker ID', this.workerID);
        await this.queue.setStatus(this.runnerLockKey, 'processing', {
            oldStatus: '@internal',
            by: this.workerID
        });
        logger?.debug('Acquired sequential processing lock for worker ID', this.workerID);
    }
    catch (error) {
        /* The CAS failed: another instance holds the lock */
        if (Errors.IncorrectStateAssertedError.isInstance(error)) {
            return (false);
        }
        throw (error);
    }
    /*
     * Release the lock when the caller's disposable stack unwinds,
     * retrying up to 10 times (1s apart) on transient failures so a
     * flaky storage driver does not leave the lock held forever.
     */
    cleanup.defer(async () => {
        for (let retry = 0; retry < 10; retry++) {
            logger?.debug(`Releasing sequential processing lock try #${retry + 1} for worker ID`, this.workerID);
            try {
                await this.queue.setStatus(this.runnerLockKey, '@internal', {
                    oldStatus: 'processing',
                    by: undefined
                });
            }
            catch {
                await asleep(1000);
                continue;
            }
            break;
        }
    });
    return (true);
}
|
|
416
|
+
/**
 * Maintenance for the sequential worker lock: if the lock can be
 * acquired it is immediately released again when the disposable stack
 * unwinds; if it cannot be acquired and it has not been updated for
 * more than 10x the process timeout, it is assumed stale and forcibly
 * reset to '@internal' so a live worker can take it.
 *
 * (The env_1/__addDisposableResource/__disposeResources scaffolding is
 * tslib's lowering of an `await using` declaration.)
 */
async maintainRunnerLock() {
    const env_1 = { stack: [], error: void 0, hasError: false };
    try {
        const logger = this.methodLogger('maintainRunnerLock');
        const moment = new Date();
        const cleanup = __addDisposableResource(env_1, new AsyncDisposableStack(), true);
        const obtained = await this.getRunnerLock(cleanup);
        if (obtained) {
            /* Lock is healthy; the deferred release runs on dispose */
            return;
        }
        /**
         * Check to see if the lock is stale
         */
        const lockEntry = await this.queue.get(this.runnerLockKey);
        if (!lockEntry) {
            return;
        }
        const lockAge = moment.getTime() - lockEntry.updated.getTime();
        if (lockAge > this.processTimeout * 10) {
            logger?.warn('Processing lock is stale, taking over lock for worker ID', this.workerID);
            await this.queue.setStatus(this.runnerLockKey, '@internal', {
                oldStatus: 'processing',
                by: this.workerID
            });
        }
    }
    catch (e_1) {
        env_1.error = e_1;
        env_1.hasError = true;
    }
    finally {
        const result_1 = __disposeResources(env_1);
        if (result_1)
            await result_1;
    }
}
|
|
452
|
+
/**
 * Run the queue processor
 *
 * Processes up to `batchSize` entries from the queue and returns
 * true if there may be more work to do, or false if the queue
 * is empty.
 *
 * Phases: (1) acquire the worker lock, (2) process pending entries,
 * (3) run each piped downstream runner with the remaining time budget,
 * (4) hand 'aborted' and 'stuck' entries to their optional recovery
 * processors.
 *
 * @param options Optional run options (`timeoutMs` bounds the total run)
 */
async run(options) {
    /* tslib lowering of `await using` for the cleanup stack */
    const env_2 = { stack: [], error: void 0, hasError: false };
    try {
        const timeout = options?.timeoutMs;
        await this.initialize();
        const logger = this.methodLogger('run');
        const batchSize = this.batchSize;
        const processTimeout = this.processTimeout;
        const cleanup = __addDisposableResource(env_2, new AsyncDisposableStack(), true);
        let retval = true;
        const startTime = Date.now();
        const locked = await this.getRunnerLock(cleanup);
        if (!locked) {
            /* Report "more work" so the caller retries after the other worker finishes */
            logger?.debug('Another worker is already processing the queue, skipping run');
            return (true);
        }
        /* Sentinels distinguishing a processed job from a run-budget timeout */
        const processJobOk = Symbol('processJobOk');
        const processJobTimeout = Symbol('processJobTimeout');
        const processJob = async (index, entry, startingStatus, processor) => {
            if (timeout !== undefined) {
                const elapsed = Date.now() - startTime;
                if (elapsed >= timeout) {
                    logger?.debug(`Timeout of ${timeout}ms reached after processing ${index} entries (${startingStatus} phase; elapsed ${elapsed}ms)`);
                    return (processJobTimeout);
                }
            }
            let setEntryStatus = { status: 'failed_temporarily', output: null };
            logger?.debug(`Processing entry request with id ${String(entry.id)}`);
            try {
                /*
                 * Get a lock by setting it to 'processing'
                 */
                await this.queue.setStatus(entry.id, 'processing', { oldStatus: startingStatus, by: this.workerID });
                /*
                 * Process the entry with a timeout, if the timeout is reached
                 * we should mark the process as aborted because we no longer
                 * know what state the work is in and someone will need to
                 * inspect the job and determine through some other means if
                 * it is completed or failed.
                 */
                let timeoutTimer = null;
                setEntryStatus = await Promise.race([
                    new Promise(function (resolve) {
                        timeoutTimer = setTimeout(function () {
                            resolve({ status: 'aborted', output: null });
                        }, processTimeout);
                    }),
                    (async () => {
                        try {
                            return (await processor(this.decodeEntry(entry)));
                        }
                        finally {
                            /* Cancel the abort timer once the processor settles */
                            if (timeoutTimer) {
                                clearTimeout(timeoutTimer);
                            }
                        }
                    })()
                ]);
            }
            catch (error) {
                /* Someone else grabbed the entry between query and CAS; not an error */
                if (Errors.IncorrectStateAssertedError.isInstance(error)) {
                    logger?.info(`Skipping request with id ${String(entry.id)} because it is no longer in the expected state "${startingStatus}"`, error);
                    return (processJobOk);
                }
                logger?.error(`Failed to process request with id ${String(entry.id)}, setting state to "${setEntryStatus.status}":`, error);
                setEntryStatus.status = 'failed_temporarily';
                setEntryStatus.error = String(error);
            }
            /* A processor must never leave the entry in 'processing' */
            if (setEntryStatus.status === 'processing') {
                logger?.error(`Processor for request with id ${String(entry.id)} returned invalid status "processing"`);
                setEntryStatus.status = 'failed_temporarily';
                setEntryStatus.error = 'Processor returned invalid status "processing"';
            }
            /* Entries returned to 'pending' are unowned again */
            let by = this.workerID;
            if (setEntryStatus.status === 'pending') {
                by = undefined;
            }
            await this.queue.setStatus(entry.id, setEntryStatus.status, { oldStatus: 'processing', by: by, output: this.encodeResponse(setEntryStatus.output), error: setEntryStatus.error });
            return (processJobOk);
        };
        /*
         * Process pending jobs first
         */
        for (let index = 0; index < batchSize; index++) {
            const entries = await this.queue.query({ status: 'pending', limit: 1 });
            const entry = entries[0];
            if (entry === undefined) {
                retval = false;
                break;
            }
            const result = await processJob(index, entry, 'pending', this.processor.bind(this));
            if (result === processJobTimeout) {
                break;
            }
        }
        /*
         * Next process any pipes to other runners
         */
        const pipes = [...this.pipes];
        for (const pipe of pipes) {
            let remainingTime = undefined;
            if (timeout !== undefined) {
                const elapsed = Date.now() - startTime;
                remainingTime = timeout - elapsed;
                /* Budget exhausted: pass -1 so the downstream run bails immediately */
                if (remainingTime <= 0) {
                    remainingTime = -1;
                }
            }
            const pipeHasMoreWork = await pipe.target.run({
                ...options,
                timeoutMs: remainingTime
            });
            if (pipeHasMoreWork) {
                retval = true;
            }
        }
        /**
         * Process stuck or aborted jobs (if possible)
         */
        const conditions = [{
                status: 'aborted',
                processor: this.processorAborted?.bind(this)
            }, {
                status: 'stuck',
                processor: this.processorStuck?.bind(this)
            }];
        let timeoutReached = false;
        for (const condition of conditions) {
            if (condition.processor === undefined) {
                continue;
            }
            for (let index = 0; index < batchSize; index++) {
                const entries = await this.queue.query({ status: condition.status, limit: 1 });
                const entry = entries[0];
                if (entry === undefined) {
                    break;
                }
                const result = await processJob(index, entry, condition.status, condition.processor);
                if (result === processJobTimeout) {
                    timeoutReached = true;
                    break;
                }
            }
            if (timeoutReached) {
                break;
            }
        }
        return (retval);
    }
    catch (e_2) {
        env_2.error = e_2;
        env_2.hasError = true;
    }
    finally {
        const result_2 = __disposeResources(env_2);
        if (result_2)
            await result_2;
    }
}
|
|
620
|
+
async markStuckRequestsAsStuck() {
|
|
621
|
+
const stuckThreshold = this.processTimeout * 10;
|
|
622
|
+
const logger = this.methodLogger('markStuckRequestsAsStuck');
|
|
623
|
+
const now = Date.now();
|
|
624
|
+
const requests = await this.queue.query({ status: 'processing', limit: 100, updatedBefore: new Date(now - stuckThreshold) });
|
|
625
|
+
for (const request of requests) {
|
|
626
|
+
try {
|
|
627
|
+
logger?.warn(`Marking request with id ${String(request.id)} as stuck`);
|
|
628
|
+
await this.queue.setStatus(request.id, 'stuck', { oldStatus: 'processing', by: this.workerID });
|
|
629
|
+
}
|
|
630
|
+
catch (error) {
|
|
631
|
+
logger?.error(`Failed to mark request with id ${String(request.id)} as stuck:`, error);
|
|
632
|
+
}
|
|
633
|
+
}
|
|
634
|
+
}
|
|
635
|
+
async requeueFailedRequests() {
|
|
636
|
+
const retryDelay = this.processTimeout * 10;
|
|
637
|
+
const maxRetries = this.maxRetries;
|
|
638
|
+
const logger = this.methodLogger('requeueFailedRequests');
|
|
639
|
+
const now = Date.now();
|
|
640
|
+
const requests = await this.queue.query({ status: 'failed_temporarily', limit: 100, updatedBefore: new Date(now - retryDelay) });
|
|
641
|
+
for (const request of requests) {
|
|
642
|
+
try {
|
|
643
|
+
if (request.failures >= maxRetries) {
|
|
644
|
+
logger?.info(`Request with id ${String(request.id)} has exceeded maximum retries, not requeuing -- moving to failed_permanently`);
|
|
645
|
+
await this.queue.setStatus(request.id, 'failed_permanently', { oldStatus: 'failed_temporarily', by: this.workerID });
|
|
646
|
+
continue;
|
|
647
|
+
}
|
|
648
|
+
logger?.debug(`Requeuing failed request with id ${String(request.id)}`);
|
|
649
|
+
await this.queue.setStatus(request.id, 'pending', { oldStatus: 'failed_temporarily', by: this.workerID });
|
|
650
|
+
}
|
|
651
|
+
catch (error) {
|
|
652
|
+
logger?.error(`Failed to requeue request with id ${String(request.id)}:`, error);
|
|
653
|
+
}
|
|
654
|
+
}
|
|
655
|
+
}
|
|
656
|
+
async moveCompletedToNextStage() {
|
|
657
|
+
const logger = this.methodLogger('moveCompletedToNextStage');
|
|
658
|
+
const pipes = [...this.pipes];
|
|
659
|
+
if (pipes.length === 0) {
|
|
660
|
+
return;
|
|
661
|
+
}
|
|
662
|
+
const allRequests = await this.queue.query({ status: 'completed', limit: 100 });
|
|
663
|
+
let requests = allRequests;
|
|
664
|
+
const RequestSentToPipes = new Map();
|
|
665
|
+
function IncrRequestSentToPipes(requestID) {
|
|
666
|
+
const sentCount = RequestSentToPipes.get(requestID) ?? 0;
|
|
667
|
+
RequestSentToPipes.set(requestID, sentCount + 1);
|
|
668
|
+
}
|
|
669
|
+
for (const pipe of pipes) {
|
|
670
|
+
logger?.debug('Processing pipe to target', pipe.target.id, pipe.isBatchPipe ? '(batch pipe)' : '(single item pipe)');
|
|
671
|
+
if (pipe.isBatchPipe) {
|
|
672
|
+
/**
|
|
673
|
+
* Keep track of all the requests we successfully
|
|
674
|
+
* sent to the target stage
|
|
675
|
+
*/
|
|
676
|
+
const allTargetSeenRequestIDs = new Set();
|
|
677
|
+
/**
|
|
678
|
+
* During each iteration of the batch processing, we keep track
|
|
679
|
+
* of the IDs we have already seen by the target and processed
|
|
680
|
+
* so we don't try to reprocess them again
|
|
681
|
+
*/
|
|
682
|
+
const iterationTargetSeenRequestIDs = new Set();
|
|
683
|
+
/**
|
|
684
|
+
* If we get a batch that cannot be added to the target pipe,
|
|
685
|
+
* we just skip over them for retrying at a later date
|
|
686
|
+
*/
|
|
687
|
+
const skipRequestIDs = new Set();
|
|
688
|
+
/**
|
|
689
|
+
* Compute a durable ID for this batch and target
|
|
690
|
+
*/
|
|
691
|
+
let batchID = ConvertStringToRequestID(crypto.randomUUID());
|
|
692
|
+
/**
|
|
693
|
+
* Keep track of sequential failures to find enough entries
|
|
694
|
+
* and stop processing if we can't find enough after a few tries
|
|
695
|
+
* in a row
|
|
696
|
+
*/
|
|
697
|
+
let sequentialFailureCount = 0;
|
|
698
|
+
for (; requests.length >= pipe.minBatchSize;
|
|
699
|
+
/*
|
|
700
|
+
* Remove any entries we have already seen during
|
|
701
|
+
* the last iteration of the loop
|
|
702
|
+
*/
|
|
703
|
+
requests = requests.filter(function (entry) {
|
|
704
|
+
return (!iterationTargetSeenRequestIDs.has(entry.id) && !skipRequestIDs.has(entry.id));
|
|
705
|
+
})) {
|
|
706
|
+
iterationTargetSeenRequestIDs.clear();
|
|
707
|
+
logger?.debug(`Preparing to move completed requests to next stage ${pipe.target.id} (min=${pipe.minBatchSize}, max=${pipe.maxBatchSize}), have ${requests.length} completed requests available`);
|
|
708
|
+
/**
|
|
709
|
+
* Compute a batch of entries to send to the next stage,
|
|
710
|
+
* constrained to the max batch size of the pipe and
|
|
711
|
+
* the entries which have non-null outputs
|
|
712
|
+
*/
|
|
713
|
+
const batchRaw = requests.map((entry) => {
|
|
714
|
+
return ({ output: this.decodeResponse(entry.output), id: entry.id });
|
|
715
|
+
}).filter(function (entry) {
|
|
716
|
+
if (entry === null) {
|
|
717
|
+
return (false);
|
|
718
|
+
}
|
|
719
|
+
return (true);
|
|
720
|
+
}).slice(0, pipe.maxBatchSize);
|
|
721
|
+
/*
|
|
722
|
+
* If we don't have enough entries to meet the minimum
|
|
723
|
+
* batch size, skip this iteration
|
|
724
|
+
*/
|
|
725
|
+
if (batchRaw.length < pipe.minBatchSize) {
|
|
726
|
+
sequentialFailureCount++;
|
|
727
|
+
if (sequentialFailureCount >= 3) {
|
|
728
|
+
logger?.debug(`Not enough completed requests to move to next stage ${pipe.target.id}, stopping batch processing`);
|
|
729
|
+
break;
|
|
730
|
+
}
|
|
731
|
+
logger?.debug(`Not moving completed requests to next stage ${pipe.target.id} because batch size ${batchRaw.length} is less than minimum size ${pipe.minBatchSize}`);
|
|
732
|
+
continue;
|
|
733
|
+
}
|
|
734
|
+
sequentialFailureCount = 0;
|
|
735
|
+
/**
|
|
736
|
+
* The IDs for the entries we are sending to the next stage
|
|
737
|
+
* target -- this may get reduced if we find there are already
|
|
738
|
+
* jobs in the next stage that have the idempotentIDs of one of
|
|
739
|
+
* these jobs
|
|
740
|
+
*/
|
|
741
|
+
const batchLocalIDs = new Set(batchRaw.map(function (entry) {
|
|
742
|
+
return (entry.id);
|
|
743
|
+
}));
|
|
744
|
+
/**
|
|
745
|
+
* The outputs for the batch we are sending to the next stage
|
|
746
|
+
*/
|
|
747
|
+
const batchOutput = batchRaw.map(function (entry) {
|
|
748
|
+
return (entry.output);
|
|
749
|
+
});
|
|
750
|
+
logger?.debug(`Moving batch of ${batchOutput.length} completed requests to next pipe`, pipe.target.id, '(input entry IDs:', Array.from(batchLocalIDs), '->', `${pipe.target.id}:${String(batchID)})`);
|
|
751
|
+
try {
|
|
752
|
+
await pipe.target.add(batchOutput, {
|
|
753
|
+
id: batchID,
|
|
754
|
+
/* Use the set of IDs as the idempotent IDs for the batch */
|
|
755
|
+
idempotentKeys: batchLocalIDs
|
|
756
|
+
});
|
|
757
|
+
batchID = ConvertStringToRequestID(crypto.randomUUID());
|
|
758
|
+
}
|
|
759
|
+
catch (error) {
|
|
760
|
+
if (Errors.IdempotentExistsError.isInstance(error) && error.idempotentIDsFound) {
|
|
761
|
+
logger?.debug('Some of the jobs have already been added to the target queue, skipping those:', error.idempotentIDsFound.values());
|
|
762
|
+
for (const requestID of error.idempotentIDsFound) {
|
|
763
|
+
iterationTargetSeenRequestIDs.add(requestID);
|
|
764
|
+
allTargetSeenRequestIDs.add(requestID);
|
|
765
|
+
}
|
|
766
|
+
}
|
|
767
|
+
else {
|
|
768
|
+
/*
|
|
769
|
+
* If we got some kind of other error adding these
|
|
770
|
+
* items to the target queue runner, just skip them
|
|
771
|
+
* and we will retry them on the next iteration
|
|
772
|
+
*/
|
|
773
|
+
logger?.error(`Failed to move completed batch to next stage ${pipe.target.id}, will try to create another batch without them:`, error);
|
|
774
|
+
for (const requestID of batchLocalIDs) {
|
|
775
|
+
skipRequestIDs.add(requestID);
|
|
776
|
+
}
|
|
777
|
+
}
|
|
778
|
+
continue;
|
|
779
|
+
}
|
|
780
|
+
for (const requestID of batchLocalIDs) {
|
|
781
|
+
iterationTargetSeenRequestIDs.add(requestID);
|
|
782
|
+
allTargetSeenRequestIDs.add(requestID);
|
|
783
|
+
}
|
|
784
|
+
}
|
|
785
|
+
/*
|
|
786
|
+
* For every request we know the target has definitely seen, mark it
|
|
787
|
+
* as moved for this pipe
|
|
788
|
+
*/
|
|
789
|
+
for (const requestID of allTargetSeenRequestIDs) {
|
|
790
|
+
IncrRequestSentToPipes(requestID);
|
|
791
|
+
}
|
|
792
|
+
}
|
|
793
|
+
else {
|
|
794
|
+
for (const request of requests) {
|
|
795
|
+
let shouldMarkAsMoved = true;
|
|
796
|
+
try {
|
|
797
|
+
const output = this.decodeResponse(request.output);
|
|
798
|
+
if (output === null) {
|
|
799
|
+
logger?.debug(`Completed request with id ${String(request.id)} has no output -- next stage will not be run`);
|
|
800
|
+
}
|
|
801
|
+
else {
|
|
802
|
+
logger?.debug(`Moving completed request with id ${String(request.id)} to next pipe`, pipe.target.id);
|
|
803
|
+
await pipe.target.add(output, { id: request.id });
|
|
804
|
+
}
|
|
805
|
+
}
|
|
806
|
+
catch (error) {
|
|
807
|
+
logger?.error(`Failed to move completed request with id ${String(request.id)} to next stage:`, error);
|
|
808
|
+
shouldMarkAsMoved = false;
|
|
809
|
+
}
|
|
810
|
+
if (shouldMarkAsMoved) {
|
|
811
|
+
IncrRequestSentToPipes(request.id);
|
|
812
|
+
}
|
|
813
|
+
}
|
|
814
|
+
}
|
|
815
|
+
}
|
|
816
|
+
const TotalPipes = pipes.length;
|
|
817
|
+
for (const request of allRequests) {
|
|
818
|
+
const sentCount = RequestSentToPipes.get(request.id) ?? 0;
|
|
819
|
+
if (sentCount !== TotalPipes) {
|
|
820
|
+
logger?.debug(`Completed request with id ${String(request.id)} was only moved to ${sentCount} out of ${TotalPipes} pipes -- not marking as moved`);
|
|
821
|
+
continue;
|
|
822
|
+
}
|
|
823
|
+
logger?.debug(`Marking completed request with id ${String(request.id)} as moved`);
|
|
824
|
+
await this.queue.setStatus(request.id, 'moved', { oldStatus: 'completed', by: this.workerID });
|
|
825
|
+
}
|
|
826
|
+
}
|
|
827
|
+
async maintain() {
|
|
828
|
+
const logger = this.methodLogger('maintain');
|
|
829
|
+
if (this.workers.id !== 0) {
|
|
830
|
+
return;
|
|
831
|
+
}
|
|
832
|
+
await this.initialize();
|
|
833
|
+
try {
|
|
834
|
+
await this.maintainRunnerLock();
|
|
835
|
+
}
|
|
836
|
+
catch (error) {
|
|
837
|
+
logger?.debug('Failed to maintain runner lock:', error);
|
|
838
|
+
}
|
|
839
|
+
try {
|
|
840
|
+
await this.markStuckRequestsAsStuck();
|
|
841
|
+
}
|
|
842
|
+
catch (error) {
|
|
843
|
+
logger?.debug('Failed to mark stuck requests as stuck:', error);
|
|
844
|
+
}
|
|
845
|
+
try {
|
|
846
|
+
await this.requeueFailedRequests();
|
|
847
|
+
}
|
|
848
|
+
catch (error) {
|
|
849
|
+
logger?.debug('Failed to requeue failed requests:', error);
|
|
850
|
+
}
|
|
851
|
+
try {
|
|
852
|
+
await this.moveCompletedToNextStage();
|
|
853
|
+
}
|
|
854
|
+
catch (error) {
|
|
855
|
+
logger?.debug('Failed to move completed requests to next stage:', error);
|
|
856
|
+
}
|
|
857
|
+
for (const pipe of this.pipes) {
|
|
858
|
+
try {
|
|
859
|
+
await pipe.target.maintain();
|
|
860
|
+
}
|
|
861
|
+
catch (error) {
|
|
862
|
+
logger?.debug(`Failed to maintain piped runner with ID ${pipe.target.id}:`, error);
|
|
863
|
+
}
|
|
864
|
+
}
|
|
865
|
+
if (this.queue.maintain) {
|
|
866
|
+
try {
|
|
867
|
+
await this.queue.maintain();
|
|
868
|
+
}
|
|
869
|
+
catch (error) {
|
|
870
|
+
logger?.debug(`Failed to maintain queue storage driver with ID ${this.queue.id}`, error);
|
|
871
|
+
}
|
|
872
|
+
}
|
|
873
|
+
}
|
|
874
|
+
/**
|
|
875
|
+
* Pipe the the completed entries of this runner to another runner
|
|
876
|
+
*/
|
|
877
|
+
pipe(target) {
|
|
878
|
+
this.pipes.push({
|
|
879
|
+
isBatchPipe: false,
|
|
880
|
+
target: target
|
|
881
|
+
});
|
|
882
|
+
return (target);
|
|
883
|
+
}
|
|
884
|
+
/**
|
|
885
|
+
* Pipe batches of completed entries from this runner to another runner
|
|
886
|
+
*/
|
|
887
|
+
pipeBatch(target, maxBatchSize = 100, minBatchSize = 1) {
|
|
888
|
+
this.pipes.push({
|
|
889
|
+
isBatchPipe: true,
|
|
890
|
+
target: target,
|
|
891
|
+
minBatchSize: minBatchSize,
|
|
892
|
+
maxBatchSize: maxBatchSize
|
|
893
|
+
});
|
|
894
|
+
return (target);
|
|
895
|
+
}
|
|
896
|
+
async destroy() {
|
|
897
|
+
this.methodLogger('destroy')?.debug('Destroying queue runner attached to queue', this.queue.id);
|
|
898
|
+
}
|
|
899
|
+
async [Symbol.asyncDispose]() {
|
|
900
|
+
await this.destroy();
|
|
901
|
+
}
|
|
902
|
+
}
|
|
903
|
+
/**
 * A KeetaAnchorQueueRunner whose wire format is the data itself:
 * requests and responses are assumed to already be JSON-serializable,
 * so every encode/decode hook is simply the identity function.
 */
export class KeetaAnchorQueueRunnerJSON extends KeetaAnchorQueueRunner {
    /** Identity: the stored request is already the decoded request */
    decodeRequest(request) {
        return (request);
    }

    /** Identity: the stored response is already the decoded response */
    decodeResponse(response) {
        return (response);
    }

    /** Identity: the request is stored as-is */
    encodeRequest(request) {
        return (request);
    }

    /** Identity: the response is stored as-is */
    encodeResponse(response) {
        return (response);
    }
}
|
|
923
|
+
/**
 * A KeetaAnchorQueueRunnerJSON whose processor function is supplied
 * through the constructor config rather than by subclassing -- this is
 * mainly useful for testing.
 */
export class KeetaAnchorQueueRunnerJSONConfigProc extends KeetaAnchorQueueRunnerJSON {
    processor;

    constructor(config) {
        super(config);
        this.processor = config.processor;
        /* The stuck/aborted hooks are optional; install only those provided */
        for (const hook of ['processorStuck', 'processorAborted']) {
            if (config[hook]) {
                this[hook] = config[hook];
            }
        }
    }
}
|
|
940
|
+
//# sourceMappingURL=index.js.map
|