@dbos-inc/dbos-sdk 2.8.45-preview.g9518e3e14d → 2.9.2-preview
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dbos-config.schema.json +28 -19
- package/dist/dbos-config.schema.json +28 -19
- package/dist/schemas/system_db_schema.d.ts +0 -4
- package/dist/schemas/system_db_schema.d.ts.map +1 -1
- package/dist/src/client.js +2 -2
- package/dist/src/client.js.map +1 -1
- package/dist/src/dbos-executor.d.ts +3 -1
- package/dist/src/dbos-executor.d.ts.map +1 -1
- package/dist/src/dbos-executor.js +66 -28
- package/dist/src/dbos-executor.js.map +1 -1
- package/dist/src/dbos-runtime/config.js +2 -2
- package/dist/src/dbos-runtime/config.js.map +1 -1
- package/dist/src/dbos-runtime/workflow_management.d.ts.map +1 -1
- package/dist/src/dbos-runtime/workflow_management.js +1 -2
- package/dist/src/dbos-runtime/workflow_management.js.map +1 -1
- package/dist/src/dbos.d.ts +1 -8
- package/dist/src/dbos.d.ts.map +1 -1
- package/dist/src/dbos.js +8 -28
- package/dist/src/dbos.js.map +1 -1
- package/dist/src/error.d.ts +6 -11
- package/dist/src/error.d.ts.map +1 -1
- package/dist/src/error.js +16 -27
- package/dist/src/error.js.map +1 -1
- package/dist/src/scheduler/scheduler.js +1 -1
- package/dist/src/scheduler/scheduler.js.map +1 -1
- package/dist/src/system_database.d.ts +15 -48
- package/dist/src/system_database.d.ts.map +1 -1
- package/dist/src/system_database.js +99 -332
- package/dist/src/system_database.js.map +1 -1
- package/dist/src/workflow.d.ts.map +1 -1
- package/dist/src/workflow.js +38 -7
- package/dist/src/workflow.js.map +1 -1
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/package.json +1 -1
- package/migrations/20250415000000_triggers_wfstatus.js +0 -43
- package/migrations/20250421000000_workflowcancel.js +0 -15
- package/migrations/20250421100000_triggers_wfcancel.js +0 -35
package/dist/src/system_database.js
@@ -1,4 +1,5 @@
 "use strict";
+/* eslint-disable @typescript-eslint/no-explicit-any */
 var __importDefault = (this && this.__importDefault) || function (mod) {
     return (mod && mod.__esModule) ? mod : { "default": mod };
 };
@@ -39,37 +40,6 @@ async function migrateSystemDatabase(systemPoolConfig, logger) {
     }
 }
 exports.migrateSystemDatabase = migrateSystemDatabase;
-class NotificationMap {
-    map = new Map();
-    curCK = 0;
-    registerCallback(key, cb) {
-        if (!this.map.has(key)) {
-            this.map.set(key, new Map());
-        }
-        const ck = this.curCK++;
-        this.map.get(key).set(ck, cb);
-        return { key, ck };
-    }
-    deregisterCallback(k) {
-        if (!this.map.has(k.key))
-            return;
-        const sm = this.map.get(k.key);
-        if (!sm.has(k.ck))
-            return;
-        sm.delete(k.ck);
-        if (sm.size === 0) {
-            this.map.delete(k.key);
-        }
-    }
-    callCallbacks(key, event) {
-        if (!this.map.has(key))
-            return;
-        const sm = this.map.get(key);
-        for (const cb of sm.values()) {
-            cb(event);
-        }
-    }
-}
 class PostgresSystemDatabase {
     pgPoolConfig;
     systemDatabaseName;
@@ -78,33 +48,9 @@ class PostgresSystemDatabase {
     pool;
     systemPoolConfig;
     knexDB;
-    /*
-     * Generally, notifications are asynchronous. One should:
-     *  Subscribe to updates
-     *  Read the database item in question
-     *  In response to updates, re-read the database item
-     *  Unsubscribe at the end
-     * The notification mechanism is reliable in the sense that it will eventually deliver updates
-     * or the DB connection will get dropped. The right thing to do if you lose connectivity to
-     * the system DB is to exit the process and go through recovery... system DB writes, notifications,
-     * etc may not have completed correctly, and recovery is the way to rebuild in-memory state.
-     *
-     * NOTE:
-     * PG Notifications are not fully reliable.
-     * Dropped connections are recoverable - you just need to restart and scan everything.
-     * (The whole VM being the logical choice, so workflows can recover from any write failures.)
-     * The real problem is, if the pipes out of the server are full... then notifications can be
-     * dropped, and only the PG server log may note it. For those reasons, we do occasional polling
-     */
     notificationsClient = null;
-
-
-    notificationsMap = new NotificationMap();
-    workflowEventsMap = new NotificationMap();
-    cancelWakeupMap = new NotificationMap();
-    workflowStatusMap = new NotificationMap();
-    runningWorkflowMap = new Map(); // Map from workflowID to workflow promise
-    workflowCancellationMap = new Map(); // Map from workflowID to its cancellation status.
+    notificationsMap = {};
+    workflowEventsMap = {};
     constructor(pgPoolConfig, systemDatabaseName, logger, sysDbPoolSize) {
         this.pgPoolConfig = pgPoolConfig;
         this.systemDatabaseName = systemDatabaseName;
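The comment removed in this hunk documents the protocol the system database expects from notification consumers: subscribe first, read the item, re-read it on every update, unsubscribe at the end, and poll occasionally because Postgres LISTEN/NOTIFY can silently drop messages when the server's outbound buffers are full. Below is a minimal TypeScript sketch of that protocol; it is not SDK code, the names (`waitForItem`, `readItem`) are hypothetical, and it assumes a connected node-postgres client that has already issued the relevant LISTEN.

```typescript
import { Client } from 'pg';

// Sketch only: subscribe -> read -> re-read on update -> unsubscribe, with a
// polling fallback in case a NOTIFY is dropped. Not part of the DBOS SDK.
async function waitForItem(
  client: Client,                          // assumed: already connected and LISTENing
  channel: string,
  key: string,
  readItem: () => Promise<string | null>,  // re-reads the database item in question
  pollIntervalMs = 1000,
  timeoutMs = 60_000,
): Promise<string | null> {
  let wake: () => void = () => {};
  const onNotify = (msg: { channel: string; payload?: string }) => {
    // Re-check the item whenever an update for our key arrives.
    if (msg.channel === channel && msg.payload === key) wake();
  };
  client.on('notification', onNotify);     // 1. subscribe before reading
  try {
    const deadline = Date.now() + timeoutMs;
    while (Date.now() < deadline) {
      const value = await readItem();      // 2. read (and later re-read) the item
      if (value !== null) return value;
      await new Promise<void>((resolve) => {
        const timer = setTimeout(resolve, pollIntervalMs); // occasional polling fallback
        wake = () => { clearTimeout(timer); resolve(); };
      });
    }
    return null;                           // timed out
  } finally {
    client.removeListener('notification', onNotify); // 3. unsubscribe at the end
  }
}
```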
@@ -154,9 +100,7 @@ class PostgresSystemDatabase {
         finally {
             await pgSystemClient.end();
         }
-
-            await this.listenForNotifications();
-        }
+        await this.listenForNotifications();
     }
     async destroy() {
         await this.knexDB.destroy();
@@ -179,7 +123,7 @@ class PostgresSystemDatabase {
         await pgSystemClient.query(`DROP DATABASE IF EXISTS ${dbosConfig.system_database};`);
         await pgSystemClient.end();
     }
-    async initWorkflowStatus(initStatus,
+    async initWorkflowStatus(initStatus, args) {
         const result = await this.pool.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status (
         workflow_uuid,
         status,
@@ -213,8 +157,8 @@ class PostgresSystemDatabase {
             initStatus.queueName,
             initStatus.authenticatedUser,
             initStatus.assumedRole,
-
-
+            utils_1.DBOSJSON.stringify(initStatus.authenticatedRoles),
+            utils_1.DBOSJSON.stringify(initStatus.request),
             null,
             initStatus.executorId,
             initStatus.applicationVersion,
@@ -256,11 +200,12 @@ class PostgresSystemDatabase {
         }
         this.logger.debug(`Workflow ${initStatus.workflowUUID} attempt number: ${attempts}.`);
         const status = resRow.status;
+        const serializedInputs = utils_1.DBOSJSON.stringify(args);
         const { rows } = await this.pool.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_inputs (workflow_uuid, inputs) VALUES($1, $2) ON CONFLICT (workflow_uuid) DO UPDATE SET workflow_uuid = excluded.workflow_uuid RETURNING inputs`, [initStatus.workflowUUID, serializedInputs]);
         if (serializedInputs !== rows[0].inputs) {
             this.logger.warn(`Workflow inputs for ${initStatus.workflowUUID} changed since the first call! Use the original inputs.`);
         }
-        return {
+        return { args: utils_1.DBOSJSON.parse(rows[0].inputs), status };
     }
     async recordWorkflowStatusChange(workflowID, status, update, client) {
         let rec = '';
@@ -273,7 +218,7 @@ class PostgresSystemDatabase {
         const wRes = await (client ?? this.pool).query(`UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status
       SET ${rec} status=$2, output=$3, error=$4, updated_at=$5 WHERE workflow_uuid=$1`, [workflowID, status, update.output, update.error, Date.now()]);
         if (wRes.rowCount !== 1) {
-            throw new error_1.
+            throw new error_1.DBOSWorkflowConflictUUIDError(`Attempt to record transition of nonexistent workflow ${workflowID}`);
         }
     }
     async recordWorkflowOutput(workflowID, status) {
@@ -294,10 +239,9 @@ class PostgresSystemDatabase {
         if (rows.length === 0) {
             return null;
         }
-        return rows[0].inputs;
+        return utils_1.DBOSJSON.parse(rows[0].inputs);
     }
     async getOperationResult(workflowID, functionID, client) {
-        await this.checkIfCanceled(workflowID);
         const { rows } = await (client ?? this.pool).query(`SELECT output, error, child_workflow_id, function_name
       FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs
       WHERE workflow_uuid=$1 AND function_id=$2`, [workflowID, functionID]);
@@ -338,7 +282,7 @@ class PostgresSystemDatabase {
             const err = error;
             if (err.code === '40001' || err.code === '23505') {
                 // Serialization and primary key conflict (Postgres).
-                throw new error_1.
+                throw new error_1.DBOSWorkflowConflictUUIDError(workflowID);
             }
             else {
                 throw err;
@@ -363,30 +307,6 @@ class PostgresSystemDatabase {
         return serialOutput;
     }
     async durableSleepms(workflowID, functionID, durationMS) {
-        let resolveNotification;
-        const cancelPromise = new Promise((resolve) => {
-            resolveNotification = resolve;
-        });
-        const cbr = this.cancelWakeupMap.registerCallback(workflowID, resolveNotification);
-        try {
-            let timeoutPromise = Promise.resolve();
-            const { promise, cancel: timeoutCancel } = await this.durableSleepmsInternal(workflowID, functionID, durationMS);
-            timeoutPromise = promise;
-            try {
-                await Promise.race([cancelPromise, timeoutPromise]);
-            }
-            finally {
-                timeoutCancel();
-            }
-        }
-        finally {
-            this.cancelWakeupMap.deregisterCallback(cbr);
-        }
-        await this.checkIfCanceled(workflowID);
-    }
-    async durableSleepmsInternal(workflowID, functionID, durationMS, maxSleepPerIteration) {
-        if (maxSleepPerIteration === undefined)
-            maxSleepPerIteration = durationMS;
         const curTime = Date.now();
         let endTimeMs = curTime + durationMS;
         const res = await this.getOperationResult(workflowID, functionID);
@@ -399,10 +319,7 @@ class PostgresSystemDatabase {
         else {
             await this.recordOperationResult(workflowID, functionID, { serialOutput: JSON.stringify(endTimeMs), functionName: exports.DBOS_FUNCNAME_SLEEP }, false);
         }
-        return {
-            ...(0, utils_1.cancellableSleep)(Math.max(Math.min(maxSleepPerIteration, endTimeMs - curTime), 0)),
-            endTime: endTimeMs,
-        };
+        return (0, utils_1.cancellableSleep)(Math.max(endTimeMs - curTime, 0));
     }
     nullTopic = '__null__topic__';
     async send(workflowID, functionID, destinationID, message, topic) {
@@ -441,56 +358,37 @@ class PostgresSystemDatabase {
             }
             return res.res.res;
         }
-
-
-
-        // register the key with the global notifications listener.
+        // Check if the key is already in the DB, then wait for the notification if it isn't.
+        const initRecvRows = (await this.pool.query(`SELECT topic FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.notifications WHERE destination_uuid=$1 AND topic=$2;`, [workflowID, topic])).rows;
+        if (initRecvRows.length === 0) {
+            // Then, register the key with the global notifications listener.
             let resolveNotification;
             const messagePromise = new Promise((resolve) => {
                 resolveNotification = resolve;
             });
             const payload = `${workflowID}::${topic}`;
-
-
-
-            });
+            this.notificationsMap[payload] = resolveNotification; // The resolver assignment in the Promise definition runs synchronously.
+            let timeoutPromise = Promise.resolve();
+            let timeoutCancel = () => { };
             try {
-                await this.
-
-
-
-
-
-
-
-
-
-
-
-                timeoutPromise = promise;
-                timeoutCancel = cancel;
-                finishTime = endTime;
-            }
-            else {
-                let poll = finishTime ? finishTime - ct : this.dbPollingIntervalMs;
-                poll = Math.min(this.dbPollingIntervalMs, poll);
-                const { promise, cancel } = (0, utils_1.cancellableSleep)(poll);
-                timeoutPromise = promise;
-                timeoutCancel = cancel;
-            }
-            try {
-                await Promise.race([messagePromise, timeoutPromise]);
-            }
-            finally {
-                timeoutCancel();
-            }
+                const { promise, cancel } = await this.durableSleepms(workflowID, timeoutFunctionID, timeoutSeconds * 1000);
+                timeoutPromise = promise;
+                timeoutCancel = cancel;
+            }
+            catch (e) {
+                this.logger.error(e);
+                delete this.notificationsMap[payload];
+                timeoutCancel();
+                throw new Error('durable sleepms failed');
+            }
+            try {
+                await Promise.race([messagePromise, timeoutPromise]);
             }
             finally {
-
-                this.
+                timeoutCancel();
+                delete this.notificationsMap[payload];
             }
         }
-        await this.checkIfCanceled(workflowID);
         // Transactionally consume and return the message if it's in the DB, otherwise return null.
         let message = null;
         const client = await this.pool.connect();
@@ -564,49 +462,40 @@ class PostgresSystemDatabase {
         // Get the return the value. if it's in the DB, otherwise return null.
         let value = null;
         const payloadKey = `${workflowID}::${key}`;
-        const timeoutms = timeoutSeconds !== undefined ? timeoutSeconds * 1000 : undefined;
-        let finishTime = timeoutms !== undefined ? Date.now() + timeoutms : undefined;
         // Register the key with the global notifications listener first... we do not want to look in the DB first
         // or that would cause a timing hole.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            const initRecvRows = (await this.pool.query(`
-            SELECT key, value
-            FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_events
-            WHERE workflow_uuid=$1 AND key=$2;`, [workflowID, key])).rows;
-            if (initRecvRows.length > 0) {
-                value = initRecvRows[0].value;
-                break;
-            }
-            const ct = Date.now();
-            if (finishTime && ct > finishTime)
-                break; // Time's up
+        let resolveNotification;
+        const valuePromise = new Promise((resolve) => {
+            resolveNotification = resolve;
+        });
+        this.workflowEventsMap[payloadKey] = resolveNotification; // The resolver assignment in the Promise definition runs synchronously.
+        try {
+            // Check if the key is already in the DB, then wait for the notification if it isn't.
+            const initRecvRows = (await this.pool.query(`
+            SELECT key, value
+            FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_events
+            WHERE workflow_uuid=$1 AND key=$2;`, [workflowID, key])).rows;
+            if (initRecvRows.length > 0) {
+                value = initRecvRows[0].value;
+            }
+            else {
                 // If we have a callerWorkflow, we want a durable sleep, otherwise, not
                 let timeoutPromise = Promise.resolve();
                 let timeoutCancel = () => { };
-                if (callerWorkflow
-
-
-
-
+                if (callerWorkflow) {
+                    try {
+                        const { promise, cancel } = await this.durableSleepms(callerWorkflow.workflowID, callerWorkflow.timeoutFunctionID ?? -1, timeoutSeconds * 1000);
+                        timeoutPromise = promise;
+                        timeoutCancel = cancel;
+                    }
+                    catch (e) {
+                        this.logger.error(e);
+                        delete this.workflowEventsMap[payloadKey];
+                        throw new Error('durable sleepms failed');
+                    }
                 }
                 else {
-
-                    poll = Math.min(this.dbPollingIntervalMs, poll);
-                    const { promise, cancel } = (0, utils_1.cancellableSleep)(poll);
+                    const { promise, cancel } = (0, utils_1.cancellableSleep)(timeoutSeconds * 1000);
                     timeoutPromise = promise;
                     timeoutCancel = cancel;
                 }
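Both the removed and the added code in this hunk follow the ordering the retained comment insists on: the resolver is registered under the payload key before the table is queried, so a NOTIFY that arrives between the query and the wait still wakes the waiter. Below is a condensed TypeScript sketch of that register-then-check pattern; the names (`notifyMap`, `waitForKey`, `readValue`) are hypothetical stand-ins for the SDK's `workflowEventsMap` / `notificationsMap` plumbing.

```typescript
// Sketch only: register the resolver first, then look in the database, then
// race the notification against a timeout, and re-read after waking up.
const notifyMap: Record<string, () => void> = {};

// A LISTEN handler would wake a waiter with: notifyMap[msg.payload]?.();

async function waitForKey(
  payloadKey: string,
  readValue: () => Promise<string | null>, // e.g. SELECT value FROM workflow_events ...
  timeoutMs: number,
): Promise<string | null> {
  let wake: () => void = () => {};
  const notified = new Promise<void>((resolve) => { wake = resolve; }); // runs synchronously
  notifyMap[payloadKey] = wake;            // 1. register first: closes the timing hole
  try {
    const existing = await readValue();    // 2. only then check the database
    if (existing !== null) return existing;
    let timer: ReturnType<typeof setTimeout> | undefined;
    try {
      await Promise.race([
        notified,                          // woken by the notification handler
        new Promise<void>((resolve) => { timer = setTimeout(resolve, timeoutMs); }),
      ]);
    } finally {
      if (timer) clearTimeout(timer);
    }
    return await readValue();              // 3. re-read after wakeup or timeout
  } finally {
    delete notifyMap[payloadKey];          // 4. always deregister
  }
}
```

The final re-read matters because the notification only signals that something changed; the value itself still comes from the table, which also covers the case where the wait ended on the timeout rather than on a NOTIFY.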
@@ -616,12 +505,17 @@ class PostgresSystemDatabase {
                 finally {
                     timeoutCancel();
                 }
+                const finalRecvRows = (await this.pool.query(`
+                SELECT value
+                FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_events
+                WHERE workflow_uuid=$1 AND key=$2;`, [workflowID, key])).rows;
+                if (finalRecvRows.length > 0) {
+                    value = finalRecvRows[0].value;
+                }
             }
-
-
-
-            this.cancelWakeupMap.deregisterCallback(crh);
-        }
+        }
+        finally {
+            delete this.workflowEventsMap[payloadKey];
         }
         // Record the output if it is inside a workflow.
         if (callerWorkflow) {
@@ -635,23 +529,10 @@ class PostgresSystemDatabase {
     async setWorkflowStatus(workflowID, status, resetRecoveryAttempts) {
         await this.recordWorkflowStatusChange(workflowID, status, { resetRecoveryAttempts });
     }
-    setWFCancelMap(workflowID) {
-        if (this.runningWorkflowMap.has(workflowID)) {
-            this.workflowCancellationMap.set(workflowID, true);
-        }
-        this.cancelWakeupMap.callCallbacks(workflowID);
-    }
-    clearWFCancelMap(workflowID) {
-        if (this.workflowCancellationMap.has(workflowID)) {
-            this.workflowCancellationMap.delete(workflowID);
-        }
-    }
     async cancelWorkflow(workflowID) {
         const client = await this.pool.connect();
         try {
             await client.query('BEGIN');
-            await client.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_cancel(workflow_id)
-      VALUES ($1)`, [workflowID]);
             // Remove workflow from queues table
             await client.query(`DELETE FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue
      WHERE workflow_uuid = $1`, [workflowID]);
@@ -660,20 +541,12 @@ class PostgresSystemDatabase {
             await client.query('COMMIT');
         }
         catch (error) {
-            console.log(error);
             await client.query('ROLLBACK');
             throw error;
         }
         finally {
             client.release();
         }
-        this.setWFCancelMap(workflowID);
-    }
-    async checkIfCanceled(workflowID) {
-        if (this.workflowCancellationMap.get(workflowID) === true) {
-            throw new error_1.DBOSWorkflowCancelledError(workflowID);
-        }
-        return Promise.resolve();
     }
     async resumeWorkflow(workflowID) {
         const client = await this.pool.connect();
@@ -691,8 +564,6 @@ class PostgresSystemDatabase {
             // Remove the workflow from the queues table so resume can safely be called on an ENQUEUED workflow
             await client.query(`DELETE FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue
      WHERE workflow_uuid = $1`, [workflowID]);
-            await client.query(`DELETE FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_cancel
-      WHERE workflow_id = $1`, [workflowID]);
             // Update status to pending and reset recovery attempts
             await this.recordWorkflowStatusChange(workflowID, workflow_1.StatusString.PENDING, { resetRecoveryAttempts: true }, client);
             await client.query('COMMIT');
@@ -704,38 +575,6 @@ class PostgresSystemDatabase {
         finally {
             client.release();
         }
-        this.clearWFCancelMap(workflowID);
-    }
-    registerRunningWorkflow(workflowID, workflowPromise) {
-        // Need to await for the workflow and capture errors.
-        const awaitWorkflowPromise = workflowPromise
-            .catch((error) => {
-            this.logger.debug('Captured error in awaitWorkflowPromise: ' + error);
-        })
-            .finally(() => {
-            // Remove itself from pending workflow map.
-            this.runningWorkflowMap.delete(workflowID);
-            this.workflowCancellationMap.delete(workflowID);
-        });
-        this.runningWorkflowMap.set(workflowID, awaitWorkflowPromise);
-    }
-    async awaitRunningWorkflows() {
-        if (this.runningWorkflowMap.size > 0) {
-            this.logger.info('Waiting for pending workflows to finish.');
-            await Promise.allSettled(this.runningWorkflowMap.values());
-        }
-        if (this.workflowEventsMap.map.size > 0) {
-            this.logger.warn('Workflow events map is not empty - shutdown is not clean.');
-            //throw new Error('Workflow events map is not empty - shutdown is not clean.');
-        }
-        if (this.notificationsMap.map.size > 0) {
-            this.logger.warn('Message notification map is not empty - shutdown is not clean.');
-            //throw new Error('Message notification map is not empty - shutdown is not clean.');
-        }
-        if (this.workflowStatusMap.map.size > 0) {
-            this.logger.warn('Workflow status map is not empty - shutdown is not clean.');
-            //throw new Error('Workflow status map is not empty - shutdown is not clean.');
-        }
     }
     async getWorkflowStatus(workflowID, callerID, callerFN) {
         const internalStatus = await this.getWorkflowStatusInternal(workflowID, callerID, callerFN);
@@ -772,8 +611,8 @@ class PostgresSystemDatabase {
             queueName: rows[0].queue_name || undefined,
             authenticatedUser: rows[0].authenticated_user,
             assumedRole: rows[0].assumed_role,
-            authenticatedRoles:
-            request:
+            authenticatedRoles: utils_1.DBOSJSON.parse(rows[0].authenticated_roles),
+            request: utils_1.DBOSJSON.parse(rows[0].request),
             executorId: rows[0].executor_id,
             createdAt: Number(rows[0].created_at),
             updatedAt: Number(rows[0].updated_at),
@@ -787,79 +626,34 @@ class PostgresSystemDatabase {
         }, exports.DBOS_FUNCNAME_GETSTATUS, callerID, callerFN);
         return sv ? JSON.parse(sv) : null;
     }
-    async awaitWorkflowResult(workflowID,
-        const
-
+    async awaitWorkflowResult(workflowID, timeoutms) {
+        const pollingIntervalMs = 1000;
+        const et = timeoutms !== undefined ? new Date().getTime() + timeoutms : undefined;
         while (true) {
-
-
-
-
-
-                resolveNotification();
-            });
-            const crh = callerID
-                ? this.cancelWakeupMap.registerCallback(callerID, (_res) => {
-                    resolveNotification();
-                })
-                : undefined;
-            try {
-                if (callerID)
-                    await this.checkIfCanceled(callerID);
-                try {
-                    const { rows } = await this.pool.query(`SELECT status, output, error FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status WHERE workflow_uuid=$1`, [workflowID]);
-                    if (rows.length > 0) {
-                        const status = rows[0].status;
-                        if (status === workflow_1.StatusString.SUCCESS) {
-                            return { res: rows[0].output };
-                        }
-                        else if (status === workflow_1.StatusString.ERROR) {
-                            return { err: rows[0].error };
-                        }
-                        else if (status === workflow_1.StatusString.CANCELLED) {
-                            return { cancelled: true };
-                        }
-                        else {
-                            // Status is not actionable
-                        }
-                    }
+            const { rows } = await this.pool.query(`SELECT status, output, error FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status WHERE workflow_uuid=$1`, [workflowID]);
+            if (rows.length > 0) {
+                const status = rows[0].status;
+                if (status === workflow_1.StatusString.SUCCESS) {
+                    return { res: rows[0].output };
                 }
-
-
-                    this.logger.error(`Exception from system database: ${err}`);
-                    throw err;
+                else if (status === workflow_1.StatusString.ERROR) {
+                    return { err: rows[0].error };
                 }
-
-
-
-
-
-                if (timerFuncID !== undefined && callerID !== undefined && timeoutms !== undefined) {
-                    const { promise, cancel, endTime } = await this.durableSleepmsInternal(callerID, timerFuncID, timeoutms, this.dbPollingIntervalMs);
-                    finishTime = endTime;
-                    timeoutPromise = promise;
-                    timeoutCancel = cancel;
+            }
+            if (et !== undefined) {
+                const ct = new Date().getTime();
+                if (et > ct) {
+                    await (0, utils_1.sleepms)(Math.min(pollingIntervalMs, et - ct));
                 }
                 else {
-
-                    poll = Math.min(this.dbPollingIntervalMs, poll);
-                    const { promise, cancel } = (0, utils_1.cancellableSleep)(poll);
-                    timeoutPromise = promise;
-                    timeoutCancel = cancel;
-                }
-                try {
-                    await Promise.race([statusPromise, timeoutPromise]);
-                }
-                finally {
-                    timeoutCancel();
+                    break;
                 }
             }
-
-
-            if (crh)
-                this.cancelWakeupMap.deregisterCallback(crh);
+            else {
+                await (0, utils_1.sleepms)(pollingIntervalMs);
             }
         }
+        return undefined;
     }
     /* BACKGROUND PROCESSES */
     /**
@@ -870,42 +664,15 @@ class PostgresSystemDatabase {
         this.notificationsClient = await this.pool.connect();
         await this.notificationsClient.query('LISTEN dbos_notifications_channel;');
         await this.notificationsClient.query('LISTEN dbos_workflow_events_channel;');
-        await this.notificationsClient.query('LISTEN dbos_workflow_status_channel;');
-        await this.notificationsClient.query('LISTEN dbos_workflow_cancel_channel;');
         const handler = (msg) => {
-            if (!this.shouldUseDBNotifications)
-                return; // Testing parameter
             if (msg.channel === 'dbos_notifications_channel') {
-                if (msg.payload) {
-                    this.notificationsMap
-                }
-            }
-            else if (msg.channel === 'dbos_workflow_events_channel') {
-                if (msg.payload) {
-                    this.workflowEventsMap.callCallbacks(msg.payload);
+                if (msg.payload && msg.payload in this.notificationsMap) {
+                    this.notificationsMap[msg.payload]();
                 }
             }
-            else
-                if (msg.payload) {
-
-                    this.workflowStatusMap.callCallbacks(notif.wfid, notif);
-                    if (notif.status === workflow_1.StatusString.CANCELLED) {
-                        this.setWFCancelMap(notif.wfid);
-                    }
-                    else {
-                        this.clearWFCancelMap(notif.wfid);
-                    }
-                }
-            }
-            else if (msg.channel === 'dbos_workflow_cancel_channel') {
-                if (msg.payload) {
-                    const notif = JSON.parse(msg.payload);
-                    if (notif.cancelled === 't') {
-                        this.setWFCancelMap(notif.wfid);
-                    }
-                    else {
-                        this.clearWFCancelMap(notif.wfid);
-                    }
+            else {
+                if (msg.payload && msg.payload in this.workflowEventsMap) {
+                    this.workflowEventsMap[msg.payload]();
                 }
             }
         };
@@ -1102,7 +869,7 @@ class PostgresSystemDatabase {
     }
     async dequeueWorkflow(workflowId, queue) {
         if (queue.rateLimit) {
-            const time = Date.
+            const time = new Date().getTime();
             await this.pool.query(`
         UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue
         SET completed_at_epoch_ms = $2
@@ -1117,7 +884,7 @@ class PostgresSystemDatabase {
         }
     }
     async findAndMarkStartableWorkflows(queue, executorID, appVersion) {
-        const startTimeMs = Date.
+        const startTimeMs = new Date().getTime();
         const limiterPeriodMS = queue.rateLimit ? queue.rateLimit.periodSec * 1000 : 0;
         const claimedIDs = [];
         await this.knexDB.transaction(async (trx) => {