@dbos-inc/dbos-sdk 4.10.7-preview → 4.10.9-preview
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/schemas/system_db_schema.d.ts +1 -0
- package/dist/schemas/system_db_schema.d.ts.map +1 -1
- package/dist/schemas/system_db_schema.js.map +1 -1
- package/dist/src/client.d.ts +1 -1
- package/dist/src/client.d.ts.map +1 -1
- package/dist/src/client.js +3 -26
- package/dist/src/client.js.map +1 -1
- package/dist/src/dbos-executor.d.ts +2 -3
- package/dist/src/dbos-executor.d.ts.map +1 -1
- package/dist/src/dbos-executor.js +2 -16
- package/dist/src/dbos-executor.js.map +1 -1
- package/dist/src/dbos.d.ts.map +1 -1
- package/dist/src/dbos.js +6 -10
- package/dist/src/dbos.js.map +1 -1
- package/dist/src/sysdb_migrations/internal/migrations.d.ts.map +1 -1
- package/dist/src/sysdb_migrations/internal/migrations.js +6 -0
- package/dist/src/sysdb_migrations/internal/migrations.js.map +1 -1
- package/dist/src/system_database.d.ts +49 -135
- package/dist/src/system_database.d.ts.map +1 -1
- package/dist/src/system_database.js +1169 -1116
- package/dist/src/system_database.js.map +1 -1
- package/dist/src/workflow.d.ts +1 -1
- package/dist/src/workflow.d.ts.map +1 -1
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/package.json +1 -1
|
@@ -9,7 +9,7 @@ var __metadata = (this && this.__metadata) || function (k, v) {
|
|
|
9
9
|
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
|
|
10
10
|
};
|
|
11
11
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
12
|
-
exports.
|
|
12
|
+
exports.SystemDatabase = exports.ensureSystemDatabase = exports.grantDbosSchemaPermissions = exports.DBOS_STREAM_CLOSED_SENTINEL = exports.DEFAULT_POOL_SIZE = exports.DBOS_FUNCNAME_CLOSESTREAM = exports.DBOS_FUNCNAME_WRITESTREAM = exports.DBOS_FUNCNAME_GETSTATUS = exports.DBOS_FUNCNAME_SLEEP = exports.DBOS_FUNCNAME_GETEVENT = exports.DBOS_FUNCNAME_SETEVENT = exports.DBOS_FUNCNAME_RECV = exports.DBOS_FUNCNAME_SEND = void 0;
|
|
13
13
|
const dbos_executor_1 = require("./dbos-executor");
|
|
14
14
|
const pg_1 = require("pg");
|
|
15
15
|
const error_1 = require("./error");
|
|
@@ -146,180 +146,6 @@ class NotificationMap {
|
|
|
146
146
|
}
|
|
147
147
|
}
|
|
148
148
|
}
|
|
149
|
-
async function insertWorkflowStatus(client, initStatus, schemaName, ownerXid, incrementAttempts = false) {
|
|
150
|
-
try {
|
|
151
|
-
const { rows } = await client.query(`INSERT INTO "${schemaName}".workflow_status (
|
|
152
|
-
workflow_uuid,
|
|
153
|
-
status,
|
|
154
|
-
name,
|
|
155
|
-
class_name,
|
|
156
|
-
config_name,
|
|
157
|
-
queue_name,
|
|
158
|
-
authenticated_user,
|
|
159
|
-
assumed_role,
|
|
160
|
-
authenticated_roles,
|
|
161
|
-
request,
|
|
162
|
-
executor_id,
|
|
163
|
-
application_version,
|
|
164
|
-
application_id,
|
|
165
|
-
created_at,
|
|
166
|
-
recovery_attempts,
|
|
167
|
-
updated_at,
|
|
168
|
-
workflow_timeout_ms,
|
|
169
|
-
workflow_deadline_epoch_ms,
|
|
170
|
-
inputs,
|
|
171
|
-
deduplication_id,
|
|
172
|
-
priority,
|
|
173
|
-
queue_partition_key,
|
|
174
|
-
forked_from,
|
|
175
|
-
parent_workflow_id,
|
|
176
|
-
serialization,
|
|
177
|
-
owner_xid
|
|
178
|
-
) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $26, $27)
|
|
179
|
-
ON CONFLICT (workflow_uuid)
|
|
180
|
-
DO UPDATE SET
|
|
181
|
-
recovery_attempts = CASE
|
|
182
|
-
WHEN workflow_status.status != '${workflow_1.StatusString.ENQUEUED}'
|
|
183
|
-
THEN workflow_status.recovery_attempts + $25
|
|
184
|
-
ELSE workflow_status.recovery_attempts
|
|
185
|
-
END,
|
|
186
|
-
updated_at = EXCLUDED.updated_at,
|
|
187
|
-
executor_id = CASE
|
|
188
|
-
WHEN EXCLUDED.status != '${workflow_1.StatusString.ENQUEUED}'
|
|
189
|
-
THEN EXCLUDED.executor_id
|
|
190
|
-
ELSE workflow_status.executor_id
|
|
191
|
-
END
|
|
192
|
-
RETURNING recovery_attempts, status, name, class_name, config_name, queue_name, workflow_deadline_epoch_ms, executor_id, owner_xid, serialization`, [
|
|
193
|
-
initStatus.workflowUUID,
|
|
194
|
-
initStatus.status,
|
|
195
|
-
initStatus.workflowName,
|
|
196
|
-
// For cross-language compatibility, these variables MUST be NULL in the database when not set
|
|
197
|
-
initStatus.workflowClassName === '' ? null : initStatus.workflowClassName,
|
|
198
|
-
initStatus.workflowConfigName === '' ? null : initStatus.workflowConfigName,
|
|
199
|
-
initStatus.queueName ?? null,
|
|
200
|
-
initStatus.authenticatedUser,
|
|
201
|
-
initStatus.assumedRole,
|
|
202
|
-
JSON.stringify(initStatus.authenticatedRoles),
|
|
203
|
-
JSON.stringify(initStatus.request),
|
|
204
|
-
initStatus.executorId,
|
|
205
|
-
initStatus.applicationVersion ?? null,
|
|
206
|
-
initStatus.applicationID,
|
|
207
|
-
initStatus.createdAt,
|
|
208
|
-
initStatus.status === workflow_1.StatusString.ENQUEUED ? 0 : 1,
|
|
209
|
-
initStatus.updatedAt ?? Date.now(),
|
|
210
|
-
initStatus.timeoutMS ?? null,
|
|
211
|
-
initStatus.deadlineEpochMS ?? null,
|
|
212
|
-
initStatus.input ?? null,
|
|
213
|
-
initStatus.deduplicationID ?? null,
|
|
214
|
-
initStatus.priority,
|
|
215
|
-
initStatus.queuePartitionKey ?? null,
|
|
216
|
-
initStatus.forkedFrom ?? null,
|
|
217
|
-
initStatus.parentWorkflowID ?? null,
|
|
218
|
-
(incrementAttempts ?? false) ? 1 : 0,
|
|
219
|
-
initStatus.serialization,
|
|
220
|
-
ownerXid,
|
|
221
|
-
]);
|
|
222
|
-
if (rows.length === 0) {
|
|
223
|
-
throw new Error(`Attempt to insert workflow ${initStatus.workflowUUID} failed`);
|
|
224
|
-
}
|
|
225
|
-
const ret = rows[0];
|
|
226
|
-
ret.class_name = ret.class_name ?? '';
|
|
227
|
-
ret.config_name = ret.config_name ?? '';
|
|
228
|
-
initStatus.serialization = ret.serialization;
|
|
229
|
-
return ret;
|
|
230
|
-
}
|
|
231
|
-
catch (error) {
|
|
232
|
-
const err = error;
|
|
233
|
-
if (err.code === '23505') {
|
|
234
|
-
throw new error_1.DBOSQueueDuplicatedError(initStatus.workflowUUID, initStatus.queueName ?? '', initStatus.deduplicationID ?? '');
|
|
235
|
-
}
|
|
236
|
-
throw error;
|
|
237
|
-
}
|
|
238
|
-
}
|
|
239
|
-
async function getWorkflowStatusValue(client, workflowID, schemaName) {
|
|
240
|
-
const { rows } = await client.query(`SELECT status FROM "${schemaName}".workflow_status WHERE workflow_uuid=$1`, [workflowID]);
|
|
241
|
-
return rows.length === 0 ? undefined : rows[0].status;
|
|
242
|
-
}
|
|
243
|
-
async function updateWorkflowStatus(client, workflowID, status, schemaName, options = {}) {
|
|
244
|
-
let setClause = `SET status=$2, updated_at=$3`;
|
|
245
|
-
let whereClause = `WHERE workflow_uuid=$1`;
|
|
246
|
-
const args = [workflowID, status, Date.now()];
|
|
247
|
-
const update = options.update ?? {};
|
|
248
|
-
if (update.output) {
|
|
249
|
-
const param = args.push(update.output);
|
|
250
|
-
setClause += `, output=$${param}`;
|
|
251
|
-
}
|
|
252
|
-
if (update.error) {
|
|
253
|
-
const param = args.push(update.error);
|
|
254
|
-
setClause += `, error=$${param}`;
|
|
255
|
-
}
|
|
256
|
-
if (update.resetRecoveryAttempts) {
|
|
257
|
-
setClause += `, recovery_attempts = 0`;
|
|
258
|
-
}
|
|
259
|
-
if (update.resetDeadline) {
|
|
260
|
-
setClause += `, workflow_deadline_epoch_ms = NULL`;
|
|
261
|
-
}
|
|
262
|
-
if (update.queueName !== undefined) {
|
|
263
|
-
const param = args.push(update.queueName ?? undefined);
|
|
264
|
-
setClause += `, queue_name=$${param}`;
|
|
265
|
-
}
|
|
266
|
-
if (update.resetDeduplicationID) {
|
|
267
|
-
setClause += `, deduplication_id = NULL`;
|
|
268
|
-
}
|
|
269
|
-
if (update.resetStartedAtEpochMs) {
|
|
270
|
-
setClause += `, started_at_epoch_ms = NULL`;
|
|
271
|
-
}
|
|
272
|
-
if (update.executorId !== undefined) {
|
|
273
|
-
const param = args.push(update.executorId ?? undefined);
|
|
274
|
-
setClause += `, executor_id=$${param}`;
|
|
275
|
-
}
|
|
276
|
-
if (update.resetNameTo !== undefined) {
|
|
277
|
-
const param = args.push(update.resetNameTo ?? undefined);
|
|
278
|
-
setClause += `, name=$${param}`;
|
|
279
|
-
}
|
|
280
|
-
const where = options.where ?? {};
|
|
281
|
-
if (where.status) {
|
|
282
|
-
const param = args.push(where.status);
|
|
283
|
-
whereClause += ` AND status=$${param}`;
|
|
284
|
-
}
|
|
285
|
-
const result = await client.query(`UPDATE "${schemaName}".workflow_status ${setClause} ${whereClause}`, args);
|
|
286
|
-
const throwOnFailure = options.throwOnFailure ?? true;
|
|
287
|
-
if (throwOnFailure && result.rowCount !== 1) {
|
|
288
|
-
throw new error_1.DBOSWorkflowConflictError(`Attempt to record transition of nonexistent workflow ${workflowID}`);
|
|
289
|
-
}
|
|
290
|
-
}
|
|
291
|
-
async function recordOperationResult(client, workflowID, functionID, functionName, checkConflict, schemaName, startTimeEpochMs, endTimeEpochMs, options = {}) {
|
|
292
|
-
try {
|
|
293
|
-
const out = await client.query(`INSERT INTO ${schemaName}.operation_outputs
|
|
294
|
-
(workflow_uuid, function_id, output, error, function_name, child_workflow_id, started_at_epoch_ms, completed_at_epoch_ms, serialization)
|
|
295
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
|
|
296
|
-
ON CONFLICT DO NOTHING RETURNING completed_at_epoch_ms;`, [
|
|
297
|
-
workflowID,
|
|
298
|
-
functionID,
|
|
299
|
-
options.output ?? null,
|
|
300
|
-
options.error ?? null,
|
|
301
|
-
functionName,
|
|
302
|
-
options.childWorkflowID ?? null,
|
|
303
|
-
startTimeEpochMs,
|
|
304
|
-
endTimeEpochMs,
|
|
305
|
-
options.serialization ?? null,
|
|
306
|
-
]);
|
|
307
|
-
if (checkConflict && (out?.rowCount ?? 0) > 0 && Number(out?.rows?.[0]?.completed_at_epoch_ms) !== endTimeEpochMs) {
|
|
308
|
-
dbos_executor_1.DBOSExecutor.globalInstance?.logger.warn(`Step output for ${workflowID}(${functionID}):${functionName} already recorded`);
|
|
309
|
-
throw new error_1.DBOSWorkflowConflictError(workflowID);
|
|
310
|
-
}
|
|
311
|
-
}
|
|
312
|
-
catch (error) {
|
|
313
|
-
const err = error;
|
|
314
|
-
if (err.code === '40001' || err.code === '23505') {
|
|
315
|
-
// Serialization and primary key conflict (Postgres).
|
|
316
|
-
throw new error_1.DBOSWorkflowConflictError(workflowID);
|
|
317
|
-
}
|
|
318
|
-
else {
|
|
319
|
-
throw err;
|
|
320
|
-
}
|
|
321
|
-
}
|
|
322
|
-
}
|
|
323
149
|
function mapWorkflowStatus(row) {
|
|
324
150
|
return {
|
|
325
151
|
workflowUUID: row.workflow_uuid,
|
|
@@ -489,10 +315,22 @@ function dbRetry(options = {}) {
|
|
|
489
315
|
return descriptor;
|
|
490
316
|
};
|
|
491
317
|
}
|
|
492
|
-
|
|
318
|
+
/**
|
|
319
|
+
* General notes:
|
|
320
|
+
* The responsibilities of the `SystemDatabase` are to store data for workflows, and
|
|
321
|
+
* associated steps, transactions, messages, and events. The system DB is
|
|
322
|
+
* also the IPC mechanism that performs notifications when things change, for
|
|
323
|
+
* example a receive is unblocked when a send occurs, or a cancel interrupts
|
|
324
|
+
* the receive.
|
|
325
|
+
* The `SystemDatabase` expects values in inputs/outputs/errors to be JSON. However,
|
|
326
|
+
* the serialization process of turning data into JSON or converting it back, should
|
|
327
|
+
* be done elsewhere (executor), as it may require application-specific logic or extensions.
|
|
328
|
+
*/
|
|
329
|
+
class SystemDatabase {
|
|
493
330
|
systemDatabaseUrl;
|
|
494
331
|
logger;
|
|
495
332
|
serializer;
|
|
333
|
+
// ==================== Lifecycle ====================
|
|
496
334
|
pool;
|
|
497
335
|
schemaName;
|
|
498
336
|
/*
|
|
@@ -574,6 +412,7 @@ class PostgresSystemDatabase {
|
|
|
574
412
|
}
|
|
575
413
|
await this.pool.end();
|
|
576
414
|
}
|
|
415
|
+
// ==================== Workflow Status ====================
|
|
577
416
|
async initWorkflowStatus(initStatus, ownerXid, options) {
|
|
578
417
|
const client = await this.pool.connect();
|
|
579
418
|
let shouldCommit = false;
|
|
@@ -581,7 +420,7 @@ class PostgresSystemDatabase {
|
|
|
581
420
|
await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
|
|
582
421
|
// Moving from enqueued to pending asks to increment recovery attempts... rather than in the recovery process
|
|
583
422
|
// where it moves from pending back to enqueued.
|
|
584
|
-
const resRow = await insertWorkflowStatus(client, initStatus,
|
|
423
|
+
const resRow = await this.insertWorkflowStatus(client, initStatus, ownerXid, !!options?.isRecoveryRequest || !!options?.isDequeuedRequest);
|
|
585
424
|
if (resRow.name !== initStatus.workflowName) {
|
|
586
425
|
const msg = `Workflow already exists with a different function name: ${resRow.name}, but the provided function name is: ${initStatus.workflowName}`;
|
|
587
426
|
throw new error_1.DBOSConflictingWorkflowError(initStatus.workflowUUID, msg);
|
|
@@ -617,7 +456,7 @@ class PostgresSystemDatabase {
|
|
|
617
456
|
// Thus, when this number becomes equal to `maxRetries + 1`, we should mark the workflow as `MAX_RECOVERY_ATTEMPTS_EXCEEDED`.
|
|
618
457
|
const attempts = resRow.recovery_attempts;
|
|
619
458
|
if (options?.maxRetries && attempts > options?.maxRetries + 1) {
|
|
620
|
-
await updateWorkflowStatus(client, initStatus.workflowUUID, workflow_1.StatusString.MAX_RECOVERY_ATTEMPTS_EXCEEDED,
|
|
459
|
+
await this.updateWorkflowStatus(client, initStatus.workflowUUID, workflow_1.StatusString.MAX_RECOVERY_ATTEMPTS_EXCEEDED, {
|
|
621
460
|
where: { status: workflow_1.StatusString.PENDING },
|
|
622
461
|
throwOnFailure: false,
|
|
623
462
|
});
|
|
@@ -649,7 +488,7 @@ class PostgresSystemDatabase {
|
|
|
649
488
|
async recordWorkflowOutput(workflowID, status) {
|
|
650
489
|
const client = await this.pool.connect();
|
|
651
490
|
try {
|
|
652
|
-
await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.SUCCESS,
|
|
491
|
+
await this.updateWorkflowStatus(client, workflowID, workflow_1.StatusString.SUCCESS, {
|
|
653
492
|
update: { output: status.output, resetDeduplicationID: true },
|
|
654
493
|
});
|
|
655
494
|
}
|
|
@@ -660,7 +499,7 @@ class PostgresSystemDatabase {
|
|
|
660
499
|
async recordWorkflowError(workflowID, status) {
|
|
661
500
|
const client = await this.pool.connect();
|
|
662
501
|
try {
|
|
663
|
-
await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ERROR,
|
|
502
|
+
await this.updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ERROR, {
|
|
664
503
|
update: { error: status.error, resetDeduplicationID: true },
|
|
665
504
|
});
|
|
666
505
|
}
|
|
@@ -677,23 +516,44 @@ class PostgresSystemDatabase {
|
|
|
677
516
|
queueName: i.queue_name,
|
|
678
517
|
}));
|
|
679
518
|
}
|
|
680
|
-
async
|
|
681
|
-
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
519
|
+
async getWorkflowStatus(workflowID, callerID, callerFN) {
|
|
520
|
+
const funcGetStatus = async () => {
|
|
521
|
+
const statuses = await this.listWorkflows({ workflowIDs: [workflowID] });
|
|
522
|
+
const status = statuses.find((s) => s.workflowUUID === workflowID);
|
|
523
|
+
return status ? JSON.stringify(status) : null;
|
|
524
|
+
};
|
|
525
|
+
if (callerID && callerFN) {
|
|
526
|
+
const client = await this.pool.connect();
|
|
527
|
+
try {
|
|
528
|
+
// Check if the operation has been done before for OAOO (only do this inside a workflow).
|
|
529
|
+
const json = await this.#runAndRecordResult(client, exports.DBOS_FUNCNAME_GETSTATUS, callerID, callerFN, funcGetStatus);
|
|
530
|
+
return parseStatus(json);
|
|
531
|
+
}
|
|
532
|
+
finally {
|
|
533
|
+
client.release();
|
|
534
|
+
}
|
|
687
535
|
}
|
|
688
536
|
else {
|
|
689
|
-
|
|
690
|
-
|
|
691
|
-
|
|
692
|
-
|
|
693
|
-
|
|
694
|
-
|
|
537
|
+
const json = await funcGetStatus();
|
|
538
|
+
return parseStatus(json);
|
|
539
|
+
}
|
|
540
|
+
function parseStatus(json) {
|
|
541
|
+
return json ? JSON.parse(json) : null;
|
|
542
|
+
}
|
|
543
|
+
}
|
|
544
|
+
// Only used in tests
|
|
545
|
+
async setWorkflowStatus(workflowID, status, resetRecoveryAttempts, internalOptions) {
|
|
546
|
+
const client = await this.pool.connect();
|
|
547
|
+
try {
|
|
548
|
+
await this.updateWorkflowStatus(client, workflowID, status, {
|
|
549
|
+
update: { resetRecoveryAttempts, resetNameTo: internalOptions?.updateName },
|
|
550
|
+
});
|
|
551
|
+
}
|
|
552
|
+
finally {
|
|
553
|
+
client.release();
|
|
695
554
|
}
|
|
696
555
|
}
|
|
556
|
+
// ==================== Step Results ====================
|
|
697
557
|
async getOperationResultAndThrowIfCancelled(workflowID, functionID) {
|
|
698
558
|
const client = await this.pool.connect();
|
|
699
559
|
try {
|
|
@@ -711,13 +571,194 @@ class PostgresSystemDatabase {
|
|
|
711
571
|
const client = await this.pool.connect();
|
|
712
572
|
const now = Date.now();
|
|
713
573
|
try {
|
|
714
|
-
await
|
|
574
|
+
await this.recordOperationResultInternal(client, workflowID, functionID, functionName, checkConflict, startTimeEpochMs, now, options);
|
|
715
575
|
}
|
|
716
576
|
finally {
|
|
717
577
|
client.release();
|
|
718
578
|
await (0, debugpoint_1.debugTriggerPoint)(debugpoint_1.DEBUG_TRIGGER_STEP_COMMIT);
|
|
719
579
|
}
|
|
720
580
|
}
|
|
581
|
+
async runTransactionalStep(workflowID, functionID, functionName, callback) {
|
|
582
|
+
const client = await this.pool.connect();
|
|
583
|
+
try {
|
|
584
|
+
await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
|
|
585
|
+
const existing = await this.#getOperationResultAndThrowIfCancelled(client, workflowID, functionID);
|
|
586
|
+
if (existing !== undefined) {
|
|
587
|
+
await client.query('ROLLBACK');
|
|
588
|
+
return existing;
|
|
589
|
+
}
|
|
590
|
+
const startTime = Date.now();
|
|
591
|
+
const output = await callback(client);
|
|
592
|
+
await this.recordOperationResultInternal(client, workflowID, functionID, functionName, true, startTime, Date.now(), {
|
|
593
|
+
output,
|
|
594
|
+
});
|
|
595
|
+
await client.query('COMMIT');
|
|
596
|
+
return undefined;
|
|
597
|
+
}
|
|
598
|
+
catch (e) {
|
|
599
|
+
await client.query('ROLLBACK');
|
|
600
|
+
throw e;
|
|
601
|
+
}
|
|
602
|
+
finally {
|
|
603
|
+
client.release();
|
|
604
|
+
}
|
|
605
|
+
}
|
|
606
|
+
async checkPatch(workflowID, functionID, patchName, deprecated) {
|
|
607
|
+
// Not doing a cancel check at this point.
|
|
608
|
+
if (functionID === undefined)
|
|
609
|
+
throw new TypeError('functionID must be defined');
|
|
610
|
+
patchName = `DBOS.patch-${patchName}`;
|
|
611
|
+
const { rows } = await this.pool.query(`SELECT function_name
|
|
612
|
+
FROM "${this.schemaName}".operation_outputs
|
|
613
|
+
WHERE workflow_uuid=$1 AND function_id=$2`, [workflowID, functionID]);
|
|
614
|
+
if (deprecated) {
|
|
615
|
+
// Deprecated does not write anything. We skip any existing matching patch marker if it matches
|
|
616
|
+
if (rows.length === 0) {
|
|
617
|
+
return { isPatched: true, hasEntry: false };
|
|
618
|
+
}
|
|
619
|
+
return { isPatched: true, hasEntry: rows[0].function_name === patchName };
|
|
620
|
+
}
|
|
621
|
+
// Nondeprecated - skip matching entry, unpatched if nonmatching entry,
|
|
622
|
+
// If there is no entry, we insert one that indicates it is patched.
|
|
623
|
+
if (rows.length !== 0) {
|
|
624
|
+
if (rows[0].function_name === patchName) {
|
|
625
|
+
return { isPatched: true, hasEntry: true };
|
|
626
|
+
}
|
|
627
|
+
return { isPatched: false, hasEntry: false };
|
|
628
|
+
}
|
|
629
|
+
// Insert a patchmarker
|
|
630
|
+
const dn = Date.now();
|
|
631
|
+
await this.pool.query(`INSERT INTO ${this.schemaName}.operation_outputs
|
|
632
|
+
(workflow_uuid, function_id, output, error, function_name, child_workflow_id, started_at_epoch_ms, completed_at_epoch_ms)
|
|
633
|
+
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
|
634
|
+
ON CONFLICT DO NOTHING;`, [workflowID, functionID, null, null, patchName, null, dn, dn]);
|
|
635
|
+
return { isPatched: true, hasEntry: true };
|
|
636
|
+
}
|
|
637
|
+
// ==================== Workflow Management ====================
|
|
638
|
+
async cancelWorkflow(workflowID) {
|
|
639
|
+
const client = await this.pool.connect();
|
|
640
|
+
try {
|
|
641
|
+
await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
|
|
642
|
+
const statusResult = await this.getWorkflowStatusValue(client, workflowID);
|
|
643
|
+
if (!statusResult) {
|
|
644
|
+
throw new error_1.DBOSNonExistentWorkflowError(`Workflow ${workflowID} does not exist`);
|
|
645
|
+
}
|
|
646
|
+
if (statusResult === workflow_1.StatusString.SUCCESS ||
|
|
647
|
+
statusResult === workflow_1.StatusString.ERROR ||
|
|
648
|
+
statusResult === workflow_1.StatusString.CANCELLED) {
|
|
649
|
+
await client.query('ROLLBACK');
|
|
650
|
+
return;
|
|
651
|
+
}
|
|
652
|
+
// Set the workflow's status to CANCELLED and remove it from any queue it is on
|
|
653
|
+
await this.updateWorkflowStatus(client, workflowID, workflow_1.StatusString.CANCELLED, {
|
|
654
|
+
update: { queueName: null, resetDeduplicationID: true, resetStartedAtEpochMs: true },
|
|
655
|
+
});
|
|
656
|
+
await client.query('COMMIT');
|
|
657
|
+
}
|
|
658
|
+
catch (error) {
|
|
659
|
+
this.logger.error(error);
|
|
660
|
+
await client.query('ROLLBACK');
|
|
661
|
+
throw error;
|
|
662
|
+
}
|
|
663
|
+
finally {
|
|
664
|
+
client.release();
|
|
665
|
+
}
|
|
666
|
+
this.#setWFCancelMap(workflowID);
|
|
667
|
+
}
|
|
668
|
+
async checkIfCanceled(workflowID) {
|
|
669
|
+
const client = await this.pool.connect();
|
|
670
|
+
try {
|
|
671
|
+
await this.#checkIfCanceled(client, workflowID);
|
|
672
|
+
}
|
|
673
|
+
finally {
|
|
674
|
+
client.release();
|
|
675
|
+
}
|
|
676
|
+
}
|
|
677
|
+
async resumeWorkflow(workflowID) {
|
|
678
|
+
this.#clearWFCancelMap(workflowID);
|
|
679
|
+
const client = await this.pool.connect();
|
|
680
|
+
try {
|
|
681
|
+
await client.query('BEGIN ISOLATION LEVEL REPEATABLE READ');
|
|
682
|
+
// Check workflow status. If it is complete, do nothing.
|
|
683
|
+
const statusResult = await this.getWorkflowStatusValue(client, workflowID);
|
|
684
|
+
if (!statusResult || statusResult === workflow_1.StatusString.SUCCESS || statusResult === workflow_1.StatusString.ERROR) {
|
|
685
|
+
await client.query('ROLLBACK');
|
|
686
|
+
if (!statusResult) {
|
|
687
|
+
if (statusResult === undefined) {
|
|
688
|
+
throw new error_1.DBOSNonExistentWorkflowError(`Workflow ${workflowID} does not exist`);
|
|
689
|
+
}
|
|
690
|
+
}
|
|
691
|
+
return;
|
|
692
|
+
}
|
|
693
|
+
// Set the workflow's status to ENQUEUED and reset recovery attempts and deadline.
|
|
694
|
+
await this.updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ENQUEUED, {
|
|
695
|
+
update: {
|
|
696
|
+
queueName: utils_1.INTERNAL_QUEUE_NAME,
|
|
697
|
+
resetRecoveryAttempts: true,
|
|
698
|
+
resetDeadline: true,
|
|
699
|
+
resetDeduplicationID: true,
|
|
700
|
+
resetStartedAtEpochMs: true,
|
|
701
|
+
},
|
|
702
|
+
throwOnFailure: false,
|
|
703
|
+
});
|
|
704
|
+
await client.query('COMMIT');
|
|
705
|
+
}
|
|
706
|
+
catch (error) {
|
|
707
|
+
this.logger.error(error);
|
|
708
|
+
await client.query('ROLLBACK');
|
|
709
|
+
throw error;
|
|
710
|
+
}
|
|
711
|
+
finally {
|
|
712
|
+
client.release();
|
|
713
|
+
}
|
|
714
|
+
}
|
|
715
|
+
async getWorkflowChildren(workflowID) {
|
|
716
|
+
// BFS to find all descendant workflows
|
|
717
|
+
const visited = new Set([workflowID]);
|
|
718
|
+
const queue = [workflowID];
|
|
719
|
+
const children = [];
|
|
720
|
+
const client = await this.pool.connect();
|
|
721
|
+
try {
|
|
722
|
+
while (queue.length > 0) {
|
|
723
|
+
const batch = queue.splice(0, queue.length);
|
|
724
|
+
const result = await client.query(`SELECT DISTINCT child_workflow_id
|
|
725
|
+
FROM "${this.schemaName}".operation_outputs
|
|
726
|
+
WHERE workflow_uuid = ANY($1)
|
|
727
|
+
AND child_workflow_id IS NOT NULL`, [batch]);
|
|
728
|
+
for (const row of result.rows) {
|
|
729
|
+
if (!visited.has(row.child_workflow_id)) {
|
|
730
|
+
visited.add(row.child_workflow_id);
|
|
731
|
+
queue.push(row.child_workflow_id);
|
|
732
|
+
children.push(row.child_workflow_id);
|
|
733
|
+
}
|
|
734
|
+
}
|
|
735
|
+
}
|
|
736
|
+
}
|
|
737
|
+
finally {
|
|
738
|
+
client.release();
|
|
739
|
+
}
|
|
740
|
+
return children;
|
|
741
|
+
}
|
|
742
|
+
async deleteWorkflow(workflowID, deleteChildren = false) {
|
|
743
|
+
let workflowsToDelete = [workflowID];
|
|
744
|
+
if (deleteChildren) {
|
|
745
|
+
const children = await this.getWorkflowChildren(workflowID);
|
|
746
|
+
workflowsToDelete = [...workflowsToDelete, ...children];
|
|
747
|
+
}
|
|
748
|
+
const client = await this.pool.connect();
|
|
749
|
+
try {
|
|
750
|
+
await client.query(`DELETE FROM "${this.schemaName}".workflow_status
|
|
751
|
+
WHERE workflow_uuid = ANY($1)`, [workflowsToDelete]);
|
|
752
|
+
}
|
|
753
|
+
finally {
|
|
754
|
+
client.release();
|
|
755
|
+
}
|
|
756
|
+
// Clean up in-memory maps
|
|
757
|
+
for (const wfid of workflowsToDelete) {
|
|
758
|
+
this.runningWorkflowMap.delete(wfid);
|
|
759
|
+
this.workflowCancellationMap.delete(wfid);
|
|
760
|
+
}
|
|
761
|
+
}
|
|
721
762
|
async forkWorkflow(workflowID, startStep, options = {}) {
|
|
722
763
|
const newWorkflowID = options.newWorkflowID ?? (0, crypto_1.randomUUID)();
|
|
723
764
|
const workflowStatus = await this.getWorkflowStatus(workflowID);
|
|
@@ -731,7 +772,7 @@ class PostgresSystemDatabase {
|
|
|
731
772
|
try {
|
|
732
773
|
await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
|
|
733
774
|
const now = Date.now();
|
|
734
|
-
await insertWorkflowStatus(client, {
|
|
775
|
+
await this.insertWorkflowStatus(client, {
|
|
735
776
|
workflowUUID: newWorkflowID,
|
|
736
777
|
status: workflow_1.StatusString.ENQUEUED,
|
|
737
778
|
workflowName: workflowStatus.workflowName,
|
|
@@ -757,7 +798,7 @@ class PostgresSystemDatabase {
|
|
|
757
798
|
queuePartitionKey: undefined,
|
|
758
799
|
forkedFrom: workflowID,
|
|
759
800
|
serialization: workflowStatus.serialization,
|
|
760
|
-
},
|
|
801
|
+
}, null);
|
|
761
802
|
if (startStep > 0) {
|
|
762
803
|
// Copy operation outputs
|
|
763
804
|
const copyOutputsQuery = `INSERT INTO "${this.schemaName}".operation_outputs
|
|
@@ -806,466 +847,6 @@ class PostgresSystemDatabase {
|
|
|
806
847
|
client.release();
|
|
807
848
|
}
|
|
808
849
|
}
|
|
809
|
-
async #runAndRecordResult(client, functionName, workflowID, functionID, func) {
|
|
810
|
-
const startTime = Date.now();
|
|
811
|
-
const result = await this.#getOperationResultAndThrowIfCancelled(client, workflowID, functionID);
|
|
812
|
-
if (result !== undefined) {
|
|
813
|
-
if (result.functionName !== functionName) {
|
|
814
|
-
throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, functionName, result.functionName);
|
|
815
|
-
}
|
|
816
|
-
return result.output;
|
|
817
|
-
}
|
|
818
|
-
const output = await func();
|
|
819
|
-
await recordOperationResult(client, workflowID, functionID, functionName, true, this.schemaName, startTime, Date.now(), {
|
|
820
|
-
output,
|
|
821
|
-
});
|
|
822
|
-
return output;
|
|
823
|
-
}
|
|
824
|
-
async durableSleepms(workflowID, functionID, durationMS) {
|
|
825
|
-
let resolveNotification;
|
|
826
|
-
const cancelPromise = new Promise((resolve) => {
|
|
827
|
-
resolveNotification = resolve;
|
|
828
|
-
});
|
|
829
|
-
const cbr = this.cancelWakeupMap.registerCallback(workflowID, resolveNotification);
|
|
830
|
-
try {
|
|
831
|
-
let timeoutPromise = Promise.resolve();
|
|
832
|
-
const { promise, cancel: timeoutCancel } = await this.#durableSleep(workflowID, functionID, durationMS);
|
|
833
|
-
timeoutPromise = promise;
|
|
834
|
-
try {
|
|
835
|
-
await Promise.race([cancelPromise, timeoutPromise]);
|
|
836
|
-
}
|
|
837
|
-
finally {
|
|
838
|
-
timeoutCancel();
|
|
839
|
-
}
|
|
840
|
-
}
|
|
841
|
-
finally {
|
|
842
|
-
this.cancelWakeupMap.deregisterCallback(cbr);
|
|
843
|
-
}
|
|
844
|
-
await this.checkIfCanceled(workflowID);
|
|
845
|
-
}
|
|
846
|
-
async #durableSleep(workflowID, functionID, durationMS, maxSleepPerIteration) {
|
|
847
|
-
if (maxSleepPerIteration === undefined)
|
|
848
|
-
maxSleepPerIteration = durationMS;
|
|
849
|
-
const curTime = Date.now();
|
|
850
|
-
let endTimeMs = curTime + durationMS;
|
|
851
|
-
const client = await this.pool.connect();
|
|
852
|
-
try {
|
|
853
|
-
const res = await this.#getOperationResultAndThrowIfCancelled(client, workflowID, functionID);
|
|
854
|
-
if (res) {
|
|
855
|
-
if (res.functionName !== exports.DBOS_FUNCNAME_SLEEP) {
|
|
856
|
-
throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, exports.DBOS_FUNCNAME_SLEEP, res.functionName);
|
|
857
|
-
}
|
|
858
|
-
endTimeMs = JSON.parse(res.output);
|
|
859
|
-
}
|
|
860
|
-
else {
|
|
861
|
-
await recordOperationResult(client, workflowID, functionID, exports.DBOS_FUNCNAME_SLEEP, false, this.schemaName, Date.now(), Date.now(), {
|
|
862
|
-
output: serialization_1.DBOSPortableJSON.stringify(endTimeMs),
|
|
863
|
-
serialization: serialization_1.DBOSPortableJSON.name(),
|
|
864
|
-
});
|
|
865
|
-
}
|
|
866
|
-
return {
|
|
867
|
-
...(0, utils_1.cancellableSleep)(Math.max(Math.min(maxSleepPerIteration, endTimeMs - curTime), 0)),
|
|
868
|
-
endTime: endTimeMs,
|
|
869
|
-
};
|
|
870
|
-
}
|
|
871
|
-
finally {
|
|
872
|
-
client.release();
|
|
873
|
-
}
|
|
874
|
-
}
|
|
875
|
-
nullTopic = '__null__topic__';
|
|
876
|
-
async send(workflowID, functionID, destinationID, message, topic, serialization) {
|
|
877
|
-
topic = topic ?? this.nullTopic;
|
|
878
|
-
const client = await this.pool.connect();
|
|
879
|
-
try {
|
|
880
|
-
await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
|
|
881
|
-
await this.#runAndRecordResult(client, exports.DBOS_FUNCNAME_SEND, workflowID, functionID, async () => {
|
|
882
|
-
await client.query(`INSERT INTO "${this.schemaName}".notifications (destination_uuid, topic, message, serialization) VALUES ($1, $2, $3, $4);`, [destinationID, topic, message, serialization]);
|
|
883
|
-
return undefined;
|
|
884
|
-
});
|
|
885
|
-
await client.query('COMMIT');
|
|
886
|
-
}
|
|
887
|
-
catch (error) {
|
|
888
|
-
await client.query('ROLLBACK');
|
|
889
|
-
const err = error;
|
|
890
|
-
if (err.code === '23503') {
|
|
891
|
-
// Foreign key constraint violation (only expected for the INSERT query)
|
|
892
|
-
throw new error_1.DBOSNonExistentWorkflowError(`Sent to non-existent destination workflow UUID: ${destinationID}`);
|
|
893
|
-
}
|
|
894
|
-
else {
|
|
895
|
-
throw err;
|
|
896
|
-
}
|
|
897
|
-
}
|
|
898
|
-
finally {
|
|
899
|
-
client.release();
|
|
900
|
-
}
|
|
901
|
-
}
|
|
902
|
-
async recv(workflowID, functionID, timeoutFunctionID, topic, timeoutSeconds = dbos_executor_1.DBOSExecutor.defaultNotificationTimeoutSec) {
|
|
903
|
-
topic = topic ?? this.nullTopic;
|
|
904
|
-
const startTime = Date.now();
|
|
905
|
-
// First, check for previous executions.
|
|
906
|
-
const res = await this.getOperationResultAndThrowIfCancelled(workflowID, functionID);
|
|
907
|
-
if (res) {
|
|
908
|
-
if (res.functionName !== exports.DBOS_FUNCNAME_RECV) {
|
|
909
|
-
throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, exports.DBOS_FUNCNAME_RECV, res.functionName);
|
|
910
|
-
}
|
|
911
|
-
return { serializedValue: res.output, serialization: res.serialization ?? null };
|
|
912
|
-
}
|
|
913
|
-
const timeoutms = timeoutSeconds !== undefined ? timeoutSeconds * 1000 : undefined;
|
|
914
|
-
let finishTime = timeoutms !== undefined ? Date.now() + timeoutms : undefined;
|
|
915
|
-
while (true) {
|
|
916
|
-
// register the key with the global notifications listener.
|
|
917
|
-
let resolveNotification;
|
|
918
|
-
const messagePromise = new Promise((resolve) => {
|
|
919
|
-
resolveNotification = resolve;
|
|
920
|
-
});
|
|
921
|
-
const payload = `${workflowID}::${topic}`;
|
|
922
|
-
const cbr = this.notificationsMap.registerCallback(payload, resolveNotification);
|
|
923
|
-
const crh = this.cancelWakeupMap.registerCallback(workflowID, (_res) => {
|
|
924
|
-
resolveNotification();
|
|
925
|
-
});
|
|
926
|
-
try {
|
|
927
|
-
await this.checkIfCanceled(workflowID);
|
|
928
|
-
// Check if the key is already in the DB, then wait for the notification if it isn't.
|
|
929
|
-
const initRecvRows = (await this.pool.query(`SELECT topic FROM "${this.schemaName}".notifications WHERE destination_uuid=$1 AND topic=$2;`, [workflowID, topic])).rows;
|
|
930
|
-
if (initRecvRows.length !== 0)
|
|
931
|
-
break;
|
|
932
|
-
const ct = Date.now();
|
|
933
|
-
if (finishTime && ct > finishTime)
|
|
934
|
-
break; // Time's up
|
|
935
|
-
let timeoutPromise = Promise.resolve();
|
|
936
|
-
let timeoutCancel = () => { };
|
|
937
|
-
if (timeoutms) {
|
|
938
|
-
const { promise, cancel, endTime } = await this.#durableSleep(workflowID, timeoutFunctionID, timeoutms, this.dbPollingIntervalEventMs);
|
|
939
|
-
timeoutPromise = promise;
|
|
940
|
-
timeoutCancel = cancel;
|
|
941
|
-
finishTime = endTime;
|
|
942
|
-
}
|
|
943
|
-
else {
|
|
944
|
-
let poll = finishTime ? finishTime - ct : this.dbPollingIntervalEventMs;
|
|
945
|
-
poll = Math.min(this.dbPollingIntervalEventMs, poll);
|
|
946
|
-
const { promise, cancel } = (0, utils_1.cancellableSleep)(poll);
|
|
947
|
-
timeoutPromise = promise;
|
|
948
|
-
timeoutCancel = cancel;
|
|
949
|
-
}
|
|
950
|
-
try {
|
|
951
|
-
await Promise.race([messagePromise, timeoutPromise]);
|
|
952
|
-
}
|
|
953
|
-
finally {
|
|
954
|
-
timeoutCancel();
|
|
955
|
-
}
|
|
956
|
-
}
|
|
957
|
-
finally {
|
|
958
|
-
this.notificationsMap.deregisterCallback(cbr);
|
|
959
|
-
this.cancelWakeupMap.deregisterCallback(crh);
|
|
960
|
-
}
|
|
961
|
-
}
|
|
962
|
-
await this.checkIfCanceled(workflowID);
|
|
963
|
-
// Transactionally consume and return the message if it's in the DB, otherwise return null.
|
|
964
|
-
let message = null;
|
|
965
|
-
let serialization = null;
|
|
966
|
-
const client = await this.pool.connect();
|
|
967
|
-
try {
|
|
968
|
-
await client.query(`BEGIN ISOLATION LEVEL READ COMMITTED`);
|
|
969
|
-
const finalRecvRows = (await client.query(`DELETE FROM "${this.schemaName}".notifications
|
|
970
|
-
WHERE destination_uuid = $1
|
|
971
|
-
AND topic = $2
|
|
972
|
-
AND message_uuid = (
|
|
973
|
-
SELECT message_uuid
|
|
974
|
-
FROM "${this.schemaName}".notifications
|
|
975
|
-
WHERE destination_uuid = $1
|
|
976
|
-
AND topic = $2
|
|
977
|
-
ORDER BY created_at_epoch_ms ASC
|
|
978
|
-
LIMIT 1
|
|
979
|
-
)
|
|
980
|
-
RETURNING notifications.message, notifications.serialization;`, [workflowID, topic])).rows;
|
|
981
|
-
if (finalRecvRows.length > 0) {
|
|
982
|
-
message = finalRecvRows[0].message;
|
|
983
|
-
serialization = finalRecvRows[0].serialization;
|
|
984
|
-
}
|
|
985
|
-
await recordOperationResult(client, workflowID, functionID, exports.DBOS_FUNCNAME_RECV, true, this.schemaName, startTime, Date.now(), {
|
|
986
|
-
output: message,
|
|
987
|
-
serialization,
|
|
988
|
-
});
|
|
989
|
-
await client.query(`COMMIT`);
|
|
990
|
-
}
|
|
991
|
-
catch (e) {
|
|
992
|
-
this.logger.error(e);
|
|
993
|
-
await client.query(`ROLLBACK`);
|
|
994
|
-
throw e;
|
|
995
|
-
}
|
|
996
|
-
finally {
|
|
997
|
-
client.release();
|
|
998
|
-
}
|
|
999
|
-
return { serializedValue: message, serialization };
|
|
1000
|
-
}
|
|
1001
|
-
// Only used in tests
|
|
1002
|
-
async setWorkflowStatus(workflowID, status, resetRecoveryAttempts, internalOptions) {
|
|
1003
|
-
const client = await this.pool.connect();
|
|
1004
|
-
try {
|
|
1005
|
-
await updateWorkflowStatus(client, workflowID, status, this.schemaName, {
|
|
1006
|
-
update: { resetRecoveryAttempts, resetNameTo: internalOptions?.updateName },
|
|
1007
|
-
});
|
|
1008
|
-
}
|
|
1009
|
-
finally {
|
|
1010
|
-
client.release();
|
|
1011
|
-
}
|
|
1012
|
-
}
|
|
1013
|
-
async setEvent(workflowID, functionID, key, message, serialization) {
|
|
1014
|
-
const client = await this.pool.connect();
|
|
1015
|
-
try {
|
|
1016
|
-
await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
|
|
1017
|
-
await this.#runAndRecordResult(client, exports.DBOS_FUNCNAME_SETEVENT, workflowID, functionID, async () => {
|
|
1018
|
-
await client.query(`INSERT INTO "${this.schemaName}".workflow_events (workflow_uuid, key, value, serialization)
|
|
1019
|
-
VALUES ($1, $2, $3, $4)
|
|
1020
|
-
ON CONFLICT (workflow_uuid, key)
|
|
1021
|
-
DO UPDATE SET value = $3
|
|
1022
|
-
RETURNING workflow_uuid;`, [workflowID, key, message, serialization]);
|
|
1023
|
-
// Also write to the immutable history table for fork support
|
|
1024
|
-
await client.query(`INSERT INTO "${this.schemaName}".workflow_events_history (workflow_uuid, function_id, key, value, serialization)
|
|
1025
|
-
VALUES ($1, $2, $3, $4, $5)
|
|
1026
|
-
ON CONFLICT (workflow_uuid, function_id, key)
|
|
1027
|
-
DO UPDATE SET value = $4;`, [workflowID, functionID, key, message, serialization]);
|
|
1028
|
-
return undefined;
|
|
1029
|
-
});
|
|
1030
|
-
await client.query('COMMIT');
|
|
1031
|
-
}
|
|
1032
|
-
catch (e) {
|
|
1033
|
-
this.logger.error(e);
|
|
1034
|
-
await client.query(`ROLLBACK`);
|
|
1035
|
-
throw e;
|
|
1036
|
-
}
|
|
1037
|
-
finally {
|
|
1038
|
-
client.release();
|
|
1039
|
-
}
|
|
1040
|
-
}
|
|
1041
|
-
async getEvent(workflowID, key, timeoutSeconds, callerWorkflow) {
|
|
1042
|
-
const startTime = Date.now();
|
|
1043
|
-
// Check if the operation has been done before for OAOO (only do this inside a workflow).
|
|
1044
|
-
if (callerWorkflow) {
|
|
1045
|
-
const res = await this.getOperationResultAndThrowIfCancelled(callerWorkflow.workflowID, callerWorkflow.functionID);
|
|
1046
|
-
if (res) {
|
|
1047
|
-
if (res.functionName !== exports.DBOS_FUNCNAME_GETEVENT) {
|
|
1048
|
-
throw new error_1.DBOSUnexpectedStepError(callerWorkflow.workflowID, callerWorkflow.functionID, exports.DBOS_FUNCNAME_GETEVENT, res.functionName);
|
|
1049
|
-
}
|
|
1050
|
-
return { serializedValue: res.output, serialization: null };
|
|
1051
|
-
}
|
|
1052
|
-
}
|
|
1053
|
-
// Get the return the value. if it's in the DB, otherwise return null.
|
|
1054
|
-
let value = null;
|
|
1055
|
-
let valueSer = null;
|
|
1056
|
-
const payloadKey = `${workflowID}::${key}`;
|
|
1057
|
-
const timeoutms = timeoutSeconds !== undefined ? timeoutSeconds * 1000 : undefined;
|
|
1058
|
-
let finishTime = timeoutms !== undefined ? Date.now() + timeoutms : undefined;
|
|
1059
|
-
// Register the key with the global notifications listener first... we do not want to look in the DB first
|
|
1060
|
-
// or that would cause a timing hole.
|
|
1061
|
-
while (true) {
|
|
1062
|
-
let resolveNotification;
|
|
1063
|
-
const valuePromise = new Promise((resolve) => {
|
|
1064
|
-
resolveNotification = resolve;
|
|
1065
|
-
});
|
|
1066
|
-
const cbr = this.workflowEventsMap.registerCallback(payloadKey, resolveNotification);
|
|
1067
|
-
const crh = callerWorkflow?.workflowID
|
|
1068
|
-
? this.cancelWakeupMap.registerCallback(callerWorkflow.workflowID, (_res) => {
|
|
1069
|
-
resolveNotification();
|
|
1070
|
-
})
|
|
1071
|
-
: undefined;
|
|
1072
|
-
try {
|
|
1073
|
-
if (callerWorkflow?.workflowID)
|
|
1074
|
-
await this.checkIfCanceled(callerWorkflow?.workflowID);
|
|
1075
|
-
// Check if the key is already in the DB, then wait for the notification if it isn't.
|
|
1076
|
-
const initRecvRows = (await this.pool.query(`SELECT key, value, serialization
|
|
1077
|
-
FROM "${this.schemaName}".workflow_events
|
|
1078
|
-
WHERE workflow_uuid=$1 AND key=$2;`, [workflowID, key])).rows;
|
|
1079
|
-
if (initRecvRows.length > 0) {
|
|
1080
|
-
value = initRecvRows[0].value;
|
|
1081
|
-
valueSer = initRecvRows[0].serialization;
|
|
1082
|
-
break;
|
|
1083
|
-
}
|
|
1084
|
-
const ct = Date.now();
|
|
1085
|
-
if (finishTime && ct > finishTime)
|
|
1086
|
-
break; // Time's up
|
|
1087
|
-
// If we have a callerWorkflow, we want a durable sleep, otherwise, not
|
|
1088
|
-
let timeoutPromise = Promise.resolve();
|
|
1089
|
-
let timeoutCancel = () => { };
|
|
1090
|
-
if (callerWorkflow && timeoutms) {
|
|
1091
|
-
const { promise, cancel, endTime } = await this.#durableSleep(callerWorkflow.workflowID, callerWorkflow.timeoutFunctionID ?? -1, timeoutms, this.dbPollingIntervalEventMs);
|
|
1092
|
-
timeoutPromise = promise;
|
|
1093
|
-
timeoutCancel = cancel;
|
|
1094
|
-
finishTime = endTime;
|
|
1095
|
-
}
|
|
1096
|
-
else {
|
|
1097
|
-
let poll = finishTime ? finishTime - ct : this.dbPollingIntervalEventMs;
|
|
1098
|
-
poll = Math.min(this.dbPollingIntervalEventMs, poll);
|
|
1099
|
-
const { promise, cancel } = (0, utils_1.cancellableSleep)(poll);
|
|
1100
|
-
timeoutPromise = promise;
|
|
1101
|
-
timeoutCancel = cancel;
|
|
1102
|
-
}
|
|
1103
|
-
try {
|
|
1104
|
-
await Promise.race([valuePromise, timeoutPromise]);
|
|
1105
|
-
}
|
|
1106
|
-
finally {
|
|
1107
|
-
timeoutCancel();
|
|
1108
|
-
}
|
|
1109
|
-
}
|
|
1110
|
-
finally {
|
|
1111
|
-
this.workflowEventsMap.deregisterCallback(cbr);
|
|
1112
|
-
if (crh)
|
|
1113
|
-
this.cancelWakeupMap.deregisterCallback(crh);
|
|
1114
|
-
}
|
|
1115
|
-
}
|
|
1116
|
-
// Record the output if it is inside a workflow.
|
|
1117
|
-
if (callerWorkflow) {
|
|
1118
|
-
await this.recordOperationResult(callerWorkflow.workflowID, callerWorkflow.functionID, exports.DBOS_FUNCNAME_GETEVENT, true, startTime, {
|
|
1119
|
-
output: value,
|
|
1120
|
-
serialization: valueSer,
|
|
1121
|
-
});
|
|
1122
|
-
}
|
|
1123
|
-
return { serializedValue: value, serialization: valueSer };
|
|
1124
|
-
}
|
|
1125
|
-
#setWFCancelMap(workflowID) {
|
|
1126
|
-
if (this.runningWorkflowMap.has(workflowID)) {
|
|
1127
|
-
this.workflowCancellationMap.set(workflowID, true);
|
|
1128
|
-
}
|
|
1129
|
-
this.cancelWakeupMap.callCallbacks(workflowID);
|
|
1130
|
-
}
|
|
1131
|
-
#clearWFCancelMap(workflowID) {
|
|
1132
|
-
if (this.workflowCancellationMap.has(workflowID)) {
|
|
1133
|
-
this.workflowCancellationMap.delete(workflowID);
|
|
1134
|
-
}
|
|
1135
|
-
}
|
|
1136
|
-
async cancelWorkflow(workflowID) {
|
|
1137
|
-
const client = await this.pool.connect();
|
|
1138
|
-
try {
|
|
1139
|
-
await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
|
|
1140
|
-
const statusResult = await getWorkflowStatusValue(client, workflowID, this.schemaName);
|
|
1141
|
-
if (!statusResult) {
|
|
1142
|
-
throw new error_1.DBOSNonExistentWorkflowError(`Workflow ${workflowID} does not exist`);
|
|
1143
|
-
}
|
|
1144
|
-
if (statusResult === workflow_1.StatusString.SUCCESS ||
|
|
1145
|
-
statusResult === workflow_1.StatusString.ERROR ||
|
|
1146
|
-
statusResult === workflow_1.StatusString.CANCELLED) {
|
|
1147
|
-
await client.query('ROLLBACK');
|
|
1148
|
-
return;
|
|
1149
|
-
}
|
|
1150
|
-
// Set the workflow's status to CANCELLED and remove it from any queue it is on
|
|
1151
|
-
await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.CANCELLED, this.schemaName, {
|
|
1152
|
-
update: { queueName: null, resetDeduplicationID: true, resetStartedAtEpochMs: true },
|
|
1153
|
-
});
|
|
1154
|
-
await client.query('COMMIT');
|
|
1155
|
-
}
|
|
1156
|
-
catch (error) {
|
|
1157
|
-
this.logger.error(error);
|
|
1158
|
-
await client.query('ROLLBACK');
|
|
1159
|
-
throw error;
|
|
1160
|
-
}
|
|
1161
|
-
finally {
|
|
1162
|
-
client.release();
|
|
1163
|
-
}
|
|
1164
|
-
this.#setWFCancelMap(workflowID);
|
|
1165
|
-
}
|
|
1166
|
-
async #checkIfCanceled(client, workflowID) {
|
|
1167
|
-
if (this.workflowCancellationMap.get(workflowID) === true) {
|
|
1168
|
-
throw new error_1.DBOSWorkflowCancelledError(workflowID);
|
|
1169
|
-
}
|
|
1170
|
-
const statusValue = await getWorkflowStatusValue(client, workflowID, this.schemaName);
|
|
1171
|
-
if (statusValue === workflow_1.StatusString.CANCELLED) {
|
|
1172
|
-
throw new error_1.DBOSWorkflowCancelledError(workflowID);
|
|
1173
|
-
}
|
|
1174
|
-
}
|
|
1175
|
-
async checkIfCanceled(workflowID) {
|
|
1176
|
-
const client = await this.pool.connect();
|
|
1177
|
-
try {
|
|
1178
|
-
await this.#checkIfCanceled(client, workflowID);
|
|
1179
|
-
}
|
|
1180
|
-
finally {
|
|
1181
|
-
client.release();
|
|
1182
|
-
}
|
|
1183
|
-
}
|
|
1184
|
-
async resumeWorkflow(workflowID) {
|
|
1185
|
-
this.#clearWFCancelMap(workflowID);
|
|
1186
|
-
const client = await this.pool.connect();
|
|
1187
|
-
try {
|
|
1188
|
-
await client.query('BEGIN ISOLATION LEVEL REPEATABLE READ');
|
|
1189
|
-
// Check workflow status. If it is complete, do nothing.
|
|
1190
|
-
const statusResult = await getWorkflowStatusValue(client, workflowID, this.schemaName);
|
|
1191
|
-
if (!statusResult || statusResult === workflow_1.StatusString.SUCCESS || statusResult === workflow_1.StatusString.ERROR) {
|
|
1192
|
-
await client.query('ROLLBACK');
|
|
1193
|
-
if (!statusResult) {
|
|
1194
|
-
if (statusResult === undefined) {
|
|
1195
|
-
throw new error_1.DBOSNonExistentWorkflowError(`Workflow ${workflowID} does not exist`);
|
|
1196
|
-
}
|
|
1197
|
-
}
|
|
1198
|
-
return;
|
|
1199
|
-
}
|
|
1200
|
-
// Set the workflow's status to ENQUEUED and reset recovery attempts and deadline.
|
|
1201
|
-
await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ENQUEUED, this.schemaName, {
|
|
1202
|
-
update: {
|
|
1203
|
-
queueName: utils_1.INTERNAL_QUEUE_NAME,
|
|
1204
|
-
resetRecoveryAttempts: true,
|
|
1205
|
-
resetDeadline: true,
|
|
1206
|
-
resetDeduplicationID: true,
|
|
1207
|
-
resetStartedAtEpochMs: true,
|
|
1208
|
-
},
|
|
1209
|
-
throwOnFailure: false,
|
|
1210
|
-
});
|
|
1211
|
-
await client.query('COMMIT');
|
|
1212
|
-
}
|
|
1213
|
-
catch (error) {
|
|
1214
|
-
this.logger.error(error);
|
|
1215
|
-
await client.query('ROLLBACK');
|
|
1216
|
-
throw error;
|
|
1217
|
-
}
|
|
1218
|
-
finally {
|
|
1219
|
-
client.release();
|
|
1220
|
-
}
|
|
1221
|
-
}
|
|
1222
|
-
async getWorkflowChildren(workflowID) {
|
|
1223
|
-
// BFS to find all descendant workflows
|
|
1224
|
-
const visited = new Set([workflowID]);
|
|
1225
|
-
const queue = [workflowID];
|
|
1226
|
-
const children = [];
|
|
1227
|
-
const client = await this.pool.connect();
|
|
1228
|
-
try {
|
|
1229
|
-
while (queue.length > 0) {
|
|
1230
|
-
const batch = queue.splice(0, queue.length);
|
|
1231
|
-
const result = await client.query(`SELECT DISTINCT child_workflow_id
|
|
1232
|
-
FROM "${this.schemaName}".operation_outputs
|
|
1233
|
-
WHERE workflow_uuid = ANY($1)
|
|
1234
|
-
AND child_workflow_id IS NOT NULL`, [batch]);
|
|
1235
|
-
for (const row of result.rows) {
|
|
1236
|
-
if (!visited.has(row.child_workflow_id)) {
|
|
1237
|
-
visited.add(row.child_workflow_id);
|
|
1238
|
-
queue.push(row.child_workflow_id);
|
|
1239
|
-
children.push(row.child_workflow_id);
|
|
1240
|
-
}
|
|
1241
|
-
}
|
|
1242
|
-
}
|
|
1243
|
-
}
|
|
1244
|
-
finally {
|
|
1245
|
-
client.release();
|
|
1246
|
-
}
|
|
1247
|
-
return children;
|
|
1248
|
-
}
|
|
1249
|
-
async deleteWorkflow(workflowID, deleteChildren = false) {
|
|
1250
|
-
let workflowsToDelete = [workflowID];
|
|
1251
|
-
if (deleteChildren) {
|
|
1252
|
-
const children = await this.getWorkflowChildren(workflowID);
|
|
1253
|
-
workflowsToDelete = [...workflowsToDelete, ...children];
|
|
1254
|
-
}
|
|
1255
|
-
const client = await this.pool.connect();
|
|
1256
|
-
try {
|
|
1257
|
-
await client.query(`DELETE FROM "${this.schemaName}".workflow_status
|
|
1258
|
-
WHERE workflow_uuid = ANY($1)`, [workflowsToDelete]);
|
|
1259
|
-
}
|
|
1260
|
-
finally {
|
|
1261
|
-
client.release();
|
|
1262
|
-
}
|
|
1263
|
-
// Clean up in-memory maps
|
|
1264
|
-
for (const wfid of workflowsToDelete) {
|
|
1265
|
-
this.runningWorkflowMap.delete(wfid);
|
|
1266
|
-
this.workflowCancellationMap.delete(wfid);
|
|
1267
|
-
}
|
|
1268
|
-
}
|
|
1269
850
|
async exportWorkflow(workflowID, exportChildren = false) {
|
|
1270
851
|
const workflowIDs = [workflowID];
|
|
1271
852
|
if (exportChildren) {
|
|
@@ -1415,6 +996,7 @@ class PostgresSystemDatabase {
|
|
|
1415
996
|
client.release();
|
|
1416
997
|
}
|
|
1417
998
|
}
|
|
999
|
+
// ==================== Awaiting Workflows ====================
|
|
1418
1000
|
registerRunningWorkflow(workflowID, workflowPromise) {
|
|
1419
1001
|
// Need to await for the workflow and capture errors.
|
|
1420
1002
|
const awaitWorkflowPromise = workflowPromise
|
|
@@ -1444,223 +1026,408 @@ class PostgresSystemDatabase {
|
|
|
1444
1026
|
this.logger.warn('Message notification map is not empty - shutdown is not clean.');
|
|
1445
1027
|
//throw new Error('Message notification map is not empty - shutdown is not clean.');
|
|
1446
1028
|
}
|
|
1447
|
-
}
|
|
1448
|
-
async
|
|
1449
|
-
const
|
|
1450
|
-
|
|
1451
|
-
|
|
1452
|
-
|
|
1453
|
-
|
|
1454
|
-
|
|
1455
|
-
|
|
1456
|
-
|
|
1457
|
-
|
|
1458
|
-
|
|
1459
|
-
|
|
1029
|
+
}
|
|
1030
|
+
async awaitWorkflowResult(workflowID, timeoutSeconds, callerID, timerFuncID) {
|
|
1031
|
+
const timeoutms = timeoutSeconds !== undefined ? timeoutSeconds * 1000 : undefined;
|
|
1032
|
+
let finishTime = timeoutms !== undefined ? Date.now() + timeoutms : undefined;
|
|
1033
|
+
while (true) {
|
|
1034
|
+
let resolveNotification;
|
|
1035
|
+
const statusPromise = new Promise((resolve) => {
|
|
1036
|
+
resolveNotification = resolve;
|
|
1037
|
+
});
|
|
1038
|
+
const irh = this.cancelWakeupMap.registerCallback(workflowID, (_res) => {
|
|
1039
|
+
resolveNotification();
|
|
1040
|
+
});
|
|
1041
|
+
const crh = callerID
|
|
1042
|
+
? this.cancelWakeupMap.registerCallback(callerID, (_res) => {
|
|
1043
|
+
resolveNotification();
|
|
1044
|
+
})
|
|
1045
|
+
: undefined;
|
|
1046
|
+
try {
|
|
1047
|
+
if (callerID)
|
|
1048
|
+
await this.checkIfCanceled(callerID);
|
|
1049
|
+
try {
|
|
1050
|
+
const { rows } = await this.pool.query(`SELECT status, output, error, serialization FROM "${this.schemaName}".workflow_status
|
|
1051
|
+
WHERE workflow_uuid=$1`, [workflowID]);
|
|
1052
|
+
if (rows.length > 0) {
|
|
1053
|
+
const status = rows[0].status;
|
|
1054
|
+
if (status === workflow_1.StatusString.SUCCESS) {
|
|
1055
|
+
return { output: rows[0].output, serialization: rows[0].serialization };
|
|
1056
|
+
}
|
|
1057
|
+
else if (status === workflow_1.StatusString.ERROR) {
|
|
1058
|
+
return { error: rows[0].error, serialization: rows[0].serialization };
|
|
1059
|
+
}
|
|
1060
|
+
else if (status === workflow_1.StatusString.CANCELLED) {
|
|
1061
|
+
return { cancelled: true };
|
|
1062
|
+
}
|
|
1063
|
+
else if (status === workflow_1.StatusString.MAX_RECOVERY_ATTEMPTS_EXCEEDED) {
|
|
1064
|
+
return { maxRecoveryAttemptsExceeded: true };
|
|
1065
|
+
}
|
|
1066
|
+
else {
|
|
1067
|
+
// Status is not actionable
|
|
1068
|
+
}
|
|
1069
|
+
}
|
|
1070
|
+
}
|
|
1071
|
+
catch (e) {
|
|
1072
|
+
const err = e;
|
|
1073
|
+
this.logger.error(`Exception from system database: ${err}`, err);
|
|
1074
|
+
throw err;
|
|
1075
|
+
}
|
|
1076
|
+
const ct = Date.now();
|
|
1077
|
+
if (finishTime && ct > finishTime)
|
|
1078
|
+
return undefined; // Time's up
|
|
1079
|
+
let timeoutPromise = Promise.resolve();
|
|
1080
|
+
let timeoutCancel = () => { };
|
|
1081
|
+
if (timerFuncID !== undefined && callerID !== undefined && timeoutms !== undefined) {
|
|
1082
|
+
const { promise, cancel, endTime } = await this.#durableSleep(callerID, timerFuncID, timeoutms, this.dbPollingIntervalResultMs);
|
|
1083
|
+
finishTime = endTime;
|
|
1084
|
+
timeoutPromise = promise;
|
|
1085
|
+
timeoutCancel = cancel;
|
|
1086
|
+
}
|
|
1087
|
+
else {
|
|
1088
|
+
let poll = finishTime ? finishTime - ct : this.dbPollingIntervalResultMs;
|
|
1089
|
+
poll = Math.min(this.dbPollingIntervalResultMs, poll);
|
|
1090
|
+
const { promise, cancel } = (0, utils_1.cancellableSleep)(poll);
|
|
1091
|
+
timeoutPromise = promise;
|
|
1092
|
+
timeoutCancel = cancel;
|
|
1093
|
+
}
|
|
1094
|
+
try {
|
|
1095
|
+
await Promise.race([statusPromise, timeoutPromise]);
|
|
1096
|
+
}
|
|
1097
|
+
finally {
|
|
1098
|
+
timeoutCancel();
|
|
1099
|
+
}
|
|
1100
|
+
}
|
|
1101
|
+
finally {
|
|
1102
|
+
this.cancelWakeupMap.deregisterCallback(irh);
|
|
1103
|
+
if (crh)
|
|
1104
|
+
this.cancelWakeupMap.deregisterCallback(crh);
|
|
1105
|
+
}
|
|
1106
|
+
}
|
|
1107
|
+
}
|
|
1108
|
+
async awaitFirstWorkflowId(workflowIds, callerID) {
|
|
1109
|
+
const placeholders = workflowIds.map((_, i) => `$${i + 1}`).join(', ');
|
|
1110
|
+
while (true) {
|
|
1111
|
+
let resolveNotification;
|
|
1112
|
+
const wakeupPromise = new Promise((resolve) => {
|
|
1113
|
+
resolveNotification = resolve;
|
|
1114
|
+
});
|
|
1115
|
+
// Register cancel callbacks for all target workflows and the caller.
|
|
1116
|
+
const cbHandles = workflowIds.map((wfid) => this.cancelWakeupMap.registerCallback(wfid, () => resolveNotification()));
|
|
1117
|
+
const callerCbHandle = callerID
|
|
1118
|
+
? this.cancelWakeupMap.registerCallback(callerID, () => resolveNotification())
|
|
1119
|
+
: undefined;
|
|
1120
|
+
try {
|
|
1121
|
+
if (callerID)
|
|
1122
|
+
await this.checkIfCanceled(callerID);
|
|
1123
|
+
const { rows } = await this.pool.query(`SELECT workflow_uuid FROM "${this.schemaName}".workflow_status
|
|
1124
|
+
WHERE workflow_uuid IN (${placeholders})
|
|
1125
|
+
AND status NOT IN ('${workflow_1.StatusString.PENDING}', '${workflow_1.StatusString.ENQUEUED}')
|
|
1126
|
+
LIMIT 1`, workflowIds);
|
|
1127
|
+
if (rows.length > 0) {
|
|
1128
|
+
return rows[0].workflow_uuid;
|
|
1129
|
+
}
|
|
1130
|
+
const { promise: sleepPromise, cancel: sleepCancel } = (0, utils_1.cancellableSleep)(this.dbPollingIntervalResultMs);
|
|
1131
|
+
try {
|
|
1132
|
+
await Promise.race([wakeupPromise, sleepPromise]);
|
|
1133
|
+
}
|
|
1134
|
+
finally {
|
|
1135
|
+
sleepCancel();
|
|
1136
|
+
}
|
|
1137
|
+
}
|
|
1138
|
+
finally {
|
|
1139
|
+
for (const h of cbHandles) {
|
|
1140
|
+
this.cancelWakeupMap.deregisterCallback(h);
|
|
1141
|
+
}
|
|
1142
|
+
if (callerCbHandle)
|
|
1143
|
+
this.cancelWakeupMap.deregisterCallback(callerCbHandle);
|
|
1144
|
+
}
|
|
1145
|
+
}
|
|
1146
|
+
}
|
|
1147
|
+
// ==================== Sleep ====================
|
|
1148
|
+
async durableSleepms(workflowID, functionID, durationMS) {
|
|
1149
|
+
let resolveNotification;
|
|
1150
|
+
const cancelPromise = new Promise((resolve) => {
|
|
1151
|
+
resolveNotification = resolve;
|
|
1152
|
+
});
|
|
1153
|
+
const cbr = this.cancelWakeupMap.registerCallback(workflowID, resolveNotification);
|
|
1154
|
+
try {
|
|
1155
|
+
let timeoutPromise = Promise.resolve();
|
|
1156
|
+
const { promise, cancel: timeoutCancel } = await this.#durableSleep(workflowID, functionID, durationMS);
|
|
1157
|
+
timeoutPromise = promise;
|
|
1158
|
+
try {
|
|
1159
|
+
await Promise.race([cancelPromise, timeoutPromise]);
|
|
1160
|
+
}
|
|
1161
|
+
finally {
|
|
1162
|
+
timeoutCancel();
|
|
1163
|
+
}
|
|
1164
|
+
}
|
|
1165
|
+
finally {
|
|
1166
|
+
this.cancelWakeupMap.deregisterCallback(cbr);
|
|
1167
|
+
}
|
|
1168
|
+
await this.checkIfCanceled(workflowID);
|
|
1169
|
+
}
|
|
1170
|
+
// ==================== Messaging ====================
|
|
1171
|
+
nullTopic = '__null__topic__';
|
|
1172
|
+
async send(workflowID, functionID, destinationID, message, topic, serialization, messageUUID) {
|
|
1173
|
+
topic = topic ?? this.nullTopic;
|
|
1174
|
+
messageUUID = messageUUID ?? (0, crypto_1.randomUUID)();
|
|
1175
|
+
const client = await this.pool.connect();
|
|
1176
|
+
try {
|
|
1177
|
+
await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
|
|
1178
|
+
await this.#runAndRecordResult(client, exports.DBOS_FUNCNAME_SEND, workflowID, functionID, async () => {
|
|
1179
|
+
await client.query(`INSERT INTO "${this.schemaName}".notifications (destination_uuid, topic, message, serialization, message_uuid)
|
|
1180
|
+
VALUES ($1, $2, $3, $4, $5)
|
|
1181
|
+
ON CONFLICT (message_uuid) DO NOTHING;`, [destinationID, topic, message, serialization, messageUUID]);
|
|
1182
|
+
return undefined;
|
|
1183
|
+
});
|
|
1184
|
+
await client.query('COMMIT');
|
|
1185
|
+
}
|
|
1186
|
+
catch (error) {
|
|
1187
|
+
await client.query('ROLLBACK');
|
|
1188
|
+
const err = error;
|
|
1189
|
+
if (err.code === '23503') {
|
|
1190
|
+
// Foreign key constraint violation (only expected for the INSERT query)
|
|
1191
|
+
throw new error_1.DBOSNonExistentWorkflowError(`Sent to non-existent destination workflow UUID: ${destinationID}`);
|
|
1460
1192
|
}
|
|
1461
|
-
|
|
1462
|
-
|
|
1193
|
+
else {
|
|
1194
|
+
throw err;
|
|
1463
1195
|
}
|
|
1464
1196
|
}
|
|
1465
|
-
|
|
1466
|
-
|
|
1467
|
-
return parseStatus(json);
|
|
1197
|
+
finally {
|
|
1198
|
+
client.release();
|
|
1468
1199
|
}
|
|
1469
|
-
|
|
1470
|
-
|
|
1200
|
+
}
|
|
1201
|
+
async sendDirect(destinationID, message, topic, serialization, messageUUID) {
|
|
1202
|
+
topic = topic ?? this.nullTopic;
|
|
1203
|
+
messageUUID = messageUUID ?? (0, crypto_1.randomUUID)();
|
|
1204
|
+
try {
|
|
1205
|
+
await this.pool.query(`INSERT INTO "${this.schemaName}".notifications (destination_uuid, topic, message, serialization, message_uuid)
|
|
1206
|
+
VALUES ($1, $2, $3, $4, $5)
|
|
1207
|
+
ON CONFLICT (message_uuid) DO NOTHING;`, [destinationID, topic, message, serialization, messageUUID]);
|
|
1208
|
+
}
|
|
1209
|
+
catch (error) {
|
|
1210
|
+
const err = error;
|
|
1211
|
+
if (err.code === '23503') {
|
|
1212
|
+
throw new error_1.DBOSNonExistentWorkflowError(`Sent to non-existent destination workflow UUID: ${destinationID}`);
|
|
1213
|
+
}
|
|
1214
|
+
throw err;
|
|
1471
1215
|
}
|
|
1472
1216
|
}
|
|
1473
|
-
async
|
|
1217
|
+
async recv(workflowID, functionID, timeoutFunctionID, topic, timeoutSeconds = dbos_executor_1.DBOSExecutor.defaultNotificationTimeoutSec) {
|
|
1218
|
+
topic = topic ?? this.nullTopic;
|
|
1219
|
+
const startTime = Date.now();
|
|
1220
|
+
// First, check for previous executions.
|
|
1221
|
+
const res = await this.getOperationResultAndThrowIfCancelled(workflowID, functionID);
|
|
1222
|
+
if (res) {
|
|
1223
|
+
if (res.functionName !== exports.DBOS_FUNCNAME_RECV) {
|
|
1224
|
+
throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, exports.DBOS_FUNCNAME_RECV, res.functionName);
|
|
1225
|
+
}
|
|
1226
|
+
return { serializedValue: res.output, serialization: res.serialization ?? null };
|
|
1227
|
+
}
|
|
1474
1228
|
const timeoutms = timeoutSeconds !== undefined ? timeoutSeconds * 1000 : undefined;
|
|
1475
1229
|
let finishTime = timeoutms !== undefined ? Date.now() + timeoutms : undefined;
|
|
1476
1230
|
while (true) {
|
|
1231
|
+
// register the key with the global notifications listener.
|
|
1477
1232
|
let resolveNotification;
|
|
1478
|
-
const
|
|
1233
|
+
const messagePromise = new Promise((resolve) => {
|
|
1479
1234
|
resolveNotification = resolve;
|
|
1480
1235
|
});
|
|
1481
|
-
const
|
|
1236
|
+
const payload = `${workflowID}::${topic}`;
|
|
1237
|
+
const cbr = this.notificationsMap.registerCallback(payload, resolveNotification);
|
|
1238
|
+
const crh = this.cancelWakeupMap.registerCallback(workflowID, (_res) => {
|
|
1482
1239
|
resolveNotification();
|
|
1483
1240
|
});
|
|
1484
|
-
const crh = callerID
|
|
1485
|
-
? this.cancelWakeupMap.registerCallback(callerID, (_res) => {
|
|
1486
|
-
resolveNotification();
|
|
1487
|
-
})
|
|
1488
|
-
: undefined;
|
|
1489
1241
|
try {
|
|
1490
|
-
|
|
1491
|
-
|
|
1492
|
-
|
|
1493
|
-
|
|
1494
|
-
|
|
1495
|
-
if (rows.length > 0) {
|
|
1496
|
-
const status = rows[0].status;
|
|
1497
|
-
if (status === workflow_1.StatusString.SUCCESS) {
|
|
1498
|
-
return { output: rows[0].output, serialization: rows[0].serialization };
|
|
1499
|
-
}
|
|
1500
|
-
else if (status === workflow_1.StatusString.ERROR) {
|
|
1501
|
-
return { error: rows[0].error, serialization: rows[0].serialization };
|
|
1502
|
-
}
|
|
1503
|
-
else if (status === workflow_1.StatusString.CANCELLED) {
|
|
1504
|
-
return { cancelled: true };
|
|
1505
|
-
}
|
|
1506
|
-
else if (status === workflow_1.StatusString.MAX_RECOVERY_ATTEMPTS_EXCEEDED) {
|
|
1507
|
-
return { maxRecoveryAttemptsExceeded: true };
|
|
1508
|
-
}
|
|
1509
|
-
else {
|
|
1510
|
-
// Status is not actionable
|
|
1511
|
-
}
|
|
1512
|
-
}
|
|
1513
|
-
}
|
|
1514
|
-
catch (e) {
|
|
1515
|
-
const err = e;
|
|
1516
|
-
this.logger.error(`Exception from system database: ${err}`, err);
|
|
1517
|
-
throw err;
|
|
1518
|
-
}
|
|
1242
|
+
await this.checkIfCanceled(workflowID);
|
|
1243
|
+
// Check if the key is already in the DB, then wait for the notification if it isn't.
|
|
1244
|
+
const initRecvRows = (await this.pool.query(`SELECT topic FROM "${this.schemaName}".notifications WHERE destination_uuid=$1 AND topic=$2 AND consumed = false;`, [workflowID, topic])).rows;
|
|
1245
|
+
if (initRecvRows.length !== 0)
|
|
1246
|
+
break;
|
|
1519
1247
|
const ct = Date.now();
|
|
1520
1248
|
if (finishTime && ct > finishTime)
|
|
1521
|
-
|
|
1249
|
+
break; // Time's up
|
|
1522
1250
|
let timeoutPromise = Promise.resolve();
|
|
1523
1251
|
let timeoutCancel = () => { };
|
|
1524
|
-
if (
|
|
1525
|
-
const { promise, cancel, endTime } = await this.#durableSleep(
|
|
1526
|
-
finishTime = endTime;
|
|
1252
|
+
if (timeoutms) {
|
|
1253
|
+
const { promise, cancel, endTime } = await this.#durableSleep(workflowID, timeoutFunctionID, timeoutms, this.dbPollingIntervalEventMs);
|
|
1527
1254
|
timeoutPromise = promise;
|
|
1528
1255
|
timeoutCancel = cancel;
|
|
1256
|
+
finishTime = endTime;
|
|
1529
1257
|
}
|
|
1530
1258
|
else {
|
|
1531
|
-
let poll = finishTime ? finishTime - ct : this.
|
|
1532
|
-
poll = Math.min(this.
|
|
1259
|
+
let poll = finishTime ? finishTime - ct : this.dbPollingIntervalEventMs;
|
|
1260
|
+
poll = Math.min(this.dbPollingIntervalEventMs, poll);
|
|
1533
1261
|
const { promise, cancel } = (0, utils_1.cancellableSleep)(poll);
|
|
1534
1262
|
timeoutPromise = promise;
|
|
1535
1263
|
timeoutCancel = cancel;
|
|
1536
1264
|
}
|
|
1537
1265
|
try {
|
|
1538
|
-
await Promise.race([
|
|
1266
|
+
await Promise.race([messagePromise, timeoutPromise]);
|
|
1539
1267
|
}
|
|
1540
1268
|
finally {
|
|
1541
1269
|
timeoutCancel();
|
|
1542
1270
|
}
|
|
1543
1271
|
}
|
|
1544
1272
|
finally {
|
|
1545
|
-
this.
|
|
1546
|
-
|
|
1547
|
-
|
|
1273
|
+
this.notificationsMap.deregisterCallback(cbr);
|
|
1274
|
+
this.cancelWakeupMap.deregisterCallback(crh);
|
|
1275
|
+
}
|
|
1276
|
+
}
|
|
1277
|
+
await this.checkIfCanceled(workflowID);
|
|
1278
|
+
// Transactionally consume and return the message if it's in the DB, otherwise return null.
|
|
1279
|
+
let message = null;
|
|
1280
|
+
let serialization = null;
|
|
1281
|
+
const client = await this.pool.connect();
|
|
1282
|
+
try {
|
|
1283
|
+
await client.query(`BEGIN ISOLATION LEVEL READ COMMITTED`);
|
|
1284
|
+
const finalRecvRows = (await client.query(`UPDATE "${this.schemaName}".notifications
|
|
1285
|
+
SET consumed = true
|
|
1286
|
+
WHERE destination_uuid = $1
|
|
1287
|
+
AND topic = $2
|
|
1288
|
+
AND consumed = false
|
|
1289
|
+
AND message_uuid = (
|
|
1290
|
+
SELECT message_uuid
|
|
1291
|
+
FROM "${this.schemaName}".notifications
|
|
1292
|
+
WHERE destination_uuid = $1
|
|
1293
|
+
AND topic = $2
|
|
1294
|
+
AND consumed = false
|
|
1295
|
+
ORDER BY created_at_epoch_ms ASC
|
|
1296
|
+
LIMIT 1
|
|
1297
|
+
)
|
|
1298
|
+
RETURNING notifications.message, notifications.serialization;`, [workflowID, topic])).rows;
|
|
1299
|
+
if (finalRecvRows.length > 0) {
|
|
1300
|
+
message = finalRecvRows[0].message;
|
|
1301
|
+
serialization = finalRecvRows[0].serialization;
|
|
1548
1302
|
}
|
|
1303
|
+
await this.recordOperationResultInternal(client, workflowID, functionID, exports.DBOS_FUNCNAME_RECV, true, startTime, Date.now(), {
|
|
1304
|
+
output: message,
|
|
1305
|
+
serialization,
|
|
1306
|
+
});
|
|
1307
|
+
await client.query(`COMMIT`);
|
|
1308
|
+
}
|
|
1309
|
+
catch (e) {
|
|
1310
|
+
this.logger.error(e);
|
|
1311
|
+
await client.query(`ROLLBACK`);
|
|
1312
|
+
throw e;
|
|
1549
1313
|
}
|
|
1314
|
+
finally {
|
|
1315
|
+
client.release();
|
|
1316
|
+
}
|
|
1317
|
+
return { serializedValue: message, serialization };
|
|
1550
1318
|
}
|
|
1551
|
-
|
|
1552
|
-
|
|
1319
|
+
// ==================== Events ====================
|
|
1320
|
+
async setEvent(workflowID, functionID, key, message, serialization) {
|
|
1321
|
+
const client = await this.pool.connect();
|
|
1322
|
+
try {
|
|
1323
|
+
await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
|
|
1324
|
+
await this.#runAndRecordResult(client, exports.DBOS_FUNCNAME_SETEVENT, workflowID, functionID, async () => {
|
|
1325
|
+
await client.query(`INSERT INTO "${this.schemaName}".workflow_events (workflow_uuid, key, value, serialization)
|
|
1326
|
+
VALUES ($1, $2, $3, $4)
|
|
1327
|
+
ON CONFLICT (workflow_uuid, key)
|
|
1328
|
+
DO UPDATE SET value = $3
|
|
1329
|
+
RETURNING workflow_uuid;`, [workflowID, key, message, serialization]);
|
|
1330
|
+
// Also write to the immutable history table for fork support
|
|
1331
|
+
await client.query(`INSERT INTO "${this.schemaName}".workflow_events_history (workflow_uuid, function_id, key, value, serialization)
|
|
1332
|
+
VALUES ($1, $2, $3, $4, $5)
|
|
1333
|
+
ON CONFLICT (workflow_uuid, function_id, key)
|
|
1334
|
+
DO UPDATE SET value = $4;`, [workflowID, functionID, key, message, serialization]);
|
|
1335
|
+
return undefined;
|
|
1336
|
+
});
|
|
1337
|
+
await client.query('COMMIT');
|
|
1338
|
+
}
|
|
1339
|
+
catch (e) {
|
|
1340
|
+
this.logger.error(e);
|
|
1341
|
+
await client.query(`ROLLBACK`);
|
|
1342
|
+
throw e;
|
|
1343
|
+
}
|
|
1344
|
+
finally {
|
|
1345
|
+
client.release();
|
|
1346
|
+
}
|
|
1347
|
+
}
|
|
1348
|
+
async getEvent(workflowID, key, timeoutSeconds, callerWorkflow) {
|
|
1349
|
+
const startTime = Date.now();
|
|
1350
|
+
// Check if the operation has been done before for OAOO (only do this inside a workflow).
|
|
1351
|
+
if (callerWorkflow) {
|
|
1352
|
+
const res = await this.getOperationResultAndThrowIfCancelled(callerWorkflow.workflowID, callerWorkflow.functionID);
|
|
1353
|
+
if (res) {
|
|
1354
|
+
if (res.functionName !== exports.DBOS_FUNCNAME_GETEVENT) {
|
|
1355
|
+
throw new error_1.DBOSUnexpectedStepError(callerWorkflow.workflowID, callerWorkflow.functionID, exports.DBOS_FUNCNAME_GETEVENT, res.functionName);
|
|
1356
|
+
}
|
|
1357
|
+
return { serializedValue: res.output, serialization: null };
|
|
1358
|
+
}
|
|
1359
|
+
}
|
|
1360
|
+
// Get the return the value. if it's in the DB, otherwise return null.
|
|
1361
|
+
let value = null;
|
|
1362
|
+
let valueSer = null;
|
|
1363
|
+
const payloadKey = `${workflowID}::${key}`;
|
|
1364
|
+
const timeoutms = timeoutSeconds !== undefined ? timeoutSeconds * 1000 : undefined;
|
|
1365
|
+
let finishTime = timeoutms !== undefined ? Date.now() + timeoutms : undefined;
|
|
1366
|
+
// Register the key with the global notifications listener first... we do not want to look in the DB first
|
|
1367
|
+
// or that would cause a timing hole.
|
|
1553
1368
|
while (true) {
|
|
1554
1369
|
let resolveNotification;
|
|
1555
|
-
const
|
|
1370
|
+
const valuePromise = new Promise((resolve) => {
|
|
1556
1371
|
resolveNotification = resolve;
|
|
1557
1372
|
});
|
|
1558
|
-
|
|
1559
|
-
const
|
|
1560
|
-
|
|
1561
|
-
|
|
1373
|
+
const cbr = this.workflowEventsMap.registerCallback(payloadKey, resolveNotification);
|
|
1374
|
+
const crh = callerWorkflow?.workflowID
|
|
1375
|
+
? this.cancelWakeupMap.registerCallback(callerWorkflow.workflowID, (_res) => {
|
|
1376
|
+
resolveNotification();
|
|
1377
|
+
})
|
|
1562
1378
|
: undefined;
|
|
1563
1379
|
try {
|
|
1564
|
-
if (
|
|
1565
|
-
await this.checkIfCanceled(
|
|
1566
|
-
|
|
1567
|
-
|
|
1568
|
-
|
|
1569
|
-
|
|
1570
|
-
if (
|
|
1571
|
-
|
|
1380
|
+
if (callerWorkflow?.workflowID)
|
|
1381
|
+
await this.checkIfCanceled(callerWorkflow?.workflowID);
|
|
1382
|
+
// Check if the key is already in the DB, then wait for the notification if it isn't.
|
|
1383
|
+
const initRecvRows = (await this.pool.query(`SELECT key, value, serialization
|
|
1384
|
+
FROM "${this.schemaName}".workflow_events
|
|
1385
|
+
WHERE workflow_uuid=$1 AND key=$2;`, [workflowID, key])).rows;
|
|
1386
|
+
if (initRecvRows.length > 0) {
|
|
1387
|
+
value = initRecvRows[0].value;
|
|
1388
|
+
valueSer = initRecvRows[0].serialization;
|
|
1389
|
+
break;
|
|
1390
|
+
}
|
|
1391
|
+
const ct = Date.now();
|
|
1392
|
+
if (finishTime && ct > finishTime)
|
|
1393
|
+
break; // Time's up
|
|
1394
|
+
// If we have a callerWorkflow, we want a durable sleep, otherwise, not
|
|
1395
|
+
let timeoutPromise = Promise.resolve();
|
|
1396
|
+
let timeoutCancel = () => { };
|
|
1397
|
+
if (callerWorkflow && timeoutms) {
|
|
1398
|
+
const { promise, cancel, endTime } = await this.#durableSleep(callerWorkflow.workflowID, callerWorkflow.timeoutFunctionID ?? -1, timeoutms, this.dbPollingIntervalEventMs);
|
|
1399
|
+
timeoutPromise = promise;
|
|
1400
|
+
timeoutCancel = cancel;
|
|
1401
|
+
finishTime = endTime;
|
|
1402
|
+
}
|
|
1403
|
+
else {
|
|
1404
|
+
let poll = finishTime ? finishTime - ct : this.dbPollingIntervalEventMs;
|
|
1405
|
+
poll = Math.min(this.dbPollingIntervalEventMs, poll);
|
|
1406
|
+
const { promise, cancel } = (0, utils_1.cancellableSleep)(poll);
|
|
1407
|
+
timeoutPromise = promise;
|
|
1408
|
+
timeoutCancel = cancel;
|
|
1572
1409
|
}
|
|
1573
|
-
const { promise: sleepPromise, cancel: sleepCancel } = (0, utils_1.cancellableSleep)(this.dbPollingIntervalResultMs);
|
|
1574
1410
|
try {
|
|
1575
|
-
await Promise.race([
|
|
1411
|
+
await Promise.race([valuePromise, timeoutPromise]);
|
|
1576
1412
|
}
|
|
1577
1413
|
finally {
|
|
1578
|
-
|
|
1414
|
+
timeoutCancel();
|
|
1579
1415
|
}
|
|
1580
1416
|
}
|
|
1581
1417
|
finally {
|
|
1582
|
-
|
|
1583
|
-
|
|
1584
|
-
|
|
1585
|
-
if (callerCbHandle)
|
|
1586
|
-
this.cancelWakeupMap.deregisterCallback(callerCbHandle);
|
|
1418
|
+
this.workflowEventsMap.deregisterCallback(cbr);
|
|
1419
|
+
if (crh)
|
|
1420
|
+
this.cancelWakeupMap.deregisterCallback(crh);
|
|
1587
1421
|
}
|
|
1588
1422
|
}
|
|
1589
|
-
|
|
1590
|
-
|
|
1591
|
-
|
|
1592
|
-
|
|
1593
|
-
|
|
1594
|
-
|
|
1595
|
-
|
|
1596
|
-
|
|
1597
|
-
const connect = async () => {
|
|
1598
|
-
const reconnect = () => {
|
|
1599
|
-
if (this.reconnectTimeout) {
|
|
1600
|
-
return;
|
|
1601
|
-
}
|
|
1602
|
-
this.reconnectTimeout = setTimeout(async () => {
|
|
1603
|
-
this.reconnectTimeout = null;
|
|
1604
|
-
await connect();
|
|
1605
|
-
}, 1000);
|
|
1606
|
-
};
|
|
1607
|
-
let client = null;
|
|
1608
|
-
try {
|
|
1609
|
-
client = await this.pool.connect();
|
|
1610
|
-
await client.query('LISTEN dbos_notifications_channel;');
|
|
1611
|
-
await client.query('LISTEN dbos_workflow_events_channel;');
|
|
1612
|
-
// Self-test: verify LISTEN actually works by sending a NOTIFY and checking it arrives.
|
|
1613
|
-
// If a transaction-mode pooler (e.g. PgBouncer pool_mode=transaction) is in the path,
|
|
1614
|
-
// LISTEN succeeds but the subscription is silently lost when the backend is released.
|
|
1615
|
-
let selfTestReceived = false;
|
|
1616
|
-
const onSelfTest = (msg) => {
|
|
1617
|
-
if (msg.channel === 'dbos_notifications_channel' && msg.payload === 'dbos_listen_selftest') {
|
|
1618
|
-
selfTestReceived = true;
|
|
1619
|
-
}
|
|
1620
|
-
};
|
|
1621
|
-
client.on('notification', onSelfTest);
|
|
1622
|
-
await this.pool.query("NOTIFY dbos_notifications_channel, 'dbos_listen_selftest'");
|
|
1623
|
-
for (let i = 0; i < 30 && !selfTestReceived; i++) {
|
|
1624
|
-
await new Promise((r) => setTimeout(r, 100));
|
|
1625
|
-
}
|
|
1626
|
-
client.removeListener('notification', onSelfTest);
|
|
1627
|
-
if (!selfTestReceived) {
|
|
1628
|
-
this.logger.warn('LISTEN/NOTIFY self-test failed: notification was not received within 3 seconds. ' +
|
|
1629
|
-
'This typically means the connection is going through a transaction-mode pooler ' +
|
|
1630
|
-
'(e.g. PgBouncer with pool_mode=transaction), which silently breaks LISTEN/NOTIFY. ' +
|
|
1631
|
-
'Workflow notifications will fall back to polling, which may increase latency.');
|
|
1632
|
-
}
|
|
1633
|
-
const handler = (msg) => {
|
|
1634
|
-
if (!this.shouldUseDBNotifications)
|
|
1635
|
-
return;
|
|
1636
|
-
if (msg.channel === 'dbos_notifications_channel' && msg.payload) {
|
|
1637
|
-
this.notificationsMap.callCallbacks(msg.payload);
|
|
1638
|
-
}
|
|
1639
|
-
else if (msg.channel === 'dbos_workflow_events_channel' && msg.payload) {
|
|
1640
|
-
this.workflowEventsMap.callCallbacks(msg.payload);
|
|
1641
|
-
}
|
|
1642
|
-
};
|
|
1643
|
-
client.on('notification', handler);
|
|
1644
|
-
client.on('error', (err) => {
|
|
1645
|
-
this.logger.warn(`Error in notifications client: ${err}`);
|
|
1646
|
-
if (client) {
|
|
1647
|
-
client.removeAllListeners();
|
|
1648
|
-
client.release(true);
|
|
1649
|
-
}
|
|
1650
|
-
reconnect();
|
|
1651
|
-
});
|
|
1652
|
-
this.notificationsClient = client;
|
|
1653
|
-
}
|
|
1654
|
-
catch (error) {
|
|
1655
|
-
this.logger.warn(`Error in notifications listener: ${String(error)}`);
|
|
1656
|
-
if (client) {
|
|
1657
|
-
client.removeAllListeners();
|
|
1658
|
-
client.release(true);
|
|
1659
|
-
}
|
|
1660
|
-
reconnect();
|
|
1661
|
-
}
|
|
1662
|
-
};
|
|
1663
|
-
await connect();
|
|
1423
|
+
// Record the output if it is inside a workflow.
|
|
1424
|
+
if (callerWorkflow) {
|
|
1425
|
+
await this.recordOperationResult(callerWorkflow.workflowID, callerWorkflow.functionID, exports.DBOS_FUNCNAME_GETEVENT, true, startTime, {
|
|
1426
|
+
output: value,
|
|
1427
|
+
serialization: valueSer,
|
|
1428
|
+
});
|
|
1429
|
+
}
|
|
1430
|
+
return { serializedValue: value, serialization: valueSer };
|
|
1664
1431
|
}
|
|
1665
1432
|
// Event dispatcher queries / updates
|
|
1666
1433
|
async getEventDispatchState(service, workflowName, key) {
|
|
@@ -1703,125 +1470,78 @@ class PostgresSystemDatabase {
|
|
|
1703
1470
|
: undefined,
|
|
1704
1471
|
};
|
|
1705
1472
|
}
|
|
1706
|
-
|
|
1707
|
-
|
|
1708
|
-
const
|
|
1709
|
-
|
|
1710
|
-
'
|
|
1711
|
-
|
|
1712
|
-
|
|
1713
|
-
|
|
1714
|
-
|
|
1715
|
-
|
|
1716
|
-
|
|
1717
|
-
|
|
1718
|
-
|
|
1719
|
-
|
|
1720
|
-
'
|
|
1721
|
-
'updated_at',
|
|
1722
|
-
'application_version',
|
|
1723
|
-
'application_id',
|
|
1724
|
-
'workflow_deadline_epoch_ms',
|
|
1725
|
-
'workflow_timeout_ms',
|
|
1726
|
-
'deduplication_id',
|
|
1727
|
-
'priority',
|
|
1728
|
-
'queue_partition_key',
|
|
1729
|
-
'started_at_epoch_ms',
|
|
1730
|
-
'forked_from',
|
|
1731
|
-
'parent_workflow_id',
|
|
1732
|
-
];
|
|
1733
|
-
input.loadInput = input.loadInput ?? true;
|
|
1734
|
-
input.loadOutput = input.loadOutput ?? true;
|
|
1735
|
-
if (input.loadInput) {
|
|
1736
|
-
selectColumns.push('inputs', 'request');
|
|
1737
|
-
}
|
|
1738
|
-
if (input.loadOutput) {
|
|
1739
|
-
selectColumns.push('output', 'error');
|
|
1740
|
-
}
|
|
1741
|
-
if (input.loadInput || input.loadOutput) {
|
|
1742
|
-
selectColumns.push('serialization');
|
|
1473
|
+
// ==================== Streams ====================
|
|
1474
|
+
async writeStreamFromStep(workflowID, functionID, key, serializedValue, serialization) {
|
|
1475
|
+
const client = await this.pool.connect();
|
|
1476
|
+
try {
|
|
1477
|
+
await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
|
|
1478
|
+
// Find the maximum offset for this workflow_uuid and key combination
|
|
1479
|
+
const maxOffsetResult = await client.query(`SELECT MAX("offset") FROM "${this.schemaName}".streams
|
|
1480
|
+
WHERE workflow_uuid = $1 AND key = $2`, [workflowID, key]);
|
|
1481
|
+
// Next offset is max + 1, or 0 if no records exist
|
|
1482
|
+
const maxOffset = maxOffsetResult.rows[0].max;
|
|
1483
|
+
const nextOffset = maxOffset !== null ? maxOffset + 1 : 0;
|
|
1484
|
+
// Insert the new stream entry
|
|
1485
|
+
await client.query(`INSERT INTO "${this.schemaName}".streams (workflow_uuid, key, value, "offset", function_id, serialization)
|
|
1486
|
+
VALUES ($1, $2, $3, $4, $5, $6)`, [workflowID, key, serializedValue, nextOffset, functionID, serialization]);
|
|
1487
|
+
await client.query('COMMIT');
|
|
1743
1488
|
}
|
|
1744
|
-
|
|
1745
|
-
|
|
1746
|
-
|
|
1747
|
-
|
|
1748
|
-
let paramCounter = 1;
|
|
1749
|
-
// Helper: add a filter for a field that may be a single value or an array.
|
|
1750
|
-
// Uses = for a single value, IN (...) for an array.
|
|
1751
|
-
const addFilter = (column, value) => {
|
|
1752
|
-
if (!value)
|
|
1753
|
-
return;
|
|
1754
|
-
if (Array.isArray(value)) {
|
|
1755
|
-
const placeholders = value.map((_, i) => `$${paramCounter + i}`).join(', ');
|
|
1756
|
-
whereClauses.push(`${column} IN (${placeholders})`);
|
|
1757
|
-
params.push(...value);
|
|
1758
|
-
paramCounter += value.length;
|
|
1759
|
-
}
|
|
1760
|
-
else {
|
|
1761
|
-
whereClauses.push(`${column} = $${paramCounter}`);
|
|
1762
|
-
params.push(value);
|
|
1763
|
-
paramCounter++;
|
|
1764
|
-
}
|
|
1765
|
-
};
|
|
1766
|
-
// If queuesOnly, filter for queued workflows
|
|
1767
|
-
if (input.queuesOnly) {
|
|
1768
|
-
whereClauses.push(`queue_name IS NOT NULL`);
|
|
1769
|
-
whereClauses.push(`status IN ($${paramCounter}, $${paramCounter + 1})`);
|
|
1770
|
-
params.push(workflow_1.StatusString.ENQUEUED, workflow_1.StatusString.PENDING);
|
|
1771
|
-
paramCounter += 2;
|
|
1489
|
+
catch (e) {
|
|
1490
|
+
this.logger.error(e);
|
|
1491
|
+
await client.query('ROLLBACK');
|
|
1492
|
+
throw e;
|
|
1772
1493
|
}
|
|
1773
|
-
|
|
1774
|
-
|
|
1775
|
-
if (input.workflow_id_prefix) {
|
|
1776
|
-
if (Array.isArray(input.workflow_id_prefix)) {
|
|
1777
|
-
const likeClauses = input.workflow_id_prefix.map((_, i) => `workflow_uuid LIKE $${paramCounter + i}`);
|
|
1778
|
-
whereClauses.push(`(${likeClauses.join(' OR ')})`);
|
|
1779
|
-
params.push(...input.workflow_id_prefix.map((p) => `${p}%`));
|
|
1780
|
-
paramCounter += input.workflow_id_prefix.length;
|
|
1781
|
-
}
|
|
1782
|
-
else {
|
|
1783
|
-
whereClauses.push(`workflow_uuid LIKE $${paramCounter}`);
|
|
1784
|
-
params.push(`${input.workflow_id_prefix}%`);
|
|
1785
|
-
paramCounter++;
|
|
1786
|
-
}
|
|
1494
|
+
finally {
|
|
1495
|
+
client.release();
|
|
1787
1496
|
}
|
|
1788
|
-
|
|
1789
|
-
|
|
1790
|
-
|
|
1791
|
-
|
|
1792
|
-
|
|
1497
|
+
}
|
|
1498
|
+
async writeStreamFromWorkflow(workflowID, functionID, key, serializedValue, serialization, functionName) {
|
|
1499
|
+
const client = await this.pool.connect();
|
|
1500
|
+
try {
|
|
1501
|
+
await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
|
|
1502
|
+
await this.#runAndRecordResult(client, functionName, workflowID, functionID, async () => {
|
|
1503
|
+
// Find the maximum offset for this workflow_uuid and key combination
|
|
1504
|
+
const maxOffsetResult = await client.query(`SELECT MAX("offset") FROM "${this.schemaName}".streams
|
|
1505
|
+
WHERE workflow_uuid = $1 AND key = $2`, [workflowID, key]);
|
|
1506
|
+
// Next offset is max + 1, or 0 if no records exist
|
|
1507
|
+
const maxOffset = maxOffsetResult.rows[0].max;
|
|
1508
|
+
const nextOffset = maxOffset !== null ? maxOffset + 1 : 0;
|
|
1509
|
+
// Insert the new stream entry
|
|
1510
|
+
await client.query(`INSERT INTO "${this.schemaName}".streams (workflow_uuid, key, value, "offset", function_id, serialization)
|
|
1511
|
+
VALUES ($1, $2, $3, $4, $5, $6)`, [workflowID, key, serializedValue, nextOffset, functionID, serialization]);
|
|
1512
|
+
return undefined;
|
|
1513
|
+
});
|
|
1514
|
+
await client.query('COMMIT');
|
|
1793
1515
|
}
|
|
1794
|
-
|
|
1795
|
-
|
|
1796
|
-
|
|
1797
|
-
|
|
1798
|
-
whereClauses.push(`created_at >= $${paramCounter}`);
|
|
1799
|
-
params.push(new Date(input.startTime).getTime());
|
|
1800
|
-
paramCounter++;
|
|
1516
|
+
catch (e) {
|
|
1517
|
+
this.logger.error(e);
|
|
1518
|
+
await client.query('ROLLBACK');
|
|
1519
|
+
throw e;
|
|
1801
1520
|
}
|
|
1802
|
-
|
|
1803
|
-
|
|
1804
|
-
|
|
1805
|
-
|
|
1521
|
+
finally {
|
|
1522
|
+
client.release();
|
|
1523
|
+
}
|
|
1524
|
+
}
|
|
1525
|
+
async closeStream(workflowID, functionID, key) {
|
|
1526
|
+
await this.writeStreamFromWorkflow(workflowID, functionID, key, exports.DBOS_STREAM_CLOSED_SENTINEL, 'portable_json', exports.DBOS_FUNCNAME_CLOSESTREAM);
|
|
1527
|
+
}
|
|
1528
|
+
async readStream(workflowID, key, offset) {
|
|
1529
|
+
const client = await this.pool.connect();
|
|
1530
|
+
try {
|
|
1531
|
+
const result = await client.query(`SELECT value, serialization FROM "${this.schemaName}".streams
|
|
1532
|
+
WHERE workflow_uuid = $1 AND key = $2 AND "offset" = $3`, [workflowID, key, offset]);
|
|
1533
|
+
if (result.rows.length === 0) {
|
|
1534
|
+
throw new Error(`No value found for workflow_uuid=${workflowID}, key=${key}, offset=${offset}`);
|
|
1535
|
+
}
|
|
1536
|
+
// Deserialize the value before returning
|
|
1537
|
+
const row = result.rows[0];
|
|
1538
|
+
return { serializedValue: row.value, serialization: row.serialization };
|
|
1539
|
+
}
|
|
1540
|
+
finally {
|
|
1541
|
+
client.release();
|
|
1806
1542
|
}
|
|
1807
|
-
addFilter('status', input.status);
|
|
1808
|
-
addFilter('application_version', input.applicationVersion);
|
|
1809
|
-
addFilter('executor_id', input.executorId);
|
|
1810
|
-
const whereClause = whereClauses.length > 0 ? `WHERE ${whereClauses.join(' AND ')}` : '';
|
|
1811
|
-
const orderClause = `ORDER BY created_at ${input.sortDesc ? 'DESC' : 'ASC'}`;
|
|
1812
|
-
const limitClause = input.limit ? `LIMIT ${input.limit}` : '';
|
|
1813
|
-
const offsetClause = input.offset ? `OFFSET ${input.offset}` : '';
|
|
1814
|
-
const query = `
|
|
1815
|
-
SELECT ${selectColumns.join(', ')}
|
|
1816
|
-
FROM "${schemaName}".workflow_status
|
|
1817
|
-
${whereClause}
|
|
1818
|
-
${orderClause}
|
|
1819
|
-
${limitClause}
|
|
1820
|
-
${offsetClause}
|
|
1821
|
-
`;
|
|
1822
|
-
const result = await this.pool.query(query, params);
|
|
1823
|
-
return result.rows.map(mapWorkflowStatus);
|
|
1824
1543
|
}
|
|
1544
|
+
// ==================== Queues ====================
|
|
1825
1545
|
async clearQueueAssignment(workflowID) {
|
|
1826
1546
|
// Reset the status of the task from "PENDING" to "ENQUEUED"
|
|
1827
1547
|
const wqRes = await this.pool.query(`UPDATE "${this.schemaName}".workflow_status
|
|
@@ -1960,75 +1680,125 @@ class PostgresSystemDatabase {
|
|
|
1960
1680
|
// Return the IDs of all functions we marked started
|
|
1961
1681
|
return claimedIDs;
|
|
1962
1682
|
}
|
|
1963
|
-
|
|
1964
|
-
|
|
1965
|
-
|
|
1966
|
-
|
|
1967
|
-
|
|
1968
|
-
|
|
1969
|
-
|
|
1970
|
-
|
|
1971
|
-
|
|
1972
|
-
|
|
1973
|
-
|
|
1974
|
-
|
|
1975
|
-
|
|
1976
|
-
|
|
1683
|
+
// ==================== Queries & Maintenance ====================
|
|
1684
|
+
async listWorkflows(input) {
|
|
1685
|
+
const schemaName = this.schemaName;
|
|
1686
|
+
const selectColumns = [
|
|
1687
|
+
'workflow_uuid',
|
|
1688
|
+
'status',
|
|
1689
|
+
'name',
|
|
1690
|
+
'recovery_attempts',
|
|
1691
|
+
'config_name',
|
|
1692
|
+
'class_name',
|
|
1693
|
+
'authenticated_user',
|
|
1694
|
+
'authenticated_roles',
|
|
1695
|
+
'assumed_role',
|
|
1696
|
+
'queue_name',
|
|
1697
|
+
'executor_id',
|
|
1698
|
+
'created_at',
|
|
1699
|
+
'updated_at',
|
|
1700
|
+
'application_version',
|
|
1701
|
+
'application_id',
|
|
1702
|
+
'workflow_deadline_epoch_ms',
|
|
1703
|
+
'workflow_timeout_ms',
|
|
1704
|
+
'deduplication_id',
|
|
1705
|
+
'priority',
|
|
1706
|
+
'queue_partition_key',
|
|
1707
|
+
'started_at_epoch_ms',
|
|
1708
|
+
'forked_from',
|
|
1709
|
+
'parent_workflow_id',
|
|
1710
|
+
];
|
|
1711
|
+
input.loadInput = input.loadInput ?? true;
|
|
1712
|
+
input.loadOutput = input.loadOutput ?? true;
|
|
1713
|
+
if (input.loadInput) {
|
|
1714
|
+
selectColumns.push('inputs', 'request');
|
|
1977
1715
|
}
|
|
1978
|
-
|
|
1979
|
-
|
|
1980
|
-
await client.query('ROLLBACK');
|
|
1981
|
-
throw e;
|
|
1716
|
+
if (input.loadOutput) {
|
|
1717
|
+
selectColumns.push('output', 'error');
|
|
1982
1718
|
}
|
|
1983
|
-
|
|
1984
|
-
|
|
1719
|
+
if (input.loadInput || input.loadOutput) {
|
|
1720
|
+
selectColumns.push('serialization');
|
|
1985
1721
|
}
|
|
1986
|
-
|
|
1987
|
-
|
|
1988
|
-
const
|
|
1989
|
-
|
|
1990
|
-
|
|
1991
|
-
|
|
1992
|
-
|
|
1993
|
-
|
|
1994
|
-
|
|
1995
|
-
|
|
1996
|
-
|
|
1997
|
-
const
|
|
1998
|
-
|
|
1999
|
-
|
|
2000
|
-
|
|
2001
|
-
|
|
2002
|
-
|
|
2003
|
-
|
|
1722
|
+
input.sortDesc = input.sortDesc ?? false; // By default, sort in ascending order
|
|
1723
|
+
// Build WHERE clauses
|
|
1724
|
+
const whereClauses = [];
|
|
1725
|
+
const params = [];
|
|
1726
|
+
let paramCounter = 1;
|
|
1727
|
+
// Helper: add a filter for a field that may be a single value or an array.
|
|
1728
|
+
// Uses = for a single value, IN (...) for an array.
|
|
1729
|
+
const addFilter = (column, value) => {
|
|
1730
|
+
if (!value)
|
|
1731
|
+
return;
|
|
1732
|
+
if (Array.isArray(value)) {
|
|
1733
|
+
const placeholders = value.map((_, i) => `$${paramCounter + i}`).join(', ');
|
|
1734
|
+
whereClauses.push(`${column} IN (${placeholders})`);
|
|
1735
|
+
params.push(...value);
|
|
1736
|
+
paramCounter += value.length;
|
|
1737
|
+
}
|
|
1738
|
+
else {
|
|
1739
|
+
whereClauses.push(`${column} = $${paramCounter}`);
|
|
1740
|
+
params.push(value);
|
|
1741
|
+
paramCounter++;
|
|
1742
|
+
}
|
|
1743
|
+
};
|
|
1744
|
+
// If queuesOnly, filter for queued workflows
|
|
1745
|
+
if (input.queuesOnly) {
|
|
1746
|
+
whereClauses.push(`queue_name IS NOT NULL`);
|
|
1747
|
+
whereClauses.push(`status IN ($${paramCounter}, $${paramCounter + 1})`);
|
|
1748
|
+
params.push(workflow_1.StatusString.ENQUEUED, workflow_1.StatusString.PENDING);
|
|
1749
|
+
paramCounter += 2;
|
|
2004
1750
|
}
|
|
2005
|
-
|
|
2006
|
-
|
|
2007
|
-
|
|
2008
|
-
|
|
1751
|
+
addFilter('name', input.workflowName);
|
|
1752
|
+
addFilter('queue_name', input.queueName);
|
|
1753
|
+
if (input.workflow_id_prefix) {
|
|
1754
|
+
if (Array.isArray(input.workflow_id_prefix)) {
|
|
1755
|
+
const likeClauses = input.workflow_id_prefix.map((_, i) => `workflow_uuid LIKE $${paramCounter + i}`);
|
|
1756
|
+
whereClauses.push(`(${likeClauses.join(' OR ')})`);
|
|
1757
|
+
params.push(...input.workflow_id_prefix.map((p) => `${p}%`));
|
|
1758
|
+
paramCounter += input.workflow_id_prefix.length;
|
|
1759
|
+
}
|
|
1760
|
+
else {
|
|
1761
|
+
whereClauses.push(`workflow_uuid LIKE $${paramCounter}`);
|
|
1762
|
+
params.push(`${input.workflow_id_prefix}%`);
|
|
1763
|
+
paramCounter++;
|
|
1764
|
+
}
|
|
2009
1765
|
}
|
|
2010
|
-
|
|
2011
|
-
|
|
1766
|
+
if (input.workflowIDs) {
|
|
1767
|
+
const placeholders = input.workflowIDs.map((_, i) => `$${paramCounter + i}`).join(', ');
|
|
1768
|
+
whereClauses.push(`workflow_uuid IN (${placeholders})`);
|
|
1769
|
+
params.push(...input.workflowIDs);
|
|
1770
|
+
paramCounter += input.workflowIDs.length;
|
|
2012
1771
|
}
|
|
2013
|
-
|
|
2014
|
-
|
|
2015
|
-
|
|
2016
|
-
|
|
2017
|
-
|
|
2018
|
-
|
|
2019
|
-
|
|
2020
|
-
const result = await client.query(`SELECT value, serialization FROM "${this.schemaName}".streams
|
|
2021
|
-
WHERE workflow_uuid = $1 AND key = $2 AND "offset" = $3`, [workflowID, key, offset]);
|
|
2022
|
-
if (result.rows.length === 0) {
|
|
2023
|
-
throw new Error(`No value found for workflow_uuid=${workflowID}, key=${key}, offset=${offset}`);
|
|
2024
|
-
}
|
|
2025
|
-
// Deserialize the value before returning
|
|
2026
|
-
const row = result.rows[0];
|
|
2027
|
-
return { serializedValue: row.value, serialization: row.serialization };
|
|
1772
|
+
addFilter('authenticated_user', input.authenticatedUser);
|
|
1773
|
+
addFilter('forked_from', input.forkedFrom);
|
|
1774
|
+
addFilter('parent_workflow_id', input.parentWorkflowID);
|
|
1775
|
+
if (input.startTime) {
|
|
1776
|
+
whereClauses.push(`created_at >= $${paramCounter}`);
|
|
1777
|
+
params.push(new Date(input.startTime).getTime());
|
|
1778
|
+
paramCounter++;
|
|
2028
1779
|
}
|
|
2029
|
-
|
|
2030
|
-
|
|
1780
|
+
if (input.endTime) {
|
|
1781
|
+
whereClauses.push(`created_at <= $${paramCounter}`);
|
|
1782
|
+
params.push(new Date(input.endTime).getTime());
|
|
1783
|
+
paramCounter++;
|
|
2031
1784
|
}
|
|
1785
|
+
addFilter('status', input.status);
|
|
1786
|
+
addFilter('application_version', input.applicationVersion);
|
|
1787
|
+
addFilter('executor_id', input.executorId);
|
|
1788
|
+
const whereClause = whereClauses.length > 0 ? `WHERE ${whereClauses.join(' AND ')}` : '';
|
|
1789
|
+
const orderClause = `ORDER BY created_at ${input.sortDesc ? 'DESC' : 'ASC'}`;
|
|
1790
|
+
const limitClause = input.limit ? `LIMIT ${input.limit}` : '';
|
|
1791
|
+
const offsetClause = input.offset ? `OFFSET ${input.offset}` : '';
|
|
1792
|
+
const query = `
|
|
1793
|
+
SELECT ${selectColumns.join(', ')}
|
|
1794
|
+
FROM "${schemaName}".workflow_status
|
|
1795
|
+
${whereClause}
|
|
1796
|
+
${orderClause}
|
|
1797
|
+
${limitClause}
|
|
1798
|
+
${offsetClause}
|
|
1799
|
+
`;
|
|
1800
|
+
const result = await this.pool.query(query, params);
|
|
1801
|
+
return result.rows.map(mapWorkflowStatus);
|
|
2032
1802
|
}
|
|
2033
1803
|
async garbageCollect(cutoffEpochTimestampMs, rowsThreshold) {
|
|
2034
1804
|
if (rowsThreshold !== undefined) {
|
|
@@ -2055,92 +1825,36 @@ class PostgresSystemDatabase {
|
|
|
2055
1825
|
return;
|
|
2056
1826
|
}
|
|
2057
1827
|
async getMetrics(startTime, endTime) {
|
|
2058
|
-
const startEpochMs = new Date(startTime).getTime();
|
|
2059
|
-
const endEpochMs = new Date(endTime).getTime();
|
|
2060
|
-
const metrics = [];
|
|
2061
|
-
// Query workflow metrics
|
|
2062
|
-
const workflowResult = await this.pool.query(`SELECT name, COUNT(workflow_uuid) as count
|
|
2063
|
-
FROM "${this.schemaName}".workflow_status
|
|
2064
|
-
WHERE created_at >= $1 AND created_at < $2
|
|
2065
|
-
GROUP BY name`, [startEpochMs, endEpochMs]);
|
|
2066
|
-
for (const row of workflowResult.rows) {
|
|
2067
|
-
metrics.push({
|
|
2068
|
-
metricType: 'workflow_count',
|
|
2069
|
-
metricName: row.name,
|
|
2070
|
-
value: Number(row.count),
|
|
2071
|
-
});
|
|
2072
|
-
}
|
|
2073
|
-
// Query step metrics
|
|
2074
|
-
const stepResult = await this.pool.query(`SELECT function_name, COUNT(*) as count
|
|
2075
|
-
FROM "${this.schemaName}".operation_outputs
|
|
2076
|
-
WHERE completed_at_epoch_ms >= $1 AND completed_at_epoch_ms < $2
|
|
2077
|
-
GROUP BY function_name`, [startEpochMs, endEpochMs]);
|
|
2078
|
-
for (const row of stepResult.rows) {
|
|
2079
|
-
metrics.push({
|
|
2080
|
-
metricType: 'step_count',
|
|
2081
|
-
metricName: row.function_name,
|
|
2082
|
-
value: Number(row.count),
|
|
2083
|
-
});
|
|
2084
|
-
}
|
|
2085
|
-
return metrics;
|
|
2086
|
-
}
|
|
2087
|
-
async checkPatch(workflowID, functionID, patchName, deprecated) {
|
|
2088
|
-
// Not doing a cancel check at this point.
|
|
2089
|
-
if (functionID === undefined)
|
|
2090
|
-
throw new TypeError('functionID must be defined');
|
|
2091
|
-
patchName = `DBOS.patch-${patchName}`;
|
|
2092
|
-
const { rows } = await this.pool.query(`SELECT function_name
|
|
2093
|
-
FROM "${this.schemaName}".operation_outputs
|
|
2094
|
-
WHERE workflow_uuid=$1 AND function_id=$2`, [workflowID, functionID]);
|
|
2095
|
-
if (deprecated) {
|
|
2096
|
-
// Deprecated does not write anything. We skip any existing matching patch marker if it matches
|
|
2097
|
-
if (rows.length === 0) {
|
|
2098
|
-
return { isPatched: true, hasEntry: false };
|
|
2099
|
-
}
|
|
2100
|
-
return { isPatched: true, hasEntry: rows[0].function_name === patchName };
|
|
2101
|
-
}
|
|
2102
|
-
// Nondeprecated - skip matching entry, unpatched if nonmatching entry,
|
|
2103
|
-
// If there is no entry, we insert one that indicates it is patched.
|
|
2104
|
-
if (rows.length !== 0) {
|
|
2105
|
-
if (rows[0].function_name === patchName) {
|
|
2106
|
-
return { isPatched: true, hasEntry: true };
|
|
2107
|
-
}
|
|
2108
|
-
return { isPatched: false, hasEntry: false };
|
|
2109
|
-
}
|
|
2110
|
-
// Insert a patchmarker
|
|
2111
|
-
const dn = Date.now();
|
|
2112
|
-
await this.pool.query(`INSERT INTO ${this.schemaName}.operation_outputs
|
|
2113
|
-
(workflow_uuid, function_id, output, error, function_name, child_workflow_id, started_at_epoch_ms, completed_at_epoch_ms)
|
|
2114
|
-
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
|
2115
|
-
ON CONFLICT DO NOTHING;`, [workflowID, functionID, null, null, patchName, null, dn, dn]);
|
|
2116
|
-
return { isPatched: true, hasEntry: true };
|
|
2117
|
-
}
|
|
2118
|
-
async runTransactionalStep(workflowID, functionID, functionName, callback) {
|
|
2119
|
-
const client = await this.pool.connect();
|
|
2120
|
-
try {
|
|
2121
|
-
await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
|
|
2122
|
-
const existing = await this.#getOperationResultAndThrowIfCancelled(client, workflowID, functionID);
|
|
2123
|
-
if (existing !== undefined) {
|
|
2124
|
-
await client.query('ROLLBACK');
|
|
2125
|
-
return existing;
|
|
2126
|
-
}
|
|
2127
|
-
const startTime = Date.now();
|
|
2128
|
-
const output = await callback(client);
|
|
2129
|
-
await recordOperationResult(client, workflowID, functionID, functionName, true, this.schemaName, startTime, Date.now(), {
|
|
2130
|
-
output,
|
|
1828
|
+
const startEpochMs = new Date(startTime).getTime();
|
|
1829
|
+
const endEpochMs = new Date(endTime).getTime();
|
|
1830
|
+
const metrics = [];
|
|
1831
|
+
// Query workflow metrics
|
|
1832
|
+
const workflowResult = await this.pool.query(`SELECT name, COUNT(workflow_uuid) as count
|
|
1833
|
+
FROM "${this.schemaName}".workflow_status
|
|
1834
|
+
WHERE created_at >= $1 AND created_at < $2
|
|
1835
|
+
GROUP BY name`, [startEpochMs, endEpochMs]);
|
|
1836
|
+
for (const row of workflowResult.rows) {
|
|
1837
|
+
metrics.push({
|
|
1838
|
+
metricType: 'workflow_count',
|
|
1839
|
+
metricName: row.name,
|
|
1840
|
+
value: Number(row.count),
|
|
2131
1841
|
});
|
|
2132
|
-
await client.query('COMMIT');
|
|
2133
|
-
return undefined;
|
|
2134
|
-
}
|
|
2135
|
-
catch (e) {
|
|
2136
|
-
await client.query('ROLLBACK');
|
|
2137
|
-
throw e;
|
|
2138
1842
|
}
|
|
2139
|
-
|
|
2140
|
-
|
|
1843
|
+
// Query step metrics
|
|
1844
|
+
const stepResult = await this.pool.query(`SELECT function_name, COUNT(*) as count
|
|
1845
|
+
FROM "${this.schemaName}".operation_outputs
|
|
1846
|
+
WHERE completed_at_epoch_ms >= $1 AND completed_at_epoch_ms < $2
|
|
1847
|
+
GROUP BY function_name`, [startEpochMs, endEpochMs]);
|
|
1848
|
+
for (const row of stepResult.rows) {
|
|
1849
|
+
metrics.push({
|
|
1850
|
+
metricType: 'step_count',
|
|
1851
|
+
metricName: row.function_name,
|
|
1852
|
+
value: Number(row.count),
|
|
1853
|
+
});
|
|
2141
1854
|
}
|
|
1855
|
+
return metrics;
|
|
2142
1856
|
}
|
|
2143
|
-
//
|
|
1857
|
+
// ==================== Scheduling ====================
|
|
2144
1858
|
async createSchedule(schedule, client) {
|
|
2145
1859
|
const q = client ?? this.pool;
|
|
2146
1860
|
try {
|
|
@@ -2255,144 +1969,483 @@ class PostgresSystemDatabase {
|
|
|
2255
1969
|
client.release();
|
|
2256
1970
|
}
|
|
2257
1971
|
}
|
|
1972
|
+
// ==================== Internal ====================
/**
 * Insert the workflow_status row for a workflow invocation, upserting if a row
 * with the same workflow_uuid already exists (e.g. recovery or a repeated start
 * of the same workflow ID).
 *
 * On conflict the SQL only:
 *  - bumps recovery_attempts by $25 when the stored status is not ENQUEUED,
 *  - refreshes updated_at,
 *  - takes the new executor_id when the incoming status is not ENQUEUED.
 * All other stored columns are left untouched.
 *
 * Note: $25 is intentionally absent from the VALUES list — the parameter array
 * has 27 entries but only 26 columns; $25 (the attempt increment, 0 or 1) is
 * consumed solely by the ON CONFLICT recovery_attempts CASE expression.
 *
 * @param client - a pg client/pool-like object exposing query()
 * @param initStatus - the initial workflow status record to persist
 * @param ownerXid - owner transaction id stored in the owner_xid column
 * @param incrementAttempts - when true, adds 1 to recovery_attempts on conflict
 * @returns the RETURNING row (recovery_attempts, status, name, class_name,
 *          config_name, queue_name, workflow_deadline_epoch_ms, executor_id,
 *          owner_xid, serialization) with class_name/config_name normalized to ''
 * @throws DBOSQueueDuplicatedError on Postgres unique_violation (23505) —
 *         presumably from a deduplication_id uniqueness constraint, since
 *         workflow_uuid conflicts are absorbed by ON CONFLICT; verify upstream.
 */
async insertWorkflowStatus(client, initStatus, ownerXid, incrementAttempts = false) {
    try {
        const { rows } = await client.query(`INSERT INTO "${this.schemaName}".workflow_status (
        workflow_uuid,
        status,
        name,
        class_name,
        config_name,
        queue_name,
        authenticated_user,
        assumed_role,
        authenticated_roles,
        request,
        executor_id,
        application_version,
        application_id,
        created_at,
        recovery_attempts,
        updated_at,
        workflow_timeout_ms,
        workflow_deadline_epoch_ms,
        inputs,
        deduplication_id,
        priority,
        queue_partition_key,
        forked_from,
        parent_workflow_id,
        serialization,
        owner_xid
      ) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $26, $27)
      ON CONFLICT (workflow_uuid)
      DO UPDATE SET
        recovery_attempts = CASE
          WHEN workflow_status.status != '${workflow_1.StatusString.ENQUEUED}'
          THEN workflow_status.recovery_attempts + $25
          ELSE workflow_status.recovery_attempts
        END,
        updated_at = EXCLUDED.updated_at,
        executor_id = CASE
          WHEN EXCLUDED.status != '${workflow_1.StatusString.ENQUEUED}'
          THEN EXCLUDED.executor_id
          ELSE workflow_status.executor_id
        END
      RETURNING recovery_attempts, status, name, class_name, config_name, queue_name, workflow_deadline_epoch_ms, executor_id, owner_xid, serialization`, [
            initStatus.workflowUUID,
            initStatus.status,
            initStatus.workflowName,
            // For cross-language compatibility, these variables MUST be NULL in the database when not set
            initStatus.workflowClassName === '' ? null : initStatus.workflowClassName,
            initStatus.workflowConfigName === '' ? null : initStatus.workflowConfigName,
            initStatus.queueName ?? null,
            initStatus.authenticatedUser,
            initStatus.assumedRole,
            JSON.stringify(initStatus.authenticatedRoles),
            JSON.stringify(initStatus.request),
            initStatus.executorId,
            initStatus.applicationVersion ?? null,
            initStatus.applicationID,
            initStatus.createdAt,
            // An ENQUEUED workflow has not been attempted yet; anything else counts as attempt 1.
            initStatus.status === workflow_1.StatusString.ENQUEUED ? 0 : 1,
            initStatus.updatedAt ?? Date.now(),
            initStatus.timeoutMS ?? null,
            initStatus.deadlineEpochMS ?? null,
            initStatus.input ?? null,
            initStatus.deduplicationID ?? null,
            initStatus.priority,
            initStatus.queuePartitionKey ?? null,
            initStatus.forkedFrom ?? null,
            initStatus.parentWorkflowID ?? null,
            // $25: ON CONFLICT-only attempt increment (see note in the doc comment above).
            (incrementAttempts ?? false) ? 1 : 0,
            initStatus.serialization,
            ownerXid,
        ]);
        if (rows.length === 0) {
            throw new Error(`Attempt to insert workflow ${initStatus.workflowUUID} failed`);
        }
        const ret = rows[0];
        // Normalize NULL class/config names back to '' for in-process consumers.
        ret.class_name = ret.class_name ?? '';
        ret.config_name = ret.config_name ?? '';
        // Propagate the stored serialization format back to the caller's record.
        initStatus.serialization = ret.serialization;
        return ret;
    }
    catch (error) {
        const err = error;
        if (err.code === '23505') {
            // Postgres unique_violation: treated as a queue deduplication conflict.
            throw new error_1.DBOSQueueDuplicatedError(initStatus.workflowUUID, initStatus.queueName ?? '', initStatus.deduplicationID ?? '');
        }
        throw error;
    }
}
|
|
2063
|
+
async getWorkflowStatusValue(client, workflowID) {
|
|
2064
|
+
const { rows } = await client.query(`SELECT status FROM "${this.schemaName}".workflow_status WHERE workflow_uuid=$1`, [workflowID]);
|
|
2065
|
+
return rows.length === 0 ? undefined : rows[0].status;
|
|
2066
|
+
}
|
|
2067
|
+
async updateWorkflowStatus(client, workflowID, status, options = {}) {
|
|
2068
|
+
let setClause = `SET status=$2, updated_at=$3`;
|
|
2069
|
+
let whereClause = `WHERE workflow_uuid=$1`;
|
|
2070
|
+
const args = [workflowID, status, Date.now()];
|
|
2071
|
+
const update = options.update ?? {};
|
|
2072
|
+
if (update.output) {
|
|
2073
|
+
const param = args.push(update.output);
|
|
2074
|
+
setClause += `, output=$${param}`;
|
|
2075
|
+
}
|
|
2076
|
+
if (update.error) {
|
|
2077
|
+
const param = args.push(update.error);
|
|
2078
|
+
setClause += `, error=$${param}`;
|
|
2079
|
+
}
|
|
2080
|
+
if (update.resetRecoveryAttempts) {
|
|
2081
|
+
setClause += `, recovery_attempts = 0`;
|
|
2082
|
+
}
|
|
2083
|
+
if (update.resetDeadline) {
|
|
2084
|
+
setClause += `, workflow_deadline_epoch_ms = NULL`;
|
|
2085
|
+
}
|
|
2086
|
+
if (update.queueName !== undefined) {
|
|
2087
|
+
const param = args.push(update.queueName ?? undefined);
|
|
2088
|
+
setClause += `, queue_name=$${param}`;
|
|
2089
|
+
}
|
|
2090
|
+
if (update.resetDeduplicationID) {
|
|
2091
|
+
setClause += `, deduplication_id = NULL`;
|
|
2092
|
+
}
|
|
2093
|
+
if (update.resetStartedAtEpochMs) {
|
|
2094
|
+
setClause += `, started_at_epoch_ms = NULL`;
|
|
2095
|
+
}
|
|
2096
|
+
if (update.executorId !== undefined) {
|
|
2097
|
+
const param = args.push(update.executorId ?? undefined);
|
|
2098
|
+
setClause += `, executor_id=$${param}`;
|
|
2099
|
+
}
|
|
2100
|
+
if (update.resetNameTo !== undefined) {
|
|
2101
|
+
const param = args.push(update.resetNameTo ?? undefined);
|
|
2102
|
+
setClause += `, name=$${param}`;
|
|
2103
|
+
}
|
|
2104
|
+
const where = options.where ?? {};
|
|
2105
|
+
if (where.status) {
|
|
2106
|
+
const param = args.push(where.status);
|
|
2107
|
+
whereClause += ` AND status=$${param}`;
|
|
2108
|
+
}
|
|
2109
|
+
const result = await client.query(`UPDATE "${this.schemaName}".workflow_status ${setClause} ${whereClause}`, args);
|
|
2110
|
+
const throwOnFailure = options.throwOnFailure ?? true;
|
|
2111
|
+
if (throwOnFailure && result.rowCount !== 1) {
|
|
2112
|
+
throw new error_1.DBOSWorkflowConflictError(`Attempt to record transition of nonexistent workflow ${workflowID}`);
|
|
2113
|
+
}
|
|
2114
|
+
}
|
|
2115
|
+
/**
 * Record the checkpointed result of one workflow operation (step/function) in
 * operation_outputs. The insert is idempotent via ON CONFLICT DO NOTHING.
 *
 * @param client - a pg client/pool-like object exposing query()
 * @param workflowID - workflow UUID owning the operation
 * @param functionID - monotonically assigned function index within the workflow
 * @param functionName - recorded operation name (checked on replay elsewhere)
 * @param checkConflict - when true, raise DBOSWorkflowConflictError on a
 *        conflicting prior record for this (workflowID, functionID)
 * @param startTimeEpochMs / endTimeEpochMs - operation wall-clock bounds
 * @param options - optional output, error, childWorkflowID, serialization
 * @throws DBOSWorkflowConflictError on serialization failure (40001) or
 *         unique violation (23505) from Postgres
 */
async recordOperationResultInternal(client, workflowID, functionID, functionName, checkConflict, startTimeEpochMs, endTimeEpochMs, options = {}) {
    try {
        const out = await client.query(`INSERT INTO ${this.schemaName}.operation_outputs
    (workflow_uuid, function_id, output, error, function_name, child_workflow_id, started_at_epoch_ms, completed_at_epoch_ms, serialization)
    VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
    ON CONFLICT DO NOTHING RETURNING completed_at_epoch_ms;`, [
            workflowID,
            functionID,
            options.output ?? null,
            options.error ?? null,
            functionName,
            options.childWorkflowID ?? null,
            startTimeEpochMs,
            endTimeEpochMs,
            options.serialization ?? null,
        ]);
        // NOTE(review): with `ON CONFLICT DO NOTHING RETURNING`, rows come back only
        // for rows actually inserted, so rowCount > 0 implies the insert just
        // succeeded and the returned completed_at_epoch_ms should equal
        // endTimeEpochMs. Verify upstream whether this condition is inverted
        // (rowCount === 0 is what indicates a pre-existing record).
        if (checkConflict &&
            (out?.rowCount ?? 0) > 0 &&
            Number(out?.rows?.[0]?.completed_at_epoch_ms) !== endTimeEpochMs) {
            dbos_executor_1.DBOSExecutor.globalInstance?.logger.warn(`Step output for ${workflowID}(${functionID}):${functionName} already recorded`);
            throw new error_1.DBOSWorkflowConflictError(workflowID);
        }
    }
    catch (error) {
        const err = error;
        if (err.code === '40001' || err.code === '23505') {
            // Serialization and primary key conflict (Postgres).
            throw new error_1.DBOSWorkflowConflictError(workflowID);
        }
        else {
            throw err;
        }
    }
}
|
|
2149
|
+
async #getOperationResultAndThrowIfCancelled(client, workflowID, functionID) {
|
|
2150
|
+
await this.#checkIfCanceled(client, workflowID);
|
|
2151
|
+
const { rows } = await client.query(`SELECT output, error, child_workflow_id, function_name
|
|
2152
|
+
FROM "${this.schemaName}".operation_outputs
|
|
2153
|
+
WHERE workflow_uuid=$1 AND function_id=$2`, [workflowID, functionID]);
|
|
2154
|
+
if (rows.length === 0) {
|
|
2155
|
+
return undefined;
|
|
2156
|
+
}
|
|
2157
|
+
else {
|
|
2158
|
+
return {
|
|
2159
|
+
output: rows[0].output,
|
|
2160
|
+
error: rows[0].error,
|
|
2161
|
+
childWorkflowID: rows[0].child_workflow_id,
|
|
2162
|
+
functionName: rows[0].function_name,
|
|
2163
|
+
};
|
|
2164
|
+
}
|
|
2165
|
+
}
|
|
2166
|
+
async #runAndRecordResult(client, functionName, workflowID, functionID, func) {
|
|
2167
|
+
const startTime = Date.now();
|
|
2168
|
+
const result = await this.#getOperationResultAndThrowIfCancelled(client, workflowID, functionID);
|
|
2169
|
+
if (result !== undefined) {
|
|
2170
|
+
if (result.functionName !== functionName) {
|
|
2171
|
+
throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, functionName, result.functionName);
|
|
2172
|
+
}
|
|
2173
|
+
return result.output;
|
|
2174
|
+
}
|
|
2175
|
+
const output = await func();
|
|
2176
|
+
await this.recordOperationResultInternal(client, workflowID, functionID, functionName, true, startTime, Date.now(), {
|
|
2177
|
+
output,
|
|
2178
|
+
});
|
|
2179
|
+
return output;
|
|
2180
|
+
}
|
|
2181
|
+
#setWFCancelMap(workflowID) {
|
|
2182
|
+
if (this.runningWorkflowMap.has(workflowID)) {
|
|
2183
|
+
this.workflowCancellationMap.set(workflowID, true);
|
|
2184
|
+
}
|
|
2185
|
+
this.cancelWakeupMap.callCallbacks(workflowID);
|
|
2186
|
+
}
|
|
2187
|
+
#clearWFCancelMap(workflowID) {
|
|
2188
|
+
if (this.workflowCancellationMap.has(workflowID)) {
|
|
2189
|
+
this.workflowCancellationMap.delete(workflowID);
|
|
2190
|
+
}
|
|
2191
|
+
}
|
|
2192
|
+
async #checkIfCanceled(client, workflowID) {
|
|
2193
|
+
if (this.workflowCancellationMap.get(workflowID) === true) {
|
|
2194
|
+
throw new error_1.DBOSWorkflowCancelledError(workflowID);
|
|
2195
|
+
}
|
|
2196
|
+
const statusValue = await this.getWorkflowStatusValue(client, workflowID);
|
|
2197
|
+
if (statusValue === workflow_1.StatusString.CANCELLED) {
|
|
2198
|
+
throw new error_1.DBOSWorkflowCancelledError(workflowID);
|
|
2199
|
+
}
|
|
2200
|
+
}
|
|
2201
|
+
/**
 * A durable (replay-safe) sleep: the absolute wake-up time is checkpointed as a
 * DBOS_FUNCNAME_SLEEP operation, so a recovered workflow sleeps only for the
 * remaining time rather than restarting the full duration.
 *
 * @param workflowID - workflow UUID owning the sleep
 * @param functionID - function index for the sleep checkpoint
 * @param durationMS - requested sleep duration in milliseconds
 * @param maxSleepPerIteration - cap on a single sleep interval; defaults to durationMS
 * @returns the cancellableSleep handle spread together with endTime (absolute
 *          wake-up epoch ms)
 */
async #durableSleep(workflowID, functionID, durationMS, maxSleepPerIteration) {
    if (maxSleepPerIteration === undefined)
        maxSleepPerIteration = durationMS;
    const curTime = Date.now();
    let endTimeMs = curTime + durationMS;
    const client = await this.pool.connect();
    try {
        const res = await this.#getOperationResultAndThrowIfCancelled(client, workflowID, functionID);
        if (res) {
            // Replay: the checkpoint must be a sleep; reuse its recorded end time.
            if (res.functionName !== exports.DBOS_FUNCNAME_SLEEP) {
                throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, exports.DBOS_FUNCNAME_SLEEP, res.functionName);
            }
            // NOTE(review): parsed with JSON.parse though written via
            // DBOSPortableJSON.stringify below — presumably compatible for a plain
            // number; confirm against the serialization module.
            endTimeMs = JSON.parse(res.output);
        }
        else {
            // First execution: checkpoint the absolute wake-up time.
            await this.recordOperationResultInternal(client, workflowID, functionID, exports.DBOS_FUNCNAME_SLEEP, false, Date.now(), Date.now(), {
                output: serialization_1.DBOSPortableJSON.stringify(endTimeMs),
                serialization: serialization_1.DBOSPortableJSON.name(),
            });
        }
        return {
            // Sleep no longer than maxSleepPerIteration, and never a negative duration.
            ...(0, utils_1.cancellableSleep)(Math.max(Math.min(maxSleepPerIteration, endTimeMs - curTime), 0)),
            endTime: endTimeMs,
        };
    }
    finally {
        client.release();
    }
}
|
|
2230
|
+
/* BACKGROUND PROCESSES */
/**
 * A background process that listens for notifications from Postgres then signals the appropriate
 * workflow listener by resolving its promise.
 */
// Pending reconnect timer handle; non-null while a reconnect is already scheduled.
reconnectTimeout = null;
async #listenForNotifications() {
    const connect = async () => {
        // Schedule a single reconnect attempt 1s out; coalesces concurrent failures.
        const reconnect = () => {
            if (this.reconnectTimeout) {
                return;
            }
            this.reconnectTimeout = setTimeout(async () => {
                this.reconnectTimeout = null;
                await connect();
            }, 1000);
        };
        let client = null;
        try {
            // Dedicate one pooled connection to LISTEN; it is held until error/release.
            client = await this.pool.connect();
            await client.query('LISTEN dbos_notifications_channel;');
            await client.query('LISTEN dbos_workflow_events_channel;');
            // Self-test: verify LISTEN actually works by sending a NOTIFY and checking it arrives.
            // If a transaction-mode pooler (e.g. PgBouncer pool_mode=transaction) is in the path,
            // LISTEN succeeds but the subscription is silently lost when the backend is released.
            let selfTestReceived = false;
            const onSelfTest = (msg) => {
                if (msg.channel === 'dbos_notifications_channel' && msg.payload === 'dbos_listen_selftest') {
                    selfTestReceived = true;
                }
            };
            client.on('notification', onSelfTest);
            await this.pool.query("NOTIFY dbos_notifications_channel, 'dbos_listen_selftest'");
            // Poll up to 30 x 100ms (3s) for the self-test notification.
            for (let i = 0; i < 30 && !selfTestReceived; i++) {
                await new Promise((r) => setTimeout(r, 100));
            }
            client.removeListener('notification', onSelfTest);
            if (!selfTestReceived) {
                this.logger.warn('LISTEN/NOTIFY self-test failed: notification was not received within 3 seconds. ' +
                    'This typically means the connection is going through a transaction-mode pooler ' +
                    '(e.g. PgBouncer with pool_mode=transaction), which silently breaks LISTEN/NOTIFY. ' +
                    'Workflow notifications will fall back to polling, which may increase latency.');
            }
            // Route payloads to the per-workflow callback maps.
            const handler = (msg) => {
                if (!this.shouldUseDBNotifications)
                    return;
                if (msg.channel === 'dbos_notifications_channel' && msg.payload) {
                    this.notificationsMap.callCallbacks(msg.payload);
                }
                else if (msg.channel === 'dbos_workflow_events_channel' && msg.payload) {
                    this.workflowEventsMap.callCallbacks(msg.payload);
                }
            };
            client.on('notification', handler);
            client.on('error', (err) => {
                this.logger.warn(`Error in notifications client: ${err}`);
                if (client) {
                    client.removeAllListeners();
                    // release(true) destroys the (possibly broken) connection instead of reusing it.
                    client.release(true);
                }
                reconnect();
            });
            this.notificationsClient = client;
        }
        catch (error) {
            this.logger.warn(`Error in notifications listener: ${String(error)}`);
            if (client) {
                client.removeAllListeners();
                client.release(true);
            }
            reconnect();
        }
    };
    await connect();
}
|
|
2258
2305
|
}
|
|
2259
|
-
exports.SystemDatabase = SystemDatabase;
// Auto-generated decorator metadata (tsc with emitDecoratorMetadata): each call
// applies the dbRetry() decorator to a SystemDatabase method and records its
// design-time parameter/return types. Do not edit by hand.
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [Object, Object, Object]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "initWorkflowStatus", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, Object]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "recordWorkflowOutput", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, Object]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "recordWorkflowError", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, String, Number]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "getWorkflowStatus", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, Number]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "getOperationResultAndThrowIfCancelled", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, Number, String, Boolean, Number, Object]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "recordOperationResult", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, Number, String, Boolean]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "checkPatch", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "checkIfCanceled", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, Number, String, Number]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "awaitWorkflowResult", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [Array, String]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "awaitFirstWorkflowId", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, Number, Number]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "durableSleepms", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, Number, String, Object, Object, Object, String]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "send", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, Object, Object, Object, String]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "sendDirect", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, Number, Number, String, Number]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "recv", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, Number, String, Object, Object]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "setEvent", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, String, Number, Object]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "getEvent", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, String, String]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "getEventDispatchState", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [Object]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "upsertEventDispatchState", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, Number, String, String, Object]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "writeStreamFromStep", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, Number, String, String, Object, String]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "writeStreamFromWorkflow", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, String, Number]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "readStream", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, String]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "getDeduplicatedWorkflow", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "getQueuePartitions", null);
__decorate([
    dbRetry(),
    __metadata("design:type", Function),
    __metadata("design:paramtypes", [String, String]),
    __metadata("design:returntype", Promise)
], SystemDatabase.prototype, "getMetrics", null);
//# sourceMappingURL=system_database.js.map
|