@dbos-inc/dbos-sdk 2.9.2-preview → 2.9.17-preview

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. package/dist/src/client.d.ts +12 -1
  2. package/dist/src/client.d.ts.map +1 -1
  3. package/dist/src/client.js +31 -7
  4. package/dist/src/client.js.map +1 -1
  5. package/dist/src/conductor/conductor.d.ts.map +1 -1
  6. package/dist/src/conductor/conductor.js +26 -17
  7. package/dist/src/conductor/conductor.js.map +1 -1
  8. package/dist/src/conductor/protocol.d.ts +21 -5
  9. package/dist/src/conductor/protocol.d.ts.map +1 -1
  10. package/dist/src/conductor/protocol.js +25 -6
  11. package/dist/src/conductor/protocol.js.map +1 -1
  12. package/dist/src/dbos-executor.d.ts +26 -16
  13. package/dist/src/dbos-executor.d.ts.map +1 -1
  14. package/dist/src/dbos-executor.js +91 -111
  15. package/dist/src/dbos-executor.js.map +1 -1
  16. package/dist/src/dbos-runtime/cli.d.ts.map +1 -1
  17. package/dist/src/dbos-runtime/cli.js +89 -15
  18. package/dist/src/dbos-runtime/cli.js.map +1 -1
  19. package/dist/src/dbos-runtime/config.js +1 -1
  20. package/dist/src/dbos-runtime/config.js.map +1 -1
  21. package/dist/src/dbos-runtime/workflow_management.d.ts +13 -20
  22. package/dist/src/dbos-runtime/workflow_management.d.ts.map +1 -1
  23. package/dist/src/dbos-runtime/workflow_management.js +90 -120
  24. package/dist/src/dbos-runtime/workflow_management.js.map +1 -1
  25. package/dist/src/dbos.d.ts +42 -11
  26. package/dist/src/dbos.d.ts.map +1 -1
  27. package/dist/src/dbos.js +82 -31
  28. package/dist/src/dbos.js.map +1 -1
  29. package/dist/src/error.d.ts +18 -6
  30. package/dist/src/error.d.ts.map +1 -1
  31. package/dist/src/error.js +41 -16
  32. package/dist/src/error.js.map +1 -1
  33. package/dist/src/eventreceiver.d.ts +17 -8
  34. package/dist/src/eventreceiver.d.ts.map +1 -1
  35. package/dist/src/httpServer/handler.d.ts.map +1 -1
  36. package/dist/src/httpServer/handler.js +2 -1
  37. package/dist/src/httpServer/handler.js.map +1 -1
  38. package/dist/src/httpServer/middleware.js +2 -2
  39. package/dist/src/httpServer/middleware.js.map +1 -1
  40. package/dist/src/httpServer/server.d.ts +6 -0
  41. package/dist/src/httpServer/server.d.ts.map +1 -1
  42. package/dist/src/httpServer/server.js +71 -7
  43. package/dist/src/httpServer/server.js.map +1 -1
  44. package/dist/src/scheduler/scheduler.js +1 -1
  45. package/dist/src/scheduler/scheduler.js.map +1 -1
  46. package/dist/src/system_database.d.ts +79 -67
  47. package/dist/src/system_database.d.ts.map +1 -1
  48. package/dist/src/system_database.js +766 -398
  49. package/dist/src/system_database.js.map +1 -1
  50. package/dist/src/testing/testing_runtime.d.ts.map +1 -1
  51. package/dist/src/testing/testing_runtime.js +2 -1
  52. package/dist/src/testing/testing_runtime.js.map +1 -1
  53. package/dist/src/utils.d.ts +1 -0
  54. package/dist/src/utils.d.ts.map +1 -1
  55. package/dist/src/utils.js +3 -1
  56. package/dist/src/utils.js.map +1 -1
  57. package/dist/src/wfqueue.d.ts.map +1 -1
  58. package/dist/src/wfqueue.js +1 -2
  59. package/dist/src/wfqueue.js.map +1 -1
  60. package/dist/src/workflow.d.ts +24 -6
  61. package/dist/src/workflow.d.ts.map +1 -1
  62. package/dist/src/workflow.js +7 -38
  63. package/dist/src/workflow.js.map +1 -1
  64. package/dist/tsconfig.tsbuildinfo +1 -1
  65. package/package.json +1 -3
@@ -1,5 +1,4 @@
1
1
  "use strict";
2
- /* eslint-disable @typescript-eslint/no-explicit-any */
3
2
  var __importDefault = (this && this.__importDefault) || function (mod) {
4
3
  return (mod && mod.__esModule) ? mod : { "default": mod };
5
4
  };
@@ -12,6 +11,7 @@ const workflow_1 = require("./workflow");
12
11
  const utils_1 = require("./utils");
13
12
  const knex_1 = __importDefault(require("knex"));
14
13
  const path_1 = __importDefault(require("path"));
14
+ const crypto_1 = require("crypto");
15
15
  exports.DBOS_FUNCNAME_SEND = 'DBOS.send';
16
16
  exports.DBOS_FUNCNAME_RECV = 'DBOS.recv';
17
17
  exports.DBOS_FUNCNAME_SETEVENT = 'DBOS.setEvent';
@@ -40,6 +40,186 @@ async function migrateSystemDatabase(systemPoolConfig, logger) {
40
40
  }
41
41
  }
42
42
  exports.migrateSystemDatabase = migrateSystemDatabase;
43
+ class NotificationMap {
44
+ map = new Map();
45
+ curCK = 0;
46
+ registerCallback(key, cb) {
47
+ if (!this.map.has(key)) {
48
+ this.map.set(key, new Map());
49
+ }
50
+ const ck = this.curCK++;
51
+ this.map.get(key).set(ck, cb);
52
+ return { key, ck };
53
+ }
54
+ deregisterCallback(k) {
55
+ if (!this.map.has(k.key))
56
+ return;
57
+ const sm = this.map.get(k.key);
58
+ if (!sm.has(k.ck))
59
+ return;
60
+ sm.delete(k.ck);
61
+ if (sm.size === 0) {
62
+ this.map.delete(k.key);
63
+ }
64
+ }
65
+ callCallbacks(key, event) {
66
+ if (!this.map.has(key))
67
+ return;
68
+ const sm = this.map.get(key);
69
+ for (const cb of sm.values()) {
70
+ cb(event);
71
+ }
72
+ }
73
+ }
74
+ async function insertWorkflowStatus(client, initStatus) {
75
+ const { rows } = await client.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status (
76
+ workflow_uuid,
77
+ status,
78
+ name,
79
+ class_name,
80
+ config_name,
81
+ queue_name,
82
+ authenticated_user,
83
+ assumed_role,
84
+ authenticated_roles,
85
+ request,
86
+ executor_id,
87
+ application_version,
88
+ application_id,
89
+ created_at,
90
+ recovery_attempts,
91
+ updated_at
92
+ ) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
93
+ ON CONFLICT (workflow_uuid)
94
+ DO UPDATE SET
95
+ recovery_attempts = workflow_status.recovery_attempts + 1,
96
+ updated_at = EXCLUDED.updated_at,
97
+ executor_id = EXCLUDED.executor_id
98
+ RETURNING recovery_attempts, status, name, class_name, config_name, queue_name`, [
99
+ initStatus.workflowUUID,
100
+ initStatus.status,
101
+ initStatus.workflowName,
102
+ initStatus.workflowClassName,
103
+ initStatus.workflowConfigName,
104
+ initStatus.queueName ?? null,
105
+ initStatus.authenticatedUser,
106
+ initStatus.assumedRole,
107
+ JSON.stringify(initStatus.authenticatedRoles),
108
+ JSON.stringify(initStatus.request),
109
+ initStatus.executorId,
110
+ initStatus.applicationVersion ?? null,
111
+ initStatus.applicationID,
112
+ initStatus.createdAt,
113
+ initStatus.status === workflow_1.StatusString.ENQUEUED ? 0 : 1,
114
+ initStatus.updatedAt ?? Date.now(),
115
+ ]);
116
+ if (rows.length === 0) {
117
+ throw new Error(`Attempt to insert workflow ${initStatus.workflowUUID} failed`);
118
+ }
119
+ return rows[0];
120
+ }
121
+ async function insertWorkflowInputs(client, workflowID, serializedInputs) {
122
+ const { rows } = await client.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_inputs
123
+ (workflow_uuid, inputs) VALUES($1, $2)
124
+ ON CONFLICT (workflow_uuid) DO UPDATE SET workflow_uuid = excluded.workflow_uuid
125
+ RETURNING inputs`, [workflowID, serializedInputs]);
126
+ if (rows.length === 0) {
127
+ throw new Error(`Attempt to insert workflow ${workflowID} inputs failed`);
128
+ }
129
+ return rows[0].inputs;
130
+ }
131
+ async function enqueueWorkflow(client, workflowID, queueName) {
132
+ await client.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue (workflow_uuid, queue_name) VALUES ($1, $2)
133
+ ON CONFLICT (workflow_uuid) DO NOTHING;`, [workflowID, queueName]);
134
+ }
135
+ async function deleteQueuedWorkflows(client, workflowID) {
136
+ await client.query(`DELETE FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue WHERE workflow_uuid = $1`, [
137
+ workflowID,
138
+ ]);
139
+ }
140
+ async function getWorkflowStatusValue(client, workflowID) {
141
+ const { rows } = await client.query(`SELECT status FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status WHERE workflow_uuid=$1`, [workflowID]);
142
+ return rows.length === 0 ? undefined : rows[0].status;
143
+ }
144
+ async function updateWorkflowStatus(client, workflowID, status, options = {}) {
145
+ let setClause = `SET status=$2, updated_at=$3`;
146
+ let whereClause = `WHERE workflow_uuid=$1`;
147
+ const args = [workflowID, status, Date.now()];
148
+ const update = options.update ?? {};
149
+ if (update.output) {
150
+ const param = args.push(update.output);
151
+ setClause += `, output=$${param}`;
152
+ }
153
+ if (update.error) {
154
+ const param = args.push(update.error);
155
+ setClause += `, error=$${param}`;
156
+ }
157
+ if (update.resetRecoveryAttempts) {
158
+ setClause += `, recovery_attempts = 0`;
159
+ }
160
+ if (update.queueName) {
161
+ const param = args.push(update.queueName);
162
+ setClause += `, queue_name=$${param}`;
163
+ }
164
+ const where = options.where ?? {};
165
+ if (where.status) {
166
+ const param = args.push(where.status);
167
+ whereClause += ` AND status=$${param}`;
168
+ }
169
+ const result = await client.query(`UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status ${setClause} ${whereClause}`, args);
170
+ const throwOnFailure = options.throwOnFailure ?? true;
171
+ if (throwOnFailure && result.rowCount !== 1) {
172
+ throw new error_1.DBOSWorkflowConflictError(`Attempt to record transition of nonexistent workflow ${workflowID}`);
173
+ }
174
+ }
175
+ async function recordOperationResult(client, workflowID, functionID, functionName, checkConflict, options = {}) {
176
+ try {
177
+ await client.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs
178
+ (workflow_uuid, function_id, output, error, function_name, child_workflow_id)
179
+ VALUES ($1, $2, $3, $4, $5, $6)
180
+ ${checkConflict ? '' : ' ON CONFLICT DO NOTHING'};`, [
181
+ workflowID,
182
+ functionID,
183
+ options.output ?? null,
184
+ options.error ?? null,
185
+ functionName,
186
+ options.childWorkflowID ?? null,
187
+ ]);
188
+ }
189
+ catch (error) {
190
+ const err = error;
191
+ if (err.code === '40001' || err.code === '23505') {
192
+ // Serialization and primary key conflict (Postgres).
193
+ throw new error_1.DBOSWorkflowConflictError(workflowID);
194
+ }
195
+ else {
196
+ throw err;
197
+ }
198
+ }
199
+ }
200
+ function mapWorkflowStatus(row) {
201
+ return {
202
+ workflowUUID: row.workflow_uuid,
203
+ status: row.status,
204
+ workflowName: row.name,
205
+ output: row.output ? row.output : null,
206
+ error: row.error ? row.error : null,
207
+ workflowClassName: row.class_name ?? '',
208
+ workflowConfigName: row.config_name ?? '',
209
+ queueName: row.queue_name,
210
+ authenticatedUser: row.authenticated_user,
211
+ assumedRole: row.assumed_role,
212
+ authenticatedRoles: JSON.parse(row.authenticated_roles),
213
+ request: JSON.parse(row.request),
214
+ executorId: row.executor_id,
215
+ createdAt: Number(row.created_at),
216
+ updatedAt: Number(row.updated_at),
217
+ applicationVersion: row.application_version,
218
+ applicationID: row.application_id,
219
+ recoveryAttempts: Number(row.recovery_attempts),
220
+ input: row.inputs,
221
+ };
222
+ }
43
223
  class PostgresSystemDatabase {
44
224
  pgPoolConfig;
45
225
  systemDatabaseName;
@@ -47,10 +227,35 @@ class PostgresSystemDatabase {
47
227
  sysDbPoolSize;
48
228
  pool;
49
229
  systemPoolConfig;
230
+ // TODO: remove Knex connection in favor of just using Pool
50
231
  knexDB;
232
+ /*
233
+ * Generally, notifications are asynchronous. One should:
234
+ * Subscribe to updates
235
+ * Read the database item in question
236
+ * In response to updates, re-read the database item
237
+ * Unsubscribe at the end
238
+ * The notification mechanism is reliable in the sense that it will eventually deliver updates
239
+ * or the DB connection will get dropped. The right thing to do if you lose connectivity to
240
+ * the system DB is to exit the process and go through recovery... system DB writes, notifications,
241
+ * etc may not have completed correctly, and recovery is the way to rebuild in-memory state.
242
+ *
243
+ * NOTE:
244
+ * PG Notifications are not fully reliable.
245
+ * Dropped connections are recoverable - you just need to restart and scan everything.
246
+ * (The whole VM being the logical choice, so workflows can recover from any write failures.)
247
+ * The real problem is, if the pipes out of the server are full... then notifications can be
248
+ dropped, and only the PG server log may note it. For those reasons, we do occasional polling.
249
+ */
51
250
  notificationsClient = null;
52
- notificationsMap = {};
53
- workflowEventsMap = {};
251
+ dbPollingIntervalResultMs = 1000;
252
+ dbPollingIntervalEventMs = 10000;
253
+ shouldUseDBNotifications = true;
254
+ notificationsMap = new NotificationMap();
255
+ workflowEventsMap = new NotificationMap();
256
+ cancelWakeupMap = new NotificationMap();
257
+ runningWorkflowMap = new Map(); // Map from workflowID to workflow promise
258
+ workflowCancellationMap = new Map(); // Map from workflowID to its cancellation status.
54
259
  constructor(pgPoolConfig, systemDatabaseName, logger, sysDbPoolSize) {
55
260
  this.pgPoolConfig = pgPoolConfig;
56
261
  this.systemDatabaseName = systemDatabaseName;
@@ -100,7 +305,9 @@ class PostgresSystemDatabase {
100
305
  finally {
101
306
  await pgSystemClient.end();
102
307
  }
103
- await this.listenForNotifications();
308
+ if (this.shouldUseDBNotifications) {
309
+ await this.#listenForNotifications();
310
+ }
104
311
  }
105
312
  async destroy() {
106
313
  await this.knexDB.destroy();
@@ -123,203 +330,244 @@ class PostgresSystemDatabase {
123
330
  await pgSystemClient.query(`DROP DATABASE IF EXISTS ${dbosConfig.system_database};`);
124
331
  await pgSystemClient.end();
125
332
  }
126
- async initWorkflowStatus(initStatus, args) {
127
- const result = await this.pool.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status (
128
- workflow_uuid,
129
- status,
130
- name,
131
- class_name,
132
- config_name,
133
- queue_name,
134
- authenticated_user,
135
- assumed_role,
136
- authenticated_roles,
137
- request,
138
- output,
139
- executor_id,
140
- application_version,
141
- application_id,
142
- created_at,
143
- recovery_attempts,
144
- updated_at
145
- ) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17)
146
- ON CONFLICT (workflow_uuid)
147
- DO UPDATE SET
148
- recovery_attempts = workflow_status.recovery_attempts + 1,
149
- updated_at = EXCLUDED.updated_at,
150
- executor_id = EXCLUDED.executor_id
151
- RETURNING recovery_attempts, status, name, class_name, config_name, queue_name`, [
152
- initStatus.workflowUUID,
153
- initStatus.status,
154
- initStatus.workflowName,
155
- initStatus.workflowClassName,
156
- initStatus.workflowConfigName,
157
- initStatus.queueName,
158
- initStatus.authenticatedUser,
159
- initStatus.assumedRole,
160
- utils_1.DBOSJSON.stringify(initStatus.authenticatedRoles),
161
- utils_1.DBOSJSON.stringify(initStatus.request),
162
- null,
163
- initStatus.executorId,
164
- initStatus.applicationVersion,
165
- initStatus.applicationID,
166
- initStatus.createdAt,
167
- initStatus.status === workflow_1.StatusString.ENQUEUED ? 0 : 1,
168
- Date.now(),
169
- ]);
170
- // Check the started workflow matches the expected name, class_name, config_name, and queue_name
171
- // A mismatch indicates a workflow starting with the same UUID but different functions, which should not be allowed.
172
- const resRow = result.rows[0];
173
- initStatus.workflowConfigName = initStatus.workflowConfigName || '';
174
- resRow.config_name = resRow.config_name || '';
175
- resRow.queue_name = resRow.queue_name === null ? undefined : resRow.queue_name; // Convert null in SQL to undefined
176
- let msg = '';
177
- if (resRow.name !== initStatus.workflowName) {
178
- msg = `Workflow already exists with a different function name: ${resRow.name}, but the provided function name is: ${initStatus.workflowName}`;
179
- }
180
- else if (resRow.class_name !== initStatus.workflowClassName) {
181
- msg = `Workflow already exists with a different class name: ${resRow.class_name}, but the provided class name is: ${initStatus.workflowClassName}`;
182
- }
183
- else if (resRow.config_name !== initStatus.workflowConfigName) {
184
- msg = `Workflow already exists with a different class configuration: ${resRow.config_name}, but the provided class configuration is: ${initStatus.workflowConfigName}`;
185
- }
186
- else if (resRow.queue_name !== initStatus.queueName) {
187
- // This is a warning because a different queue name is not necessarily an error.
188
- this.logger.warn(`Workflow (${initStatus.workflowUUID}) already exists in queue: ${resRow.queue_name}, but the provided queue name is: ${initStatus.queueName}. The queue is not updated. ${new Error().stack}`);
189
- }
190
- if (msg !== '') {
191
- throw new error_1.DBOSConflictingWorkflowError(initStatus.workflowUUID, msg);
192
- }
193
- // recovery_attempt means "attempts" (we kept the name for backward compatibility). Its default value is 1.
194
- // Every time we init the status, we increment `recovery_attempts` by 1.
195
- // Thus, when this number becomes equal to `maxRetries + 1`, we should mark the workflow as `RETRIES_EXCEEDED`.
196
- const attempts = resRow.recovery_attempts;
197
- if (attempts > initStatus.maxRetries + 1) {
198
- await this.pool.query(`UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status SET status=$1 WHERE workflow_uuid=$2 AND status=$3`, [workflow_1.StatusString.RETRIES_EXCEEDED, initStatus.workflowUUID, workflow_1.StatusString.PENDING]);
199
- throw new error_1.DBOSDeadLetterQueueError(initStatus.workflowUUID, initStatus.maxRetries);
200
- }
201
- this.logger.debug(`Workflow ${initStatus.workflowUUID} attempt number: ${attempts}.`);
202
- const status = resRow.status;
203
- const serializedInputs = utils_1.DBOSJSON.stringify(args);
204
- const { rows } = await this.pool.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_inputs (workflow_uuid, inputs) VALUES($1, $2) ON CONFLICT (workflow_uuid) DO UPDATE SET workflow_uuid = excluded.workflow_uuid RETURNING inputs`, [initStatus.workflowUUID, serializedInputs]);
205
- if (serializedInputs !== rows[0].inputs) {
206
- this.logger.warn(`Workflow inputs for ${initStatus.workflowUUID} changed since the first call! Use the original inputs.`);
207
- }
208
- return { args: utils_1.DBOSJSON.parse(rows[0].inputs), status };
209
- }
210
- async recordWorkflowStatusChange(workflowID, status, update, client) {
211
- let rec = '';
212
- if (update.resetRecoveryAttempts) {
213
- rec = ' recovery_attempts = 0, ';
214
- }
215
- if (update.incrementRecoveryAttempts) {
216
- rec = ' recovery_attempts = recovery_attempts + 1';
217
- }
218
- const wRes = await (client ?? this.pool).query(`UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status
219
- SET ${rec} status=$2, output=$3, error=$4, updated_at=$5 WHERE workflow_uuid=$1`, [workflowID, status, update.output, update.error, Date.now()]);
220
- if (wRes.rowCount !== 1) {
221
- throw new error_1.DBOSWorkflowConflictUUIDError(`Attempt to record transition of nonexistent workflow ${workflowID}`);
333
+ async initWorkflowStatus(initStatus, serializedInputs, maxRetries) {
334
+ const client = await this.pool.connect();
335
+ try {
336
+ await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
337
+ const resRow = await insertWorkflowStatus(client, initStatus);
338
+ if (resRow.name !== initStatus.workflowName) {
339
+ const msg = `Workflow already exists with a different function name: ${resRow.name}, but the provided function name is: ${initStatus.workflowName}`;
340
+ throw new error_1.DBOSConflictingWorkflowError(initStatus.workflowUUID, msg);
341
+ }
342
+ else if (resRow.class_name !== initStatus.workflowClassName) {
343
+ const msg = `Workflow already exists with a different class name: ${resRow.class_name}, but the provided class name is: ${initStatus.workflowClassName}`;
344
+ throw new error_1.DBOSConflictingWorkflowError(initStatus.workflowUUID, msg);
345
+ }
346
+ else if ((resRow.config_name || '') !== (initStatus.workflowConfigName || '')) {
347
+ const msg = `Workflow already exists with a different class configuration: ${resRow.config_name}, but the provided class configuration is: ${initStatus.workflowConfigName}`;
348
+ throw new error_1.DBOSConflictingWorkflowError(initStatus.workflowUUID, msg);
349
+ }
350
+ else if ((resRow.queue_name ?? undefined) !== initStatus.queueName) {
351
+ // This is a warning because a different queue name is not necessarily an error.
352
+ this.logger.warn(`Workflow (${initStatus.workflowUUID}) already exists in queue: ${resRow.queue_name}, but the provided queue name is: ${initStatus.queueName}. The queue is not updated. ${new Error().stack}`);
353
+ }
354
+ // recovery_attempt means "attempts" (we kept the name for backward compatibility). Its default value is 1.
355
+ // Every time we init the status, we increment `recovery_attempts` by 1.
356
+ // Thus, when this number becomes equal to `maxRetries + 1`, we should mark the workflow as `RETRIES_EXCEEDED`.
357
+ const attempts = resRow.recovery_attempts;
358
+ if (maxRetries && attempts > maxRetries + 1) {
359
+ await updateWorkflowStatus(client, initStatus.workflowUUID, workflow_1.StatusString.RETRIES_EXCEEDED, {
360
+ where: { status: workflow_1.StatusString.PENDING },
361
+ throwOnFailure: false,
362
+ });
363
+ throw new error_1.DBOSDeadLetterQueueError(initStatus.workflowUUID, maxRetries);
364
+ }
365
+ this.logger.debug(`Workflow ${initStatus.workflowUUID} attempt number: ${attempts}.`);
366
+ const status = resRow.status;
367
+ const inputResult = await insertWorkflowInputs(client, initStatus.workflowUUID, serializedInputs);
368
+ if (serializedInputs !== inputResult) {
369
+ this.logger.warn(`Workflow inputs for ${initStatus.workflowUUID} changed since the first call! Use the original inputs.`);
370
+ }
371
+ return { serializedInputs: inputResult, status };
372
+ }
373
+ finally {
374
+ await client.query('COMMIT');
375
+ client.release();
222
376
  }
223
377
  }
224
378
  async recordWorkflowOutput(workflowID, status) {
225
- await this.recordWorkflowStatusChange(workflowID, workflow_1.StatusString.SUCCESS, { output: status.output });
379
+ const client = await this.pool.connect();
380
+ try {
381
+ await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.SUCCESS, { update: { output: status.output } });
382
+ }
383
+ finally {
384
+ client.release();
385
+ }
226
386
  }
227
387
  async recordWorkflowError(workflowID, status) {
228
- await this.recordWorkflowStatusChange(workflowID, workflow_1.StatusString.ERROR, { error: status.error });
388
+ const client = await this.pool.connect();
389
+ try {
390
+ await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ERROR, { update: { error: status.error } });
391
+ }
392
+ finally {
393
+ client.release();
394
+ }
229
395
  }
230
396
  async getPendingWorkflows(executorID, appVersion) {
231
- const getWorkflows = await this.pool.query(`SELECT workflow_uuid, queue_name FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status WHERE status=$1 AND executor_id=$2 AND application_version=$3`, [workflow_1.StatusString.PENDING, executorID, appVersion]);
397
+ const getWorkflows = await this.pool.query(`SELECT workflow_uuid, queue_name
398
+ FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status
399
+ WHERE status=$1 AND executor_id=$2 AND application_version=$3`, [workflow_1.StatusString.PENDING, executorID, appVersion]);
232
400
  return getWorkflows.rows.map((i) => ({
233
401
  workflowUUID: i.workflow_uuid,
234
402
  queueName: i.queue_name,
235
403
  }));
236
404
  }
237
405
  async getWorkflowInputs(workflowID) {
238
- const { rows } = await this.pool.query(`SELECT inputs FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_inputs WHERE workflow_uuid=$1`, [workflowID]);
406
+ const { rows } = await this.pool.query(`SELECT inputs FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_inputs
407
+ WHERE workflow_uuid=$1`, [workflowID]);
239
408
  if (rows.length === 0) {
240
409
  return null;
241
410
  }
242
- return utils_1.DBOSJSON.parse(rows[0].inputs);
411
+ return rows[0].inputs;
243
412
  }
244
- async getOperationResult(workflowID, functionID, client) {
245
- const { rows } = await (client ?? this.pool).query(`SELECT output, error, child_workflow_id, function_name
413
+ async #getOperationResultAndThrowIfCancelled(client, workflowID, functionID) {
414
+ await this.#checkIfCanceled(client, workflowID);
415
+ const { rows } = await client.query(`SELECT output, error, child_workflow_id, function_name
246
416
  FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs
247
417
  WHERE workflow_uuid=$1 AND function_id=$2`, [workflowID, functionID]);
248
418
  if (rows.length === 0) {
249
- return {};
419
+ return undefined;
250
420
  }
251
421
  else {
252
422
  return {
253
- res: {
254
- res: rows[0].output,
255
- err: rows[0].error,
256
- child: rows[0].child_workflow_id,
257
- functionName: rows[0].function_name,
258
- },
423
+ output: rows[0].output,
424
+ error: rows[0].error,
425
+ childWorkflowID: rows[0].child_workflow_id,
426
+ functionName: rows[0].function_name,
259
427
  };
260
428
  }
261
429
  }
430
+ async getOperationResultAndThrowIfCancelled(workflowID, functionID) {
431
+ const client = await this.pool.connect();
432
+ try {
433
+ return await this.#getOperationResultAndThrowIfCancelled(client, workflowID, functionID);
434
+ }
435
+ finally {
436
+ client.release();
437
+ }
438
+ }
262
439
  async getAllOperationResults(workflowID) {
263
440
  const { rows } = await this.pool.query(`SELECT * FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs WHERE workflow_uuid=$1`, [workflowID]);
264
441
  return rows;
265
442
  }
266
- async recordOperationResult(workflowID, functionID, rec, checkConflict, client) {
443
+ async recordOperationResult(workflowID, functionID, functionName, checkConflict, options = {}) {
444
+ const client = await this.pool.connect();
267
445
  try {
268
- await (client ?? this.pool).query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs
269
- (workflow_uuid, function_id, output, error, function_name, child_workflow_id)
270
- VALUES ($1, $2, $3, $4, $5, $6)
271
- ${checkConflict ? '' : ' ON CONFLICT DO NOTHING'}
272
- ;`, [
273
- workflowID,
274
- functionID,
275
- rec.serialOutput ?? null,
276
- rec.serialError ?? null,
277
- rec.functionName,
278
- rec.childWfId ?? null,
279
- ]);
446
+ await recordOperationResult(client, workflowID, functionID, functionName, checkConflict, options);
280
447
  }
281
- catch (error) {
282
- const err = error;
283
- if (err.code === '40001' || err.code === '23505') {
284
- // Serialization and primary key conflict (Postgres).
285
- throw new error_1.DBOSWorkflowConflictUUIDError(workflowID);
448
+ finally {
449
+ client.release();
450
+ }
451
+ }
452
+ async getMaxFunctionID(workflowID) {
453
+ const { rows } = await this.pool.query(`SELECT max(function_id) as max_function_id FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs WHERE workflow_uuid=$1`, [workflowID]);
454
+ return rows.length === 0 ? 0 : rows[0].max_function_id;
455
+ }
456
+ async forkWorkflow(workflowID, startStep, options = {}) {
457
+ const newWorkflowID = options.newWorkflowID ?? (0, crypto_1.randomUUID)();
458
+ const workflowStatus = await this.getWorkflowStatus(workflowID);
459
+ if (workflowStatus === null) {
460
+ throw new error_1.DBOSNonExistentWorkflowError(`Workflow ${workflowID} does not exist`);
461
+ }
462
+ if (!workflowStatus.input) {
463
+ throw new error_1.DBOSNonExistentWorkflowError(`Workflow ${workflowID} has no input`);
464
+ }
465
+ const client = await this.pool.connect();
466
+ try {
467
+ await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
468
+ const now = Date.now();
469
+ await insertWorkflowStatus(client, {
470
+ workflowUUID: newWorkflowID,
471
+ status: workflow_1.StatusString.ENQUEUED,
472
+ workflowName: workflowStatus.workflowName,
473
+ workflowClassName: workflowStatus.workflowClassName,
474
+ workflowConfigName: workflowStatus.workflowConfigName,
475
+ queueName: utils_1.INTERNAL_QUEUE_NAME,
476
+ authenticatedUser: workflowStatus.authenticatedUser,
477
+ assumedRole: workflowStatus.assumedRole,
478
+ authenticatedRoles: workflowStatus.authenticatedRoles,
479
+ output: null,
480
+ error: null,
481
+ request: workflowStatus.request,
482
+ executorId: utils_1.globalParams.executorID,
483
+ applicationVersion: options.applicationVersion ?? workflowStatus.applicationVersion,
484
+ applicationID: workflowStatus.applicationID,
485
+ createdAt: now,
486
+ recoveryAttempts: 0,
487
+ updatedAt: now,
488
+ });
489
+ await insertWorkflowInputs(client, newWorkflowID, workflowStatus.input);
490
+ if (startStep > 0) {
491
+ const query = `INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs
492
+ (workflow_uuid, function_id, output, error, function_name, child_workflow_id )
493
+ SELECT $1 AS workflow_uuid, function_id, output, error, function_name, child_workflow_id
494
+ FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs
495
+ WHERE workflow_uuid = $2 AND function_id < $3`;
496
+ await client.query(query, [newWorkflowID, workflowID, startStep]);
286
497
  }
287
- else {
288
- throw err;
498
+ await enqueueWorkflow(client, newWorkflowID, utils_1.INTERNAL_QUEUE_NAME);
499
+ await client.query('COMMIT');
500
+ return newWorkflowID;
501
+ }
502
+ catch (error) {
503
+ await client.query('ROLLBACK');
504
+ throw error;
505
+ }
506
+ finally {
507
+ client.release();
508
+ }
509
+ }
510
+ async #runAndRecordResult(client, functionName, workflowID, functionID, func) {
511
+ const result = await this.#getOperationResultAndThrowIfCancelled(client, workflowID, functionID);
512
+ if (result !== undefined) {
513
+ if (result.functionName !== functionName) {
514
+ throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, functionName, result.functionName);
289
515
  }
516
+ return result.output;
290
517
  }
518
+ const output = await func();
519
+ await recordOperationResult(client, workflowID, functionID, functionName, true, { output });
520
+ return output;
291
521
  }
292
- async runAsStep(callback, functionName, workflowID, functionID, client) {
293
- if (workflowID !== undefined && functionID !== undefined) {
294
- const res = await this.getOperationResult(workflowID, functionID, client);
295
- if (res.res !== undefined) {
296
- if (res.res.functionName !== functionName) {
297
- throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, functionName, res.res.functionName);
298
- }
299
- await client?.query('ROLLBACK');
300
- return res.res.res;
522
+ async durableSleepms(workflowID, functionID, durationMS) {
523
+ let resolveNotification;
524
+ const cancelPromise = new Promise((resolve) => {
525
+ resolveNotification = resolve;
526
+ });
527
+ const cbr = this.cancelWakeupMap.registerCallback(workflowID, resolveNotification);
528
+ try {
529
+ let timeoutPromise = Promise.resolve();
530
+ const { promise, cancel: timeoutCancel } = await this.#durableSleep(workflowID, functionID, durationMS);
531
+ timeoutPromise = promise;
532
+ try {
533
+ await Promise.race([cancelPromise, timeoutPromise]);
534
+ }
535
+ finally {
536
+ timeoutCancel();
301
537
  }
302
538
  }
303
- const serialOutput = await callback();
304
- if (workflowID !== undefined && functionID !== undefined) {
305
- await this.recordOperationResult(workflowID, functionID, { serialOutput, functionName }, true, client);
539
+ finally {
540
+ this.cancelWakeupMap.deregisterCallback(cbr);
306
541
  }
307
- return serialOutput;
542
+ await this.checkIfCanceled(workflowID);
308
543
  }
309
- async durableSleepms(workflowID, functionID, durationMS) {
544
+ async #durableSleep(workflowID, functionID, durationMS, maxSleepPerIteration) {
545
+ if (maxSleepPerIteration === undefined)
546
+ maxSleepPerIteration = durationMS;
310
547
  const curTime = Date.now();
311
548
  let endTimeMs = curTime + durationMS;
312
- const res = await this.getOperationResult(workflowID, functionID);
313
- if (res.res !== undefined) {
314
- if (res.res.functionName !== exports.DBOS_FUNCNAME_SLEEP) {
315
- throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, exports.DBOS_FUNCNAME_SLEEP, res.res.functionName);
549
+ const client = await this.pool.connect();
550
+ try {
551
+ const res = await this.#getOperationResultAndThrowIfCancelled(client, workflowID, functionID);
552
+ if (res) {
553
+ if (res.functionName !== exports.DBOS_FUNCNAME_SLEEP) {
554
+ throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, exports.DBOS_FUNCNAME_SLEEP, res.functionName);
555
+ }
556
+ endTimeMs = JSON.parse(res.output);
557
+ }
558
+ else {
559
+ await recordOperationResult(client, workflowID, functionID, exports.DBOS_FUNCNAME_SLEEP, false, {
560
+ output: JSON.stringify(endTimeMs),
561
+ });
316
562
  }
317
- endTimeMs = JSON.parse(res.res.res);
563
+ return {
564
+ ...(0, utils_1.cancellableSleep)(Math.max(Math.min(maxSleepPerIteration, endTimeMs - curTime), 0)),
565
+ endTime: endTimeMs,
566
+ };
318
567
  }
319
- else {
320
- await this.recordOperationResult(workflowID, functionID, { serialOutput: JSON.stringify(endTimeMs), functionName: exports.DBOS_FUNCNAME_SLEEP }, false);
568
+ finally {
569
+ client.release();
321
570
  }
322
- return (0, utils_1.cancellableSleep)(Math.max(endTimeMs - curTime, 0));
323
571
  }
324
572
  nullTopic = '__null__topic__';
325
573
  async send(workflowID, functionID, destinationID, message, topic) {
@@ -327,11 +575,11 @@ class PostgresSystemDatabase {
327
575
  const client = await this.pool.connect();
328
576
  await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
329
577
  try {
330
- await this.runAsStep(async () => {
578
+ await this.#runAndRecordResult(client, exports.DBOS_FUNCNAME_SEND, workflowID, functionID, async () => {
331
579
  await client.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.notifications (destination_uuid, topic, message) VALUES ($1, $2, $3);`, [destinationID, topic, message]);
332
580
  await client.query('COMMIT');
333
581
  return undefined;
334
- }, exports.DBOS_FUNCNAME_SEND, workflowID, functionID, client);
582
+ });
335
583
  }
336
584
  catch (error) {
337
585
  await client.query('ROLLBACK');
@@ -351,44 +599,63 @@ class PostgresSystemDatabase {
351
599
  async recv(workflowID, functionID, timeoutFunctionID, topic, timeoutSeconds = dbos_executor_1.DBOSExecutor.defaultNotificationTimeoutSec) {
352
600
  topic = topic ?? this.nullTopic;
353
601
  // First, check for previous executions.
354
- const res = await this.getOperationResult(workflowID, functionID);
355
- if (res.res) {
356
- if (res.res.functionName !== exports.DBOS_FUNCNAME_RECV) {
357
- throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, exports.DBOS_FUNCNAME_RECV, res.res.functionName);
602
+ const res = await this.getOperationResultAndThrowIfCancelled(workflowID, functionID);
603
+ if (res) {
604
+ if (res.functionName !== exports.DBOS_FUNCNAME_RECV) {
605
+ throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, exports.DBOS_FUNCNAME_RECV, res.functionName);
358
606
  }
359
- return res.res.res;
607
+ return res.output;
360
608
  }
361
- // Check if the key is already in the DB, then wait for the notification if it isn't.
362
- const initRecvRows = (await this.pool.query(`SELECT topic FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.notifications WHERE destination_uuid=$1 AND topic=$2;`, [workflowID, topic])).rows;
363
- if (initRecvRows.length === 0) {
364
- // Then, register the key with the global notifications listener.
609
+ const timeoutms = timeoutSeconds !== undefined ? timeoutSeconds * 1000 : undefined;
610
+ let finishTime = timeoutms !== undefined ? Date.now() + timeoutms : undefined;
611
+ while (true) {
612
+ // register the key with the global notifications listener.
365
613
  let resolveNotification;
366
614
  const messagePromise = new Promise((resolve) => {
367
615
  resolveNotification = resolve;
368
616
  });
369
617
  const payload = `${workflowID}::${topic}`;
370
- this.notificationsMap[payload] = resolveNotification; // The resolver assignment in the Promise definition runs synchronously.
371
- let timeoutPromise = Promise.resolve();
372
- let timeoutCancel = () => { };
373
- try {
374
- const { promise, cancel } = await this.durableSleepms(workflowID, timeoutFunctionID, timeoutSeconds * 1000);
375
- timeoutPromise = promise;
376
- timeoutCancel = cancel;
377
- }
378
- catch (e) {
379
- this.logger.error(e);
380
- delete this.notificationsMap[payload];
381
- timeoutCancel();
382
- throw new Error('durable sleepms failed');
383
- }
618
+ const cbr = this.notificationsMap.registerCallback(payload, resolveNotification);
619
+ const crh = this.cancelWakeupMap.registerCallback(workflowID, (_res) => {
620
+ resolveNotification();
621
+ });
384
622
  try {
385
- await Promise.race([messagePromise, timeoutPromise]);
623
+ await this.checkIfCanceled(workflowID);
624
+ // Check if the key is already in the DB, then wait for the notification if it isn't.
625
+ const initRecvRows = (await this.pool.query(`SELECT topic FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.notifications WHERE destination_uuid=$1 AND topic=$2;`, [workflowID, topic])).rows;
626
+ if (initRecvRows.length !== 0)
627
+ break;
628
+ const ct = Date.now();
629
+ if (finishTime && ct > finishTime)
630
+ break; // Time's up
631
+ let timeoutPromise = Promise.resolve();
632
+ let timeoutCancel = () => { };
633
+ if (timeoutms) {
634
+ const { promise, cancel, endTime } = await this.#durableSleep(workflowID, timeoutFunctionID, timeoutms, this.dbPollingIntervalEventMs);
635
+ timeoutPromise = promise;
636
+ timeoutCancel = cancel;
637
+ finishTime = endTime;
638
+ }
639
+ else {
640
+ let poll = finishTime ? finishTime - ct : this.dbPollingIntervalEventMs;
641
+ poll = Math.min(this.dbPollingIntervalEventMs, poll);
642
+ const { promise, cancel } = (0, utils_1.cancellableSleep)(poll);
643
+ timeoutPromise = promise;
644
+ timeoutCancel = cancel;
645
+ }
646
+ try {
647
+ await Promise.race([messagePromise, timeoutPromise]);
648
+ }
649
+ finally {
650
+ timeoutCancel();
651
+ }
386
652
  }
387
653
  finally {
388
- timeoutCancel();
389
- delete this.notificationsMap[payload];
654
+ this.notificationsMap.deregisterCallback(cbr);
655
+ this.cancelWakeupMap.deregisterCallback(crh);
390
656
  }
391
657
  }
658
+ await this.checkIfCanceled(workflowID);
392
659
  // Transactionally consume and return the message if it's in the DB, otherwise return null.
393
660
  let message = null;
394
661
  const client = await this.pool.connect();
@@ -412,7 +679,7 @@ class PostgresSystemDatabase {
412
679
  if (finalRecvRows.length > 0) {
413
680
  message = finalRecvRows[0].message;
414
681
  }
415
- await this.recordOperationResult(workflowID, functionID, { serialOutput: message, functionName: exports.DBOS_FUNCNAME_RECV }, true, client);
682
+ await recordOperationResult(client, workflowID, functionID, exports.DBOS_FUNCNAME_RECV, true, { output: message });
416
683
  await client.query(`COMMIT`);
417
684
  }
418
685
  catch (e) {
@@ -429,7 +696,7 @@ class PostgresSystemDatabase {
429
696
  const client = await this.pool.connect();
430
697
  try {
431
698
  await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
432
- await this.runAsStep(async () => {
699
+ await this.#runAndRecordResult(client, exports.DBOS_FUNCNAME_SETEVENT, workflowID, functionID, async () => {
433
700
  await client.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_events (workflow_uuid, key, value)
434
701
  VALUES ($1, $2, $3)
435
702
  ON CONFLICT (workflow_uuid, key)
@@ -437,7 +704,7 @@ class PostgresSystemDatabase {
437
704
  RETURNING workflow_uuid;`, [workflowID, key, message]);
438
705
  await client.query('COMMIT');
439
706
  return undefined;
440
- }, exports.DBOS_FUNCNAME_SETEVENT, workflowID, functionID, client);
707
+ });
441
708
  }
442
709
  catch (e) {
443
710
  this.logger.error(e);
@@ -451,51 +718,59 @@ class PostgresSystemDatabase {
451
718
  async getEvent(workflowID, key, timeoutSeconds, callerWorkflow) {
452
719
  // Check if the operation has been done before for OAOO (only do this inside a workflow).
453
720
  if (callerWorkflow) {
454
- const res = await this.getOperationResult(callerWorkflow.workflowID, callerWorkflow.functionID);
455
- if (res.res !== undefined) {
456
- if (res.res.functionName !== exports.DBOS_FUNCNAME_GETEVENT) {
457
- throw new error_1.DBOSUnexpectedStepError(callerWorkflow.workflowID, callerWorkflow.functionID, exports.DBOS_FUNCNAME_GETEVENT, res.res.functionName);
721
+ const res = await this.getOperationResultAndThrowIfCancelled(callerWorkflow.workflowID, callerWorkflow.functionID);
722
+ if (res) {
723
+ if (res.functionName !== exports.DBOS_FUNCNAME_GETEVENT) {
724
+ throw new error_1.DBOSUnexpectedStepError(callerWorkflow.workflowID, callerWorkflow.functionID, exports.DBOS_FUNCNAME_GETEVENT, res.functionName);
458
725
  }
459
- return res.res.res;
726
+ return res.output;
460
727
  }
461
728
  }
462
729
  // Get the return the value. if it's in the DB, otherwise return null.
463
730
  let value = null;
464
731
  const payloadKey = `${workflowID}::${key}`;
732
+ const timeoutms = timeoutSeconds !== undefined ? timeoutSeconds * 1000 : undefined;
733
+ let finishTime = timeoutms !== undefined ? Date.now() + timeoutms : undefined;
465
734
  // Register the key with the global notifications listener first... we do not want to look in the DB first
466
735
  // or that would cause a timing hole.
467
- let resolveNotification;
468
- const valuePromise = new Promise((resolve) => {
469
- resolveNotification = resolve;
470
- });
471
- this.workflowEventsMap[payloadKey] = resolveNotification; // The resolver assignment in the Promise definition runs synchronously.
472
- try {
473
- // Check if the key is already in the DB, then wait for the notification if it isn't.
474
- const initRecvRows = (await this.pool.query(`
475
- SELECT key, value
476
- FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_events
477
- WHERE workflow_uuid=$1 AND key=$2;`, [workflowID, key])).rows;
478
- if (initRecvRows.length > 0) {
479
- value = initRecvRows[0].value;
480
- }
481
- else {
736
+ while (true) {
737
+ let resolveNotification;
738
+ const valuePromise = new Promise((resolve) => {
739
+ resolveNotification = resolve;
740
+ });
741
+ const cbr = this.workflowEventsMap.registerCallback(payloadKey, resolveNotification);
742
+ const crh = callerWorkflow?.workflowID
743
+ ? this.cancelWakeupMap.registerCallback(callerWorkflow.workflowID, (_res) => {
744
+ resolveNotification();
745
+ })
746
+ : undefined;
747
+ try {
748
+ if (callerWorkflow?.workflowID)
749
+ await this.checkIfCanceled(callerWorkflow?.workflowID);
750
+ // Check if the key is already in the DB, then wait for the notification if it isn't.
751
+ const initRecvRows = (await this.pool.query(`SELECT key, value
752
+ FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_events
753
+ WHERE workflow_uuid=$1 AND key=$2;`, [workflowID, key])).rows;
754
+ if (initRecvRows.length > 0) {
755
+ value = initRecvRows[0].value;
756
+ break;
757
+ }
758
+ const ct = Date.now();
759
+ if (finishTime && ct > finishTime)
760
+ break; // Time's up
482
761
  // If we have a callerWorkflow, we want a durable sleep, otherwise, not
483
762
  let timeoutPromise = Promise.resolve();
484
763
  let timeoutCancel = () => { };
485
- if (callerWorkflow) {
486
- try {
487
- const { promise, cancel } = await this.durableSleepms(callerWorkflow.workflowID, callerWorkflow.timeoutFunctionID ?? -1, timeoutSeconds * 1000);
488
- timeoutPromise = promise;
489
- timeoutCancel = cancel;
490
- }
491
- catch (e) {
492
- this.logger.error(e);
493
- delete this.workflowEventsMap[payloadKey];
494
- throw new Error('durable sleepms failed');
495
- }
764
+ if (callerWorkflow && timeoutms) {
765
+ const { promise, cancel, endTime } = await this.#durableSleep(callerWorkflow.workflowID, callerWorkflow.timeoutFunctionID ?? -1, timeoutms, this.dbPollingIntervalEventMs);
766
+ timeoutPromise = promise;
767
+ timeoutCancel = cancel;
768
+ finishTime = endTime;
496
769
  }
497
770
  else {
498
- const { promise, cancel } = (0, utils_1.cancellableSleep)(timeoutSeconds * 1000);
771
+ let poll = finishTime ? finishTime - ct : this.dbPollingIntervalEventMs;
772
+ poll = Math.min(this.dbPollingIntervalEventMs, poll);
773
+ const { promise, cancel } = (0, utils_1.cancellableSleep)(poll);
499
774
  timeoutPromise = promise;
500
775
  timeoutCancel = cancel;
501
776
  }
@@ -505,70 +780,112 @@ class PostgresSystemDatabase {
505
780
  finally {
506
781
  timeoutCancel();
507
782
  }
508
- const finalRecvRows = (await this.pool.query(`
509
- SELECT value
510
- FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_events
511
- WHERE workflow_uuid=$1 AND key=$2;`, [workflowID, key])).rows;
512
- if (finalRecvRows.length > 0) {
513
- value = finalRecvRows[0].value;
514
- }
515
783
  }
516
- }
517
- finally {
518
- delete this.workflowEventsMap[payloadKey];
784
+ finally {
785
+ this.workflowEventsMap.deregisterCallback(cbr);
786
+ if (crh)
787
+ this.cancelWakeupMap.deregisterCallback(crh);
788
+ }
519
789
  }
520
790
  // Record the output if it is inside a workflow.
521
791
  if (callerWorkflow) {
522
- await this.recordOperationResult(callerWorkflow.workflowID, callerWorkflow.functionID, {
523
- serialOutput: value,
524
- functionName: exports.DBOS_FUNCNAME_GETEVENT,
525
- }, true);
792
+ await this.recordOperationResult(callerWorkflow.workflowID, callerWorkflow.functionID, exports.DBOS_FUNCNAME_GETEVENT, true, { output: value });
526
793
  }
527
794
  return value;
528
795
  }
529
796
  async setWorkflowStatus(workflowID, status, resetRecoveryAttempts) {
530
- await this.recordWorkflowStatusChange(workflowID, status, { resetRecoveryAttempts });
797
+ const client = await this.pool.connect();
798
+ try {
799
+ await updateWorkflowStatus(client, workflowID, status, { update: { resetRecoveryAttempts } });
800
+ }
801
+ finally {
802
+ client.release();
803
+ }
804
+ }
805
+ #setWFCancelMap(workflowID) {
806
+ if (this.runningWorkflowMap.has(workflowID)) {
807
+ this.workflowCancellationMap.set(workflowID, true);
808
+ }
809
+ this.cancelWakeupMap.callCallbacks(workflowID);
531
810
  }
811
+ #clearWFCancelMap(workflowID) {
812
+ if (this.workflowCancellationMap.has(workflowID)) {
813
+ this.workflowCancellationMap.delete(workflowID);
814
+ }
815
+ }
816
+ // TODO: make cancel throw an error if the workflow doesn't exist.
532
817
  async cancelWorkflow(workflowID) {
533
818
  const client = await this.pool.connect();
534
819
  try {
535
- await client.query('BEGIN');
820
+ await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
536
821
  // Remove workflow from queues table
537
- await client.query(`DELETE FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue
538
- WHERE workflow_uuid = $1`, [workflowID]);
539
- // Should we check if it is incomplete first?
540
- await this.recordWorkflowStatusChange(workflowID, workflow_1.StatusString.CANCELLED, {}, client);
822
+ await deleteQueuedWorkflows(client, workflowID);
823
+ const statusResult = await getWorkflowStatusValue(client, workflowID);
824
+ if (!statusResult || statusResult === workflow_1.StatusString.SUCCESS || statusResult === workflow_1.StatusString.ERROR) {
825
+ await client.query('COMMIT');
826
+ return;
827
+ }
828
+ await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.CANCELLED);
541
829
  await client.query('COMMIT');
542
830
  }
543
831
  catch (error) {
832
+ this.logger.error(error);
544
833
  await client.query('ROLLBACK');
545
834
  throw error;
546
835
  }
547
836
  finally {
548
837
  client.release();
549
838
  }
839
+ this.#setWFCancelMap(workflowID);
840
+ }
841
+ async #checkIfCanceled(client, workflowID) {
842
+ if (this.workflowCancellationMap.get(workflowID) === true) {
843
+ throw new error_1.DBOSWorkflowCancelledError(workflowID);
844
+ }
845
+ const statusValue = await getWorkflowStatusValue(client, workflowID);
846
+ if (statusValue === workflow_1.StatusString.CANCELLED) {
847
+ throw new error_1.DBOSWorkflowCancelledError(workflowID);
848
+ }
849
+ }
850
+ async checkIfCanceled(workflowID) {
851
+ const client = await this.pool.connect();
852
+ try {
853
+ await this.#checkIfCanceled(client, workflowID);
854
+ }
855
+ finally {
856
+ client.release();
857
+ }
550
858
  }
551
859
  async resumeWorkflow(workflowID) {
860
+ this.#clearWFCancelMap(workflowID);
552
861
  const client = await this.pool.connect();
553
862
  try {
554
- await client.query('BEGIN');
863
+ await client.query('BEGIN ISOLATION LEVEL REPEATABLE READ');
555
864
  // Check workflow status. If it is complete, do nothing.
556
- const statusResult = await client.query(`SELECT status FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status
557
- WHERE workflow_uuid = $1`, [workflowID]);
558
- if (statusResult.rows.length === 0 ||
559
- statusResult.rows[0].status === workflow_1.StatusString.SUCCESS ||
560
- statusResult.rows[0].status === workflow_1.StatusString.ERROR) {
561
- await client.query('COMMIT');
865
+ const statusResult = await getWorkflowStatusValue(client, workflowID);
866
+ if (!statusResult || statusResult === workflow_1.StatusString.SUCCESS || statusResult === workflow_1.StatusString.ERROR) {
867
+ await client.query('ROLLBACK');
868
+ if (!statusResult) {
869
+ if (statusResult === undefined) {
870
+ throw new error_1.DBOSNonExistentWorkflowError(`Workflow ${workflowID} does not exist`);
871
+ }
872
+ }
562
873
  return;
563
874
  }
564
875
  // Remove the workflow from the queues table so resume can safely be called on an ENQUEUED workflow
565
- await client.query(`DELETE FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue
566
- WHERE workflow_uuid = $1`, [workflowID]);
567
- // Update status to pending and reset recovery attempts
568
- await this.recordWorkflowStatusChange(workflowID, workflow_1.StatusString.PENDING, { resetRecoveryAttempts: true }, client);
876
+ await deleteQueuedWorkflows(client, workflowID);
877
+ await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ENQUEUED, {
878
+ update: {
879
+ queueName: utils_1.INTERNAL_QUEUE_NAME,
880
+ resetRecoveryAttempts: true,
881
+ },
882
+ throwOnFailure: false,
883
+ });
884
+ await enqueueWorkflow(client, workflowID, utils_1.INTERNAL_QUEUE_NAME);
569
885
  await client.query('COMMIT');
570
886
  }
571
887
  catch (error) {
888
+ this.logger.error(error);
572
889
  await client.query('ROLLBACK');
573
890
  throw error;
574
891
  }
@@ -576,117 +893,166 @@ class PostgresSystemDatabase {
576
893
  client.release();
577
894
  }
578
895
  }
579
- async getWorkflowStatus(workflowID, callerID, callerFN) {
580
- const internalStatus = await this.getWorkflowStatusInternal(workflowID, callerID, callerFN);
581
- if (internalStatus === null) {
582
- return null;
896
+ registerRunningWorkflow(workflowID, workflowPromise) {
897
+ // Need to await for the workflow and capture errors.
898
+ const awaitWorkflowPromise = workflowPromise
899
+ .catch((error) => {
900
+ this.logger.debug('Captured error in awaitWorkflowPromise: ' + error);
901
+ })
902
+ .finally(() => {
903
+ // Remove itself from pending workflow map.
904
+ this.runningWorkflowMap.delete(workflowID);
905
+ this.workflowCancellationMap.delete(workflowID);
906
+ });
907
+ this.runningWorkflowMap.set(workflowID, awaitWorkflowPromise);
908
+ }
909
+ async awaitRunningWorkflows() {
910
+ if (this.runningWorkflowMap.size > 0) {
911
+ this.logger.info('Waiting for pending workflows to finish.');
912
+ await Promise.allSettled(this.runningWorkflowMap.values());
583
913
  }
584
- return {
585
- status: internalStatus.status,
586
- workflowName: internalStatus.workflowName,
587
- workflowClassName: internalStatus.workflowClassName,
588
- workflowConfigName: internalStatus.workflowConfigName,
589
- queueName: internalStatus.queueName,
590
- authenticatedUser: internalStatus.authenticatedUser,
591
- assumedRole: internalStatus.assumedRole,
592
- authenticatedRoles: internalStatus.authenticatedRoles,
593
- request: internalStatus.request,
594
- executorId: internalStatus.executorId,
914
+ if (this.workflowEventsMap.map.size > 0) {
915
+ this.logger.warn('Workflow events map is not empty - shutdown is not clean.');
916
+ //throw new Error('Workflow events map is not empty - shutdown is not clean.');
917
+ }
918
+ if (this.notificationsMap.map.size > 0) {
919
+ this.logger.warn('Message notification map is not empty - shutdown is not clean.');
920
+ //throw new Error('Message notification map is not empty - shutdown is not clean.');
921
+ }
922
+ }
923
+ async getWorkflowStatus(workflowID, callerID, callerFN) {
924
+ const funcGetStatus = async () => {
925
+ const statuses = await this.listWorkflows({ workflowIDs: [workflowID] });
926
+ const status = statuses.find((s) => s.workflowUUID === workflowID);
927
+ return status ? JSON.stringify(status) : null;
595
928
  };
929
+ if (callerID && callerFN) {
930
+ const client = await this.pool.connect();
931
+ try {
932
+ // Check if the operation has been done before for OAOO (only do this inside a workflow).
933
+ const json = await this.#runAndRecordResult(client, exports.DBOS_FUNCNAME_GETSTATUS, callerID, callerFN, async () => {
934
+ const statuses = await this.listWorkflows({ workflowIDs: [workflowID] });
935
+ const status = statuses.find((s) => s.workflowUUID === workflowID);
936
+ return status ? JSON.stringify(status) : null;
937
+ });
938
+ return parseStatus(json);
939
+ }
940
+ finally {
941
+ client.release();
942
+ }
943
+ }
944
+ else {
945
+ const json = await funcGetStatus();
946
+ return parseStatus(json);
947
+ }
948
+ function parseStatus(json) {
949
+ return json ? JSON.parse(json) : null;
950
+ }
596
951
  }
597
- async getWorkflowStatusInternal(workflowID, callerID, callerFN) {
598
- // Check if the operation has been done before for OAOO (only do this inside a workflow).
599
- const sv = await this.runAsStep(async () => {
600
- const { rows } = await this.pool.query(`SELECT workflow_uuid, status, name, class_name, config_name, authenticated_user, assumed_role, authenticated_roles, request, queue_name, executor_id, created_at, updated_at, application_version, application_id, recovery_attempts FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status WHERE workflow_uuid=$1`, [workflowID]);
601
- let value = null;
602
- if (rows.length > 0) {
603
- value = {
604
- workflowUUID: rows[0].workflow_uuid,
605
- status: rows[0].status,
606
- workflowName: rows[0].name,
607
- output: null,
608
- error: null,
609
- workflowClassName: rows[0].class_name || '',
610
- workflowConfigName: rows[0].config_name || '',
611
- queueName: rows[0].queue_name || undefined,
612
- authenticatedUser: rows[0].authenticated_user,
613
- assumedRole: rows[0].assumed_role,
614
- authenticatedRoles: utils_1.DBOSJSON.parse(rows[0].authenticated_roles),
615
- request: utils_1.DBOSJSON.parse(rows[0].request),
616
- executorId: rows[0].executor_id,
617
- createdAt: Number(rows[0].created_at),
618
- updatedAt: Number(rows[0].updated_at),
619
- applicationVersion: rows[0].application_version,
620
- applicationID: rows[0].application_id,
621
- recoveryAttempts: Number(rows[0].recovery_attempts),
622
- maxRetries: 0,
623
- };
624
- }
625
- return value ? JSON.stringify(value) : null;
626
- }, exports.DBOS_FUNCNAME_GETSTATUS, callerID, callerFN);
627
- return sv ? JSON.parse(sv) : null;
628
- }
629
- async awaitWorkflowResult(workflowID, timeoutms) {
630
- const pollingIntervalMs = 1000;
631
- const et = timeoutms !== undefined ? new Date().getTime() + timeoutms : undefined;
952
+ async awaitWorkflowResult(workflowID, timeoutSeconds, callerID, timerFuncID) {
953
+ const timeoutms = timeoutSeconds !== undefined ? timeoutSeconds * 1000 : undefined;
954
+ let finishTime = timeoutms !== undefined ? Date.now() + timeoutms : undefined;
632
955
  while (true) {
633
- const { rows } = await this.pool.query(`SELECT status, output, error FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status WHERE workflow_uuid=$1`, [workflowID]);
634
- if (rows.length > 0) {
635
- const status = rows[0].status;
636
- if (status === workflow_1.StatusString.SUCCESS) {
637
- return { res: rows[0].output };
956
+ let resolveNotification;
957
+ const statusPromise = new Promise((resolve) => {
958
+ resolveNotification = resolve;
959
+ });
960
+ const irh = this.cancelWakeupMap.registerCallback(workflowID, (_res) => {
961
+ resolveNotification();
962
+ });
963
+ const crh = callerID
964
+ ? this.cancelWakeupMap.registerCallback(callerID, (_res) => {
965
+ resolveNotification();
966
+ })
967
+ : undefined;
968
+ try {
969
+ if (callerID)
970
+ await this.checkIfCanceled(callerID);
971
+ try {
972
+ const { rows } = await this.pool.query(`SELECT status, output, error FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status
973
+ WHERE workflow_uuid=$1`, [workflowID]);
974
+ if (rows.length > 0) {
975
+ const status = rows[0].status;
976
+ if (status === workflow_1.StatusString.SUCCESS) {
977
+ return { output: rows[0].output };
978
+ }
979
+ else if (status === workflow_1.StatusString.ERROR) {
980
+ return { error: rows[0].error };
981
+ }
982
+ else if (status === workflow_1.StatusString.CANCELLED) {
983
+ return { cancelled: true };
984
+ }
985
+ else {
986
+ // Status is not actionable
987
+ }
988
+ }
638
989
  }
639
- else if (status === workflow_1.StatusString.ERROR) {
640
- return { err: rows[0].error };
990
+ catch (e) {
991
+ const err = e;
992
+ this.logger.error(`Exception from system database: ${err}`);
993
+ throw err;
641
994
  }
642
- }
643
- if (et !== undefined) {
644
- const ct = new Date().getTime();
645
- if (et > ct) {
646
- await (0, utils_1.sleepms)(Math.min(pollingIntervalMs, et - ct));
995
+ const ct = Date.now();
996
+ if (finishTime && ct > finishTime)
997
+ return undefined; // Time's up
998
+ let timeoutPromise = Promise.resolve();
999
+ let timeoutCancel = () => { };
1000
+ if (timerFuncID !== undefined && callerID !== undefined && timeoutms !== undefined) {
1001
+ const { promise, cancel, endTime } = await this.#durableSleep(callerID, timerFuncID, timeoutms, this.dbPollingIntervalResultMs);
1002
+ finishTime = endTime;
1003
+ timeoutPromise = promise;
1004
+ timeoutCancel = cancel;
647
1005
  }
648
1006
  else {
649
- break;
1007
+ let poll = finishTime ? finishTime - ct : this.dbPollingIntervalResultMs;
1008
+ poll = Math.min(this.dbPollingIntervalResultMs, poll);
1009
+ const { promise, cancel } = (0, utils_1.cancellableSleep)(poll);
1010
+ timeoutPromise = promise;
1011
+ timeoutCancel = cancel;
1012
+ }
1013
+ try {
1014
+ await Promise.race([statusPromise, timeoutPromise]);
1015
+ }
1016
+ finally {
1017
+ timeoutCancel();
650
1018
  }
651
1019
  }
652
- else {
653
- await (0, utils_1.sleepms)(pollingIntervalMs);
1020
+ finally {
1021
+ this.cancelWakeupMap.deregisterCallback(irh);
1022
+ if (crh)
1023
+ this.cancelWakeupMap.deregisterCallback(crh);
654
1024
  }
655
1025
  }
656
- return undefined;
657
1026
  }
658
1027
  /* BACKGROUND PROCESSES */
659
1028
  /**
660
1029
  * A background process that listens for notifications from Postgres then signals the appropriate
661
1030
  * workflow listener by resolving its promise.
662
1031
  */
663
- async listenForNotifications() {
1032
+ async #listenForNotifications() {
664
1033
  this.notificationsClient = await this.pool.connect();
665
1034
  await this.notificationsClient.query('LISTEN dbos_notifications_channel;');
666
1035
  await this.notificationsClient.query('LISTEN dbos_workflow_events_channel;');
667
1036
  const handler = (msg) => {
1037
+ if (!this.shouldUseDBNotifications)
1038
+ return; // Testing parameter
668
1039
  if (msg.channel === 'dbos_notifications_channel') {
669
- if (msg.payload && msg.payload in this.notificationsMap) {
670
- this.notificationsMap[msg.payload]();
1040
+ if (msg.payload) {
1041
+ this.notificationsMap.callCallbacks(msg.payload);
671
1042
  }
672
1043
  }
673
- else {
674
- if (msg.payload && msg.payload in this.workflowEventsMap) {
675
- this.workflowEventsMap[msg.payload]();
1044
+ else if (msg.channel === 'dbos_workflow_events_channel') {
1045
+ if (msg.payload) {
1046
+ this.workflowEventsMap.callCallbacks(msg.payload);
676
1047
  }
677
1048
  }
678
1049
  };
679
1050
  this.notificationsClient.on('notification', handler);
680
1051
  }
681
1052
  // Event dispatcher queries / updates
682
- async getEventDispatchState(svc, wfn, key) {
683
- const res = await this.pool.query(`
684
- SELECT *
685
- FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.event_dispatch_kv
686
- WHERE workflow_fn_name = $1
687
- AND service_name = $2
688
- AND key = $3;
689
- `, [wfn, svc, key]);
1053
+ async getEventDispatchState(service, workflowName, key) {
1054
+ const res = await this.pool.query(`SELECT * FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.event_dispatch_kv
1055
+ WHERE workflow_fn_name = $1 AND service_name = $2 AND key = $3;`, [workflowName, service, key]);
690
1056
  if (res.rows.length === 0)
691
1057
  return undefined;
692
1058
  return {
@@ -701,19 +1067,18 @@ class PostgresSystemDatabase {
701
1067
  };
702
1068
  }
703
1069
  async upsertEventDispatchState(state) {
704
- const res = await this.pool.query(`
705
- INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.event_dispatch_kv (
1070
+ const res = await this.pool.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.event_dispatch_kv (
706
1071
  service_name, workflow_fn_name, key, value, update_time, update_seq)
707
- VALUES ($1, $2, $3, $4, $5, $6)
708
- ON CONFLICT (service_name, workflow_fn_name, key)
709
- DO UPDATE SET
710
- update_time = GREATEST(EXCLUDED.update_time, event_dispatch_kv.update_time),
711
- update_seq = GREATEST(EXCLUDED.update_seq, event_dispatch_kv.update_seq),
712
- value = CASE WHEN (EXCLUDED.update_time > event_dispatch_kv.update_time OR EXCLUDED.update_seq > event_dispatch_kv.update_seq OR
713
- (event_dispatch_kv.update_time IS NULL and event_dispatch_kv.update_seq IS NULL))
714
- THEN EXCLUDED.value ELSE event_dispatch_kv.value END
715
- RETURNING value, update_time, update_seq;
716
- `, [state.service, state.workflowFnName, state.key, state.value, state.updateTime, state.updateSeq]);
1072
+ VALUES ($1, $2, $3, $4, $5, $6)
1073
+ ON CONFLICT (service_name, workflow_fn_name, key)
1074
+ DO UPDATE SET
1075
+ update_time = GREATEST(EXCLUDED.update_time, event_dispatch_kv.update_time),
1076
+ update_seq = GREATEST(EXCLUDED.update_seq, event_dispatch_kv.update_seq),
1077
+ value = CASE WHEN (EXCLUDED.update_time > event_dispatch_kv.update_time
1078
+ OR EXCLUDED.update_seq > event_dispatch_kv.update_seq
1079
+ OR (event_dispatch_kv.update_time IS NULL and event_dispatch_kv.update_seq IS NULL)
1080
+ ) THEN EXCLUDED.value ELSE event_dispatch_kv.value END
1081
+ RETURNING value, update_time, update_seq;`, [state.service, state.workflowFnName, state.key, state.value, state.updateTime, state.updateSeq]);
717
1082
  return {
718
1083
  service: state.service,
719
1084
  workflowFnName: state.workflowFnName,
@@ -725,29 +1090,35 @@ class PostgresSystemDatabase {
725
1090
  : undefined,
726
1091
  };
727
1092
  }
728
- async getWorkflows(input) {
1093
+ async listWorkflows(input) {
1094
+ const schemaName = dbos_executor_1.DBOSExecutor.systemDBSchemaName;
729
1095
  input.sortDesc = input.sortDesc ?? false; // By default, sort in ascending order
730
- let query = this.knexDB(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status`).orderBy('created_at', input.sortDesc ? 'desc' : 'asc');
1096
+ let query = this.knexDB(`${schemaName}.workflow_status`)
1097
+ .join(`${schemaName}.workflow_inputs`, `${schemaName}.workflow_status.workflow_uuid`, `${schemaName}.workflow_inputs.workflow_uuid`)
1098
+ .orderBy(`${schemaName}.workflow_status.created_at`, input.sortDesc ? 'desc' : 'asc');
731
1099
  if (input.workflowName) {
732
- query = query.where('name', input.workflowName);
1100
+ query = query.where(`${schemaName}.workflow_status.name`, input.workflowName);
1101
+ }
1102
+ if (input.workflow_id_prefix) {
1103
+ query = query.whereLike(`${schemaName}.workflow_status.workflow_uuid`, `${input.workflow_id_prefix}%`);
733
1104
  }
734
1105
  if (input.workflowIDs) {
735
- query = query.whereIn('workflow_uuid', input.workflowIDs);
1106
+ query = query.whereIn(`${schemaName}.workflow_status.workflow_uuid`, input.workflowIDs);
736
1107
  }
737
1108
  if (input.authenticatedUser) {
738
- query = query.where('authenticated_user', input.authenticatedUser);
1109
+ query = query.where(`${schemaName}.workflow_status.authenticated_user`, input.authenticatedUser);
739
1110
  }
740
1111
  if (input.startTime) {
741
- query = query.where('created_at', '>=', new Date(input.startTime).getTime());
1112
+ query = query.where(`${schemaName}.workflow_status.created_at`, '>=', new Date(input.startTime).getTime());
742
1113
  }
743
1114
  if (input.endTime) {
744
- query = query.where('created_at', '<=', new Date(input.endTime).getTime());
1115
+ query = query.where(`${schemaName}.workflow_status.created_at`, '<=', new Date(input.endTime).getTime());
745
1116
  }
746
1117
  if (input.status) {
747
- query = query.where('status', input.status);
1118
+ query = query.where(`${schemaName}.workflow_status.status`, input.status);
748
1119
  }
749
1120
  if (input.applicationVersion) {
750
- query = query.where('application_version', input.applicationVersion);
1121
+ query = query.where(`${schemaName}.workflow_status.application_version`, input.applicationVersion);
751
1122
  }
752
1123
  if (input.limit) {
753
1124
  query = query.limit(input.limit);
@@ -755,31 +1126,30 @@ class PostgresSystemDatabase {
755
1126
  if (input.offset) {
756
1127
  query = query.offset(input.offset);
757
1128
  }
758
- const rows = await query.select('workflow_uuid');
759
- const workflowUUIDs = rows.map((row) => row.workflow_uuid);
760
- return {
761
- workflowUUIDs: workflowUUIDs,
762
- };
1129
+ const rows = await query;
1130
+ return rows.map(mapWorkflowStatus);
763
1131
  }
764
- async getQueuedWorkflows(input) {
1132
+ async listQueuedWorkflows(input) {
1133
+ const schemaName = dbos_executor_1.DBOSExecutor.systemDBSchemaName;
765
1134
  const sortDesc = input.sortDesc ?? false; // By default, sort in ascending order
766
- let query = this.knexDB(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue`)
767
- .join(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status`, `${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue.workflow_uuid`, '=', `${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status.workflow_uuid`)
768
- .orderBy(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status.created_at`, sortDesc ? 'desc' : 'asc');
1135
+ let query = this.knexDB(`${schemaName}.workflow_queue`)
1136
+ .join(`${schemaName}.workflow_inputs`, `${schemaName}.workflow_queue.workflow_uuid`, `${schemaName}.workflow_inputs.workflow_uuid`)
1137
+ .join(`${schemaName}.workflow_status`, `${schemaName}.workflow_queue.workflow_uuid`, `${schemaName}.workflow_status.workflow_uuid`)
1138
+ .orderBy(`${schemaName}.workflow_status.created_at`, sortDesc ? 'desc' : 'asc');
769
1139
  if (input.workflowName) {
770
- query = query.whereRaw(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status.name = ?`, [input.workflowName]);
1140
+ query = query.whereRaw(`${schemaName}.workflow_status.name = ?`, [input.workflowName]);
771
1141
  }
772
1142
  if (input.queueName) {
773
- query = query.whereRaw(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status.queue_name = ?`, [input.queueName]);
1143
+ query = query.whereRaw(`${schemaName}.workflow_status.queue_name = ?`, [input.queueName]);
774
1144
  }
775
1145
  if (input.startTime) {
776
- query = query.where(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status.created_at`, '>=', new Date(input.startTime).getTime());
1146
+ query = query.where(`${schemaName}.workflow_status.created_at`, '>=', new Date(input.startTime).getTime());
777
1147
  }
778
1148
  if (input.endTime) {
779
- query = query.where(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status.created_at`, '<=', new Date(input.endTime).getTime());
1149
+ query = query.where(`${schemaName}.workflow_status.created_at`, '<=', new Date(input.endTime).getTime());
780
1150
  }
781
1151
  if (input.status) {
782
- query = query.whereRaw(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status.status = ?`, [input.status]);
1152
+ query = query.whereRaw(`${schemaName}.workflow_status.status = ?`, [input.status]);
783
1153
  }
784
1154
  if (input.limit) {
785
1155
  query = query.limit(input.limit);
@@ -787,11 +1157,8 @@ class PostgresSystemDatabase {
787
1157
  if (input.offset) {
788
1158
  query = query.offset(input.offset);
789
1159
  }
790
- const rows = await query.select(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status.workflow_uuid`);
791
- const workflowUUIDs = rows.map((row) => row.workflow_uuid);
792
- return {
793
- workflowUUIDs: workflowUUIDs,
794
- };
1160
+ const rows = await query;
1161
+ return rows.map(mapWorkflowStatus);
795
1162
  }
796
1163
  async getWorkflowQueue(input) {
797
1164
  // Create the initial query with a join to workflow_status table to get executor_id
@@ -833,29 +1200,29 @@ class PostgresSystemDatabase {
833
1200
  return { workflows };
834
1201
  }
835
1202
  async enqueueWorkflow(workflowId, queueName) {
836
- await this.pool.query(`
837
- INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue (workflow_uuid, queue_name)
838
- VALUES ($1, $2)
839
- ON CONFLICT (workflow_uuid)
840
- DO NOTHING;
841
- `, [workflowId, queueName]);
842
- }
843
- async clearQueueAssignment(workflowId) {
844
1203
  const client = await this.pool.connect();
845
1204
  try {
1205
+ await enqueueWorkflow(client, workflowId, queueName);
1206
+ }
1207
+ finally {
1208
+ client.release();
1209
+ }
1210
+ }
1211
+ async clearQueueAssignment(workflowID) {
1212
+ const client = await this.pool.connect();
1213
+ try {
1214
+ await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
846
1215
  // Reset the start time in the queue to mark it as not started
847
- const wqRes = await client.query(`
848
- UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue
849
- SET started_at_epoch_ms = NULL
850
- WHERE workflow_uuid = $1 AND completed_at_epoch_ms IS NULL;
851
- `, [workflowId]);
1216
+ const wqRes = await client.query(`UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue
1217
+ SET started_at_epoch_ms = NULL
1218
+ WHERE workflow_uuid = $1 AND completed_at_epoch_ms IS NULL;`, [workflowID]);
852
1219
  // If no rows were affected, the workflow is not anymore in the queue or was already completed
853
1220
  if (wqRes.rowCount === 0) {
854
1221
  await client.query('ROLLBACK');
855
1222
  return false;
856
1223
  }
857
1224
  // Reset the status of the task to "ENQUEUED"
858
- await this.recordWorkflowStatusChange(workflowId, workflow_1.StatusString.ENQUEUED, {}, client);
1225
+ await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ENQUEUED);
859
1226
  await client.query('COMMIT');
860
1227
  return true;
861
1228
  }
@@ -867,24 +1234,25 @@ class PostgresSystemDatabase {
867
1234
  client.release();
868
1235
  }
869
1236
  }
870
- async dequeueWorkflow(workflowId, queue) {
871
- if (queue.rateLimit) {
872
- const time = new Date().getTime();
873
- await this.pool.query(`
874
- UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue
875
- SET completed_at_epoch_ms = $2
876
- WHERE workflow_uuid = $1;
877
- `, [workflowId, time]);
1237
+ async dequeueWorkflow(workflowID, queue) {
1238
+ const client = await this.pool.connect();
1239
+ try {
1240
+ if (queue.rateLimit) {
1241
+ const time = Date.now();
1242
+ await client.query(`UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue
1243
+ SET completed_at_epoch_ms = $2
1244
+ WHERE workflow_uuid = $1;`, [workflowID, time]);
1245
+ }
1246
+ else {
1247
+ await deleteQueuedWorkflows(client, workflowID);
1248
+ }
878
1249
  }
879
- else {
880
- await this.pool.query(`
881
- DELETE FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue
882
- WHERE workflow_uuid = $1;
883
- `, [workflowId]);
1250
+ finally {
1251
+ client.release();
884
1252
  }
885
1253
  }
886
1254
  async findAndMarkStartableWorkflows(queue, executorID, appVersion) {
887
- const startTimeMs = new Date().getTime();
1255
+ const startTimeMs = Date.now();
888
1256
  const limiterPeriodMS = queue.rateLimit ? queue.rateLimit.periodSec * 1000 : 0;
889
1257
  const claimedIDs = [];
890
1258
  await this.knexDB.transaction(async (trx) => {