@dbos-inc/dbos-sdk 2.8.12-preview → 2.8.17-preview

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/dist/schemas/system_db_schema.d.ts +1 -0
  2. package/dist/schemas/system_db_schema.d.ts.map +1 -1
  3. package/dist/schemas/user_db_schema.d.ts +1 -0
  4. package/dist/schemas/user_db_schema.d.ts.map +1 -1
  5. package/dist/schemas/user_db_schema.js.map +1 -1
  6. package/dist/src/client.d.ts.map +1 -1
  7. package/dist/src/client.js +7 -6
  8. package/dist/src/client.js.map +1 -1
  9. package/dist/src/dbos-executor.d.ts +7 -4
  10. package/dist/src/dbos-executor.d.ts.map +1 -1
  11. package/dist/src/dbos-executor.js +126 -63
  12. package/dist/src/dbos-executor.js.map +1 -1
  13. package/dist/src/dbos-runtime/cli.d.ts.map +1 -1
  14. package/dist/src/dbos-runtime/cli.js +10 -0
  15. package/dist/src/dbos-runtime/cli.js.map +1 -1
  16. package/dist/src/dbos-runtime/config.d.ts.map +1 -1
  17. package/dist/src/dbos-runtime/config.js +4 -19
  18. package/dist/src/dbos-runtime/config.js.map +1 -1
  19. package/dist/src/dbos-runtime/docker_pg_helper.d.ts +21 -0
  20. package/dist/src/dbos-runtime/docker_pg_helper.d.ts.map +1 -0
  21. package/dist/src/dbos-runtime/docker_pg_helper.js +137 -0
  22. package/dist/src/dbos-runtime/docker_pg_helper.js.map +1 -0
  23. package/dist/src/dbos-runtime/migrate.d.ts.map +1 -1
  24. package/dist/src/dbos-runtime/migrate.js +1 -2
  25. package/dist/src/dbos-runtime/migrate.js.map +1 -1
  26. package/dist/src/dbos-runtime/runtime.d.ts.map +1 -1
  27. package/dist/src/dbos-runtime/runtime.js +0 -2
  28. package/dist/src/dbos-runtime/runtime.js.map +1 -1
  29. package/dist/src/dbos-runtime/workflow_management.d.ts +5 -4
  30. package/dist/src/dbos-runtime/workflow_management.d.ts.map +1 -1
  31. package/dist/src/dbos-runtime/workflow_management.js +14 -16
  32. package/dist/src/dbos-runtime/workflow_management.js.map +1 -1
  33. package/dist/src/dbos.d.ts +1 -0
  34. package/dist/src/dbos.d.ts.map +1 -1
  35. package/dist/src/dbos.js +41 -22
  36. package/dist/src/dbos.js.map +1 -1
  37. package/dist/src/error.d.ts +7 -0
  38. package/dist/src/error.d.ts.map +1 -1
  39. package/dist/src/error.js +15 -1
  40. package/dist/src/error.js.map +1 -1
  41. package/dist/src/eventreceiver.d.ts +1 -1
  42. package/dist/src/eventreceiver.d.ts.map +1 -1
  43. package/dist/src/httpServer/server.d.ts.map +1 -1
  44. package/dist/src/httpServer/server.js +8 -19
  45. package/dist/src/httpServer/server.js.map +1 -1
  46. package/dist/src/system_database.d.ts +84 -52
  47. package/dist/src/system_database.d.ts.map +1 -1
  48. package/dist/src/system_database.js +187 -290
  49. package/dist/src/system_database.js.map +1 -1
  50. package/dist/src/workflow.d.ts +3 -2
  51. package/dist/src/workflow.d.ts.map +1 -1
  52. package/dist/src/workflow.js +36 -26
  53. package/dist/src/workflow.js.map +1 -1
  54. package/dist/tsconfig.tsbuildinfo +1 -1
  55. package/package.json +1 -1
  56. package/dist/src/dbos-runtime/db_connection.d.ts +0 -10
  57. package/dist/src/dbos-runtime/db_connection.d.ts.map +0 -1
  58. package/dist/src/dbos-runtime/db_connection.js +0 -59
  59. package/dist/src/dbos-runtime/db_connection.js.map +0 -1
  60. package/dist/src/dbos-runtime/db_wizard.d.ts +0 -3
  61. package/dist/src/dbos-runtime/db_wizard.d.ts.map +0 -1
  62. package/dist/src/dbos-runtime/db_wizard.js +0 -170
  63. package/dist/src/dbos-runtime/db_wizard.js.map +0 -1
@@ -4,17 +4,20 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
  return (mod && mod.__esModule) ? mod : { "default": mod };
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.PostgresSystemDatabase = exports.migrateSystemDatabase = void 0;
- const serialize_error_1 = require("serialize-error");
+ exports.PostgresSystemDatabase = exports.migrateSystemDatabase = exports.DBOS_FUNCNAME_GETSTATUS = exports.DBOS_FUNCNAME_SLEEP = exports.DBOS_FUNCNAME_GETEVENT = exports.DBOS_FUNCNAME_SETEVENT = exports.DBOS_FUNCNAME_RECV = exports.DBOS_FUNCNAME_SEND = void 0;
  const dbos_executor_1 = require("./dbos-executor");
  const pg_1 = require("pg");
  const error_1 = require("./error");
  const workflow_1 = require("./workflow");
  const utils_1 = require("./utils");
- const context_1 = require("./context");
  const knex_1 = __importDefault(require("knex"));
  const path_1 = __importDefault(require("path"));
- const context_2 = require("./context");
+ exports.DBOS_FUNCNAME_SEND = 'DBOS.send';
+ exports.DBOS_FUNCNAME_RECV = 'DBOS.recv';
+ exports.DBOS_FUNCNAME_SETEVENT = 'DBOS.setEvent';
+ exports.DBOS_FUNCNAME_GETEVENT = 'DBOS.getEvent';
+ exports.DBOS_FUNCNAME_SLEEP = 'DBOS.sleep';
+ exports.DBOS_FUNCNAME_GETSTATUS = 'getStatus';
  async function migrateSystemDatabase(systemPoolConfig, logger) {
  const migrationsDirectory = path_1.default.join((0, utils_1.findPackageRoot)(__dirname), 'migrations');
  const knexConfig = {
@@ -201,83 +204,25 @@ class PostgresSystemDatabase {
  }
  return { args: utils_1.DBOSJSON.parse(rows[0].inputs), status };
  }
- async recordWorkflowOutput(workflowUUID, status) {
- await this.pool.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status (
- workflow_uuid,
- status,
- name,
- class_name,
- config_name,
- queue_name,
- authenticated_user,
- assumed_role,
- authenticated_roles,
- request,
- output,
- executor_id,
- application_id,
- application_version,
- created_at,
- updated_at
- ) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
- ON CONFLICT (workflow_uuid)
- DO UPDATE SET status=EXCLUDED.status, output=EXCLUDED.output, updated_at=EXCLUDED.updated_at;`, [
- workflowUUID,
- workflow_1.StatusString.SUCCESS,
- status.workflowName,
- status.workflowClassName,
- status.workflowConfigName,
- status.queueName,
- status.authenticatedUser,
- status.assumedRole,
- utils_1.DBOSJSON.stringify(status.authenticatedRoles),
- utils_1.DBOSJSON.stringify(status.request),
- utils_1.DBOSJSON.stringify(status.output),
- status.executorId,
- status.applicationID,
- status.applicationVersion,
- status.createdAt,
- Date.now(),
- ]);
+ async recordWorkflowStatusChange(workflowID, status, update, client) {
+ let rec = '';
+ if (update.resetRecoveryAttempts) {
+ rec = ' recovery_attempts = 0, ';
+ }
+ if (update.incrementRecoveryAttempts) {
+ rec = ' recovery_attempts = recovery_attempts + 1';
+ }
+ const wRes = await (client ?? this.pool).query(`UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status
+ SET ${rec} status=$2, output=$3, error=$4, updated_at=$5 WHERE workflow_uuid=$1`, [workflowID, status, update.output, update.error, Date.now()]);
+ if (wRes.rowCount !== 1) {
+ throw new error_1.DBOSWorkflowConflictUUIDError(`Attempt to record transition of nonexistent workflow ${workflowID}`);
+ }
  }
- async recordWorkflowError(workflowUUID, status) {
- await this.pool.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status (
- workflow_uuid,
- status,
- name,
- class_name,
- config_name,
- queue_name,
- authenticated_user,
- assumed_role,
- authenticated_roles,
- request,
- error,
- executor_id,
- application_id,
- application_version,
- created_at,
- updated_at
- ) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)
- ON CONFLICT (workflow_uuid)
- DO UPDATE SET status=EXCLUDED.status, error=EXCLUDED.error, updated_at=EXCLUDED.updated_at;`, [
- workflowUUID,
- workflow_1.StatusString.ERROR,
- status.workflowName,
- status.workflowClassName,
- status.workflowConfigName,
- status.queueName,
- status.authenticatedUser,
- status.assumedRole,
- utils_1.DBOSJSON.stringify(status.authenticatedRoles),
- utils_1.DBOSJSON.stringify(status.request),
- status.error,
- status.executorId,
- status.applicationID,
- status.applicationVersion,
- status.createdAt,
- Date.now(),
- ]);
+ async recordWorkflowOutput(workflowID, status) {
+ await this.recordWorkflowStatusChange(workflowID, workflow_1.StatusString.SUCCESS, { output: status.output });
+ }
+ async recordWorkflowError(workflowID, status) {
+ await this.recordWorkflowStatusChange(workflowID, workflow_1.StatusString.ERROR, { error: status.error });
  }
  async getPendingWorkflows(executorID, appVersion) {
  const getWorkflows = await this.pool.query(`SELECT workflow_uuid, queue_name FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status WHERE status=$1 AND executor_id=$2 AND application_version=$3`, [workflow_1.StatusString.PENDING, executorID, appVersion]);
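The hunk above replaces the per-outcome `INSERT ... ON CONFLICT` statements in `recordWorkflowOutput` and `recordWorkflowError` with a single `recordWorkflowStatusChange` that issues one UPDATE against `workflow_status` and treats a zero row count as a conflict. Below is a minimal standalone sketch of that pattern, not the shipped implementation: it assumes a `pg` pool, a `dbos.workflow_status` table with the columns shown in the diff, and hypothetical names (`recordStatusChangeSketch`, `StatusUpdate`). Note that the diff's `incrementRecoveryAttempts` branch builds its SET fragment without a trailing comma; the sketch adds one.

import { Pool, PoolClient } from 'pg';

interface StatusUpdate {
  output?: string | null;
  error?: string | null;
  resetRecoveryAttempts?: boolean;
  incrementRecoveryAttempts?: boolean;
}

// Issue one UPDATE that records the new status, output/error, and updated_at,
// optionally resetting or incrementing recovery_attempts in the same statement.
async function recordStatusChangeSketch(
  pool: Pool,
  workflowID: string,
  status: string,
  update: StatusUpdate,
  client?: PoolClient,
): Promise<void> {
  let rec = '';
  if (update.resetRecoveryAttempts) rec = ' recovery_attempts = 0, ';
  if (update.incrementRecoveryAttempts) rec = ' recovery_attempts = recovery_attempts + 1, ';
  const res = await (client ?? pool).query(
    `UPDATE dbos.workflow_status
       SET ${rec} status=$2, output=$3, error=$4, updated_at=$5
     WHERE workflow_uuid=$1`,
    [workflowID, status, update.output ?? null, update.error ?? null, Date.now()],
  );
  // The workflow row must already exist; updating zero rows signals a conflict.
  if (res.rowCount !== 1) {
    throw new Error(`Attempt to record transition of nonexistent workflow ${workflowID}`);
  }
}

With this in place, callers reduce to one-liners, e.g. `recordStatusChangeSketch(pool, id, 'SUCCESS', { output })` for success and `{ error }` for failure, mirroring the new `recordWorkflowOutput`/`recordWorkflowError` shown in the diff.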
@@ -286,152 +231,111 @@ class PostgresSystemDatabase {
  queueName: i.queue_name,
  }));
  }
- async getWorkflowInputs(workflowUUID) {
- const { rows } = await this.pool.query(`SELECT inputs FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_inputs WHERE workflow_uuid=$1`, [workflowUUID]);
+ async getWorkflowInputs(workflowID) {
+ const { rows } = await this.pool.query(`SELECT inputs FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_inputs WHERE workflow_uuid=$1`, [workflowID]);
  if (rows.length === 0) {
  return null;
  }
  return utils_1.DBOSJSON.parse(rows[0].inputs);
  }
- async checkOperationOutput(workflowUUID, functionID) {
- const { rows } = await this.pool.query(`SELECT output, error FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs WHERE workflow_uuid=$1 AND function_id=$2`, [workflowUUID, functionID]);
+ async getOperationResult(workflowID, functionID, client) {
+ const { rows } = await (client ?? this.pool).query(`SELECT output, error, child_workflow_id, function_name
+ FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs
+ WHERE workflow_uuid=$1 AND function_id=$2`, [workflowID, functionID]);
  if (rows.length === 0) {
- return dbos_executor_1.dbosNull;
- }
- else if (utils_1.DBOSJSON.parse(rows[0].error) !== null) {
- throw (0, serialize_error_1.deserializeError)(utils_1.DBOSJSON.parse(rows[0].error));
+ return {};
  }
  else {
- return utils_1.DBOSJSON.parse(rows[0].output);
- }
- }
- async checkChildWorkflow(workflowUUID, functionID) {
- const { rows } = await this.pool.query(`SELECT child_workflow_id FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs WHERE workflow_uuid=$1 AND function_id=$2`, [workflowUUID, functionID]);
- if (rows.length > 0) {
- return rows[0].child_workflow_id;
- }
- else {
- return null;
+ return {
+ res: {
+ res: rows[0].output,
+ err: rows[0].error,
+ child: rows[0].child_workflow_id,
+ functionName: rows[0].function_name,
+ },
+ };
  }
  }
- async getWorkflowSteps(workflowUUID) {
- const { rows } = await this.pool.query(`SELECT function_id, function_name, output, error, child_workflow_id FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs WHERE workflow_uuid=$1`, [workflowUUID]);
- for (const row of rows) {
- row.output = row.output !== null ? utils_1.DBOSJSON.parse(row.output) : null;
- row.error = row.error !== null ? (0, serialize_error_1.deserializeError)(utils_1.DBOSJSON.parse(row.error)) : null;
- }
+ async getAllOperationResults(workflowID) {
+ const { rows } = await this.pool.query(`SELECT * FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs WHERE workflow_uuid=$1`, [workflowID]);
  return rows;
  }
- async recordOperationOutput(workflowUUID, functionID, output, functionName) {
- const serialOutput = utils_1.DBOSJSON.stringify(output);
+ async recordOperationResult(workflowID, functionID, rec, checkConflict, client) {
  try {
- await this.pool.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs (workflow_uuid, function_id, output, function_name) VALUES ($1, $2, $3, $4);`, [workflowUUID, functionID, serialOutput, functionName]);
+ await (client ?? this.pool).query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs
+ (workflow_uuid, function_id, output, error, function_name, child_workflow_id)
+ VALUES ($1, $2, $3, $4, $5, $6)
+ ${checkConflict ? '' : ' ON CONFLICT DO NOTHING'}
+ ;`, [
+ workflowID,
+ functionID,
+ rec.serialOutput ?? null,
+ rec.serialError ?? null,
+ rec.functionName,
+ rec.childWfId ?? null,
+ ]);
  }
  catch (error) {
  const err = error;
  if (err.code === '40001' || err.code === '23505') {
  // Serialization and primary key conflict (Postgres).
- throw new error_1.DBOSWorkflowConflictUUIDError(workflowUUID);
+ throw new error_1.DBOSWorkflowConflictUUIDError(workflowID);
  }
  else {
  throw err;
  }
  }
  }
- async recordGetResult(resultWorkflowID, output, error) {
- const ctx = (0, context_1.getCurrentContextStore)();
- // Only record getResult called in workflow functions
- if (ctx === undefined || !(0, context_1.isInWorkflowCtx)(ctx)) {
- return;
- }
- // Record getResult as a step
- const functionID = (0, context_1.assertCurrentWorkflowContext)().functionIDGetIncrement();
- // Because there's no corresponding check, we do nothing on conflict
- // and do not raise a DBOSWorkflowConflictUUIDError
- await this.pool.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs (workflow_uuid, function_id, output, error, child_workflow_id, function_name) VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT DO NOTHING;`, [ctx.workflowId, functionID, output, error, resultWorkflowID, 'DBOS.getResult']);
- }
- async recordOperationError(workflowUUID, functionID, error, functionName) {
- const serialErr = utils_1.DBOSJSON.stringify((0, serialize_error_1.serializeError)(error));
- try {
- await this.pool.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs (workflow_uuid, function_id, error, function_name) VALUES ($1, $2, $3, $4);`, [workflowUUID, functionID, serialErr, functionName]);
- }
- catch (error) {
- const err = error;
- if (err.code === '40001' || err.code === '23505') {
- // Serialization and primary key conflict (Postgres).
- throw new error_1.DBOSWorkflowConflictUUIDError(workflowUUID);
- }
- else {
- throw err;
+ async runAsStep(callback, functionName, workflowID, functionID, client) {
+ if (workflowID !== undefined && functionID !== undefined) {
+ const res = await this.getOperationResult(workflowID, functionID, client);
+ if (res.res !== undefined) {
+ if (res.res.functionName !== functionName) {
+ throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, functionName, res.res.functionName);
+ }
+ await client?.query('ROLLBACK');
+ return res.res.res;
  }
  }
- }
- async recordChildWorkflow(parentUUID, childUUID, functionID, functionName) {
- try {
- await this.pool.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs (workflow_uuid, function_id, function_name, child_workflow_id) VALUES ($1, $2, $3, $4);`, [parentUUID, functionID, functionName, childUUID]);
- }
- catch (error) {
- const err = error;
- if (err.code === '40001' || err.code === '23505') {
- // Serialization and primary key conflict (Postgres).
- throw new error_1.DBOSWorkflowConflictUUIDError(parentUUID);
- }
- else {
- throw err;
- }
+ const serialOutput = await callback();
+ if (workflowID !== undefined && functionID !== undefined) {
+ await this.recordOperationResult(workflowID, functionID, { serialOutput, functionName }, true, client);
  }
+ return serialOutput;
  }
- /**
- * Guard the operation, throwing an error if a conflicting execution is detected.
- */
- async recordNotificationOutput(client, workflowUUID, functionID, output, functionName) {
- try {
- await client.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs (workflow_uuid, function_id, output, function_name) VALUES ($1, $2, $3, $4);`, [workflowUUID, functionID, utils_1.DBOSJSON.stringify(output), functionName]);
- }
- catch (error) {
- const err = error;
- if (err.code === '40001' || err.code === '23505') {
- // Serialization and primary key conflict (Postgres).
- throw new error_1.DBOSWorkflowConflictUUIDError(workflowUUID);
- }
- else {
- throw err;
+ async durableSleepms(workflowID, functionID, durationMS) {
+ const curTime = Date.now();
+ let endTimeMs = curTime + durationMS;
+ const res = await this.getOperationResult(workflowID, functionID);
+ if (res.res !== undefined) {
+ if (res.res.functionName !== exports.DBOS_FUNCNAME_SLEEP) {
+ throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, exports.DBOS_FUNCNAME_SLEEP, res.res.functionName);
  }
- }
- }
- async durableSleepms(workflowUUID, functionID, durationMS) {
- const { rows } = await this.pool.query(`SELECT output FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs WHERE workflow_uuid=$1 AND function_id=$2`, [workflowUUID, functionID]);
- if (rows.length > 0) {
- const endTimeMs = utils_1.DBOSJSON.parse(rows[0].output);
- return (0, utils_1.cancellableSleep)(Math.max(endTimeMs - Date.now(), 0));
+ endTimeMs = JSON.parse(res.res.res);
  }
  else {
- const endTimeMs = Date.now() + durationMS;
- await this.pool.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs (workflow_uuid, function_id, output, function_name) VALUES ($1, $2, $3, $4) ON CONFLICT DO NOTHING;`, [workflowUUID, functionID, utils_1.DBOSJSON.stringify(endTimeMs), 'DBOS.sleep']);
- return (0, utils_1.cancellableSleep)(Math.max(endTimeMs - Date.now(), 0));
+ await this.recordOperationResult(workflowID, functionID, { serialOutput: JSON.stringify(endTimeMs), functionName: exports.DBOS_FUNCNAME_SLEEP }, false);
  }
+ return (0, utils_1.cancellableSleep)(Math.max(endTimeMs - curTime, 0));
  }
  nullTopic = '__null__topic__';
- async send(workflowUUID, functionID, destinationUUID, message, topic) {
+ async send(workflowID, functionID, destinationID, message, topic) {
  topic = topic ?? this.nullTopic;
  const client = await this.pool.connect();
  await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
  try {
- const { rows } = await client.query(`SELECT output FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs WHERE workflow_uuid=$1 AND function_id=$2`, [workflowUUID, functionID]);
- if (rows.length > 0) {
- await client.query('ROLLBACK');
- return;
- }
- await client.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.notifications (destination_uuid, topic, message) VALUES ($1, $2, $3);`, [destinationUUID, topic, utils_1.DBOSJSON.stringify(message)]);
- await this.recordNotificationOutput(client, workflowUUID, functionID, undefined, 'DBOS.send');
- await client.query('COMMIT');
+ await this.runAsStep(async () => {
+ await client.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.notifications (destination_uuid, topic, message) VALUES ($1, $2, $3);`, [destinationID, topic, message]);
+ await client.query('COMMIT');
+ return undefined;
+ }, exports.DBOS_FUNCNAME_SEND, workflowID, functionID, client);
  }
  catch (error) {
  await client.query('ROLLBACK');
  const err = error;
  if (err.code === '23503') {
  // Foreign key constraint violation (only expected for the INSERT query)
- throw new error_1.DBOSNonExistentWorkflowError(`Sent to non-existent destination workflow UUID: ${destinationUUID}`);
+ throw new error_1.DBOSNonExistentWorkflowError(`Sent to non-existent destination workflow UUID: ${destinationID}`);
  }
  else {
  throw err;
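The hunk above collapses the old per-call checkpoint helpers (`recordGetResult`, `recordOperationError`, `recordChildWorkflow`, `recordNotificationOutput`) into one `runAsStep` wrapper: look up a prior `operation_outputs` row for the (workflow, function) pair, verify the recorded function name, and either return the checkpointed result or run the callback and record its serialized output. A minimal standalone sketch of that checkpointing pattern follows; it assumes a `pg` pool, a `dbos.operation_outputs` table as shown in the diff, and a hypothetical `runAsStepSketch` helper, and it is not the SDK API.

import { Pool } from 'pg';

// Check for a checkpoint at (workflowID, functionID); if one exists and the recorded
// function name matches, return the stored result without re-running the callback.
// Otherwise run the callback and persist its serialized output as the checkpoint.
async function runAsStepSketch(
  pool: Pool,
  callback: () => Promise<string | undefined>,
  functionName: string,
  workflowID?: string,
  functionID?: number,
): Promise<string | undefined> {
  if (workflowID !== undefined && functionID !== undefined) {
    const { rows } = await pool.query(
      `SELECT output, function_name FROM dbos.operation_outputs
        WHERE workflow_uuid=$1 AND function_id=$2`,
      [workflowID, functionID],
    );
    if (rows.length > 0) {
      if (rows[0].function_name !== functionName) {
        // The replay reached a different step at this position: fail loudly.
        throw new Error(
          `Expected step ${functionName} at function ${functionID}, found ${rows[0].function_name}`,
        );
      }
      return rows[0].output; // Checkpointed result; skip re-execution.
    }
  }
  const serialOutput = await callback();
  if (workflowID !== undefined && functionID !== undefined) {
    await pool.query(
      `INSERT INTO dbos.operation_outputs (workflow_uuid, function_id, output, function_name)
       VALUES ($1, $2, $3, $4)`,
      [workflowID, functionID, serialOutput ?? null, functionName],
    );
  }
  return serialOutput;
}

In the diff, `send`, `setEvent`, and `getWorkflowStatusInternal` all route their work through this wrapper, passing an open client where the write has to share a transaction and rolling back when a checkpoint is found.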
@@ -441,27 +345,30 @@ class PostgresSystemDatabase {
  client.release();
  }
  }
- async recv(workflowUUID, functionID, timeoutFunctionID, topic, timeoutSeconds = dbos_executor_1.DBOSExecutor.defaultNotificationTimeoutSec) {
+ async recv(workflowID, functionID, timeoutFunctionID, topic, timeoutSeconds = dbos_executor_1.DBOSExecutor.defaultNotificationTimeoutSec) {
  topic = topic ?? this.nullTopic;
  // First, check for previous executions.
- const checkRows = (await this.pool.query(`SELECT output FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs WHERE workflow_uuid=$1 AND function_id=$2`, [workflowUUID, functionID])).rows;
- if (checkRows.length > 0) {
- return utils_1.DBOSJSON.parse(checkRows[0].output);
+ const res = await this.getOperationResult(workflowID, functionID);
+ if (res.res) {
+ if (res.res.functionName !== exports.DBOS_FUNCNAME_RECV) {
+ throw new error_1.DBOSUnexpectedStepError(workflowID, functionID, exports.DBOS_FUNCNAME_RECV, res.res.functionName);
+ }
+ return res.res.res;
  }
  // Check if the key is already in the DB, then wait for the notification if it isn't.
- const initRecvRows = (await this.pool.query(`SELECT topic FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.notifications WHERE destination_uuid=$1 AND topic=$2;`, [workflowUUID, topic])).rows;
+ const initRecvRows = (await this.pool.query(`SELECT topic FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.notifications WHERE destination_uuid=$1 AND topic=$2;`, [workflowID, topic])).rows;
  if (initRecvRows.length === 0) {
  // Then, register the key with the global notifications listener.
  let resolveNotification;
  const messagePromise = new Promise((resolve) => {
  resolveNotification = resolve;
  });
- const payload = `${workflowUUID}::${topic}`;
+ const payload = `${workflowID}::${topic}`;
  this.notificationsMap[payload] = resolveNotification; // The resolver assignment in the Promise definition runs synchronously.
  let timeoutPromise = Promise.resolve();
  let timeoutCancel = () => { };
  try {
- const { promise, cancel } = await this.durableSleepms(workflowUUID, timeoutFunctionID, timeoutSeconds * 1000);
+ const { promise, cancel } = await this.durableSleepms(workflowID, timeoutFunctionID, timeoutSeconds * 1000);
  timeoutPromise = promise;
  timeoutCancel = cancel;
  }
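The `recv` timeout above now goes through `durableSleepms`, which checkpoints the wake-up time under `DBOS.sleep` so a replayed workflow sleeps only for the remaining duration rather than restarting the timer. Below is a standalone sketch of that idea, assuming a `pg` pool, the `dbos.operation_outputs` table shown in the diff, and a hypothetical `durableSleepSketch` helper whose cancel simply resolves the pending timer (the SDK's `cancellableSleep` may differ in detail).

import { Pool } from 'pg';

// Compute the wake-up time once, checkpoint it under 'DBOS.sleep', and on replay
// reuse the recorded deadline so the remaining sleep shrinks instead of restarting.
async function durableSleepSketch(
  pool: Pool,
  workflowID: string,
  functionID: number,
  durationMS: number,
): Promise<{ promise: Promise<void>; cancel: () => void }> {
  const curTime = Date.now();
  let endTimeMs = curTime + durationMS;
  const { rows } = await pool.query(
    `SELECT output, function_name FROM dbos.operation_outputs
      WHERE workflow_uuid=$1 AND function_id=$2`,
    [workflowID, functionID],
  );
  if (rows.length > 0) {
    if (rows[0].function_name !== 'DBOS.sleep') {
      throw new Error(`Expected DBOS.sleep at function ${functionID}, found ${rows[0].function_name}`);
    }
    endTimeMs = JSON.parse(rows[0].output); // Replay: reuse the recorded wake-up time.
  } else {
    await pool.query(
      `INSERT INTO dbos.operation_outputs (workflow_uuid, function_id, output, function_name)
       VALUES ($1, $2, $3, $4) ON CONFLICT DO NOTHING`,
      [workflowID, functionID, JSON.stringify(endTimeMs), 'DBOS.sleep'],
    );
  }
  // Cancellable sleep until the recorded deadline.
  let timer: NodeJS.Timeout | undefined;
  let wake: () => void = () => {};
  const promise = new Promise<void>((resolve) => {
    wake = resolve;
    timer = setTimeout(resolve, Math.max(endTimeMs - curTime, 0));
  });
  return {
    promise,
    cancel: () => {
      if (timer !== undefined) clearTimeout(timer);
      wake(); // Resolve early so callers racing on the promise are released.
    },
  };
}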
@@ -498,11 +405,11 @@ class PostgresSystemDatabase {
  WHERE notifications.destination_uuid = oldest_entry.destination_uuid
  AND notifications.topic = oldest_entry.topic
  AND notifications.created_at_epoch_ms = oldest_entry.created_at_epoch_ms
- RETURNING notifications.*;`, [workflowUUID, topic])).rows;
+ RETURNING notifications.*;`, [workflowID, topic])).rows;
  if (finalRecvRows.length > 0) {
- message = utils_1.DBOSJSON.parse(finalRecvRows[0].message);
+ message = finalRecvRows[0].message;
  }
- await this.recordNotificationOutput(client, workflowUUID, functionID, message, 'DBOS.recv');
+ await this.recordOperationResult(workflowID, functionID, { serialOutput: message, functionName: exports.DBOS_FUNCNAME_RECV }, true, client);
  await client.query(`COMMIT`);
  }
  catch (e) {
@@ -515,22 +422,19 @@ class PostgresSystemDatabase {
  }
  return message;
  }
- async setEvent(workflowUUID, functionID, key, message) {
+ async setEvent(workflowID, functionID, key, message) {
  const client = await this.pool.connect();
  try {
  await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
- let { rows } = await client.query(`SELECT output FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs WHERE workflow_uuid=$1 AND function_id=$2`, [workflowUUID, functionID]);
- if (rows.length > 0) {
- await client.query('ROLLBACK');
- return;
- }
- ({ rows } = await client.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_events (workflow_uuid, key, value)
- VALUES ($1, $2, $3)
- ON CONFLICT (workflow_uuid, key)
- DO UPDATE SET value = $3
- RETURNING workflow_uuid;`, [workflowUUID, key, utils_1.DBOSJSON.stringify(message)]));
- await this.recordNotificationOutput(client, workflowUUID, functionID, undefined, 'DBOS.setEvent');
- await client.query('COMMIT');
+ await this.runAsStep(async () => {
+ await client.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_events (workflow_uuid, key, value)
+ VALUES ($1, $2, $3)
+ ON CONFLICT (workflow_uuid, key)
+ DO UPDATE SET value = $3
+ RETURNING workflow_uuid;`, [workflowID, key, message]);
+ await client.query('COMMIT');
+ return undefined;
+ }, exports.DBOS_FUNCNAME_SETEVENT, workflowID, functionID, client);
  }
  catch (e) {
  this.logger.error(e);
@@ -541,20 +445,20 @@ class PostgresSystemDatabase {
  client.release();
  }
  }
- async getEvent(workflowUUID, key, timeoutSeconds, callerWorkflow) {
+ async getEvent(workflowID, key, timeoutSeconds, callerWorkflow) {
  // Check if the operation has been done before for OAOO (only do this inside a workflow).
  if (callerWorkflow) {
- const { rows } = await this.pool.query(`
- SELECT output
- FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs
- WHERE workflow_uuid=$1 AND function_id=$2`, [callerWorkflow.workflowUUID, callerWorkflow.functionID]);
- if (rows.length > 0) {
- return utils_1.DBOSJSON.parse(rows[0].output);
+ const res = await this.getOperationResult(callerWorkflow.workflowID, callerWorkflow.functionID);
+ if (res.res !== undefined) {
+ if (res.res.functionName !== exports.DBOS_FUNCNAME_GETEVENT) {
+ throw new error_1.DBOSUnexpectedStepError(callerWorkflow.workflowID, callerWorkflow.functionID, exports.DBOS_FUNCNAME_GETEVENT, res.res.functionName);
+ }
+ return res.res.res;
  }
  }
  // Get the return the value. if it's in the DB, otherwise return null.
  let value = null;
- const payloadKey = `${workflowUUID}::${key}`;
+ const payloadKey = `${workflowID}::${key}`;
  // Register the key with the global notifications listener first... we do not want to look in the DB first
  // or that would cause a timing hole.
  let resolveNotification;
@@ -567,9 +471,9 @@ class PostgresSystemDatabase {
  const initRecvRows = (await this.pool.query(`
  SELECT key, value
  FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_events
- WHERE workflow_uuid=$1 AND key=$2;`, [workflowUUID, key])).rows;
+ WHERE workflow_uuid=$1 AND key=$2;`, [workflowID, key])).rows;
  if (initRecvRows.length > 0) {
- value = utils_1.DBOSJSON.parse(initRecvRows[0].value);
+ value = initRecvRows[0].value;
  }
  else {
  // If we have a callerWorkflow, we want a durable sleep, otherwise, not
@@ -577,7 +481,7 @@ class PostgresSystemDatabase {
  let timeoutCancel = () => { };
  if (callerWorkflow) {
  try {
- const { promise, cancel } = await this.durableSleepms(callerWorkflow.workflowUUID, callerWorkflow.timeoutFunctionID ?? -1, timeoutSeconds * 1000);
+ const { promise, cancel } = await this.durableSleepms(callerWorkflow.workflowID, callerWorkflow.timeoutFunctionID ?? -1, timeoutSeconds * 1000);
  timeoutPromise = promise;
  timeoutCancel = cancel;
  }
@@ -601,9 +505,9 @@ class PostgresSystemDatabase {
  const finalRecvRows = (await this.pool.query(`
  SELECT value
  FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_events
- WHERE workflow_uuid=$1 AND key=$2;`, [workflowUUID, key])).rows;
+ WHERE workflow_uuid=$1 AND key=$2;`, [workflowID, key])).rows;
  if (finalRecvRows.length > 0) {
- value = utils_1.DBOSJSON.parse(finalRecvRows[0].value);
+ value = finalRecvRows[0].value;
  }
  }
  }
@@ -612,26 +516,25 @@ class PostgresSystemDatabase {
  }
  // Record the output if it is inside a workflow.
  if (callerWorkflow) {
- await this.recordOperationOutput(callerWorkflow.workflowUUID, callerWorkflow.functionID, value, 'DBOS.getEvent');
+ await this.recordOperationResult(callerWorkflow.workflowID, callerWorkflow.functionID, {
+ serialOutput: value,
+ functionName: exports.DBOS_FUNCNAME_GETEVENT,
+ }, true);
  }
  return value;
  }
- async setWorkflowStatus(workflowUUID, status, resetRecoveryAttempts) {
- await this.pool.query(`UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status SET status=$1 WHERE workflow_uuid=$2`, [status, workflowUUID]);
- if (resetRecoveryAttempts) {
- await this.pool.query(`UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status SET recovery_attempts=0 WHERE workflow_uuid=$1`, [workflowUUID]);
- }
+ async setWorkflowStatus(workflowID, status, resetRecoveryAttempts) {
+ await this.recordWorkflowStatusChange(workflowID, status, { resetRecoveryAttempts });
  }
- async cancelWorkflow(workflowUUID) {
+ async cancelWorkflow(workflowID) {
  const client = await this.pool.connect();
  try {
  await client.query('BEGIN');
  // Remove workflow from queues table
  await client.query(`DELETE FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue
- WHERE workflow_uuid = $1`, [workflowUUID]);
- await client.query(`UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status
- SET status = $1
- WHERE workflow_uuid = $2`, [workflow_1.StatusString.CANCELLED, workflowUUID]);
+ WHERE workflow_uuid = $1`, [workflowID]);
+ // Should we check if it is incomplete first?
+ await this.recordWorkflowStatusChange(workflowID, workflow_1.StatusString.CANCELLED, {}, client);
  await client.query('COMMIT');
  }
  catch (error) {
@@ -642,13 +545,13 @@ class PostgresSystemDatabase {
  client.release();
  }
  }
- async resumeWorkflow(workflowUUID) {
+ async resumeWorkflow(workflowID) {
  const client = await this.pool.connect();
  try {
  await client.query('BEGIN');
  // Check workflow status. If it is complete, do nothing.
  const statusResult = await client.query(`SELECT status FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status
- WHERE workflow_uuid = $1`, [workflowUUID]);
+ WHERE workflow_uuid = $1`, [workflowID]);
  if (statusResult.rows.length === 0 ||
  statusResult.rows[0].status === workflow_1.StatusString.SUCCESS ||
  statusResult.rows[0].status === workflow_1.StatusString.ERROR) {
@@ -657,11 +560,9 @@ class PostgresSystemDatabase {
  }
  // Remove the workflow from the queues table so resume can safely be called on an ENQUEUED workflow
  await client.query(`DELETE FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue
- WHERE workflow_uuid = $1`, [workflowUUID]);
+ WHERE workflow_uuid = $1`, [workflowID]);
  // Update status to pending and reset recovery attempts
- await client.query(`UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status
- SET status = $1, recovery_attempts = 0
- WHERE workflow_uuid = $2`, [workflow_1.StatusString.PENDING, workflowUUID]);
+ await this.recordWorkflowStatusChange(workflowID, workflow_1.StatusString.PENDING, { resetRecoveryAttempts: true }, client);
  await client.query('COMMIT');
  }
  catch (error) {
@@ -672,8 +573,8 @@ class PostgresSystemDatabase {
  client.release();
  }
  }
- async getWorkflowStatus(workflowUUID, callerUUID) {
- const internalStatus = await this.getWorkflowStatusInternal(workflowUUID, callerUUID);
+ async getWorkflowStatus(workflowID, callerID, callerFN) {
+ const internalStatus = await this.getWorkflowStatusInternal(workflowID, callerID, callerFN);
  if (internalStatus === null) {
  return null;
  }
@@ -690,63 +591,66 @@ class PostgresSystemDatabase {
  executorId: internalStatus.executorId,
  };
  }
- async getWorkflowStatusInternal(workflowUUID, callerUUID) {
+ async getWorkflowStatusInternal(workflowID, callerID, callerFN) {
  // Check if the operation has been done before for OAOO (only do this inside a workflow).
- const wfctx = (0, context_2.getCurrentDBOSContext)();
- let newfunctionId = undefined;
- if (callerUUID !== undefined && wfctx !== undefined) {
- newfunctionId = wfctx.functionIDGetIncrement();
- const { rows } = await this.pool.query(`SELECT output FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs WHERE workflow_uuid=$1 AND function_id=$2 AND function_name=$3`, [callerUUID, newfunctionId, 'getStatus']);
+ const sv = await this.runAsStep(async () => {
+ const { rows } = await this.pool.query(`SELECT workflow_uuid, status, name, class_name, config_name, authenticated_user, assumed_role, authenticated_roles, request, queue_name, executor_id, created_at, updated_at, application_version, application_id, recovery_attempts FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status WHERE workflow_uuid=$1`, [workflowID]);
+ let value = null;
  if (rows.length > 0) {
- return utils_1.DBOSJSON.parse(rows[0].output);
- }
- }
- const { rows } = await this.pool.query(`SELECT workflow_uuid, status, name, class_name, config_name, authenticated_user, assumed_role, authenticated_roles, request, queue_name, executor_id, created_at, updated_at, application_version, application_id, recovery_attempts FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status WHERE workflow_uuid=$1`, [workflowUUID]);
- let value = null;
- if (rows.length > 0) {
- value = {
- workflowUUID: rows[0].workflow_uuid,
- status: rows[0].status,
- workflowName: rows[0].name,
- output: undefined,
- error: '',
- workflowClassName: rows[0].class_name || '',
- workflowConfigName: rows[0].config_name || '',
- queueName: rows[0].queue_name || undefined,
- authenticatedUser: rows[0].authenticated_user,
- assumedRole: rows[0].assumed_role,
- authenticatedRoles: utils_1.DBOSJSON.parse(rows[0].authenticated_roles),
- request: utils_1.DBOSJSON.parse(rows[0].request),
- executorId: rows[0].executor_id,
- createdAt: Number(rows[0].created_at),
- updatedAt: Number(rows[0].updated_at),
- applicationVersion: rows[0].application_version,
- applicationID: rows[0].application_id,
- recoveryAttempts: Number(rows[0].recovery_attempts),
- maxRetries: 0,
- };
- }
- // Record the output if it is inside a workflow.
- if (callerUUID !== undefined && newfunctionId !== undefined) {
- await this.recordOperationOutput(callerUUID, newfunctionId, value, 'getStatus');
- }
- return value;
- }
- async getWorkflowResult(workflowUUID) {
+ value = {
+ workflowUUID: rows[0].workflow_uuid,
+ status: rows[0].status,
+ workflowName: rows[0].name,
+ output: null,
+ error: null,
+ workflowClassName: rows[0].class_name || '',
+ workflowConfigName: rows[0].config_name || '',
+ queueName: rows[0].queue_name || undefined,
+ authenticatedUser: rows[0].authenticated_user,
+ assumedRole: rows[0].assumed_role,
+ authenticatedRoles: utils_1.DBOSJSON.parse(rows[0].authenticated_roles),
+ request: utils_1.DBOSJSON.parse(rows[0].request),
+ executorId: rows[0].executor_id,
+ createdAt: Number(rows[0].created_at),
+ updatedAt: Number(rows[0].updated_at),
+ applicationVersion: rows[0].application_version,
+ applicationID: rows[0].application_id,
+ recoveryAttempts: Number(rows[0].recovery_attempts),
+ maxRetries: 0,
+ };
+ }
+ return value ? JSON.stringify(value) : null;
+ }, exports.DBOS_FUNCNAME_GETSTATUS, callerID, callerFN);
+ return sv ? JSON.parse(sv) : null;
+ }
+ async awaitWorkflowResult(workflowID, timeoutms) {
  const pollingIntervalMs = 1000;
+ const et = timeoutms !== undefined ? new Date().getTime() + timeoutms : undefined;
  while (true) {
- const { rows } = await this.pool.query(`SELECT status, output, error FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status WHERE workflow_uuid=$1`, [workflowUUID]);
+ const { rows } = await this.pool.query(`SELECT status, output, error FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status WHERE workflow_uuid=$1`, [workflowID]);
  if (rows.length > 0) {
  const status = rows[0].status;
  if (status === workflow_1.StatusString.SUCCESS) {
- return utils_1.DBOSJSON.parse(rows[0].output);
+ return { res: rows[0].output };
  }
  else if (status === workflow_1.StatusString.ERROR) {
- throw (0, serialize_error_1.deserializeError)(utils_1.DBOSJSON.parse(rows[0].error));
+ return { err: rows[0].error };
+ }
+ }
+ if (et !== undefined) {
+ const ct = new Date().getTime();
+ if (et > ct) {
+ await (0, utils_1.sleepms)(Math.min(pollingIntervalMs, et - ct));
+ }
+ else {
+ break;
  }
  }
- await (0, utils_1.sleepms)(pollingIntervalMs);
+ else {
+ await (0, utils_1.sleepms)(pollingIntervalMs);
+ }
  }
+ return undefined;
  }
  /* BACKGROUND PROCESSES */
  /**
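The hunk above replaces `getWorkflowResult` with `awaitWorkflowResult`, which polls `workflow_status` until the workflow reaches a terminal state, honors an optional timeout, and returns the still-serialized output or error rather than deserializing or throwing. A standalone sketch of that polling loop follows, assuming a `pg` pool, the `dbos.workflow_status` table shown in the diff, terminal status strings 'SUCCESS'/'ERROR', and a hypothetical `awaitWorkflowResultSketch` helper.

import { Pool } from 'pg';

const sleepms = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

// Poll workflow_status until the workflow is SUCCESS or ERROR, or until the optional
// timeout elapses; return the still-serialized output/error, or undefined on timeout.
async function awaitWorkflowResultSketch(
  pool: Pool,
  workflowID: string,
  timeoutms?: number,
): Promise<{ res?: string; err?: string } | undefined> {
  const pollingIntervalMs = 1000;
  const deadline = timeoutms !== undefined ? Date.now() + timeoutms : undefined;
  for (;;) {
    const { rows } = await pool.query(
      `SELECT status, output, error FROM dbos.workflow_status WHERE workflow_uuid=$1`,
      [workflowID],
    );
    if (rows.length > 0) {
      if (rows[0].status === 'SUCCESS') return { res: rows[0].output };
      if (rows[0].status === 'ERROR') return { err: rows[0].error };
    }
    if (deadline !== undefined) {
      const remaining = deadline - Date.now();
      if (remaining <= 0) break; // Timed out: resolve to undefined.
      await sleepms(Math.min(pollingIntervalMs, remaining));
    } else {
      await sleepms(pollingIntervalMs);
    }
  }
  return undefined;
}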
@@ -948,14 +852,7 @@ class PostgresSystemDatabase {
  return false;
  }
  // Reset the status of the task to "ENQUEUED"
- const wsRes = await client.query(`
- UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status
- SET status = $2
- WHERE workflow_uuid = $1;
- `, [workflowId, workflow_1.StatusString.ENQUEUED]);
- if (wsRes.rowCount === 0) {
- throw new Error(`UNREACHABLE: Workflow ${workflowId} is found in the workflow_queue table but not found in the workflow_status table`);
- }
+ await this.recordWorkflowStatusChange(workflowId, workflow_1.StatusString.ENQUEUED, {}, client);
  await client.query('COMMIT');
  return true;
  }