@dbos-inc/dbos-sdk 2.1.2-preview → 2.1.9-preview
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -2
- package/dbos-config.schema.json +2 -11
- package/dist/src/context.d.ts +2 -0
- package/dist/src/context.d.ts.map +1 -1
- package/dist/src/context.js +16 -1
- package/dist/src/context.js.map +1 -1
- package/dist/src/dbos-executor.d.ts +9 -8
- package/dist/src/dbos-executor.d.ts.map +1 -1
- package/dist/src/dbos-executor.js +361 -36
- package/dist/src/dbos-executor.js.map +1 -1
- package/dist/src/dbos-runtime/cli.d.ts +1 -0
- package/dist/src/dbos-runtime/cli.d.ts.map +1 -1
- package/dist/src/dbos-runtime/cli.js +13 -2
- package/dist/src/dbos-runtime/cli.js.map +1 -1
- package/dist/src/dbos-runtime/config.d.ts +8 -7
- package/dist/src/dbos-runtime/config.d.ts.map +1 -1
- package/dist/src/dbos-runtime/config.js +25 -17
- package/dist/src/dbos-runtime/config.js.map +1 -1
- package/dist/src/dbos-runtime/db_connection.d.ts +10 -0
- package/dist/src/dbos-runtime/db_connection.d.ts.map +1 -0
- package/dist/src/dbos-runtime/db_connection.js +59 -0
- package/dist/src/dbos-runtime/db_connection.js.map +1 -0
- package/dist/src/dbos-runtime/db_wizard.d.ts.map +1 -1
- package/dist/src/dbos-runtime/db_wizard.js +10 -14
- package/dist/src/dbos-runtime/db_wizard.js.map +1 -1
- package/dist/src/dbos-runtime/migrate.d.ts.map +1 -1
- package/dist/src/dbos-runtime/migrate.js +2 -3
- package/dist/src/dbos-runtime/migrate.js.map +1 -1
- package/dist/src/dbos-runtime/reset.d.ts +4 -0
- package/dist/src/dbos-runtime/reset.d.ts.map +1 -0
- package/dist/src/dbos-runtime/reset.js +39 -0
- package/dist/src/dbos-runtime/reset.js.map +1 -0
- package/dist/src/dbos.d.ts +2 -0
- package/dist/src/dbos.d.ts.map +1 -1
- package/dist/src/dbos.js +50 -1
- package/dist/src/dbos.js.map +1 -1
- package/dist/src/debugger/debug_workflow.d.ts +1 -1
- package/dist/src/debugger/debug_workflow.d.ts.map +1 -1
- package/dist/src/debugger/debug_workflow.js +2 -2
- package/dist/src/debugger/debug_workflow.js.map +1 -1
- package/dist/src/error.d.ts +3 -0
- package/dist/src/error.d.ts.map +1 -1
- package/dist/src/error.js +10 -2
- package/dist/src/error.js.map +1 -1
- package/dist/src/eventreceiver.d.ts +2 -0
- package/dist/src/eventreceiver.d.ts.map +1 -1
- package/dist/src/httpServer/handler.js.map +1 -1
- package/dist/src/procedure.d.ts +3 -3
- package/dist/src/procedure.d.ts.map +1 -1
- package/dist/src/procedure.js +3 -1
- package/dist/src/procedure.js.map +1 -1
- package/dist/src/system_database.d.ts +8 -2
- package/dist/src/system_database.d.ts.map +1 -1
- package/dist/src/system_database.js +32 -4
- package/dist/src/system_database.js.map +1 -1
- package/dist/src/testing/testing_runtime.js.map +1 -1
- package/dist/src/utils.d.ts.map +1 -1
- package/dist/src/utils.js +1 -14
- package/dist/src/utils.js.map +1 -1
- package/dist/src/workflow.d.ts +1 -13
- package/dist/src/workflow.d.ts.map +1 -1
- package/dist/src/workflow.js +4 -322
- package/dist/src/workflow.js.map +1 -1
- package/dist/tsconfig.build.tsbuildinfo +1 -1
- package/package.json +1 -1
package/dist/src/dbos-executor.js

@@ -24,6 +24,7 @@ const debug_workflow_1 = require("./debugger/debug_workflow");
 const serialize_error_1 = require("serialize-error");
 const utils_1 = require("./utils");
 const node_path_1 = __importDefault(require("node:path"));
+const procedure_1 = require("./procedure");
 const lodash_1 = require("lodash");
 const wfqueue_1 = require("./wfqueue");
 const debugpoint_1 = require("./debugpoint");
@@ -279,7 +280,6 @@ class DBOSExecutor {
             }
             this.logger.debug(`Loaded ${length} ORM entities`);
         }
-        await ((0, user_database_1.createDBIfDoesNotExist)(this.config.poolConfig, this.logger));
         this.configureDbClient();
         if (!this.userDatabase) {
             this.logger.error("No user database configured!");
@@ -354,24 +354,6 @@ class DBOSExecutor {
                 this.logger.error(`Unknown notice severity: ${msg.severity} - ${msg.message}`);
         }
     }
-    async callProcedure(proc, args) {
-        const client = await this.procedurePool.connect();
-        const log = (msg) => this.#logNotice(msg);
-        const procClassName = this.getProcedureClassName(proc);
-        const plainProcName = `${procClassName}_${proc.name}_p`;
-        const procName = this.config.appVersion
-            ? `v${this.config.appVersion}_${plainProcName}`
-            : plainProcName;
-        const sql = `CALL "${procName}"(${args.map((_v, i) => `$${i + 1}`).join()});`;
-        try {
-            client.on('notice', log);
-            return await client.query(sql, args).then(value => value.rows);
-        }
-        finally {
-            client.off('notice', log);
-            client.release();
-        }
-    }
     async destroy() {
         if (this.pendingWorkflowMap.size > 0) {
             this.logger.info("Waiting for pending workflows to finish.");
@@ -541,13 +523,16 @@ class DBOSExecutor {
             internalStatus.name = `${DBOSExecutor.tempWorkflowName}-${wCtxt.tempWfOperationType}-${wCtxt.tempWfOperationName}`;
             internalStatus.className = params.tempWfClass ?? "";
         }
+        let status = undefined;
         // Synchronously set the workflow's status to PENDING and record workflow inputs (for non single-transaction workflows).
         // We have to do it for all types of workflows because operation_outputs table has a foreign key constraint on workflow status table.
         if ((wCtxt.tempWfOperationType !== TempWorkflowType.transaction
             && wCtxt.tempWfOperationType !== TempWorkflowType.procedure)
             || params.queueName !== undefined) {
             // TODO: Make this transactional (and with the queue step below)
-
+            const ires = await this.systemDatabase.initWorkflowStatus(internalStatus, args);
+            args = ires.args;
+            status = ires.status;
             await (0, debugpoint_1.debugTriggerPoint)(debugpoint_1.DEBUG_TRIGGER_WORKFLOW_ENQUEUE);
         }
         const runWorkflow = async () => {
@@ -617,7 +602,7 @@ class DBOSExecutor {
             }
             return result;
         };
-        if (params.queueName === undefined || params.executeWorkflow) {
+        if (status !== 'SUCCESS' && status !== 'ERROR' && (params.queueName === undefined || params.executeWorkflow)) {
            const workflowPromise = runWorkflow();
            // Need to await for the workflow and capture errors.
            const awaitWorkflowPromise = workflowPromise
@@ -633,7 +618,9 @@ class DBOSExecutor {
             return new workflow_1.InvokedHandle(this.systemDatabase, workflowPromise, workflowUUID, wf.name, callerUUID, callerFunctionID);
         }
         else {
-
+            if (params.queueName && status === 'ENQUEUED') {
+                await this.systemDatabase.enqueueWorkflow(workflowUUID, this.#getQueueByName(params.queueName));
+            }
             return new workflow_1.RetrievedHandle(this.systemDatabase, workflowUUID, callerUUID, callerFunctionID);
         }
     }
@@ -684,6 +671,128 @@ class DBOSExecutor {
         });
         return new workflow_1.InvokedHandle(this.systemDatabase, workflowPromise, workflowUUID, wf.name, callerUUID, callerFunctionID);
     }
+    /**
+     * Retrieve the transaction snapshot information of the current transaction
+     */
+    static async #retrieveSnapshot(query) {
+        const rows = await query("SELECT pg_current_snapshot()::text as txn_snapshot;", []);
+        return rows[0].txn_snapshot;
+    }
+    /**
+     * Check if an operation has already executed in a workflow.
+     * If it previously executed successfully, return its output.
+     * If it previously executed and threw an error, throw that error.
+     * Otherwise, return DBOSNull.
+     * Also return the transaction snapshot information of this current transaction.
+     */
+    async #checkExecution(query, workflowUUID, funcID) {
+        // Note: we read the current snapshot, not the recorded one!
+        const rows = await query("(SELECT output, error, txn_snapshot, true as recorded FROM dbos.transaction_outputs WHERE workflow_uuid=$1 AND function_id=$2 UNION ALL SELECT null as output, null as error, pg_current_snapshot()::text as txn_snapshot, false as recorded) ORDER BY recorded", [workflowUUID, funcID]);
+        if (rows.length === 0 || rows.length > 2) {
+            this.logger.error("Unexpected! This should never happen. Returned rows: " + rows.toString());
+            throw new error_1.DBOSError("This should never happen. Returned rows: " + rows.toString());
+        }
+        const res = {
+            output: exports.dbosNull,
+            txn_snapshot: ""
+        };
+        // recorded=false row will be first because we used ORDER BY.
+        res.txn_snapshot = rows[0].txn_snapshot;
+        if (rows.length === 2) {
+            if (utils_1.DBOSJSON.parse(rows[1].error) !== null) {
+                throw (0, serialize_error_1.deserializeError)(utils_1.DBOSJSON.parse(rows[1].error));
+            }
+            else {
+                res.output = utils_1.DBOSJSON.parse(rows[1].output);
+            }
+        }
+        return res;
+    }
+    /**
+     * Write a operation's output to the database.
+     */
+    static async #recordOutput(query, workflowUUID, funcID, txnSnapshot, output, isKeyConflict) {
+        try {
+            const serialOutput = utils_1.DBOSJSON.stringify(output);
+            const rows = await query("INSERT INTO dbos.transaction_outputs (workflow_uuid, function_id, output, txn_id, txn_snapshot, created_at) VALUES ($1, $2, $3, (select pg_current_xact_id_if_assigned()::text), $4, $5) RETURNING txn_id;", [workflowUUID, funcID, serialOutput, txnSnapshot, Date.now()]);
+            return rows[0].txn_id;
+        }
+        catch (error) {
+            if (isKeyConflict(error)) {
+                // Serialization and primary key conflict (Postgres).
+                throw new error_1.DBOSWorkflowConflictUUIDError(workflowUUID);
+            }
+            else {
+                throw error;
+            }
+        }
+    }
+    /**
+     * Record an error in an operation to the database.
+     */
+    static async #recordError(query, workflowUUID, funcID, txnSnapshot, err, isKeyConflict) {
+        try {
+            const serialErr = utils_1.DBOSJSON.stringify((0, serialize_error_1.serializeError)(err));
+            await query("INSERT INTO dbos.transaction_outputs (workflow_uuid, function_id, error, txn_id, txn_snapshot, created_at) VALUES ($1, $2, $3, null, $4, $5) RETURNING txn_id;", [workflowUUID, funcID, serialErr, txnSnapshot, Date.now()]);
+        }
+        catch (error) {
+            if (isKeyConflict(error)) {
+                // Serialization and primary key conflict (Postgres).
+                throw new error_1.DBOSWorkflowConflictUUIDError(workflowUUID);
+            }
+            else {
+                throw error;
+            }
+        }
+    }
+    /**
+     * Write all entries in the workflow result buffer to the database.
+     * If it encounters a primary key error, this indicates a concurrent execution with the same UUID, so throw an DBOSError.
+     */
+    async #flushResultBuffer(query, resultBuffer, workflowUUID, isKeyConflict) {
+        const funcIDs = Array.from(resultBuffer.keys());
+        if (funcIDs.length === 0) {
+            return;
+        }
+        funcIDs.sort();
+        try {
+            let sqlStmt = "INSERT INTO dbos.transaction_outputs (workflow_uuid, function_id, output, error, txn_id, txn_snapshot, created_at) VALUES ";
+            let paramCnt = 1;
+            const values = [];
+            for (const funcID of funcIDs) {
+                // Capture output and also transaction snapshot information.
+                // Initially, no txn_id because no queries executed.
+                const recorded = resultBuffer.get(funcID);
+                const output = recorded.output;
+                const txnSnapshot = recorded.txn_snapshot;
+                const createdAt = recorded.created_at;
+                if (paramCnt > 1) {
+                    sqlStmt += ", ";
+                }
+                sqlStmt += `($${paramCnt++}, $${paramCnt++}, $${paramCnt++}, $${paramCnt++}, null, $${paramCnt++}, $${paramCnt++})`;
+                values.push(workflowUUID, funcID, utils_1.DBOSJSON.stringify(output), utils_1.DBOSJSON.stringify(null), txnSnapshot, createdAt);
+            }
+            this.logger.debug(sqlStmt);
+            await query(sqlStmt, values);
+        }
+        catch (error) {
+            if (isKeyConflict(error)) {
+                // Serialization and primary key conflict (Postgres).
+                throw new error_1.DBOSWorkflowConflictUUIDError(workflowUUID);
+            }
+            else {
+                throw error;
+            }
+        }
+    }
+    flushResultBuffer(client, resultBuffer, workflowUUID) {
+        const func = (sql, args) => this.userDatabase.queryWithClient(client, sql, ...args);
+        return this.#flushResultBuffer(func, resultBuffer, workflowUUID, (error) => this.userDatabase.isKeyConflictError(error));
+    }
+    #flushResultBufferProc(client, resultBuffer, workflowUUID) {
+        const func = (sql, args) => client.query(sql, args).then(v => v.rows);
+        return this.#flushResultBuffer(func, resultBuffer, workflowUUID, user_database_1.pgNodeIsKeyConflictError);
+    }
     async transaction(txn, params, ...args) {
         // Create a workflow and call transaction.
         const temp_workflow = async (ctxt, ...args) => {
@@ -724,7 +833,8 @@ class DBOSExecutor {
                 // If the UUID is preset, it is possible this execution previously happened. Check, and return its original result if it did.
                 // Note: It is possible to retrieve a generated ID from a workflow handle, run a concurrent execution, and cause trouble for yourself. We recommend against this.
                 if (wfCtx.presetUUID) {
-                    const
+                    const func = (sql, args) => this.userDatabase.queryWithClient(client, sql, ...args);
+                    const check = await this.#checkExecution(func, workflowUUID, funcId);
                     txn_snapshot = check.txn_snapshot;
                     if (check.output !== exports.dbosNull) {
                         tCtxt.span.setAttribute("cached", true);
@@ -735,11 +845,12 @@ class DBOSExecutor {
                 }
                 else {
                     // Collect snapshot information for read-only transactions and non-preset UUID transactions, if not already collected above
-
+                    const func = (sql, args) => this.userDatabase.queryWithClient(client, sql, ...args);
+                    txn_snapshot = await DBOSExecutor.#retrieveSnapshot(func);
                 }
                 // For non-read-only transactions, flush the result buffer.
                 if (!readOnly) {
-                    await
+                    await this.flushResultBuffer(client, wfCtx.resultBuffer, wfCtx.workflowUUID);
                 }
                 // Execute the user's transaction.
                 let cresult;
@@ -768,7 +879,8 @@ class DBOSExecutor {
                 else {
                     try {
                         // Synchronously record the output of write transactions and obtain the transaction ID.
-                        const
+                        const func = (sql, args) => this.userDatabase.queryWithClient(client, sql, ...args);
+                        const pg_txn_id = await DBOSExecutor.#recordOutput(func, wfCtx.workflowUUID, funcId, txn_snapshot, result, (error) => this.userDatabase.isKeyConflictError(error));
                         tCtxt.span.setAttribute("pg_txn_id", pg_txn_id);
                         wfCtx.resultBuffer.clear();
                     }
@@ -803,8 +915,9 @@ class DBOSExecutor {
                 // Record and throw other errors.
                 const e = err;
                 await this.userDatabase.transaction(async (client) => {
-                    await
-
+                    await this.flushResultBuffer(client, wfCtx.resultBuffer, wfCtx.workflowUUID);
+                    const func = (sql, args) => this.userDatabase.queryWithClient(client, sql, ...args);
+                    await DBOSExecutor.#recordError(func, wfCtx.workflowUUID, funcId, txn_snapshot, e, (error) => this.userDatabase.isKeyConflictError(error));
                 }, { isolationLevel: transaction_1.IsolationLevel.ReadCommitted });
                 wfCtx.resultBuffer.clear();
                 span.setStatus({ code: api_1.SpanStatusCode.ERROR, message: e.message });
@@ -817,15 +930,209 @@ class DBOSExecutor {
         // Create a workflow and call procedure.
         const temp_workflow = async (ctxt, ...args) => {
             const ctxtImpl = ctxt;
-            return
+            return this.callProcedureFunction(proc, ctxtImpl, ...args);
         };
-        return (await this.workflow(temp_workflow, {
+        return await (await this.workflow(temp_workflow, {
+            ...params,
             tempWfType: TempWorkflowType.procedure,
             tempWfName: (0, decorators_1.getRegisteredMethodName)(proc),
             tempWfClass: (0, decorators_1.getRegisteredMethodClassName)(proc),
         }, ...args)).getResult();
     }
-    async
+    async callProcedureFunction(proc, wfCtx, ...args) {
+        const procInfo = this.getProcedureInfo(proc);
+        if (procInfo === undefined) {
+            throw new error_1.DBOSNotRegisteredError(proc.name);
+        }
+        const executeLocally = procInfo.config.executeLocally ?? false;
+        const funcId = wfCtx.functionIDGetIncrement();
+        const span = this.tracer.startSpan(proc.name, {
+            operationUUID: wfCtx.workflowUUID,
+            operationType: exports.OperationType.PROCEDURE,
+            authenticatedUser: wfCtx.authenticatedUser,
+            assumedRole: wfCtx.assumedRole,
+            authenticatedRoles: wfCtx.authenticatedRoles,
+            readOnly: procInfo.config.readOnly ?? false,
+            isolationLevel: procInfo.config.isolationLevel,
+            executeLocally,
+        }, wfCtx.span);
+        try {
+            const result = executeLocally
+                ? await this.#callProcedureFunctionLocal(proc, args, wfCtx, span, procInfo, funcId)
+                : await this.#callProcedureFunctionRemote(proc, args, wfCtx, span, procInfo.config, funcId);
+            span.setStatus({ code: api_1.SpanStatusCode.OK });
+            return result;
+        }
+        catch (e) {
+            const { message } = e;
+            span.setStatus({ code: api_1.SpanStatusCode.ERROR, message });
+            throw e;
+        }
+        finally {
+            this.tracer.endSpan(span);
+        }
+    }
+    async #callProcedureFunctionLocal(proc, args, wfCtx, span, procInfo, funcId) {
+        let retryWaitMillis = 1;
+        const backoffFactor = 1.5;
+        const maxRetryWaitMs = 2000; // Maximum wait 2 seconds.
+        const readOnly = procInfo.config.readOnly ?? false;
+        while (true) {
+            let txn_snapshot = "invalid";
+            const wrappedProcedure = async (client) => {
+                const ctxt = new procedure_1.StoredProcedureContextImpl(client, wfCtx, span, this.logger, funcId, proc.name);
+                if (wfCtx.presetUUID) {
+                    const func = (sql, args) => this.procedurePool.query(sql, args).then(v => v.rows);
+                    const check = await this.#checkExecution(func, wfCtx.workflowUUID, funcId);
+                    txn_snapshot = check.txn_snapshot;
+                    if (check.output !== exports.dbosNull) {
+                        ctxt.span.setAttribute("cached", true);
+                        ctxt.span.setStatus({ code: api_1.SpanStatusCode.OK });
+                        this.tracer.endSpan(ctxt.span);
+                        return check.output;
+                    }
+                }
+                else {
+                    // Collect snapshot information for read-only transactions and non-preset UUID transactions, if not already collected above
+                    const func = (sql, args) => this.procedurePool.query(sql, args).then(v => v.rows);
+                    txn_snapshot = await DBOSExecutor.#retrieveSnapshot(func);
+                }
+                // For non-read-only transactions, flush the result buffer.
+                if (!readOnly) {
+                    await this.#flushResultBufferProc(client, wfCtx.resultBuffer, wfCtx.workflowUUID);
+                }
+                let cresult;
+                if (procInfo.registration.passContext) {
+                    await (0, context_1.runWithStoredProcContext)(ctxt, async () => {
+                        cresult = await proc(ctxt, ...args);
+                    });
+                }
+                else {
+                    await (0, context_1.runWithStoredProcContext)(ctxt, async () => {
+                        const pf = proc;
+                        cresult = await pf(...args);
+                    });
+                }
+                const result = cresult;
+                if (readOnly) {
+                    // Buffer the output of read-only transactions instead of synchronously writing it.
+                    const readOutput = {
+                        output: result,
+                        txn_snapshot: txn_snapshot,
+                        created_at: Date.now(),
+                    };
+                    wfCtx.resultBuffer.set(funcId, readOutput);
+                }
+                else {
+                    // Synchronously record the output of write transactions and obtain the transaction ID.
+                    const func = (sql, args) => client.query(sql, args).then(v => v.rows);
+                    const pg_txn_id = await DBOSExecutor.#recordOutput(func, wfCtx.workflowUUID, funcId, txn_snapshot, result, user_database_1.pgNodeIsKeyConflictError);
+                    // const pg_txn_id = await wfCtx.recordOutputProc<R>(client, funcId, txn_snapshot, result);
+                    ctxt.span.setAttribute("pg_txn_id", pg_txn_id);
+                    wfCtx.resultBuffer.clear();
+                }
+                return result;
+            };
+            try {
+                const result = await this.invokeStoredProcFunction(wrappedProcedure, { isolationLevel: procInfo.config.isolationLevel });
+                span.setStatus({ code: api_1.SpanStatusCode.OK });
+                return result;
+            }
+            catch (err) {
+                if (this.userDatabase.isRetriableTransactionError(err)) {
+                    // serialization_failure in PostgreSQL
+                    span.addEvent("TXN SERIALIZATION FAILURE", { "retryWaitMillis": retryWaitMillis }, performance.now());
+                    // Retry serialization failures.
+                    await (0, utils_1.sleepms)(retryWaitMillis);
+                    retryWaitMillis *= backoffFactor;
+                    retryWaitMillis = retryWaitMillis < maxRetryWaitMs ? retryWaitMillis : maxRetryWaitMs;
+                    continue;
+                }
+                // Record and throw other errors.
+                const e = err;
+                await this.invokeStoredProcFunction(async (client) => {
+                    await this.#flushResultBufferProc(client, wfCtx.resultBuffer, wfCtx.workflowUUID);
+                    const func = (sql, args) => client.query(sql, args).then(v => v.rows);
+                    await DBOSExecutor.#recordError(func, wfCtx.workflowUUID, funcId, txn_snapshot, e, user_database_1.pgNodeIsKeyConflictError);
+                }, { isolationLevel: transaction_1.IsolationLevel.ReadCommitted });
+                await this.userDatabase.transaction(async (client) => {
+                    await this.flushResultBuffer(client, wfCtx.resultBuffer, wfCtx.workflowUUID);
+                    const func = (sql, args) => this.userDatabase.queryWithClient(client, sql, ...args);
+                    await DBOSExecutor.#recordError(func, wfCtx.workflowUUID, funcId, txn_snapshot, e, (error) => this.userDatabase.isKeyConflictError(error));
+                }, { isolationLevel: transaction_1.IsolationLevel.ReadCommitted });
+                wfCtx.resultBuffer.clear();
+                throw err;
+            }
+        }
+    }
+    async #callProcedureFunctionRemote(proc, args, wfCtx, span, config, funcId) {
+        const readOnly = config.readOnly ?? false;
+        const $jsonCtx = {
+            request: wfCtx.request,
+            authenticatedUser: wfCtx.authenticatedUser,
+            authenticatedRoles: wfCtx.authenticatedRoles,
+            assumedRole: wfCtx.assumedRole,
+        };
+        // Note, node-pg converts JS arrays to postgres array literals, so must call JSON.strigify on
+        // args and bufferedResults before being passed to #invokeStoredProc
+        const $args = [wfCtx.workflowUUID, funcId, wfCtx.presetUUID, $jsonCtx, null, JSON.stringify(args)];
+        if (!readOnly) {
+            // function_id, output, txn_snapshot, created_at
+            const bufferedResults = new Array();
+            for (const [functionID, { output, txn_snapshot, created_at }] of wfCtx.resultBuffer.entries()) {
+                bufferedResults.push([functionID, output, txn_snapshot, created_at]);
+            }
+            // sort by function ID
+            bufferedResults.sort((a, b) => a[0] - b[0]);
+            $args.unshift(bufferedResults.length > 0 ? JSON.stringify(bufferedResults) : null);
+        }
+        const [{ return_value }] = await this.#invokeStoredProc(proc, $args);
+        const { error, output, txn_snapshot, txn_id, created_at } = return_value;
+        // buffered results are persisted in r/w stored procs, even if it returns an error
+        if (!readOnly) {
+            wfCtx.resultBuffer.clear();
+        }
+        // if the stored proc returns an error, deserialize and throw it.
+        // stored proc saves the error in tx_output before returning
+        if (error) {
+            throw (0, serialize_error_1.deserializeError)(error);
+        }
+        // if txn_snapshot is provided, the output needs to be buffered
+        if (readOnly && txn_snapshot) {
+            wfCtx.resultBuffer.set(funcId, {
+                output,
+                txn_snapshot,
+                created_at: created_at ?? Date.now(),
+            });
+        }
+        if (!readOnly) {
+            wfCtx.resultBuffer.clear();
+        }
+        if (txn_id) {
+            span.setAttribute("pg_txn_id", txn_id);
+        }
+        span.setStatus({ code: api_1.SpanStatusCode.OK });
+        return output;
+    }
+    async #invokeStoredProc(proc, args) {
+        const client = await this.procedurePool.connect();
+        const log = (msg) => this.#logNotice(msg);
+        const procClassName = this.getProcedureClassName(proc);
+        const plainProcName = `${procClassName}_${proc.name}_p`;
+        const procName = this.config.appVersion
+            ? `v${this.config.appVersion}_${plainProcName}`
+            : plainProcName;
+        const sql = `CALL "${procName}"(${args.map((_v, i) => `$${i + 1}`).join()});`;
+        try {
+            client.on('notice', log);
+            return await client.query(sql, args).then(value => value.rows);
+        }
+        finally {
+            client.off('notice', log);
+            client.release();
+        }
+    }
+    async invokeStoredProcFunction(func, config) {
         const client = await this.procedurePool.connect();
         try {
             const readOnly = config.readOnly ?? false;
@@ -884,7 +1191,7 @@ class DBOSExecutor {
         }, wfCtx.span);
         const ctxt = new step_1.StepContextImpl(wfCtx, funcID, span, this.logger, commInfo.config, stepFn.name);
         await this.userDatabase.transaction(async (client) => {
-            await
+            await this.flushResultBuffer(client, wfCtx.resultBuffer, wfCtx.workflowUUID);
         }, { isolationLevel: transaction_1.IsolationLevel.ReadCommitted });
         wfCtx.resultBuffer.clear();
         // Check if this execution previously happened, returning its original result if it did.
@@ -1064,11 +1371,29 @@ class DBOSExecutor {
     async deactivateEventReceivers() {
         this.logger.info("Deactivating event receivers");
         for (const evtRcvr of this.eventReceivers || []) {
-
+            try {
+                await evtRcvr.destroy();
+            }
+            catch (err) {
+                const e = err;
+                this.logger.warn(`Error destroying event receiver: ${e.message}`);
+            }
+        }
+        try {
+            await this.scheduler?.destroyScheduler();
+        }
+        catch (err) {
+            const e = err;
+            this.logger.warn(`Error destroying scheduler: ${e.message}`);
+        }
+        try {
+            wfqueue_1.wfQueueRunner.stop();
+            await this.wfqEnded;
+        }
+        catch (err) {
+            const e = err;
+            this.logger.warn(`Error destroying wf queue runner: ${e.message}`);
         }
-        await this.scheduler?.destroyScheduler();
-        wfqueue_1.wfQueueRunner.stop();
-        await this.wfqEnded;
     }
     async executeWorkflowUUID(workflowUUID, startNewWorkflow = false) {
         const wfStatus = await this.systemDatabase.getWorkflowStatus(workflowUUID);
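
For orientation: the largest addition in this diff is the stored-procedure execution path (`callProcedureFunction` with its local and remote variants). The local path retries PostgreSQL serialization failures with a capped exponential backoff, starting at 1 ms, growing by 1.5x per failure, and capped at 2 s. The following is a minimal standalone sketch of that retry shape only, not code from the package; `run` and `isRetriable` are hypothetical stand-ins for the wrapped transaction attempt and `userDatabase.isRetriableTransactionError`.

```js
// Illustrative sketch of the capped exponential backoff used above.
// `run` attempts the transaction once; `isRetriable` decides whether the
// error is a retriable serialization failure (e.g. SQLSTATE 40001).
async function retryOnSerializationFailure(run, isRetriable) {
    let retryWaitMillis = 1;     // initial wait: 1 ms
    const backoffFactor = 1.5;   // grow the wait after each failure
    const maxRetryWaitMs = 2000; // never wait more than 2 seconds
    while (true) {
        try {
            return await run();
        }
        catch (err) {
            if (!isRetriable(err)) {
                throw err; // non-retriable errors are recorded and rethrown by the executor
            }
            await new Promise((resolve) => setTimeout(resolve, retryWaitMillis));
            retryWaitMillis = Math.min(retryWaitMillis * backoffFactor, maxRetryWaitMs);
        }
    }
}
```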
|