@dbos-inc/dbos-sdk 4.4.4-preview → 4.4.5-preview
- package/dbos-config.schema.json +4 -0
- package/dist/dbos-config.schema.json +4 -0
- package/dist/src/cli/cli.d.ts.map +1 -1
- package/dist/src/cli/cli.js +17 -3
- package/dist/src/cli/cli.js.map +1 -1
- package/dist/src/config.d.ts +1 -0
- package/dist/src/config.d.ts.map +1 -1
- package/dist/src/config.js +3 -0
- package/dist/src/config.js.map +1 -1
- package/dist/src/datasource.d.ts +3 -3
- package/dist/src/datasource.d.ts.map +1 -1
- package/dist/src/datasource.js +15 -6
- package/dist/src/datasource.js.map +1 -1
- package/dist/src/dbos-executor.d.ts +3 -1
- package/dist/src/dbos-executor.d.ts.map +1 -1
- package/dist/src/dbos-executor.js +3 -2
- package/dist/src/dbos-executor.js.map +1 -1
- package/dist/src/sysdb_migrations/internal/migrations.d.ts +1 -1
- package/dist/src/sysdb_migrations/internal/migrations.d.ts.map +1 -1
- package/dist/src/sysdb_migrations/internal/migrations.js +169 -164
- package/dist/src/sysdb_migrations/internal/migrations.js.map +1 -1
- package/dist/src/sysdb_migrations/migration_runner.d.ts +2 -2
- package/dist/src/sysdb_migrations/migration_runner.d.ts.map +1 -1
- package/dist/src/sysdb_migrations/migration_runner.js +9 -7
- package/dist/src/sysdb_migrations/migration_runner.js.map +1 -1
- package/dist/src/system_database.d.ts +4 -3
- package/dist/src/system_database.d.ts.map +1 -1
- package/dist/src/system_database.js +77 -73
- package/dist/src/system_database.js.map +1 -1
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/package.json +1 -1

package/dist/src/system_database.js

@@ -30,35 +30,35 @@ exports.DBOS_FUNCNAME_WRITESTREAM = 'DBOS.writeStream';
 exports.DBOS_FUNCNAME_CLOSESTREAM = 'DBOS.closeStream';
 exports.DEFAULT_POOL_SIZE = 10;
 exports.DBOS_STREAM_CLOSED_SENTINEL = '__DBOS_STREAM_CLOSED__';
-async function grantDbosSchemaPermissions(databaseUrl, roleName, logger) {
-    logger.info(`Granting permissions for
+async function grantDbosSchemaPermissions(databaseUrl, roleName, logger, schemaName = 'dbos') {
+    logger.info(`Granting permissions for ${schemaName} schema to ${roleName}`);
     const client = new pg_1.Client((0, utils_2.getClientConfig)(databaseUrl));
     await client.connect();
     try {
-        // Grant usage on the
-        const grantUsageSql = `GRANT USAGE ON SCHEMA
+        // Grant usage on the schema
+        const grantUsageSql = `GRANT USAGE ON SCHEMA "${schemaName}" TO "${roleName}"`;
         logger.info(grantUsageSql);
         await client.query(grantUsageSql);
-        // Grant all privileges on all existing tables in
-        const grantTablesSql = `GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA
+        // Grant all privileges on all existing tables in schema (includes views)
+        const grantTablesSql = `GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA "${schemaName}" TO "${roleName}"`;
         logger.info(grantTablesSql);
         await client.query(grantTablesSql);
-        // Grant all privileges on all sequences in
-        const grantSequencesSql = `GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA
+        // Grant all privileges on all sequences in schema
+        const grantSequencesSql = `GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA "${schemaName}" TO "${roleName}"`;
         logger.info(grantSequencesSql);
         await client.query(grantSequencesSql);
-        // Grant execute on all functions and procedures in
-        const grantFunctionsSql = `GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA
+        // Grant execute on all functions and procedures in schema
+        const grantFunctionsSql = `GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA "${schemaName}" TO "${roleName}"`;
         logger.info(grantFunctionsSql);
         await client.query(grantFunctionsSql);
-        // Grant default privileges for future objects in
-        const alterTablesSql = `ALTER DEFAULT PRIVILEGES IN SCHEMA
+        // Grant default privileges for future objects in schema
+        const alterTablesSql = `ALTER DEFAULT PRIVILEGES IN SCHEMA "${schemaName}" GRANT ALL ON TABLES TO "${roleName}"`;
         logger.info(alterTablesSql);
         await client.query(alterTablesSql);
-        const alterSequencesSql = `ALTER DEFAULT PRIVILEGES IN SCHEMA
+        const alterSequencesSql = `ALTER DEFAULT PRIVILEGES IN SCHEMA "${schemaName}" GRANT ALL ON SEQUENCES TO "${roleName}"`;
         logger.info(alterSequencesSql);
         await client.query(alterSequencesSql);
-        const alterFunctionsSql = `ALTER DEFAULT PRIVILEGES IN SCHEMA
+        const alterFunctionsSql = `ALTER DEFAULT PRIVILEGES IN SCHEMA "${schemaName}" GRANT EXECUTE ON FUNCTIONS TO "${roleName}"`;
         logger.info(alterFunctionsSql);
         await client.query(alterFunctionsSql);
     }
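The exported `grantDbosSchemaPermissions` helper now takes the schema to grant on as a fourth argument, defaulting to the previously hard-coded `dbos`. A minimal sketch of calling it, assuming the deep `dist/src/...` path is importable (it may not be part of the package's public API); the URL, role name, and console-backed logger are placeholders:

```js
// Sketch only: connection URL, role, and logger are placeholders.
const { grantDbosSchemaPermissions } = require('@dbos-inc/dbos-sdk/dist/src/system_database');

const logger = { info: (msg) => console.log(msg) };

async function main() {
  await grantDbosSchemaPermissions(
    'postgresql://postgres:postgres@localhost:5432/my_app_dbos_sys', // placeholder URL
    'app_role', // placeholder role
    logger,
    'my_schema', // new in 4.4.5-preview; omit to keep the 'dbos' default
  );
}

main().catch((e) => { console.error(e); process.exit(1); });
```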
@@ -71,7 +71,7 @@ async function grantDbosSchemaPermissions(databaseUrl, roleName, logger) {
     }
 }
 exports.grantDbosSchemaPermissions = grantDbosSchemaPermissions;
-async function ensureSystemDatabase(sysDbUrl, logger, debugMode = false, customPool) {
+async function ensureSystemDatabase(sysDbUrl, logger, debugMode = false, customPool, schemaName = 'dbos') {
     if (debugMode) {
         // Don't create anything in debug mode
         return;
@@ -100,7 +100,7 @@ async function ensureSystemDatabase(sysDbUrl, logger, debugMode = false, customP
         client = cconnect.client;
     }
     try {
-        await (0, migration_runner_1.runSysMigrationsPg)(client, migrations_1.allMigrations, {
+        await (0, migration_runner_1.runSysMigrationsPg)(client, (0, migrations_1.allMigrations)(schemaName), schemaName, {
            onWarn: (e) => logger.info(e),
        });
    }
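Two coupled signature changes ride along in `migration_runner.js` and `migrations.js` (both listed above): `allMigrations` is now invoked as a function of the schema name instead of being passed as a value, and `runSysMigrationsPg` receives the schema name as an extra argument. Those files aren't shown in this diff, so the following is only an illustrative sketch of the factory shape the new call site implies; the migration name and SQL are invented:

```js
// Hypothetical sketch: only the shape (schemaName => list of migrations)
// is inferred from the call site above; the name and SQL are invented.
function allMigrations(schemaName) {
  const schema = `"${schemaName}"`; // double-quoted, matching the queries in this file
  return [
    {
      name: '001_create_example_table', // invented migration name
      sql: `CREATE TABLE IF NOT EXISTS ${schema}.example (id BIGINT PRIMARY KEY)`,
    },
  ];
}

// Updated call, as it appears in ensureSystemDatabase:
//   await runSysMigrationsPg(client, allMigrations(schemaName), schemaName, { onWarn });
```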
@@ -148,9 +148,9 @@ class NotificationMap
         }
     }
 }
-async function insertWorkflowStatus(client, initStatus) {
+async function insertWorkflowStatus(client, initStatus, schemaName) {
     try {
-        const { rows } = await client.query(`INSERT INTO ${
+        const { rows } = await client.query(`INSERT INTO "${schemaName}".workflow_status (
         workflow_uuid,
         status,
         name,
@@ -224,11 +224,11 @@ async function insertWorkflowStatus(client, initStatus) {
         throw error;
     }
 }
-async function getWorkflowStatusValue(client, workflowID) {
-    const { rows } = await client.query(`SELECT status FROM ${
+async function getWorkflowStatusValue(client, workflowID, schemaName) {
+    const { rows } = await client.query(`SELECT status FROM "${schemaName}".workflow_status WHERE workflow_uuid=$1`, [workflowID]);
     return rows.length === 0 ? undefined : rows[0].status;
 }
-async function updateWorkflowStatus(client, workflowID, status, options = {}) {
+async function updateWorkflowStatus(client, workflowID, status, schemaName, options = {}) {
     let setClause = `SET status=$2, updated_at=$3`;
     let whereClause = `WHERE workflow_uuid=$1`;
     const args = [workflowID, status, Date.now()];
@@ -262,15 +262,15 @@ async function updateWorkflowStatus(client, workflowID, status, options = {}) {
         const param = args.push(where.status);
         whereClause += ` AND status=$${param}`;
     }
-    const result = await client.query(`UPDATE ${
+    const result = await client.query(`UPDATE "${schemaName}".workflow_status ${setClause} ${whereClause}`, args);
     const throwOnFailure = options.throwOnFailure ?? true;
     if (throwOnFailure && result.rowCount !== 1) {
         throw new error_1.DBOSWorkflowConflictError(`Attempt to record transition of nonexistent workflow ${workflowID}`);
     }
 }
-async function recordOperationResult(client, workflowID, functionID, functionName, checkConflict, options = {}) {
+async function recordOperationResult(client, workflowID, functionID, functionName, checkConflict, schemaName, options = {}) {
     try {
-        await client.query(`INSERT INTO ${
+        await client.query(`INSERT INTO "${schemaName}".operation_outputs
         (workflow_uuid, function_id, output, error, function_name, child_workflow_id)
         VALUES ($1, $2, $3, $4, $5, $6)
         ${checkConflict ? '' : ' ON CONFLICT DO NOTHING'};`, [
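Every module-level helper (`insertWorkflowStatus`, `getWorkflowStatusValue`, `updateWorkflowStatus`, `recordOperationResult`) now takes `schemaName` explicitly, and in the latter two the new parameter lands before the trailing `options`, so every internal call site changes too, as the rest of the diff shows. The interpolation itself is simple; a quick sketch of what the template renders to, and why the double quotes matter for a mixed-case schema name (the name here is a placeholder):

```js
// What the new template renders to for a mixed-case schema name.
const schemaName = 'MyApp'; // placeholder
console.log(`SELECT status FROM "${schemaName}".workflow_status WHERE workflow_uuid=$1`);
// => SELECT status FROM "MyApp".workflow_status WHERE workflow_uuid=$1
// Unquoted, Postgres would fold the identifier to myapp and the query would
// target the wrong (likely nonexistent) schema.
```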
@@ -462,6 +462,7 @@ class PostgresSystemDatabase {
     systemDatabaseUrl;
     logger;
     pool;
+    schemaName;
     /*
      * Generally, notifications are asynchronous. One should:
      * Subscribe to updates
@@ -490,9 +491,10 @@ class PostgresSystemDatabase {
     customPool = false;
     runningWorkflowMap = new Map(); // Map from workflowID to workflow promise
     workflowCancellationMap = new Map(); // Map from workflowID to its cancellation status.
-    constructor(systemDatabaseUrl, logger, sysDbPoolSize = exports.DEFAULT_POOL_SIZE, systemDatabasePool) {
+    constructor(systemDatabaseUrl, logger, sysDbPoolSize = exports.DEFAULT_POOL_SIZE, systemDatabasePool, schemaName = 'dbos') {
         this.systemDatabaseUrl = systemDatabaseUrl;
         this.logger = logger;
+        this.schemaName = schemaName;
         if (systemDatabasePool) {
             this.pool = systemDatabasePool;
             this.customPool = true;
@@ -516,7 +518,7 @@ class PostgresSystemDatabase {
         });
     }
     async init(debugMode = false) {
-        await ensureSystemDatabase(this.systemDatabaseUrl, this.logger, debugMode, this.customPool ? this.pool : undefined);
+        await ensureSystemDatabase(this.systemDatabaseUrl, this.logger, debugMode, this.customPool ? this.pool : undefined, this.schemaName);
         if (this.shouldUseDBNotifications) {
             await this.#listenForNotifications();
         }
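`PostgresSystemDatabase` stores the schema name as a field and threads it into `ensureSystemDatabase` on `init()`. A sketch of constructing the class directly with a custom schema; this is an internal API (only the constructor signature is taken from the diff, and the class is assumed to be exported from the module), so the URL and logger are placeholders and normal applications would set the schema through configuration instead:

```js
// Internal-API sketch; connection URL and logger are placeholders.
const { PostgresSystemDatabase, DEFAULT_POOL_SIZE } = require('@dbos-inc/dbos-sdk/dist/src/system_database');

const logger = { info: console.log, warn: console.warn, error: console.error };

const sysdb = new PostgresSystemDatabase(
  'postgresql://postgres:postgres@localhost:5432/my_app_dbos_sys', // placeholder URL
  logger,
  DEFAULT_POOL_SIZE,
  undefined, // no caller-supplied pool, so the class creates its own
  'my_schema', // new trailing parameter; defaults to 'dbos'
);

// init() now ensures the system database, runs the schema-aware migrations,
// and (when enabled) listens for notifications against "my_schema".
sysdb.init().catch((e) => console.error(e));
```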
@@ -539,7 +541,7 @@ class PostgresSystemDatabase {
         const client = await this.pool.connect();
         try {
             await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
-            const resRow = await insertWorkflowStatus(client, initStatus);
+            const resRow = await insertWorkflowStatus(client, initStatus, this.schemaName);
             if (resRow.name !== initStatus.workflowName) {
                 const msg = `Workflow already exists with a different function name: ${resRow.name}, but the provided function name is: ${initStatus.workflowName}`;
                 throw new error_1.DBOSConflictingWorkflowError(initStatus.workflowUUID, msg);
@@ -561,7 +563,7 @@ class PostgresSystemDatabase {
             // Thus, when this number becomes equal to `maxRetries + 1`, we should mark the workflow as `MAX_RECOVERY_ATTEMPTS_EXCEEDED`.
             const attempts = resRow.recovery_attempts;
             if (maxRetries && attempts > maxRetries + 1) {
-                await updateWorkflowStatus(client, initStatus.workflowUUID, workflow_1.StatusString.MAX_RECOVERY_ATTEMPTS_EXCEEDED, {
+                await updateWorkflowStatus(client, initStatus.workflowUUID, workflow_1.StatusString.MAX_RECOVERY_ATTEMPTS_EXCEEDED, this.schemaName, {
                     where: { status: workflow_1.StatusString.PENDING },
                     throwOnFailure: false,
                 });
@@ -584,7 +586,7 @@ class PostgresSystemDatabase {
     async recordWorkflowOutput(workflowID, status) {
         const client = await this.pool.connect();
         try {
-            await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.SUCCESS, {
+            await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.SUCCESS, this.schemaName, {
                 update: { output: status.output, resetDeduplicationID: true },
             });
         }
@@ -595,7 +597,7 @@ class PostgresSystemDatabase {
     async recordWorkflowError(workflowID, status) {
         const client = await this.pool.connect();
         try {
-            await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ERROR, {
+            await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ERROR, this.schemaName, {
                 update: { error: status.error, resetDeduplicationID: true },
             });
         }
@@ -605,7 +607,7 @@ class PostgresSystemDatabase {
     }
     async getPendingWorkflows(executorID, appVersion) {
         const getWorkflows = await this.pool.query(`SELECT workflow_uuid, queue_name
-      FROM ${
+      FROM "${this.schemaName}".workflow_status
       WHERE status=$1 AND executor_id=$2 AND application_version=$3`, [workflow_1.StatusString.PENDING, executorID, appVersion]);
         return getWorkflows.rows.map((i) => ({
             workflowUUID: i.workflow_uuid,
@@ -615,7 +617,7 @@ class PostgresSystemDatabase {
     async #getOperationResultAndThrowIfCancelled(client, workflowID, functionID) {
         await this.#checkIfCanceled(client, workflowID);
         const { rows } = await client.query(`SELECT output, error, child_workflow_id, function_name
-      FROM ${
+      FROM "${this.schemaName}".operation_outputs
       WHERE workflow_uuid=$1 AND function_id=$2`, [workflowID, functionID]);
         if (rows.length === 0) {
             return undefined;
@@ -639,13 +641,13 @@ class PostgresSystemDatabase {
         }
     }
     async getAllOperationResults(workflowID) {
-        const { rows } = await this.pool.query(`SELECT * FROM ${
+        const { rows } = await this.pool.query(`SELECT * FROM "${this.schemaName}".operation_outputs WHERE workflow_uuid=$1`, [workflowID]);
         return rows;
     }
     async recordOperationResult(workflowID, functionID, functionName, checkConflict, options = {}) {
         const client = await this.pool.connect();
         try {
-            await recordOperationResult(client, workflowID, functionID, functionName, checkConflict, options);
+            await recordOperationResult(client, workflowID, functionID, functionName, checkConflict, this.schemaName, options);
         }
         finally {
             client.release();
@@ -688,12 +690,12 @@ class PostgresSystemDatabase {
             deduplicationID: undefined,
             priority: 0,
             queuePartitionKey: undefined,
-        });
+        }, this.schemaName);
         if (startStep > 0) {
-            const query = `INSERT INTO ${
+            const query = `INSERT INTO "${this.schemaName}".operation_outputs
           (workflow_uuid, function_id, output, error, function_name, child_workflow_id )
           SELECT $1 AS workflow_uuid, function_id, output, error, function_name, child_workflow_id
-          FROM ${
+          FROM "${this.schemaName}".operation_outputs
           WHERE workflow_uuid = $2 AND function_id < $3`;
             await client.query(query, [newWorkflowID, workflowID, startStep]);
         }
@@ -717,7 +719,7 @@ class PostgresSystemDatabase {
             return result.output;
         }
         const output = await func();
-        await recordOperationResult(client, workflowID, functionID, functionName, true, { output });
+        await recordOperationResult(client, workflowID, functionID, functionName, true, this.schemaName, { output });
         return output;
     }
     async durableSleepms(workflowID, functionID, durationMS) {
@@ -757,7 +759,7 @@ class PostgresSystemDatabase {
                 endTimeMs = JSON.parse(res.output);
             }
             else {
-                await recordOperationResult(client, workflowID, functionID, exports.DBOS_FUNCNAME_SLEEP, false, {
+                await recordOperationResult(client, workflowID, functionID, exports.DBOS_FUNCNAME_SLEEP, false, this.schemaName, {
                     output: JSON.stringify(endTimeMs),
                 });
             }
@@ -777,7 +779,7 @@ class PostgresSystemDatabase {
         try {
             await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
             await this.#runAndRecordResult(client, exports.DBOS_FUNCNAME_SEND, workflowID, functionID, async () => {
-                await client.query(`INSERT INTO ${
+                await client.query(`INSERT INTO "${this.schemaName}".notifications (destination_uuid, topic, message) VALUES ($1, $2, $3);`, [destinationID, topic, message]);
                 return undefined;
             });
             await client.query('COMMIT');
@@ -823,7 +825,7 @@ class PostgresSystemDatabase {
             try {
                 await this.checkIfCanceled(workflowID);
                 // Check if the key is already in the DB, then wait for the notification if it isn't.
-                const initRecvRows = (await this.pool.query(`SELECT topic FROM ${
+                const initRecvRows = (await this.pool.query(`SELECT topic FROM "${this.schemaName}".notifications WHERE destination_uuid=$1 AND topic=$2;`, [workflowID, topic])).rows;
                 if (initRecvRows.length !== 0)
                     break;
                 const ct = Date.now();
@@ -864,14 +866,14 @@ class PostgresSystemDatabase {
             await client.query(`BEGIN ISOLATION LEVEL READ COMMITTED`);
             const finalRecvRows = (await client.query(`WITH oldest_entry AS (
         SELECT destination_uuid, topic, message, created_at_epoch_ms
-        FROM ${
+        FROM "${this.schemaName}".notifications
         WHERE destination_uuid = $1
           AND topic = $2
         ORDER BY created_at_epoch_ms ASC
         LIMIT 1
       )

-      DELETE FROM ${
+      DELETE FROM "${this.schemaName}".notifications
       USING oldest_entry
       WHERE notifications.destination_uuid = oldest_entry.destination_uuid
         AND notifications.topic = oldest_entry.topic
@@ -880,7 +882,9 @@ class PostgresSystemDatabase {
             if (finalRecvRows.length > 0) {
                 message = finalRecvRows[0].message;
             }
-            await recordOperationResult(client, workflowID, functionID, exports.DBOS_FUNCNAME_RECV, true, {
+            await recordOperationResult(client, workflowID, functionID, exports.DBOS_FUNCNAME_RECV, true, this.schemaName, {
+                output: message,
+            });
             await client.query(`COMMIT`);
         }
         catch (e) {
@@ -897,7 +901,7 @@ class PostgresSystemDatabase {
     async setWorkflowStatus(workflowID, status, resetRecoveryAttempts) {
         const client = await this.pool.connect();
         try {
-            await updateWorkflowStatus(client, workflowID, status, { update: { resetRecoveryAttempts } });
+            await updateWorkflowStatus(client, workflowID, status, this.schemaName, { update: { resetRecoveryAttempts } });
         }
         finally {
             client.release();
@@ -908,7 +912,7 @@ class PostgresSystemDatabase {
         try {
             await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
             await this.#runAndRecordResult(client, exports.DBOS_FUNCNAME_SETEVENT, workflowID, functionID, async () => {
-                await client.query(`INSERT INTO ${
+                await client.query(`INSERT INTO "${this.schemaName}".workflow_events (workflow_uuid, key, value)
         VALUES ($1, $2, $3)
         ON CONFLICT (workflow_uuid, key)
         DO UPDATE SET value = $3
@@ -960,7 +964,7 @@ class PostgresSystemDatabase {
             await this.checkIfCanceled(callerWorkflow?.workflowID);
             // Check if the key is already in the DB, then wait for the notification if it isn't.
             const initRecvRows = (await this.pool.query(`SELECT key, value
-        FROM ${
+        FROM "${this.schemaName}".workflow_events
         WHERE workflow_uuid=$1 AND key=$2;`, [workflowID, key])).rows;
             if (initRecvRows.length > 0) {
                 value = initRecvRows[0].value;
@@ -1019,7 +1023,7 @@ class PostgresSystemDatabase {
         const client = await this.pool.connect();
         try {
             await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
-            const statusResult = await getWorkflowStatusValue(client, workflowID);
+            const statusResult = await getWorkflowStatusValue(client, workflowID, this.schemaName);
             if (!statusResult) {
                 throw new error_1.DBOSNonExistentWorkflowError(`Workflow ${workflowID} does not exist`);
             }
@@ -1030,7 +1034,7 @@ class PostgresSystemDatabase {
                 return;
             }
             // Set the workflow's status to CANCELLED and remove it from any queue it is on
-            await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.CANCELLED, {
+            await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.CANCELLED, this.schemaName, {
                 update: { queueName: null, resetDeduplicationID: true, resetStartedAtEpochMs: true },
             });
             await client.query('COMMIT');
@@ -1049,7 +1053,7 @@ class PostgresSystemDatabase {
         if (this.workflowCancellationMap.get(workflowID) === true) {
             throw new error_1.DBOSWorkflowCancelledError(workflowID);
         }
-        const statusValue = await getWorkflowStatusValue(client, workflowID);
+        const statusValue = await getWorkflowStatusValue(client, workflowID, this.schemaName);
         if (statusValue === workflow_1.StatusString.CANCELLED) {
             throw new error_1.DBOSWorkflowCancelledError(workflowID);
         }
@@ -1069,7 +1073,7 @@ class PostgresSystemDatabase {
         try {
             await client.query('BEGIN ISOLATION LEVEL REPEATABLE READ');
             // Check workflow status. If it is complete, do nothing.
-            const statusResult = await getWorkflowStatusValue(client, workflowID);
+            const statusResult = await getWorkflowStatusValue(client, workflowID, this.schemaName);
             if (!statusResult || statusResult === workflow_1.StatusString.SUCCESS || statusResult === workflow_1.StatusString.ERROR) {
                 await client.query('ROLLBACK');
                 if (!statusResult) {
@@ -1080,7 +1084,7 @@ class PostgresSystemDatabase {
                 return;
             }
             // Set the workflow's status to ENQUEUED and reset recovery attempts and deadline.
-            await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ENQUEUED, {
+            await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ENQUEUED, this.schemaName, {
                 update: {
                     queueName: utils_1.INTERNAL_QUEUE_NAME,
                     resetRecoveryAttempts: true,
@@ -1173,7 +1177,7 @@ class PostgresSystemDatabase {
         if (callerID)
             await this.checkIfCanceled(callerID);
         try {
-            const { rows } = await this.pool.query(`SELECT status, output, error FROM ${
+            const { rows } = await this.pool.query(`SELECT status, output, error FROM "${this.schemaName}".workflow_status
       WHERE workflow_uuid=$1`, [workflowID]);
             if (rows.length > 0) {
                 const status = rows[0].status;
@@ -1284,7 +1288,7 @@ class PostgresSystemDatabase {
     }
     // Event dispatcher queries / updates
     async getEventDispatchState(service, workflowName, key) {
-        const res = await this.pool.query(`SELECT * FROM ${
+        const res = await this.pool.query(`SELECT * FROM "${this.schemaName}".event_dispatch_kv
       WHERE workflow_fn_name = $1 AND service_name = $2 AND key = $3;`, [workflowName, service, key]);
         if (res.rows.length === 0)
             return undefined;
@@ -1300,7 +1304,7 @@ class PostgresSystemDatabase {
         };
     }
     async upsertEventDispatchState(state) {
-        const res = await this.pool.query(`INSERT INTO ${
+        const res = await this.pool.query(`INSERT INTO "${this.schemaName}".event_dispatch_kv (
       service_name, workflow_fn_name, key, value, update_time, update_seq)
       VALUES ($1, $2, $3, $4, $5, $6)
       ON CONFLICT (service_name, workflow_fn_name, key)
@@ -1324,7 +1328,7 @@ class PostgresSystemDatabase {
         };
     }
     async listWorkflows(input) {
-        const schemaName =
+        const schemaName = this.schemaName;
         const selectColumns = [
             'workflow_uuid',
             'status',
@@ -1404,7 +1408,7 @@ class PostgresSystemDatabase {
         const offsetClause = input.offset ? `OFFSET ${input.offset}` : '';
         const query = `
      SELECT ${selectColumns.join(', ')}
-      FROM ${schemaName}.workflow_status
+      FROM "${schemaName}".workflow_status
      ${whereClause}
      ${orderClause}
      ${limitClause}
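Note that `listWorkflows` (and `listQueuedWorkflows` below) already built their queries from a `schemaName` variable but interpolated it bare; this release quotes it like every other query in the file. Quoting preserves case, though it still assumes the name contains no double quote of its own. As a defensive alternative, recent versions of node-postgres export an `escapeIdentifier` helper that also doubles embedded quotes; a sketch of that pattern (not what the SDK does, and the schema name is a placeholder):

```js
// Sketch of identifier escaping with pg's built-in helper.
const { escapeIdentifier } = require('pg');

const schemaName = 'my_schema'; // placeholder; 'dbos' is the default
const table = `${escapeIdentifier(schemaName)}.workflow_status`;
console.log(`SELECT workflow_uuid FROM ${table} WHERE status = $1`);
// => SELECT workflow_uuid FROM "my_schema".workflow_status WHERE status = $1
```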
@@ -1414,7 +1418,7 @@ class PostgresSystemDatabase {
         return result.rows.map(mapWorkflowStatus);
     }
     async listQueuedWorkflows(input) {
-        const schemaName =
+        const schemaName = this.schemaName;
         const selectColumns = [
             'workflow_uuid',
             'status',
@@ -1479,7 +1483,7 @@ class PostgresSystemDatabase {
         const offsetClause = input.offset ? `OFFSET ${input.offset}` : '';
         const query = `
      SELECT ${selectColumns.join(', ')}
-      FROM ${schemaName}.workflow_status
+      FROM "${schemaName}".workflow_status
      ${whereClause}
      ${orderClause}
      ${limitClause}
@@ -1490,14 +1494,14 @@ class PostgresSystemDatabase {
     }
     async clearQueueAssignment(workflowID) {
         // Reset the status of the task from "PENDING" to "ENQUEUED"
-        const wqRes = await this.pool.query(`UPDATE ${
+        const wqRes = await this.pool.query(`UPDATE "${this.schemaName}".workflow_status
       SET started_at_epoch_ms = NULL, status = $2
       WHERE workflow_uuid = $1 AND queue_name is NOT NULL AND status = $3`, [workflowID, workflow_1.StatusString.ENQUEUED, workflow_1.StatusString.PENDING]);
         // If no rows were affected, the workflow is not anymore in the queue or was already completed
         return (wqRes.rowCount ?? 0) > 0;
     }
     async getDeduplicatedWorkflow(queueName, deduplicationID) {
-        const { rows } = await this.pool.query(`SELECT workflow_uuid FROM ${
+        const { rows } = await this.pool.query(`SELECT workflow_uuid FROM "${this.schemaName}".workflow_status
       WHERE queue_name = $1 AND deduplication_id = $2`, [queueName, deduplicationID]);
         if (rows.length === 0) {
             return null;
@@ -1505,7 +1509,7 @@ class PostgresSystemDatabase {
         return rows[0].workflow_uuid;
     }
     async getQueuePartitions(queueName) {
-        const { rows } = await this.pool.query(`SELECT DISTINCT queue_partition_key FROM ${
+        const { rows } = await this.pool.query(`SELECT DISTINCT queue_partition_key FROM "${this.schemaName}".workflow_status
       WHERE queue_name = $1
         AND status = $2
         AND queue_partition_key IS NOT NULL`, [queueName, workflow_1.StatusString.ENQUEUED]);
@@ -1532,7 +1536,7 @@ class PostgresSystemDatabase {
         let numRecentQueries = 0;
         if (queue.rateLimit) {
             const params = [queue.name, workflow_1.StatusString.ENQUEUED, startTimeMs - limiterPeriodMS, ...partitionParams];
-            const countResult = await client.query(`SELECT COUNT(*) FROM ${
+            const countResult = await client.query(`SELECT COUNT(*) FROM "${this.schemaName}".workflow_status
         WHERE queue_name = $1
           AND status <> $2
           AND started_at_epoch_ms > $3
@@ -1551,7 +1555,7 @@ class PostgresSystemDatabase {
         // Count how many workflows on this queue are currently PENDING both locally and globally.
         const params = [queue.name, workflow_1.StatusString.PENDING, ...partitionParams];
         const runningTasksResult = await client.query(`SELECT executor_id, COUNT(*) as task_count
-      FROM ${
+      FROM "${this.schemaName}".workflow_status
       WHERE queue_name = $1 AND status = $2
       ${partitionFilter.replace('$PARTITION', '$3')}
       GROUP BY executor_id`, params);
@@ -1585,7 +1589,7 @@ class PostgresSystemDatabase {
         const selectParams = [workflow_1.StatusString.ENQUEUED, queue.name, appVersion, ...partitionParams];
         const selectQuery = `
       SELECT workflow_uuid
-      FROM ${
+      FROM "${this.schemaName}".workflow_status
       WHERE status = $1
         AND queue_name = $2
         AND (application_version IS NULL OR application_version = $3)
@@ -1604,7 +1608,7 @@ class PostgresSystemDatabase {
             break;
         }
         // Start the functions by marking them as pending and updating their executor IDs
-        await client.query(`UPDATE ${
+        await client.query(`UPDATE "${this.schemaName}".workflow_status
       SET status = $1,
           executor_id = $2,
           application_version = $3,
@@ -1634,7 +1638,7 @@ class PostgresSystemDatabase {
         try {
             await client.query('BEGIN ISOLATION LEVEL READ COMMITTED');
             // Find the maximum offset for this workflow_uuid and key combination
-            const maxOffsetResult = await client.query(`SELECT MAX("offset") FROM ${
+            const maxOffsetResult = await client.query(`SELECT MAX("offset") FROM "${this.schemaName}".streams
         WHERE workflow_uuid = $1 AND key = $2`, [workflowID, key]);
             // Next offset is max + 1, or 0 if no records exist
             const maxOffset = maxOffsetResult.rows[0].max;
@@ -1642,7 +1646,7 @@ class PostgresSystemDatabase {
             // Serialize the value before storing
             const serializedValue = JSON.stringify(value);
             // Insert the new stream entry
-            await client.query(`INSERT INTO ${
+            await client.query(`INSERT INTO "${this.schemaName}".streams (workflow_uuid, key, value, "offset")
         VALUES ($1, $2, $3, $4)`, [workflowID, key, serializedValue, nextOffset]);
             await client.query('COMMIT');
         }
@@ -1662,7 +1666,7 @@ class PostgresSystemDatabase {
         const functionName = value === exports.DBOS_STREAM_CLOSED_SENTINEL ? exports.DBOS_FUNCNAME_CLOSESTREAM : exports.DBOS_FUNCNAME_WRITESTREAM;
         await this.#runAndRecordResult(client, functionName, workflowID, functionID, async () => {
             // Find the maximum offset for this workflow_uuid and key combination
-            const maxOffsetResult = await client.query(`SELECT MAX("offset") FROM ${
+            const maxOffsetResult = await client.query(`SELECT MAX("offset") FROM "${this.schemaName}".streams
         WHERE workflow_uuid = $1 AND key = $2`, [workflowID, key]);
             // Next offset is max + 1, or 0 if no records exist
             const maxOffset = maxOffsetResult.rows[0].max;
@@ -1670,7 +1674,7 @@ class PostgresSystemDatabase {
             // Serialize the value before storing
             const serializedValue = JSON.stringify(value);
             // Insert the new stream entry
-            await client.query(`INSERT INTO ${
+            await client.query(`INSERT INTO "${this.schemaName}".streams (workflow_uuid, key, value, "offset")
         VALUES ($1, $2, $3, $4)`, [workflowID, key, serializedValue, nextOffset]);
             return undefined;
         });
@@ -1691,7 +1695,7 @@ class PostgresSystemDatabase {
     async readStream(workflowID, key, offset) {
         const client = await this.pool.connect();
         try {
-            const result = await client.query(`SELECT value FROM ${
+            const result = await client.query(`SELECT value FROM "${this.schemaName}".streams
         WHERE workflow_uuid = $1 AND key = $2 AND "offset" = $3`, [workflowID, key, offset]);
             if (result.rows.length === 0) {
                 throw new Error(`No value found for workflow_uuid=${workflowID}, key=${key}, offset=${offset}`);
@@ -1708,7 +1712,7 @@ class PostgresSystemDatabase {
         if (rowsThreshold !== undefined) {
             // Get the created_at timestamp of the rows_threshold newest row
             const result = await this.pool.query(`SELECT created_at
-      FROM ${
+      FROM "${this.schemaName}".workflow_status
       ORDER BY created_at DESC
       LIMIT 1 OFFSET $1`, [rowsThreshold - 1]);
             if (result.rows.length > 0) {
@@ -1723,7 +1727,7 @@ class PostgresSystemDatabase {
             return;
         }
         // Delete all workflows older than cutoff that are NOT PENDING or ENQUEUED
-        await this.pool.query(`DELETE FROM ${
+        await this.pool.query(`DELETE FROM "${this.schemaName}".workflow_status
       WHERE created_at < $1
         AND status NOT IN ($2, $3)`, [cutoffEpochTimestampMs, workflow_1.StatusString.PENDING, workflow_1.StatusString.ENQUEUED]);
         return;