@dbos-inc/dbos-sdk 2.10.15-preview → 2.10.17-preview
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/schemas/system_db_schema.d.ts +3 -8
- package/dist/schemas/system_db_schema.d.ts.map +1 -1
- package/dist/src/client.d.ts.map +1 -1
- package/dist/src/client.js +4 -3
- package/dist/src/client.js.map +1 -1
- package/dist/src/dbos-executor.d.ts +1 -2
- package/dist/src/dbos-executor.d.ts.map +1 -1
- package/dist/src/dbos-executor.js +10 -12
- package/dist/src/dbos-executor.js.map +1 -1
- package/dist/src/dbos.d.ts +1 -6
- package/dist/src/dbos.d.ts.map +1 -1
- package/dist/src/dbos.js +0 -9
- package/dist/src/dbos.js.map +1 -1
- package/dist/src/eventreceiver.d.ts +13 -8
- package/dist/src/eventreceiver.d.ts.map +1 -1
- package/dist/src/index.d.ts +1 -1
- package/dist/src/index.d.ts.map +1 -1
- package/dist/src/index.js.map +1 -1
- package/dist/src/system_database.d.ts +2 -9
- package/dist/src/system_database.d.ts.map +1 -1
- package/dist/src/system_database.js +141 -229
- package/dist/src/system_database.js.map +1 -1
- package/dist/src/testing/testing_runtime.js +1 -1
- package/dist/src/testing/testing_runtime.js.map +1 -1
- package/dist/src/wfqueue.d.ts +3 -0
- package/dist/src/wfqueue.d.ts.map +1 -1
- package/dist/src/wfqueue.js +2 -0
- package/dist/src/wfqueue.js.map +1 -1
- package/dist/src/workflow.d.ts +0 -16
- package/dist/src/workflow.d.ts.map +1 -1
- package/dist/src/workflow.js.map +1 -1
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/migrations/20252528000000_consolidate_queues.js +30 -0
- package/package.json +2 -2
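In short, this release consolidates the old dbos.workflow_queue table into dbos.workflow_status: the new migration adds queue bookkeeping columns to workflow_status, and every system_database.js hunk below either reads queue state from workflow_status directly or deletes code that maintained workflow_queue. The migration body is not rendered in this diff; the following is only a sketch of what it plausibly contains, assuming DBOS's Knex-based migration format and the 'dbos' system schema, with column names taken from the code below:

    // Hypothetical reconstruction of 20252528000000_consolidate_queues.js.
    // The column names come from the new queries in system_database.js; the
    // exact types, defaults, and indexes are assumptions.
    exports.up = async function (knex) {
      await knex.schema.withSchema('dbos').alterTable('workflow_status', (table) => {
        table.text('deduplication_id');          // enqueue-time dedup key (read back in mapWorkflowStatus)
        table.integer('priority').defaultTo(0);  // dequeue ordering when priorityEnabled is set
        table.bigInteger('started_at_epoch_ms'); // set when a worker claims an ENQUEUED task
      });
      // The 23505 handling in insertWorkflowStatus implies a unique constraint
      // roughly like this (assumed; the real index may be partial or named differently):
      await knex.raw(`CREATE UNIQUE INDEX uq_workflow_status_queue_dedup
                      ON dbos.workflow_status (queue_name, deduplication_id)`);
    };

    exports.down = async function (knex) {
      await knex.schema.withSchema('dbos').alterTable('workflow_status', (table) => {
        table.dropColumns('deduplication_id', 'priority', 'started_at_epoch_ms');
      });
    };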
@@ -72,62 +72,70 @@ class NotificationMap {
     }
 }
 async function insertWorkflowStatus(client, initStatus) {
- [...49 removed lines (old 75-123) not rendered in this diff view...]
+    try {
+        const { rows } = await client.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status (
+        workflow_uuid,
+        status,
+        name,
+        class_name,
+        config_name,
+        queue_name,
+        authenticated_user,
+        assumed_role,
+        authenticated_roles,
+        request,
+        executor_id,
+        application_version,
+        application_id,
+        created_at,
+        recovery_attempts,
+        updated_at,
+        workflow_timeout_ms,
+        workflow_deadline_epoch_ms,
+        inputs,
+        deduplication_id,
+        priority
+      ) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21)
+      ON CONFLICT (workflow_uuid)
+      DO UPDATE SET
+        recovery_attempts = workflow_status.recovery_attempts + 1,
+        updated_at = EXCLUDED.updated_at,
+        executor_id = EXCLUDED.executor_id
+      RETURNING recovery_attempts, status, name, class_name, config_name, queue_name, workflow_deadline_epoch_ms`, [
+            initStatus.workflowUUID,
+            initStatus.status,
+            initStatus.workflowName,
+            initStatus.workflowClassName,
+            initStatus.workflowConfigName,
+            initStatus.queueName ?? null,
+            initStatus.authenticatedUser,
+            initStatus.assumedRole,
+            JSON.stringify(initStatus.authenticatedRoles),
+            JSON.stringify(initStatus.request),
+            initStatus.executorId,
+            initStatus.applicationVersion ?? null,
+            initStatus.applicationID,
+            initStatus.createdAt,
+            initStatus.status === workflow_1.StatusString.ENQUEUED ? 0 : 1,
+            initStatus.updatedAt ?? Date.now(),
+            initStatus.timeoutMS ?? null,
+            initStatus.deadlineEpochMS ?? null,
+            initStatus.input ?? null,
+            initStatus.deduplicationID ?? null,
+            initStatus.priority,
+        ]);
+        if (rows.length === 0) {
+            throw new Error(`Attempt to insert workflow ${initStatus.workflowUUID} failed`);
+        }
+        return rows[0];
+    }
+    catch (error) {
+        const err = error;
+        if (err.code === '23505') {
+            throw new error_1.DBOSQueueDuplicatedError(initStatus.workflowUUID, initStatus.queueName ?? '', initStatus.deduplicationID ?? '');
+        }
+        throw error;
     }
-    return rows[0];
-}
-async function deleteQueuedWorkflows(client, workflowID) {
-    await client.query(`DELETE FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue WHERE workflow_uuid = $1`, [
-        workflowID,
-    ]);
 }
 async function getWorkflowStatusValue(client, workflowID) {
     const { rows } = await client.query(`SELECT status FROM ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status WHERE workflow_uuid=$1`, [workflowID]);
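A practical consequence of the rewritten insertWorkflowStatus above: enqueue-time deduplication is now enforced by the workflow_status insert itself rather than by a separate workflow_queue insert. A minimal sketch of the new failure mode, with invented values (the field names are the ones bound above; the unique constraint on (queue_name, deduplication_id) is inferred from the 23505 handler):

    // Assume 'wf-1' was already inserted as ENQUEUED on queue 'payments' with
    // deduplication ID 'order-123'. A second ENQUEUED insert reusing that pair
    // now violates the unique constraint and surfaces as DBOSQueueDuplicatedError.
    try {
      await insertWorkflowStatus(client, {
        workflowUUID: 'wf-2',
        status: workflow_1.StatusString.ENQUEUED,
        workflowName: 'processOrder',
        workflowClassName: '',
        workflowConfigName: '',
        queueName: 'payments',
        authenticatedUser: '',
        assumedRole: '',
        authenticatedRoles: [],
        request: {},
        executorId: 'local',
        applicationVersion: null,
        applicationID: '',
        createdAt: Date.now(),
        updatedAt: Date.now(),
        timeoutMS: null,
        deadlineEpochMS: null,
        input: null,
        deduplicationID: 'order-123',
        priority: 0,
      });
    } catch (e) {
      if (e instanceof error_1.DBOSQueueDuplicatedError) {
        // 'order-123' is still held by wf-1 on the 'payments' queue; back off or drop.
      }
    }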
@@ -152,10 +160,16 @@ async function updateWorkflowStatus(client, workflowID, status, options = {}) {
     if (update.resetDeadline) {
         setClause += `, workflow_deadline_epoch_ms = NULL`;
     }
-    if (update.queueName) {
-        const param = args.push(update.queueName);
+    if (update.queueName !== undefined) {
+        const param = args.push(update.queueName ?? undefined);
         setClause += `, queue_name=$${param}`;
     }
+    if (update.resetDeduplicationID) {
+        setClause += `, deduplication_id = NULL`;
+    }
+    if (update.resetStartedAtEpochMs) {
+        setClause += `, started_at_epoch_ms = NULL`;
+    }
     const where = options.where ?? {};
     if (where.status) {
         const param = args.push(where.status);
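The switch from `if (update.queueName)` to `if (update.queueName !== undefined)` in this hunk is load-bearing: an explicit `queueName: null` now reaches the SET clause (node-postgres binds the pushed `undefined` as SQL NULL) and clears the column, where the old truthy check silently skipped it. The cancellation hunk further down depends on exactly that:

    // From the @@ -810,9 +831,10 @@ hunk below: taking a workflow off its
    // queue is now just a status update that nulls out queue_name.
    await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.CANCELLED, {
        update: { queueName: null, resetDeduplicationID: true, resetStartedAtEpochMs: true },
    });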
@@ -215,6 +229,8 @@ function mapWorkflowStatus(row) {
         input: row.inputs,
         timeoutMS: row.workflow_timeout_ms ? Number(row.workflow_timeout_ms) : undefined,
         deadlineEpochMS: row.workflow_deadline_epoch_ms ? Number(row.workflow_deadline_epoch_ms) : undefined,
+        deduplicationID: row.deduplication_id ?? undefined,
+        priority: row.priority ?? 0,
     };
 }
 class PostgresSystemDatabase {
@@ -372,7 +388,9 @@ class PostgresSystemDatabase {
     async recordWorkflowOutput(workflowID, status) {
         const client = await this.pool.connect();
         try {
-            await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.SUCCESS, {
+            await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.SUCCESS, {
+                update: { output: status.output, resetDeduplicationID: true },
+            });
         }
         finally {
             client.release();
@@ -381,7 +399,9 @@ class PostgresSystemDatabase {
     async recordWorkflowError(workflowID, status) {
         const client = await this.pool.connect();
         try {
-            await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ERROR, {
+            await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ERROR, {
+                update: { error: status.error, resetDeduplicationID: true },
+            });
         }
         finally {
             client.release();
@@ -469,6 +489,8 @@ class PostgresSystemDatabase {
             updatedAt: now,
             timeoutMS: options.timeoutMS ?? workflowStatus.timeoutMS,
             input: workflowStatus.input,
+            deduplicationID: undefined,
+            priority: 0,
         });
         if (startStep > 0) {
             const query = `INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.operation_outputs
@@ -478,7 +500,6 @@ class PostgresSystemDatabase {
                 WHERE workflow_uuid = $2 AND function_id < $3`;
             await client.query(query, [newWorkflowID, workflowID, startStep]);
         }
-        await this.#enqueueWorkflow(client, newWorkflowID, utils_1.INTERNAL_QUEUE_NAME);
         await client.query('COMMIT');
         return newWorkflowID;
     }
@@ -810,9 +831,10 @@ class PostgresSystemDatabase {
                 await client.query('ROLLBACK');
                 return;
             }
-            //
-            await
-
+            // Set the workflow's status to CANCELLED and remove it from any queue it is on
+            await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.CANCELLED, {
+                update: { queueName: null, resetDeduplicationID: true, resetStartedAtEpochMs: true },
+            });
             await client.query('COMMIT');
         }
         catch (error) {
@@ -859,17 +881,17 @@ class PostgresSystemDatabase {
                 }
                 return;
             }
-            //
-            await deleteQueuedWorkflows(client, workflowID);
+            // Set the workflow's status to ENQUEUED and reset recovery attempts and deadline.
             await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ENQUEUED, {
                 update: {
                     queueName: utils_1.INTERNAL_QUEUE_NAME,
                     resetRecoveryAttempts: true,
                     resetDeadline: true,
+                    resetDeduplicationID: true,
+                    resetStartedAtEpochMs: true,
                 },
                 throwOnFailure: false,
             });
-            await this.#enqueueWorkflow(client, workflowID, utils_1.INTERNAL_QUEUE_NAME);
             await client.query('COMMIT');
         }
         catch (error) {
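The pattern in this resume hunk and the fork hunk above is the heart of the consolidation: a workflow_status row with status ENQUEUED and a non-null queue_name now is the queue entry, so making a workflow runnable again is a single status update, and the follow-up #enqueueWorkflow call (a workflow_queue insert) is deleted rather than replaced.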
@@ -1114,14 +1136,15 @@ class PostgresSystemDatabase {
     async listQueuedWorkflows(input) {
         const schemaName = dbos_executor_1.DBOSExecutor.systemDBSchemaName;
         const sortDesc = input.sortDesc ?? false; // By default, sort in ascending order
-        let query = this.knexDB(`${schemaName}.
-            .
+        let query = this.knexDB(`${schemaName}.workflow_status`)
+            .whereNotNull(`${schemaName}.workflow_status.queue_name`)
+            .whereIn(`${schemaName}.workflow_status.status`, [workflow_1.StatusString.ENQUEUED, workflow_1.StatusString.PENDING])
             .orderBy(`${schemaName}.workflow_status.created_at`, sortDesc ? 'desc' : 'asc');
         if (input.workflowName) {
-            query = query.
+            query = query.where(`${schemaName}.workflow_status.name`, input.workflowName);
         }
         if (input.queueName) {
-            query = query.
+            query = query.where(`${schemaName}.workflow_status.queue_name`, input.queueName);
         }
         if (input.startTime) {
             query = query.where(`${schemaName}.workflow_status.created_at`, '>=', new Date(input.startTime).getTime());
@@ -1130,7 +1153,7 @@ class PostgresSystemDatabase {
             query = query.where(`${schemaName}.workflow_status.created_at`, '<=', new Date(input.endTime).getTime());
         }
         if (input.status) {
-            query = query.
+            query = query.where(`${schemaName}.workflow_status.status`, input.status);
         }
         if (input.limit) {
             query = query.limit(input.limit);
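The next hunk is the bulk of the change: getWorkflowQueue, #enqueueWorkflow, enqueueWorkflow, and dequeueWorkflow disappear outright, and clearQueueAssignment shrinks from a connect/BEGIN/UPDATE/COMMIT sequence to one guarded UPDATE on workflow_status, which is atomic on its own. A usage sketch of the new method (the systemDatabase handle is an assumed name):

    // true  -> the workflow was PENDING on a queue and was flipped back to ENQUEUED
    // false -> nothing matched: it already finished, was cancelled, or was never queued
    const requeued = await systemDatabase.clearQueueAssignment(workflowID);
    if (!requeued) {
      // No-op: the guarded UPDATE affected zero rows.
    }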
@@ -1141,115 +1164,13 @@ class PostgresSystemDatabase {
         const rows = await query;
         return rows.map(mapWorkflowStatus);
     }
-    async getWorkflowQueue(input) {
-        // Create the initial query with a join to workflow_status table to get executor_id
-        let query = this.knexDB(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue as wq`)
-            .join(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status as ws`, 'wq.workflow_uuid', '=', 'ws.workflow_uuid')
-            .orderBy('wq.created_at_epoch_ms', 'desc');
-        if (input.queueName) {
-            query = query.where('wq.queue_name', input.queueName);
-        }
-        if (input.startTime) {
-            query = query.where('wq.created_at_epoch_ms', '>=', new Date(input.startTime).getTime());
-        }
-        if (input.endTime) {
-            query = query.where('wq.created_at_epoch_ms', '<=', new Date(input.endTime).getTime());
-        }
-        if (input.limit) {
-            query = query.limit(input.limit);
-        }
-        const rows = await query
-            .select({
-            workflow_uuid: 'wq.workflow_uuid',
-            executor_id: 'ws.executor_id',
-            queue_name: 'wq.queue_name',
-            created_at_epoch_ms: 'wq.created_at_epoch_ms',
-            started_at_epoch_ms: 'wq.started_at_epoch_ms',
-            completed_at_epoch_ms: 'wq.completed_at_epoch_ms',
-        })
-            .then((rows) => rows);
-        const workflows = rows.map((row) => {
-            return {
-                workflowID: row.workflow_uuid,
-                executorID: row.executor_id,
-                queueName: row.queue_name,
-                createdAt: row.created_at_epoch_ms,
-                startedAt: row.started_at_epoch_ms,
-                completedAt: row.completed_at_epoch_ms,
-            };
-        });
-        return { workflows };
-    }
-    async #enqueueWorkflow(client, workflowID, queueName, enqueueOptions) {
-        const dedupID = enqueueOptions?.deduplicationID ?? null;
-        const priority = enqueueOptions?.priority ?? 0;
-        try {
-            await client.query(`INSERT INTO ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue (workflow_uuid, queue_name, deduplication_id, priority)
-      VALUES ($1, $2, $3, $4)
-      ON CONFLICT (workflow_uuid)
-      DO NOTHING;`, [workflowID, queueName, dedupID, priority]);
-        }
-        catch (error) {
-            const err = error;
-            if (err.code === '23505') {
-                // unique constraint violation (only expected for the INSERT query)
-                throw new error_1.DBOSQueueDuplicatedError(workflowID, queueName, dedupID ?? '');
-            }
-            this.logger.error(`Error enqueuing workflow ${workflowID} to queue ${queueName}`);
-            throw error;
-        }
-    }
-    async enqueueWorkflow(workflowId, queueName, enqueueOptions) {
-        const client = await this.pool.connect();
-        try {
-            await this.#enqueueWorkflow(client, workflowId, queueName, enqueueOptions);
-        }
-        finally {
-            client.release();
-        }
-    }
     async clearQueueAssignment(workflowID) {
- [...6 removed lines (old 1212-1217) not rendered in this diff view...]
-      WHERE workflow_uuid = $1 AND completed_at_epoch_ms IS NULL;`, [workflowID]);
-            // If no rows were affected, the workflow is not anymore in the queue or was already completed
-            if (wqRes.rowCount === 0) {
-                await client.query('ROLLBACK');
-                return false;
-            }
-            // Reset the status of the task to "ENQUEUED"
-            await updateWorkflowStatus(client, workflowID, workflow_1.StatusString.ENQUEUED);
-            await client.query('COMMIT');
-            return true;
-        }
-        catch (error) {
-            await client.query('ROLLBACK');
-            throw error;
-        }
-        finally {
-            client.release();
-        }
-    }
-    async dequeueWorkflow(workflowID, queue) {
-        const client = await this.pool.connect();
-        try {
-            if (queue.rateLimit) {
-                const time = Date.now();
-                await client.query(`UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue
-      SET completed_at_epoch_ms = $2
-      WHERE workflow_uuid = $1;`, [workflowID, time]);
-            }
-            else {
-                await deleteQueuedWorkflows(client, workflowID);
-            }
-        }
-        finally {
-            client.release();
-        }
+        // Reset the status of the task from "PENDING" to "ENQUEUED"
+        const wqRes = await this.pool.query(`UPDATE ${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status
+      SET started_at_epoch_ms = NULL, status = $2
+      WHERE workflow_uuid = $1 AND queue_name is NOT NULL AND status = $3`, [workflowID, workflow_1.StatusString.ENQUEUED, workflow_1.StatusString.PENDING]);
+        // If no rows were affected, the workflow is not anymore in the queue or was already completed
+        return (wqRes.rowCount ?? 0) > 0;
     }
     async findAndMarkStartableWorkflows(queue, executorID, appVersion) {
         const startTimeMs = Date.now();
@@ -1259,64 +1180,67 @@ class PostgresSystemDatabase {
             // If there is a rate limit, compute how many functions have started in its period.
             let numRecentQueries = 0;
             if (queue.rateLimit) {
-                const numRecentQueriesS = (await trx(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.
+                const numRecentQueriesS = (await trx(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status`)
                     .count()
                     .where('queue_name', queue.name)
+                    .andWhere('status', '<>', workflow_1.StatusString.ENQUEUED)
                    .andWhere('started_at_epoch_ms', '>', startTimeMs - limiterPeriodMS)
                     .first()).count;
                 numRecentQueries = parseInt(`${numRecentQueriesS}`);
                 if (numRecentQueries >= queue.rateLimit.limitPerPeriod) {
-                    return
+                    return [];
                 }
             }
             // Dequeue functions eligible for this worker and ordered by the time at which they were enqueued.
             // If there is a global or local concurrency limit N, select only the N oldest enqueued
             // functions, else select all of them.
-            // First lets figure out how many tasks are eligible for dequeue.
-            // This means figuring out how many unstarted tasks are within the local and global concurrency limits
-            const runningTasksSubquery = trx(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue as wq`)
-                .join(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status as ws`, 'wq.workflow_uuid', '=', 'ws.workflow_uuid')
-                .select('ws.executor_id')
-                .count('* as task_count')
-                .where('wq.queue_name', queue.name)
-                .whereNotNull('wq.started_at_epoch_ms') // started
-                .whereNull('wq.completed_at_epoch_ms') // not completed
-                .groupBy('ws.executor_id');
-            const runningTasksResult = await runningTasksSubquery;
-            const runningTasksResultDict = {};
-            runningTasksResult.forEach((row) => {
-                runningTasksResultDict[row.executor_id] = Number(row.task_count);
-            });
-            const runningTasksForThisWorker = runningTasksResultDict[executorID] || 0;
             let maxTasks = Infinity;
-            if (queue.workerConcurrency !== undefined) {
- [...6 removed lines (old 1293-1298) not rendered in this diff view...]
+            if (queue.workerConcurrency !== undefined || queue.concurrency !== undefined) {
+                // Count how many workflows on this queue are currently PENDING both locally and globally.
+                const runningTasksSubquery = trx(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status`)
+                    .select('executor_id')
+                    .count('* as task_count')
+                    .where('queue_name', queue.name)
+                    .andWhere('status', workflow_1.StatusString.PENDING)
+                    .groupBy('executor_id');
+                const runningTasksResult = await runningTasksSubquery;
+                const runningTasksResultDict = {};
+                runningTasksResult.forEach((row) => {
+                    runningTasksResultDict[row.executor_id] = Number(row.task_count);
+                });
+                const runningTasksForThisWorker = runningTasksResultDict[executorID] || 0;
+                if (queue.workerConcurrency !== undefined) {
+                    maxTasks = Math.max(0, queue.workerConcurrency - runningTasksForThisWorker);
+                }
+                if (queue.concurrency !== undefined) {
+                    const totalRunningTasks = Object.values(runningTasksResultDict).reduce((acc, val) => acc + val, 0);
+                    if (totalRunningTasks > queue.concurrency) {
+                        this.logger.warn(`Total running tasks (${totalRunningTasks}) exceeds the global concurrency limit (${queue.concurrency})`);
+                    }
+                    const availableTasks = Math.max(0, queue.concurrency - totalRunningTasks);
+                    maxTasks = Math.min(maxTasks, availableTasks);
                 }
-                const availableTasks = Math.max(0, queue.concurrency - totalRunningTasks);
-                maxTasks = Math.min(maxTasks, availableTasks);
             }
-            //
-
-
-                .
-                .
-                .andWhere('wq.queue_name', queue.name)
+            // Retrieve the first max_tasks workflows in the queue.
+            // Only retrieve workflows of the local version (or without version set)
+            let query = trx(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_status`)
+                .where('status', workflow_1.StatusString.ENQUEUED)
+                .andWhere('queue_name', queue.name)
                 .andWhere((b) => {
-                b.whereNull('
+                b.whereNull('application_version').orWhere('application_version', appVersion);
             })
-                .orderBy('wq.priority', 'asc')
-                .orderBy('wq.created_at_epoch_ms', 'asc')
                 .forUpdate()
                 .noWait();
+            if (queue.priorityEnabled) {
+                query = query.orderBy('priority', 'asc').orderBy('created_at', 'asc');
+            }
+            else {
+                query = query.orderBy('created_at', 'asc');
+            }
             if (maxTasks !== Infinity) {
                 query = query.limit(maxTasks);
             }
-            const rows = (await query.select(['
+            const rows = (await query.select(['workflow_uuid']));
             // Start the workflows
             const workflowIDs = rows.map((row) => row.workflow_uuid);
             for (const id of workflowIDs) {
@@ -1333,28 +1257,16 @@ class PostgresSystemDatabase {
                     status: workflow_1.StatusString.PENDING,
                     executor_id: executorID,
                     application_version: appVersion,
+                    started_at_epoch_ms: startTimeMs,
                     workflow_deadline_epoch_ms: trx.raw('CASE WHEN workflow_timeout_ms IS NOT NULL AND workflow_deadline_epoch_ms IS NULL THEN (EXTRACT(epoch FROM now()) * 1000)::bigint + workflow_timeout_ms ELSE workflow_deadline_epoch_ms END'),
                 });
                 if (res > 0) {
                     claimedIDs.push(id);
-                    await trx(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue`)
-                        .where('workflow_uuid', id)
-                        .update('started_at_epoch_ms', startTimeMs);
                 }
                 // If we did not update this record, probably someone else did. Count in either case.
                 ++numRecentQueries;
             }
         }, { isolationLevel: 'repeatable read' });
-        // If we have a rate limit, garbage-collect all completed functions started
-        // before the period. If there's no limiter, there's no need--they were
-        // deleted on completion.
-        if (queue.rateLimit) {
-            await this.knexDB(`${dbos_executor_1.DBOSExecutor.systemDBSchemaName}.workflow_queue`)
-                .whereNotNull('completed_at_epoch_ms')
-                .andWhere('queue_name', queue.name)
-                .andWhere('started_at_epoch_ms', '<', startTimeMs - limiterPeriodMS)
-                .delete();
-        }
         // Return the IDs of all functions we marked started
         return claimedIDs;
     }