@usehelical/workflows 0.0.1-alpha.12 → 0.0.1-alpha.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{index.d.ts → index.d.mts} +13 -2
- package/dist/index.mjs +3 -0
- package/dist/index.mjs.map +1 -0
- package/dist/{state-B89wwR8P.d.ts → state-CmpqDrnz.d.mts} +1 -13
- package/dist/workflows.d.mts +19 -0
- package/dist/workflows.mjs +7 -0
- package/dist/workflows.mjs.map +1 -0
- package/package.json +1 -3
- package/dist/chunk-WKVKC6AI.js +0 -650
- package/dist/chunk-WKVKC6AI.js.map +0 -1
- package/dist/index.js +0 -1028
- package/dist/index.js.map +0 -1
- package/dist/workflows.d.ts +0 -45
- package/dist/workflows.js +0 -334
- package/dist/workflows.js.map +0 -1
package/dist/index.js
DELETED
|
@@ -1,1028 +0,0 @@
|
|
|
1
|
-
import { createRunHandle, getRun, withDbRetry, executeWorkflow, deserialize, serialize, WorkflowNotFoundError, QueueNotFoundError, cancelRun, RunNotFoundError, insertPendingRun, MaxRecoveryAttemptsExceededError } from './chunk-WKVKC6AI.js';
|
|
2
|
-
import crypto from 'crypto';
|
|
3
|
-
import { Kysely, PostgresDialect, sql } from 'kysely';
|
|
4
|
-
import { Pool } from 'pg';
|
|
5
|
-
|
|
6
|
-
// core/internal/events/polling-loop.ts
|
|
7
|
-
var PollingLoop = class {
|
|
8
|
-
intervalMs;
|
|
9
|
-
jitterFactor;
|
|
10
|
-
intervalHandle = null;
|
|
11
|
-
callback;
|
|
12
|
-
running = false;
|
|
13
|
-
constructor(intervalMs, callback, jitterFactor = 0.1) {
|
|
14
|
-
this.intervalMs = intervalMs;
|
|
15
|
-
this.callback = callback;
|
|
16
|
-
this.jitterFactor = Math.max(0, Math.min(1, jitterFactor));
|
|
17
|
-
}
|
|
18
|
-
calculateNextInterval() {
|
|
19
|
-
const jitterRange = this.intervalMs * this.jitterFactor;
|
|
20
|
-
const jitter = (Math.random() * 2 - 1) * jitterRange;
|
|
21
|
-
return Math.max(0, this.intervalMs + jitter);
|
|
22
|
-
}
|
|
23
|
-
scheduleNext() {
|
|
24
|
-
if (!this.running) {
|
|
25
|
-
return;
|
|
26
|
-
}
|
|
27
|
-
const nextInterval = this.calculateNextInterval();
|
|
28
|
-
this.intervalHandle = setTimeout(() => {
|
|
29
|
-
try {
|
|
30
|
-
this.callback();
|
|
31
|
-
} catch (error) {
|
|
32
|
-
console.error("PollingLoop callback error:", error);
|
|
33
|
-
}
|
|
34
|
-
this.scheduleNext();
|
|
35
|
-
}, nextInterval);
|
|
36
|
-
}
|
|
37
|
-
start() {
|
|
38
|
-
if (this.running) {
|
|
39
|
-
return;
|
|
40
|
-
}
|
|
41
|
-
this.running = true;
|
|
42
|
-
this.scheduleNext();
|
|
43
|
-
}
|
|
44
|
-
stop() {
|
|
45
|
-
this.running = false;
|
|
46
|
-
if (this.intervalHandle) {
|
|
47
|
-
clearTimeout(this.intervalHandle);
|
|
48
|
-
this.intervalHandle = null;
|
|
49
|
-
}
|
|
50
|
-
}
|
|
51
|
-
isRunning() {
|
|
52
|
-
return this.running;
|
|
53
|
-
}
|
|
54
|
-
};
|
|
55
|
-
|
|
56
|
-
// core/internal/events/event-bus-core.ts
|
|
57
|
-
var EventBusCore = class {
|
|
58
|
-
constructor(config, pollingLoop) {
|
|
59
|
-
this.config = config;
|
|
60
|
-
this.allowWildcardSubscriptions = config.allowWildcardSubscriptions;
|
|
61
|
-
this.pollingLoop = pollingLoop;
|
|
62
|
-
}
|
|
63
|
-
subscribers = /* @__PURE__ */ new Map();
|
|
64
|
-
eventSequence = /* @__PURE__ */ new Map();
|
|
65
|
-
pollingLoop;
|
|
66
|
-
allowWildcardSubscriptions;
|
|
67
|
-
subscribe(subject, key, callback) {
|
|
68
|
-
const subscriptionKey = createSubscriptionKey(subject, key);
|
|
69
|
-
if (!this.subscribers.has(subscriptionKey)) {
|
|
70
|
-
this.subscribers.set(subscriptionKey, /* @__PURE__ */ new Set());
|
|
71
|
-
}
|
|
72
|
-
this.subscribers.get(subscriptionKey).add(callback);
|
|
73
|
-
this.pollingLoop.start();
|
|
74
|
-
return () => this.unsubscribe(subscriptionKey, callback);
|
|
75
|
-
}
|
|
76
|
-
unsubscribe(subscriptionKey, callback) {
|
|
77
|
-
const subscriptionCallbacks = this.subscribers.get(subscriptionKey);
|
|
78
|
-
if (subscriptionCallbacks) {
|
|
79
|
-
subscriptionCallbacks.delete(callback);
|
|
80
|
-
if (subscriptionCallbacks.size === 0) {
|
|
81
|
-
this.subscribers.delete(subscriptionKey);
|
|
82
|
-
this.eventSequence.delete(subscriptionKey);
|
|
83
|
-
}
|
|
84
|
-
}
|
|
85
|
-
if (this.subscribers.size === 0) {
|
|
86
|
-
this.pollingLoop.stop();
|
|
87
|
-
}
|
|
88
|
-
}
|
|
89
|
-
emitEvent(subject, key, event, eventChangeId) {
|
|
90
|
-
const subscriptionKeys = [createSubscriptionKey(subject, key)].concat(
|
|
91
|
-
this.allowWildcardSubscriptions ? [createSubscriptionKey(subject, "*")] : []
|
|
92
|
-
);
|
|
93
|
-
for (const subscriptionKey of subscriptionKeys) {
|
|
94
|
-
const subscribers = this.subscribers.get(subscriptionKey);
|
|
95
|
-
if (subscribers) {
|
|
96
|
-
for (const subscriptionCallback of subscribers) {
|
|
97
|
-
subscriptionCallback(event);
|
|
98
|
-
}
|
|
99
|
-
}
|
|
100
|
-
}
|
|
101
|
-
this.eventSequence.set(subscriptionKeys[0], eventChangeId);
|
|
102
|
-
}
|
|
103
|
-
checkHasSubscribers(subject, key) {
|
|
104
|
-
const subscriptionKeys = [createSubscriptionKey(subject, key)].concat(
|
|
105
|
-
this.allowWildcardSubscriptions ? [createSubscriptionKey(subject, "*")] : []
|
|
106
|
-
);
|
|
107
|
-
return subscriptionKeys.some((subscriptionKey) => this.subscribers.has(subscriptionKey));
|
|
108
|
-
}
|
|
109
|
-
getEventSequence(subject, key) {
|
|
110
|
-
return this.eventSequence.get(createSubscriptionKey(subject, key)) ?? 0;
|
|
111
|
-
}
|
|
112
|
-
getSubscriptionKeys() {
|
|
113
|
-
return Array.from(this.subscribers.keys()).map(splitSubscriptionKey);
|
|
114
|
-
}
|
|
115
|
-
};
|
|
116
|
-
function createSubscriptionKey(subject, key) {
|
|
117
|
-
return `${subject}::${key}`;
|
|
118
|
-
}
|
|
119
|
-
function splitSubscriptionKey(subscriptionKey) {
|
|
120
|
-
return subscriptionKey.split("::");
|
|
121
|
-
}
|
|
122
|
-
|
|
123
|
-
// core/internal/repository/get-state.ts
|
|
124
|
-
async function getState(db, runId, key) {
|
|
125
|
-
const result = await db.selectFrom("state").select(["key", "value", "change_id"]).where("run_id", "=", runId).where("key", "=", key).executeTakeFirst();
|
|
126
|
-
if (!result) {
|
|
127
|
-
return void 0;
|
|
128
|
-
}
|
|
129
|
-
return result.value;
|
|
130
|
-
}
|
|
131
|
-
|
|
132
|
-
// core/internal/repository/get-state-batch.ts
|
|
133
|
-
async function getStateBatch(db, stateRetrievalRequests) {
|
|
134
|
-
const results = await db.selectFrom("state").select(["key", "value", "change_id", "run_id", "change_id"]).where(
|
|
135
|
-
"run_id",
|
|
136
|
-
"in",
|
|
137
|
-
stateRetrievalRequests.map((r) => r.runId)
|
|
138
|
-
).where(
|
|
139
|
-
"key",
|
|
140
|
-
"in",
|
|
141
|
-
stateRetrievalRequests.map((r) => r.key)
|
|
142
|
-
).execute();
|
|
143
|
-
return results.map((r) => ({
|
|
144
|
-
runId: r.run_id,
|
|
145
|
-
key: r.key,
|
|
146
|
-
value: r.value,
|
|
147
|
-
changeId: r.change_id
|
|
148
|
-
}));
|
|
149
|
-
}
|
|
150
|
-
|
|
151
|
-
// core/internal/events/state-event-bus.ts
|
|
152
|
-
var POLLING_FALLBACK_INTERVAL_MS = 1e4;
|
|
153
|
-
var StateEventBus = class {
|
|
154
|
-
constructor(db) {
|
|
155
|
-
this.db = db;
|
|
156
|
-
this.pollingLoop = new PollingLoop(POLLING_FALLBACK_INTERVAL_MS, this.handlePoll.bind(this));
|
|
157
|
-
this.bus = new EventBusCore({ allowWildcardSubscriptions: false }, this.pollingLoop);
|
|
158
|
-
this.pollingLoop.start();
|
|
159
|
-
}
|
|
160
|
-
bus;
|
|
161
|
-
pollingLoop;
|
|
162
|
-
handleNotify(payload) {
|
|
163
|
-
const [workflowId, key, changeIdString] = payload.split("::");
|
|
164
|
-
const changeId = Number(changeIdString);
|
|
165
|
-
if (!this.bus.checkHasSubscribers(workflowId, key) || this.bus.getEventSequence(workflowId, key) >= changeId) {
|
|
166
|
-
return;
|
|
167
|
-
}
|
|
168
|
-
getState(this.db, workflowId, key).then((state) => {
|
|
169
|
-
if (!state) {
|
|
170
|
-
return;
|
|
171
|
-
}
|
|
172
|
-
this.bus.emitEvent(workflowId, key, state, changeId);
|
|
173
|
-
});
|
|
174
|
-
}
|
|
175
|
-
async handlePoll() {
|
|
176
|
-
const stateRetrievalRequests = getStateRetrievalRequests(this.bus.getSubscriptionKeys());
|
|
177
|
-
if (stateRetrievalRequests.length === 0) {
|
|
178
|
-
return;
|
|
179
|
-
}
|
|
180
|
-
const states = await getStateBatch(this.db, stateRetrievalRequests);
|
|
181
|
-
for (const state of states) {
|
|
182
|
-
if (this.bus.getEventSequence(state.runId, state.key) >= state.changeId) {
|
|
183
|
-
continue;
|
|
184
|
-
}
|
|
185
|
-
this.bus.emitEvent(state.runId, state.key, state.value, state.changeId);
|
|
186
|
-
}
|
|
187
|
-
}
|
|
188
|
-
subscribe(workflowId, key, callback) {
|
|
189
|
-
return this.bus.subscribe(workflowId, key, callback);
|
|
190
|
-
}
|
|
191
|
-
emitEvent(workflowId, key, data, changeId) {
|
|
192
|
-
this.bus.emitEvent(workflowId, key, data, changeId);
|
|
193
|
-
}
|
|
194
|
-
destroy() {
|
|
195
|
-
this.pollingLoop.stop();
|
|
196
|
-
}
|
|
197
|
-
};
|
|
198
|
-
function getStateRetrievalRequests(keys) {
|
|
199
|
-
return keys.map((k) => {
|
|
200
|
-
const [runId, key] = k;
|
|
201
|
-
return {
|
|
202
|
-
runId,
|
|
203
|
-
key
|
|
204
|
-
};
|
|
205
|
-
});
|
|
206
|
-
}
|
|
207
|
-
async function getMessageBatch(db, messageRetrievalRequests) {
|
|
208
|
-
const uniquePairs = Array.from(
|
|
209
|
-
new Map(
|
|
210
|
-
messageRetrievalRequests.map((r) => [`${r.destinationWorkflowId}:${r.messageType}`, r])
|
|
211
|
-
).values()
|
|
212
|
-
);
|
|
213
|
-
const results = await db.selectFrom("messages").select(["id", "payload", "type", "destination_run_id"]).where(
|
|
214
|
-
sql`(destination_run_id, type) IN (${sql.join(
|
|
215
|
-
uniquePairs.map((r) => sql`(${r.destinationWorkflowId}, ${r.messageType})`)
|
|
216
|
-
)})`
|
|
217
|
-
).execute();
|
|
218
|
-
return results.map((r) => ({
|
|
219
|
-
id: r.id,
|
|
220
|
-
payload: r.payload,
|
|
221
|
-
type: r.type,
|
|
222
|
-
destinationRunId: r.destination_run_id
|
|
223
|
-
}));
|
|
224
|
-
}
|
|
225
|
-
|
|
226
|
-
// core/internal/events/message-event-bus.ts
|
|
227
|
-
var POLLING_FALLBACK_INTERVAL_MS2 = 1e4;
|
|
228
|
-
var MessageEventBus = class {
|
|
229
|
-
constructor(db) {
|
|
230
|
-
this.db = db;
|
|
231
|
-
this.pollingLoop = new PollingLoop(POLLING_FALLBACK_INTERVAL_MS2, this.handlePoll.bind(this));
|
|
232
|
-
this.bus = new EventBusCore({ allowWildcardSubscriptions: true }, this.pollingLoop);
|
|
233
|
-
this.pollingLoop.start();
|
|
234
|
-
}
|
|
235
|
-
bus;
|
|
236
|
-
pollingLoop;
|
|
237
|
-
handleNotify(payload) {
|
|
238
|
-
const [destinationWorkflowId, messageType, messageCount] = payload.split("::");
|
|
239
|
-
if (!this.bus.checkHasSubscribers(destinationWorkflowId, messageType)) {
|
|
240
|
-
return;
|
|
241
|
-
}
|
|
242
|
-
this.bus.emitEvent(destinationWorkflowId, messageType, void 0, Number(messageCount));
|
|
243
|
-
}
|
|
244
|
-
async handlePoll() {
|
|
245
|
-
const messageRetrievalRequests = getMessageRetrievalRequests(this.bus.getSubscriptionKeys());
|
|
246
|
-
if (messageRetrievalRequests.length === 0) {
|
|
247
|
-
return;
|
|
248
|
-
}
|
|
249
|
-
const messages = await getMessageBatch(this.db, messageRetrievalRequests);
|
|
250
|
-
for (const message of messages) {
|
|
251
|
-
this.bus.emitEvent(message.destinationRunId, message.type, void 0, 1);
|
|
252
|
-
}
|
|
253
|
-
}
|
|
254
|
-
subscribe(destinationWorkflowId, type, cb) {
|
|
255
|
-
return this.bus.subscribe(destinationWorkflowId, type, cb);
|
|
256
|
-
}
|
|
257
|
-
destroy() {
|
|
258
|
-
this.pollingLoop.stop();
|
|
259
|
-
}
|
|
260
|
-
};
|
|
261
|
-
function getMessageRetrievalRequests(subscriptionKeys) {
|
|
262
|
-
return subscriptionKeys.map(([destinationWorkflowId, messageType]) => ({
|
|
263
|
-
destinationWorkflowId,
|
|
264
|
-
messageType
|
|
265
|
-
}));
|
|
266
|
-
}
|
|
267
|
-
async function runWorkflow(ctx, wf, args = [], options = {}) {
|
|
268
|
-
const { db, executorId, workflowRegistry } = ctx;
|
|
269
|
-
const workflow = typeof wf === "string" ? workflowRegistry.getByName(wf) : workflowRegistry.getByWorkflowDefinition(wf);
|
|
270
|
-
if (!workflow) {
|
|
271
|
-
throw new WorkflowNotFoundError("Workflow not found");
|
|
272
|
-
}
|
|
273
|
-
const runId = options.id ?? crypto.randomUUID();
|
|
274
|
-
const { path } = await insertPendingRun(db, {
|
|
275
|
-
runId,
|
|
276
|
-
path: [runId],
|
|
277
|
-
inputs: serialize(args),
|
|
278
|
-
executorId,
|
|
279
|
-
workflowName: workflow.name
|
|
280
|
-
});
|
|
281
|
-
await executeWorkflow(ctx, {
|
|
282
|
-
runId,
|
|
283
|
-
runPath: path,
|
|
284
|
-
workflowName: workflow.name,
|
|
285
|
-
fn: workflow.fn,
|
|
286
|
-
args,
|
|
287
|
-
options
|
|
288
|
-
});
|
|
289
|
-
return createRunHandle(ctx, runId);
|
|
290
|
-
}
|
|
291
|
-
|
|
292
|
-
// core/internal/run-registry.ts
|
|
293
|
-
var RunRegistry = class {
|
|
294
|
-
runs = /* @__PURE__ */ new Map();
|
|
295
|
-
registerRun(runId, entry) {
|
|
296
|
-
let state = "pending";
|
|
297
|
-
entry.promise.then(
|
|
298
|
-
() => {
|
|
299
|
-
state = "fulfilled";
|
|
300
|
-
},
|
|
301
|
-
() => {
|
|
302
|
-
state = "rejected";
|
|
303
|
-
}
|
|
304
|
-
);
|
|
305
|
-
this.runs.set(runId, {
|
|
306
|
-
...entry,
|
|
307
|
-
getPromiseState: () => state
|
|
308
|
-
});
|
|
309
|
-
}
|
|
310
|
-
unregisterRun(runId) {
|
|
311
|
-
this.runs.delete(runId);
|
|
312
|
-
}
|
|
313
|
-
getRun(runId) {
|
|
314
|
-
return this.runs.get(runId);
|
|
315
|
-
}
|
|
316
|
-
};
|
|
317
|
-
|
|
318
|
-
// core/internal/repository/get-operations.ts
|
|
319
|
-
async function getOperations(db, runId) {
|
|
320
|
-
const results = await db.selectFrom("operations").select(["output", "error"]).where("run_id", "=", runId).orderBy("sequence_id", "desc").execute();
|
|
321
|
-
return results.map((r) => ({
|
|
322
|
-
result: r.output ?? void 0,
|
|
323
|
-
error: r.error ?? void 0
|
|
324
|
-
}));
|
|
325
|
-
}
|
|
326
|
-
|
|
327
|
-
// core/internal/repository/get-pending-runs.ts
|
|
328
|
-
async function getPendingRuns(db, executorId) {
|
|
329
|
-
const pendingRuns = await db.selectFrom("runs").select(["id", "path", "inputs", "workflow_name"]).where("status", "=", "PENDING" /* PENDING */).where("executor_id", "=", executorId).execute();
|
|
330
|
-
return pendingRuns.map((run) => ({
|
|
331
|
-
id: run.id,
|
|
332
|
-
path: run.path,
|
|
333
|
-
inputs: run.inputs,
|
|
334
|
-
workflowName: run.workflow_name
|
|
335
|
-
}));
|
|
336
|
-
}
|
|
337
|
-
var DEFAULT_MAX_RETRIES = 10;
|
|
338
|
-
async function upsertRun(db, options) {
|
|
339
|
-
const incrementAttempts = options.isRecovery ? 1 : 0;
|
|
340
|
-
const initialRecoveryAttempts = options.status === "QUEUED" ? 0 : 1;
|
|
341
|
-
const result = await db.insertInto("runs").values({
|
|
342
|
-
id: options.runId,
|
|
343
|
-
path: options.path,
|
|
344
|
-
workflow_name: options.workflowName,
|
|
345
|
-
status: options.status,
|
|
346
|
-
inputs: options.inputs,
|
|
347
|
-
idempotency_key: options.idempotencyKey,
|
|
348
|
-
executor_id: options.executorId,
|
|
349
|
-
parent_run_id: options.parentRunId,
|
|
350
|
-
timeout_ms: options.timeout,
|
|
351
|
-
deadline_epoch_ms: options.deadline,
|
|
352
|
-
recovery_attempts: initialRecoveryAttempts,
|
|
353
|
-
queue_name: options.queueName,
|
|
354
|
-
created_at: sql`(extract(epoch from now()) * 1000)::bigint`,
|
|
355
|
-
updated_at: sql`(extract(epoch from now()) * 1000)::bigint`
|
|
356
|
-
}).onConflict(
|
|
357
|
-
(oc) => oc.column("id").doUpdateSet({
|
|
358
|
-
recovery_attempts: sql`CASE
|
|
359
|
-
WHEN runs.status != 'QUEUED'
|
|
360
|
-
THEN runs.recovery_attempts + ${incrementAttempts}
|
|
361
|
-
ELSE runs.recovery_attempts
|
|
362
|
-
END`,
|
|
363
|
-
// Update executor_id when NEW status is not ENQUEUED
|
|
364
|
-
// This allows dequeue operations to claim the workflow
|
|
365
|
-
executor_id: sql`CASE
|
|
366
|
-
WHEN EXCLUDED.status != 'QUEUED'
|
|
367
|
-
THEN EXCLUDED.executor_id
|
|
368
|
-
ELSE runs.executor_id
|
|
369
|
-
END`,
|
|
370
|
-
updated_at: sql`(extract(epoch from now()) * 1000)::bigint`
|
|
371
|
-
})
|
|
372
|
-
).returning([
|
|
373
|
-
"id",
|
|
374
|
-
"change_id",
|
|
375
|
-
"recovery_attempts",
|
|
376
|
-
"executor_id",
|
|
377
|
-
"idempotency_key",
|
|
378
|
-
"status",
|
|
379
|
-
"path"
|
|
380
|
-
]).executeTakeFirst();
|
|
381
|
-
if (!result) {
|
|
382
|
-
throw new Error("Unexpectedly failed to upsert run");
|
|
383
|
-
}
|
|
384
|
-
const isOwner = result.idempotency_key === options.idempotencyKey;
|
|
385
|
-
const shouldExecute = isOwner || options.isRecovery;
|
|
386
|
-
if (shouldExecute) {
|
|
387
|
-
const maxRetries = options.maxRetries ?? DEFAULT_MAX_RETRIES;
|
|
388
|
-
if (result.recovery_attempts > maxRetries + 1) {
|
|
389
|
-
await db.updateTable("runs").set({
|
|
390
|
-
status: "MAX_RECOVERY_ATTEMPTS_EXCEEDED",
|
|
391
|
-
updated_at: sql`(extract(epoch from now()) * 1000)::bigint`
|
|
392
|
-
}).where("id", "=", result.id).where("status", "=", "PENDING").execute();
|
|
393
|
-
throw new MaxRecoveryAttemptsExceededError(result.id, maxRetries);
|
|
394
|
-
}
|
|
395
|
-
}
|
|
396
|
-
return {
|
|
397
|
-
runId: result.id,
|
|
398
|
-
path: options.path,
|
|
399
|
-
changeId: result.change_id,
|
|
400
|
-
recoveryAttempts: result.recovery_attempts,
|
|
401
|
-
executorId: result.executor_id ?? void 0,
|
|
402
|
-
idempotencyKey: result.idempotency_key ?? void 0,
|
|
403
|
-
status: result.status,
|
|
404
|
-
shouldExecute: shouldExecute ?? false
|
|
405
|
-
};
|
|
406
|
-
}
|
|
407
|
-
|
|
408
|
-
// core/internal/recover-pending-runs.ts
|
|
409
|
-
async function recoverPendingRuns(ctx) {
|
|
410
|
-
const { db, executorId, workflowRegistry } = ctx;
|
|
411
|
-
const pendingRuns = await getPendingRuns(db, executorId);
|
|
412
|
-
for (const run of pendingRuns) {
|
|
413
|
-
try {
|
|
414
|
-
const operations = await getOperations(db, run.id);
|
|
415
|
-
const workflow = workflowRegistry.getByName(run.workflowName);
|
|
416
|
-
if (!workflow) {
|
|
417
|
-
console.error(`Workflow ${run.workflowName} not found for recovery`);
|
|
418
|
-
continue;
|
|
419
|
-
}
|
|
420
|
-
const args = run.inputs ? deserialize(run.inputs) : [];
|
|
421
|
-
const upsertResult = await upsertRun(db, {
|
|
422
|
-
runId: run.id,
|
|
423
|
-
path: run.path,
|
|
424
|
-
inputs: run.inputs ?? "",
|
|
425
|
-
executorId,
|
|
426
|
-
workflowName: run.workflowName,
|
|
427
|
-
status: "PENDING" /* PENDING */,
|
|
428
|
-
isRecovery: true
|
|
429
|
-
});
|
|
430
|
-
if (!upsertResult.shouldExecute) {
|
|
431
|
-
console.log(`Run ${run.id} already executed, skipping recovery`);
|
|
432
|
-
continue;
|
|
433
|
-
}
|
|
434
|
-
await executeWorkflow(ctx, {
|
|
435
|
-
runId: upsertResult.runId,
|
|
436
|
-
runPath: upsertResult.path,
|
|
437
|
-
workflowName: run.workflowName,
|
|
438
|
-
fn: workflow.fn,
|
|
439
|
-
args,
|
|
440
|
-
operations
|
|
441
|
-
});
|
|
442
|
-
} catch (error) {
|
|
443
|
-
console.error(`Error recovering run ${run.id}:`, error);
|
|
444
|
-
}
|
|
445
|
-
}
|
|
446
|
-
}
|
|
447
|
-
|
|
448
|
-
// core/internal/workflow-registry.ts
|
|
449
|
-
var WorkflowRegistry = class {
|
|
450
|
-
workflows = {};
|
|
451
|
-
// eslint-disable-next-line @typescript-eslint/no-unsafe-function-type
|
|
452
|
-
fnToName = /* @__PURE__ */ new Map();
|
|
453
|
-
constructor(workflows) {
|
|
454
|
-
this.workflows = workflows;
|
|
455
|
-
for (const [name, entry] of Object.entries(workflows)) {
|
|
456
|
-
this.fnToName.set(entry, name);
|
|
457
|
-
}
|
|
458
|
-
}
|
|
459
|
-
getByName(name) {
|
|
460
|
-
const entry = this.workflows[name];
|
|
461
|
-
if (!entry) {
|
|
462
|
-
return void 0;
|
|
463
|
-
}
|
|
464
|
-
return {
|
|
465
|
-
...entry(),
|
|
466
|
-
name
|
|
467
|
-
};
|
|
468
|
-
}
|
|
469
|
-
getByWorkflowDefinition(definition) {
|
|
470
|
-
const name = this.fnToName.get(definition);
|
|
471
|
-
if (!name) {
|
|
472
|
-
return void 0;
|
|
473
|
-
}
|
|
474
|
-
return {
|
|
475
|
-
...definition(),
|
|
476
|
-
name
|
|
477
|
-
};
|
|
478
|
-
}
|
|
479
|
-
};
|
|
480
|
-
|
|
481
|
-
// core/internal/repository/get-run-batch.ts
|
|
482
|
-
async function getRunBatch(db, runIds) {
|
|
483
|
-
const results = await db.selectFrom("runs").select(["id", "inputs", "output", "error", "status", "change_id", "queue_name"]).where("id", "in", runIds).execute();
|
|
484
|
-
return results.map((r) => ({
|
|
485
|
-
id: r.id,
|
|
486
|
-
input: r.inputs ?? void 0,
|
|
487
|
-
output: r.output ?? void 0,
|
|
488
|
-
error: r.error ?? void 0,
|
|
489
|
-
status: r.status,
|
|
490
|
-
changeId: r.change_id,
|
|
491
|
-
queueName: r.queue_name ?? void 0
|
|
492
|
-
}));
|
|
493
|
-
}
|
|
494
|
-
|
|
495
|
-
// core/internal/events/run-event-bus.ts
|
|
496
|
-
var POLLING_FALLBACK_INTERVAL_MS3 = 1e4;
|
|
497
|
-
var RunEventBus = class {
|
|
498
|
-
constructor(db) {
|
|
499
|
-
this.db = db;
|
|
500
|
-
this.pollingLoop = new PollingLoop(POLLING_FALLBACK_INTERVAL_MS3, this.handlePoll.bind(this));
|
|
501
|
-
this.bus = new EventBusCore({ allowWildcardSubscriptions: true }, this.pollingLoop);
|
|
502
|
-
this.pollingLoop.start();
|
|
503
|
-
}
|
|
504
|
-
bus;
|
|
505
|
-
pollingLoop;
|
|
506
|
-
handleNotify(payload) {
|
|
507
|
-
const [runId, status, changeIdString] = payload.split("::");
|
|
508
|
-
const changeId = Number(changeIdString);
|
|
509
|
-
if (!this.bus.checkHasSubscribers(runId, status) || this.bus.getEventSequence(runId, status) >= changeId) {
|
|
510
|
-
return;
|
|
511
|
-
}
|
|
512
|
-
getRun(this.db, runId).then((run) => {
|
|
513
|
-
if (!run) {
|
|
514
|
-
return;
|
|
515
|
-
}
|
|
516
|
-
this.bus.emitEvent(
|
|
517
|
-
runId,
|
|
518
|
-
status,
|
|
519
|
-
{
|
|
520
|
-
status: run.status,
|
|
521
|
-
queueName: run.queueName,
|
|
522
|
-
result: run.output,
|
|
523
|
-
error: run.error
|
|
524
|
-
},
|
|
525
|
-
changeId
|
|
526
|
-
);
|
|
527
|
-
});
|
|
528
|
-
}
|
|
529
|
-
async handlePoll() {
|
|
530
|
-
const workflowIds = [
|
|
531
|
-
...new Set(this.bus.getSubscriptionKeys().map(([workflowId]) => workflowId))
|
|
532
|
-
];
|
|
533
|
-
if (workflowIds.length === 0) {
|
|
534
|
-
return;
|
|
535
|
-
}
|
|
536
|
-
const runs = await getRunBatch(this.db, workflowIds);
|
|
537
|
-
for (const run of runs) {
|
|
538
|
-
this.bus.emitEvent(
|
|
539
|
-
run.id,
|
|
540
|
-
run.status,
|
|
541
|
-
{
|
|
542
|
-
status: run.status,
|
|
543
|
-
queueName: run.queueName,
|
|
544
|
-
result: run.output,
|
|
545
|
-
error: run.error
|
|
546
|
-
},
|
|
547
|
-
run.changeId
|
|
548
|
-
);
|
|
549
|
-
}
|
|
550
|
-
}
|
|
551
|
-
subscribe(runId, status, cb) {
|
|
552
|
-
return this.bus.subscribe(runId, status, cb);
|
|
553
|
-
}
|
|
554
|
-
destroy() {
|
|
555
|
-
this.pollingLoop.stop();
|
|
556
|
-
}
|
|
557
|
-
};
|
|
558
|
-
|
|
559
|
-
// core/internal/queue-registry.ts
|
|
560
|
-
var QueueRegistry = class {
|
|
561
|
-
queues = {};
|
|
562
|
-
// eslint-disable-next-line @typescript-eslint/no-unsafe-function-type
|
|
563
|
-
fnToName = /* @__PURE__ */ new Map();
|
|
564
|
-
constructor(queues) {
|
|
565
|
-
this.queues = queues;
|
|
566
|
-
for (const [name, entry] of Object.entries(queues)) {
|
|
567
|
-
this.fnToName.set(entry, name);
|
|
568
|
-
}
|
|
569
|
-
}
|
|
570
|
-
getByName(name) {
|
|
571
|
-
const entry = this.queues[name];
|
|
572
|
-
if (!entry) {
|
|
573
|
-
return void 0;
|
|
574
|
-
}
|
|
575
|
-
return {
|
|
576
|
-
name,
|
|
577
|
-
...entry()
|
|
578
|
-
};
|
|
579
|
-
}
|
|
580
|
-
getByQueueEntry(entry) {
|
|
581
|
-
const name = this.fnToName.get(entry);
|
|
582
|
-
if (!name) {
|
|
583
|
-
return void 0;
|
|
584
|
-
}
|
|
585
|
-
return {
|
|
586
|
-
...entry(),
|
|
587
|
-
name
|
|
588
|
-
};
|
|
589
|
-
}
|
|
590
|
-
getQueueInstances() {
|
|
591
|
-
return Object.entries(this.queues).reduce(
|
|
592
|
-
(acc, [name, entry]) => {
|
|
593
|
-
const options = entry();
|
|
594
|
-
acc[name] = {
|
|
595
|
-
rateLimit: options.rateLimit ?? void 0,
|
|
596
|
-
workerConcurrency: options.workerConcurrency ?? void 0,
|
|
597
|
-
concurrency: options.concurrency ?? void 0,
|
|
598
|
-
priorityEnabled: options.priorityEnabled ?? void 0
|
|
599
|
-
};
|
|
600
|
-
return acc;
|
|
601
|
-
},
|
|
602
|
-
{}
|
|
603
|
-
);
|
|
604
|
-
}
|
|
605
|
-
};
|
|
606
|
-
|
|
607
|
-
// client/cancel-run.ts
|
|
608
|
-
async function cancelRun2(ctx, runId) {
|
|
609
|
-
const { db, runRegistry } = ctx;
|
|
610
|
-
const run = await cancelRun(runId, db);
|
|
611
|
-
if (!run) {
|
|
612
|
-
throw new RunNotFoundError(runId);
|
|
613
|
-
}
|
|
614
|
-
for (const pathPart of run.path) {
|
|
615
|
-
const run2 = runRegistry.getRun(pathPart);
|
|
616
|
-
if (run2) {
|
|
617
|
-
run2.abortController.abort();
|
|
618
|
-
}
|
|
619
|
-
}
|
|
620
|
-
}
|
|
621
|
-
async function enqueueRun(db, options) {
|
|
622
|
-
const result = await db.insertInto("runs").values({
|
|
623
|
-
id: options.runId,
|
|
624
|
-
path: options.path,
|
|
625
|
-
inputs: options.inputs,
|
|
626
|
-
queue_name: options.queueName,
|
|
627
|
-
queue_partition_key: options.queuePartitionKey,
|
|
628
|
-
queue_deduplication_id: options.deduplicationId,
|
|
629
|
-
executor_id: options.executorId,
|
|
630
|
-
workflow_name: options.workflowName,
|
|
631
|
-
status: "QUEUED" /* QUEUED */,
|
|
632
|
-
recovery_attempts: options.recoveryAttempts,
|
|
633
|
-
created_at: sql`(extract(epoch from now()) * 1000)::bigint`,
|
|
634
|
-
updated_at: sql`(extract(epoch from now()) * 1000)::bigint`
|
|
635
|
-
}).onConflict((oc) => oc.columns(["queue_name", "queue_deduplication_id"]).doNothing()).returning(["id", "change_id"]).executeTakeFirst();
|
|
636
|
-
return {
|
|
637
|
-
runId: result?.id,
|
|
638
|
-
changeId: result?.change_id
|
|
639
|
-
};
|
|
640
|
-
}
|
|
641
|
-
|
|
642
|
-
// client/queue-workflow.ts
|
|
643
|
-
async function queueWorkflow(ctx, queue, wf, args, options) {
|
|
644
|
-
const { db, executorId, workflowRegistry, queueRegistry } = ctx;
|
|
645
|
-
const runId = crypto.randomUUID();
|
|
646
|
-
const workflowName = typeof wf === "string" ? wf : workflowRegistry.getByWorkflowDefinition(wf)?.name;
|
|
647
|
-
if (!workflowName) {
|
|
648
|
-
throw new WorkflowNotFoundError("Workflow name not specified");
|
|
649
|
-
}
|
|
650
|
-
const queueName = typeof queue === "string" ? queue : queueRegistry.getByQueueEntry(queue)?.name;
|
|
651
|
-
if (!queueName) {
|
|
652
|
-
throw new QueueNotFoundError("Queue name not specified");
|
|
653
|
-
}
|
|
654
|
-
await enqueueRun(db, {
|
|
655
|
-
runId,
|
|
656
|
-
path: [runId],
|
|
657
|
-
inputs: serialize(args),
|
|
658
|
-
executorId,
|
|
659
|
-
workflowName,
|
|
660
|
-
timeout: options?.timeout,
|
|
661
|
-
deadline: options?.deadline,
|
|
662
|
-
queueName
|
|
663
|
-
});
|
|
664
|
-
return createRunHandle(ctx, runId);
|
|
665
|
-
}
|
|
666
|
-
async function dequeueRun(tx, runId, executorId) {
|
|
667
|
-
const result = await tx.updateTable("runs").set({
|
|
668
|
-
status: "PENDING" /* PENDING */,
|
|
669
|
-
started_at_epoch_ms: sql`(extract(epoch from now()) * 1000)::bigint`,
|
|
670
|
-
updated_at: sql`(extract(epoch from now()) * 1000)::bigint`,
|
|
671
|
-
executor_id: executorId
|
|
672
|
-
}).where("id", "=", runId).where("status", "=", "QUEUED" /* QUEUED */).returning([
|
|
673
|
-
"id",
|
|
674
|
-
"change_id",
|
|
675
|
-
"path",
|
|
676
|
-
"timeout_ms",
|
|
677
|
-
"deadline_epoch_ms",
|
|
678
|
-
"inputs",
|
|
679
|
-
"workflow_name"
|
|
680
|
-
]).executeTakeFirst();
|
|
681
|
-
if (!result) {
|
|
682
|
-
throw new RunNotFoundError(runId);
|
|
683
|
-
}
|
|
684
|
-
return {
|
|
685
|
-
runId: result.id,
|
|
686
|
-
changeId: result.change_id,
|
|
687
|
-
path: result.path,
|
|
688
|
-
timeoutMs: result.timeout_ms ? Number(result.timeout_ms) : void 0,
|
|
689
|
-
deadlineEpochMs: result.deadline_epoch_ms ? Number(result.deadline_epoch_ms) : void 0,
|
|
690
|
-
inputs: result.inputs ?? void 0,
|
|
691
|
-
workflowName: result.workflow_name
|
|
692
|
-
};
|
|
693
|
-
}
|
|
694
|
-
|
|
695
|
-
// core/internal/repository/get-executable-runs.ts
|
|
696
|
-
async function getExecutableRuns(db, {
|
|
697
|
-
queueName,
|
|
698
|
-
executorId,
|
|
699
|
-
workerConcurrency,
|
|
700
|
-
globalConcurrency,
|
|
701
|
-
rateLimit,
|
|
702
|
-
partitionKey,
|
|
703
|
-
priorityEnabled
|
|
704
|
-
}) {
|
|
705
|
-
const startTimeMs = Date.now();
|
|
706
|
-
const limiterPeriodMs = rateLimit ? rateLimit.period * 1e3 : 0;
|
|
707
|
-
return await withDbRetry(
|
|
708
|
-
async () => db.transaction().execute(async (tx) => {
|
|
709
|
-
if (rateLimit) {
|
|
710
|
-
const result = await tx.selectFrom("runs").select(({ fn }) => [fn.count("id").as("count")]).where("queue_name", "=", queueName).where("status", "!=", "QUEUED" /* QUEUED */).where("started_at_epoch_ms", ">", (startTimeMs - limiterPeriodMs).toString()).$if(
|
|
711
|
-
partitionKey !== void 0,
|
|
712
|
-
(qb) => qb.where("queue_partition_key", "=", partitionKey)
|
|
713
|
-
).executeTakeFirstOrThrow();
|
|
714
|
-
if (result.count >= rateLimit.limitPerPeriod) {
|
|
715
|
-
return [];
|
|
716
|
-
}
|
|
717
|
-
}
|
|
718
|
-
let maxTasks = Infinity;
|
|
719
|
-
if (globalConcurrency || workerConcurrency) {
|
|
720
|
-
const runningTasks = await tx.selectFrom("runs").select(["executor_id", ({ fn }) => fn.count("id").as("task_count")]).where("queue_name", "=", queueName).where("status", "=", "PENDING" /* PENDING */).$if(
|
|
721
|
-
partitionKey !== void 0,
|
|
722
|
-
(qb) => qb.where("queue_partition_key", "=", partitionKey)
|
|
723
|
-
).groupBy("executor_id").execute();
|
|
724
|
-
const tasksByExecutor = Object.fromEntries(
|
|
725
|
-
runningTasks.map((row) => [row.executor_id, row.task_count])
|
|
726
|
-
);
|
|
727
|
-
const runningForThisWorker = tasksByExecutor[executorId] ?? 0;
|
|
728
|
-
if (workerConcurrency !== void 0) {
|
|
729
|
-
maxTasks = Math.max(0, workerConcurrency - runningForThisWorker);
|
|
730
|
-
}
|
|
731
|
-
if (globalConcurrency !== void 0) {
|
|
732
|
-
const totalRunning = Object.values(tasksByExecutor).reduce((a, b) => a + b, 0);
|
|
733
|
-
const availableGlobal = Math.max(0, globalConcurrency - totalRunning);
|
|
734
|
-
maxTasks = Math.min(maxTasks, availableGlobal);
|
|
735
|
-
}
|
|
736
|
-
if (maxTasks <= 0) {
|
|
737
|
-
return [];
|
|
738
|
-
}
|
|
739
|
-
}
|
|
740
|
-
const lockClause = globalConcurrency ? "FOR UPDATE NOWAIT" : "FOR UPDATE SKIP LOCKED";
|
|
741
|
-
const workflowIds = await sql`
|
|
742
|
-
SELECT id
|
|
743
|
-
FROM runs
|
|
744
|
-
WHERE status = ${"QUEUED" /* QUEUED */}
|
|
745
|
-
AND queue_name = ${queueName}
|
|
746
|
-
${partitionKey !== void 0 ? sql`AND queue_partition_key = ${partitionKey}` : sql``}
|
|
747
|
-
${priorityEnabled ? sql`ORDER BY priority ASC, created_at ASC` : sql`ORDER BY created_at ASC`}
|
|
748
|
-
${maxTasks !== Infinity ? sql`LIMIT ${maxTasks}` : sql``}
|
|
749
|
-
${sql.raw(lockClause)}
|
|
750
|
-
`.execute(tx);
|
|
751
|
-
const claimedRuns = [];
|
|
752
|
-
for (const { id } of workflowIds.rows) {
|
|
753
|
-
const dequeuedRun = await dequeueRun(tx, id, executorId);
|
|
754
|
-
claimedRuns.push(dequeuedRun);
|
|
755
|
-
}
|
|
756
|
-
return claimedRuns;
|
|
757
|
-
})
|
|
758
|
-
);
|
|
759
|
-
}
|
|
760
|
-
|
|
761
|
-
// core/internal/repository/get-queue-partitions.ts
|
|
762
|
-
async function getQueuePartitions(db, queueName) {
|
|
763
|
-
const result = await db.selectFrom("runs").select("queue_partition_key").distinct().where("queue_name", "=", queueName).where("status", "=", "QUEUED" /* QUEUED */).where("queue_partition_key", "is not", null).execute();
|
|
764
|
-
return result.map((row) => row.queue_partition_key);
|
|
765
|
-
}
|
|
766
|
-
|
|
767
|
-
// core/internal/queue-manager.ts
var POLLING_INTERVAL_MS = 1e3;

/**
 * Polls the database once per POLLING_INTERVAL_MS and dispatches QUEUED runs
 * for every registered queue, honoring the queue's partitioning, concurrency,
 * rate-limit and priority settings.
 */
var QueueManager = class {
  constructor(ctx) {
    this.ctx = ctx;
    this.pollingLoop = new PollingLoop(POLLING_INTERVAL_MS, this.handlePoll.bind(this));
    this.queues = this.ctx.queueRegistry.getQueueInstances();
  }
  pollingLoop;
  queues = {};
  /** One polling tick: dispatch each registered queue in sequence. */
  async handlePoll() {
    for (const [queueName, queue] of Object.entries(this.queues)) {
      await this.dispatch(queueName, queue);
    }
  }
  /**
   * Claim and execute runs for a single queue. When partitioning is enabled,
   * runs are claimed per distinct partition key so concurrency limits apply
   * independently per partition; otherwise the queue is drained as a whole.
   */
  async dispatch(queueName, queue) {
    const { db } = this.ctx;
    if (queue.partitioningEnabled) {
      const partitions = await getQueuePartitions(db, queueName);
      for (const partition of partitions) {
        await this.claimAndExecute(queueName, queue, partition);
      }
      return;
    }
    await this.claimAndExecute(queueName, queue, undefined);
  }
  /**
   * Shared claim+execute path for both the partitioned and non-partitioned
   * branches of dispatch (previously duplicated inline).
   *
   * @param partitionKey Partition to claim from, or undefined for the whole
   *   queue (getExecutableRuns treats undefined the same as absent).
   */
  async claimAndExecute(queueName, queue, partitionKey) {
    const { db, executorId } = this.ctx;
    const runs = await getExecutableRuns(db, {
      queueName,
      executorId,
      workerConcurrency: queue.workerConcurrency,
      globalConcurrency: queue.concurrency,
      rateLimit: queue.rateLimit,
      partitionKey,
      priorityEnabled: queue.priorityEnabled
    });
    for (const run of runs) {
      const workflow = this.ctx.workflowRegistry.getByName(run.workflowName);
      if (!workflow) {
        // The run references a workflow this executor does not know about;
        // log and skip so the rest of the claimed batch still executes.
        console.error(`Workflow ${run.workflowName} not found`);
        continue;
      }
      await executeWorkflow(this.ctx, {
        runId: run.runId,
        runPath: run.path,
        workflowName: run.workflowName,
        fn: workflow.fn,
        args: deserialize(run.inputs ?? "[]")
      });
    }
  }
  /** Begin polling for work. */
  start() {
    this.pollingLoop.start();
  }
  /** Stop polling; already-started executions are not interrupted. */
  destroy() {
    this.pollingLoop.stop();
  }
};
|
|
844
|
-
|
|
845
|
-
// core/internal/events/setup-postgres-notify.ts
// NOTIFY/LISTEN channels the runtime subscribes to; each is namespaced with a
// "helical_" prefix on the wire.
var CHANNELS = ["runs", "messages", "state"];
/**
 * Subscribes the dedicated client connection to every helical channel and
 * then wires the supplied per-channel callbacks.
 *
 * The LISTEN statements are issued inside an explicit BEGIN/COMMIT so a
 * failure leaves no partial subscription; on error the transaction is rolled
 * back and the error rethrown so withDbRetry can retry the whole setup.
 *
 * @param client        Driver client exposing query() and listen().
 * @param subscriptions Map of channel suffix -> notification callback.
 */
async function setupPostgresNotify(client, subscriptions) {
  await withDbRetry(async () => {
    try {
      await client.query(`BEGIN`);
      for (const channel of CHANNELS) {
        await client.query(`LISTEN "helical_${channel}"`);
      }
      await client.query(`COMMIT`);
    } catch (error) {
      // Abort the transaction so a retry starts from a clean state.
      await client.query(`ROLLBACK`);
      throw error;
    }
    // Register notification callbacks only after LISTEN succeeded.
    // NOTE(review): if withDbRetry re-runs this closure after a failure in the
    // loop below, callbacks may be registered more than once — confirm
    // withDbRetry's retry semantics before relying on single delivery.
    for (const [channel, callback] of Object.entries(subscriptions)) {
      await client.listen(`helical_${channel}`, callback);
    }
  });
}
|
|
864
|
-
/**
 * Creates the Postgres driver used by the runtime: a Kysely handle backed by
 * a pg Pool, plus a lightweight "client" that lazily checks out one dedicated
 * connection for LISTEN/NOTIFY traffic.
 *
 * @param {{ connectionString: string }} options - Postgres connection string.
 * @returns {{ client: { listen: Function, query: Function }, db: Kysely }}
 */
function createPgDriver({ connectionString }) {
  const pool = new Pool({
    connectionString,
    max: 10
  });
  let clientPromise;
  // Lazily check out a single connection and reuse it for all notification
  // traffic. Fix: if the initial connect() rejects, clear the cached promise
  // so the next call retries instead of replaying the cached rejection forever.
  const getClient = async () => {
    if (!clientPromise) {
      clientPromise = pool.connect().catch((error) => {
        clientPromise = void 0;
        throw error;
      });
    }
    return await clientPromise;
  };
  return {
    client: {
      // Registers a callback for one NOTIFY channel. Each call attaches a new
      // "notification" handler that filters messages by channel name.
      listen: async (channel, callback) => {
        const client = await getClient();
        client.on("notification", (msg) => {
          if (msg.channel !== channel) return;
          callback(msg.payload);
        });
      },
      // Runs a raw SQL statement on the dedicated listener connection.
      query: async (query) => {
        const client = await getClient();
        await client.query(query);
      }
    },
    db: new Kysely({ dialect: new PostgresDialect({ pool }) })
  };
}
|
|
893
|
-
// Queue used for runs that are resumed directly (not via a user queue).
var INTERNAL_QUEUE_NAME = "_helical_internal_queue";
/**
 * Re-queues a PENDING run onto the internal queue so an executor picks it up,
 * clearing its deadline/timeout and resetting its recovery counter.
 *
 * @param db    Kysely database handle.
 * @param runId Id of the run to resume.
 * @throws RunNotFoundError when no PENDING run with this id was updated.
 */
async function resumeRun(db, runId) {
  const result = await db.updateTable("runs").set({
    status: "QUEUED" /* QUEUED */,
    queue_name: INTERNAL_QUEUE_NAME,
    deadline_epoch_ms: null,
    timeout_ms: null,
    recovery_attempts: 0,
    started_at_epoch_ms: sql`(extract(epoch from now()) * 1000)::bigint`,
    updated_at: sql`(extract(epoch from now()) * 1000)::bigint`
  }).where("id", "=", runId).where("status", "=", "PENDING" /* PENDING */).executeTakeFirst();
  // Fix: Kysely's execute() returns an array (always truthy), so the previous
  // `if (!result)` guard was unreachable dead code. Use executeTakeFirst()
  // and inspect numUpdatedRows so a missing (or non-PENDING) run raises.
  if (!result || Number(result.numUpdatedRows ?? 0) === 0) {
    throw new RunNotFoundError(runId);
  }
}
|
|
908
|
-
|
|
909
|
-
// client/resume-run.ts
/**
 * Client entry point for resuming a paused run: re-queues it via the
 * repository helper, then returns a handle for observing the run.
 */
async function resumeRun2(ctx, runId) {
  await resumeRun(ctx.db, runId);
  return createRunHandle(ctx, runId);
}
|
|
915
|
-
|
|
916
|
-
// core/internal/repository/insert-message.ts
/**
 * Persists one message row addressed to a run. Option fields map to columns:
 * destinationWorkflowId -> destination_run_id, messageType -> type,
 * data -> payload.
 */
async function insertMessage(db, options) {
  const row = {
    destination_run_id: options.destinationWorkflowId,
    type: options.messageType,
    payload: options.data
  };
  return await db.insertInto("messages").values(row).execute();
}
|
|
924
|
-
|
|
925
|
-
// client/send-message.ts
/**
 * Sends a named message to a run. `target` may be a run id string or a run
 * handle exposing `.id`; `name` may be a string or an object exposing `.name`.
 * The payload is serialized before being persisted.
 */
async function sendMessage(ctx, target, name, data) {
  const destinationWorkflowId = typeof target === "string" ? target : target.id;
  const messageType = typeof name === "string" ? name : name.name;
  await insertMessage(ctx.db, {
    destinationWorkflowId,
    messageType,
    data: serialize(data)
  });
}
|
|
937
|
-
|
|
938
|
-
// client/get-state.ts
/** Thrown internally when the requested state row does not exist yet. */
var StateNotAvailableError = class extends Error {
};
/**
 * Reads a state value for a run, blocking until the key becomes available:
 * each attempt reads the row (with DB retry); if the key is missing we wait
 * for a state-change notification and try again.
 */
async function getState2(ctx, target, key) {
  const { db, stateEventBus } = ctx;
  const destinationWorkflowId = typeof target === "string" ? target : target.id;
  const stateKey = typeof key === "string" ? key : key.name;
  for (;;) {
    try {
      return await withDbRetry(async () => {
        const state = await getState(db, destinationWorkflowId, stateKey);
        if (!state) {
          throw new StateNotAvailableError();
        }
        return deserialize(state);
      });
    } catch (error) {
      if (!(error instanceof StateNotAvailableError)) {
        throw error;
      }
      // Key not written yet: park until a state notification arrives, then
      // loop back and re-read from the database.
      await waitForStateNotification(stateEventBus, destinationWorkflowId, stateKey);
    }
  }
}
|
|
963
|
-
/**
 * Resolves with the next state value published for (runId, key) on the state
 * event bus, unsubscribing immediately after the first notification.
 */
async function waitForStateNotification(stateEventBus, runId, key) {
  return new Promise((resolve) => {
    const stop = stateEventBus.subscribe(runId, key, (value) => {
      stop();
      resolve(value);
    });
  });
}
|
|
971
|
-
|
|
972
|
-
// client/runtime.ts
/**
 * Builds and starts a workflow runtime instance: wires the Postgres driver,
 * event buses and registries into a shared runtime context, starts the queue
 * manager, kicks off pending-run recovery, and returns the public client API.
 *
 * Ordering matters here: notify setup is started (not awaited) before any
 * handle is returned, and every public method awaits it so notifications are
 * never missed by the first caller.
 */
function createInstance(props) {
  const { db, client } = createPgDriver({ connectionString: props.options.connectionString });
  const messageEventBus = new MessageEventBus(db);
  const stateEventBus = new StateEventBus(db);
  // Stable executor identity for claiming runs; random when not configured.
  const executorId = props.options.instanceId || crypto.randomUUID();
  const runRegistry = new RunRegistry();
  const workflowRegistry = new WorkflowRegistry(props.workflows);
  const runEventBus = new RunEventBus(db);
  const queueRegistry = new QueueRegistry(props.queues || {});
  // Shared context threaded through every runtime operation.
  const runtimeContext = {
    db,
    executorId,
    messageEventBus,
    stateEventBus,
    runRegistry,
    workflowRegistry,
    runEventBus,
    queueRegistry
  };
  // Started eagerly; each public method awaits it before touching the DB.
  const notifySetupPromise = setupPostgresNotify(client, {
    runs: runEventBus.handleNotify.bind(runEventBus),
    state: stateEventBus.handleNotify.bind(stateEventBus),
    messages: messageEventBus.handleNotify.bind(messageEventBus)
  });
  const queueManager = new QueueManager(runtimeContext);
  queueManager.start();
  // Fire-and-forget: recovery runs in the background and is deliberately not
  // awaited here. NOTE(review): any rejection is unhandled — confirm
  // recoverPendingRuns catches its own errors.
  recoverPendingRuns(runtimeContext);
  return {
    runWorkflow: async (wf, args, options) => {
      await notifySetupPromise;
      return runWorkflow(runtimeContext, wf, args, options);
    },
    cancelRun: async (runId) => cancelRun2(runtimeContext, runId),
    resumeRun: async (runId) => resumeRun2(runtimeContext, runId),
    getRun: async (runId) => {
      await notifySetupPromise;
      return createRunHandle(runtimeContext, runId);
    },
    queueWorkflow: async (queue, wf, args, options) => {
      await notifySetupPromise;
      return queueWorkflow(runtimeContext, queue, wf, args, options);
    },
    sendMessage: async (target, name, data) => {
      await notifySetupPromise;
      return sendMessage(runtimeContext, target, name, data);
    },
    getState: async (target, key) => {
      await notifySetupPromise;
      return getState2(runtimeContext, target, key);
    }
  };
}
|
|
1025
|
-
|
|
1026
|
-
export { createInstance };
|
|
1027
|
-
//# sourceMappingURL=index.js.map
|