@voyantjs/workflows 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +201 -0
- package/NOTICE +52 -0
- package/README.md +46 -0
- package/package.json +78 -0
- package/src/auth/index.ts +156 -0
- package/src/conditions.ts +43 -0
- package/src/handler/index.ts +361 -0
- package/src/index.ts +18 -0
- package/src/protocol/index.ts +133 -0
- package/src/rate-limit/index.ts +182 -0
- package/src/runtime/ctx.ts +838 -0
- package/src/runtime/determinism.ts +75 -0
- package/src/runtime/errors.ts +58 -0
- package/src/runtime/executor.ts +427 -0
- package/src/runtime/journal.ts +79 -0
- package/src/testing/index.ts +729 -0
- package/src/trigger.ts +146 -0
- package/src/types.ts +81 -0
- package/src/workflow.ts +306 -0
|
@@ -0,0 +1,838 @@
|
|
|
1
|
+
// Builds the `ctx` object passed to the workflow body.
|
|
2
|
+
//
|
|
3
|
+
// The executor owns the waitpoint-pending queue and the callbacks
|
|
4
|
+
// into the orchestrator; ctx is a thin shell that delegates.
|
|
5
|
+
|
|
6
|
+
import type {
|
|
7
|
+
Duration,
|
|
8
|
+
EnvironmentName,
|
|
9
|
+
RetryPolicy,
|
|
10
|
+
RunTrigger,
|
|
11
|
+
WaitpointKind,
|
|
12
|
+
} from "../types.js";
|
|
13
|
+
import type {
|
|
14
|
+
EnvironmentContext,
|
|
15
|
+
MetadataApi,
|
|
16
|
+
MetadataValue,
|
|
17
|
+
ParallelApi,
|
|
18
|
+
RunContext,
|
|
19
|
+
StepApi,
|
|
20
|
+
StepContext,
|
|
21
|
+
StepFn,
|
|
22
|
+
StepOptions,
|
|
23
|
+
StreamApi,
|
|
24
|
+
WaitForEventApi,
|
|
25
|
+
WaitForSignalApi,
|
|
26
|
+
WaitForTokenApi,
|
|
27
|
+
Waitable,
|
|
28
|
+
WorkflowContext,
|
|
29
|
+
WorkflowHandle,
|
|
30
|
+
InvokeApi,
|
|
31
|
+
InvokeOptions,
|
|
32
|
+
TokenWait,
|
|
33
|
+
GroupApi,
|
|
34
|
+
GroupScope,
|
|
35
|
+
} from "../workflow.js";
|
|
36
|
+
import type { JournalSlice, StepJournalEntry, WaitpointResolutionEntry } from "./journal.js";
|
|
37
|
+
import type { SerializedError } from "../protocol/index.js";
|
|
38
|
+
import {
|
|
39
|
+
WaitpointPendingSignal,
|
|
40
|
+
RunCancelledSignal,
|
|
41
|
+
CompensateRequestedSignal,
|
|
42
|
+
isWaitpointPending,
|
|
43
|
+
isRunCancelled,
|
|
44
|
+
isCompensateRequested,
|
|
45
|
+
} from "./errors.js";
|
|
46
|
+
import {
|
|
47
|
+
type ClockState,
|
|
48
|
+
advanceClockTo,
|
|
49
|
+
createRandomUUID,
|
|
50
|
+
now,
|
|
51
|
+
} from "./determinism.js";
|
|
52
|
+
|
|
53
|
+
/**
 * Callbacks the executor provides for operations that must reach the
 * orchestrator (over HTTP in production, in-memory in tests).
 */
export interface RuntimeCallbacks {
  /** Run a new step and journal the result. Called only for steps not already in the journal. */
  runStep(args: {
    stepId: string;
    attempt: number;
    input: unknown;
    options: StepOptions<unknown>;
    fn: StepFn<unknown>;
    stepCtx: StepContext;
  }): Promise<StepJournalEntry>;

  /**
   * Called when a step completes successfully and had a `compensate`
   * function declared. The executor collects these in completion order
   * and runs them in reverse if the body throws or `ctx.compensate()`
   * is invoked.
   */
  recordCompensable(args: {
    stepId: string;
    output: unknown;
    compensate: (output: unknown) => Promise<void>;
  }): void;

  /** Current length of the compensable list. Used by `ctx.group` checkpoints. */
  compensableLength(): number;

  /**
   * Remove and return compensables added since `fromIndex`. Used by
   * `ctx.group` to run scoped rollback without touching outer
   * compensables.
   */
  spliceCompensable(fromIndex: number): Array<{
    stepId: string;
    output: unknown;
    compensate: (output: unknown) => Promise<void>;
  }>;

  /**
   * Called by `ctx.stream()` for each chunk produced by the source.
   * In production this emits a `stream.chunk` WebSocket event and
   * journals the chunk; in tests the harness collects chunks.
   */
  pushStreamChunk(args: {
    streamId: string;
    seq: number;
    encoding: "text" | "json" | "base64";
    chunk: unknown;
    final: boolean;
  }): void;

  /** Register a new waitpoint; execution will yield after this returns. */
  registerWaitpoint(args: {
    clientWaitpointId: string;
    kind: WaitpointKind;
    meta: Record<string, unknown>;
    timeoutMs?: number;
  }): void;

  /** Push a metadata mutation; flushed on waitpoint yield and run completion. */
  pushMetadata(op: {
    op: "set" | "increment" | "append" | "remove";
    key: string;
    value?: unknown;
    target?: "self" | "parent" | "root";
  }): void;

  /**
   * Number of body invocations so far; incremented by the executor when
   * the body resumes after eviction. Exposed as `ctx.invocationCount`.
   */
  readonly invocationCount: number;

  /** Cancellation signal exposed as `ctx.signal`. */
  readonly abortSignal: AbortSignal;
}
|
|
129
|
+
|
|
130
|
+
/**
 * Static identity of the run: who is running, which workflow, and in
 * which environment. Copied verbatim onto the `ctx` object by
 * `buildCtx`; none of these change during an invocation.
 */
export interface RuntimeEnvironment {
  readonly run: RunContext;
  readonly workflow: { id: string; version: string };
  readonly environment: EnvironmentContext;
  readonly project: { id: string; slug: string };
  readonly organization: { id: string; slug: string };
}
|
|
137
|
+
|
|
138
|
+
/** Everything `buildCtx` needs to assemble one workflow context. */
export interface CtxBuildArgs {
  /** Static run identity copied onto the ctx. */
  env: RuntimeEnvironment;
  /** Replay journal slice: cached step results and resolved waitpoints. */
  journal: JournalSlice;
  /** Executor-provided hooks for orchestrator-bound operations. */
  callbacks: RuntimeCallbacks;
  /** Deterministic clock state; advanced to journaled timestamps on replay. */
  clock: ClockState;
  /** Deterministic PRNG exposed as `ctx.random` / seed for `ctx.randomUUID`. */
  random: () => number;
  /** Mutated as ctx.setRetry is called; each step option inherits. */
  retryOverride: { current: RetryPolicy | undefined };
}
|
|
147
|
+
|
|
148
|
+
/**
 * Builds the `WorkflowContext` handed to the workflow body.
 *
 * Every ctx API consults the replay journal first: a journaled result is
 * returned (or rethrown) immediately with the deterministic clock
 * advanced to the journaled timestamp; a result that is not yet
 * available registers a waitpoint and yields by throwing
 * `WaitpointPendingSignal`, which the executor catches and routes.
 *
 * @param args - Environment, journal slice, executor callbacks,
 *   deterministic clock/PRNG, and the mutable retry override.
 * @returns A fully wired `WorkflowContext<unknown>`.
 */
export function buildCtx(args: CtxBuildArgs): WorkflowContext<unknown> {
  const { env, journal, callbacks, clock, random, retryOverride } = args;

  // Per-ctx client-id counter. Reset on each ctx (= each invocation),
  // which means ids are stable relative to body execution order.
  let clientIdSeq = 0;
  const nextClientId = (): number => ++clientIdSeq;

  // Cooperative cancellation check run at the top of every ctx API.
  function checkCancel(): void {
    if (callbacks.abortSignal.aborted) {
      throw new RunCancelledSignal();
    }
  }

  // ---- step ----

  const step: StepApi = (async (
    id: string,
    optsOrFn: StepOptions<unknown> | StepFn<unknown>,
    maybeFn?: StepFn<unknown>,
  ) => {
    checkCancel();
    // Two call shapes: step(id, fn) and step(id, opts, fn).
    const opts: StepOptions<unknown> = typeof optsOrFn === "function" ? {} : optsOrFn;
    const fn: StepFn<unknown> = typeof optsOrFn === "function" ? optsOrFn : (maybeFn as StepFn<unknown>);

    // Journal hit? Return cached.
    const cached = journal.stepResults[id];
    if (cached) {
      advanceClockTo(clock, cached.finishedAt);
      if (cached.status === "ok") {
        // Re-register compensable on replay so compensations are available
        // if this invocation ends up rolling back.
        if (opts.compensate) {
          callbacks.recordCompensable({
            stepId: id,
            output: cached.output,
            compensate: opts.compensate as (output: unknown) => Promise<void>,
          });
        }
        return cached.output;
      }
      // Journaled error rethrows on replay so catch blocks behave consistently.
      const e = new Error(cached.error?.message ?? "step failed");
      (e as { code?: string }).code = cached.error?.code;
      throw e;
    }

    // Execute a new step via the callback, with the retry loop.
    const mergedOpts: StepOptions<unknown> = {
      ...opts,
      retry: opts.retry ?? retryOverride.current,
    };
    const policy = normalizeRetry(mergedOpts.retry);
    let attempt = 0;
    let lastEntry: StepJournalEntry | undefined;

    // Per-step timeout: compose the run-level abort signal with a
    // per-call AbortSignal.timeout so cooperative step bodies (fetch,
    // setTimeout wrappers, custom AbortSignal observers) stop early
    // on timeout. Hard enforcement for uncooperative bodies is done
    // below by racing the wrapped fn against a timeout rejection.
    const timeoutMs = mergedOpts.timeout !== undefined ? toMs(mergedOpts.timeout) : undefined;
    const fnWithTimeout: StepFn<unknown> =
      timeoutMs !== undefined
        ? async (stepCtx) => {
            let timer: ReturnType<typeof setTimeout> | undefined;
            try {
              return await Promise.race([
                fn(stepCtx),
                new Promise<never>((_, reject) => {
                  timer = setTimeout(() => {
                    const e = new Error(`step "${id}" timed out after ${timeoutMs}ms`);
                    (e as Error & { code?: string }).code = "TIMEOUT";
                    reject(e);
                  }, timeoutMs);
                }),
              ]);
            } finally {
              // Always clear the timer so a fast fn doesn't leave a
              // pending timeout keeping the process alive.
              if (timer !== undefined) clearTimeout(timer);
            }
          }
        : fn;

    while (attempt < policy.max) {
      attempt += 1;
      const stepCtx: StepContext = {
        signal:
          timeoutMs !== undefined
            ? AbortSignal.any([callbacks.abortSignal, AbortSignal.timeout(timeoutMs)])
            : callbacks.abortSignal,
        attempt,
        // Minimal logger: maps the level onto the matching console method.
        log: (level, msg, data) => {
          console[level === "error" ? "error" : level === "warn" ? "warn" : "log"](`[${id}]`, msg, data ?? "");
        },
      };
      const entry = await callbacks.runStep({
        stepId: id,
        attempt,
        input: undefined,
        options: mergedOpts,
        fn: fnWithTimeout,
        stepCtx,
      });
      lastEntry = entry;

      if (entry.status === "ok") {
        journal.stepResults[id] = entry;
        advanceClockTo(clock, entry.finishedAt);
        if (opts.compensate) {
          callbacks.recordCompensable({
            stepId: id,
            output: entry.output,
            compensate: opts.compensate as (output: unknown) => Promise<void>,
          });
        }
        return entry.output;
      }

      // Failed attempt. Check if we should stop retrying.
      if (entry.error?.code === "FATAL") break;
      if (attempt >= policy.max) break;

      // In production the step handler returns { retryAfter } to the DO
      // which sets an alarm; here the spike/test harness continues
      // immediately. retryAfter from RetryableError wins over the policy
      // backoff when set.
      const retryAfter = readRetryAfter(entry.error);
      await maybeDelay(retryAfter ?? backoffDelay(policy, attempt));
    }

    // Retries exhausted (or never retried). `lastEntry` is always set
    // because policy.max >= 1 guarantees at least one loop iteration.
    const finalEntry = lastEntry!;
    journal.stepResults[id] = finalEntry;
    advanceClockTo(clock, finalEntry.finishedAt);
    const e = new Error(finalEntry.error?.message ?? "step failed");
    (e as { code?: string }).code = finalEntry.error?.code;
    throw e;
  }) as StepApi;

  // ---- waits ----

  // Registers a waitpoint with the executor and yields. Declared with an
  // explicit `never` return so TS narrows variables after a call site.
  function yieldWaitpoint(
    clientWaitpointId: string,
    kind: WaitpointKind,
    meta: Record<string, unknown>,
    timeoutMs?: number,
  ): never {
    callbacks.registerWaitpoint({ clientWaitpointId, kind, meta, timeoutMs });
    throw new WaitpointPendingSignal(clientWaitpointId);
  }

  function lookupWaitpoint(id: string): WaitpointResolutionEntry | undefined {
    return journal.waitpointsResolved[id];
  }

  const sleep = async (duration: Duration): Promise<void> => {
    checkCancel();
    const id = `sleep:${nextClientId()}`;
    const resolved = lookupWaitpoint(id);
    if (resolved) {
      advanceClockTo(clock, resolved.resolvedAt);
      return;
    }
    const ms = toMs(duration);
    yieldWaitpoint(id, "DATETIME", { durationMs: ms }, ms);
  };

  // Builds the dual-mode wait object: awaiting it resolves a single
  // waitpoint; iterating it opens a fresh waitpoint per `.next()`.
  function makeWaitable<T>(
    kind: WaitpointKind,
    clientWaitpointId: string,
    iterIdPrefix: string,
    meta: Record<string, unknown>,
    timeoutMs?: number,
    onTimeout: "null" | "throw" = "null",
  ): Waitable<T> {
    // --- thenable: single first-match-wins resolution ---
    const resolve = (): T | null => {
      const resolved = lookupWaitpoint(clientWaitpointId);
      if (!resolved) {
        yieldWaitpoint(clientWaitpointId, kind, meta, timeoutMs);
      }
      advanceClockTo(clock, resolved.resolvedAt);
      // Undefined payload marks a timeout resolution.
      if (resolved.payload === undefined && onTimeout === "throw") {
        throw new Error(`waitpoint ${clientWaitpointId} timed out`);
      }
      return (resolved.payload ?? null) as T | null;
    };

    // --- iterable: fresh waitpoint per .next() call ---
    function makeIterator(): AsyncIterableIterator<T> {
      let closed = false;
      return {
        async next(): Promise<IteratorResult<T>> {
          if (closed) return { value: undefined as unknown as T, done: true };
          checkCancel();
          const iterId = `${iterIdPrefix}:iter:${nextClientId()}`;
          const resolvedIter = lookupWaitpoint(iterId);
          if (!resolvedIter) {
            yieldWaitpoint(iterId, kind, { ...meta, iter: true }, timeoutMs);
          }
          advanceClockTo(clock, resolvedIter.resolvedAt);
          // End-of-stream marker. Harness / orchestrator writes this to
          // tell the iterator the source has no more events.
          const payload = resolvedIter.payload as unknown;
          if (isStreamEnd(payload)) {
            closed = true;
            return { value: undefined as unknown as T, done: true };
          }
          if (payload === undefined && onTimeout === "throw") {
            throw new Error(`waitpoint ${iterId} timed out`);
          }
          return { value: payload as T, done: false };
        },
        async return(): Promise<IteratorResult<T>> {
          closed = true;
          return { value: undefined as unknown as T, done: true };
        },
        [Symbol.asyncIterator]() {
          return this;
        },
      };
    }

    const thenable: Waitable<T> = {
      then(onFulfilled, onRejected) {
        try {
          const r = resolve();
          return Promise.resolve(r).then(onFulfilled, onRejected);
        } catch (e) {
          // Includes WaitpointPendingSignal, which must propagate as a
          // rejection so the executor can catch it from the awaiting body.
          return Promise.reject(e).then(onFulfilled, onRejected);
        }
      },
      [Symbol.asyncIterator]() {
        return makeIterator();
      },
      close() {
        // no-op; `return()` on the iterator handles early break.
      },
    };
    return thenable;
  }

  function isStreamEnd(payload: unknown): boolean {
    return (
      typeof payload === "object" &&
      payload !== null &&
      (payload as { __voyantStreamEnd?: boolean }).__voyantStreamEnd === true
    );
  }

  const waitForEvent: WaitForEventApi = ((eventType: string, opts?: { timeout?: Duration; onTimeout?: "null" | "throw" }) => {
    checkCancel();
    const thenableId = `event:${eventType}:${nextClientId()}`;
    const iterPrefix = `event:${eventType}`;
    return makeWaitable(
      "EVENT",
      thenableId,
      iterPrefix,
      { eventType },
      opts?.timeout ? toMs(opts.timeout) : undefined,
      opts?.onTimeout,
    );
  }) as WaitForEventApi;

  const waitForSignal: WaitForSignalApi = ((name: string, opts?: { timeout?: Duration; onTimeout?: "null" | "throw" }) => {
    checkCancel();
    const thenableId = `signal:${name}:${nextClientId()}`;
    const iterPrefix = `signal:${name}`;
    return makeWaitable(
      "SIGNAL",
      thenableId,
      iterPrefix,
      { signalName: name },
      opts?.timeout ? toMs(opts.timeout) : undefined,
      opts?.onTimeout,
    );
  }) as WaitForSignalApi;

  const waitForToken: WaitForTokenApi = (async (opts?: {
    tokenId?: string;
    timeout?: Duration;
    onTimeout?: "null" | "throw";
  }) => {
    checkCancel();
    // Allocate a stable id per call. User-supplied `tokenId` is kept
    // verbatim so external systems can reference the same value.
    const tokenId = opts?.tokenId ?? `tok_${nextClientId()}`;
    const waitpointId = `token:${tokenId}`;
    const timeoutMs = opts?.timeout ? toMs(opts.timeout) : undefined;
    const onTimeout = opts?.onTimeout ?? "null";

    return {
      tokenId,
      url: `/__voyant/tokens/${tokenId}`,
      wait: async (): Promise<unknown> => {
        checkCancel();
        const resolved = lookupWaitpoint(waitpointId);
        if (resolved) {
          advanceClockTo(clock, resolved.resolvedAt);
          if (resolved.payload === undefined && onTimeout === "throw") {
            throw new Error(`token ${tokenId} timed out`);
          }
          return resolved.payload ?? null;
        }
        yieldWaitpoint(waitpointId, "MANUAL", { tokenId }, timeoutMs);
      },
    } as TokenWait<unknown>;
  }) as WaitForTokenApi;

  // ---- invoke / parallel ----

  const invoke: InvokeApi = (async <TIn, TOut>(
    wf: WorkflowHandle<TIn, TOut>,
    input: TIn,
    opts?: InvokeOptions,
  ): Promise<TOut> => {
    checkCancel();
    const id = `invoke:${wf.id}:${nextClientId()}`;
    const resolved = journal.waitpointsResolved[id];
    if (resolved) {
      advanceClockTo(clock, resolved.resolvedAt);
      // A failed child run replays as a throw here.
      if (resolved.error) {
        const e = new Error(resolved.error.message);
        (e as { code?: string }).code = resolved.error.code;
        throw e;
      }
      return resolved.payload as TOut;
    }
    yieldWaitpoint(id, "RUN", {
      childWorkflowId: wf.id,
      childInput: input,
      detach: opts?.detach ?? false,
      tags: opts?.tags ?? [],
      lockToVersion: opts?.lockToVersion,
      idempotencyKey: opts?.idempotencyKey,
    });
  }) as InvokeApi;

  const parallel: ParallelApi = async <T, R>(
    items: readonly T[],
    fn: (item: T, index: number) => Promise<R>,
    opts?: { concurrency?: number; settle?: boolean },
  ): Promise<R[]> => {
    checkCancel();
    const total = items.length;
    if (total === 0) return [];
    const concurrency = Math.max(1, opts?.concurrency ?? total);
    const settle = opts?.settle ?? false;

    const results: R[] = new Array(total);
    const errors: { index: number; error: unknown }[] = [];
    let cursor = 0;
    let aborted = false;

    // Worker-pool pattern: each worker claims the next index from the
    // shared cursor until the items run out (or a fail-fast abort).
    async function worker(): Promise<void> {
      while (!aborted) {
        const i = cursor++;
        if (i >= total) return;
        try {
          results[i] = await fn(items[i]!, i);
        } catch (err) {
          if (settle) {
            errors.push({ index: i, error: err });
          } else {
            aborted = true;
            throw err;
          }
        }
      }
    }

    const workerCount = Math.min(concurrency, total);
    const workers = Array.from({ length: workerCount }, () => worker());

    if (settle) {
      await Promise.all(workers);
      if (errors.length > 0) {
        // Attach details so callers can inspect which items failed.
        const agg = new AggregateError(
          errors.map((e) => (e.error instanceof Error ? e.error : new Error(String(e.error)))),
          `ctx.parallel: ${errors.length}/${total} iteration${errors.length === 1 ? "" : "s"} failed`,
        );
        (agg as { failedIndices?: number[] }).failedIndices = errors.map((e) => e.index);
        throw agg;
      }
      return results;
    }

    await Promise.all(workers);
    return results;
  };

  // ---- streams ----

  // Guards against two ctx.stream calls reusing an id in one invocation.
  const activeStreamIds = new Set<string>();

  async function consumeStream(
    streamId: string,
    source: AsyncIterable<unknown>,
    encoding: "text" | "json" | "base64",
  ): Promise<void> {
    checkCancel();
    if (activeStreamIds.has(streamId)) {
      throw new Error(`ctx.stream: duplicate streamId "${streamId}" within the same run`);
    }
    activeStreamIds.add(streamId);
    // Replay skip: the prior invocation already drained this source
    // and the orchestrator has the chunks. Re-iterating would double
    // any side effects (LLM calls, billable APIs, file reads).
    if (journal.streamsCompleted[streamId]) {
      return;
    }
    let seq = 0;
    const iter = source[Symbol.asyncIterator]();
    try {
      while (true) {
        checkCancel();
        const { value, done } = await iter.next();
        if (done) {
          callbacks.pushStreamChunk({ streamId, seq: seq + 1, encoding, chunk: null, final: true });
          journal.streamsCompleted[streamId] = { chunkCount: seq + 1 };
          return;
        }
        seq += 1;
        const chunk = normalizeChunk(value, encoding);
        callbacks.pushStreamChunk({ streamId, seq, encoding, chunk, final: false });
      }
    } catch (err) {
      // Emit a final frame so consumers know the stream closed, then
      // propagate so the workflow body's error handling kicks in. No
      // journal entry — a failed stream should re-iterate on replay
      // (so the error surfaces deterministically).
      callbacks.pushStreamChunk({ streamId, seq: seq + 1, encoding, chunk: null, final: true });
      throw err;
    }
  }

  const streamImpl = async (
    streamId: string,
    sourceOrFn:
      | AsyncIterable<unknown>
      | (() => AsyncGenerator<unknown>),
  ): Promise<void> => {
    const source =
      typeof sourceOrFn === "function"
        ? (sourceOrFn as () => AsyncGenerator<unknown>)()
        : sourceOrFn;
    await consumeStream(streamId, source, inferEncoding(source));
  };

  // Typed shape variants. Each forwards to consumeStream with a fixed encoding.
  (streamImpl as unknown as { text: StreamApi["text"] }).text = async (id, source) => {
    await consumeStream(id, source, "text");
  };
  (streamImpl as unknown as { json: StreamApi["json"] }).json = async (id, source) => {
    await consumeStream(id, source, "json");
  };
  (streamImpl as unknown as { bytes: StreamApi["bytes"] }).bytes = async (id, source) => {
    await consumeStream(id, source, "base64");
  };

  const stream = streamImpl as unknown as StreamApi;

  // ---- groups ----

  // `ctx.group(name, fn)` creates a compensation scope. Implementation
  // strategy: the outer compensable list is the single source of truth;
  // each group tracks a checkpoint index. If the scope's body throws or
  // explicitly calls `scope.compensate()`, we splice off compensables
  // added since the checkpoint and run them LIFO, leaving outer
  // compensables untouched.
  //
  // If the scope body completes normally, compensables stay in the
  // outer list — they'll still be rolled back if the enclosing workflow
  // later throws.
  const runScopedCompensations = async (fromIndex: number): Promise<void> => {
    const scopeEntries = callbacks.spliceCompensable(fromIndex);
    for (let i = scopeEntries.length - 1; i >= 0; i--) {
      const c = scopeEntries[i]!;
      try {
        await c.compensate(c.output);
      } catch {
        // One bad compensation in a scope does not abort the others.
        // Errors here don't surface to the executor — the outer rollback
        // machinery only sees the user error that triggered the scope
        // unwind.
      }
    }
  };

  const group: GroupApi = async <T>(
    _name: string,
    fn: (scope: GroupScope) => Promise<T>,
  ): Promise<T> => {
    checkCancel();
    const checkpointStart = callbacks.compensableLength();
    try {
      return await fn({
        step,
        compensate: async (): Promise<never> => {
          await runScopedCompensations(checkpointStart);
          throw new CompensateRequestedSignal();
        },
      });
    } catch (err) {
      // Only run scoped compensations for real user errors — internal
      // signals (waitpoint yield, cancellation, compensate-requested)
      // are re-thrown unchanged so the executor can route them.
      if (
        !isWaitpointPending(err) &&
        !isRunCancelled(err) &&
        !isCompensateRequested(err)
      ) {
        await runScopedCompensations(checkpointStart);
      }
      throw err;
    }
  };

  // ---- metadata ----

  const metadata: MetadataApi = {
    set(key, value) { callbacks.pushMetadata({ op: "set", key, value }); },
    increment(key, by = 1) { callbacks.pushMetadata({ op: "increment", key, value: by }); },
    append(key, value) { callbacks.pushMetadata({ op: "append", key, value }); },
    remove(key) { callbacks.pushMetadata({ op: "remove", key }); },
    // Mutations are pushed immediately via `callbacks.pushMetadata`
    // and collected on the response envelope; no explicit flush is
    // needed.
    flush: async () => {},
  };

  // ---- retry override ----

  function setRetry(policy: RetryPolicy): void {
    retryOverride.current = policy;
  }

  return {
    run: env.run,
    workflow: env.workflow,
    environment: env.environment,
    project: env.project,
    organization: env.organization,
    invocationCount: callbacks.invocationCount,
    signal: callbacks.abortSignal,
    step,
    sleep,
    waitForEvent,
    waitForSignal,
    waitForToken,
    invoke,
    parallel,
    stream,
    group,
    metadata,
    now: () => now(clock),
    random,
    randomUUID: createRandomUUID(random),
    setRetry,
    compensate: async (): Promise<never> => {
      checkCancel();
      throw new CompensateRequestedSignal();
    },
  } satisfies WorkflowContext<unknown>;
}
|
|
714
|
+
|
|
715
|
+
// ---- helpers ----
|
|
716
|
+
|
|
717
|
+
function inferEncoding(source: unknown): "text" | "json" | "base64" {
|
|
718
|
+
// Default to json for the generic ctx.stream(id, generator) call. The
|
|
719
|
+
// typed variants (text/json/bytes) override this.
|
|
720
|
+
void source;
|
|
721
|
+
return "json";
|
|
722
|
+
}
|
|
723
|
+
|
|
724
|
+
function normalizeChunk(value: unknown, encoding: "text" | "json" | "base64"): unknown {
|
|
725
|
+
if (encoding === "text") {
|
|
726
|
+
return typeof value === "string" ? value : String(value);
|
|
727
|
+
}
|
|
728
|
+
if (encoding === "base64") {
|
|
729
|
+
if (value instanceof Uint8Array) {
|
|
730
|
+
return toBase64(value);
|
|
731
|
+
}
|
|
732
|
+
throw new Error("ctx.stream.bytes: expected Uint8Array chunks");
|
|
733
|
+
}
|
|
734
|
+
return value; // json — pass through
|
|
735
|
+
}
|
|
736
|
+
|
|
737
|
+
function toBase64(bytes: Uint8Array): string {
|
|
738
|
+
// Node + modern runtimes provide Buffer or btoa. Use Buffer when
|
|
739
|
+
// available for efficiency; fall back to manual encode for isolates.
|
|
740
|
+
const g = globalThis as unknown as {
|
|
741
|
+
Buffer?: { from(b: Uint8Array): { toString(enc: "base64"): string } };
|
|
742
|
+
btoa?: (s: string) => string;
|
|
743
|
+
};
|
|
744
|
+
if (g.Buffer) return g.Buffer.from(bytes).toString("base64");
|
|
745
|
+
if (g.btoa) {
|
|
746
|
+
let s = "";
|
|
747
|
+
for (let i = 0; i < bytes.length; i++) s += String.fromCharCode(bytes[i]!);
|
|
748
|
+
return g.btoa(s);
|
|
749
|
+
}
|
|
750
|
+
// Manual fallback (rare).
|
|
751
|
+
const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
|
|
752
|
+
let out = "";
|
|
753
|
+
let i = 0;
|
|
754
|
+
while (i < bytes.length) {
|
|
755
|
+
const b1 = bytes[i++]!;
|
|
756
|
+
const b2 = i < bytes.length ? bytes[i++]! : 0;
|
|
757
|
+
const b3 = i < bytes.length ? bytes[i++]! : 0;
|
|
758
|
+
out += chars[b1 >> 2]!;
|
|
759
|
+
out += chars[((b1 & 3) << 4) | (b2 >> 4)]!;
|
|
760
|
+
out += i - 1 > bytes.length ? "=" : chars[((b2 & 15) << 2) | (b3 >> 6)]!;
|
|
761
|
+
out += i > bytes.length ? "=" : chars[b3 & 63]!;
|
|
762
|
+
}
|
|
763
|
+
return out;
|
|
764
|
+
}
|
|
765
|
+
|
|
766
|
+
/** A `RetryPolicy` with every field defaulted, used by the retry loop. */
interface ResolvedRetryPolicy {
  // Maximum number of attempts (>= 1) — attempts, not retries.
  max: number;
  backoff: "exponential" | "linear" | "fixed";
  initial: number; // ms
  maxDelay: number; // ms
}
|
|
772
|
+
|
|
773
|
+
function normalizeRetry(
|
|
774
|
+
input: RetryPolicy | { max: 0 } | undefined,
|
|
775
|
+
): ResolvedRetryPolicy {
|
|
776
|
+
if (!input) return { max: 1, backoff: "exponential", initial: 1000, maxDelay: 60_000 };
|
|
777
|
+
const max = input.max ?? 3;
|
|
778
|
+
const policy = input as RetryPolicy;
|
|
779
|
+
return {
|
|
780
|
+
max: Math.max(1, max),
|
|
781
|
+
backoff: policy.backoff ?? "exponential",
|
|
782
|
+
initial: policy.initial !== undefined ? toMs(policy.initial) : 1000,
|
|
783
|
+
maxDelay: policy.maxDelay !== undefined ? toMs(policy.maxDelay) : 60_000,
|
|
784
|
+
};
|
|
785
|
+
}
|
|
786
|
+
|
|
787
|
+
function backoffDelay(policy: ResolvedRetryPolicy, attempt: number): number {
|
|
788
|
+
// `attempt` is 1-indexed; delay applies *before* the next attempt.
|
|
789
|
+
if (policy.backoff === "fixed") return Math.min(policy.initial, policy.maxDelay);
|
|
790
|
+
if (policy.backoff === "linear") return Math.min(policy.initial * attempt, policy.maxDelay);
|
|
791
|
+
// exponential
|
|
792
|
+
return Math.min(policy.initial * Math.pow(2, attempt - 1), policy.maxDelay);
|
|
793
|
+
}
|
|
794
|
+
|
|
795
|
+
function readRetryAfter(err: SerializedError | undefined): number | undefined {
|
|
796
|
+
if (!err) return undefined;
|
|
797
|
+
if (err.code !== "RETRYABLE") return undefined;
|
|
798
|
+
const raw = (err.data as { retryAfter?: unknown } | undefined)?.retryAfter;
|
|
799
|
+
if (raw === undefined) return undefined;
|
|
800
|
+
if (typeof raw === "number") return raw;
|
|
801
|
+
if (raw instanceof Date) return raw.getTime() - Date.now();
|
|
802
|
+
if (typeof raw === "string") {
|
|
803
|
+
try { return toMs(raw as Duration); } catch { return undefined; }
|
|
804
|
+
}
|
|
805
|
+
return undefined;
|
|
806
|
+
}
|
|
807
|
+
|
|
808
|
+
/**
|
|
809
|
+
* In the real runtime, retry delay is expressed to the orchestrator as a
|
|
810
|
+
* `retryAfter` field on the step callback response, and the DO sets an
|
|
811
|
+
* alarm — no worker sits idle. In tests we skip the delay (pass it
|
|
812
|
+
* through `setTimeout(0)` at most) so the suite stays fast.
|
|
813
|
+
*/
|
|
814
|
+
async function maybeDelay(ms: number): Promise<void> {
|
|
815
|
+
if (ms <= 0) return;
|
|
816
|
+
// Cap at 10ms in-process regardless of declared delay. Test harness
|
|
817
|
+
// doesn't model real time; production replaces this with a DO alarm.
|
|
818
|
+
await new Promise((resolve) => setTimeout(resolve, Math.min(ms, 10)));
|
|
819
|
+
}
|
|
820
|
+
|
|
821
|
+
function toMs(d: Duration): number {
|
|
822
|
+
if (typeof d === "number") return d;
|
|
823
|
+
const m = /^(\d+)(ms|s|m|h|d|w)$/.exec(d);
|
|
824
|
+
if (!m) throw new Error(`invalid duration: ${String(d)}`);
|
|
825
|
+
const n = Number(m[1]);
|
|
826
|
+
switch (m[2]) {
|
|
827
|
+
case "ms": return n;
|
|
828
|
+
case "s": return n * 1000;
|
|
829
|
+
case "m": return n * 60_000;
|
|
830
|
+
case "h": return n * 3_600_000;
|
|
831
|
+
case "d": return n * 86_400_000;
|
|
832
|
+
case "w": return n * 604_800_000;
|
|
833
|
+
default: throw new Error(`invalid duration unit: ${m[2]}`);
|
|
834
|
+
}
|
|
835
|
+
}
|
|
836
|
+
|
|
837
|
+
// Re-exports used by the executor for metadata type checking.
|
|
838
|
+
export type { MetadataValue };
|