@voyantjs/workflows 0.6.7 → 0.6.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/dist/auth/index.d.ts +26 -0
  2. package/dist/auth/index.d.ts.map +1 -0
  3. package/dist/auth/index.js +137 -0
  4. package/dist/conditions.d.ts +29 -0
  5. package/dist/conditions.d.ts.map +1 -0
  6. package/dist/conditions.js +5 -0
  7. package/dist/handler/index.d.ts +104 -0
  8. package/dist/handler/index.d.ts.map +1 -0
  9. package/dist/handler/index.js +238 -0
  10. package/dist/index.d.ts +6 -0
  11. package/dist/index.d.ts.map +1 -0
  12. package/dist/index.js +10 -0
  13. package/dist/protocol/index.d.ts +187 -0
  14. package/dist/protocol/index.d.ts.map +1 -0
  15. package/dist/protocol/index.js +7 -0
  16. package/dist/rate-limit/index.d.ts +40 -0
  17. package/dist/rate-limit/index.d.ts.map +1 -0
  18. package/dist/rate-limit/index.js +139 -0
  19. package/dist/runtime/ctx.d.ts +102 -0
  20. package/dist/runtime/ctx.d.ts.map +1 -0
  21. package/dist/runtime/ctx.js +607 -0
  22. package/dist/runtime/determinism.d.ts +19 -0
  23. package/dist/runtime/determinism.d.ts.map +1 -0
  24. package/dist/runtime/determinism.js +61 -0
  25. package/dist/runtime/errors.d.ts +21 -0
  26. package/dist/runtime/errors.d.ts.map +1 -0
  27. package/dist/runtime/errors.js +45 -0
  28. package/dist/runtime/executor.d.ts +159 -0
  29. package/dist/runtime/executor.d.ts.map +1 -0
  30. package/dist/runtime/executor.js +225 -0
  31. package/dist/runtime/journal.d.ts +55 -0
  32. package/dist/runtime/journal.d.ts.map +1 -0
  33. package/dist/runtime/journal.js +28 -0
  34. package/dist/testing/index.d.ts +117 -0
  35. package/dist/testing/index.d.ts.map +1 -0
  36. package/dist/testing/index.js +595 -0
  37. package/dist/trigger.d.ts +122 -0
  38. package/dist/trigger.d.ts.map +1 -0
  39. package/dist/trigger.js +23 -0
  40. package/dist/types.d.ts +63 -0
  41. package/dist/types.d.ts.map +1 -0
  42. package/dist/types.js +3 -0
  43. package/dist/workflow.d.ts +212 -0
  44. package/dist/workflow.d.ts.map +1 -0
  45. package/dist/workflow.js +46 -0
  46. package/package.json +30 -30
  47. package/src/auth/index.ts +46 -52
  48. package/src/conditions.ts +13 -13
  49. package/src/handler/index.ts +110 -106
  50. package/src/index.ts +7 -7
  51. package/src/protocol/index.ts +137 -71
  52. package/src/rate-limit/index.ts +77 -78
  53. package/src/runtime/ctx.ts +354 -342
  54. package/src/runtime/determinism.ts +27 -27
  55. package/src/runtime/errors.ts +17 -17
  56. package/src/runtime/executor.ts +179 -172
  57. package/src/runtime/journal.ts +25 -25
  58. package/src/testing/index.ts +268 -202
  59. package/src/trigger.ts +64 -71
  60. package/src/types.ts +16 -18
  61. package/src/workflow.ts +154 -152
@@ -0,0 +1,607 @@
1
+ // Builds the `ctx` object passed to the workflow body.
2
+ //
3
+ // The executor owns the waitpoint-pending queue and the callbacks
4
+ // into the orchestrator; ctx is a thin shell that delegates.
5
+ import { advanceClockTo, createRandomUUID, now } from "./determinism.js";
6
+ import { CompensateRequestedSignal, isCompensateRequested, isRunCancelled, isWaitpointPending, RunCancelledSignal, WaitpointPendingSignal, } from "./errors.js";
7
+ export function buildCtx(args) {
8
+ const { env, journal, callbacks, clock, random, retryOverride } = args;
9
+ // Per-ctx client-id counter. Reset on each ctx (= each invocation),
10
+ // which means ids are stable relative to body execution order.
11
+ let clientIdSeq = 0;
12
+ const nextClientId = () => ++clientIdSeq;
13
+ function checkCancel() {
14
+ if (callbacks.abortSignal.aborted) {
15
+ throw new RunCancelledSignal();
16
+ }
17
+ }
18
+ // ---- step ----
19
+ const step = (async (id, optsOrFn, maybeFn) => {
20
+ checkCancel();
21
+ const opts = typeof optsOrFn === "function" ? {} : optsOrFn;
22
+ const fn = typeof optsOrFn === "function" ? optsOrFn : maybeFn;
23
+ // Journal hit? Return cached.
24
+ const cached = journal.stepResults[id];
25
+ if (cached) {
26
+ advanceClockTo(clock, cached.finishedAt);
27
+ if (cached.status === "ok") {
28
+ // Re-register compensable on replay so compensations are available
29
+ // if this invocation ends up rolling back.
30
+ if (opts.compensate) {
31
+ callbacks.recordCompensable({
32
+ stepId: id,
33
+ output: cached.output,
34
+ compensate: opts.compensate,
35
+ });
36
+ }
37
+ return cached.output;
38
+ }
39
+ // Journaled error rethrows on replay so catch blocks behave consistently.
40
+ const e = new Error(cached.error?.message ?? "step failed");
41
+ e.code = cached.error?.code;
42
+ throw e;
43
+ }
44
+ // Execute a new step via the callback, with the retry loop.
45
+ const mergedOpts = {
46
+ ...opts,
47
+ retry: opts.retry ?? retryOverride.current,
48
+ };
49
+ const policy = normalizeRetry(mergedOpts.retry);
50
+ let attempt = 0;
51
+ let lastEntry;
52
+ // Per-step timeout: compose the run-level abort signal with a
53
+ // per-call AbortSignal.timeout so cooperative step bodies (fetch,
54
+ // setTimeout wrappers, custom AbortSignal observers) stop early
55
+ // on timeout. Hard enforcement for uncooperative bodies is done
56
+ // below by racing the wrapped fn against a timeout rejection.
57
+ const timeoutMs = mergedOpts.timeout !== undefined ? toMs(mergedOpts.timeout) : undefined;
58
+ const fnWithTimeout = timeoutMs !== undefined
59
+ ? async (stepCtx) => {
60
+ let timer;
61
+ try {
62
+ return await Promise.race([
63
+ fn(stepCtx),
64
+ new Promise((_, reject) => {
65
+ timer = setTimeout(() => {
66
+ const e = new Error(`step "${id}" timed out after ${timeoutMs}ms`);
67
+ e.code = "TIMEOUT";
68
+ reject(e);
69
+ }, timeoutMs);
70
+ }),
71
+ ]);
72
+ }
73
+ finally {
74
+ if (timer !== undefined)
75
+ clearTimeout(timer);
76
+ }
77
+ }
78
+ : fn;
79
+ while (attempt < policy.max) {
80
+ attempt += 1;
81
+ const stepCtx = {
82
+ signal: timeoutMs !== undefined
83
+ ? AbortSignal.any([callbacks.abortSignal, AbortSignal.timeout(timeoutMs)])
84
+ : callbacks.abortSignal,
85
+ attempt,
86
+ log: (level, msg, data) => {
87
+ console[level === "error" ? "error" : level === "warn" ? "warn" : "log"](`[${id}]`, msg, data ?? "");
88
+ },
89
+ };
90
+ const entry = await callbacks.runStep({
91
+ stepId: id,
92
+ attempt,
93
+ input: undefined,
94
+ options: mergedOpts,
95
+ fn: fnWithTimeout,
96
+ stepCtx,
97
+ });
98
+ lastEntry = entry;
99
+ if (entry.status === "ok") {
100
+ journal.stepResults[id] = entry;
101
+ advanceClockTo(clock, entry.finishedAt);
102
+ if (opts.compensate) {
103
+ callbacks.recordCompensable({
104
+ stepId: id,
105
+ output: entry.output,
106
+ compensate: opts.compensate,
107
+ });
108
+ }
109
+ return entry.output;
110
+ }
111
+ // Failed attempt. Check if we should stop retrying.
112
+ if (entry.error?.code === "FATAL")
113
+ break;
114
+ if (attempt >= policy.max)
115
+ break;
116
+ // In production the step handler returns { retryAfter } to the DO
117
+ // which sets an alarm; here the spike/test harness continues
118
+ // immediately. retryAfter from RetryableError wins over the policy
119
+ // backoff when set.
120
+ const retryAfter = readRetryAfter(entry.error);
121
+ await maybeDelay(retryAfter ?? backoffDelay(policy, attempt));
122
+ }
123
+ // Retries exhausted (or never retried).
124
+ const finalEntry = lastEntry;
125
+ journal.stepResults[id] = finalEntry;
126
+ advanceClockTo(clock, finalEntry.finishedAt);
127
+ const e = new Error(finalEntry.error?.message ?? "step failed");
128
+ e.code = finalEntry.error?.code;
129
+ throw e;
130
+ });
131
+ // ---- waits ----
132
+ function yieldWaitpoint(clientWaitpointId, kind, meta, timeoutMs) {
133
+ callbacks.registerWaitpoint({ clientWaitpointId, kind, meta, timeoutMs });
134
+ throw new WaitpointPendingSignal(clientWaitpointId);
135
+ }
136
+ function lookupWaitpoint(id) {
137
+ return journal.waitpointsResolved[id];
138
+ }
139
+ const sleep = async (duration) => {
140
+ checkCancel();
141
+ const id = `sleep:${nextClientId()}`;
142
+ const resolved = lookupWaitpoint(id);
143
+ if (resolved) {
144
+ advanceClockTo(clock, resolved.resolvedAt);
145
+ return;
146
+ }
147
+ const ms = toMs(duration);
148
+ yieldWaitpoint(id, "DATETIME", { durationMs: ms }, ms);
149
+ };
150
+ function makeWaitable(kind, clientWaitpointId, iterIdPrefix, meta, timeoutMs, onTimeout = "null") {
151
+ // --- thenable: single first-match-wins resolution ---
152
+ const resolve = () => {
153
+ const resolved = lookupWaitpoint(clientWaitpointId);
154
+ if (!resolved) {
155
+ yieldWaitpoint(clientWaitpointId, kind, meta, timeoutMs);
156
+ }
157
+ advanceClockTo(clock, resolved.resolvedAt);
158
+ if (resolved.payload === undefined && onTimeout === "throw") {
159
+ throw new Error(`waitpoint ${clientWaitpointId} timed out`);
160
+ }
161
+ return (resolved.payload ?? null);
162
+ };
163
+ // --- iterable: fresh waitpoint per .next() call ---
164
+ function makeIterator() {
165
+ let closed = false;
166
+ return {
167
+ async next() {
168
+ if (closed)
169
+ return { value: undefined, done: true };
170
+ checkCancel();
171
+ const iterId = `${iterIdPrefix}:iter:${nextClientId()}`;
172
+ const resolvedIter = lookupWaitpoint(iterId);
173
+ if (!resolvedIter) {
174
+ yieldWaitpoint(iterId, kind, { ...meta, iter: true }, timeoutMs);
175
+ }
176
+ advanceClockTo(clock, resolvedIter.resolvedAt);
177
+ // End-of-stream marker. Harness / orchestrator writes this to
178
+ // tell the iterator the source has no more events.
179
+ const payload = resolvedIter.payload;
180
+ if (isStreamEnd(payload)) {
181
+ closed = true;
182
+ return { value: undefined, done: true };
183
+ }
184
+ if (payload === undefined && onTimeout === "throw") {
185
+ throw new Error(`waitpoint ${iterId} timed out`);
186
+ }
187
+ return { value: payload, done: false };
188
+ },
189
+ async return() {
190
+ closed = true;
191
+ return { value: undefined, done: true };
192
+ },
193
+ [Symbol.asyncIterator]() {
194
+ return this;
195
+ },
196
+ };
197
+ }
198
+ const thenable = {
199
+ // biome-ignore lint/suspicious/noThenProperty: Waitable intentionally implements PromiseLike for `await`.
200
+ then(onFulfilled, onRejected) {
201
+ try {
202
+ const r = resolve();
203
+ return Promise.resolve(r).then(onFulfilled, onRejected);
204
+ }
205
+ catch (e) {
206
+ return Promise.reject(e).then(onFulfilled, onRejected);
207
+ }
208
+ },
209
+ [Symbol.asyncIterator]() {
210
+ return makeIterator();
211
+ },
212
+ close() {
213
+ // no-op; `return()` on the iterator handles early break.
214
+ },
215
+ };
216
+ return thenable;
217
+ }
218
+ function isStreamEnd(payload) {
219
+ return (typeof payload === "object" &&
220
+ payload !== null &&
221
+ payload.__voyantStreamEnd === true);
222
+ }
223
+ const waitForEvent = ((eventType, opts) => {
224
+ checkCancel();
225
+ const thenableId = `event:${eventType}:${nextClientId()}`;
226
+ const iterPrefix = `event:${eventType}`;
227
+ return makeWaitable("EVENT", thenableId, iterPrefix, { eventType }, opts?.timeout ? toMs(opts.timeout) : undefined, opts?.onTimeout);
228
+ });
229
+ const waitForSignal = ((name, opts) => {
230
+ checkCancel();
231
+ const thenableId = `signal:${name}:${nextClientId()}`;
232
+ const iterPrefix = `signal:${name}`;
233
+ return makeWaitable("SIGNAL", thenableId, iterPrefix, { signalName: name }, opts?.timeout ? toMs(opts.timeout) : undefined, opts?.onTimeout);
234
+ });
235
+ const waitForToken = (async (opts) => {
236
+ checkCancel();
237
+ // Allocate a stable id per call. User-supplied `tokenId` is kept
238
+ // verbatim so external systems can reference the same value.
239
+ const tokenId = opts?.tokenId ?? `tok_${nextClientId()}`;
240
+ const waitpointId = `token:${tokenId}`;
241
+ const timeoutMs = opts?.timeout ? toMs(opts.timeout) : undefined;
242
+ const onTimeout = opts?.onTimeout ?? "null";
243
+ return {
244
+ tokenId,
245
+ url: `/__voyant/tokens/${tokenId}`,
246
+ wait: async () => {
247
+ checkCancel();
248
+ const resolved = lookupWaitpoint(waitpointId);
249
+ if (resolved) {
250
+ advanceClockTo(clock, resolved.resolvedAt);
251
+ if (resolved.payload === undefined && onTimeout === "throw") {
252
+ throw new Error(`token ${tokenId} timed out`);
253
+ }
254
+ return resolved.payload ?? null;
255
+ }
256
+ yieldWaitpoint(waitpointId, "MANUAL", { tokenId }, timeoutMs);
257
+ },
258
+ };
259
+ });
260
+ // ---- invoke / parallel ----
261
+ const invoke = (async (wf, input, opts) => {
262
+ checkCancel();
263
+ const id = `invoke:${wf.id}:${nextClientId()}`;
264
+ const resolved = journal.waitpointsResolved[id];
265
+ if (resolved) {
266
+ advanceClockTo(clock, resolved.resolvedAt);
267
+ if (resolved.error) {
268
+ const e = new Error(resolved.error.message);
269
+ e.code = resolved.error.code;
270
+ throw e;
271
+ }
272
+ return resolved.payload;
273
+ }
274
+ yieldWaitpoint(id, "RUN", {
275
+ childWorkflowId: wf.id,
276
+ childInput: input,
277
+ detach: opts?.detach ?? false,
278
+ tags: opts?.tags ?? [],
279
+ lockToVersion: opts?.lockToVersion,
280
+ idempotencyKey: opts?.idempotencyKey,
281
+ });
282
+ });
283
+ const parallel = async (items, fn, opts) => {
284
+ checkCancel();
285
+ const total = items.length;
286
+ if (total === 0)
287
+ return [];
288
+ const concurrency = Math.max(1, opts?.concurrency ?? total);
289
+ const settle = opts?.settle ?? false;
290
+ const results = new Array(total);
291
+ const errors = [];
292
+ let cursor = 0;
293
+ let aborted = false;
294
+ async function worker() {
295
+ while (!aborted) {
296
+ const i = cursor++;
297
+ if (i >= total)
298
+ return;
299
+ try {
300
+ results[i] = await fn(items[i], i);
301
+ }
302
+ catch (err) {
303
+ if (settle) {
304
+ errors.push({ index: i, error: err });
305
+ }
306
+ else {
307
+ aborted = true;
308
+ throw err;
309
+ }
310
+ }
311
+ }
312
+ }
313
+ const workerCount = Math.min(concurrency, total);
314
+ const workers = Array.from({ length: workerCount }, () => worker());
315
+ if (settle) {
316
+ await Promise.all(workers);
317
+ if (errors.length > 0) {
318
+ // Attach details so callers can inspect which items failed.
319
+ const agg = new AggregateError(errors.map((e) => (e.error instanceof Error ? e.error : new Error(String(e.error)))), `ctx.parallel: ${errors.length}/${total} iteration${errors.length === 1 ? "" : "s"} failed`);
320
+ agg.failedIndices = errors.map((e) => e.index);
321
+ throw agg;
322
+ }
323
+ return results;
324
+ }
325
+ await Promise.all(workers);
326
+ return results;
327
+ };
328
+ // ---- streams ----
329
+ const activeStreamIds = new Set();
330
+ async function consumeStream(streamId, source, encoding) {
331
+ checkCancel();
332
+ if (activeStreamIds.has(streamId)) {
333
+ throw new Error(`ctx.stream: duplicate streamId "${streamId}" within the same run`);
334
+ }
335
+ activeStreamIds.add(streamId);
336
+ // Replay skip: the prior invocation already drained this source
337
+ // and the orchestrator has the chunks. Re-iterating would double
338
+ // any side effects (LLM calls, billable APIs, file reads).
339
+ if (journal.streamsCompleted[streamId]) {
340
+ return;
341
+ }
342
+ let seq = 0;
343
+ const iter = source[Symbol.asyncIterator]();
344
+ try {
345
+ while (true) {
346
+ checkCancel();
347
+ const { value, done } = await iter.next();
348
+ if (done) {
349
+ callbacks.pushStreamChunk({ streamId, seq: seq + 1, encoding, chunk: null, final: true });
350
+ journal.streamsCompleted[streamId] = { chunkCount: seq + 1 };
351
+ return;
352
+ }
353
+ seq += 1;
354
+ const chunk = normalizeChunk(value, encoding);
355
+ callbacks.pushStreamChunk({ streamId, seq, encoding, chunk, final: false });
356
+ }
357
+ }
358
+ catch (err) {
359
+ // Emit a final frame so consumers know the stream closed, then
360
+ // propagate so the workflow body's error handling kicks in. No
361
+ // journal entry — a failed stream should re-iterate on replay
362
+ // (so the error surfaces deterministically).
363
+ callbacks.pushStreamChunk({ streamId, seq: seq + 1, encoding, chunk: null, final: true });
364
+ throw err;
365
+ }
366
+ }
367
+ const streamImpl = async (streamId, sourceOrFn) => {
368
+ const source = typeof sourceOrFn === "function"
369
+ ? sourceOrFn()
370
+ : sourceOrFn;
371
+ await consumeStream(streamId, source, inferEncoding(source));
372
+ };
373
+ streamImpl.text = async (id, source) => {
374
+ await consumeStream(id, source, "text");
375
+ };
376
+ streamImpl.json = async (id, source) => {
377
+ await consumeStream(id, source, "json");
378
+ };
379
+ streamImpl.bytes = async (id, source) => {
380
+ await consumeStream(id, source, "base64");
381
+ };
382
+ const stream = streamImpl;
383
+ // ---- groups ----
384
+ // `ctx.group(name, fn)` creates a compensation scope. Implementation
385
+ // strategy: the outer compensable list is the single source of truth;
386
+ // each group tracks a checkpoint index. If the scope's body throws or
387
+ // explicitly calls `scope.compensate()`, we splice off compensables
388
+ // added since the checkpoint and run them LIFO, leaving outer
389
+ // compensables untouched.
390
+ //
391
+ // If the scope body completes normally, compensables stay in the
392
+ // outer list — they'll still be rolled back if the enclosing workflow
393
+ // later throws.
394
+ const runScopedCompensations = async (fromIndex) => {
395
+ const scopeEntries = callbacks.spliceCompensable(fromIndex);
396
+ for (let i = scopeEntries.length - 1; i >= 0; i--) {
397
+ const c = scopeEntries[i];
398
+ try {
399
+ await c.compensate(c.output);
400
+ }
401
+ catch {
402
+ // One bad compensation in a scope does not abort the others.
403
+ // Errors here don't surface to the executor — the outer rollback
404
+ // machinery only sees the user error that triggered the scope
405
+ // unwind.
406
+ }
407
+ }
408
+ };
409
+ const group = async (_name, fn) => {
410
+ checkCancel();
411
+ const checkpointStart = callbacks.compensableLength();
412
+ try {
413
+ return await fn({
414
+ step,
415
+ compensate: async () => {
416
+ await runScopedCompensations(checkpointStart);
417
+ throw new CompensateRequestedSignal();
418
+ },
419
+ });
420
+ }
421
+ catch (err) {
422
+ // Only run scoped compensations for real user errors — internal
423
+ // signals (waitpoint yield, cancellation, compensate-requested)
424
+ // are re-thrown unchanged so the executor can route them.
425
+ if (!isWaitpointPending(err) && !isRunCancelled(err) && !isCompensateRequested(err)) {
426
+ await runScopedCompensations(checkpointStart);
427
+ }
428
+ throw err;
429
+ }
430
+ };
431
+ // ---- metadata ----
432
+ const metadata = {
433
+ set(key, value) {
434
+ callbacks.pushMetadata({ op: "set", key, value });
435
+ },
436
+ increment(key, by = 1) {
437
+ callbacks.pushMetadata({ op: "increment", key, value: by });
438
+ },
439
+ append(key, value) {
440
+ callbacks.pushMetadata({ op: "append", key, value });
441
+ },
442
+ remove(key) {
443
+ callbacks.pushMetadata({ op: "remove", key });
444
+ },
445
+ // Mutations are pushed immediately via `callbacks.pushMetadata`
446
+ // and collected on the response envelope; no explicit flush is
447
+ // needed.
448
+ flush: async () => { },
449
+ };
450
+ // ---- retry override ----
451
+ function setRetry(policy) {
452
+ retryOverride.current = policy;
453
+ }
454
+ return {
455
+ run: env.run,
456
+ workflow: env.workflow,
457
+ environment: env.environment,
458
+ project: env.project,
459
+ organization: env.organization,
460
+ invocationCount: callbacks.invocationCount,
461
+ signal: callbacks.abortSignal,
462
+ step,
463
+ sleep,
464
+ waitForEvent,
465
+ waitForSignal,
466
+ waitForToken,
467
+ invoke,
468
+ parallel,
469
+ stream,
470
+ group,
471
+ metadata,
472
+ now: () => now(clock),
473
+ random,
474
+ randomUUID: createRandomUUID(random),
475
+ setRetry,
476
+ compensate: async () => {
477
+ checkCancel();
478
+ throw new CompensateRequestedSignal();
479
+ },
480
+ };
481
+ }
482
+ // ---- helpers ----
// Encoding used by the generic ctx.stream(id, source) entry point; the
// typed variants (text/json/bytes) bypass this and pass the encoding
// explicitly, so the generic call always defaults to json.
function inferEncoding(_source) {
    return "json";
}
// Converts one stream chunk into its wire form for the given encoding:
// "text" stringifies, "base64" requires Uint8Array input, and "json"
// passes the value through untouched.
function normalizeChunk(value, encoding) {
    switch (encoding) {
        case "text":
            return typeof value === "string" ? value : String(value);
        case "base64":
            if (!(value instanceof Uint8Array)) {
                throw new Error("ctx.stream.bytes: expected Uint8Array chunks");
            }
            return toBase64(value);
        default:
            // json — pass through
            return value;
    }
}
// Encodes bytes as standard base64 (RFC 4648, with '=' padding).
// Node + modern runtimes provide Buffer or btoa. Use Buffer when
// available for efficiency; fall back to manual encode for isolates.
//
// Bug fix: the previous manual fallback tested the read cursor with
// `i - 1 > bytes.length` / `i > bytes.length`, which can never be true
// (the cursor is only advanced while in bounds), so inputs whose length
// was not a multiple of 3 got a spurious 'A' where '=' padding belongs.
function toBase64(bytes) {
    const g = globalThis;
    if (g.Buffer)
        return g.Buffer.from(bytes).toString("base64");
    if (g.btoa) {
        let s = "";
        for (let i = 0; i < bytes.length; i++)
            s += String.fromCharCode(bytes[i]);
        return g.btoa(s);
    }
    // Manual fallback (rare). Process 3 input bytes -> 4 output chars,
    // padding the final group explicitly based on how many real bytes it has.
    const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    let out = "";
    for (let i = 0; i < bytes.length; i += 3) {
        const b1 = bytes[i];
        const hasB2 = i + 1 < bytes.length;
        const hasB3 = i + 2 < bytes.length;
        const b2 = hasB2 ? bytes[i + 1] : 0;
        const b3 = hasB3 ? bytes[i + 2] : 0;
        out += chars[b1 >> 2];
        out += chars[((b1 & 3) << 4) | (b2 >> 4)];
        out += hasB2 ? chars[((b2 & 15) << 2) | (b3 >> 6)] : "=";
        out += hasB3 ? chars[b3 & 63] : "=";
    }
    return out;
}
// Normalizes a user-supplied retry option into a fully-populated policy.
// No input means "run once, never retry"; otherwise defaults are
// max=3 attempts, exponential backoff from 1s, capped at 60s.
function normalizeRetry(input) {
    if (!input) {
        return { max: 1, backoff: "exponential", initial: 1000, maxDelay: 60_000 };
    }
    const attempts = Math.max(1, input.max ?? 3);
    return {
        max: attempts,
        backoff: input.backoff ?? "exponential",
        initial: input.initial !== undefined ? toMs(input.initial) : 1000,
        maxDelay: input.maxDelay !== undefined ? toMs(input.maxDelay) : 60_000,
    };
}
// Delay (ms) before the next attempt. `attempt` is 1-indexed and names
// the attempt that just failed; all strategies are capped at maxDelay.
function backoffDelay(policy, attempt) {
    switch (policy.backoff) {
        case "fixed":
            return Math.min(policy.initial, policy.maxDelay);
        case "linear":
            return Math.min(policy.initial * attempt, policy.maxDelay);
        default:
            // exponential: initial, 2x, 4x, ...
            return Math.min(policy.initial * 2 ** (attempt - 1), policy.maxDelay);
    }
}
// Extracts a retry-after hint (ms) from a journaled step error. Only
// RETRYABLE errors carry one; it may be a number of ms, a Date (converted
// to a delta from now), or a duration string. Anything else — including
// an unparseable string — yields undefined so the policy backoff applies.
function readRetryAfter(err) {
    if (err?.code !== "RETRYABLE")
        return undefined;
    const raw = err.data?.retryAfter;
    if (typeof raw === "number")
        return raw;
    if (raw instanceof Date)
        return raw.getTime() - Date.now();
    if (typeof raw === "string") {
        try {
            return toMs(raw);
        }
        catch {
            return undefined;
        }
    }
    return undefined;
}
/**
 * In the real runtime, retry delay is expressed to the orchestrator as a
 * `retryAfter` field on the step callback response, and the DO sets an
 * alarm — no worker sits idle. In tests we skip the delay (pass it
 * through `setTimeout(0)` at most) so the suite stays fast.
 *
 * Non-positive delays return immediately; anything else is clamped to
 * 10ms of real time regardless of the declared amount.
 */
async function maybeDelay(ms) {
    if (ms <= 0)
        return;
    const capped = Math.min(ms, 10);
    await new Promise((wake) => setTimeout(wake, capped));
}
// Converts a duration into milliseconds. Numbers pass through as-is;
// strings must match `<digits><unit>` with unit one of ms/s/m/h/d/w.
// Throws on any other shape (negative, float, or unknown unit).
const DURATION_RE = /^(\d+)(ms|s|m|h|d|w)$/;
const UNIT_MS = {
    ms: 1,
    s: 1000,
    m: 60_000,
    h: 3_600_000,
    d: 86_400_000,
    w: 604_800_000,
};
function toMs(d) {
    if (typeof d === "number")
        return d;
    const match = DURATION_RE.exec(d);
    if (!match)
        throw new Error(`invalid duration: ${String(d)}`);
    const [, amount, unit] = match;
    return Number(amount) * UNIT_MS[unit];
}
@@ -0,0 +1,19 @@
export interface ClockState {
    /** Base wall-clock time recorded at run start. */
    readonly baseWallClock: number;
    /** Offset from baseWallClock at which ctx.now() should return — set by the executor when replaying journaled events. */
    offset: number;
}
/** Creates a fresh clock anchored at the run's start time (offset 0). */
export declare function createClock(runStartedAt: number): ClockState;
/** Deterministic "current time": baseWallClock + offset. */
export declare function now(clock: ClockState): number;
/** Advance the clock to the event currently being replayed. */
export declare function advanceClockTo(clock: ClockState, eventAt: number): void;
/**
 * Mulberry32 PRNG — fast, fine for workflow-determinism use. Seeded
 * from a 32-bit hash of the run id. Not cryptographic.
 */
export declare function seededRandom(seed: number): () => number;
/** 32-bit FNV-1a hash of the run id, used as the PRNG seed. */
export declare function hashSeed(runId: string): number;
/** seededRandom(hashSeed(runId)) — one deterministic RNG per run. */
export declare function createRandom(runId: string): () => number;
/** v4-shaped UUID generator driven by the seeded RNG; reproducible across replays, NOT cryptographically random. */
export declare function createRandomUUID(rng: () => number): () => string;
//# sourceMappingURL=determinism.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"determinism.d.ts","sourceRoot":"","sources":["../../src/runtime/determinism.ts"],"names":[],"mappings":"AAOA,MAAM,WAAW,UAAU;IACzB,kDAAkD;IAClD,QAAQ,CAAC,aAAa,EAAE,MAAM,CAAA;IAC9B,wHAAwH;IACxH,MAAM,EAAE,MAAM,CAAA;CACf;AAED,wBAAgB,WAAW,CAAC,YAAY,EAAE,MAAM,GAAG,UAAU,CAE5D;AAED,wBAAgB,GAAG,CAAC,KAAK,EAAE,UAAU,GAAG,MAAM,CAE7C;AAED,+DAA+D;AAC/D,wBAAgB,cAAc,CAAC,KAAK,EAAE,UAAU,EAAE,OAAO,EAAE,MAAM,GAAG,IAAI,CAEvE;AAED;;;GAGG;AACH,wBAAgB,YAAY,CAAC,IAAI,EAAE,MAAM,GAAG,MAAM,MAAM,CASvD;AAED,wBAAgB,QAAQ,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,CAQ9C;AAED,wBAAgB,YAAY,CAAC,KAAK,EAAE,MAAM,GAAG,MAAM,MAAM,CAExD;AAID,wBAAgB,gBAAgB,CAAC,GAAG,EAAE,MAAM,MAAM,GAAG,MAAM,MAAM,CAgBhE"}
@@ -0,0 +1,61 @@
1
+ // Deterministic clock and RNG used by the workflow body.
2
+ //
3
+ // `ctx.now()` must return the timestamp of the event currently being
4
+ // consumed from the journal during replay (§4.5 of docs/design.md).
5
+ // `ctx.random()` / `ctx.randomUUID()` are seeded from the run id so
6
+ // replays produce the same values.
// Anchor the deterministic clock at the run's start time; replay only
// ever moves the offset, never the base.
export function createClock(runStartedAt) {
    const clock = { baseWallClock: runStartedAt, offset: 0 };
    return clock;
}
// Deterministic "current time": run start plus the replay offset.
export function now(clock) {
    const { baseWallClock, offset } = clock;
    return baseWallClock + offset;
}
/** Advance the clock to the event currently being replayed. */
export function advanceClockTo(clock, eventAt) {
    const delta = eventAt - clock.baseWallClock;
    clock.offset = delta;
}
/**
 * Mulberry32 PRNG — fast, fine for workflow-determinism use. Seeded
 * from a 32-bit hash of the run id. Not cryptographic.
 *
 * Returns a closure producing floats in [0, 1); the same seed always
 * yields the same sequence, which is what replay determinism requires.
 */
export function seededRandom(seed) {
    let s = seed >>> 0;
    return function next() {
        s = (s + 0x6d2b79f5) >>> 0;
        let x = s;
        x = Math.imul(x ^ (x >>> 15), x | 1);
        x ^= x + Math.imul(x ^ (x >>> 7), x | 61);
        return ((x ^ (x >>> 14)) >>> 0) / 4294967296;
    };
}
// 32-bit FNV-1a over the string's UTF-16 code units; collapses a run id
// into an unsigned 32-bit PRNG seed.
export function hashSeed(runId) {
    let h = 0x811c9dc5; // FNV offset basis
    for (let i = 0; i < runId.length; i += 1) {
        h = Math.imul(h ^ runId.charCodeAt(i), 0x01000193); // FNV prime
    }
    return h >>> 0;
}
// Convenience composition: one deterministic RNG per run, seeded from
// the run id so every replay of the same run observes the same
// ctx.random() sequence.
export function createRandom(runId) {
    return seededRandom(hashSeed(runId));
}
const HEX = "0123456789abcdef";
// v4-shaped deterministic UUID factory. Output matches the format of
// crypto.randomUUID but is driven by the seeded RNG, so replays produce
// identical ids — and it is therefore NOT cryptographically random.
export function createRandomUUID(rng) {
    return () => {
        // Draw 16 pseudo-random bytes, then stamp the version/variant bits.
        const bytes = new Uint8Array(16);
        for (let i = 0; i < bytes.length; i += 1) {
            bytes[i] = Math.floor(rng() * 256);
        }
        bytes[6] = (bytes[6] & 0x0f) | 0x40; // version nibble -> 4
        bytes[8] = (bytes[8] & 0x3f) | 0x80; // RFC 4122 variant bits
        // Hex-encode and group as 8-4-4-4-12.
        let hex = "";
        for (const b of bytes) {
            hex += HEX[b >>> 4] + HEX[b & 0x0f];
        }
        const groups = [
            hex.slice(0, 8),
            hex.slice(8, 12),
            hex.slice(12, 16),
            hex.slice(16, 20),
            hex.slice(20, 32),
        ];
        return groups.join("-");
    };
}