@bratsos/workflow-engine 0.1.0 → 0.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. package/README.md +274 -513
  2. package/dist/{chunk-7IITBLFY.js → chunk-NYKMT46J.js} +268 -25
  3. package/dist/chunk-NYKMT46J.js.map +1 -0
  4. package/dist/chunk-SPXBCZLB.js +17 -0
  5. package/dist/chunk-SPXBCZLB.js.map +1 -0
  6. package/dist/chunk-WZ533CPU.js +1108 -0
  7. package/dist/chunk-WZ533CPU.js.map +1 -0
  8. package/dist/{client-5vz5Vv4A.d.ts → client-D4PoxADF.d.ts} +3 -143
  9. package/dist/client.d.ts +3 -2
  10. package/dist/{index-DmR3E8D7.d.ts → index-DAzCfO1R.d.ts} +20 -1
  11. package/dist/index.d.ts +234 -601
  12. package/dist/index.js +46 -2034
  13. package/dist/index.js.map +1 -1
  14. package/dist/{interface-Cv22wvLG.d.ts → interface-MMqhfQQK.d.ts} +69 -2
  15. package/dist/kernel/index.d.ts +26 -0
  16. package/dist/kernel/index.js +3 -0
  17. package/dist/kernel/index.js.map +1 -0
  18. package/dist/kernel/testing/index.d.ts +44 -0
  19. package/dist/kernel/testing/index.js +85 -0
  20. package/dist/kernel/testing/index.js.map +1 -0
  21. package/dist/persistence/index.d.ts +2 -2
  22. package/dist/persistence/index.js +2 -1
  23. package/dist/persistence/prisma/index.d.ts +2 -2
  24. package/dist/persistence/prisma/index.js +2 -1
  25. package/dist/plugins-CPC-X0rR.d.ts +421 -0
  26. package/dist/ports-tU3rzPXJ.d.ts +245 -0
  27. package/dist/stage-BPw7m9Wx.d.ts +144 -0
  28. package/dist/testing/index.d.ts +23 -1
  29. package/dist/testing/index.js +156 -13
  30. package/dist/testing/index.js.map +1 -1
  31. package/package.json +11 -1
  32. package/skills/workflow-engine/SKILL.md +234 -348
  33. package/skills/workflow-engine/references/03-runtime-setup.md +111 -426
  34. package/skills/workflow-engine/references/05-persistence-setup.md +32 -0
  35. package/skills/workflow-engine/references/07-testing-patterns.md +141 -474
  36. package/skills/workflow-engine/references/08-common-patterns.md +125 -428
  37. package/dist/chunk-7IITBLFY.js.map +0 -1
@@ -1,497 +1,182 @@
1
- # Runtime Setup
1
+ # Kernel & Host Setup
2
2
 
3
- Complete guide for configuring and running WorkflowRuntime.
3
+ Complete guide for configuring the command kernel and choosing a host.
4
4
 
5
- ## Creating a Runtime
5
+ ## Creating a Kernel
6
+
7
+ The kernel is the core command dispatcher. It's environment-agnostic — no timers, no signals, no global state.
6
8
 
7
9
  ```typescript
8
- import { createWorkflowRuntime } from "@bratsos/workflow-engine";
10
+ import { createKernel } from "@bratsos/workflow-engine/kernel";
11
+ import type {
12
+ Kernel,
13
+ KernelConfig,
14
+ Persistence,
15
+ BlobStore,
16
+ JobTransport,
17
+ EventSink,
18
+ Scheduler,
19
+ Clock,
20
+ } from "@bratsos/workflow-engine/kernel";
9
21
  import {
10
22
  createPrismaWorkflowPersistence,
11
23
  createPrismaJobQueue,
12
- createPrismaAICallLogger,
13
24
  } from "@bratsos/workflow-engine/persistence/prisma";
14
25
  import { PrismaClient } from "@prisma/client";
15
26
 
16
27
  const prisma = new PrismaClient();
17
28
 
18
- // PostgreSQL (default)
19
- const runtime = createWorkflowRuntime({
20
- // Required
29
+ const kernel = createKernel({
30
+ // Required: metadata storage (runs, stages, logs, outbox, idempotency)
21
31
  persistence: createPrismaWorkflowPersistence(prisma),
22
- jobQueue: createPrismaJobQueue(prisma),
23
- registry: {
24
- getWorkflow: (id) => workflowMap[id] ?? null,
25
- },
26
-
27
- // Optional
28
- aiCallLogger: createPrismaAICallLogger(prisma),
29
- pollIntervalMs: 10000,
30
- jobPollIntervalMs: 1000,
31
- staleJobThresholdMs: 60000,
32
- workerId: "worker-1",
33
- getWorkflowPriority: (id) => priorityMap[id] ?? 5,
34
- });
35
-
36
- // SQLite - pass databaseType to persistence and job queue
37
- const runtime = createWorkflowRuntime({
38
- persistence: createPrismaWorkflowPersistence(prisma, { databaseType: "sqlite" }),
39
- jobQueue: createPrismaJobQueue(prisma, { databaseType: "sqlite" }),
40
- registry: { getWorkflow: (id) => workflowMap[id] ?? null },
41
- aiCallLogger: createPrismaAICallLogger(prisma),
42
- });
43
- ```
44
-
45
- ## WorkflowRuntimeConfig
46
-
47
- ```typescript
48
- interface WorkflowRuntimeConfig {
49
- /** Persistence implementation (required) */
50
- persistence: WorkflowPersistence;
51
-
52
- /** Job queue implementation (required) */
53
- jobQueue: JobQueue;
54
-
55
- /** Workflow registry (required) */
56
- registry: WorkflowRegistry;
57
-
58
- /** AI call logger for createAIHelper (optional) */
59
- aiCallLogger?: AICallLogger;
60
-
61
- /** Orchestration poll interval in ms (default: 10000) */
62
- pollIntervalMs?: number;
63
-
64
- /** Job dequeue interval in ms (default: 1000) */
65
- jobPollIntervalMs?: number;
66
-
67
- /** Worker ID (default: auto-generated) */
68
- workerId?: string;
69
-
70
- /** Stale job threshold in ms (default: 60000) */
71
- staleJobThresholdMs?: number;
72
-
73
- /** Function to determine workflow priority */
74
- getWorkflowPriority?: (workflowId: string) => number;
75
- }
76
- ```
77
-
78
- ## WorkflowRegistry
79
-
80
- The registry maps workflow IDs to workflow definitions:
81
-
82
- ```typescript
83
- interface WorkflowRegistry {
84
- getWorkflow(workflowId: string): Workflow<any, any, any> | null;
85
- }
86
-
87
- // Simple implementation
88
- const registry: WorkflowRegistry = {
89
- getWorkflow: (id) => {
90
- const workflows = {
91
- "document-analysis": documentAnalysisWorkflow,
92
- "data-processing": dataProcessingWorkflow,
93
- };
94
- return workflows[id] ?? null;
95
- },
96
- };
97
-
98
- // With type safety
99
- const workflowMap: Record<string, Workflow<any, any, any>> = {
100
- "document-analysis": documentAnalysisWorkflow,
101
- "data-processing": dataProcessingWorkflow,
102
- };
103
-
104
- const registry: WorkflowRegistry = {
105
- getWorkflow: (id) => workflowMap[id] ?? null,
106
- };
107
- ```
108
-
109
- ## Lifecycle Methods
110
-
111
- ### start()
112
32
 
113
- Start the runtime as a worker that processes jobs and polls for state changes.
33
+ // Required: large payload storage
34
+ blobStore: myBlobStore,
114
35
 
115
- ```typescript
116
- await runtime.start();
117
- // Runtime is now:
118
- // - Polling for pending workflows
119
- // - Processing jobs from the queue
120
- // - Checking suspended stages
121
- // - Handling graceful shutdown on SIGTERM/SIGINT
122
- ```
123
-
124
- ### stop()
125
-
126
- Stop the runtime gracefully.
36
+ // Required: job queue
37
+ jobTransport: createPrismaJobQueue(prisma),
127
38
 
128
- ```typescript
129
- runtime.stop();
130
- // Stops polling and job processing
131
- // Current job completes before stopping
132
- ```
39
+ // Required: async event publishing
40
+ eventSink: myEventSink,
133
41
 
134
- ## Creating and Running Workflows
42
+ // Required: deferred command triggers
43
+ scheduler: myScheduler,
135
44
 
136
- ### createRun(options)
45
+ // Required: injectable time source
46
+ clock: { now: () => new Date() },
137
47
 
138
- Create a new workflow run. The runtime picks it up automatically on the next poll.
139
-
140
- ```typescript
141
- interface CreateRunOptions {
142
- workflowId: string; // Required
143
- input: Record<string, unknown>; // Required
144
- config?: Record<string, unknown>; // Optional
145
- priority?: number; // Optional (default: 5)
146
- metadata?: Record<string, unknown>; // Optional domain-specific data
147
- }
148
-
149
- const { workflowRunId } = await runtime.createRun({
150
- workflowId: "document-analysis",
151
- input: { documentUrl: "https://example.com/doc.pdf" },
152
- config: {
153
- extract: { maxLength: 5000 },
154
- },
155
- priority: 8, // Higher = more important
156
- metadata: {
157
- userId: "user-123",
158
- requestId: "req-456",
48
+ // Required: workflow definition lookup
49
+ registry: {
50
+ getWorkflow: (id) => workflowMap.get(id) ?? null,
159
51
  },
160
52
  });
161
53
  ```
162
54
 
163
- The method:
164
- 1. Validates the workflow exists in the registry
165
- 2. Validates input against the workflow's input schema
166
- 3. Merges provided config with workflow defaults
167
- 4. Validates merged config against all stage config schemas
168
- 5. Creates a WorkflowRun record with status PENDING
169
-
170
- ### transitionWorkflow(workflowRunId)
171
-
172
- Manually trigger workflow state transition (usually handled automatically).
55
+ ## Port Interfaces
173
56
 
174
- ```typescript
175
- await runtime.transitionWorkflow(workflowRunId);
176
- ```
57
+ | Port | Interface | Purpose |
58
+ |------|-----------|---------|
59
+ | `persistence` | `Persistence` | CRUD for runs, stages, logs, outbox events, idempotency keys |
60
+ | `blobStore` | `BlobStore` | `put(key, data)`, `get(key)`, `has(key)`, `delete(key)`, `list(prefix)` |
61
+ | `jobTransport` | `JobTransport` | `enqueue`, `enqueueParallel`, `dequeue`, `complete`, `suspend`, `fail` |
62
+ | `eventSink` | `EventSink` | `emit(event)` - async event publishing |
63
+ | `scheduler` | `Scheduler` | `schedule(type, payload, runAt)`, `cancel(type, correlationId)` |
64
+ | `clock` | `Clock` | `now()` - returns `Date` |
177
65
 
178
- ### pollSuspendedStages()
66
+ ## Node Host
179
67
 
180
- Manually check suspended stages (usually handled automatically).
68
+ For long-running worker processes (Node.js, Docker containers, etc.).
181
69
 
182
70
  ```typescript
183
- await runtime.pollSuspendedStages();
184
- ```
185
-
186
- ## AI Helper Integration
71
+ import { createNodeHost } from "@bratsos/workflow-engine-host-node";
187
72
 
188
- ### createAIHelper(topic, logContext?)
189
-
190
- Create an AIHelper bound to the runtime's logger.
191
-
192
- ```typescript
193
- // Simple usage
194
- const ai = runtime.createAIHelper("my-task");
195
-
196
- // With log context (for batch operations)
197
- const logContext = runtime.createLogContext(workflowRunId, stageRecordId);
198
- const ai = runtime.createAIHelper(`workflow.${workflowRunId}`, logContext);
199
- ```
200
-
201
- ### createLogContext(workflowRunId, stageRecordId)
202
-
203
- Create a log context for AIHelper (enables batch logging to persistence).
204
-
205
- ```typescript
206
- const logContext = runtime.createLogContext(workflowRunId, stageRecordId);
207
- // { workflowRunId, stageRecordId, createLog: fn }
208
- ```
209
-
210
- ## Complete Setup Example
211
-
212
- ```typescript
213
- import {
214
- createWorkflowRuntime,
215
- WorkflowBuilder,
216
- defineStage,
217
- } from "@bratsos/workflow-engine";
218
- import {
219
- createPrismaWorkflowPersistence,
220
- createPrismaJobQueue,
221
- createPrismaAICallLogger,
222
- } from "@bratsos/workflow-engine/persistence/prisma";
223
- import { PrismaClient } from "@prisma/client";
224
- import { z } from "zod";
225
-
226
- // Initialize Prisma
227
- const prisma = new PrismaClient();
228
-
229
- // Define stages
230
- const helloStage = defineStage({
231
- id: "hello",
232
- name: "Hello Stage",
233
- schemas: {
234
- input: z.object({ name: z.string() }),
235
- output: z.object({ greeting: z.string() }),
236
- config: z.object({ prefix: z.string().default("Hello") }),
237
- },
238
- async execute(ctx) {
239
- return {
240
- output: { greeting: `${ctx.config.prefix}, ${ctx.input.name}!` },
241
- };
242
- },
243
- });
244
-
245
- // Build workflow
246
- const helloWorkflow = new WorkflowBuilder(
247
- "hello-workflow",
248
- "Hello Workflow",
249
- "A simple greeting workflow",
250
- z.object({ name: z.string() }),
251
- z.object({ greeting: z.string() })
252
- )
253
- .pipe(helloStage)
254
- .build();
255
-
256
- // Create registry
257
- const registry = {
258
- getWorkflow: (id: string) => {
259
- if (id === "hello-workflow") return helloWorkflow;
260
- return null;
261
- },
262
- };
73
+ const host = createNodeHost({
74
+ kernel,
75
+ jobTransport: createPrismaJobQueue(prisma),
76
+ workerId: "worker-1",
263
77
 
264
- // Create runtime
265
- const runtime = createWorkflowRuntime({
266
- persistence: createPrismaWorkflowPersistence(prisma),
267
- jobQueue: createPrismaJobQueue(prisma),
268
- aiCallLogger: createPrismaAICallLogger(prisma),
269
- registry,
270
- pollIntervalMs: 5000,
271
- jobPollIntervalMs: 500,
78
+ // Optional tuning
79
+ orchestrationIntervalMs: 10_000, // Claim pending, poll suspended, reap stale, flush outbox
80
+ jobPollIntervalMs: 1_000, // Dequeue and execute jobs
81
+ staleLeaseThresholdMs: 60_000, // Release stale job leases
82
+ maxClaimsPerTick: 10, // Max pending runs to claim per tick
83
+ maxSuspendedChecksPerTick: 10, // Max suspended stages to poll per tick
84
+ maxOutboxFlushPerTick: 100, // Max outbox events to flush per tick
272
85
  });
273
86
 
274
- // Start runtime
275
- async function main() {
276
- console.log("Starting runtime...");
277
- await runtime.start();
278
-
279
- // Create a workflow run
280
- const { workflowRunId } = await runtime.createRun({
281
- workflowId: "hello-workflow",
282
- input: { name: "World" },
283
- });
284
-
285
- console.log(`Created workflow run: ${workflowRunId}`);
286
-
287
- // Runtime will automatically:
288
- // 1. Pick up the pending workflow
289
- // 2. Enqueue the first stage
290
- // 3. Execute the stage
291
- // 4. Mark workflow as completed
292
- }
293
-
294
- main().catch(console.error);
87
+ // Start polling loops and register SIGTERM/SIGINT handlers
88
+ await host.start();
295
89
 
296
90
  // Graceful shutdown
297
- process.on("SIGTERM", () => {
298
- runtime.stop();
299
- prisma.$disconnect();
300
- });
301
- ```
91
+ await host.stop();
302
92
 
303
- ## Rerunning Workflows from a Specific Stage
304
-
305
- You can rerun a workflow starting from a specific stage, skipping earlier stages and using their persisted outputs. This is useful for:
306
- - Retrying after a stage failure (fix the bug, rerun from the failed stage)
307
- - Re-processing data with updated stage logic
308
- - Testing specific stages in isolation
309
-
310
- ### Using WorkflowExecutor.execute() with fromStage
311
-
312
- ```typescript
313
- import { WorkflowExecutor } from "@bratsos/workflow-engine";
314
-
315
- // Given: A workflow that has already been run (stages 1-4 completed)
316
- const executor = new WorkflowExecutor(
317
- workflow,
318
- workflowRunId,
319
- workflowType,
320
- { persistence, aiLogger }
321
- );
322
-
323
- // Rerun from stage 3 - skips stages 1-2, runs 3-4
324
- const result = await executor.execute(
325
- input, // Original input (not used when fromStage is set)
326
- config,
327
- { fromStage: "stage-3" }
328
- );
93
+ // Runtime stats
94
+ const stats = host.getStats();
95
+ // { workerId, jobsProcessed, orchestrationTicks, isRunning, uptimeMs }
329
96
  ```
330
97
 
331
- ### How It Works
332
-
333
- 1. **Finds the execution group** containing the specified stage
334
- 2. **Loads input** from the previous stage's persisted output (or workflow input if first stage)
335
- 3. **Rebuilds workflowContext** from all completed stages before the target group
336
- 4. **Deletes stage records** for the target stage and all subsequent stages (clean re-execution)
337
- 5. **Executes** from the target stage forward
338
-
339
- ### Requirements
340
-
341
- - **Previous stages must have been executed** - their outputs must be persisted
342
- - **Stage must exist** in the workflow definition
343
-
344
- ### Error Handling
98
+ ### Worker Process Pattern
345
99
 
346
100
  ```typescript
347
- // Error: Stage doesn't exist
348
- await executor.execute(input, config, { fromStage: "non-existent" });
349
- // Throws: Stage "non-existent" not found in workflow "my-workflow"
350
-
351
- // Error: No prior execution
352
- await executor.execute(input, config, { fromStage: "stage-3" });
353
- // Throws: Cannot rerun from stage "stage-3": no completed stages found before execution group 3
354
- ```
355
-
356
- ### Common Use Cases
357
-
358
- **Retry After Failure:**
359
- ```typescript
360
- // Stage 3 failed, you fixed the bug
361
- await executor.execute(input, config, { fromStage: "stage-3" });
362
- ```
363
-
364
- **Re-process with Updated Logic:**
365
- ```typescript
366
- // Updated stage-2 implementation, want to rerun from there
367
- await executor.execute(input, config, { fromStage: "stage-2" });
368
- ```
369
-
370
- **Fresh Start from Beginning:**
371
- ```typescript
372
- // Rerun entire workflow
373
- await executor.execute(input, config, { fromStage: "stage-1" });
374
- ```
375
-
376
- ### workflowContext Availability
101
+ // worker.ts
102
+ import { host } from "./setup";
377
103
 
378
- When rerunning from a stage, `ctx.workflowContext` contains outputs from all stages **before** the target group:
104
+ process.on("SIGTERM", () => host.stop());
105
+ process.on("SIGINT", () => host.stop());
379
106
 
380
- ```typescript
381
- // Rerunning from stage-3 (group 3)
382
- // ctx.workflowContext contains:
383
- // - "stage-1": { ... } // from group 1
384
- // - "stage-2": { ... } // from group 2
385
- // - NOT "stage-3" or later
107
+ console.log("Starting workflow worker...");
108
+ await host.start();
386
109
  ```
387
110
 
388
- ## Worker Deployment Patterns
111
+ ## Serverless Host
389
112
 
390
- ### Single Worker
113
+ For stateless environments (Cloudflare Workers, AWS Lambda, Vercel Edge, Deno Deploy).
391
114
 
392
115
  ```typescript
393
- // worker.ts
394
- const runtime = createWorkflowRuntime({ ... });
395
- await runtime.start();
396
- ```
397
-
398
- ### Multiple Workers (Horizontal Scaling)
116
+ import { createServerlessHost } from "@bratsos/workflow-engine-host-serverless";
399
117
 
400
- ```typescript
401
- // Each worker gets a unique ID
402
- const workerId = `worker-${process.env.POD_NAME || process.pid}`;
118
+ const host = createServerlessHost({
119
+ kernel,
120
+ jobTransport,
121
+ workerId: "my-worker",
403
122
 
404
- const runtime = createWorkflowRuntime({
405
- ...config,
406
- workerId,
123
+ // Optional tuning (same as Node host)
124
+ staleLeaseThresholdMs: 60_000,
125
+ maxClaimsPerTick: 10,
126
+ maxSuspendedChecksPerTick: 10,
127
+ maxOutboxFlushPerTick: 100,
407
128
  });
408
-
409
- await runtime.start();
410
- // Workers compete for jobs using atomic dequeue
411
- // Each job is processed by exactly one worker
412
129
  ```
413
130
 
414
- ### API Server + Separate Workers
131
+ ### Handle a Single Job
132
+
133
+ When a queue message arrives (Cloudflare Queue, SQS, etc.):
415
134
 
416
135
  ```typescript
417
- // api-server.ts - Only creates runs, doesn't process
418
- const runtime = createWorkflowRuntime({ ...config });
419
- // Don't call runtime.start()
420
-
421
- app.post("/workflows/:id/runs", async (req, res) => {
422
- const { workflowRunId } = await runtime.createRun({
423
- workflowId: req.params.id,
424
- input: req.body,
425
- });
426
- res.json({ workflowRunId });
136
+ const result = await host.handleJob({
137
+ jobId: msg.id,
138
+ workflowRunId: msg.body.workflowRunId,
139
+ workflowId: msg.body.workflowId,
140
+ stageId: msg.body.stageId,
141
+ attempt: msg.body.attempt,
142
+ maxAttempts: msg.body.maxAttempts,
143
+ payload: msg.body.payload,
427
144
  });
428
145
 
429
- // worker.ts - Only processes, doesn't create
430
- const runtime = createWorkflowRuntime({ ...config });
431
- await runtime.start();
146
+ if (result.outcome === "completed") msg.ack();
147
+ else if (result.outcome === "suspended") msg.ack();
148
+ else msg.retry();
432
149
  ```
433
150
 
434
- ## Configuration Recommendations
151
+ ### Dequeue and Process Jobs
435
152
 
436
- ### Development
153
+ For environments that poll rather than receive:
437
154
 
438
155
  ```typescript
439
- const runtime = createWorkflowRuntime({
440
- ...config,
441
- pollIntervalMs: 2000, // Fast polling for development
442
- jobPollIntervalMs: 500, // Quick job pickup
443
- staleJobThresholdMs: 30000, // Short timeout
444
- });
156
+ const result = await host.processAvailableJobs({ maxJobs: 5 });
157
+ // { processed, succeeded, failed }
445
158
  ```
446
159
 
447
- ### Production
448
-
449
- ```typescript
450
- const runtime = createWorkflowRuntime({
451
- ...config,
452
- pollIntervalMs: 10000, // Standard polling
453
- jobPollIntervalMs: 1000, // Balance between latency and DB load
454
- staleJobThresholdMs: 60000, // Allow for longer processing
455
- workerId: `worker-${process.env.HOSTNAME}`,
456
- });
457
- ```
160
+ ### Maintenance Tick
458
161
 
459
- ### High-Throughput
162
+ Run from a cron trigger (Cloudflare Cron, EventBridge, etc.):
460
163
 
461
164
  ```typescript
462
- const runtime = createWorkflowRuntime({
463
- ...config,
464
- pollIntervalMs: 5000, // More frequent orchestration
465
- jobPollIntervalMs: 100, // Aggressive job pickup
466
- staleJobThresholdMs: 120000, // Longer timeout for long jobs
467
- });
165
+ const tick = await host.runMaintenanceTick();
166
+ // { claimed, suspendedChecked, staleReleased, eventsFlushed }
167
+ // Resumed suspended stages are automatically followed by run.transition.
468
168
  ```
469
169
 
470
- ## Monitoring
471
-
472
- The runtime logs key events to console:
170
+ ## Multi-Worker Setup
473
171
 
474
- ```
475
- [Runtime] Starting worker worker-12345-hostname
476
- [Runtime] Poll interval: 10000ms, Job poll: 1000ms
477
- [Runtime] Created WorkflowRun abc123 for document-analysis
478
- [Runtime] Found 1 pending workflows
479
- [Runtime] Started workflow abc123
480
- [Runtime] Processing stage extract for workflow abc123
481
- [Runtime] Worker worker-12345-hostname: processed 10 jobs
482
- [Runtime] Workflow abc123 completed
483
- ```
484
-
485
- For production monitoring, integrate with your observability stack:
172
+ Multiple workers can share the same database. Each worker needs a unique `workerId`:
486
173
 
487
174
  ```typescript
488
- // Custom logging
489
- const originalLog = console.log;
490
- console.log = (...args) => {
491
- if (args[0]?.includes("[Runtime]")) {
492
- metrics.increment("workflow.runtime.log");
493
- logger.info(args.join(" "));
494
- }
495
- originalLog(...args);
496
- };
175
+ // Worker 1
176
+ createNodeHost({ kernel, jobTransport, workerId: "worker-1" });
177
+
178
+ // Worker 2
179
+ createNodeHost({ kernel, jobTransport, workerId: "worker-2" });
497
180
  ```
181
+
182
+ The `claimPendingRun` operation uses `FOR UPDATE SKIP LOCKED` in PostgreSQL to prevent race conditions.
@@ -39,6 +39,8 @@ The workflow engine uses three persistence interfaces:
39
39
 
40
40
  ```typescript
41
41
  interface WorkflowPersistence {
42
+ withTransaction<T>(fn: (tx: WorkflowPersistence) => Promise<T>): Promise<T>;
43
+
42
44
  // WorkflowRun operations
43
45
  createRun(data: CreateRunInput): Promise<WorkflowRunRecord>;
44
46
  updateRun(id: string, data: UpdateRunInput): Promise<void>;
@@ -74,6 +76,36 @@ interface WorkflowPersistence {
74
76
 
75
77
  // Stage output convenience
76
78
  saveStageOutput(runId, workflowType, stageId, output): Promise<string>;
79
+
80
+ // Outbox operations
81
+ appendOutboxEvents(events: CreateOutboxEventInput[]): Promise<void>;
82
+ getUnpublishedOutboxEvents(limit?: number): Promise<OutboxRecord[]>;
83
+ markOutboxEventsPublished(ids: string[]): Promise<void>;
84
+ incrementOutboxRetryCount(id: string): Promise<number>;
85
+ moveOutboxEventToDLQ(id: string): Promise<void>;
86
+ replayDLQEvents(maxEvents: number): Promise<number>;
87
+
88
+ // Idempotency operations
89
+ acquireIdempotencyKey(key: string, commandType: string): Promise<
90
+ | { status: "acquired" }
91
+ | { status: "replay"; result: unknown }
92
+ | { status: "in_progress" }
93
+ >;
94
+ completeIdempotencyKey(key: string, commandType: string, result: unknown): Promise<void>;
95
+ releaseIdempotencyKey(key: string, commandType: string): Promise<void>;
96
+ }
97
+ ```
98
+
99
+ ```typescript
100
+ interface DequeueResult {
101
+ jobId: string;
102
+ workflowRunId: string;
103
+ workflowId: string;
104
+ stageId: string;
105
+ priority: number;
106
+ attempt: number;
107
+ maxAttempts: number;
108
+ payload: Record<string, unknown>;
77
109
  }
78
110
  ```
79
111