@bratsos/workflow-engine 0.0.11 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/README.md +270 -513
  2. package/dist/chunk-D7RVRRM2.js +3 -0
  3. package/dist/chunk-D7RVRRM2.js.map +1 -0
  4. package/dist/chunk-HL3OJG7W.js +1033 -0
  5. package/dist/chunk-HL3OJG7W.js.map +1 -0
  6. package/dist/chunk-MUWP5SF2.js +33 -0
  7. package/dist/chunk-MUWP5SF2.js.map +1 -0
  8. package/dist/chunk-NYKMT46J.js +1143 -0
  9. package/dist/chunk-NYKMT46J.js.map +1 -0
  10. package/dist/chunk-P4KMGCT3.js +2292 -0
  11. package/dist/chunk-P4KMGCT3.js.map +1 -0
  12. package/dist/chunk-SPXBCZLB.js +17 -0
  13. package/dist/chunk-SPXBCZLB.js.map +1 -0
  14. package/dist/cli/sync-models.d.ts +1 -0
  15. package/dist/cli/sync-models.js +210 -0
  16. package/dist/cli/sync-models.js.map +1 -0
  17. package/dist/client-D4PoxADF.d.ts +798 -0
  18. package/dist/client.d.ts +5 -0
  19. package/dist/client.js +4 -0
  20. package/dist/client.js.map +1 -0
  21. package/dist/index-DAzCfO1R.d.ts +217 -0
  22. package/dist/index.d.ts +569 -0
  23. package/dist/index.js +399 -0
  24. package/dist/index.js.map +1 -0
  25. package/dist/interface-MMqhfQQK.d.ts +411 -0
  26. package/dist/kernel/index.d.ts +26 -0
  27. package/dist/kernel/index.js +3 -0
  28. package/dist/kernel/index.js.map +1 -0
  29. package/dist/kernel/testing/index.d.ts +44 -0
  30. package/dist/kernel/testing/index.js +85 -0
  31. package/dist/kernel/testing/index.js.map +1 -0
  32. package/dist/persistence/index.d.ts +2 -0
  33. package/dist/persistence/index.js +6 -0
  34. package/dist/persistence/index.js.map +1 -0
  35. package/dist/persistence/prisma/index.d.ts +37 -0
  36. package/dist/persistence/prisma/index.js +5 -0
  37. package/dist/persistence/prisma/index.js.map +1 -0
  38. package/dist/plugins-BCnDUwIc.d.ts +415 -0
  39. package/dist/ports-tU3rzPXJ.d.ts +245 -0
  40. package/dist/stage-BPw7m9Wx.d.ts +144 -0
  41. package/dist/testing/index.d.ts +264 -0
  42. package/dist/testing/index.js +920 -0
  43. package/dist/testing/index.js.map +1 -0
  44. package/package.json +11 -1
  45. package/skills/workflow-engine/SKILL.md +234 -348
  46. package/skills/workflow-engine/references/03-runtime-setup.md +111 -426
  47. package/skills/workflow-engine/references/05-persistence-setup.md +32 -0
  48. package/skills/workflow-engine/references/07-testing-patterns.md +141 -474
  49. package/skills/workflow-engine/references/08-common-patterns.md +118 -431
@@ -1,503 +1,190 @@
1
1
  # Common Patterns
2
2
 
3
- Best practices, recipes, and troubleshooting for the workflow engine.
3
+ Best practices, recipes, and patterns for the command kernel.
4
4
 
5
- ## Document Processing Pipeline
5
+ ## Idempotency
6
6
 
7
- A common pattern for processing documents through multiple stages:
7
+ The `run.create` and `job.execute` commands support idempotency keys. Replaying a command with the same key returns the cached result without re-executing:
8
8
 
9
9
  ```typescript
10
- import { WorkflowBuilder, defineStage } from "@bratsos/workflow-engine";
11
- import { z } from "zod";
12
-
13
- // Stage 1: Extract content
14
- const extractStage = defineStage({
15
- id: "extract",
16
- name: "Extract Content",
17
- schemas: {
18
- input: z.object({ documentUrl: z.string().url() }),
19
- output: z.object({
20
- text: z.string(),
21
- metadata: z.object({
22
- title: z.string().optional(),
23
- pageCount: z.number(),
24
- }),
25
- }),
26
- config: z.object({
27
- maxPages: z.number().default(100),
28
- }),
29
- },
30
- async execute(ctx) {
31
- const content = await fetchAndExtract(ctx.input.documentUrl, ctx.config);
32
- return { output: content };
33
- },
34
- });
35
-
36
- // Stage 2: Analyze (parallel)
37
- const classifyStage = defineStage({
38
- id: "classify",
39
- name: "Classify",
40
- dependencies: ["extract"],
41
- schemas: {
42
- input: "none",
43
- output: z.object({ categories: z.array(z.string()) }),
44
- config: z.object({ model: z.string().default("gemini-2.5-flash") }),
45
- },
46
- async execute(ctx) {
47
- const { text } = ctx.require("extract");
48
- const ai = createAIHelper("classify", aiLogger);
49
- const { object } = await ai.generateObject(ctx.config.model, text, ctx.schemas.output);
50
- return { output: object };
51
- },
52
- });
10
+ const cmd = {
11
+ type: "run.create" as const,
12
+ idempotencyKey: "order-123-workflow",
13
+ workflowId: "process-order",
14
+ input: { orderId: "123" },
15
+ };
53
16
 
54
- const summarizeStage = defineStage({
55
- id: "summarize",
56
- name: "Summarize",
57
- dependencies: ["extract"],
58
- schemas: {
59
- input: "none",
60
- output: z.object({ summary: z.string() }),
61
- config: z.object({
62
- maxWords: z.number().default(200),
63
- model: z.string().default("gemini-2.5-flash"),
64
- }),
65
- },
66
- async execute(ctx) {
67
- const { text } = ctx.require("extract");
68
- const ai = createAIHelper("summarize", aiLogger);
69
- const { text: summary } = await ai.generateText(
70
- ctx.config.model,
71
- `Summarize in ${ctx.config.maxWords} words:\n\n${text}`
72
- );
73
- return { output: { summary } };
74
- },
75
- });
17
+ // First call creates the run
18
+ const first = await kernel.dispatch(cmd);
76
19
 
77
- // Stage 3: Merge results
78
- const mergeStage = defineStage({
79
- id: "merge",
80
- name: "Merge Results",
81
- dependencies: ["classify", "summarize"],
82
- schemas: {
83
- input: "none",
84
- output: z.object({
85
- title: z.string().optional(),
86
- summary: z.string(),
87
- categories: z.array(z.string()),
88
- }),
89
- config: z.object({}),
90
- },
91
- async execute(ctx) {
92
- const extraction = ctx.require("extract");
93
- const classification = ctx.require("classify");
94
- const summary = ctx.require("summarize");
95
-
96
- return {
97
- output: {
98
- title: extraction.metadata.title,
99
- summary: summary.summary,
100
- categories: classification.categories,
101
- },
102
- };
103
- },
104
- });
105
-
106
- // Build workflow
107
- const documentPipeline = new WorkflowBuilder(
108
- "document-pipeline",
109
- "Document Pipeline",
110
- "Extract, classify, and summarize documents",
111
- z.object({ documentUrl: z.string().url() }),
112
- z.object({ title: z.string().optional(), summary: z.string(), categories: z.array(z.string()) })
113
- )
114
- .pipe(extractStage)
115
- .parallel([classifyStage, summarizeStage])
116
- .pipe(mergeStage)
117
- .build();
20
+ // Second call returns cached result (no duplicate run)
21
+ const second = await kernel.dispatch(cmd);
22
+ // first.workflowRunId === second.workflowRunId
118
23
  ```
119
24
 
120
- ## AI Classification Workflow
121
-
122
- Pattern for multi-label classification with confidence scores:
123
-
124
- ```typescript
125
- const ClassificationSchema = z.object({
126
- labels: z.array(z.object({
127
- name: z.string(),
128
- confidence: z.number().min(0).max(1),
129
- reasoning: z.string(),
130
- })),
131
- primaryLabel: z.string(),
132
- });
133
-
134
- const classificationStage = defineStage({
135
- id: "classification",
136
- name: "AI Classification",
137
- schemas: {
138
- input: z.object({ text: z.string() }),
139
- output: ClassificationSchema,
140
- config: z.object({
141
- model: z.string().default("gemini-2.5-flash"),
142
- labels: z.array(z.string()),
143
- minConfidence: z.number().default(0.7),
144
- }),
145
- },
146
- async execute(ctx) {
147
- const ai = createAIHelper("classification", aiLogger);
148
-
149
- const prompt = `Classify the following text into these categories: ${ctx.config.labels.join(", ")}
150
-
151
- For each applicable label, provide:
152
- - The label name
153
- - A confidence score (0-1)
154
- - Brief reasoning
155
-
156
- Text to classify:
157
- ${ctx.input.text}`;
158
-
159
- const { object } = await ai.generateObject(ctx.config.model, prompt, ClassificationSchema);
160
-
161
- // Filter by minimum confidence
162
- const filtered = {
163
- ...object,
164
- labels: object.labels.filter(l => l.confidence >= ctx.config.minConfidence),
165
- };
25
+ Use deterministic keys derived from domain data (e.g., `order-${orderId}`) to prevent duplicate processing.
166
26
 
167
- return { output: filtered };
168
- },
169
- });
170
- ```
27
+ If the same key is currently executing, dispatch throws `IdempotencyInProgressError`. Retry with backoff instead of issuing parallel same-key commands.
171
28
 
172
- ## Error Recovery Pattern
29
+ ## Transactional Outbox
173
30
 
174
- Graceful error handling with fallbacks:
31
+ Events are not emitted directly. Instead, handlers write events to a transactional outbox table. The `outbox.flush` command publishes pending events through the EventSink:
175
32
 
176
33
  ```typescript
177
- const robustStage = defineStage({
178
- id: "robust-stage",
179
- name: "Robust Stage",
180
- schemas: {
181
- input: z.object({ data: z.any() }),
182
- output: z.object({
183
- result: z.any(),
184
- usedFallback: z.boolean(),
185
- error: z.string().optional(),
186
- }),
187
- config: z.object({
188
- primaryModel: z.string().default("claude-sonnet-4-20250514"),
189
- fallbackModel: z.string().default("gemini-2.5-flash"),
190
- maxRetries: z.number().default(3),
191
- }),
192
- },
193
- async execute(ctx) {
194
- const ai = createAIHelper("robust", aiLogger);
195
-
196
- // Try primary model
197
- for (let attempt = 1; attempt <= ctx.config.maxRetries; attempt++) {
198
- try {
199
- const result = await ai.generateText(ctx.config.primaryModel, ctx.input.data);
200
- return {
201
- output: { result: result.text, usedFallback: false },
202
- };
203
- } catch (error) {
204
- await ctx.log("WARN", `Primary model failed (attempt ${attempt})`, {
205
- error: error.message,
206
- });
207
-
208
- if (attempt === ctx.config.maxRetries) break;
209
- await sleep(1000 * attempt); // Exponential backoff
210
- }
211
- }
212
-
213
- // Try fallback model
214
- try {
215
- await ctx.log("INFO", "Using fallback model");
216
- const result = await ai.generateText(ctx.config.fallbackModel, ctx.input.data);
217
- return {
218
- output: {
219
- result: result.text,
220
- usedFallback: true,
221
- error: "Primary model failed, used fallback",
222
- },
223
- };
224
- } catch (error) {
225
- await ctx.log("ERROR", "Fallback model also failed");
226
- throw new Error(`All models failed: ${error.message}`);
227
- }
228
- },
229
- });
34
+ // Events accumulate in the outbox as commands execute
35
+ await kernel.dispatch({ type: "run.create", ... });
36
+ await kernel.dispatch({ type: "job.execute", ... });
37
+
38
+ // Flush publishes all pending events
39
+ await kernel.dispatch({ type: "outbox.flush", maxEvents: 100 });
230
40
  ```
231
41
 
232
- ## Cost Optimization Strategies
42
+ This ensures events are only published after the underlying database transaction succeeds, preventing lost or phantom events.
233
43
 
234
- ### 1. Use Batch Operations
44
+ The outbox includes retry logic with a dead-letter queue (DLQ). Events that fail to publish are retried up to 3 times before being moved to the DLQ. Use `plugin.replayDLQ` to reprocess them:
235
45
 
236
46
  ```typescript
237
- // Instead of many individual calls
238
- for (const item of items) {
239
- await ai.generateText(model, item.prompt); // Expensive!
240
- }
241
-
242
- // Use batch for 50% savings
243
- const batch = ai.batch(model, "anthropic");
244
- const handle = await batch.submit(items.map((item, i) => ({
245
- id: `item-${i}`,
246
- prompt: item.prompt,
247
- })));
47
+ await kernel.dispatch({ type: "plugin.replayDLQ", maxEvents: 50 });
248
48
  ```
249
49
 
250
- ### 2. Cache Expensive Results
251
-
252
- ```typescript
253
- async execute(ctx) {
254
- const cacheKey = `result-${hash(ctx.input)}`;
255
-
256
- // Check cache
257
- if (await ctx.storage.exists(cacheKey)) {
258
- return { output: await ctx.storage.load(cacheKey) };
259
- }
260
-
261
- // Compute expensive result
262
- const result = await expensiveOperation(ctx.input);
50
+ ## Stale Lease Recovery
263
51
 
264
- // Cache for future runs
265
- await ctx.storage.save(cacheKey, result);
266
-
267
- return { output: result };
268
- }
269
- ```
270
-
271
- ### 3. Use Appropriate Models
52
+ When a worker crashes, its job leases become stale. The `lease.reapStale` command releases them:
272
53
 
273
54
  ```typescript
274
- // Quick classification - use fast model
275
- const { object } = await ai.generateObject("gemini-2.5-flash", prompt, schema);
276
-
277
- // Complex reasoning - use powerful model
278
- const { text } = await ai.generateText("claude-sonnet-4-20250514", complexPrompt, {
279
- maxTokens: 4000,
55
+ await kernel.dispatch({
56
+ type: "lease.reapStale",
57
+ staleThresholdMs: 60_000, // Release jobs locked > 60s
280
58
  });
281
59
  ```
282
60
 
283
- ### 4. Optimize Token Usage
284
-
285
- ```typescript
286
- // Truncate long inputs
287
- const truncatedText = text.slice(0, 50000);
288
-
289
- // Use structured output to reduce tokens
290
- const { object } = await ai.generateObject(model, prompt, schema);
291
- // vs: const { text } = await ai.generateText(model, prompt); JSON.parse(text);
292
- ```
61
+ In the Node host, this runs automatically on each orchestration tick. For serverless, include it in your maintenance cron.
293
62
 
294
- ## Logging Best Practices
63
+ ## Rerun From Stage
295
64
 
296
- ### Structured Logging
65
+ Rerun a workflow from a specific stage, keeping outputs from earlier stages:
297
66
 
298
67
  ```typescript
299
- async execute(ctx) {
300
- await ctx.log("INFO", "Stage started", {
301
- inputSize: JSON.stringify(ctx.input).length,
302
- config: ctx.config,
303
- });
304
-
305
- try {
306
- const result = await processData(ctx.input);
307
-
308
- await ctx.log("INFO", "Processing complete", {
309
- itemsProcessed: result.items.length,
310
- duration: Date.now() - startTime,
311
- });
68
+ const { deletedStages } = await kernel.dispatch({
69
+ type: "run.rerunFrom",
70
+ workflowRunId: "run-123",
71
+ fromStageId: "summarize",
72
+ });
312
73
 
313
- return { output: result };
314
- } catch (error) {
315
- await ctx.log("ERROR", "Processing failed", {
316
- error: error.message,
317
- stack: error.stack,
318
- });
319
- throw error;
320
- }
321
- }
74
+ // Stages from "summarize" onward are deleted and re-queued
75
+ // Earlier stages (e.g., "extract") keep their outputs
322
76
  ```
323
77
 
324
- ### Log Levels
325
-
326
- | Level | Use For |
327
- |-------|---------|
328
- | DEBUG | Detailed debugging info |
329
- | INFO | Normal operations |
330
- | WARN | Recoverable issues |
331
- | ERROR | Failures |
332
-
333
- ## Type-Safe Context Passing
78
+ ## Plugin System
334
79
 
335
- ### Define Workflow Context Type
80
+ Plugins react to kernel events published through the outbox:
336
81
 
337
82
  ```typescript
338
- // Define exact shape of workflow context
339
- type MyWorkflowContext = {
340
- "extract": { text: string; metadata: { title: string } };
341
- "classify": { categories: string[] };
342
- "summarize": { summary: string };
343
- };
83
+ import { definePlugin, createPluginRunner } from "@bratsos/workflow-engine/kernel";
344
84
 
345
- // Use in stage definition
346
- const mergeStage = defineStage<
347
- "none",
348
- typeof OutputSchema,
349
- typeof ConfigSchema,
350
- MyWorkflowContext
351
- >({
352
- id: "merge",
353
- // ctx.require("extract") is now typed as { text: string; metadata: { title: string } }
354
- async execute(ctx) {
355
- const extract = ctx.require("extract"); // Typed!
356
- const classify = ctx.require("classify"); // Typed!
85
+ const metricsPlugin = definePlugin({
86
+ name: "metrics",
87
+ handlers: {
88
+ "workflow:completed": async (event) => {
89
+ await recordMetric("workflow_completed", { workflowId: event.workflowId });
90
+ },
91
+ "stage:failed": async (event) => {
92
+ await alertOnFailure(event);
93
+ },
357
94
  },
358
95
  });
359
- ```
360
96
 
361
- ### Infer from Workflow
362
-
363
- ```typescript
364
- import type { InferWorkflowContext } from "@bratsos/workflow-engine";
365
-
366
- const workflow = new WorkflowBuilder(...)
367
- .pipe(extractStage)
368
- .pipe(classifyStage)
369
- .build();
97
+ const runner = createPluginRunner({
98
+ plugins: [metricsPlugin],
99
+ eventSink: myEventSink,
100
+ });
370
101
 
371
- type WorkflowContext = InferWorkflowContext<typeof workflow>;
102
+ // Process events from the outbox
103
+ await runner.processEvents(events);
372
104
  ```
373
105
 
374
- ## Troubleshooting Guide
106
+ ## Multi-Worker Coordination
375
107
 
376
- ### Stage Not Found in Context
108
+ Multiple workers can process jobs from the same queue safely:
377
109
 
378
- **Error**: `Missing required stage output: "stage-id"`
110
- **Run claiming** uses `FOR UPDATE SKIP LOCKED` in PostgreSQL — no duplicate claims
111
- **Job dequeuing** uses atomic `UPDATE ... WHERE status = 'PENDING'` — no duplicate execution
112
+ - **Stale lease recovery** releases jobs from crashed workers
379
113
 
380
- **Cause**: Stage dependency not in workflow or hasn't run yet.
381
-
382
- **Fix**:
383
- 1. Check `dependencies` array includes the stage
384
- 2. Verify stage is added before dependent stage in workflow
385
- 3. Use `ctx.optional()` if stage is truly optional
386
-
387
- ### Batch Never Completes
388
-
389
- **Symptoms**: Stage stays SUSPENDED forever
390
-
391
- **Causes**:
392
- 1. `nextPollAt` not being updated
393
- 2. `maxWaitTime` too short
394
- 3. Batch provider issues
395
-
396
- **Fix**:
397
114
  ```typescript
398
- // In checkCompletion
399
- return {
400
- ready: false,
401
- nextCheckIn: 60000, // Make sure this is set
402
- };
403
-
404
- // Check batch status manually
405
- const batch = ai.batch(model, provider);
406
- const status = await batch.getStatus(batchId);
407
- console.log("Batch status:", status);
115
+ // Worker 1 and Worker 2 can run simultaneously
116
+ const host1 = createNodeHost({ kernel, jobTransport, workerId: "worker-1" });
117
+ const host2 = createNodeHost({ kernel, jobTransport, workerId: "worker-2" });
408
118
  ```
409
119
 
410
- ### Type Mismatch in Workflow
411
-
412
- **Error**: TypeScript errors about incompatible types
413
-
414
- **Cause**: Output of one stage doesn't match input of next.
415
-
416
- **Fix**:
417
- 1. Use `input: "none"` for stages that only use context
418
- 2. Ensure schemas match across stages
419
- 3. Use `.passthrough()` on Zod schemas for flexibility
420
-
421
- ### Prisma Enum Errors
422
-
423
- **Error**: `Invalid value for argument 'status'. Expected Status`
424
-
425
- **Cause**: Prisma 7.x requires typed enums, not strings
426
-
427
- **Fix**: The library handles this automatically via the enum compatibility layer. If you see this error:
428
- 1. Ensure you're using `createPrismaWorkflowPersistence(prisma)`
429
- 2. Check you're not bypassing the persistence layer with direct Prisma calls
120
+ ## Optimistic Concurrency
430
121
 
431
- ### Memory Issues with Large Batches
122
+ The persistence layer uses version fields on records to detect concurrent modifications:
432
123
 
433
- **Symptoms**: Process crashes or slows down
434
-
435
- **Fix**:
436
124
  ```typescript
437
- // Process in smaller batches
438
- const CHUNK_SIZE = 100;
439
- for (let i = 0; i < items.length; i += CHUNK_SIZE) {
440
- const chunk = items.slice(i, i + CHUNK_SIZE);
441
- await processChunk(chunk);
442
- }
443
-
444
- // Stream instead of loading all at once
445
- for await (const chunk of streamResults(batchId)) {
446
- yield processChunk(chunk);
447
- }
125
+ // If two workers try to update the same run simultaneously,
126
+ // one will get a StaleVersionError and retry
127
+ import { StaleVersionError } from "@bratsos/workflow-engine";
448
128
  ```
449
129
 
450
- ### Workflow Stuck in RUNNING
451
-
452
- **Symptoms**: Workflow never completes
130
+ ## Document Processing Pipeline
453
131
 
454
- **Causes**:
455
- 1. Stage threw unhandled error
456
- 2. Job queue not processing
457
- 3. Worker crashed
132
+ A common pattern combining sequential and parallel stages:
458
133
 
459
- **Fix**:
460
134
  ```typescript
461
- // Check for failed stages
462
- const stages = await persistence.getStagesByRun(runId);
463
- const failed = stages.filter(s => s.status === "FAILED");
464
- if (failed.length > 0) {
465
- console.log("Failed stages:", failed.map(s => ({ id: s.stageId, error: s.errorMessage })));
466
- }
467
-
468
- // Release stale jobs
469
- await jobQueue.releaseStaleJobs(60000);
135
+ const workflow = new WorkflowBuilder(
136
+ "doc-processor", "Document Processor", "Process documents",
137
+ InputSchema, OutputSchema,
138
+ )
139
+ .pipe(extractTextStage) // Stage 1: Extract
140
+ .parallel([
141
+ sentimentAnalysisStage, // Stage 2a: Analyze sentiment
142
+ keywordExtractionStage, // Stage 2b: Extract keywords
143
+ ])
144
+ .pipe(aggregateResultsStage) // Stage 3: Combine results
145
+ .build();
470
146
  ```
471
147
 
472
- ## Configuration Recipes
473
-
474
- ### Development Config
148
+ Subsequent stages access parallel outputs by stage ID:
475
149
 
476
150
  ```typescript
477
- const devConfig = {
478
- pollIntervalMs: 2000,
479
- jobPollIntervalMs: 500,
480
- staleJobThresholdMs: 30000,
481
- };
151
+ async execute(ctx) {
152
+ const sentiment = ctx.require("sentiment-analysis");
153
+ const keywords = ctx.require("keyword-extraction");
154
+ // ...
155
+ }
482
156
  ```
483
157
 
484
- ### Production Config
158
+ ## Error Handling in Stages
485
159
 
486
160
  ```typescript
487
- const prodConfig = {
488
- pollIntervalMs: 10000,
489
- jobPollIntervalMs: 1000,
490
- staleJobThresholdMs: 120000,
491
- workerId: `worker-${process.env.HOSTNAME}`,
492
- };
161
+ async execute(ctx) {
162
+ try {
163
+ const result = await processDocument(ctx.input);
164
+ return { output: result };
165
+ } catch (error) {
166
+ ctx.log("ERROR", "Processing failed", {
167
+ error: error instanceof Error ? error.message : String(error),
168
+ });
169
+ throw error; // Re-throw to mark stage as FAILED
170
+ }
171
+ }
493
172
  ```
494
173
 
495
- ### High-Throughput Config
174
+ Failed stages trigger the `stage:failed` event. The host's maintenance tick detects failure through `run.transition`, which marks the workflow as FAILED.
175
+
176
+ ## Progress Reporting
496
177
 
497
178
  ```typescript
498
- const throughputConfig = {
499
- pollIntervalMs: 5000,
500
- jobPollIntervalMs: 100,
501
- staleJobThresholdMs: 300000,
502
- };
179
+ async execute(ctx) {
180
+ for (const [index, item] of items.entries()) {
181
+ ctx.onProgress({
182
+ progress: (index + 1) / items.length,
183
+ message: `Processing item ${index + 1}/${items.length}`,
184
+ details: { currentItem: item.id },
185
+ });
186
+ await processItem(item);
187
+ }
188
+ return { output: { processedCount: items.length } };
189
+ }
503
190
  ```