@bratsos/workflow-engine 0.0.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,546 @@
1
+ # Testing Patterns
2
+
3
+ A complete guide to testing individual workflow stages and entire workflows.
4
+
5
+ ## Test Utilities
6
+
7
+ The workflow engine provides testing utilities:
8
+
9
+ ```typescript
10
+ import {
11
+ TestWorkflowPersistence,
12
+ TestJobQueue,
13
+ MockAIHelper,
14
+ } from "@bratsos/workflow-engine/testing";
15
+ ```
16
+
17
+ ## TestWorkflowPersistence
18
+
19
+ In-memory implementation of `WorkflowPersistence` for testing:
20
+
21
+ ```typescript
22
+ const persistence = new TestWorkflowPersistence();
23
+
24
+ // Create a run
25
+ const run = await persistence.createRun({
26
+ workflowId: "test-workflow",
27
+ workflowName: "Test Workflow",
28
+ workflowType: "test",
29
+ input: { data: "test" },
30
+ });
31
+
32
+ // Update and query
33
+ await persistence.updateRun(run.id, { status: "RUNNING" });
34
+ const retrieved = await persistence.getRun(run.id);
35
+
36
+ // Reset between tests
37
+ persistence.reset();
38
+ ```
39
+
40
+ ## TestJobQueue
41
+
42
+ In-memory implementation of `JobQueue`:
43
+
44
+ ```typescript
45
+ const jobQueue = new TestJobQueue();
46
+
47
+ // Enqueue a job
48
+ const jobId = await jobQueue.enqueue({
49
+ workflowRunId: "run-123",
50
+ stageId: "stage-1",
51
+ priority: 5,
52
+ });
53
+
54
+ // Dequeue
55
+ const job = await jobQueue.dequeue();
56
+
57
+ // Complete or fail
58
+ await jobQueue.complete(jobId);
59
+ await jobQueue.fail(jobId, "Error message", true); // retry=true
60
+
61
+ // Reset between tests
62
+ jobQueue.reset();
63
+ ```
64
+
65
+ ## MockAIHelper
66
+
67
+ Mock implementation of `AIHelper` for predictable test results:
68
+
69
+ ```typescript
70
+ const mockAI = new MockAIHelper();
71
+
72
+ // Mock generateText responses
73
+ mockAI.mockGenerateText("Expected text response");
74
+ mockAI.mockGenerateText("Second response"); // Queue multiple
75
+
76
+ // Mock generateObject responses
77
+ mockAI.mockGenerateObject({
78
+ title: "Test Title",
79
+ tags: ["test", "mock"],
80
+ });
81
+
82
+ // Mock embeddings
83
+ mockAI.mockEmbed([0.1, 0.2, 0.3, /* ... */]);
84
+
85
+ // Mock errors
86
+ mockAI.mockError(new Error("API rate limit"));
87
+
88
+ // Use in tests
89
+ const result = await mockAI.generateText("gemini-2.5-flash", "Any prompt");
90
+ // Returns "Expected text response"
91
+
92
+ // Check calls
93
+ console.log(mockAI.calls.length); // Number of calls
94
+ console.log(mockAI.calls[0].modelKey); // Model used
95
+ console.log(mockAI.calls[0].prompt); // Prompt sent
96
+
97
+ // Reset between tests
98
+ mockAI.reset();
99
+ ```
100
+
101
+ ## Testing Individual Stages
102
+
103
+ ### Unit Testing a Sync Stage
104
+
105
+ ```typescript
106
+ import { describe, it, expect, beforeEach } from "vitest";
107
+ import { TestWorkflowPersistence, MockAIHelper } from "@bratsos/workflow-engine/testing";
108
+ import { myStage, aiStage } from "./my-stage";
109
+
110
+ describe("myStage", () => {
111
+ let persistence: TestWorkflowPersistence;
112
+ let mockAI: MockAIHelper;
113
+
114
+ beforeEach(() => {
115
+ persistence = new TestWorkflowPersistence();
116
+ mockAI = new MockAIHelper();
117
+ });
118
+
119
+ it("should process input correctly", async () => {
120
+ // Arrange
121
+ const input = { data: "test input" };
122
+ const config = { verbose: true };
123
+
124
+ // Create context
125
+ const ctx = {
126
+ input,
127
+ config,
128
+ workflowContext: {},
129
+ workflowRunId: "test-run",
130
+ stageId: "my-stage",
131
+ log: async () => {},
132
+ storage: persistence.createStorage("test-run"),
133
+ require: (id: string) => {
134
+ throw new Error(`Stage ${id} not found`);
135
+ },
136
+ optional: (id: string) => undefined,
137
+ };
138
+
139
+ // Act
140
+ const result = await myStage.execute(ctx);
141
+
142
+ // Assert
143
+ expect(result.output).toBeDefined();
144
+ expect(result.output.processedData).toBe("TEST INPUT");
145
+ });
146
+
147
+ it("should use AI helper correctly", async () => {
148
+ // Mock AI response
149
+ mockAI.mockGenerateObject({ classification: "tech" });
150
+
151
+ const ctx = {
152
+ input: { text: "AI content" },
153
+ config: {},
154
+ workflowContext: {},
155
+ workflowRunId: "test-run",
156
+ stageId: "ai-stage",
157
+ log: async () => {},
158
+ storage: persistence.createStorage("test-run"),
159
+ require: () => ({}),
160
+ optional: () => undefined,
161
+ // Inject mock AI helper
162
+ ai: mockAI,
163
+ };
164
+
165
+ const result = await aiStage.execute(ctx);
166
+
167
+ // Verify AI was called
168
+ expect(mockAI.calls.length).toBe(1);
169
+ expect(mockAI.calls[0].modelKey).toBe("gemini-2.5-flash");
170
+
171
+ // Verify result
172
+ expect(result.output.classification).toBe("tech");
173
+ });
174
+
175
+ it("should handle errors gracefully", async () => {
176
+ mockAI.mockError(new Error("API error"));
177
+
178
+ const ctx = createTestContext({ ai: mockAI });
179
+
180
+ await expect(myStage.execute(ctx)).rejects.toThrow("API error");
181
+ });
182
+ });
183
+ ```
184
+
185
+ ### Testing Stage with Dependencies
186
+
187
+ ```typescript
188
+ it("should access previous stage output", async () => {
189
+ // Setup workflow context with previous stage output
190
+ const workflowContext = {
191
+ "data-extraction": {
192
+ title: "Test Document",
193
+ sections: [{ heading: "Intro", content: "..." }],
194
+ },
195
+ };
196
+
197
+ const ctx = {
198
+ input: {},
199
+ config: {},
200
+ workflowContext,
201
+ workflowRunId: "test-run",
202
+ stageId: "processing-stage",
203
+ log: async () => {},
204
+ storage: persistence.createStorage("test-run"),
205
+ require: (id: string) => {
206
+ const output = workflowContext[id];
207
+ if (output == null) throw new Error(`Missing ${id}`);
208
+ return output;
209
+ },
210
+ optional: (id: string) => workflowContext[id],
211
+ };
212
+
213
+ const result = await processingStage.execute(ctx);
214
+
215
+ expect(result.output.processedTitle).toBe("Test Document");
216
+ });
217
+ ```
218
+
219
+ ## Testing Async Batch Stages
220
+
221
+ ### Testing Execute (Suspension)
222
+
223
+ ```typescript
224
+ it("should suspend and return batch info", async () => {
225
+ const ctx = createTestContext({
226
+ workflowContext: {
227
+ "data-extraction": { items: ["a", "b", "c"] },
228
+ },
229
+ });
230
+
231
+ const result = await batchStage.execute(ctx);
232
+
233
+ // Verify suspended result
234
+ expect(result.suspended).toBe(true);
235
+ expect(result.state.batchId).toBeDefined();
236
+ expect(result.state.pollInterval).toBe(60000);
237
+ expect(result.pollConfig.nextPollAt).toBeInstanceOf(Date);
238
+ });
239
+ ```
240
+
241
+ ### Testing Execute (Resume)
242
+
243
+ ```typescript
244
+ it("should return cached results on resume", async () => {
245
+ // Setup storage with cached results
246
+ await persistence.saveArtifact({
247
+ workflowRunId: "test-run",
248
+ key: "batch-result",
249
+ type: "ARTIFACT",
250
+ data: { items: ["processed-a", "processed-b"] },
251
+ size: 100,
252
+ });
253
+
254
+ const ctx = createTestContext({
255
+ resumeState: {
256
+ batchId: "batch-123",
257
+ submittedAt: new Date().toISOString(),
258
+ pollInterval: 60000,
259
+ maxWaitTime: 3600000,
260
+ },
261
+ });
262
+
263
+ const result = await batchStage.execute(ctx);
264
+
265
+ expect(result.suspended).toBeUndefined();
266
+ expect(result.output.items).toEqual(["processed-a", "processed-b"]);
267
+ });
268
+ ```
269
+
270
+ ### Testing checkCompletion
271
+
272
+ ```typescript
273
+ import { vi } from "vitest";
274
+
275
+ describe("checkCompletion", () => {
276
+ it("should return not ready when batch is processing", async () => {
277
+ // Mock batch API
278
+ vi.spyOn(batchProvider, "getStatus").mockResolvedValue({
279
+ status: "processing",
280
+ });
281
+
282
+ const result = await batchStage.checkCompletion(
283
+ { batchId: "batch-123", submittedAt: "...", pollInterval: 60000, maxWaitTime: 3600000 },
284
+ { workflowRunId: "test-run", stageId: "batch-stage", config: {}, log: async () => {}, storage: persistence.createStorage("test-run") }
285
+ );
286
+
287
+ expect(result.ready).toBe(false);
288
+ expect(result.nextCheckIn).toBe(60000);
289
+ });
290
+
291
+ it("should return results when batch is completed", async () => {
292
+ vi.spyOn(batchProvider, "getStatus").mockResolvedValue({ status: "completed" });
293
+ vi.spyOn(batchProvider, "getResults").mockResolvedValue([
294
+ { id: "req-1", result: "processed", inputTokens: 100, outputTokens: 50, status: "succeeded" },
295
+ ]);
296
+
297
+ const result = await batchStage.checkCompletion(
298
+ { batchId: "batch-123", submittedAt: "...", pollInterval: 60000, maxWaitTime: 3600000 },
299
+ createCheckContext()
300
+ );
301
+
302
+ expect(result.ready).toBe(true);
303
+ expect(result.output.items).toHaveLength(1);
304
+ });
305
+
306
+ it("should return error when batch fails", async () => {
307
+ vi.spyOn(batchProvider, "getStatus").mockResolvedValue({ status: "failed" });
308
+
309
+ const result = await batchStage.checkCompletion(
310
+ { batchId: "batch-123", submittedAt: "...", pollInterval: 60000, maxWaitTime: 3600000 },
311
+ createCheckContext()
312
+ );
313
+
314
+ expect(result.ready).toBe(false);
315
+ expect(result.error).toBeDefined();
316
+ });
317
+ });
318
+ ```
319
+
320
+ ## Integration Testing Workflows
321
+
322
+ ### Testing Complete Workflow
323
+
324
+ ```typescript
325
+ import { WorkflowExecutor } from "@bratsos/workflow-engine";
326
+
327
+ describe("documentWorkflow integration", () => {
328
+ let persistence: TestWorkflowPersistence;
329
+ let mockAI: MockAIHelper;
330
+
331
+ beforeEach(() => {
332
+ persistence = new TestWorkflowPersistence();
333
+ mockAI = new MockAIHelper();
334
+
335
+ // Setup mock responses for all AI calls
336
+ mockAI.mockGenerateObject({ title: "Test", sections: [] });
337
+ mockAI.mockGenerateObject({ categories: ["tech"] });
338
+ mockAI.mockGenerateText("This is a summary.");
339
+ });
340
+
341
+ it("should execute complete workflow", async () => {
342
+ // Create run record
343
+ const run = await persistence.createRun({
344
+ workflowId: "document-analysis",
345
+ workflowName: "Document Analysis",
346
+ workflowType: "document-analysis",
347
+ input: { documentUrl: "https://example.com/doc.pdf" },
348
+ });
349
+
350
+ // Create executor
351
+ const executor = new WorkflowExecutor(
352
+ documentWorkflow,
353
+ run.id,
354
+ "document-analysis",
355
+ {
356
+ persistence,
357
+ aiLogger: createMockAILogger(),
358
+ }
359
+ );
360
+
361
+ // Execute
362
+ const result = await executor.execute(
363
+ { documentUrl: "https://example.com/doc.pdf" },
364
+ documentWorkflow.getDefaultConfig()
365
+ );
366
+
367
+ // Verify
368
+ expect(result.status).toBe("COMPLETED");
369
+
370
+ // Check stages were created
371
+ const stages = await persistence.getStagesByRun(run.id);
372
+ expect(stages.length).toBe(4);
373
+ expect(stages.every(s => s.status === "COMPLETED")).toBe(true);
374
+ });
375
+ });
376
+ ```
377
+
378
+ ### Testing with Runtime
379
+
380
+ ```typescript
381
+ describe("workflow with runtime", () => {
382
+ let runtime: WorkflowRuntime;
383
+ let persistence: TestWorkflowPersistence;
384
+ let jobQueue: TestJobQueue;
385
+
386
+ beforeEach(() => {
387
+ persistence = new TestWorkflowPersistence();
388
+ jobQueue = new TestJobQueue();
389
+
390
+ runtime = createWorkflowRuntime({
391
+ persistence,
392
+ jobQueue,
393
+ registry: { getWorkflow: (id) => workflows[id] ?? null },
394
+ pollIntervalMs: 100,
395
+ jobPollIntervalMs: 50,
396
+ });
397
+ });
398
+
399
+ afterEach(() => {
400
+ runtime.stop();
401
+ });
402
+
403
+ it("should process workflow through runtime", async () => {
404
+ // Create run
405
+ const { workflowRunId } = await runtime.createRun({
406
+ workflowId: "simple-workflow",
407
+ input: { data: "test" },
408
+ });
409
+
410
+ // Start runtime (in test mode)
411
+ await runtime.start();
412
+
413
+ // Wait for completion
414
+ await waitFor(async () => {
415
+ const run = await persistence.getRun(workflowRunId);
416
+ return run?.status === "COMPLETED";
417
+ }, { timeout: 5000 });
418
+
419
+ // Verify
420
+ const run = await persistence.getRun(workflowRunId);
421
+ expect(run?.status).toBe("COMPLETED");
422
+ });
423
+ });
424
+ ```
425
+
426
+ ## Test Helpers
427
+
428
+ ### Creating Test Context
429
+
430
+ ```typescript
431
+ function createTestContext<TInput, TConfig>(
432
+ overrides: Partial<{
433
+ input: TInput;
434
+ config: TConfig;
435
+ workflowContext: Record<string, unknown>;
436
+ resumeState: unknown;
437
+ ai: MockAIHelper;
438
+ }> = {}
439
+ ) {
440
+ const persistence = new TestWorkflowPersistence();
441
+
442
+ return {
443
+ input: overrides.input ?? {},
444
+ config: overrides.config ?? {},
445
+ workflowContext: overrides.workflowContext ?? {},
446
+ workflowRunId: "test-run",
447
+ stageId: "test-stage",
448
+ resumeState: overrides.resumeState,
+ ai: overrides.ai,
449
+ log: async (level: string, message: string) => {
450
+ console.log(`[${level}] ${message}`);
451
+ },
452
+ storage: persistence.createStorage("test-run"),
453
+ require: (id: string) => {
454
+ const output = overrides.workflowContext?.[id];
455
+ if (output == null) throw new Error(`Missing required stage: ${id}`);
456
+ return output;
457
+ },
458
+ optional: (id: string) => overrides.workflowContext?.[id],
459
+ };
460
+ }
461
+ ```
462
+
463
+ ### Wait Helper
464
+
465
+ ```typescript
466
+ async function waitFor(
467
+ condition: () => Promise<boolean>,
468
+ options: { timeout?: number; interval?: number } = {}
469
+ ): Promise<void> {
470
+ const { timeout = 5000, interval = 100 } = options;
471
+ const start = Date.now();
472
+
473
+ while (Date.now() - start < timeout) {
474
+ if (await condition()) return;
475
+ await new Promise(r => setTimeout(r, interval));
476
+ }
477
+
478
+ throw new Error(`Condition not met within ${timeout}ms`);
479
+ }
480
+ ```
481
+
482
+ ### Mock AI Logger
483
+
484
+ ```typescript
485
+ function createMockAILogger(): AICallLogger {
486
+ const calls: CreateAICallInput[] = [];
487
+
488
+ return {
489
+ logCall: (call) => { calls.push(call); },
490
+ logBatchResults: async (batchId, results) => { calls.push(...results); },
491
+ getStats: async (topic) => ({
492
+ totalCalls: calls.filter(c => c.topic.startsWith(topic)).length,
493
+ totalInputTokens: 0,
494
+ totalOutputTokens: 0,
495
+ totalCost: 0,
496
+ perModel: {},
497
+ }),
498
+ isRecorded: async (batchId) => calls.some(c => c.metadata?.batchId === batchId),
499
+ };
500
+ }
501
+ ```
502
+
503
+ ## Best Practices
504
+
505
+ ### 1. Isolate Tests
506
+
507
+ ```typescript
508
+ beforeEach(() => {
509
+ persistence.reset();
510
+ jobQueue.reset();
511
+ mockAI.reset();
512
+ });
513
+ ```
514
+
515
+ ### 2. Test Edge Cases
516
+
517
+ ```typescript
518
+ it("should handle empty input", async () => { ... });
519
+ it("should handle missing dependencies", async () => { ... });
520
+ it("should handle AI errors", async () => { ... });
521
+ it("should handle timeout", async () => { ... });
522
+ ```
523
+
524
+ ### 3. Use Type-Safe Mocks
525
+
526
+ ```typescript
527
+ // Bad: loosely typed
528
+ const ctx = { input: {}, config: {} } as any;
529
+
530
+ // Good: properly typed
531
+ const ctx = createTestContext<InputType, ConfigType>({
532
+ input: { data: "test" },
533
+ config: { verbose: true },
534
+ });
535
+ ```
536
+
537
+ ### 4. Test Metrics and Artifacts
538
+
539
+ ```typescript
540
+ it("should record metrics", async () => {
541
+ const result = await stage.execute(ctx);
542
+
543
+ expect(result.customMetrics?.itemsProcessed).toBe(10);
544
+ expect(result.artifacts?.rawData).toBeDefined();
545
+ });
546
+ ```