@qianxude/tem 0.3.0 → 0.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
1
1
  import * as i from '../interfaces/index.js';
2
- import { TaskService } from '../services/task.js';
2
+ import { TaskService, BatchInterruptionService } from '../services/index.js';
3
3
  import { ConcurrencyController, RateLimiter, type RateLimitConfig } from '../utils/index.js';
4
4
 
5
5
  /**
@@ -18,6 +18,10 @@ export interface WorkerConfig {
18
18
  concurrency: number;
19
19
  pollIntervalMs: number;
20
20
  rateLimit?: RateLimitConfig;
21
+ /** Specific batch ID to process (optional - if set, only processes this batch) */
22
+ batchId?: string;
23
+ /** Interruption service for checking batch status */
24
+ interruptionService?: BatchInterruptionService;
21
25
  }
22
26
 
23
27
  export class Worker {
@@ -28,6 +32,13 @@ export class Worker {
28
32
  private pollIntervalMs: number;
29
33
  private abortController: AbortController;
30
34
  private inFlightTasks: Set<Promise<void>> = new Set();
35
+ private batchId?: string;
36
+ private interruptionService?: BatchInterruptionService;
37
+
38
+ // Track failure context for interruption decisions
39
+ private consecutiveFailures = 0;
40
+ private rateLimitHits = 0;
41
+ private concurrencyErrors = 0;
31
42
 
32
43
  constructor(
33
44
  private taskService: TaskService,
@@ -36,6 +47,8 @@ export class Worker {
36
47
  this.concurrency = new ConcurrencyController(config.concurrency);
37
48
  this.pollIntervalMs = config.pollIntervalMs;
38
49
  this.abortController = new AbortController();
50
+ this.batchId = config.batchId;
51
+ this.interruptionService = config.interruptionService;
39
52
 
40
53
  if (config.rateLimit) {
41
54
  this.rateLimiter = new RateLimiter(config.rateLimit);
@@ -93,8 +106,18 @@ export class Worker {
93
106
  break;
94
107
  }
95
108
 
109
+ // For batch-specific workers: check batch is still active
110
+ if (this.batchId && this.interruptionService) {
111
+ const isActive = await this.interruptionService.isBatchActive(this.batchId);
112
+ if (!isActive) {
113
+ this.concurrency.release();
114
+ this.stop();
115
+ break;
116
+ }
117
+ }
118
+
96
119
  // Claim a task while holding the concurrency slot
97
- const task = await this.taskService.claim();
120
+ const task = await this.taskService.claim(this.batchId);
98
121
 
99
122
  if (!task) {
100
123
  // No task available, release the slot and sleep
@@ -123,6 +146,8 @@ export class Worker {
123
146
  * Note: Assumes concurrency slot has already been acquired.
124
147
  */
125
148
  private async execute(task: i.Task): Promise<void> {
149
+ const taskStartTime = Date.now();
150
+
126
151
  try {
127
152
  if (this.rateLimiter) {
128
153
  await this.rateLimiter.acquire();
@@ -134,6 +159,8 @@ export class Worker {
134
159
  }
135
160
 
136
161
  const payload = JSON.parse(task.payload);
162
+
163
+ // Build context with optional deadline
137
164
  const context: i.TaskContext = {
138
165
  taskId: task.id,
139
166
  batchId: task.batchId,
@@ -141,10 +168,22 @@ export class Worker {
141
168
  signal: this.abortController.signal,
142
169
  };
143
170
 
171
+ // If we have interruption service and batchId, set deadline from criteria
172
+ if (this.interruptionService && task.batchId) {
173
+ const { criteria } = await this.interruptionService['batchService'].getWithCriteria(task.batchId);
174
+ if (criteria?.taskTimeoutMs) {
175
+ context.deadline = new Date(taskStartTime + criteria.taskTimeoutMs);
176
+ }
177
+ }
178
+
144
179
  const result = await handler(payload, context);
145
180
  await this.taskService.complete(task.id, result);
181
+
182
+ // Reset consecutive failures on success
183
+ this.consecutiveFailures = 0;
146
184
  } catch (error) {
147
- await this.handleError(task, error);
185
+ const taskRuntimeMs = Date.now() - taskStartTime;
186
+ await this.handleError(task, error, taskRuntimeMs);
148
187
  } finally {
149
188
  this.concurrency.release();
150
189
  }
@@ -153,16 +192,49 @@ export class Worker {
153
192
  /**
154
193
  * Handle task execution errors.
155
194
  */
156
- private async handleError(task: i.Task, error: unknown): Promise<void> {
195
+ private async handleError(task: i.Task, error: unknown, taskRuntimeMs?: number): Promise<void> {
157
196
  const isRetryable = !(error instanceof NonRetryableError);
158
197
  const shouldRetry = isRetryable && task.attempt < task.maxAttempt;
159
198
 
199
+ // Track failure type for interruption decisions
200
+ const errorMessage = error instanceof Error ? error.message : String(error);
201
+ if (this.isRateLimitError(errorMessage)) {
202
+ this.rateLimitHits++;
203
+ } else if (this.isConcurrencyError(errorMessage)) {
204
+ this.concurrencyErrors++;
205
+ }
206
+
160
207
  if (shouldRetry) {
161
208
  // Reset to pending for automatic retry (attempt already incremented by claim)
162
209
  await this.taskService.retry(task.id);
210
+ this.consecutiveFailures++;
163
211
  } else {
164
- const message = error instanceof Error ? error.message : String(error);
165
- await this.taskService.fail(task.id, message);
212
+ await this.taskService.fail(task.id, errorMessage);
213
+ this.consecutiveFailures++;
214
+
215
+ // Check if batch should be interrupted
216
+ if (task.batchId && this.interruptionService) {
217
+ const interrupted = await this.interruptionService.checkAndInterruptIfNeeded(
218
+ task.batchId,
219
+ {
220
+ consecutiveFailures: this.consecutiveFailures,
221
+ rateLimitHits: this.rateLimitHits,
222
+ concurrencyErrors: this.concurrencyErrors,
223
+ currentTaskRuntimeMs: taskRuntimeMs,
224
+ }
225
+ );
226
+ if (interrupted) {
227
+ this.stop();
228
+ }
229
+ }
166
230
  }
167
231
  }
232
+
233
+ private isRateLimitError(message: string): boolean {
234
+ return message.includes('429') || message.toLowerCase().includes('rate limit');
235
+ }
236
+
237
+ private isConcurrencyError(message: string): boolean {
238
+ return message.includes('502') || message.includes('503') || message.toLowerCase().includes('bad gateway') || message.toLowerCase().includes('service unavailable');
239
+ }
168
240
  }
@@ -33,13 +33,20 @@ export class Database implements i.DatabaseConnection {
33
33
  )
34
34
  `);
35
35
 
36
- // Check if initial schema needs to be applied
37
- const migrationCount = this.db
38
- .query('SELECT COUNT(*) as count FROM _migration WHERE name = $name')
39
- .get({ $name: '001_initial_schema' }) as { count: number };
40
-
41
- if (migrationCount.count === 0) {
42
- this.applyInitialSchema();
36
+ // Check and apply migrations in order
37
+ const migrations = [
38
+ { name: '001_initial_schema', apply: () => this.applyInitialSchema() },
39
+ { name: '002_batch_interruption', apply: () => this.applyBatchInterruptionMigration() },
40
+ ];
41
+
42
+ for (const migration of migrations) {
43
+ const migrationCount = this.db
44
+ .query('SELECT COUNT(*) as count FROM _migration WHERE name = $name')
45
+ .get({ $name: migration.name }) as { count: number };
46
+
47
+ if (migrationCount.count === 0) {
48
+ migration.apply();
49
+ }
43
50
  }
44
51
  }
45
52
 
@@ -90,6 +97,39 @@ export class Database implements i.DatabaseConnection {
90
97
  });
91
98
  }
92
99
 
100
+ private applyBatchInterruptionMigration(): void {
101
+ const migration = `
102
+ -- Add status to batch table
103
+ ALTER TABLE batch ADD COLUMN status TEXT NOT NULL DEFAULT 'active'
104
+ CHECK(status IN ('active', 'interrupted', 'completed'));
105
+
106
+ -- Add interruption criteria storage (JSON)
107
+ ALTER TABLE batch ADD COLUMN interruption_criteria TEXT;
108
+
109
+ -- Index for quickly finding active batches
110
+ CREATE INDEX IF NOT EXISTS idx_batch_status ON batch(status);
111
+
112
+ -- New table: interruption log
113
+ CREATE TABLE IF NOT EXISTS batch_interrupt_log (
114
+ id TEXT PRIMARY KEY,
115
+ batch_id TEXT NOT NULL REFERENCES batch(id) ON DELETE CASCADE,
116
+ reason TEXT NOT NULL,
117
+ message TEXT NOT NULL,
118
+ stats_snapshot TEXT NOT NULL, -- JSON of BatchStats
119
+ created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
120
+ );
121
+
122
+ CREATE INDEX IF NOT EXISTS idx_interrupt_log_batch_id ON batch_interrupt_log(batch_id);
123
+ `;
124
+
125
+ this.transaction(() => {
126
+ this.db.exec(migration);
127
+ this.db
128
+ .query('INSERT INTO _migration (name) VALUES ($name)')
129
+ .run({ $name: '002_batch_interruption' });
130
+ });
131
+ }
132
+
93
133
  query<T = unknown>(sql: string, params?: SQLQueryBindings[]): T[] {
94
134
  const stmt = this.db.prepare(sql);
95
135
  const results = stmt.all(...(params ?? []));
package/src/index.ts CHANGED
@@ -1,7 +1,7 @@
1
1
  // Main exports for TEM framework
2
2
  export * as interfaces from './interfaces/index.js';
3
3
  export { Database, type DatabaseOptions } from './database/index.js';
4
- export { BatchService, TaskService } from './services/index.js';
4
+ export { BatchService, TaskService, BatchInterruptionService } from './services/index.js';
5
5
  export {
6
6
  ConcurrencyController,
7
7
  RateLimiter,
@@ -6,18 +6,48 @@
6
6
  // ============================================================================
7
7
 
8
8
  export type TaskStatus = 'pending' | 'running' | 'completed' | 'failed';
9
+ export type BatchStatus = 'active' | 'interrupted' | 'completed';
10
+
11
+ export type BatchInterruptionReason =
12
+ | 'error_rate_exceeded'
13
+ | 'failed_tasks_exceeded'
14
+ | 'consecutive_failures_exceeded'
15
+ | 'rate_limit_hits_exceeded'
16
+ | 'concurrency_errors_exceeded'
17
+ | 'task_timeout'
18
+ | 'batch_runtime_exceeded'
19
+ | 'manual';
9
20
 
10
21
  // ============================================================================
11
22
  // Entity Types
12
23
  // ============================================================================
13
24
 
25
+ export interface BatchInterruptionCriteria {
26
+ /** Max error rate (0-1, e.g., 0.1 = 10%) */
27
+ maxErrorRate?: number;
28
+ /** Max absolute number of failed tasks */
29
+ maxFailedTasks?: number;
30
+ /** Max consecutive failures before interruption */
31
+ maxConsecutiveFailures?: number;
32
+ /** Max rate limit (429) hits before interruption */
33
+ maxRateLimitHits?: number;
34
+ /** Max concurrency errors (502/503) before interruption - indicates too aggressive concurrency */
35
+ maxConcurrencyErrors?: number;
36
+ /** Max runtime for a single task in ms */
37
+ taskTimeoutMs?: number;
38
+ /** Max total batch runtime in ms */
39
+ maxBatchRuntimeMs?: number;
40
+ }
41
+
14
42
  export interface Batch {
15
43
  id: string;
16
44
  code: string;
17
45
  type: string;
46
+ status: BatchStatus;
18
47
  createdAt: Date;
19
48
  completedAt: Date | null;
20
49
  metadata: Record<string, unknown> | null;
50
+ interruptionCriteria: BatchInterruptionCriteria | null;
21
51
  }
22
52
 
23
53
  export interface BatchStats {
@@ -29,6 +59,14 @@ export interface BatchStats {
29
59
  failed: number;
30
60
  }
31
61
 
62
+ export interface BatchInterruption {
63
+ batchId: string;
64
+ reason: BatchInterruptionReason;
65
+ message: string;
66
+ statsAtInterruption: BatchStats;
67
+ createdAt: Date;
68
+ }
69
+
32
70
  export interface Task {
33
71
  id: string;
34
72
  batchId: string | null;
@@ -88,6 +126,8 @@ export interface DetectOptions {
88
126
  maxConcurrencyToTest?: number;
89
127
  /** Duration to run rate limit tests (default: 10000) */
90
128
  rateLimitTestDurationMs?: number;
129
+ /** Maximum number of requests to send during rate limit detection (default: 200) */
130
+ maxRateLimitTestRequests?: number;
91
131
  }
92
132
 
93
133
  export interface DetectedConfig {
@@ -118,6 +158,8 @@ export interface TaskContext {
118
158
  batchId: string | null;
119
159
  attempt: number;
120
160
  signal: AbortSignal;
161
+ /** Deadline for task execution (for timeout enforcement) */
162
+ deadline?: Date;
121
163
  }
122
164
 
123
165
  // ============================================================================
@@ -144,6 +186,7 @@ export interface CreateBatchInput {
144
186
  code: string;
145
187
  type: string;
146
188
  metadata?: Record<string, unknown>;
189
+ interruptionCriteria?: BatchInterruptionCriteria;
147
190
  }
148
191
 
149
192
  export interface CreateTaskInput {
@@ -162,6 +205,23 @@ export interface BatchService {
162
205
  complete(id: string): Promise<void>;
163
206
  resume(id: string): Promise<number>;
164
207
  retryFailed(id: string): Promise<number>;
208
+ updateStatus(id: string, status: BatchStatus): Promise<void>;
209
+ getWithCriteria(id: string): Promise<{ batch: Batch; criteria: BatchInterruptionCriteria | null }>;
210
+ }
211
+
212
+ export interface BatchInterruptionService {
213
+ checkAndInterruptIfNeeded(
214
+ batchId: string,
215
+ context: {
216
+ consecutiveFailures?: number;
217
+ rateLimitHits?: number;
218
+ concurrencyErrors?: number;
219
+ currentTaskRuntimeMs?: number;
220
+ }
221
+ ): Promise<boolean>;
222
+ interrupt(batchId: string, reason: BatchInterruptionReason, message: string): Promise<void>;
223
+ isBatchActive(batchId: string): Promise<boolean>;
224
+ getInterruptionLog(batchId: string): Promise<BatchInterruption[]>;
165
225
  }
166
226
 
167
227
  export interface TaskService {
@@ -1,14 +1,15 @@
1
1
  # Mock Server
2
2
 
3
- A lightweight HTTP server for simulating external API services with configurable concurrency and rate limiting constraints. Used for testing TEM's task execution capabilities under various load conditions.
3
+ A lightweight HTTP server for simulating external API services with configurable concurrency, rate limiting, and error simulation. Used for testing TEM's task execution capabilities under various load conditions.
4
4
 
5
5
  ## Overview
6
6
 
7
7
  The mock server provides a controlled environment to test how TEM handles:
8
8
 
9
- - **Concurrency limits** - Simulate services that reject requests when too many are in flight
10
- - **Rate limiting** - Test backoff and retry behavior against rate-limited endpoints
11
- - **Processing delays** - Verify timeout handling and async processing
9
- **Concurrency limits** — Simulate services that reject requests when too many are in flight (503 errors)
10
- **Rate limiting** — Test backoff and retry behavior against rate-limited endpoints (429 errors)
11
- **Error simulation** — Verify resilience with configurable random failure rates
12
+ - **Processing delays** — Verify timeout handling and async processing
12
13
 
13
14
  ## Architecture
14
15
 
@@ -30,6 +31,8 @@ The mock server provides a controlled environment to test how TEM handles:
30
31
  | Component | File | Purpose |
31
32
  |-----------|------|---------|
32
33
  | `startMockServer` | `server.ts` | Server lifecycle management |
34
+ | `createMockService` | `server.ts` | Client helper to create services |
35
+ | `createErrorSimulation` | `server.ts` | Helper to create error simulation config |
33
36
  | `createRouter` | `router.ts` | HTTP routing and request handling |
34
37
  | `MockService` | `service.ts` | Per-service concurrency and rate limiting |
35
38
  | `RejectingRateLimiter` | `service.ts` | Token bucket rate limiter with immediate reject |
@@ -49,7 +52,8 @@ startMockServer({
49
52
  defaultService: {
50
53
  maxConcurrency: 3,
51
54
  rateLimit: { limit: 10, windowMs: 1000 },
52
- delayMs: [10, 50]
55
+ delayMs: [10, 50],
56
+ errorSimulation: { rate: 0.1, statusCode: 500 }
53
57
  }
54
58
  });
55
59
  ```
@@ -61,10 +65,7 @@ Dynamic service creation and management. Each service has its own concurrency/ra
61
65
  **Use case:** Complex tests with multiple services having different constraints.
62
66
 
63
67
  ```typescript
64
- startMockServer({
65
- port: 8080,
66
- mode: 'multi'
67
- });
68
+ startMockServer({ port: 8080, mode: 'multi' });
68
69
 
69
70
  // Create services dynamically via HTTP API
70
71
  ```
@@ -75,7 +76,7 @@ startMockServer({
75
76
 
76
77
  | Method | Path | Description |
77
78
  |--------|------|-------------|
78
- | `GET` | `/` | Access the default service |
79
+ | `GET` / `POST` | `/` | Access the default service |
79
80
  | `POST` | `/shutdown` | Shutdown the server |
80
81
 
81
82
  ### Multi Mode Endpoints
@@ -84,7 +85,7 @@ startMockServer({
84
85
  |--------|------|-------------|
85
86
  | `POST` | `/service/:name` | Create or replace a service |
86
87
  | `DELETE` | `/service/:name` | Delete a service |
87
- | `GET` | `/mock/:name` | Access a service |
88
+ | `GET` / `POST` | `/mock/:name` | Access a service |
88
89
  | `POST` | `/shutdown` | Shutdown the server |
89
90
 
90
91
  ### Create Service (Multi Mode Only)
@@ -99,7 +100,12 @@ Content-Type: application/json
99
100
  "limit": 10, // Requests per window (required)
100
101
  "windowMs": 1000 // Window size in ms (required)
101
102
  },
102
- "delayMs": [10, 200] // [min, max] processing delay (optional, default: [10, 200])
103
+ "delayMs": [10, 200], // [min, max] processing delay (optional, default: [10, 200])
104
+ "errorSimulation": { // Optional error simulation config
105
+ "rate": 0.1, // Error rate 0-1 (10% = 0.1)
106
+ "statusCode": 503, // HTTP status to return (default: 500)
107
+ "errorMessage": "simulated_error" // Error message (default: "internal_server_error")
108
+ }
103
109
  }
104
110
  ```
105
111
 
@@ -114,11 +120,30 @@ Content-Type: application/json
114
120
  }
115
121
  ```
116
122
 
123
+ ### Delete Service (Multi Mode Only)
124
+
125
+ ```http
126
+ DELETE /service/:name
127
+ ```
128
+
129
+ **Response:**
130
+ ```http
131
+ HTTP/1.1 200 OK
132
+ Content-Type: application/json
133
+
134
+ {
135
+ "service": "test1",
136
+ "status": "deleted"
137
+ }
138
+ ```
139
+
117
140
  ### Access Service
118
141
 
119
142
  ```http
120
143
  GET /mock/:name # Multi mode
144
+ POST /mock/:name # Multi mode (body ignored)
121
145
  GET / # Single mode
146
+ POST / # Single mode (body ignored)
122
147
  ```
123
148
 
124
149
  **Success Response (200):**
@@ -152,6 +177,8 @@ Content-Type: application/json
152
177
  }
153
178
  ```
154
179
 
180
+ The server will stop accepting new connections and exit the process after a brief delay.
181
+
155
182
  ## Error Responses
156
183
 
157
184
  | Status | Error Code | Description |
@@ -163,6 +190,21 @@ Content-Type: application/json
163
190
  | `429` | `rate_limit_exceeded` | Rate limit reached |
164
191
  | `503` | `concurrency_limit_exceeded` | Concurrency limit reached |
165
192
 
193
+ ### Error Simulation
194
+
195
+ When `errorSimulation` is configured, requests may randomly fail with:
196
+
197
+ ```http
198
+ HTTP/1.1 500 Internal Server Error // Or configured statusCode
199
+ Content-Type: application/json
200
+
201
+ {
202
+ "error": "internal_server_error" // Or configured errorMessage
203
+ }
204
+ ```
205
+
206
+ The error is checked **before** acquiring concurrency/rate limit resources, so simulated errors don't count against limits.
207
+
166
208
  ## Configuration
167
209
 
168
210
  ### ServerConfig
@@ -185,6 +227,21 @@ interface ServiceConfig {
185
227
  windowMs: number; // Window duration in milliseconds
186
228
  };
187
229
  delayMs: [number, number]; // [min, max] simulated processing delay
230
+ errorSimulation?: { // Optional error simulation
231
+ rate: number; // Error rate 0-1
232
+ statusCode?: number; // HTTP status code (default: 500)
233
+ errorMessage?: string; // Error message (default: "internal_server_error")
234
+ };
235
+ }
236
+ ```
237
+
238
+ ### ErrorSimulationConfig
239
+
240
+ ```typescript
241
+ interface ErrorSimulationConfig {
242
+ rate: number; // Error rate 0-1 (e.g., 0.1 = 10% error rate)
243
+ statusCode?: number; // HTTP status code to return (default: 500)
244
+ errorMessage?: string; // Error message (default: "internal_server_error")
188
245
  }
189
246
  ```
190
247
 
@@ -315,6 +372,29 @@ console.log(r3.status); // 429
315
372
  console.log(await r3.json()); // { error: 'rate_limit_exceeded' }
316
373
  ```
317
374
 
375
+ ### Testing Error Simulation
376
+
377
+ ```typescript
378
+ import { startMockServer, stopMockServer, createErrorSimulation } from './src/mock-server';
379
+
380
+ startMockServer({
381
+ port: 8080,
382
+ mode: 'single',
383
+ defaultService: {
384
+ maxConcurrency: 10,
385
+ rateLimit: { limit: 100, windowMs: 1000 },
386
+ delayMs: [10, 50],
387
+ errorSimulation: createErrorSimulation(0.3, 503, "service_unavailable")
388
+ }
389
+ });
390
+
391
+ // Approximately 30% of requests will fail with 503
392
+ for (let i = 0; i < 10; i++) {
393
+ const res = await fetch('http://localhost:8080/');
394
+ console.log(res.status); // Mix of 200 and 503
395
+ }
396
+ ```
397
+
318
398
  ## Rate Limiting Algorithm
319
399
 
320
400
  The mock server uses a **token bucket** algorithm for rate limiting:
@@ -332,15 +412,31 @@ Concurrency is tracked per-service:
332
412
  - Decrements when request completes (in `finally` block)
333
413
  - If `currentConcurrency >= maxConcurrency`, new requests get 503
334
414
 
415
+ ## Error Simulation
416
+
417
+ Error simulation is checked before acquiring resources:
418
+
419
+ ```typescript
420
+ // Pseudocode
421
+ if (Math.random() < errorSimulation.rate) {
422
+ return errorResponse; // Doesn't consume concurrency/rate limit tokens
423
+ }
424
+ // Continue with normal request handling...
425
+ ```
426
+
427
+ This ensures that simulated errors don't deplete your concurrency slots or rate limit budget.
428
+
335
429
  ## Programmatic API
336
430
 
431
+ ### Server Lifecycle
432
+
337
433
  ```typescript
338
434
  import { startMockServer, stopMockServer, getServerState } from './src/mock-server';
339
435
 
340
436
  // Start server
341
437
  startMockServer(config: ServerConfig): void
342
438
 
343
- // Stop server programmatically
439
+ // Stop server programmatically (for testing)
344
440
  stopMockServer(): void
345
441
 
346
442
  // Get current state (for testing)
@@ -350,3 +446,74 @@ getServerState(): {
350
446
  hasDefaultService: boolean;
351
447
  }
352
448
  ```
449
+
450
+ ### Client Helpers
451
+
452
+ ```typescript
453
+ import { createMockService, createErrorSimulation } from './src/mock-server';
454
+
455
+ // Create a service programmatically (wraps HTTP call)
456
+ createMockService(
457
+ name: string,
458
+ config: CreateServiceRequest,
459
+ mockUrl?: string // defaults to http://localhost:19999
460
+ ): Promise<Response>
461
+
462
+ // Create error simulation config with validation
463
+ createErrorSimulation(
464
+ rate: number, // 0-1 error rate
465
+ statusCode?: number, // HTTP status (default: 500)
466
+ errorMessage?: string // Error message
467
+ ): ErrorSimulationConfig
468
+ ```
469
+
470
+ Example using client helpers:
471
+
472
+ ```typescript
473
+ import { createMockService, createErrorSimulation } from './src/mock-server';
474
+
475
+ // Create service with error simulation
476
+ await createMockService('flaky-api', {
477
+ maxConcurrency: 5,
478
+ rateLimit: { limit: 10, windowMs: 1000 },
479
+ delayMs: [50, 100],
480
+ errorSimulation: createErrorSimulation(0.2, 503)
481
+ });
482
+
483
+ // Use the service
484
+ const res = await fetch('http://localhost:19999/mock/flaky-api');
485
+ ```
486
+
487
+ ## Testing with TEM
488
+
489
+ The mock server is designed to integrate seamlessly with TEM for testing retry and error handling:
490
+
491
+ ```typescript
492
+ import { TEM } from '@qianxude/tem';
493
+ import { startMockServer, createMockService, createErrorSimulation } from './src/mock-server';
494
+
495
+ // Start mock server with flaky service
496
+ startMockServer({ port: 19999, mode: 'multi' });
497
+
498
+ await createMockService('api', {
499
+ maxConcurrency: 3,
500
+ rateLimit: { limit: 10, windowMs: 1000 },
501
+ errorSimulation: createErrorSimulation(0.2) // 20% failure rate
502
+ });
503
+
504
+ // Configure TEM to match mock server limits
505
+ const tem = new TEM({
506
+ databasePath: ':memory:',
507
+ concurrency: 3,
508
+ rateLimit: { requests: 10, windowMs: 1000 }
509
+ });
510
+
511
+ // Register handler that calls mock server
512
+ tem.worker.register('test', async (payload) => {
513
+ const res = await fetch('http://localhost:19999/mock/api');
514
+ if (!res.ok) throw new Error(`HTTP ${res.status}`);
515
+ return res.json();
516
+ });
517
+
518
+ // TEM's retry mechanism will handle the 20% failure rate
519
+ ```