ai-functions 2.0.2 → 2.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +4 -5
- package/CHANGELOG.md +38 -0
- package/LICENSE +21 -0
- package/README.md +361 -159
- package/dist/ai-promise.d.ts +47 -0
- package/dist/ai-promise.d.ts.map +1 -1
- package/dist/ai-promise.js +291 -3
- package/dist/ai-promise.js.map +1 -1
- package/dist/ai.d.ts +17 -18
- package/dist/ai.d.ts.map +1 -1
- package/dist/ai.js +93 -39
- package/dist/ai.js.map +1 -1
- package/dist/batch-map.d.ts +46 -4
- package/dist/batch-map.d.ts.map +1 -1
- package/dist/batch-map.js +35 -2
- package/dist/batch-map.js.map +1 -1
- package/dist/batch-queue.d.ts +116 -12
- package/dist/batch-queue.d.ts.map +1 -1
- package/dist/batch-queue.js +47 -2
- package/dist/batch-queue.js.map +1 -1
- package/dist/budget.d.ts +272 -0
- package/dist/budget.d.ts.map +1 -0
- package/dist/budget.js +500 -0
- package/dist/budget.js.map +1 -0
- package/dist/cache.d.ts +272 -0
- package/dist/cache.d.ts.map +1 -0
- package/dist/cache.js +412 -0
- package/dist/cache.js.map +1 -0
- package/dist/context.d.ts +32 -1
- package/dist/context.d.ts.map +1 -1
- package/dist/context.js +16 -1
- package/dist/context.js.map +1 -1
- package/dist/eval/runner.d.ts +2 -1
- package/dist/eval/runner.d.ts.map +1 -1
- package/dist/eval/runner.js.map +1 -1
- package/dist/generate.d.ts.map +1 -1
- package/dist/generate.js +6 -10
- package/dist/generate.js.map +1 -1
- package/dist/index.d.ts +27 -20
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +72 -42
- package/dist/index.js.map +1 -1
- package/dist/primitives.d.ts +17 -0
- package/dist/primitives.d.ts.map +1 -1
- package/dist/primitives.js +19 -1
- package/dist/primitives.js.map +1 -1
- package/dist/retry.d.ts +303 -0
- package/dist/retry.d.ts.map +1 -0
- package/dist/retry.js +539 -0
- package/dist/retry.js.map +1 -0
- package/dist/schema.d.ts.map +1 -1
- package/dist/schema.js +1 -9
- package/dist/schema.js.map +1 -1
- package/dist/tool-orchestration.d.ts +391 -0
- package/dist/tool-orchestration.d.ts.map +1 -0
- package/dist/tool-orchestration.js +663 -0
- package/dist/tool-orchestration.js.map +1 -0
- package/dist/types.d.ts +50 -33
- package/dist/types.d.ts.map +1 -1
- package/evalite.config.js +14 -0
- package/evals/classification.eval.js +97 -0
- package/evals/marketing.eval.js +289 -0
- package/evals/math.eval.js +83 -0
- package/evals/run-evals.js +151 -0
- package/evals/structured-output.eval.js +131 -0
- package/evals/writing.eval.js +105 -0
- package/examples/batch-blog-posts.js +128 -0
- package/package.json +26 -26
- package/src/ai-promise.ts +359 -3
- package/src/ai.ts +155 -110
- package/src/batch/anthropic.js +256 -0
- package/src/batch/bedrock.js +584 -0
- package/src/batch/cloudflare.js +287 -0
- package/src/batch/google.js +359 -0
- package/src/batch/index.js +30 -0
- package/src/batch/memory.js +187 -0
- package/src/batch/openai.js +402 -0
- package/src/batch-map.ts +46 -4
- package/src/batch-queue.ts +116 -12
- package/src/budget.ts +727 -0
- package/src/cache.ts +653 -0
- package/src/context.ts +33 -1
- package/src/eval/index.js +7 -0
- package/src/eval/models.js +119 -0
- package/src/eval/runner.js +147 -0
- package/src/eval/runner.ts +3 -2
- package/src/generate.ts +7 -12
- package/src/index.ts +231 -53
- package/src/primitives.ts +19 -1
- package/src/retry.ts +776 -0
- package/src/schema.ts +1 -10
- package/src/tool-orchestration.ts +1008 -0
- package/src/types.ts +59 -41
- package/test/ai-proxy.test.js +157 -0
- package/test/async-iterators.test.js +261 -0
- package/test/backward-compat.test.ts +147 -0
- package/test/batch-autosubmit-errors.test.ts +598 -0
- package/test/batch-background.test.js +352 -0
- package/test/batch-blog-posts.test.js +293 -0
- package/test/blog-generation.test.js +390 -0
- package/test/browse-read.test.js +480 -0
- package/test/budget-tracking.test.ts +800 -0
- package/test/cache.test.ts +712 -0
- package/test/context-isolation.test.ts +687 -0
- package/test/core-functions.test.js +490 -0
- package/test/decide.test.js +260 -0
- package/test/define.test.js +232 -0
- package/test/e2e-bedrock-manual.js +136 -0
- package/test/e2e-bedrock.test.js +164 -0
- package/test/e2e-flex-gateway.js +131 -0
- package/test/e2e-flex-manual.js +156 -0
- package/test/e2e-flex.test.js +174 -0
- package/test/e2e-google-manual.js +150 -0
- package/test/e2e-google.test.js +181 -0
- package/test/embeddings.test.js +220 -0
- package/test/evals/define-function.eval.test.js +309 -0
- package/test/evals/deterministic.eval.test.ts +376 -0
- package/test/evals/primitives.eval.test.js +360 -0
- package/test/function-types.test.js +407 -0
- package/test/generate-core.test.js +213 -0
- package/test/generate.test.js +143 -0
- package/test/generic-order.test.ts +342 -0
- package/test/implicit-batch.test.js +326 -0
- package/test/json-parse-error-handling.test.ts +463 -0
- package/test/retry.test.ts +1016 -0
- package/test/schema.test.js +96 -0
- package/test/streaming.test.ts +316 -0
- package/test/tagged-templates.test.js +240 -0
- package/test/tool-orchestration.test.ts +770 -0
- package/vitest.config.js +39 -0
|
@@ -0,0 +1,598 @@
/**
 * Tests for batch auto-submit error scenarios
 *
 * RED PHASE: These tests expose error handling gaps in BatchQueue's auto-submit feature.
 *
 * The problem: When auto-submit triggers on the maxItems threshold, errors from submit()
 * are caught and only logged (line 250: `this.submit().catch(console.error)`).
 * This means callers have no way to know the batch submission failed.
 *
 * Test scenarios:
 * - Network failure during batch submit
 * - Rate limit errors from the API
 * - Partial batch success/failure
 * - Timeout during submission
 *
 * @see primitives.org.ai-7au
 */
|
19
|
+
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'
|
|
20
|
+
import {
|
|
21
|
+
createBatch,
|
|
22
|
+
BatchQueue,
|
|
23
|
+
registerBatchAdapter,
|
|
24
|
+
type BatchAdapter,
|
|
25
|
+
type BatchItem,
|
|
26
|
+
type BatchQueueOptions,
|
|
27
|
+
type BatchResult,
|
|
28
|
+
type BatchJob,
|
|
29
|
+
type BatchSubmitResult,
|
|
30
|
+
} from '../src/batch-queue.js'
|
|
31
|
+
|
|
32
|
+
// ============================================================================
// Test Helpers
// ============================================================================
|
+
/**
|
|
37
|
+
* Create a mock adapter that fails on submit
|
|
38
|
+
*/
|
|
39
|
+
function createFailingAdapter(error: Error): BatchAdapter {
|
|
40
|
+
return {
|
|
41
|
+
async submit(): Promise<BatchSubmitResult> {
|
|
42
|
+
throw error
|
|
43
|
+
},
|
|
44
|
+
async getStatus(batchId: string): Promise<BatchJob> {
|
|
45
|
+
return {
|
|
46
|
+
id: batchId,
|
|
47
|
+
provider: 'openai',
|
|
48
|
+
status: 'failed',
|
|
49
|
+
totalItems: 0,
|
|
50
|
+
completedItems: 0,
|
|
51
|
+
failedItems: 0,
|
|
52
|
+
createdAt: new Date(),
|
|
53
|
+
}
|
|
54
|
+
},
|
|
55
|
+
async cancel(): Promise<void> {},
|
|
56
|
+
async getResults(): Promise<BatchResult[]> {
|
|
57
|
+
return []
|
|
58
|
+
},
|
|
59
|
+
async waitForCompletion(): Promise<BatchResult[]> {
|
|
60
|
+
return []
|
|
61
|
+
},
|
|
62
|
+
}
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
/**
|
|
66
|
+
* Create a mock adapter that succeeds
|
|
67
|
+
*/
|
|
68
|
+
function createSuccessAdapter(): BatchAdapter {
|
|
69
|
+
let batchCounter = 0
|
|
70
|
+
return {
|
|
71
|
+
async submit(items: BatchItem[], options: BatchQueueOptions): Promise<BatchSubmitResult> {
|
|
72
|
+
const batchId = `batch_test_${++batchCounter}`
|
|
73
|
+
const results: BatchResult[] = items.map((item) => ({
|
|
74
|
+
id: item.id,
|
|
75
|
+
customId: item.id,
|
|
76
|
+
status: 'completed' as const,
|
|
77
|
+
result: `Result for ${item.prompt}`,
|
|
78
|
+
}))
|
|
79
|
+
return {
|
|
80
|
+
job: {
|
|
81
|
+
id: batchId,
|
|
82
|
+
provider: options.provider || 'openai',
|
|
83
|
+
status: 'completed',
|
|
84
|
+
totalItems: items.length,
|
|
85
|
+
completedItems: items.length,
|
|
86
|
+
failedItems: 0,
|
|
87
|
+
createdAt: new Date(),
|
|
88
|
+
completedAt: new Date(),
|
|
89
|
+
},
|
|
90
|
+
completion: Promise.resolve(results),
|
|
91
|
+
}
|
|
92
|
+
},
|
|
93
|
+
async getStatus(batchId: string): Promise<BatchJob> {
|
|
94
|
+
return {
|
|
95
|
+
id: batchId,
|
|
96
|
+
provider: 'openai',
|
|
97
|
+
status: 'completed',
|
|
98
|
+
totalItems: 0,
|
|
99
|
+
completedItems: 0,
|
|
100
|
+
failedItems: 0,
|
|
101
|
+
createdAt: new Date(),
|
|
102
|
+
}
|
|
103
|
+
},
|
|
104
|
+
async cancel(): Promise<void> {},
|
|
105
|
+
async getResults(): Promise<BatchResult[]> {
|
|
106
|
+
return []
|
|
107
|
+
},
|
|
108
|
+
async waitForCompletion(): Promise<BatchResult[]> {
|
|
109
|
+
return []
|
|
110
|
+
},
|
|
111
|
+
}
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
/**
|
|
115
|
+
* Create an adapter that times out
|
|
116
|
+
*/
|
|
117
|
+
function createTimeoutAdapter(timeoutMs: number): BatchAdapter {
|
|
118
|
+
return {
|
|
119
|
+
async submit(): Promise<BatchSubmitResult> {
|
|
120
|
+
await new Promise((_, reject) =>
|
|
121
|
+
setTimeout(() => reject(new Error('Request timeout')), timeoutMs)
|
|
122
|
+
)
|
|
123
|
+
throw new Error('Request timeout')
|
|
124
|
+
},
|
|
125
|
+
async getStatus(batchId: string): Promise<BatchJob> {
|
|
126
|
+
return {
|
|
127
|
+
id: batchId,
|
|
128
|
+
provider: 'openai',
|
|
129
|
+
status: 'failed',
|
|
130
|
+
totalItems: 0,
|
|
131
|
+
completedItems: 0,
|
|
132
|
+
failedItems: 0,
|
|
133
|
+
createdAt: new Date(),
|
|
134
|
+
}
|
|
135
|
+
},
|
|
136
|
+
async cancel(): Promise<void> {},
|
|
137
|
+
async getResults(): Promise<BatchResult[]> {
|
|
138
|
+
return []
|
|
139
|
+
},
|
|
140
|
+
async waitForCompletion(): Promise<BatchResult[]> {
|
|
141
|
+
return []
|
|
142
|
+
},
|
|
143
|
+
}
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
/**
|
|
147
|
+
* Create an adapter that returns rate limit error
|
|
148
|
+
*/
|
|
149
|
+
function createRateLimitAdapter(): BatchAdapter {
|
|
150
|
+
return createFailingAdapter(
|
|
151
|
+
Object.assign(new Error('Rate limit exceeded'), {
|
|
152
|
+
status: 429,
|
|
153
|
+
headers: { 'retry-after': '60' }
|
|
154
|
+
})
|
|
155
|
+
)
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
// ============================================================================
// Tests
// ============================================================================
|
|
161
|
+
|
|
162
|
+
describe('Batch auto-submit error handling', () => {
|
|
163
|
+
let consoleErrorSpy: ReturnType<typeof vi.spyOn>
|
|
164
|
+
|
|
165
|
+
beforeEach(() => {
|
|
166
|
+
// Capture console.error to verify errors are logged
|
|
167
|
+
consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {})
|
|
168
|
+
})
|
|
169
|
+
|
|
170
|
+
afterEach(() => {
|
|
171
|
+
consoleErrorSpy.mockRestore()
|
|
172
|
+
})
|
|
173
|
+
|
|
174
|
+
describe('Network failure during auto-submit', () => {
|
|
175
|
+
it('should emit an error event when auto-submit fails due to network error', async () => {
|
|
176
|
+
// Register failing adapter
|
|
177
|
+
const networkError = new Error('Network connection failed')
|
|
178
|
+
registerBatchAdapter('openai', createFailingAdapter(networkError))
|
|
179
|
+
|
|
180
|
+
const errorHandler = vi.fn()
|
|
181
|
+
const batch = createBatch({
|
|
182
|
+
provider: 'openai',
|
|
183
|
+
autoSubmit: true,
|
|
184
|
+
maxItems: 3
|
|
185
|
+
})
|
|
186
|
+
|
|
187
|
+
// Subscribe to error events (this is what we expect to exist)
|
|
188
|
+
// This will fail because BatchQueue doesn't emit events
|
|
189
|
+
if ('on' in batch) {
|
|
190
|
+
(batch as BatchQueue & { on: (event: string, handler: (e: Error) => void) => void })
|
|
191
|
+
.on('error', errorHandler)
|
|
192
|
+
}
|
|
193
|
+
|
|
194
|
+
// Add items to trigger auto-submit
|
|
195
|
+
batch.add('prompt 1')
|
|
196
|
+
batch.add('prompt 2')
|
|
197
|
+
batch.add('prompt 3') // This should trigger auto-submit
|
|
198
|
+
|
|
199
|
+
// Wait for async auto-submit to complete
|
|
200
|
+
await new Promise(resolve => setTimeout(resolve, 100))
|
|
201
|
+
|
|
202
|
+
// FAILING: Currently errors are swallowed, errorHandler never called
|
|
203
|
+
// The error should be propagated to the error handler
|
|
204
|
+
expect(errorHandler).toHaveBeenCalledWith(networkError)
|
|
205
|
+
})
|
|
206
|
+
|
|
207
|
+
it('should reject pending item promises when auto-submit fails', async () => {
|
|
208
|
+
const networkError = new Error('Network connection failed')
|
|
209
|
+
registerBatchAdapter('openai', createFailingAdapter(networkError))
|
|
210
|
+
|
|
211
|
+
const batch = createBatch({
|
|
212
|
+
provider: 'openai',
|
|
213
|
+
autoSubmit: true,
|
|
214
|
+
maxItems: 3
|
|
215
|
+
})
|
|
216
|
+
|
|
217
|
+
// Get item references before auto-submit triggers
|
|
218
|
+
const item1 = batch.add('prompt 1')
|
|
219
|
+
const item2 = batch.add('prompt 2')
|
|
220
|
+
const item3 = batch.add('prompt 3') // Triggers auto-submit
|
|
221
|
+
|
|
222
|
+
// Wait for async auto-submit to complete
|
|
223
|
+
await new Promise(resolve => setTimeout(resolve, 100))
|
|
224
|
+
|
|
225
|
+
// FAILING: Items should have error status after failed auto-submit
|
|
226
|
+
// Currently they remain in 'pending' status with no indication of failure
|
|
227
|
+
expect(item1.status).toBe('failed')
|
|
228
|
+
expect(item1.error).toBe('Network connection failed')
|
|
229
|
+
expect(item2.status).toBe('failed')
|
|
230
|
+
expect(item3.status).toBe('failed')
|
|
231
|
+
})
|
|
232
|
+
|
|
233
|
+
it('should provide a way to await auto-submit completion or failure', async () => {
|
|
234
|
+
const networkError = new Error('Network connection failed')
|
|
235
|
+
registerBatchAdapter('openai', createFailingAdapter(networkError))
|
|
236
|
+
|
|
237
|
+
const batch = createBatch({
|
|
238
|
+
provider: 'openai',
|
|
239
|
+
autoSubmit: true,
|
|
240
|
+
maxItems: 3
|
|
241
|
+
})
|
|
242
|
+
|
|
243
|
+
batch.add('prompt 1')
|
|
244
|
+
batch.add('prompt 2')
|
|
245
|
+
batch.add('prompt 3') // Triggers auto-submit
|
|
246
|
+
|
|
247
|
+
// FAILING: There should be a way to await the auto-submit result
|
|
248
|
+
// Currently the submission is fire-and-forget with no way to await it
|
|
249
|
+
// Expected: batch.awaitAutoSubmit() or batch.getSubmissionPromise()
|
|
250
|
+
|
|
251
|
+
// This property should exist to allow awaiting auto-submit
|
|
252
|
+
expect('autoSubmitPromise' in batch).toBe(true)
|
|
253
|
+
|
|
254
|
+
// The promise should be available for awaiting
|
|
255
|
+
const autoSubmitPromise = (batch as BatchQueue & { autoSubmitPromise?: Promise<void> }).autoSubmitPromise
|
|
256
|
+
expect(autoSubmitPromise).toBeDefined()
|
|
257
|
+
|
|
258
|
+
// Awaiting it should surface the error
|
|
259
|
+
await expect(autoSubmitPromise).rejects.toThrow('Network connection failed')
|
|
260
|
+
})
|
|
261
|
+
})
|
|
262
|
+
|
|
263
|
+
describe('Rate limit errors during auto-submit', () => {
|
|
264
|
+
it('should expose rate limit errors to callers', async () => {
|
|
265
|
+
registerBatchAdapter('openai', createRateLimitAdapter())
|
|
266
|
+
|
|
267
|
+
const batch = createBatch({
|
|
268
|
+
provider: 'openai',
|
|
269
|
+
autoSubmit: true,
|
|
270
|
+
maxItems: 2
|
|
271
|
+
})
|
|
272
|
+
|
|
273
|
+
batch.add('prompt 1')
|
|
274
|
+
batch.add('prompt 2') // Triggers auto-submit
|
|
275
|
+
|
|
276
|
+
await new Promise(resolve => setTimeout(resolve, 100))
|
|
277
|
+
|
|
278
|
+
// FAILING: Rate limit error should be exposed to caller
|
|
279
|
+
// Currently it's only logged to console.error
|
|
280
|
+
expect(consoleErrorSpy).toHaveBeenCalled()
|
|
281
|
+
|
|
282
|
+
// There should be a way to check for submission errors
|
|
283
|
+
// This property doesn't exist - that's the gap
|
|
284
|
+
const submissionError = (batch as BatchQueue & { submissionError?: Error }).submissionError
|
|
285
|
+
expect(submissionError).toBeDefined()
|
|
286
|
+
expect(submissionError?.message).toContain('Rate limit')
|
|
287
|
+
})
|
|
288
|
+
|
|
289
|
+
it('should include retry-after information in rate limit errors', async () => {
|
|
290
|
+
registerBatchAdapter('openai', createRateLimitAdapter())
|
|
291
|
+
|
|
292
|
+
const batch = createBatch({
|
|
293
|
+
provider: 'openai',
|
|
294
|
+
autoSubmit: true,
|
|
295
|
+
maxItems: 2
|
|
296
|
+
})
|
|
297
|
+
|
|
298
|
+
batch.add('prompt 1')
|
|
299
|
+
batch.add('prompt 2')
|
|
300
|
+
|
|
301
|
+
await new Promise(resolve => setTimeout(resolve, 100))
|
|
302
|
+
|
|
303
|
+
// FAILING: Rate limit metadata should be accessible
|
|
304
|
+
const job = batch.getJob()
|
|
305
|
+
// Job should contain rate limit retry info
|
|
306
|
+
expect(job).toBeDefined()
|
|
307
|
+
expect((job as BatchJob & { retryAfter?: number })?.retryAfter).toBeDefined()
|
|
308
|
+
})
|
|
309
|
+
})
|
|
310
|
+
|
|
311
|
+
describe('Timeout during auto-submit', () => {
|
|
312
|
+
it('should handle submission timeouts gracefully', async () => {
|
|
313
|
+
registerBatchAdapter('openai', createTimeoutAdapter(50))
|
|
314
|
+
|
|
315
|
+
const batch = createBatch({
|
|
316
|
+
provider: 'openai',
|
|
317
|
+
autoSubmit: true,
|
|
318
|
+
maxItems: 2
|
|
319
|
+
})
|
|
320
|
+
|
|
321
|
+
batch.add('prompt 1')
|
|
322
|
+
batch.add('prompt 2')
|
|
323
|
+
|
|
324
|
+
// Wait for timeout to occur
|
|
325
|
+
await new Promise(resolve => setTimeout(resolve, 200))
|
|
326
|
+
|
|
327
|
+
// FAILING: Timeout error should be captured and accessible
|
|
328
|
+
expect(consoleErrorSpy).toHaveBeenCalledWith(expect.any(Error))
|
|
329
|
+
|
|
330
|
+
// Items should reflect the failure
|
|
331
|
+
const items = batch.getItems()
|
|
332
|
+
expect(items[0].status).toBe('failed')
|
|
333
|
+
expect(items[0].error).toContain('timeout')
|
|
334
|
+
})
|
|
335
|
+
})
|
|
336
|
+
|
|
337
|
+
describe('Error recovery scenarios', () => {
|
|
338
|
+
it('should allow retry after auto-submit failure', async () => {
|
|
339
|
+
const failingAdapter = createFailingAdapter(new Error('Temporary failure'))
|
|
340
|
+
registerBatchAdapter('openai', failingAdapter)
|
|
341
|
+
|
|
342
|
+
const batch = createBatch({
|
|
343
|
+
provider: 'openai',
|
|
344
|
+
autoSubmit: true,
|
|
345
|
+
maxItems: 2
|
|
346
|
+
})
|
|
347
|
+
|
|
348
|
+
batch.add('prompt 1')
|
|
349
|
+
batch.add('prompt 2') // Triggers auto-submit (fails)
|
|
350
|
+
|
|
351
|
+
await new Promise(resolve => setTimeout(resolve, 100))
|
|
352
|
+
|
|
353
|
+
// Replace with working adapter
|
|
354
|
+
registerBatchAdapter('openai', createSuccessAdapter())
|
|
355
|
+
|
|
356
|
+
// FAILING: There should be a way to retry failed auto-submit
|
|
357
|
+
// Currently once auto-submit fires and fails, the batch is stuck
|
|
358
|
+
const retry = () => {
|
|
359
|
+
if ('retry' in batch) {
|
|
360
|
+
return (batch as BatchQueue & { retry: () => Promise<void> }).retry()
|
|
361
|
+
}
|
|
362
|
+
return Promise.reject(new Error('No retry method available'))
|
|
363
|
+
}
|
|
364
|
+
|
|
365
|
+
// Should be able to retry the submission
|
|
366
|
+
await expect(retry()).resolves.not.toThrow()
|
|
367
|
+
})
|
|
368
|
+
|
|
369
|
+
it('should reset submission state on failure to allow manual submit', async () => {
|
|
370
|
+
registerBatchAdapter('openai', createFailingAdapter(new Error('Submit failed')))
|
|
371
|
+
|
|
372
|
+
const batch = createBatch({
|
|
373
|
+
provider: 'openai',
|
|
374
|
+
autoSubmit: true,
|
|
375
|
+
maxItems: 2
|
|
376
|
+
})
|
|
377
|
+
|
|
378
|
+
batch.add('prompt 1')
|
|
379
|
+
batch.add('prompt 2') // Triggers auto-submit (fails)
|
|
380
|
+
|
|
381
|
+
await new Promise(resolve => setTimeout(resolve, 100))
|
|
382
|
+
|
|
383
|
+
// Replace with working adapter
|
|
384
|
+
registerBatchAdapter('openai', createSuccessAdapter())
|
|
385
|
+
|
|
386
|
+
// FAILING: After auto-submit failure, manual submit should be possible
|
|
387
|
+
// Currently isSubmitted is set to true before the async submit completes
|
|
388
|
+
// so we can't retry with manual submit()
|
|
389
|
+
expect(batch.isSubmitted).toBe(false) // Should be false after failed auto-submit
|
|
390
|
+
|
|
391
|
+
// Manual submit should work after failed auto-submit
|
|
392
|
+
await expect(batch.submit()).resolves.toBeDefined()
|
|
393
|
+
})
|
|
394
|
+
})
|
|
395
|
+
|
|
396
|
+
describe('Partial batch failure during auto-submit', () => {
|
|
397
|
+
it('should emit error event when some items fail during auto-submit', async () => {
|
|
398
|
+
// Create adapter that fails some items
|
|
399
|
+
const partialAdapter: BatchAdapter = {
|
|
400
|
+
async submit(items: BatchItem[]): Promise<BatchSubmitResult> {
|
|
401
|
+
const results: BatchResult[] = items.map((item, i) => ({
|
|
402
|
+
id: item.id,
|
|
403
|
+
customId: item.id,
|
|
404
|
+
status: i % 2 === 0 ? 'completed' : 'failed',
|
|
405
|
+
result: i % 2 === 0 ? `Result for ${item.prompt}` : undefined,
|
|
406
|
+
error: i % 2 === 1 ? 'Processing failed' : undefined,
|
|
407
|
+
}))
|
|
408
|
+
return {
|
|
409
|
+
job: {
|
|
410
|
+
id: 'batch_partial',
|
|
411
|
+
provider: 'openai',
|
|
412
|
+
status: 'completed',
|
|
413
|
+
totalItems: items.length,
|
|
414
|
+
completedItems: results.filter(r => r.status === 'completed').length,
|
|
415
|
+
failedItems: results.filter(r => r.status === 'failed').length,
|
|
416
|
+
createdAt: new Date(),
|
|
417
|
+
},
|
|
418
|
+
completion: Promise.resolve(results),
|
|
419
|
+
}
|
|
420
|
+
},
|
|
421
|
+
async getStatus(batchId: string): Promise<BatchJob> {
|
|
422
|
+
return {
|
|
423
|
+
id: batchId,
|
|
424
|
+
provider: 'openai',
|
|
425
|
+
status: 'completed',
|
|
426
|
+
totalItems: 4,
|
|
427
|
+
completedItems: 2,
|
|
428
|
+
failedItems: 2,
|
|
429
|
+
createdAt: new Date(),
|
|
430
|
+
}
|
|
431
|
+
},
|
|
432
|
+
async cancel(): Promise<void> {},
|
|
433
|
+
async getResults(): Promise<BatchResult[]> {
|
|
434
|
+
return []
|
|
435
|
+
},
|
|
436
|
+
async waitForCompletion(): Promise<BatchResult[]> {
|
|
437
|
+
return []
|
|
438
|
+
},
|
|
439
|
+
}
|
|
440
|
+
|
|
441
|
+
registerBatchAdapter('openai', partialAdapter)
|
|
442
|
+
|
|
443
|
+
const partialFailureHandler = vi.fn()
|
|
444
|
+
const batch = createBatch({
|
|
445
|
+
provider: 'openai',
|
|
446
|
+
autoSubmit: true,
|
|
447
|
+
maxItems: 4
|
|
448
|
+
})
|
|
449
|
+
|
|
450
|
+
// FAILING: There should be a way to subscribe to partial failure events
|
|
451
|
+
// This tests that callers can be notified when some items fail
|
|
452
|
+
if ('on' in batch) {
|
|
453
|
+
(batch as BatchQueue & { on: (event: string, handler: (results: BatchResult[]) => void) => void })
|
|
454
|
+
.on('partial-failure', partialFailureHandler)
|
|
455
|
+
}
|
|
456
|
+
|
|
457
|
+
batch.add('prompt 1')
|
|
458
|
+
batch.add('prompt 2')
|
|
459
|
+
batch.add('prompt 3')
|
|
460
|
+
batch.add('prompt 4') // Triggers auto-submit
|
|
461
|
+
|
|
462
|
+
// Wait for auto-submit to complete
|
|
463
|
+
await new Promise(resolve => setTimeout(resolve, 100))
|
|
464
|
+
|
|
465
|
+
// FAILING: Partial failure handler should be called with failed items
|
|
466
|
+
expect(partialFailureHandler).toHaveBeenCalled()
|
|
467
|
+
expect(partialFailureHandler).toHaveBeenCalledWith(
|
|
468
|
+
expect.arrayContaining([
|
|
469
|
+
expect.objectContaining({ status: 'failed', error: 'Processing failed' })
|
|
470
|
+
])
|
|
471
|
+
)
|
|
472
|
+
})
|
|
473
|
+
|
|
474
|
+
it('should provide aggregated error info after partial auto-submit failure', async () => {
|
|
475
|
+
const partialAdapter: BatchAdapter = {
|
|
476
|
+
async submit(items: BatchItem[]): Promise<BatchSubmitResult> {
|
|
477
|
+
const results: BatchResult[] = items.map((item, i) => ({
|
|
478
|
+
id: item.id,
|
|
479
|
+
customId: item.id,
|
|
480
|
+
status: i % 2 === 0 ? 'completed' : 'failed',
|
|
481
|
+
result: i % 2 === 0 ? `Result for ${item.prompt}` : undefined,
|
|
482
|
+
error: i % 2 === 1 ? 'Processing failed' : undefined,
|
|
483
|
+
}))
|
|
484
|
+
return {
|
|
485
|
+
job: {
|
|
486
|
+
id: 'batch_partial',
|
|
487
|
+
provider: 'openai',
|
|
488
|
+
status: 'completed',
|
|
489
|
+
totalItems: items.length,
|
|
490
|
+
completedItems: results.filter(r => r.status === 'completed').length,
|
|
491
|
+
failedItems: results.filter(r => r.status === 'failed').length,
|
|
492
|
+
createdAt: new Date(),
|
|
493
|
+
},
|
|
494
|
+
completion: Promise.resolve(results),
|
|
495
|
+
}
|
|
496
|
+
},
|
|
497
|
+
async getStatus(batchId: string): Promise<BatchJob> {
|
|
498
|
+
return {
|
|
499
|
+
id: batchId,
|
|
500
|
+
provider: 'openai',
|
|
501
|
+
status: 'completed',
|
|
502
|
+
totalItems: 4,
|
|
503
|
+
completedItems: 2,
|
|
504
|
+
failedItems: 2,
|
|
505
|
+
createdAt: new Date(),
|
|
506
|
+
}
|
|
507
|
+
},
|
|
508
|
+
async cancel(): Promise<void> {},
|
|
509
|
+
async getResults(): Promise<BatchResult[]> {
|
|
510
|
+
return []
|
|
511
|
+
},
|
|
512
|
+
async waitForCompletion(): Promise<BatchResult[]> {
|
|
513
|
+
return []
|
|
514
|
+
},
|
|
515
|
+
}
|
|
516
|
+
|
|
517
|
+
registerBatchAdapter('openai', partialAdapter)
|
|
518
|
+
|
|
519
|
+
const batch = createBatch({
|
|
520
|
+
provider: 'openai',
|
|
521
|
+
autoSubmit: true,
|
|
522
|
+
maxItems: 4
|
|
523
|
+
})
|
|
524
|
+
|
|
525
|
+
batch.add('prompt 1')
|
|
526
|
+
batch.add('prompt 2')
|
|
527
|
+
batch.add('prompt 3')
|
|
528
|
+
batch.add('prompt 4') // Triggers auto-submit
|
|
529
|
+
|
|
530
|
+
await new Promise(resolve => setTimeout(resolve, 100))
|
|
531
|
+
|
|
532
|
+
// FAILING: There should be a way to get failure summary
|
|
533
|
+
const failedItems = (batch as BatchQueue & { getFailedItems?: () => BatchItem[] }).getFailedItems?.()
|
|
534
|
+
expect(failedItems).toBeDefined()
|
|
535
|
+
expect(failedItems?.length).toBe(2)
|
|
536
|
+
})
|
|
537
|
+
})
|
|
538
|
+
|
|
539
|
+
describe('Console.error verification (current behavior)', () => {
|
|
540
|
+
it('verifies errors are currently only logged, not propagated', async () => {
|
|
541
|
+
const testError = new Error('Test submission error')
|
|
542
|
+
registerBatchAdapter('openai', createFailingAdapter(testError))
|
|
543
|
+
|
|
544
|
+
const batch = createBatch({
|
|
545
|
+
provider: 'openai',
|
|
546
|
+
autoSubmit: true,
|
|
547
|
+
maxItems: 2
|
|
548
|
+
})
|
|
549
|
+
|
|
550
|
+
batch.add('prompt 1')
|
|
551
|
+
batch.add('prompt 2') // Triggers auto-submit
|
|
552
|
+
|
|
553
|
+
await new Promise(resolve => setTimeout(resolve, 100))
|
|
554
|
+
|
|
555
|
+
// This passes - errors ARE logged
|
|
556
|
+
expect(consoleErrorSpy).toHaveBeenCalledWith(testError)
|
|
557
|
+
|
|
558
|
+
// But there's no other way to access the error
|
|
559
|
+
// - No error event emitted
|
|
560
|
+
// - No error property on batch
|
|
561
|
+
// - No way to await the auto-submit
|
|
562
|
+
// - Items remain in 'pending' state
|
|
563
|
+
|
|
564
|
+
const items = batch.getItems()
|
|
565
|
+
// FAILING: Items should not remain pending after failed submission
|
|
566
|
+
expect(items[0].status).not.toBe('pending')
|
|
567
|
+
})
|
|
568
|
+
})
|
|
569
|
+
})
|
|
570
|
+
|
|
571
|
+
describe('Suggested API improvements', () => {
|
|
572
|
+
it('documents expected error handling API', () => {
|
|
573
|
+
// This test documents what the error handling API SHOULD look like
|
|
574
|
+
// All these assertions will fail, showing the gaps
|
|
575
|
+
|
|
576
|
+
const batch = createBatch({
|
|
577
|
+
provider: 'openai',
|
|
578
|
+
autoSubmit: true,
|
|
579
|
+
maxItems: 5
|
|
580
|
+
})
|
|
581
|
+
|
|
582
|
+
// 1. Event-based error handling
|
|
583
|
+
expect('on' in batch).toBe(true)
|
|
584
|
+
expect(typeof (batch as unknown as { on?: unknown }).on).toBe('function')
|
|
585
|
+
|
|
586
|
+
// 2. Promise-based error handling
|
|
587
|
+
expect('awaitAutoSubmit' in batch).toBe(true)
|
|
588
|
+
expect(typeof (batch as unknown as { awaitAutoSubmit?: unknown }).awaitAutoSubmit).toBe('function')
|
|
589
|
+
|
|
590
|
+
// 3. Error state inspection
|
|
591
|
+
expect('submissionError' in batch).toBe(true)
|
|
592
|
+
expect('hasSubmissionError' in batch).toBe(true)
|
|
593
|
+
|
|
594
|
+
// 4. Retry capability
|
|
595
|
+
expect('retry' in batch).toBe(true)
|
|
596
|
+
expect(typeof (batch as unknown as { retry?: unknown }).retry).toBe('function')
|
|
597
|
+
})
|
|
598
|
+
})
|