@pwshub/aisdk 0.0.5 → 0.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +181 -1
- package/index.d.ts +43 -1
- package/package.json +2 -2
- package/src/coerce.js +77 -5
- package/src/coerce.test.js +114 -40
- package/src/config.js +13 -0
- package/src/errors.js +52 -5
- package/src/index.js +117 -17
- package/src/index.test.js +859 -0
- package/src/logger.js +48 -0
- package/src/models.js +5 -0
- package/src/providers.js +19 -9
- package/src/registry.js +169 -24
- package/src/security.js +114 -0
- package/src/validation.js +4 -4
- package/src/validation.test.js +7 -6
|
@@ -0,0 +1,859 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview Integration tests for AI client module.
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
import {
|
|
6
|
+
describe, it, beforeEach, afterEach,
|
|
7
|
+
} from 'node:test'
|
|
8
|
+
import assert from 'node:assert'
|
|
9
|
+
import { createAi, ProviderError, InputError, setLogger, noopLogger } from './index.js'
|
|
10
|
+
|
|
11
|
+
// Store original fetch
|
|
12
|
+
const originalFetch = typeof global.fetch === 'function' ? global.fetch : null
|
|
13
|
+
|
|
14
|
+
/**
 * Builds a stub mimicking the subset of the Fetch Response API the
 * client relies on: status flags, header lookup, and json/text bodies.
 * @param {Object} options
 * @param {boolean} [options.ok=true] - Whether the response counts as successful.
 * @param {number} [options.status=200] - HTTP status code.
 * @param {Object} [options.data={}] - Payload resolved by `json()`.
 * @param {string} [options.text=''] - Payload resolved by `text()`.
 * @param {Map} [options.headers=new Map()] - Backing store for `headers.get`.
 * @returns {Promise<Response>} Resolves to the fake response object.
 */
const mockFetchResponse = ({ ok = true, status = 200, data = {}, text = '', headers = new Map() }) => {
  const fakeResponse = {
    ok,
    status,
    headers: { get: (name) => headers.get(name) },
    json: () => Promise.resolve(data),
    text: () => Promise.resolve(text),
  }
  return Promise.resolve(fakeResponse)
}
|
35
|
+
|
|
36
|
+
describe('createAi', () => {
  beforeEach(() => {
    // Undo any fetch stubbing left over from other suites.
    if (originalFetch) {
      global.fetch = originalFetch
    }
  })

  it('should create AI client with default options', () => {
    const client = createAi()
    assert.ok(client)
    assert.strictEqual(typeof client.ask, 'function')
    assert.strictEqual(typeof client.listModels, 'function')
    assert.strictEqual(typeof client.addModels, 'function')
  })

  it('should create AI client with custom options', () => {
    const client = createAi({
      gatewayUrl: 'https://custom.api.example.com',
      timeout: 5000,
    })
    assert.ok(client)
    assert.strictEqual(typeof client.ask, 'function')
  })

  it('should create isolated registry per instance', () => {
    const first = createAi()
    const second = createAi()

    const firstModels = first.listModels()
    const secondModels = second.listModels()

    // Both should have models but be independent
    assert.ok(firstModels.length > 0)
    assert.ok(secondModels.length > 0)
    assert.strictEqual(firstModels.length, secondModels.length)
  })

  it('should accept custom models in createAi', () => {
    const customModels = [
      {
        name: 'custom-model',
        provider: 'openai',
        input_price: 0.5,
        output_price: 1.5,
        max_in: 128000,
        max_out: 4096,
        enable: true,
      },
    ]

    const client = createAi({ models: customModels })
    const registered = client.listModels()

    assert.strictEqual(registered.length, 1)
    assert.strictEqual(registered[0].name, 'custom-model')
  })
})
|
|
93
|
+
|
|
94
|
+
describe('ask with mock fetch', () => {
  // Restore the real fetch around every test so stubs never leak.
  const restoreFetch = () => {
    if (originalFetch) {
      global.fetch = originalFetch
    }
  }

  beforeEach(restoreFetch)
  afterEach(restoreFetch)

  it('should succeed with valid request', async () => {
    global.fetch = () => mockFetchResponse({
      ok: true,
      status: 200,
      data: {
        choices: [{ message: { content: 'Hello from AI!' } }],
        usage: {
          prompt_tokens: 10,
          completion_tokens: 20,
        },
      },
    })

    const client = createAi()
    const answer = await client.ask({
      model: 'openai/gpt-4o',
      apikey: 'test-key',
      prompt: 'Hello',
    })

    assert.strictEqual(answer.text, 'Hello from AI!')
    assert.strictEqual(answer.usage.inputTokens, 10)
    assert.strictEqual(answer.usage.outputTokens, 20)
  })

  it('should throw InputError for invalid API key', async () => {
    global.fetch = () => mockFetchResponse({ ok: false, status: 401, text: 'Invalid API key' })

    const client = createAi()

    await assert.rejects(
      () => client.ask({ model: 'openai/gpt-4o', apikey: '', prompt: 'Hello' }),
      InputError
    )
  })

  it('should throw InputError for empty prompt', async () => {
    const client = createAi()

    await assert.rejects(
      () => client.ask({ model: 'openai/gpt-4o', apikey: 'test-key', prompt: '' }),
      InputError
    )
  })

  it('should throw ProviderError for rate limit', async () => {
    global.fetch = () => mockFetchResponse({ ok: false, status: 429, text: 'Rate limit exceeded' })

    const client = createAi()

    await assert.rejects(
      () => client.ask({ model: 'openai/gpt-4o', apikey: 'test-key', prompt: 'Hello' }),
      ProviderError
    )
  })

  it('should throw ProviderError for server error', async () => {
    global.fetch = () => mockFetchResponse({ ok: false, status: 500, text: 'Internal server error' })

    const client = createAi()

    await assert.rejects(
      () => client.ask({ model: 'openai/gpt-4o', apikey: 'test-key', prompt: 'Hello' }),
      ProviderError
    )
  })
})
|
|
202
|
+
|
|
203
|
+
describe('ask with fallbacks', () => {
  // Put the real fetch back before and after each test.
  const restoreFetch = () => {
    if (originalFetch) {
      global.fetch = originalFetch
    }
  }

  beforeEach(restoreFetch)
  afterEach(restoreFetch)

  it('should try fallback models on ProviderError', async () => {
    let attempts = 0

    global.fetch = () => {
      attempts += 1
      // First attempt is rate-limited; the retry against the fallback succeeds.
      if (attempts === 1) {
        return mockFetchResponse({ ok: false, status: 429, text: 'Rate limited' })
      }
      return mockFetchResponse({
        ok: true,
        status: 200,
        data: {
          choices: [{ message: { content: 'Success from fallback!' } }],
          usage: { prompt_tokens: 5, completion_tokens: 10 },
        },
      })
    }

    const client = createAi()
    const answer = await client.ask({
      model: 'openai/gpt-4o',
      apikey: 'test-key',
      prompt: 'Hello',
      fallbacks: ['openai/gpt-4o-mini'],
    })

    assert.strictEqual(attempts, 2)
    assert.strictEqual(answer.text, 'Success from fallback!')
  })

  it('should not try fallbacks on InputError', async () => {
    let attempts = 0

    global.fetch = () => {
      attempts += 1
      return mockFetchResponse({ ok: false, status: 401, text: 'Invalid API key' })
    }

    const client = createAi()

    await assert.rejects(
      () => client.ask({
        model: 'openai/gpt-4o',
        apikey: 'invalid-key',
        prompt: 'Hello',
        fallbacks: ['openai/gpt-4o-mini'],
      }),
      InputError
    )

    assert.strictEqual(attempts, 1) // Only one call, no fallback
  })

  it('should throw after all fallbacks fail', async () => {
    let attempts = 0

    global.fetch = () => {
      attempts += 1
      return mockFetchResponse({ ok: false, status: 429, text: 'Rate limited' })
    }

    const client = createAi()

    await assert.rejects(
      () => client.ask({
        model: 'openai/gpt-4o',
        apikey: 'test-key',
        prompt: 'Hello',
        fallbacks: ['openai/gpt-4o-mini', 'anthropic/claude-haiku-4-5'],
      }),
      ProviderError
    )

    assert.strictEqual(attempts, 3) // All models tried
  })

  it('should use result from first successful model', async () => {
    let attempts = 0

    global.fetch = (_url, _options) => {
      attempts += 1
      if (attempts <= 2) {
        return mockFetchResponse({ ok: false, status: 429, text: 'Rate limited' })
      }
      // Third call is to Anthropic - return Anthropic format
      return mockFetchResponse({
        ok: true,
        status: 200,
        data: {
          content: [{ type: 'text', text: 'Third model succeeded!' }],
          usage: { input_tokens: 5, output_tokens: 10 },
        },
      })
    }

    const client = createAi()
    const answer = await client.ask({
      model: 'openai/gpt-4o',
      apikey: 'test-key',
      prompt: 'Hello',
      fallbacks: ['openai/gpt-4o-mini', 'anthropic/claude-haiku-4-5'],
    })

    assert.strictEqual(attempts, 3)
    assert.ok(answer.text.includes('Third model'))
  })
})
|
|
341
|
+
|
|
342
|
+
describe('timeout', () => {
  const restoreFetch = () => {
    if (originalFetch) {
      global.fetch = originalFetch
    }
  }

  beforeEach(restoreFetch)
  afterEach(restoreFetch)

  it('should timeout request after specified duration', async () => {
    global.fetch = (_url, options) => new Promise((resolve, reject) => {
      // Deliver a slow success after 200ms unless the caller aborts first.
      const pending = setTimeout(() => {
        resolve(mockFetchResponse({
          ok: true,
          status: 200,
          data: {
            choices: [{ message: { content: 'Slow response' } }],
            usage: { prompt_tokens: 5, completion_tokens: 10 },
          },
        }))
      }, 200)

      // Honour the abort signal so the client-side timeout can cancel us.
      options?.signal?.addEventListener('abort', () => {
        clearTimeout(pending)
        reject(new DOMException('Aborted', 'AbortError'))
      })
    })

    const client = createAi({ timeout: 50 })

    await assert.rejects(
      () => client.ask({ model: 'openai/gpt-4o', apikey: 'test-key', prompt: 'Hello' }),
      ProviderError
    )
  })

  it('should succeed if request completes before timeout', async () => {
    global.fetch = async () => {
      await new Promise((resolve) => setTimeout(resolve, 10))
      return mockFetchResponse({
        ok: true,
        status: 200,
        data: {
          choices: [{ message: { content: 'Fast response' } }],
          usage: { prompt_tokens: 5, completion_tokens: 10 },
        },
      })
    }

    const client = createAi({ timeout: 500 })
    const answer = await client.ask({ model: 'openai/gpt-4o', apikey: 'test-key', prompt: 'Hello' })

    assert.strictEqual(answer.text, 'Fast response')
  })
})
|
|
415
|
+
|
|
416
|
+
describe('hooks', () => {
  const restoreFetch = () => {
    if (originalFetch) {
      global.fetch = originalFetch
    }
  }

  beforeEach(restoreFetch)
  afterEach(restoreFetch)

  // Stubs fetch with the canned successful completion every hook test uses.
  const stubSuccess = () => {
    global.fetch = () => mockFetchResponse({
      ok: true,
      status: 200,
      data: {
        choices: [{ message: { content: 'Response' } }],
        usage: { prompt_tokens: 5, completion_tokens: 10 },
      },
    })
  }

  it('should invoke onRequest hook before request', async () => {
    stubSuccess()

    let invoked = false
    let captured = null

    const client = createAi({
      onRequest: (context) => {
        invoked = true
        captured = context
      },
    })

    await client.ask({ model: 'openai/gpt-4o', apikey: 'test-key', prompt: 'Hello' })

    assert.ok(invoked)
    assert.ok(captured)
    assert.strictEqual(captured.model, 'openai/gpt-4o')
    assert.strictEqual(captured.provider, 'openai')
    assert.ok(captured.url)
    assert.ok(captured.body)
  })

  it('should invoke onResponse hook after response', async () => {
    stubSuccess()

    let invoked = false
    let captured = null

    const client = createAi({
      onResponse: (context) => {
        invoked = true
        captured = context
      },
    })

    await client.ask({ model: 'openai/gpt-4o', apikey: 'test-key', prompt: 'Hello' })

    assert.ok(invoked)
    assert.ok(captured)
    assert.strictEqual(captured.status, 200)
    assert.ok(captured.duration >= 0)
    assert.ok(captured.data)
  })

  it('should invoke both hooks in order', async () => {
    stubSuccess()

    const callOrder = []

    const client = createAi({
      onRequest: () => {
        callOrder.push('request')
      },
      onResponse: () => {
        callOrder.push('response')
      },
    })

    await client.ask({ model: 'openai/gpt-4o', apikey: 'test-key', prompt: 'Hello' })

    assert.deepStrictEqual(callOrder, ['request', 'response'])
  })

  it('should support async hooks', async () => {
    stubSuccess()

    let hookCompleted = false

    const client = createAi({
      onRequest: async () => {
        await new Promise((resolve) => setTimeout(resolve, 10))
      },
      onResponse: async () => {
        await new Promise((resolve) => setTimeout(resolve, 10))
        hookCompleted = true
      },
    })

    await client.ask({ model: 'openai/gpt-4o', apikey: 'test-key', prompt: 'Hello' })

    assert.ok(hookCompleted)
  })
})
|
|
557
|
+
|
|
558
|
+
describe('logger', () => {
  // Console-backed logger mirroring the library default, rebuilt per reset.
  const consoleLogger = () => ({
    warn: (msg) => console.warn(msg),
    error: (msg) => console.error(msg),
    debug: (msg) => console.debug(msg),
  })

  beforeEach(() => {
    if (originalFetch) {
      global.fetch = originalFetch
    }
    // Reset to default logger
    setLogger(consoleLogger())
  })

  afterEach(() => {
    setLogger(consoleLogger())
  })

  it('should use custom logger', () => {
    let warnCalled = false
    let errorCalled = false

    const customLogger = {
      warn: () => {
        warnCalled = true
      },
      error: () => {
        errorCalled = true
      },
      debug: () => {},
    }

    setLogger(customLogger)

    // Logger should be set
    assert.ok(warnCalled === false)
    assert.ok(errorCalled === false)
  })

  it('should silence output with noopLogger', () => {
    setLogger(noopLogger)

    // Noop logger should not produce output
    assert.ok(noopLogger.warn)
    assert.ok(noopLogger.error)
    assert.ok(noopLogger.debug)
  })
})
|
|
609
|
+
|
|
610
|
+
describe('cost calculation', () => {
  const restoreFetch = () => {
    if (originalFetch) {
      global.fetch = originalFetch
    }
  }

  beforeEach(restoreFetch)
  afterEach(restoreFetch)

  it('should calculate estimated cost correctly', async () => {
    global.fetch = () => mockFetchResponse({
      ok: true,
      status: 200,
      data: {
        choices: [{ message: { content: 'Response' } }],
        usage: {
          prompt_tokens: 100,
          completion_tokens: 200,
        },
      },
    })

    const client = createAi()
    const answer = await client.ask({
      model: 'openai/gpt-4o',
      apikey: 'test-key',
      prompt: 'Hello',
    })

    // gpt-4o: $2.50/1M input, $10/1M output
    // Input: 100/1M * 2.50 = 0.00025
    // Output: 200/1M * 10 = 0.002
    // Total: 0.00225
    assert.ok(answer.usage.estimatedCost > 0)
    assert.ok(typeof answer.usage.estimatedCost === 'number')
  })

  it('should include reasoning tokens in cost calculation', async () => {
    global.fetch = () => mockFetchResponse({
      ok: true,
      status: 200,
      data: {
        choices: [{ message: { content: 'Response' } }],
        usage: {
          prompt_tokens: 100,
          completion_tokens: 200,
          completion_tokens_details: {
            reasoning_tokens: 50,
          },
        },
      },
    })

    const client = createAi()
    const answer = await client.ask({
      model: 'openai/gpt-4o',
      apikey: 'test-key',
      prompt: 'Hello',
    })

    assert.strictEqual(answer.usage.reasoningTokens, 50)
    assert.ok(answer.usage.estimatedCost > 0)
  })
})
|
|
678
|
+
|
|
679
|
+
describe('messages array support', () => {
  const restoreFetch = () => {
    if (originalFetch) {
      global.fetch = originalFetch
    }
  }

  beforeEach(restoreFetch)
  afterEach(restoreFetch)

  it('should accept messages array instead of prompt', async () => {
    global.fetch = (url, options) => {
      // The outgoing payload must carry the full conversation.
      const body = JSON.parse(options.body)
      assert.ok(body.messages)
      assert.strictEqual(body.messages.length, 2)

      return mockFetchResponse({
        ok: true,
        status: 200,
        data: {
          choices: [{ message: { content: 'Multi-turn response' } }],
          usage: { prompt_tokens: 20, completion_tokens: 15 },
        },
      })
    }

    const client = createAi()
    const answer = await client.ask({
      model: 'openai/gpt-4o',
      apikey: 'test-key',
      messages: [
        { role: 'user', content: 'What is the capital of Vietnam?' },
        { role: 'assistant', content: 'The capital is Hanoi.' },
      ],
    })

    assert.strictEqual(answer.text, 'Multi-turn response')
  })

  it('should throw for empty message content', async () => {
    const client = createAi()

    await assert.rejects(
      () => client.ask({
        model: 'openai/gpt-4o',
        apikey: 'test-key',
        messages: [{ role: 'user', content: '' }],
      }),
      InputError
    )
  })

  it('should throw for invalid role in messages', async () => {
    const client = createAi()

    await assert.rejects(
      () => client.ask({
        model: 'openai/gpt-4o',
        apikey: 'test-key',
        messages: [{ role: 'bot', content: 'Hello' }],
      }),
      InputError
    )
  })
})
|
|
747
|
+
|
|
748
|
+
describe('stop parameter', () => {
  const restoreFetch = () => {
    if (originalFetch) {
      global.fetch = originalFetch
    }
  }

  beforeEach(restoreFetch)
  afterEach(restoreFetch)

  // Returns a fetch stub that records the outgoing JSON body into `sink`
  // and replies with a canned successful completion.
  const captureBody = (sink) => (url, options) => {
    sink.body = JSON.parse(options.body)
    return mockFetchResponse({
      ok: true,
      status: 200,
      data: {
        choices: [{ message: { content: 'Response' } }],
        usage: { prompt_tokens: 5, completion_tokens: 10 },
      },
    })
  }

  it('should pass stop parameter to provider', async () => {
    const sink = {}
    global.fetch = captureBody(sink)

    const client = createAi()
    await client.ask({
      model: 'openai/gpt-4o',
      apikey: 'test-key',
      prompt: 'Hello',
      stop: ['END', 'STOP'],
    })

    assert.ok(sink.body.stop)
    assert.deepStrictEqual(sink.body.stop, ['END', 'STOP'])
  })

  it('should pass stop as string to provider', async () => {
    const sink = {}
    global.fetch = captureBody(sink)

    const client = createAi()
    await client.ask({
      model: 'openai/gpt-4o',
      apikey: 'test-key',
      prompt: 'Hello',
      stop: 'END',
    })

    assert.ok(sink.body.stop)
    assert.strictEqual(sink.body.stop, 'END')
  })
})
|
|
815
|
+
|
|
816
|
+
describe('ProviderError properties', () => {
  it('should have correct properties', () => {
    const err = new ProviderError('Test error', {
      status: 429,
      provider: 'openai',
      model: 'gpt-4o',
      raw: 'Rate limited',
    })

    // Every metadata field supplied at construction must round-trip.
    assert.strictEqual(err.name, 'ProviderError')
    assert.strictEqual(err.status, 429)
    assert.strictEqual(err.provider, 'openai')
    assert.strictEqual(err.model, 'gpt-4o')
    assert.strictEqual(err.raw, 'Rate limited')
  })

  it('should support retryAfter property', () => {
    const err = new ProviderError('Rate limited', {
      status: 429,
      provider: 'openai',
      model: 'gpt-4o',
      retryAfter: 5000,
    })

    assert.strictEqual(err.retryAfter, 5000)
  })
})
|
|
843
|
+
|
|
844
|
+
describe('InputError properties', () => {
  it('should have correct properties', () => {
    const err = new InputError('Invalid request', {
      status: 400,
      provider: 'openai',
      model: 'gpt-4o',
      raw: 'Bad request',
    })

    // Every metadata field supplied at construction must round-trip.
    assert.strictEqual(err.name, 'InputError')
    assert.strictEqual(err.status, 400)
    assert.strictEqual(err.provider, 'openai')
    assert.strictEqual(err.model, 'gpt-4o')
    assert.strictEqual(err.raw, 'Bad request')
  })
})