@pwshub/aisdk 0.0.4 → 0.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,6 +1,6 @@
  # @pwshub/aisdk

- A thin, unified AI client for OpenAI, Anthropic, Google, DashScope, and DeepSeek with automatic parameter normalization and fallback support.
+ A thin, unified AI client for OpenAI, Anthropic, Google, DashScope, DeepSeek, and Mistral with automatic parameter normalization and fallback support.

  [![npm version](https://badge.fury.io/js/@pwshub%2Faisdk.svg)](https://badge.fury.io/js/@pwshub%2Faisdk)
  ![CodeQL](https://github.com/pwshub/aisdk/workflows/CodeQL/badge.svg)
@@ -14,6 +14,15 @@ A thin, unified AI client for OpenAI, Anthropic, Google, DashScope, and DeepSeek
  - **Fallback support**: Chain multiple models with automatic fallback on provider errors
  - **Token usage tracking**: Detailed token counts and estimated cost per request
  - **Provider-specific options**: Pass provider-specific parameters when needed
+ - **Request timeout**: Configurable timeout per client instance
+ - **Request/Response hooks**: `onRequest` and `onResponse` callbacks for observability
+ - **Configurable logging**: Custom or silent loggers via `setLogger()`, `getLogger()`, `noopLogger`
+ - **Instance-based registry**: Each `createAi()` gets an isolated model registry
+ - **Custom models at creation**: Load custom models via `createAi({ models: [...] })`
+ - **Stop sequences**: Control generation with `stop: string | string[]`
+ - **Retry-After support**: `retryAfter` property on `ProviderError` for rate limit handling
+ - **API key validation**: Pre-request validation with provider-specific format warnings
+ - **Empty prompt validation**: Rejects empty prompts and message content

  ## Limitations

@@ -45,7 +54,7 @@ const ai = createAi()

  // Basic usage
  const result = await ai.ask({
- model: 'gpt-4o',
+ model: 'openai/gpt-4o',
  apikey: 'your-api-key-here',
  prompt: 'What is the capital of Vietnam?',
  temperature: 0.5,
@@ -63,21 +72,27 @@ Creates an AI client instance.

  **Options:**
  - `gatewayUrl` (optional): Override the default API endpoint URL
+ - `timeout` (optional): Request timeout in milliseconds (default: 30000)
+ - `models` (optional): Custom model registry to load on creation
+ - `onRequest` (optional): Hook called before each request with context `{ model, provider, url, headers, body }`
+ - `onResponse` (optional): Hook called after each response with context `{ model, provider, url, headers, body, status, data, duration }`

  **Returns:** An object with:
  - `ask(params)`: Send a generation request
  - `listModels()`: Get all available models from the registry
+ - `addModels(models)`: Add models to this instance's registry

  ### `ai.ask(params)`

  Sends a text generation request.

  **Parameters:**
- - `model` (string, required): Model ID (must exist in models.json)
- - `apikey` (string, required): API key for the provider
- - `prompt` (string, required): The user message
+ - `model` (string, required): Use `provider/name` format (e.g., `anthropic/claude-sonnet-4-6`)
+ - `apikey` (string, required): API key for the provider. For local Ollama, set it to any string.
+ - `prompt` (string, required): The user message (or use `messages` array)
  - `system` (string, optional): Optional system prompt
- - `fallbacks` (string[], optional): Ordered list of fallback model IDs
+ - `messages` (array, optional): Array of `{ role, content }` objects for multi-turn conversations
+ - `fallbacks` (string[], optional): Ordered list of fallback models (same format as `model`)
  - `providerOptions` (object, optional): Provider-specific options
  - `temperature` (number, optional): Sampling temperature
  - `maxTokens` (number, optional): Maximum output tokens
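
For illustration, a multi-turn request using the new `messages` parameter (not otherwise shown in this diff) might look like the following sketch; the `{ role, content }` shape and `provider/name` model format follow the parameter list documented in the hunk above, and exact behavior may vary by provider.

```javascript
import { createAi } from '@pwshub/aisdk'

const ai = createAi()

// Prior turns go in `messages`; `prompt` can be omitted when `messages` is used
const result = await ai.ask({
  model: 'openai/gpt-4o',
  apikey: process.env.OPENAI_API_KEY,
  system: 'You are a concise assistant.',
  messages: [
    { role: 'user', content: 'What is the capital of Vietnam?' },
    { role: 'assistant', content: 'Hanoi.' },
    { role: 'user', content: 'Roughly how many people live there?' },
  ],
})

console.log(result)
```
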
@@ -85,6 +100,8 @@ Sends a text generation request.
  - `topK` (number, optional): Top-K sampling
  - `frequencyPenalty` (number, optional): Frequency penalty
  - `presencePenalty` (number, optional): Presence penalty
+ - `stop` (string | string[], optional): Stop sequences to end generation
+ - `seed` (number, optional): Random seed for reproducible output

  **Returns:** Promise resolving to:
  ```javascript
@@ -115,7 +132,7 @@ import { createAi } from '@pwshub/aisdk'
  const ai = createAi()

  const result = await ai.ask({
- model: 'gpt-4o',
+ model: 'openai/gpt-4o',
  apikey: process.env.OPENAI_API_KEY,
  prompt: 'Explain quantum entanglement',
  temperature: 0.7,
@@ -127,7 +144,7 @@ const result = await ai.ask({

  ```javascript
  const result = await ai.ask({
- model: 'claude-sonnet-4-6',
+ model: 'anthropic/claude-sonnet-4-6',
  apikey: process.env.ANTHROPIC_API_KEY,
  prompt: 'Write a haiku about TypeScript',
  temperature: 0.5,
@@ -138,7 +155,7 @@ const result = await ai.ask({

  ```javascript
  const result = await ai.ask({
- model: 'gemini-2.5-flash',
+ model: 'google/gemini-2.5-flash',
  apikey: process.env.GOOGLE_API_KEY,
  prompt: 'What is 2+2?',
  providerOptions: {
@@ -155,7 +172,7 @@ Gemini 2.5 Pro and other reasoning models use thinking tokens by default. Disabl

  ```javascript
  const result = await ai.ask({
- model: 'gemini-2.5-pro',
+ model: 'google/gemini-2.5-pro',
  apikey: process.env.GOOGLE_API_KEY,
  prompt: 'What is the capital of Vietnam?',
  maxTokens: 256,
@@ -175,10 +192,10 @@ const result = await ai.ask({
  ```javascript
  try {
  const result = await ai.ask({
- model: 'gpt-4o',
+ model: 'openai/gpt-4o',
  apikey: process.env.OPENAI_API_KEY,
  prompt: 'Hello',
- fallbacks: ['gpt-4o-mini', 'claude-haiku-4-5'],
+ fallbacks: ['openai/gpt-4o-mini', 'anthropic/claude-haiku-4-5'],
  })

  if (result.model !== 'gpt-4o') {
@@ -197,7 +214,7 @@ try {

  ```javascript
  const result = await ai.ask({
- model: 'qwen3.5-plus',
+ model: 'dashscope/qwen3.5-plus',
  apikey: process.env.DASHSCOPE_API_KEY,
  prompt: 'Hello',
  })
@@ -227,7 +244,7 @@ const aiCN = createAi({

  // Use the regional client
  const result = await aiSingapore.ask({
- model: 'qwen3.5-plus',
+ model: 'dashscope/qwen3.5-plus',
  apikey: process.env.DASHSCOPE_API_KEY,
  prompt: 'Hello from Singapore!',
  })
@@ -237,74 +254,291 @@ const result = await aiSingapore.ask({

  ```javascript
  const result = await ai.ask({
- model: 'deepseek-chat',
+ model: 'deepseek/deepseek-chat',
  apikey: process.env.DEEPSEEK_API_KEY,
  prompt: 'Hello',
  })
  ```

+ ### Mistral
+
+ ```javascript
+ const result = await ai.ask({
+ model: 'mistral/mistral-large-latest',
+ apikey: process.env.MISTRAL_API_KEY,
+ prompt: 'Hello',
+ temperature: 0.7,
+ })
+ ```
+
+ ### Mistral with Random Seed
+
+ For reproducible results, use `randomSeed`:
+
+ ```javascript
+ const result = await ai.ask({
+ model: 'mistral/mistral-medium-latest',
+ apikey: process.env.MISTRAL_API_KEY,
+ prompt: 'Write a poem',
+ randomSeed: 42,
+ })
+ ```
+
+ ### With Stop Sequences
+
+ Control where generation stops using the `stop` parameter:
+
+ ```javascript
+ // Single stop sequence
+ const result = await ai.ask({
+ model: 'openai/gpt-4o',
+ apikey: process.env.OPENAI_API_KEY,
+ prompt: 'Complete this sentence: The quick brown fox',
+ stop: '.', // Stop at first period
+ })
+
+ // Multiple stop sequences
+ const result2 = await ai.ask({
+ model: 'anthropic/claude-sonnet-4-6',
+ apikey: process.env.ANTHROPIC_API_KEY,
+ prompt: 'Write a story',
+ stop: ['\n\n', 'THE END'], // Stop at double newline or "THE END"
+ })
+ ```
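
The generic `seed` parameter listed under `ai.ask(params)` has no example of its own in this README; a minimal sketch follows, and whether repeated calls are actually deterministic depends on the provider.

```javascript
// Two calls with the same seed should give similar or identical output
// on providers that honor seeding; support varies by provider.
const first = await ai.ask({
  model: 'openai/gpt-4o',
  apikey: process.env.OPENAI_API_KEY,
  prompt: 'Pick three random city names',
  temperature: 0,
  seed: 1234,
})

const second = await ai.ask({
  model: 'openai/gpt-4o',
  apikey: process.env.OPENAI_API_KEY,
  prompt: 'Pick three random city names',
  temperature: 0,
  seed: 1234,
})
```
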
+
+ ### With Request Timeout
+
+ Set a custom timeout for requests:
+
+ ```javascript
+ import { createAi } from '@pwshub/aisdk'
+
+ const ai = createAi({
+ timeout: 5000, // 5 second timeout
+ })
+
+ try {
+ const result = await ai.ask({
+ model: 'openai/gpt-4o',
+ apikey: process.env.OPENAI_API_KEY,
+ prompt: 'Hello',
+ })
+ } catch (error) {
+ if (error.message.includes('timeout')) {
+ console.error('Request timed out after 5 seconds')
+ }
+ }
+ ```
+
+ ### With Request/Response Hooks
+
+ Add observability with hooks:
+
+ ```javascript
+ import { createAi } from '@pwshub/aisdk'
+
+ const ai = createAi({
+ onRequest: (context) => {
+ console.log(`Sending request to ${context.provider}/${context.model}`)
+ console.log(`URL: ${context.url}`)
+ // context.headers and context.body are also available
+ },
+ onResponse: (context) => {
+ console.log(`Response from ${context.provider}/${context.model}`)
+ console.log(`Status: ${context.status}, Duration: ${context.duration}ms`)
+ // context.data contains the raw response
+ },
+ })
+
+ const result = await ai.ask({
+ model: 'openai/gpt-4o',
+ apikey: process.env.OPENAI_API_KEY,
+ prompt: 'Hello',
+ })
+ ```
+
+ ### Custom Logger
+
+ Configure logging behavior:
+
+ ```javascript
+ import { createAi, setLogger, getLogger, noopLogger } from '@pwshub/aisdk'
+
+ // Use a custom logger
+ setLogger({
+ warn: (msg) => myLogger.warning(msg),
+ error: (msg) => myLogger.error(msg),
+ debug: (msg) => myLogger.debug(msg),
+ })
+
+ // Or silence all logging (production)
+ setLogger(noopLogger)
+
+ // Get current logger
+ const logger = getLogger()
+
+ const ai = createAi()
+ ```
+
+ ### Instance-Based Registry
+
+ Each `createAi()` instance has its own isolated model registry:
+
+ ```javascript
+ import { createAi, addModels } from '@pwshub/aisdk'
+
+ // Create two independent instances
+ const ai1 = createAi()
+ const ai2 = createAi()
+
+ // Add models to ai1 only
+ ai1.addModels([
+ { name: 'llama3.2', provider: 'ollama' },
+ ])
+
+ // ai1 has the custom model
+ console.log(ai1.listModels().length) // includes llama3.2
+
+ // ai2 doesn't have it (isolated registry)
+ console.log(ai2.listModels().length) // default models only
+ ```
+
+ ### Custom Models at Creation
+
+ Load custom models when creating the AI client:
+
+ ```javascript
+ import { createAi } from '@pwshub/aisdk'
+
+ const customModels = [
+ { name: 'llama3.2', provider: 'ollama' },
+ { name: 'mistral', provider: 'ollama' },
+ {
+ name: 'gpt-4o-custom',
+ provider: 'openai',
+ input_price: 0.5,
+ output_price: 1.5,
+ },
+ ]
+
+ const ai = createAi({
+ models: customModels,
+ })
+
+ // This instance only has the custom models
+ console.log(ai.listModels())
+ ```
+
  ## Supported Models

- The library comes with **30 pre-configured models** from all supported providers:
+ The library comes with just a few popular models configured in `src/models.js`.

- - **OpenAI**: gpt-4.1-nano, gpt-4.1-mini, gpt-4.1, gpt-4o, gpt-4o-mini, gpt-5, gpt-5-mini, gpt-5-nano, gpt-5.1, gpt-5.2, gpt-5.4, o3-mini, o4-mini
- - **Anthropic**: claude-haiku-4-5, claude-sonnet-4-6, claude-sonnet-4-5, claude-opus-4-6
- - **Google**: gemini-2.5-flash, gemini-2.5-flash-lite, gemini-2.5-pro, gemini-3.1-pro-preview, gemini-3.1-flash-lite-preview
- - **DashScope**: qwen-flash, qwen3.5-flash, qwen-plus, qwen3.5-plus, qwen-max, qwen3-max
- - **DeepSeek**: deepseek-chat, deepseek-reasoner
+ ## Model Management

- ### Managing Models
+ Models are automatically loaded from the built-in registry when the library is imported. You can add custom models or replace the entire list with your own (e.g., from a CMS).

- Models are managed via `addModels()` and `setModels()`:
+ ### Adding Custom Models
+
+ Use `addModels()` to add models to the existing registry. Only `name` and `provider` are required — other fields get sensible defaults:

  ```javascript
- import { createAi, addModels, setModels, listModels } from '@pwshub/aisdk'
+ import { createAi, addModels, listModels } from '@pwshub/aisdk'

- // List all available models (30 models loaded by default)
- console.log(listModels())
+ // Add minimal model records (auto-generates ID and sets defaults)
+ addModels([
+ { name: 'llama3.2', provider: 'ollama' },
+ { name: 'mistral', provider: 'ollama' },
+ { name: 'gemma3', provider: 'ollama' },
+ ])

- // Add more models to the existing list
+ // Add models with custom pricing
  addModels([
  {
- id: 'my-custom-model',
  name: 'my-custom-model',
  provider: 'openai',
- input_price: 1,
- output_price: 2,
- cache_price: 0.5,
+ input_price: 0.5,
+ output_price: 1.5,
  max_in: 128000,
  max_out: 16384,
- enable: true,
  },
  ])

- // Replace all models with your own list (e.g., from CMS)
+ // View all available models
+ console.log(listModels())
+ ```
+
+ **Default values for missing fields:**
+ - `id`: Auto-generated as `${provider}_${name}` (e.g., `ollama_llama3.2`)
+ - `input_price`, `output_price`, `cache_price`: `0`
+ - `max_in`: `32000`
+ - `max_out`: `8000`
+ - `enable`: `true`
+
+ ### Loading Models from CMS
+
+ Use `setModels()` to replace the entire registry with models from your CMS:
+
+ ```javascript
+ import { createAi, setModels } from '@pwshub/aisdk'
+
+ // Fetch models from your CMS
  const modelsFromCms = await fetch('https://cms.example.com/api/models').then(r => r.json())
+
+ // Expected format from CMS:
+ // [
+ // { id: 'uuid-123', name: 'llama3.2', provider: 'ollama', ... },
+ // { id: 'uuid-456', name: 'mistral', provider: 'ollama', ... }
+ // ]
+
  setModels(modelsFromCms)

  const ai = createAi()
- const result = await ai.ask({
- model: 'gemini-2.5-flash',
- apikey: 'your-api-key',
- prompt: 'Hello!',
- })
  ```

- > **Note:** Models are loaded automatically from `src/models.js` when the library is imported. You don't need to call `setModels()` unless you want to use a custom model list.
+ > **Note:** Model `id` can be any unique string (UUID, slug, etc.). The library uses it for internal tracking. When using models from CMS, you reference them by `provider/name` format (see below).
+
+ ### Using Models
+
+ Models MUST be referenced in `provider/name` format:
+
+ ```javascript
+ const ai = createAi()
+
+ // Correct: provider/name format
+ await ai.ask({
+ model: 'openai/gpt-4o',
+ apikey: process.env.OPENAI_API_KEY,
+ prompt: 'Hello',
+ })
+
+ // Correct: works for all providers
+ await ai.ask({
+ model: 'ollama/llama3.2',
+ apikey: '',
+ prompt: 'Hello',
+ })
+
+ await ai.ask({
+ model: 'anthropic/claude-sonnet-4-6',
+ apikey: process.env.ANTHROPIC_API_KEY,
+ prompt: 'Hello',
+ })
+ ```

  ### Model Record Format

- Each model record should include:
- - `id`: Model identifier used in requests
- - `name`: Official model name (used in API calls)
- - `provider`: Provider ID (openai, anthropic, google, dashscope, deepseek)
- - `input_price`: Price per 1M input tokens (USD)
- - `output_price`: Price per 1M output tokens (USD)
- - `cache_price`: Price per 1M cached tokens (USD)
- - `max_in`: Maximum input tokens (context window)
- - `max_out`: Maximum output tokens
- - `enable`: Boolean to enable/disable the model
- - `supportedParams` (optional): Array of supported parameter names
+ | Field | Required | Default | Description |
+ |-------|----------|---------|-------------|
+ | `name` | Yes | - | Model name used in API calls |
+ | `provider` | Yes | - | Provider ID (openai, anthropic, google, dashscope, deepseek, mistral, ollama) |
+ | `id` | No | `${provider}_${name}` | Unique identifier (auto-generated if not provided) |
+ | `input_price` | No | `0` | Price per 1M input tokens (USD) |
+ | `output_price` | No | `0` | Price per 1M output tokens (USD) |
+ | `cache_price` | No | `0` | Price per 1M cached tokens (USD) |
+ | `max_in` | No | `32000` | Maximum input tokens (context window) |
+ | `max_out` | No | `8000` | Maximum output tokens |
+ | `enable` | No | `true` | Enable/disable the model |
+ | `supportedParams` | No | Provider defaults | Array of supported parameter names |
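
These per-1M-token prices presumably feed the estimated cost reported per request; as a rough illustration of the arithmetic (not necessarily the library's exact formula or rounding):

```javascript
// Illustrative only: a request that used 2,000 input tokens and 500 output
// tokens on a model priced at 0.5 / 1.5 USD per 1M input / output tokens.
const inputTokens = 2000
const outputTokens = 500
const estimatedCost = (inputTokens / 1_000_000) * 0.5 + (outputTokens / 1_000_000) * 1.5
console.log(estimatedCost) // 0.00175 (USD)
```
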

  ## Error Handling

@@ -315,7 +549,7 @@ const ai = createAi()

  try {
  const result = await ai.ask({
- model: 'gpt-4o',
+ model: 'openai/gpt-4o',
  apikey: process.env.OPENAI_API_KEY,
  prompt: 'Hello',
  })
@@ -324,6 +558,11 @@ try {
  // Provider-side error (rate limit, server error)
  // Safe to retry or fallback to another model
  console.error('Provider error:', error.status, error.message)
+
+ // For rate limits (429), check retryAfter for recommended wait time
+ if (error.retryAfter) {
+ console.log(`Retry after ${error.retryAfter} seconds`)
+ }
  } else if (error instanceof InputError) {
  // Client-side error (bad request, invalid API key)
  // Do NOT retry — fix the input
@@ -332,6 +571,19 @@ try {
  }
  ```

+ **ProviderError properties:**
+ - `status`: HTTP status code (429, 5xx, etc.)
+ - `provider`: Provider ID (e.g., 'openai', 'anthropic')
+ - `model`: Model identifier that failed
+ - `raw`: Raw response data from provider
+ - `retryAfter`: Seconds to wait before retrying (only for 429 responses with Retry-After header)
+
+ **InputError properties:**
+ - `status`: HTTP status code (400, 401, 403, 422)
+ - `provider`: Provider ID
+ - `model`: Model identifier
+ - `raw`: Raw response data from provider
+
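
Building on the properties above, a minimal manual-retry sketch that waits for the provider-suggested delay; this is illustrative only, since the README does not state that the SDK retries automatically.

```javascript
import { createAi, ProviderError } from '@pwshub/aisdk'

const ai = createAi()
const params = {
  model: 'openai/gpt-4o',
  apikey: process.env.OPENAI_API_KEY,
  prompt: 'Hello',
}

let result
try {
  result = await ai.ask(params)
} catch (error) {
  if (error instanceof ProviderError && error.status === 429 && error.retryAfter) {
    // Wait the suggested number of seconds, then retry once
    await new Promise((resolve) => setTimeout(resolve, error.retryAfter * 1000))
    result = await ai.ask(params)
  } else {
    throw error
  }
}
```
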
  ## Running Evaluation Scripts

  The package includes evaluation scripts to test each provider:
@@ -351,6 +603,9 @@ DASHSCOPE_API_KEY=your-key npm run eval:dashscope

  # DeepSeek
  DEEPSEEK_API_KEY=your-key npm run eval:deepseek
+
+ # Mistral
+ MISTRAL_API_KEY=your-key npm run eval:mistral
  ```

  ## Development
@@ -379,4 +634,4 @@ npm run lint:fix

  ## License

- MIT
+ The MIT License (MIT)
package/index.d.ts CHANGED
@@ -4,13 +4,42 @@

  export interface AiOptions {
  gatewayUrl?: string;
+ timeout?: number;
+ models?: ModelRecord[];
+ onRequest?: (context: HookContext) => void | Promise<void>;
+ onResponse?: (context: ResponseHookContext) => void | Promise<void>;
+ }
+
+ export interface HookContext {
+ model: string;
+ provider: string;
+ url: string;
+ headers: Record<string, string>;
+ body: Record<string, unknown>;
+ }
+
+ export interface ResponseHookContext {
+ model: string;
+ provider: string;
+ url: string;
+ headers: Record<string, string>;
+ body: Record<string, unknown>;
+ status: number;
+ data: unknown;
+ duration: number;
+ }
+
+ export interface Message {
+ role: 'user' | 'assistant' | 'system';
+ content: string;
  }

  export interface AskParams {
  model: string;
  apikey: string;
- prompt: string;
+ prompt?: string;
  system?: string;
+ messages?: Message[];
  fallbacks?: string[];
  providerOptions?: Record<string, unknown>;
  temperature?: number;
@@ -19,6 +48,10 @@ export interface AskParams {
  topK?: number;
  frequencyPenalty?: number;
  presencePenalty?: number;
+ randomSeed?: number;
+ seed?: number;
+ numPredict?: number;
+ stop?: string | string[];
  }

  export interface Usage {
@@ -36,16 +69,23 @@ export interface AskResult {
  }

  export interface ModelRecord {
- id: string;
+ id?: string;
  name: string;
  provider: string;
- input_price: number;
- output_price: number;
- cache_price: number;
- max_in: number;
- max_out: number;
- enable: boolean;
+ input_price?: number;
+ output_price?: number;
+ cache_price?: number;
+ max_in?: number;
+ max_out?: number;
+ enable?: boolean;
  supportedParams?: string[];
+ paramOverrides?: Record<string, ParamOverride>;
+ }
+
+ export interface ParamOverride {
+ fixedValue?: number;
+ supportedValues?: number[];
+ range?: { min: number; max: number };
  }

  export class ProviderError extends Error {
@@ -53,7 +93,8 @@ export class ProviderError extends Error {
  provider: string;
  model: string;
  raw?: unknown;
- constructor(message: string, options: { status: number; provider: string; model: string; raw?: unknown });
+ retryAfter?: number;
+ constructor(message: string, options: { status: number; provider: string; model: string; raw?: unknown; retryAfter?: number });
  }

  export class InputError extends Error {
@@ -64,12 +105,22 @@ export class InputError extends Error {
  constructor(message: string, options: { status: number; provider: string; model: string; raw?: unknown });
  }

+ export interface Logger {
+ warn: (message: string) => void;
+ error: (message: string) => void;
+ debug: (message: string) => void;
+ }
+
  export interface AiClient {
  ask: (params: AskParams) => Promise<AskResult>;
  listModels: () => ModelRecord[];
+ addModels: (models: ModelRecord[]) => void;
  }

  export function createAi(opts?: AiOptions): AiClient;
  export function addModels(models: ModelRecord[]): void;
  export function setModels(models: ModelRecord[]): void;
  export function listModels(): ModelRecord[];
+ export function setLogger(logger: Logger): void;
+ export function getLogger(): Logger;
+ export const noopLogger: Logger;
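
The new `paramOverrides` field on `ModelRecord` (typed as `Record<string, ParamOverride>` above) is not documented in the README. A usage sketch follows; the keys and the runtime effect of `fixedValue`, `supportedValues`, and `range` are assumptions inferred purely from the type shape.

```javascript
import { addModels } from '@pwshub/aisdk'

// Sketch only: presumably constrains request parameters for this model;
// how (or whether) the library enforces these at request time is not
// documented in this diff.
addModels([
  {
    name: 'my-constrained-model',
    provider: 'openai',
    paramOverrides: {
      temperature: { range: { min: 0, max: 1 } },
      topP: { fixedValue: 1 },
    },
  },
])
```
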
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@pwshub/aisdk",
- "version": "0.0.4",
- "description": "A thin, unified AI client for OpenAI, Anthropic, Google, DashScope, and DeepSeek with automatic param normalization and fallback support",
+ "version": "0.0.6",
+ "description": "A thin, unified AI client for OpenAI, Anthropic, Google, DashScope, DeepSeek, and Mistral with automatic param normalization, fallback support, hooks, and timeout",
  "repository": {
  "type": "git",
  "url": "https://github.com/pwshub/aisdk"
@@ -22,14 +22,15 @@
  "index.d.ts"
  ],
  "scripts": {
- "test": "node --test test/*.test.js",
- "lint": "eslint src/ test/",
- "lint:fix": "eslint src/ test/ --fix",
+ "test": "node --test src/*.test.js",
+ "lint": "eslint src/",
+ "lint:fix": "eslint src/ --fix",
  "eval:openai": "node examples/openai.js",
  "eval:anthropic": "node examples/anthropic.js",
  "eval:google": "node examples/google.js",
  "eval:dashscope": "node examples/dashscope.js",
- "eval:deepseek": "node examples/deepseek.js"
+ "eval:deepseek": "node examples/deepseek.js",
+ "eval:mistral": "node examples/mistral.js"
  },
  "devDependencies": {
  "@eslint/js": "^10.0.1",
@@ -47,6 +48,7 @@
  "gpt",
  "qwen",
  "deepseek",
+ "mistral",
  "chat",
  "generation",
  "sdk"