@pwshub/aisdk 0.0.5 → 0.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -14,6 +14,15 @@ A thin, unified AI client for OpenAI, Anthropic, Google, DashScope, DeepSeek, an
14
14
  - **Fallback support**: Chain multiple models with automatic fallback on provider errors
15
15
  - **Token usage tracking**: Detailed token counts and estimated cost per request
16
16
  - **Provider-specific options**: Pass provider-specific parameters when needed
17
+ - **Request timeout**: Configurable timeout per client instance
18
+ - **Request/Response hooks**: `onRequest` and `onResponse` callbacks for observability
19
+ - **Configurable logging**: Custom or silent loggers via `setLogger()`, `getLogger()`, `noopLogger`
20
+ - **Instance-based registry**: Each `createAi()` gets isolated model registry
21
+ - **Custom models at creation**: Load custom models via `createAi({ models: [...] })`
22
+ - **Stop sequences**: Control generation with `stop: string | string[]`
23
+ - **Retry-After support**: `retryAfter` property on `ProviderError` for rate limit handling
24
+ - **API key validation**: Pre-request validation with provider-specific format warnings
25
+ - **Empty prompt validation**: Rejects empty prompts and message content
17
26
 
18
27
  ## Limitations
19
28
 
@@ -63,10 +72,15 @@ Creates an AI client instance.
63
72
 
64
73
  **Options:**
65
74
  - `gatewayUrl` (optional): Override the default API endpoint URL
75
+ - `timeout` (optional): Request timeout in milliseconds (default: 30000)
76
+ - `models` (optional): Custom model registry to load on creation
77
+ - `onRequest` (optional): Hook called before each request with context `{ model, provider, url, headers, body }`
78
+ - `onResponse` (optional): Hook called after each response with context `{ model, provider, url, headers, body, status, data, duration }`
66
79
 
67
80
  **Returns:** An object with:
68
81
  - `ask(params)`: Send a generation request
69
82
  - `listModels()`: Get all available models from the registry
83
+ - `addModels(models)`: Add models to this instance's registry
70
84
 
71
85
  ### `ai.ask(params)`
72
86
 
@@ -75,8 +89,9 @@ Sends a text generation request.
75
89
  **Parameters:**
76
90
  - `model` (string, required): Use `provider/name` format (e.g., `anthropic/claude-sonnet-4-6`)
77
91
  - `apikey` (string, required): API key for the provider. With ollama local, set to any string.
78
- - `prompt` (string, required): The user message
92
+ - `prompt` (string, required unless `messages` is provided): The user message
79
93
  - `system` (string, optional): Optional system prompt
94
+ - `messages` (array, optional): Array of `{ role, content }` objects for multi-turn conversations
80
95
  - `fallbacks` (string[], optional): Ordered list of fallback models (same format as `model`)
81
96
  - `providerOptions` (object, optional): Provider-specific options
82
97
  - `temperature` (number, optional): Sampling temperature
@@ -85,6 +100,8 @@ Sends a text generation request.
85
100
  - `topK` (number, optional): Top-K sampling
86
101
  - `frequencyPenalty` (number, optional): Frequency penalty
87
102
  - `presencePenalty` (number, optional): Presence penalty
103
+ - `stop` (string | string[], optional): Stop sequences to end generation
104
+ - `seed` (number, optional): Random seed for reproducible output
88
105
 
89
106
  **Returns:** Promise resolving to:
90
107
  ```javascript
@@ -267,6 +284,151 @@ const result = await ai.ask({
267
284
  })
268
285
  ```
269
286
 
287
+ ### With Stop Sequences
288
+
289
+ Control where generation stops using `stop` parameter:
290
+
291
+ ```javascript
292
+ // Single stop sequence
293
+ const result = await ai.ask({
294
+ model: 'openai/gpt-4o',
295
+ apikey: process.env.OPENAI_API_KEY,
296
+ prompt: 'Complete this sentence: The quick brown fox',
297
+ stop: '.', // Stop at first period
298
+ })
299
+
300
+ // Multiple stop sequences
301
+ const story = await ai.ask({
302
+ model: 'anthropic/claude-sonnet-4-6',
303
+ apikey: process.env.ANTHROPIC_API_KEY,
304
+ prompt: 'Write a story',
305
+ stop: ['\n\n', 'THE END'], // Stop at double newline or "THE END"
306
+ })
307
+ ```
308
+
309
+ ### With Request Timeout
310
+
311
+ Set a custom timeout for requests:
312
+
313
+ ```javascript
314
+ import { createAi } from '@pwshub/aisdk'
315
+
316
+ const ai = createAi({
317
+ timeout: 5000, // 5 second timeout
318
+ })
319
+
320
+ try {
321
+ const result = await ai.ask({
322
+ model: 'openai/gpt-4o',
323
+ apikey: process.env.OPENAI_API_KEY,
324
+ prompt: 'Hello',
325
+ })
326
+ } catch (error) {
327
+ if (error.message.includes('timeout')) {
328
+ console.error('Request timed out after 5 seconds')
329
+ }
330
+ }
331
+ ```
332
+
333
+ ### With Request/Response Hooks
334
+
335
+ Add observability with hooks:
336
+
337
+ ```javascript
338
+ import { createAi } from '@pwshub/aisdk'
339
+
340
+ const ai = createAi({
341
+ onRequest: (context) => {
342
+ console.log(`Sending request to ${context.provider}/${context.model}`)
343
+ console.log(`URL: ${context.url}`)
344
+ // context.headers and context.body are also available
345
+ },
346
+ onResponse: (context) => {
347
+ console.log(`Response from ${context.provider}/${context.model}`)
348
+ console.log(`Status: ${context.status}, Duration: ${context.duration}ms`)
349
+ // context.data contains the raw response
350
+ },
351
+ })
352
+
353
+ const result = await ai.ask({
354
+ model: 'openai/gpt-4o',
355
+ apikey: process.env.OPENAI_API_KEY,
356
+ prompt: 'Hello',
357
+ })
358
+ ```
359
+
360
+ ### Custom Logger
361
+
362
+ Configure logging behavior:
363
+
364
+ ```javascript
365
+ import { createAi, setLogger, getLogger, noopLogger } from '@pwshub/aisdk'
366
+
367
+ // Use a custom logger
368
+ setLogger({
369
+ warn: (msg) => myLogger.warning(msg),
370
+ error: (msg) => myLogger.error(msg),
371
+ debug: (msg) => myLogger.debug(msg),
372
+ })
373
+
374
+ // Or silence all logging (production)
375
+ setLogger(noopLogger)
376
+
377
+ // Get current logger
378
+ const logger = getLogger()
379
+
380
+ const ai = createAi()
381
+ ```
382
+
383
+ ### Instance-Based Registry
384
+
385
+ Each `createAi()` instance has its own isolated model registry:
386
+
387
+ ```javascript
388
+ import { createAi } from '@pwshub/aisdk'
389
+
390
+ // Create two independent instances
391
+ const ai1 = createAi()
392
+ const ai2 = createAi()
393
+
394
+ // Add models to ai1 only
395
+ ai1.addModels([
396
+ { name: 'llama3.2', provider: 'ollama' },
397
+ ])
398
+
399
+ // ai1 has the custom model
400
+ console.log(ai1.listModels().length) // includes llama3.2
401
+
402
+ // ai2 doesn't have it (isolated registry)
403
+ console.log(ai2.listModels().length) // default models only
404
+ ```
405
+
406
+ ### Custom Models at Creation
407
+
408
+ Load custom models when creating the AI client:
409
+
410
+ ```javascript
411
+ import { createAi } from '@pwshub/aisdk'
412
+
413
+ const customModels = [
414
+ { name: 'llama3.2', provider: 'ollama' },
415
+ { name: 'mistral', provider: 'ollama' },
416
+ {
417
+ name: 'gpt-4o-custom',
418
+ provider: 'openai',
419
+ input_price: 0.5,
420
+ output_price: 1.5,
421
+ },
422
+ ]
423
+
424
+ const ai = createAi({
425
+ models: customModels,
426
+ })
427
+
428
+ // This instance only has the custom models
429
+ console.log(ai.listModels())
430
+ ```
431
+
270
432
  ## Supported Models
271
433
 
272
434
  The library comes with just a few popular models configured in src/models.js
@@ -396,6 +558,11 @@ try {
396
558
  // Provider-side error (rate limit, server error)
397
559
  // Safe to retry or fallback to another model
398
560
  console.error('Provider error:', error.status, error.message)
561
+
562
+ // For rate limits (429), check retryAfter for recommended wait time
563
+ if (error.retryAfter) {
564
+ console.log(`Retry after ${error.retryAfter} seconds`)
565
+ }
399
566
  } else if (error instanceof InputError) {
400
567
  // Client-side error (bad request, invalid API key)
401
568
  // Do NOT retry — fix the input
@@ -404,6 +571,19 @@ try {
404
571
  }
405
572
  ```
406
573
 
574
+ **ProviderError properties:**
575
+ - `status`: HTTP status code (429, 5xx, etc.)
576
+ - `provider`: Provider ID (e.g., 'openai', 'anthropic')
577
+ - `model`: Model identifier that failed
578
+ - `raw`: Raw response data from provider
579
+ - `retryAfter`: Seconds to wait before retrying (only for 429 responses with Retry-After header)
580
+
581
+ **InputError properties:**
582
+ - `status`: HTTP status code (400, 401, 403, 422)
583
+ - `provider`: Provider ID
584
+ - `model`: Model identifier
585
+ - `raw`: Raw response data from provider
586
+
407
587
  ## Running Evaluation Scripts
408
588
 
409
589
  The package includes evaluation scripts to test each provider:
package/index.d.ts CHANGED
@@ -4,6 +4,29 @@
4
4
 
5
5
  export interface AiOptions {
6
6
  gatewayUrl?: string;
7
+ timeout?: number;
8
+ models?: ModelRecord[];
9
+ onRequest?: (context: HookContext) => void | Promise<void>;
10
+ onResponse?: (context: ResponseHookContext) => void | Promise<void>;
11
+ }
12
+
13
+ export interface HookContext {
14
+ model: string;
15
+ provider: string;
16
+ url: string;
17
+ headers: Record<string, string>;
18
+ body: Record<string, unknown>;
19
+ }
20
+
21
+ export interface ResponseHookContext {
22
+ model: string;
23
+ provider: string;
24
+ url: string;
25
+ headers: Record<string, string>;
26
+ body: Record<string, unknown>;
27
+ status: number;
28
+ data: unknown;
29
+ duration: number;
7
30
  }
8
31
 
9
32
  export interface Message {
@@ -28,6 +51,7 @@ export interface AskParams {
28
51
  randomSeed?: number;
29
52
  seed?: number;
30
53
  numPredict?: number;
54
+ stop?: string | string[];
31
55
  }
32
56
 
33
57
  export interface Usage {
@@ -55,6 +79,13 @@ export interface ModelRecord {
55
79
  max_out?: number;
56
80
  enable?: boolean;
57
81
  supportedParams?: string[];
82
+ paramOverrides?: Record<string, ParamOverride>;
83
+ }
84
+
85
+ export interface ParamOverride {
86
+ fixedValue?: number;
87
+ supportedValues?: number[];
88
+ range?: { min: number; max: number };
58
89
  }
59
90
 
60
91
  export class ProviderError extends Error {
@@ -62,7 +93,8 @@ export class ProviderError extends Error {
62
93
  provider: string;
63
94
  model: string;
64
95
  raw?: unknown;
65
- constructor(message: string, options: { status: number; provider: string; model: string; raw?: unknown });
96
+ retryAfter?: number;
97
+ constructor(message: string, options: { status: number; provider: string; model: string; raw?: unknown; retryAfter?: number });
66
98
  }
67
99
 
68
100
  export class InputError extends Error {
@@ -73,12 +105,22 @@ export class InputError extends Error {
73
105
  constructor(message: string, options: { status: number; provider: string; model: string; raw?: unknown });
74
106
  }
75
107
 
108
+ export interface Logger {
109
+ warn: (message: string) => void;
110
+ error: (message: string) => void;
111
+ debug: (message: string) => void;
112
+ }
113
+
76
114
  export interface AiClient {
77
115
  ask: (params: AskParams) => Promise<AskResult>;
78
116
  listModels: () => ModelRecord[];
117
+ addModels: (models: ModelRecord[]) => void;
79
118
  }
80
119
 
81
120
  export function createAi(opts?: AiOptions): AiClient;
82
121
  export function addModels(models: ModelRecord[]): void;
83
122
  export function setModels(models: ModelRecord[]): void;
84
123
  export function listModels(): ModelRecord[];
124
+ export function setLogger(logger: Logger): void;
125
+ export function getLogger(): Logger;
126
+ export const noopLogger: Logger;
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@pwshub/aisdk",
3
- "version": "0.0.5",
4
- "description": "A thin, unified AI client for OpenAI, Anthropic, Google, DashScope, DeepSeek, and Mistral with automatic param normalization and fallback support",
3
+ "version": "0.0.6",
4
+ "description": "A thin, unified AI client for OpenAI, Anthropic, Google, DashScope, DeepSeek, and Mistral with automatic param normalization, fallback support, hooks, and timeout",
5
5
  "repository": {
6
6
  "type": "git",
7
7
  "url": "https://github.com/pwshub/aisdk"
package/src/coerce.js CHANGED
@@ -6,6 +6,7 @@
6
6
  * with a console.warn for visibility.
7
7
  *
8
8
  * Uses the merged WIRE_KEYS config from config.js for range information.
9
+ * Also supports model-specific overrides via the overrides parameter.
9
10
  */
10
11
 
11
12
  import { getWireMap } from './config.js'
@@ -14,27 +15,82 @@ import { getWireMap } from './config.js'
14
15
  * @typedef {import('./registry.js').ProviderId} ProviderId
15
16
  */
16
17
 
18
+ /**
19
+ * @typedef {Object} ParamOverride
20
+ * @property {number} [fixedValue] - Force param to this value
21
+ * @property {number[]} [supportedValues] - Only allow these discrete values
22
+ * @property {{min: number, max: number}} [range] - Override the default range
23
+ */
24
+
25
+ /**
26
+ * @typedef {Object} CoerceOptions
27
+ * @property {string} modelId - Model identifier for override lookup
28
+ * @property {Record<string, ParamOverride>} [overrides] - Model-specific param overrides
29
+ */
30
+
17
31
  const clamp = (value, min, max) => Math.min(Math.max(value, min), max)
18
32
 
19
33
  /**
20
34
  * Coerce config values to provider's acceptable ranges.
21
- * Logs warnings for clamped values.
35
+ * Logs warnings for clamped or dropped values.
22
36
  *
23
37
  * @param {Record<string, unknown>} config
24
38
  * @param {ProviderId} providerId
25
- * @returns {Record<string, unknown>}
39
+ * @param {CoerceOptions} [options]
40
+ * @returns {{ coerced: Record<string, unknown>, dropped: string[] }}
26
41
  */
27
- export const coerceConfig = (config, providerId) => {
42
+ export const coerceConfig = (config, providerId, options = {}) => {
43
+ const { modelId, overrides = {} } = options
28
44
  const wireMap = getWireMap(providerId)
29
45
  if (!wireMap) {
30
- return config
46
+ return { coerced: config, dropped: [] }
31
47
  }
32
48
 
33
49
  const result = { ...config }
50
+ const dropped = []
34
51
 
35
52
  for (const [key, value] of Object.entries(config)) {
36
53
  const descriptor = wireMap[key]
37
- const range = descriptor?.range
54
+ const override = overrides[key]
55
+ if (!descriptor && !override) {
56
+ continue
57
+ }
58
+
59
+ // Merge descriptor and override (override takes precedence)
60
+ const effectiveDescriptor = {
61
+ ...descriptor,
62
+ ...override,
63
+ range: override?.range || descriptor?.range,
64
+ }
65
+
66
+ // Handle fixedValue: force param to specific value
67
+ if (effectiveDescriptor.fixedValue !== undefined && typeof value === 'number') {
68
+ if (value !== effectiveDescriptor.fixedValue) {
69
+ console.warn(
70
+ `[ai-client] "${key}" value ${value} not supported by model "${modelId}", forced to ${effectiveDescriptor.fixedValue}`
71
+ )
72
+ result[key] = effectiveDescriptor.fixedValue
73
+ }
74
+ continue
75
+ }
76
+
77
+ // Handle supportedValues: only allow discrete values
78
+ if (effectiveDescriptor.supportedValues?.length && typeof value === 'number') {
79
+ if (!effectiveDescriptor.supportedValues.includes(value)) {
80
+ // Clamp to nearest supported value
81
+ const nearest = effectiveDescriptor.supportedValues.reduce((prev, curr) =>
82
+ Math.abs(curr - value) < Math.abs(prev - value) ? curr : prev
83
+ )
84
+ console.warn(
85
+ `[ai-client] "${key}" value ${value} not supported by model "${modelId}", clamped to nearest allowed ${nearest}`
86
+ )
87
+ result[key] = nearest
88
+ }
89
+ continue
90
+ }
91
+
92
+ // Handle range clamping (existing behavior)
93
+ const range = effectiveDescriptor.range
38
94
  if (!range || typeof value !== 'number') {
39
95
  continue
40
96
  }
@@ -48,5 +104,21 @@ export const coerceConfig = (config, providerId) => {
48
104
  }
49
105
  }
50
106
 
107
+ return { coerced: result, dropped }
108
+ }
109
+
110
+ /**
111
+ * Drop params that are not supported by a model.
112
+ * Used when provider returns an "unsupported_value" error.
113
+ *
114
+ * @param {Record<string, unknown>} config
115
+ * @param {string[]} paramsToDrop
116
+ * @returns {Record<string, unknown>}
117
+ */
118
+ export const dropParams = (config, paramsToDrop) => {
119
+ const result = { ...config }
120
+ for (const param of paramsToDrop) {
121
+ delete result[param]
122
+ }
51
123
  return result
52
124
  }
@@ -12,119 +12,119 @@ describe('coerceConfig', () => {
12
12
  describe('openai', () => {
13
13
  it('should clamp temperature to valid range [0, 2]', () => {
14
14
  const config = { temperature: 3 }
15
- const result = coerceConfig(config, 'openai')
16
- assert.strictEqual(result.temperature, 2)
15
+ const { coerced } = coerceConfig(config, 'openai')
16
+ assert.strictEqual(coerced.temperature, 2)
17
17
  })
18
18
 
19
19
  it('should clamp temperature below range', () => {
20
20
  const config = { temperature: -1 }
21
- const result = coerceConfig(config, 'openai')
22
- assert.strictEqual(result.temperature, 0)
21
+ const { coerced } = coerceConfig(config, 'openai')
22
+ assert.strictEqual(coerced.temperature, 0)
23
23
  })
24
24
 
25
25
  it('should not clamp temperature within range', () => {
26
26
  const config = { temperature: 1 }
27
- const result = coerceConfig(config, 'openai')
28
- assert.strictEqual(result.temperature, 1)
27
+ const { coerced } = coerceConfig(config, 'openai')
28
+ assert.strictEqual(coerced.temperature, 1)
29
29
  })
30
30
 
31
31
  it('should clamp topP to valid range [0, 1]', () => {
32
32
  const config = { topP: 1.5 }
33
- const result = coerceConfig(config, 'openai')
34
- assert.strictEqual(result.topP, 1)
33
+ const { coerced } = coerceConfig(config, 'openai')
34
+ assert.strictEqual(coerced.topP, 1)
35
35
  })
36
36
 
37
37
  it('should clamp frequencyPenalty to valid range [-2, 2]', () => {
38
38
  const config = { frequencyPenalty: 3 }
39
- const result = coerceConfig(config, 'openai')
40
- assert.strictEqual(result.frequencyPenalty, 2)
39
+ const { coerced } = coerceConfig(config, 'openai')
40
+ assert.strictEqual(coerced.frequencyPenalty, 2)
41
41
  })
42
42
 
43
43
  it('should clamp presencePenalty to valid range [-2, 2]', () => {
44
44
  const config = { presencePenalty: -3 }
45
- const result = coerceConfig(config, 'openai')
46
- assert.strictEqual(result.presencePenalty, -2)
45
+ const { coerced } = coerceConfig(config, 'openai')
46
+ assert.strictEqual(coerced.presencePenalty, -2)
47
47
  })
48
48
  })
49
49
 
50
50
  describe('anthropic', () => {
51
51
  it('should clamp temperature to valid range [0, 1]', () => {
52
52
  const config = { temperature: 1.5 }
53
- const result = coerceConfig(config, 'anthropic')
54
- assert.strictEqual(result.temperature, 1)
53
+ const { coerced } = coerceConfig(config, 'anthropic')
54
+ assert.strictEqual(coerced.temperature, 1)
55
55
  })
56
56
 
57
57
  it('should clamp topK to valid range [1, 100]', () => {
58
58
  const config = { topK: 150 }
59
- const result = coerceConfig(config, 'anthropic')
60
- assert.strictEqual(result.topK, 100)
59
+ const { coerced } = coerceConfig(config, 'anthropic')
60
+ assert.strictEqual(coerced.topK, 100)
61
61
  })
62
62
 
63
63
  it('should clamp topK below range', () => {
64
64
  const config = { topK: 0 }
65
- const result = coerceConfig(config, 'anthropic')
66
- assert.strictEqual(result.topK, 1)
65
+ const { coerced } = coerceConfig(config, 'anthropic')
66
+ assert.strictEqual(coerced.topK, 1)
67
67
  })
68
68
  })
69
69
 
70
70
  describe('google', () => {
71
71
  it('should clamp temperature to valid range [0, 2]', () => {
72
72
  const config = { temperature: 3 }
73
- const result = coerceConfig(config, 'google')
74
- assert.strictEqual(result.temperature, 2)
73
+ const { coerced } = coerceConfig(config, 'google')
74
+ assert.strictEqual(coerced.temperature, 2)
75
75
  })
76
76
 
77
77
  it('should clamp topK to valid range [1, 100]', () => {
78
78
  const config = { topK: 200 }
79
- const result = coerceConfig(config, 'google')
80
- assert.strictEqual(result.topK, 100)
79
+ const { coerced } = coerceConfig(config, 'google')
80
+ assert.strictEqual(coerced.topK, 100)
81
81
  })
82
82
  })
83
83
 
84
84
  describe('dashscope', () => {
85
85
  it('should clamp temperature to valid range [0, 2]', () => {
86
86
  const config = { temperature: 5 }
87
- const result = coerceConfig(config, 'dashscope')
88
- assert.strictEqual(result.temperature, 2)
87
+ const { coerced } = coerceConfig(config, 'dashscope')
88
+ assert.strictEqual(coerced.temperature, 2)
89
89
  })
90
90
 
91
91
  it('should clamp topP to valid range [0, 1]', () => {
92
92
  const config = { topP: 2 }
93
- const result = coerceConfig(config, 'dashscope')
94
- assert.strictEqual(result.topP, 1)
93
+ const { coerced } = coerceConfig(config, 'dashscope')
94
+ assert.strictEqual(coerced.topP, 1)
95
95
  })
96
96
  })
97
97
 
98
98
  describe('deepseek', () => {
99
99
  it('should clamp temperature to valid range [0, 2]', () => {
100
100
  const config = { temperature: 3 }
101
- const result = coerceConfig(config, 'deepseek')
102
- assert.strictEqual(result.temperature, 2)
101
+ const { coerced } = coerceConfig(config, 'deepseek')
102
+ assert.strictEqual(coerced.temperature, 2)
103
103
  })
104
104
 
105
105
  it('should clamp frequencyPenalty to valid range [-2, 2]', () => {
106
106
  const config = { frequencyPenalty: 5 }
107
- const result = coerceConfig(config, 'deepseek')
108
- assert.strictEqual(result.frequencyPenalty, 2)
107
+ const { coerced } = coerceConfig(config, 'deepseek')
108
+ assert.strictEqual(coerced.frequencyPenalty, 2)
109
109
  })
110
110
  })
111
111
 
112
112
  describe('edge cases', () => {
113
113
  it('should return config unchanged for unknown provider', () => {
114
114
  const config = { temperature: 100 }
115
- const result = coerceConfig(config, 'unknown')
116
- assert.strictEqual(result.temperature, 100)
115
+ const { coerced } = coerceConfig(config, 'unknown')
116
+ assert.strictEqual(coerced.temperature, 100)
117
117
  })
118
118
 
119
119
  it('should not clamp non-numeric values', () => {
120
120
  const config = { temperature: 'hot' }
121
- const result = coerceConfig(config, 'openai')
122
- assert.strictEqual(result.temperature, 'hot')
121
+ const { coerced } = coerceConfig(config, 'openai')
122
+ assert.strictEqual(coerced.temperature, 'hot')
123
123
  })
124
124
 
125
125
  it('should handle empty config', () => {
126
- const result = coerceConfig({}, 'openai')
127
- assert.deepStrictEqual(result, {})
126
+ const { coerced, dropped } = coerceConfig({}, 'openai')
127
+ assert.deepStrictEqual({ coerced, dropped }, { coerced: {}, dropped: [] })
128
128
  })
129
129
 
130
130
  it('should clamp multiple values at once', () => {
@@ -133,10 +133,84 @@ describe('coerceConfig', () => {
133
133
  topP: 2,
134
134
  maxTokens: 100,
135
135
  }
136
- const result = coerceConfig(config, 'openai')
137
- assert.strictEqual(result.temperature, 2)
138
- assert.strictEqual(result.topP, 1)
139
- assert.strictEqual(result.maxTokens, 100) // maxTokens has no range
136
+ const { coerced } = coerceConfig(config, 'openai')
137
+ assert.strictEqual(coerced.temperature, 2)
138
+ assert.strictEqual(coerced.topP, 1)
139
+ assert.strictEqual(coerced.maxTokens, 100) // maxTokens has no range
140
+ })
141
+ })
142
+
143
+ describe('fixedValue overrides', () => {
144
+ it('should force temperature to fixedValue', () => {
145
+ const config = { temperature: 0.5 }
146
+ const { coerced } = coerceConfig(config, 'openai', {
147
+ modelId: 'openai/gpt-5-nano',
148
+ overrides: {
149
+ temperature: { fixedValue: 1 },
150
+ },
151
+ })
152
+ assert.strictEqual(coerced.temperature, 1)
153
+ })
154
+
155
+ it('should not change value if it already matches fixedValue', () => {
156
+ const config = { temperature: 1 }
157
+ const { coerced } = coerceConfig(config, 'openai', {
158
+ modelId: 'openai/gpt-5-nano',
159
+ overrides: {
160
+ temperature: { fixedValue: 1 },
161
+ },
162
+ })
163
+ assert.strictEqual(coerced.temperature, 1)
164
+ })
165
+
166
+ it('should force multiple params to fixed values', () => {
167
+ const config = { temperature: 0.5, topP: 0.8 }
168
+ const { coerced } = coerceConfig(config, 'openai', {
169
+ modelId: 'openai/gpt-5-nano',
170
+ overrides: {
171
+ temperature: { fixedValue: 1 },
172
+ topP: { fixedValue: 1 },
173
+ },
174
+ })
175
+ assert.strictEqual(coerced.temperature, 1)
176
+ assert.strictEqual(coerced.topP, 1)
177
+ })
178
+ })
179
+
180
+ describe('supportedValues (discrete values)', () => {
181
+ it('should clamp to nearest supported value', () => {
182
+ const config = { temperature: 0.3 }
183
+ const { coerced } = coerceConfig(config, 'openai', {
184
+ modelId: 'openai/some-model',
185
+ overrides: {
186
+ temperature: { supportedValues: [0, 0.5, 1] },
187
+ },
188
+ })
189
+ assert.strictEqual(coerced.temperature, 0.5)
190
+ })
191
+
192
+ it('should not change value if it matches a supported value', () => {
193
+ const config = { temperature: 0.5 }
194
+ const { coerced } = coerceConfig(config, 'openai', {
195
+ modelId: 'openai/some-model',
196
+ overrides: {
197
+ temperature: { supportedValues: [0, 0.5, 1] },
198
+ },
199
+ })
200
+ assert.strictEqual(coerced.temperature, 0.5)
201
+ })
202
+ })
203
+
204
+ describe('range overrides', () => {
205
+ it('should use overridden range instead of default', () => {
206
+ const config = { temperature: 3 }
207
+ const { coerced } = coerceConfig(config, 'openai', {
208
+ modelId: 'openai/some-model',
209
+ overrides: {
210
+ temperature: { range: { min: 0, max: 0.5 } },
211
+ },
212
+ })
213
+ assert.strictEqual(coerced.temperature, 0.5)
140
214
  })
141
215
  })
142
216
  })