@pwshub/aisdk 0.0.3 → 0.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -2,6 +2,10 @@
2
2
 
3
3
  A thin, unified AI client for OpenAI, Anthropic, Google, DashScope, and DeepSeek with automatic parameter normalization and fallback support.
4
4
 
5
+ [![npm version](https://badge.fury.io/js/@pwshub%2Faisdk.svg)](https://badge.fury.io/js/@pwshub%2Faisdk)
6
+ ![CodeQL](https://github.com/pwshub/aisdk/workflows/CodeQL/badge.svg)
7
+ ![CI test](https://github.com/pwshub/aisdk/workflows/ci-test/badge.svg)
8
+
5
9
  ## Features
6
10
 
7
11
  - **Unified API**: Single interface for multiple AI providers
@@ -241,22 +245,40 @@ const result = await ai.ask({
241
245
 
242
246
  ## Supported Models
243
247
 
244
- This library does not ship with a predefined list of models. Instead, it accepts **any model** from the supported providers:
248
+ The library comes with **30 pre-configured models** from all supported providers:
245
249
 
246
- - **OpenAI**: Any OpenAI model
247
- - **Anthropic**: Any Anthropic model
248
- - **Google**: Any Google model
249
- - **DashScope**: Any DashScope model
250
- - **DeepSeek**: Any DeepSeek model
250
+ - **OpenAI**: gpt-4.1-nano, gpt-4.1-mini, gpt-4.1, gpt-4o, gpt-4o-mini, gpt-5, gpt-5-mini, gpt-5-nano, gpt-5.1, gpt-5.2, gpt-5.4, o3-mini, o4-mini
251
+ - **Anthropic**: claude-haiku-4-5, claude-sonnet-4-6, claude-sonnet-4-5, claude-opus-4-6
252
+ - **Google**: gemini-2.5-flash, gemini-2.5-flash-lite, gemini-2.5-pro, gemini-3.1-pro-preview, gemini-3.1-flash-lite-preview
253
+ - **DashScope**: qwen-flash, qwen3.5-flash, qwen-plus, qwen3.5-plus, qwen-max, qwen3-max
254
+ - **DeepSeek**: deepseek-chat, deepseek-reasoner
251
255
 
252
- ### Loading Models
256
+ ### Managing Models
253
257
 
254
- Models are loaded programmatically via `setModels()` from external sources (CMS, API, or local files for evaluation):
258
+ Models are managed via `addModels()`, `setModels()`, and `listModels()`:
255
259
 
256
260
  ```javascript
257
- import { createAi, setModels } from '@pwshub/aisdk'
261
+ import { createAi, addModels, setModels, listModels } from '@pwshub/aisdk'
262
+
263
+ // List all available models (30 models loaded by default)
264
+ console.log(listModels())
265
+
266
+ // Add more models to the existing list
267
+ addModels([
268
+ {
269
+ id: 'my-custom-model',
270
+ name: 'my-custom-model',
271
+ provider: 'openai',
272
+ input_price: 1,
273
+ output_price: 2,
274
+ cache_price: 0.5,
275
+ max_in: 128000,
276
+ max_out: 16384,
277
+ enable: true,
278
+ },
279
+ ])
258
280
 
259
- // Load models from your CMS or API
281
+ // Replace all models with your own list (e.g., from CMS)
260
282
  const modelsFromCms = await fetch('https://cms.example.com/api/models').then(r => r.json())
261
283
  setModels(modelsFromCms)
262
284
 
@@ -268,6 +290,8 @@ const result = await ai.ask({
268
290
  })
269
291
  ```
270
292
 
293
+ > **Note:** Models are loaded automatically from `src/models.js` when the library is imported. You only need to call `setModels()` to replace the defaults with a custom model list, or `addModels()` to extend them.
294
+
271
295
  ### Model Record Format
272
296
 
273
297
  Each model record should include:
@@ -282,8 +306,6 @@ Each model record should include:
282
306
  - `enable`: Boolean to enable/disable the model
283
307
  - `supportedParams` (optional): Array of supported parameter names
284
308
 
285
- > **Note**: The `examples/` folder includes `models.json` as a reference for running evaluation scripts.
286
-
287
309
  ## Error Handling
288
310
 
289
311
  ```javascript
package/index.d.ts CHANGED
@@ -70,5 +70,6 @@ export interface AiClient {
70
70
  }
71
71
 
72
72
  export function createAi(opts?: AiOptions): AiClient;
73
+ export function addModels(models: ModelRecord[]): void;
73
74
  export function setModels(models: ModelRecord[]): void;
74
75
  export function listModels(): ModelRecord[];
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@pwshub/aisdk",
3
- "version": "0.0.3",
3
+ "version": "0.0.4",
4
4
  "description": "A thin, unified AI client for OpenAI, Anthropic, Google, DashScope, and DeepSeek with automatic param normalization and fallback support",
5
5
  "repository": {
6
6
  "type": "git",
package/src/index.js CHANGED
@@ -38,10 +38,21 @@
38
38
  * },
39
39
  * })
40
40
  *
41
+ * @example Using messages array for multi-turn conversations
42
+ * const result = await ai.ask({
43
+ * model: 'claude-sonnet-4-20250514',
44
+ * apikey: 'your-api-key',
45
+ * messages: [
46
+ * { role: 'user', content: 'What is the capital of Vietnam?' },
47
+ * { role: 'assistant', content: 'The capital of Vietnam is Hanoi.' },
48
+ * { role: 'user', content: 'What is its population?' },
49
+ * ],
50
+ * })
51
+ *
41
52
  */
42
53
 
43
54
  import {
44
- getModel, listModels, setModels,
55
+ getModel, listModels, setModels, addModels,
45
56
  } from './registry.js'
46
57
  import { normalizeConfig } from './config.js'
47
58
  import { coerceConfig } from './coerce.js'
@@ -64,8 +75,9 @@ export {
64
75
  * @typedef {Object} AskParams
65
76
  * @property {string} model - Model ID (must be registered via setModels())
66
77
  * @property {string} apikey - API key for the provider
67
- * @property {string} prompt - The user message
68
- * @property {string} [system] - Optional system prompt
78
+ * @property {string} [prompt] - The user message (alternative to messages)
79
+ * @property {string} [system] - Optional system prompt (used with prompt)
80
+ * @property {import('./providers.js').Message[]} [messages] - Array of messages with role and content (alternative to prompt)
69
81
  * @property {string[]} [fallbacks] - Ordered list of fallback model IDs
70
82
  * @property {Record<string, unknown>} [providerOptions] - Provider-specific options merged into body
71
83
  * @property {number} [temperature]
@@ -152,11 +164,11 @@ const callModel = async (modelId, params, gatewayUrl) => {
152
164
  const normalizedConfig = normalizeConfig(coerced, providerId, supportedParams, modelId)
153
165
 
154
166
  const {
155
- prompt, system, providerOptions = {},
167
+ prompt, system, messages, providerOptions = {},
156
168
  } = params
157
169
 
158
170
  /** @type {import('./providers.js').Message[]} */
159
- const messages = [
171
+ const messageList = messages ?? [
160
172
  ...(system ? [{
161
173
  role: 'system', content: system,
162
174
  }] : []),
@@ -166,7 +178,7 @@ const callModel = async (modelId, params, gatewayUrl) => {
166
178
  ]
167
179
 
168
180
  const url = gatewayUrl ?? adapter.url(modelName, apikey)
169
- const body = adapter.buildBody(modelName, messages, normalizedConfig, providerOptions)
181
+ const body = adapter.buildBody(modelName, messageList, normalizedConfig, providerOptions)
170
182
 
171
183
  let res
172
184
  try {
@@ -267,4 +279,4 @@ export const createAi = (opts = {}) => {
267
279
  }
268
280
  }
269
281
 
270
- export { setModels }
282
+ export { addModels, setModels, listModels }
package/src/models.js ADDED
@@ -0,0 +1,345 @@
1
+ /**
2
+ * @fileoverview Default model registry for @pwshub/aisdk.
3
+ *
4
+ * This module exports a default list of models that are loaded automatically
5
+ * when the library is imported. Users can modify this list via addModels()
6
+ * and setModels() from the main export.
7
+ */
8
+
9
+ /**
10
+ * @typedef {import('./registry.js').ModelRecord} ModelRecord
11
+ */
12
+
13
+ /** @type {ModelRecord[]} */
14
+ export const DEFAULT_MODELS = [
15
+ {
16
+ id: 'claude-haiku-4-5',
17
+ name: 'claude-haiku-4-5',
18
+ provider: 'anthropic',
19
+ input_price: 1,
20
+ output_price: 5,
21
+ cache_price: 0,
22
+ max_in: 200000,
23
+ max_out: 64000,
24
+ enable: true,
25
+ },
26
+ {
27
+ id: 'claude-sonnet-4-6',
28
+ name: 'claude-sonnet-4-6',
29
+ provider: 'anthropic',
30
+ input_price: 3,
31
+ output_price: 15,
32
+ cache_price: 0,
33
+ max_in: 200000,
34
+ max_out: 64000,
35
+ enable: true,
36
+ },
37
+ {
38
+ id: 'claude-sonnet-4-5',
39
+ name: 'claude-sonnet-4-5',
40
+ provider: 'anthropic',
41
+ input_price: 3,
42
+ output_price: 15,
43
+ cache_price: 0,
44
+ max_in: 200000,
45
+ max_out: 1000000,
46
+ enable: true,
47
+ },
48
+ {
49
+ id: 'claude-opus-4-6',
50
+ name: 'claude-opus-4-6',
51
+ provider: 'anthropic',
52
+ input_price: 5,
53
+ output_price: 25,
54
+ cache_price: 0,
55
+ max_in: 200000,
56
+ max_out: 128000,
57
+ enable: true,
58
+ },
59
+ {
60
+ id: 'gemini-2.5-flash',
61
+ name: 'gemini-2.5-flash',
62
+ provider: 'google',
63
+ input_price: 0.3,
64
+ output_price: 2.5,
65
+ cache_price: 0.03,
66
+ max_in: 1048576,
67
+ max_out: 65536,
68
+ enable: true,
69
+ },
70
+ {
71
+ id: 'gemini-2.5-flash-lite',
72
+ name: 'gemini-2.5-flash-lite',
73
+ provider: 'google',
74
+ input_price: 0.1,
75
+ output_price: 0.4,
76
+ cache_price: 0.01,
77
+ max_in: 1048576,
78
+ max_out: 65536,
79
+ enable: true,
80
+ },
81
+ {
82
+ id: 'gemini-2.5-pro',
83
+ name: 'gemini-2.5-pro',
84
+ provider: 'google',
85
+ input_price: 1.25,
86
+ output_price: 10,
87
+ cache_price: 0.125,
88
+ max_in: 1048576,
89
+ max_out: 65536,
90
+ enable: true,
91
+ },
92
+ {
93
+ id: 'gemini-3.1-pro-preview',
94
+ name: 'gemini-3.1-pro-preview',
95
+ provider: 'google',
96
+ input_price: 2,
97
+ output_price: 12,
98
+ cache_price: 0.2,
99
+ max_in: 1048576,
100
+ max_out: 65536,
101
+ enable: true,
102
+ },
103
+ {
104
+ id: 'gemini-3.1-flash-lite-preview',
105
+ name: 'gemini-3.1-flash-lite-preview',
106
+ provider: 'google',
107
+ input_price: 0.25,
108
+ output_price: 1.5,
109
+ cache_price: 0.025,
110
+ max_in: 1048576,
111
+ max_out: 65536,
112
+ enable: true,
113
+ },
114
+ {
115
+ id: 'gpt-4.1-nano',
116
+ name: 'gpt-4.1-nano',
117
+ provider: 'openai',
118
+ input_price: 0.1,
119
+ output_price: 0.4,
120
+ cache_price: 0.025,
121
+ max_in: 1047576,
122
+ max_out: 32768,
123
+ enable: true,
124
+ },
125
+ {
126
+ id: 'gpt-4.1-mini',
127
+ name: 'gpt-4.1-mini',
128
+ provider: 'openai',
129
+ input_price: 0.4,
130
+ output_price: 1.6,
131
+ cache_price: 0.1,
132
+ max_in: 1047576,
133
+ max_out: 32768,
134
+ enable: true,
135
+ },
136
+ {
137
+ id: 'gpt-4.1',
138
+ name: 'gpt-4.1',
139
+ provider: 'openai',
140
+ input_price: 2,
141
+ output_price: 8,
142
+ cache_price: 0.5,
143
+ max_in: 1047576,
144
+ max_out: 32768,
145
+ enable: true,
146
+ },
147
+ {
148
+ id: 'gpt-4o',
149
+ name: 'gpt-4o',
150
+ provider: 'openai',
151
+ input_price: 2.5,
152
+ output_price: 10,
153
+ cache_price: 1.25,
154
+ max_in: 128000,
155
+ max_out: 16384,
156
+ enable: true,
157
+ },
158
+ {
159
+ id: 'gpt-4o-mini',
160
+ name: 'gpt-4o-mini',
161
+ provider: 'openai',
162
+ input_price: 0.15,
163
+ output_price: 0.6,
164
+ cache_price: 0.075,
165
+ max_in: 128000,
166
+ max_out: 16384,
167
+ enable: true,
168
+ },
169
+ {
170
+ id: 'gpt-5',
171
+ name: 'gpt-5',
172
+ provider: 'openai',
173
+ input_price: 1.25,
174
+ output_price: 10,
175
+ cache_price: 0.125,
176
+ max_in: 400000,
177
+ max_out: 128000,
178
+ enable: true,
179
+ },
180
+ {
181
+ id: 'gpt-5-mini',
182
+ name: 'gpt-5-mini',
183
+ provider: 'openai',
184
+ input_price: 0.25,
185
+ output_price: 2,
186
+ cache_price: 0.025,
187
+ max_in: 400000,
188
+ max_out: 128000,
189
+ enable: true,
190
+ },
191
+ {
192
+ id: 'gpt-5-nano',
193
+ name: 'gpt-5-nano',
194
+ provider: 'openai',
195
+ input_price: 0.05,
196
+ output_price: 0.4,
197
+ cache_price: 0.005,
198
+ max_in: 400000,
199
+ max_out: 128000,
200
+ enable: true,
201
+ },
202
+ {
203
+ id: 'gpt-5.1',
204
+ name: 'gpt-5.1',
205
+ provider: 'openai',
206
+ input_price: 1.25,
207
+ output_price: 10,
208
+ cache_price: 0.125,
209
+ max_in: 400000,
210
+ max_out: 128000,
211
+ enable: true,
212
+ },
213
+ {
214
+ id: 'gpt-5.2',
215
+ name: 'gpt-5.2',
216
+ provider: 'openai',
217
+ input_price: 1.75,
218
+ output_price: 14,
219
+ cache_price: 0.175,
220
+ max_in: 400000,
221
+ max_out: 128000,
222
+ enable: true,
223
+ },
224
+ {
225
+ id: 'gpt-5.4',
226
+ name: 'gpt-5.4',
227
+ provider: 'openai',
228
+ input_price: 2.5,
229
+ output_price: 15,
230
+ cache_price: 0.25,
231
+ max_in: 1050000,
232
+ max_out: 128000,
233
+ enable: true,
234
+ },
235
+ {
236
+ id: 'o3-mini',
237
+ name: 'o3-mini',
238
+ provider: 'openai',
239
+ input_price: 1.1,
240
+ output_price: 4.4,
241
+ cache_price: 0.55,
242
+ max_in: 200000,
243
+ max_out: 100000,
244
+ enable: true,
245
+ },
246
+ {
247
+ id: 'o4-mini',
248
+ name: 'o4-mini',
249
+ provider: 'openai',
250
+ input_price: 1.1,
251
+ output_price: 4.4,
252
+ cache_price: 0.275,
253
+ max_in: 200000,
254
+ max_out: 100000,
255
+ enable: true,
256
+ },
257
+ {
258
+ id: 'deepseek-chat',
259
+ name: 'deepseek-chat',
260
+ provider: 'deepseek',
261
+ input_price: 0.28,
262
+ output_price: 0.42,
263
+ cache_price: 0.028,
264
+ max_in: 128000,
265
+ max_out: 8000,
266
+ enable: true,
267
+ },
268
+ {
269
+ id: 'deepseek-reasoner',
270
+ name: 'deepseek-reasoner',
271
+ provider: 'deepseek',
272
+ input_price: 0.28,
273
+ output_price: 0.42,
274
+ cache_price: 0.028,
275
+ max_in: 128000,
276
+ max_out: 64000,
277
+ enable: true,
278
+ },
279
+ {
280
+ id: 'qwen-flash',
281
+ name: 'qwen-flash',
282
+ provider: 'dashscope',
283
+ input_price: 0.05,
284
+ output_price: 0.4,
285
+ cache_price: 0,
286
+ max_in: 995904,
287
+ max_out: 32768,
288
+ enable: true,
289
+ },
290
+ {
291
+ id: 'qwen3.5-flash',
292
+ name: 'qwen3.5-flash',
293
+ provider: 'dashscope',
294
+ input_price: 0.1,
295
+ output_price: 0.4,
296
+ cache_price: 0,
297
+ max_in: 983616,
298
+ max_out: 65536,
299
+ enable: true,
300
+ },
301
+ {
302
+ id: 'qwen-plus',
303
+ name: 'qwen-plus',
304
+ provider: 'dashscope',
305
+ input_price: 0.4,
306
+ output_price: 1.2,
307
+ cache_price: 0,
308
+ max_in: 997952,
309
+ max_out: 32768,
310
+ enable: true,
311
+ },
312
+ {
313
+ id: 'qwen3.5-plus',
314
+ name: 'qwen3.5-plus',
315
+ provider: 'dashscope',
316
+ input_price: 0.4,
317
+ output_price: 2.4,
318
+ cache_price: 0,
319
+ max_in: 991808,
320
+ max_out: 65536,
321
+ enable: true,
322
+ },
323
+ {
324
+ id: 'qwen-max',
325
+ name: 'qwen-max',
326
+ provider: 'dashscope',
327
+ input_price: 1.6,
328
+ output_price: 6.4,
329
+ cache_price: 0,
330
+ max_in: 30720,
331
+ max_out: 8192,
332
+ enable: true,
333
+ },
334
+ {
335
+ id: 'qwen3-max',
336
+ name: 'qwen3-max',
337
+ provider: 'dashscope',
338
+ input_price: 1.2,
339
+ output_price: 6,
340
+ cache_price: 0,
341
+ max_in: 258048,
342
+ max_out: 65536,
343
+ enable: true,
344
+ },
345
+ ]
package/src/providers.js CHANGED
@@ -139,10 +139,19 @@ const google = {
139
139
  role: m.role === 'assistant' ? 'model' : 'user',
140
140
  parts: [{ text: m.content }],
141
141
  }))
142
+
143
+ // Thinking models (e.g., gemini-2.5-pro) need more tokens for reasoning
144
+ // Set a higher default maxOutputTokens if not specified
145
+ const hasMaxTokens = config.generationConfig?.maxOutputTokens !== undefined
146
+ const defaultGenerationConfig = hasMaxTokens ? {} : { maxOutputTokens: 8192 }
147
+
142
148
  return {
143
149
  contents,
144
150
  ...(system && { systemInstruction: { parts: [{ text: system }] } }),
145
- ...config, // includes nested generationConfig
151
+ generationConfig: {
152
+ ...defaultGenerationConfig,
153
+ ...config.generationConfig,
154
+ },
146
155
  ...providerOptions, // safetySettings, thinkingConfig, etc.
147
156
  }
148
157
  },
@@ -164,21 +173,40 @@ const google = {
164
173
  throw new Error('Google response missing content')
165
174
  }
166
175
 
167
- // Gemini 2.5 Pro may return parts as array or direct text
176
+ // Gemini 2.5 Pro (thinking model) may return content without parts
177
+ // when all tokens were used for reasoning
178
+ if (!content.parts || (Array.isArray(content.parts) && content.parts.length === 0)) {
179
+ const thoughts = data.usageMetadata?.thoughtsTokenCount ?? 0
180
+ const totalTokens = data.usageMetadata?.totalTokenCount ?? 0
181
+
182
+ if (finishReason === 'MAX_TOKENS' && thoughts > 0) {
183
+ throw new Error(
184
+ `Google model used ${thoughts}/${totalTokens} tokens for internal reasoning and has no tokens left for output. ` +
185
+ `Increase maxTokens to allow room for both thinking and response.`
186
+ )
187
+ }
188
+
189
+ throw new Error('Google response has no content parts')
190
+ }
191
+
192
+ // Gemini may return parts as array or direct text
168
193
  if (Array.isArray(content.parts)) {
169
- const text = content.parts[0]?.text
170
- if (!text) {
171
- // Model may have used all tokens for reasoning (thoughtsTokenCount)
194
+ // Concatenate all text parts (model may return multiple text blocks)
195
+ const texts = content.parts.filter((p) => p.text).map((p) => p.text)
196
+ if (texts.length === 0) {
172
197
  const thoughts = data.usageMetadata?.thoughtsTokenCount ?? 0
173
198
  if (finishReason === 'MAX_TOKENS' && thoughts > 0) {
174
- throw new Error(`Google response missing content (used ${thoughts} tokens for reasoning, maxTokens may be too low)`)
199
+ throw new Error(
200
+ `Google model used ${thoughts}/${data.usageMetadata?.totalTokenCount ?? 0} tokens for internal reasoning and has no tokens left for output. ` +
201
+ `Increase maxTokens to allow room for both thinking and response.`
202
+ )
175
203
  }
176
- throw new Error('Google response missing content')
204
+ throw new Error('Google response has no text content')
177
205
  }
178
- return text
206
+ return texts.join('')
179
207
  }
180
208
 
181
- // Some models may return content directly
209
+ // Some models may return content directly as string
182
210
  if (typeof content.parts === 'string') {
183
211
  return content.parts
184
212
  }
package/src/registry.js CHANGED
@@ -1,9 +1,10 @@
1
1
  /**
2
2
  * @fileoverview Model registry — in-memory store for model records.
3
3
  *
4
- * Models are loaded programmatically via setModels() from external sources
5
- * (CMS, API, or local files for evaluation). This module provides O(1) lookups
6
- * at runtime via a Map indexed by model ID.
4
+ * Default models are loaded automatically from ./models.js at import time.
5
+ * Users can modify the registry via addModels() and setModels().
6
+ *
7
+ * This module provides O(1) lookups at runtime via a Map indexed by model ID.
7
8
  *
8
9
  * `supportedParams` is optional per record. When absent, the provider's
9
10
  * default param set is used.
@@ -11,6 +12,8 @@
11
12
  * @typedef {'openai'|'anthropic'|'google'|'dashscope'|'deepseek'} ProviderId
12
13
  */
13
14
 
15
+ import { DEFAULT_MODELS } from './models.js'
16
+
14
17
  /**
15
18
  * Mirrors the Directus collection schema exactly.
16
19
  * `supportedParams` is optional — added later via Directus field.
@@ -48,6 +51,17 @@ const VALID_PROVIDERS = ['openai', 'anthropic', 'google', 'dashscope', 'deepseek
48
51
  /** @type {Map<string, ModelRecord>} */
49
52
  let REGISTRY = new Map()
50
53
 
54
+ /**
55
+ * Initializes the registry with default models.
56
+ * Called automatically at module import.
57
+ */
58
+ const initRegistry = () => {
59
+ REGISTRY = new Map(DEFAULT_MODELS.map((model) => [model.id, model]))
60
+ }
61
+
62
+ // Initialize with default models on import
63
+ initRegistry()
64
+
51
65
  /**
52
66
  * Validates a single model record structure and types.
53
67
  *
@@ -143,11 +157,33 @@ export const listModels = () =>
143
157
  [...REGISTRY.values()].filter((m) => m.enable)
144
158
 
145
159
  /**
146
- * Programmatically sets the model registry from an array of model records.
147
- * Use this when loading models from a CMS or other external source instead of
148
- * the built-in models.json file.
160
+ * Adds one or more models to the registry.
161
+ * Existing models with the same ID are overwritten.
162
+ *
163
+ * @param {ModelRecord[]} models - Array of model records to add
164
+ * @throws {Error} When models is not an array or contains invalid records
165
+ */
166
+ export const addModels = (models) => {
167
+ if (!Array.isArray(models)) {
168
+ throw new Error(`addModels expects an array. Got: ${typeof models}`)
169
+ }
170
+
171
+ // Validate each model record
172
+ models.forEach((model, index) => {
173
+ validateModelRecord(model, index)
174
+ })
175
+
176
+ // Add models to the registry
177
+ models.forEach((model) => {
178
+ REGISTRY.set(model.id, model)
179
+ })
180
+ }
181
+
182
+ /**
183
+ * Replaces the entire model registry with a new list of models.
184
+ * Use this to load models from a CMS or other external source.
149
185
  *
150
- * @param {ModelRecord[]} models - Array of model records (same format as models.json)
186
+ * @param {ModelRecord[]} models - Array of model records
151
187
  * @throws {Error} When models is not an array or contains invalid records
152
188
  */
153
189
  export const setModels = (models) => {
package/src/validation.js CHANGED
@@ -9,7 +9,7 @@
9
9
  * @typedef {Object} AskParams
10
10
  * @property {string} model
11
11
  * @property {string} apikey
12
- * @property {string} prompt
12
+ * @property {string} [prompt]
13
13
  * @property {string} [system]
14
14
  * @property {import('../index.js').Message[]} [messages]
15
15
  * @property {number} [temperature]
@@ -42,8 +42,14 @@ export const validateAskOptions = (params) => {
42
42
  errors.push('"apikey" must be a non-empty string')
43
43
  }
44
44
 
45
- if (!params.prompt || typeof params.prompt !== 'string') {
46
- errors.push('"prompt" must be a non-empty string')
45
+ // Either prompt or messages must be provided (but not both required)
46
+ if (params.prompt === undefined && params.messages === undefined) {
47
+ errors.push('either "prompt" or "messages" must be provided')
48
+ }
49
+
50
+ // When using messages, system can still be provided (will be prepended)
51
+ if (params.prompt !== undefined && typeof params.prompt !== 'string') {
52
+ errors.push('"prompt" must be a string')
47
53
  }
48
54
 
49
55
  // Optional string fields