llm-strings 1.1.1 → 1.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/validate.cjs CHANGED
@@ -1,10 +1,667 @@
1
- "use strict";Object.defineProperty(exports, "__esModule", {value: true});
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
2
19
 
3
- var _chunkRSUXM42Xcjs = require('./chunk-RSUXM42X.cjs');
4
- require('./chunk-MGWGNZDJ.cjs');
5
- require('./chunk-N6NVBE43.cjs');
6
- require('./chunk-NSCBY4VD.cjs');
20
+ // src/validate.ts
21
+ var validate_exports = {};
22
+ __export(validate_exports, {
23
+ validate: () => validate
24
+ });
25
+ module.exports = __toCommonJS(validate_exports);
7
26
 
27
// src/parse.ts
// Parses an llm:// connection string into its components using the WHATWG
// URL parser. Userinfo maps to label/apiKey; query params are collected
// verbatim as strings. Throws on any scheme other than "llm:".
function parse(connectionString) {
  const url = new URL(connectionString);
  if (url.protocol !== "llm:") {
    throw new Error(
      `Invalid scheme: expected "llm://", got "${url.protocol}//"`
    );
  }
  return {
    raw: connectionString,
    host: url.hostname,
    model: url.pathname.replace(/^\//, ""),
    label: url.username || void 0,
    apiKey: url.password || void 0,
    params: Object.fromEntries(url.searchParams)
  };
}
8
52
 
9
- exports.validate = _chunkRSUXM42Xcjs.validate;
53
// src/provider-core.ts
// Maps a hostname to a known provider id via ordered substring rules.
// More specific hosts (gateways, bedrock) are checked before generic
// provider names; returns undefined for unrecognized hosts.
function detectProvider(host) {
  const rules = [
    [["openrouter"], "openrouter"],
    [["gateway.ai.vercel"], "vercel"],
    [["amazonaws", "bedrock"], "bedrock"],
    [["openai"], "openai"],
    [["anthropic", "claude"], "anthropic"],
    [["googleapis", "google"], "google"],
    [["mistral"], "mistral"],
    [["cohere"], "cohere"]
  ];
  for (const [needles, provider] of rules) {
    if (needles.some((needle) => host.includes(needle))) return provider;
  }
  return void 0;
}
65
// Maps common alternate spellings (snake/camelCase and provider-specific
// variants) to the canonical parameter name. Applied first by normalize(),
// before any provider-specific renaming.
var ALIASES = {
  // temperature
  temp: "temperature",
  // max_tokens
  max: "max_tokens",
  max_out: "max_tokens",
  max_output: "max_tokens",
  max_output_tokens: "max_tokens",
  max_completion_tokens: "max_tokens",
  maxOutputTokens: "max_tokens",
  maxTokens: "max_tokens",
  // top_p
  topp: "top_p",
  topP: "top_p",
  nucleus: "top_p",
  // top_k
  topk: "top_k",
  topK: "top_k",
  // frequency_penalty
  freq: "frequency_penalty",
  freq_penalty: "frequency_penalty",
  frequencyPenalty: "frequency_penalty",
  repetition_penalty: "frequency_penalty",
  // presence_penalty
  pres: "presence_penalty",
  pres_penalty: "presence_penalty",
  presencePenalty: "presence_penalty",
  // stop
  stop_sequences: "stop",
  stopSequences: "stop",
  stop_sequence: "stop",
  // seed
  random_seed: "seed",
  randomSeed: "seed",
  // n (completions count)
  candidateCount: "n",
  candidate_count: "n",
  num_completions: "n",
  // effort / reasoning
  reasoning_effort: "effort",
  reasoning: "effort",
  // cache
  cache_control: "cache",
  cacheControl: "cache",
  cachePoint: "cache",
  cache_point: "cache"
};
112
// For each provider, maps canonical parameter names to that provider's
// native parameter name. normalize() uses this to rename params after
// alias resolution; validate() uses its values as the set of known
// provider-native params.
var PROVIDER_PARAMS = {
  openai: {
    temperature: "temperature",
    max_tokens: "max_tokens",
    top_p: "top_p",
    frequency_penalty: "frequency_penalty",
    presence_penalty: "presence_penalty",
    stop: "stop",
    n: "n",
    seed: "seed",
    stream: "stream",
    effort: "reasoning_effort"
  },
  anthropic: {
    temperature: "temperature",
    max_tokens: "max_tokens",
    top_p: "top_p",
    top_k: "top_k",
    stop: "stop_sequences",
    stream: "stream",
    effort: "effort",
    cache: "cache_control",
    cache_ttl: "cache_ttl"
  },
  google: {
    // Google's generation config uses camelCase names.
    temperature: "temperature",
    max_tokens: "maxOutputTokens",
    top_p: "topP",
    top_k: "topK",
    frequency_penalty: "frequencyPenalty",
    presence_penalty: "presencePenalty",
    stop: "stopSequences",
    n: "candidateCount",
    stream: "stream",
    seed: "seed",
    responseMimeType: "responseMimeType",
    responseSchema: "responseSchema"
  },
  mistral: {
    temperature: "temperature",
    max_tokens: "max_tokens",
    top_p: "top_p",
    frequency_penalty: "frequency_penalty",
    presence_penalty: "presence_penalty",
    stop: "stop",
    n: "n",
    seed: "random_seed",
    stream: "stream",
    safe_prompt: "safe_prompt",
    min_tokens: "min_tokens"
  },
  cohere: {
    temperature: "temperature",
    max_tokens: "max_tokens",
    top_p: "p",
    top_k: "k",
    frequency_penalty: "frequency_penalty",
    presence_penalty: "presence_penalty",
    stop: "stop_sequences",
    stream: "stream",
    seed: "seed"
  },
  bedrock: {
    // Bedrock Converse API uses camelCase
    temperature: "temperature",
    max_tokens: "maxTokens",
    top_p: "topP",
    top_k: "topK",
    // Claude models via additionalModelRequestFields
    stop: "stopSequences",
    stream: "stream",
    cache: "cache_control",
    cache_ttl: "cache_ttl"
  },
  openrouter: {
    // OpenAI-compatible API with extra routing params
    temperature: "temperature",
    max_tokens: "max_tokens",
    top_p: "top_p",
    top_k: "top_k",
    frequency_penalty: "frequency_penalty",
    presence_penalty: "presence_penalty",
    stop: "stop",
    n: "n",
    seed: "seed",
    stream: "stream",
    effort: "reasoning_effort"
  },
  vercel: {
    // OpenAI-compatible gateway
    temperature: "temperature",
    max_tokens: "max_tokens",
    top_p: "top_p",
    top_k: "top_k",
    frequency_penalty: "frequency_penalty",
    presence_penalty: "presence_penalty",
    stop: "stop",
    n: "n",
    seed: "seed",
    stream: "stream",
    effort: "reasoning_effort"
  }
};
215
// Validation specs per provider, keyed by the provider-native param name.
// Each spec gives: type ("number" | "boolean" | "string"), optional numeric
// min/max, optional enum `values`, an optional default, and a description.
// Consumed by validate() for type, range, and enum checks.
var PARAM_SPECS = {
  openai: {
    temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
    frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
    presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
    stop: { type: "string", description: "Stop sequences" },
    n: { type: "number", min: 1, default: 1, description: "Completions count" },
    seed: { type: "number", description: "Random seed" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    reasoning_effort: {
      type: "string",
      values: ["none", "minimal", "low", "medium", "high", "xhigh"],
      default: "medium",
      description: "Reasoning effort"
    }
  },
  anthropic: {
    // Note: Anthropic's temperature range is 0-1, not 0-2.
    temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
    top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
    stop_sequences: { type: "string", description: "Stop sequences" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    effort: { type: "string", values: ["low", "medium", "high", "max"], default: "medium", description: "Thinking effort" },
    cache_control: { type: "string", values: ["ephemeral"], default: "ephemeral", description: "Cache control" },
    cache_ttl: { type: "string", values: ["5m", "1h"], default: "5m", description: "Cache TTL" }
  },
  google: {
    temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
    maxOutputTokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    topP: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
    topK: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
    frequencyPenalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
    presencePenalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
    stopSequences: { type: "string", description: "Stop sequences" },
    candidateCount: { type: "number", min: 1, default: 1, description: "Candidate count" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    seed: { type: "number", description: "Random seed" },
    responseMimeType: { type: "string", description: "Response MIME type" },
    responseSchema: { type: "string", description: "Response schema" }
  },
  mistral: {
    temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
    frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
    presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
    stop: { type: "string", description: "Stop sequences" },
    n: { type: "number", min: 1, default: 1, description: "Completions count" },
    random_seed: { type: "number", description: "Random seed" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    safe_prompt: { type: "boolean", default: false, description: "Enable safe prompt" },
    min_tokens: { type: "number", min: 0, default: 0, description: "Minimum tokens" }
  },
  cohere: {
    temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling (p)" },
    k: { type: "number", min: 0, max: 500, default: 40, description: "Top-K sampling (k)" },
    frequency_penalty: { type: "number", min: 0, max: 1, default: 0, description: "Penalize frequent tokens" },
    presence_penalty: { type: "number", min: 0, max: 1, default: 0, description: "Penalize repeated topics" },
    stop_sequences: { type: "string", description: "Stop sequences" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    seed: { type: "number", description: "Random seed" }
  },
  bedrock: {
    // Converse API inferenceConfig params
    temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
    maxTokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    topP: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
    topK: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
    stopSequences: { type: "string", description: "Stop sequences" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    cache_control: { type: "string", values: ["ephemeral"], default: "ephemeral", description: "Cache control" },
    cache_ttl: { type: "string", values: ["5m", "1h"], default: "5m", description: "Cache TTL" }
  },
  openrouter: {
    // Loose validation — proxies to many providers with varying ranges
    temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
    top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
    frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
    presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
    stop: { type: "string", description: "Stop sequences" },
    n: { type: "number", min: 1, default: 1, description: "Completions count" },
    seed: { type: "number", description: "Random seed" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    reasoning_effort: {
      type: "string",
      values: ["none", "minimal", "low", "medium", "high", "xhigh"],
      default: "medium",
      description: "Reasoning effort"
    }
  },
  vercel: {
    // Loose validation — proxies to many providers with varying ranges
    temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
    top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
    frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
    presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
    stop: { type: "string", description: "Stop sequences" },
    n: { type: "number", min: 1, default: 1, description: "Completions count" },
    seed: { type: "number", description: "Random seed" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    reasoning_effort: {
      type: "string",
      values: ["none", "minimal", "low", "medium", "high", "xhigh"],
      default: "medium",
      description: "Reasoning effort"
    }
  }
};
332
// True when the model name (the segment after the last "/", e.g.
// "openai/o3-mini") starts with an OpenAI reasoning-family prefix o1/o3/o4.
function isReasoningModel(model) {
  const lastSlash = model.lastIndexOf("/");
  const bareName = lastSlash === -1 ? model : model.slice(lastSlash + 1);
  return /^o[134]/.test(bareName);
}
336
// Providers that can serve OpenAI models: OpenAI itself or a gateway.
function canHostOpenAIModels(provider) {
  return ["openai", "openrouter", "vercel"].includes(provider);
}
339
// Gateways proxy to an upstream provider that may be named in the model id.
function isGatewayProvider(provider) {
  return ["openrouter", "vercel"].includes(provider);
}
342
// Extracts the upstream provider from a gateway model id such as
// "openai/gpt-4o". Returns undefined when there is no non-empty prefix or
// the prefix is not a directly supported provider.
function detectGatewaySubProvider(model) {
  const slash = model.indexOf("/");
  if (slash < 1) return void 0;
  const prefix = model.slice(0, slash);
  const direct = new Set(["openai", "anthropic", "google", "mistral", "cohere"]);
  return direct.has(prefix) ? prefix : void 0;
}
349
// Canonical sampling/penalty params that validate() reports as errors when
// the target is an OpenAI reasoning model (see isReasoningModel).
var REASONING_MODEL_UNSUPPORTED = /* @__PURE__ */ new Set([
  "temperature",
  "top_p",
  "frequency_penalty",
  "presence_penalty",
  "n"
]);
356
// Returns the model-family segment of a Bedrock model id ("anthropic",
// "meta", "amazon", "mistral", "cohere", "ai21"), skipping an optional
// region/routing prefix ("us.", "eu.", "apac.", "global.").
// Returns undefined for unrecognized families.
function detectBedrockModelFamily(model) {
  const segments = model.split(".");
  let candidate = segments[0];
  const regionPrefixes = ["us", "eu", "apac", "global"];
  if (segments.length > 1 && regionPrefixes.includes(candidate)) {
    candidate = segments[1];
  }
  const families = new Set([
    "anthropic",
    "meta",
    "amazon",
    "mistral",
    "cohere",
    "ai21"
  ]);
  return families.has(candidate) ? candidate : void 0;
}
372
// Prompt caching on Bedrock is limited to Anthropic Claude models and
// Amazon Nova models; every other family returns false.
function bedrockSupportsCaching(model) {
  switch (detectBedrockModelFamily(model)) {
    case "anthropic":
      return true;
    case "amazon":
      return model.includes("nova");
    default:
      return false;
  }
}
378
// Per-provider value that normalize() emits for the canonical `cache`
// param; void 0 means the provider takes no explicit cache param, so the
// param is dropped during normalization.
var CACHE_VALUES = {
  openai: void 0,
  // OpenAI auto-caches; no explicit param
  anthropic: "ephemeral",
  google: void 0,
  // Google uses explicit caching API, not a param
  mistral: void 0,
  cohere: void 0,
  bedrock: "ephemeral",
  // Supported for Claude models on Bedrock
  openrouter: void 0,
  // Depends on underlying provider
  vercel: void 0
  // Depends on underlying provider
};
// Allowed cache TTL strings per provider; void 0 means TTLs are not
// supported, so normalize() never emits cache_ttl for that provider.
var CACHE_TTLS = {
  openai: void 0,
  anthropic: ["5m", "1h"],
  google: void 0,
  mistral: void 0,
  cohere: void 0,
  bedrock: ["5m", "1h"],
  // Claude on Bedrock uses same TTLs as direct Anthropic
  openrouter: void 0,
  vercel: void 0
};
// Matches duration-style cache values such as "5m" or "1h".
var DURATION_RE = /^\d+[mh]$/;
405
+
406
// src/normalize.ts
/**
 * Rewrites the parsed config's params into provider-native form.
 *
 * Per-param pipeline: alias -> canonical name, special-case `cache`,
 * canonical -> provider-specific rename, then the OpenAI reasoning-model
 * max_tokens rename. Returns the rewritten config plus the detected
 * provider / gateway sub-provider and, when options.verbose is set, a
 * list describing each rename performed.
 */
function normalize(config, options = {}) {
  const provider = detectProvider(config.host);
  // Gateway hosts (openrouter/vercel) may name an upstream provider in the
  // model id, e.g. "anthropic/claude-3".
  const subProvider = provider && isGatewayProvider(provider) ? detectGatewaySubProvider(config.model) : void 0;
  const changes = [];
  const params = {};
  for (const [rawKey, value] of Object.entries(config.params)) {
    let key = rawKey;
    // 1) Alias resolution (temp -> temperature, maxTokens -> max_tokens, ...).
    if (ALIASES[key]) {
      const canonical = ALIASES[key];
      if (options.verbose) {
        changes.push({
          from: key,
          to: canonical,
          value,
          reason: `alias: "${key}" \u2192 "${canonical}"`
        });
      }
      key = canonical;
    }
    // 2) `cache` is special: both the emitted value and the target key are
    // provider-dependent.
    if (key === "cache" && provider) {
      let cacheValue = CACHE_VALUES[provider];
      // Bedrock only supports caching for some model families.
      if (provider === "bedrock" && !bedrockSupportsCaching(config.model)) {
        cacheValue = void 0;
      }
      if (!cacheValue) {
        // Provider takes no explicit cache param: drop it entirely.
        if (options.verbose) {
          changes.push({
            from: "cache",
            to: "(dropped)",
            value,
            reason: `${provider} does not use a cache param for this model (caching is automatic or unsupported)`
          });
        }
        continue;
      }
      const isBool = value === "true" || value === "1" || value === "yes";
      const isDuration = DURATION_RE.test(value);
      if (isBool || isDuration) {
        const providerKey = PROVIDER_PARAMS[provider]?.["cache"] ?? "cache";
        if (options.verbose) {
          changes.push({
            from: "cache",
            to: providerKey,
            value: cacheValue,
            reason: `cache=${value} \u2192 ${providerKey}=${cacheValue} for ${provider}`
          });
        }
        params[providerKey] = cacheValue;
        // A duration value (e.g. "1h") additionally sets cache_ttl where the
        // provider supports TTLs.
        if (isDuration && CACHE_TTLS[provider]) {
          if (options.verbose) {
            changes.push({
              from: "cache",
              to: "cache_ttl",
              value,
              reason: `cache=${value} \u2192 cache_ttl=${value} for ${provider}`
            });
          }
          params["cache_ttl"] = value;
        }
        continue;
      }
      // NOTE(review): a cache value that is neither boolean-like nor a
      // duration falls through to the generic renaming below and is kept
      // verbatim — presumably intentional; confirm.
    }
    // 3) Canonical -> provider-native rename (e.g. stop -> stop_sequences).
    if (provider && PROVIDER_PARAMS[provider]) {
      const providerKey = PROVIDER_PARAMS[provider][key];
      if (providerKey && providerKey !== key) {
        if (options.verbose) {
          changes.push({
            from: key,
            to: providerKey,
            value,
            reason: `${provider} uses "${providerKey}" instead of "${key}"`
          });
        }
        key = providerKey;
      }
    }
    // 4) OpenAI reasoning models (o1/o3/o4) take max_completion_tokens.
    if (provider && canHostOpenAIModels(provider) && isReasoningModel(config.model) && key === "max_tokens") {
      if (options.verbose) {
        changes.push({
          from: "max_tokens",
          to: "max_completion_tokens",
          value,
          reason: "OpenAI reasoning models use max_completion_tokens instead of max_tokens"
        });
      }
      key = "max_completion_tokens";
    }
    params[key] = value;
  }
  return {
    config: { ...config, params },
    provider,
    subProvider,
    changes
  };
}
503
+
504
// src/validate.ts
// Inverts a provider's canonical->specific param map into specific->canonical.
function buildReverseParamMap(provider) {
  const entries = Object.entries(PROVIDER_PARAMS[provider]);
  return Object.fromEntries(
    entries.map(([canonical, specific]) => [specific, canonical])
  );
}
514
// Resolves a gateway-native param name to the sub-provider's spec: map it
// back to its canonical name, then forward to the sub-provider's native
// name. `spec` is undefined when the sub-provider has no such param.
function lookupSubProviderSpec(gatewayParamName, gatewayReverseMap, subProvider) {
  const canonical = gatewayReverseMap[gatewayParamName] ?? gatewayParamName;
  const subProviderKey = PROVIDER_PARAMS[subProvider]?.[canonical];
  const spec = subProviderKey ? PARAM_SPECS[subProvider]?.[subProviderKey] : void 0;
  return { spec, canonical };
}
520
// Collects the gateway-native names of params whose canonical form the
// sub-provider also supports; anything else counts as unknown.
function buildSubProviderKnownParams(gateway, subProvider) {
  const supportedCanonicals = new Set(Object.keys(PROVIDER_PARAMS[subProvider]));
  const gatewayNames = Object.entries(PROVIDER_PARAMS[gateway])
    .filter(([canonical]) => supportedCanonicals.has(canonical))
    .map(([, gatewaySpecific]) => gatewaySpecific);
  return new Set(gatewayNames);
}
534
/**
 * Validates an llm:// connection string and returns a list of issues, each
 * shaped { param, value, message, severity }. An empty array means no
 * problems were found. Unknown hosts/params are warnings unless
 * options.strict, in which case they are errors.
 */
function validate(connectionString, options = {}) {
  const parsed = parse(connectionString);
  // Params are checked in their normalized, provider-native form.
  const { config, provider, subProvider } = normalize(parsed);
  const issues = [];
  if (!provider) {
    // Without a recognized provider there are no specs to check against.
    issues.push({
      param: "host",
      value: config.host,
      message: `Unknown provider for host "${config.host}". Validation skipped.`,
      severity: options.strict ? "error" : "warning"
    });
    return issues;
  }
  // For gateways, prefer the upstream provider's specs when the model id
  // names one (e.g. "anthropic/claude-3" via openrouter).
  const effectiveProvider = subProvider ?? provider;
  const specs = PARAM_SPECS[effectiveProvider];
  const gatewayReverseMap = subProvider ? buildReverseParamMap(provider) : void 0;
  const knownParams = subProvider ? buildSubProviderKnownParams(provider, subProvider) : new Set(Object.values(PROVIDER_PARAMS[provider]));
  for (const [key, value] of Object.entries(config.params)) {
    // OpenAI reasoning models: classic sampling params are rejected outright.
    if (canHostOpenAIModels(provider) && isReasoningModel(config.model) && REASONING_MODEL_UNSUPPORTED.has(key)) {
      issues.push({
        param: key,
        value,
        message: `"${key}" is not supported by OpenAI reasoning model "${config.model}". Use "reasoning_effort" instead of temperature for controlling output.`,
        severity: "error"
      });
      continue;
    }
    // Bedrock restrictions depend on the underlying model family.
    if (provider === "bedrock") {
      const family = detectBedrockModelFamily(config.model);
      if (key === "topK" && family && family !== "anthropic" && family !== "cohere" && family !== "mistral") {
        issues.push({
          param: key,
          value,
          message: `"topK" is not supported by ${family} models on Bedrock.`,
          severity: "error"
        });
        continue;
      }
      if (key === "cache_control" && !bedrockSupportsCaching(config.model)) {
        issues.push({
          param: key,
          value,
          message: `Prompt caching is only supported for Anthropic Claude and Amazon Nova models on Bedrock, not ${family ?? "unknown"} models.`,
          severity: "error"
        });
        continue;
      }
    }
    // Param is neither provider-native nor present in the spec table.
    if (!knownParams.has(key) && !specs[key]) {
      issues.push({
        param: key,
        value,
        message: `Unknown param "${key}" for ${effectiveProvider}.`,
        severity: options.strict ? "error" : "warning"
      });
      continue;
    }
    let spec = specs[key];
    // Gateway params may need mapping back to canonical and then to the
    // sub-provider's native name before a spec is found.
    if (subProvider && gatewayReverseMap && !spec) {
      const result = lookupSubProviderSpec(
        key,
        gatewayReverseMap,
        subProvider
      );
      spec = result.spec;
    }
    if (!spec) continue;
    // Anthropic (direct, or Claude on Bedrock) forbids combining temperature
    // with top_p; the conflict is reported once, from the temperature key.
    if ((effectiveProvider === "anthropic" || provider === "bedrock" && detectBedrockModelFamily(config.model) === "anthropic") && (key === "temperature" || key === "top_p" || key === "topP")) {
      const otherKey = key === "temperature" ? provider === "bedrock" ? "topP" : "top_p" : "temperature";
      if (key === "temperature" && config.params[otherKey] !== void 0) {
        issues.push({
          param: key,
          value,
          message: `Cannot specify both "temperature" and "${otherKey}" for Anthropic models.`,
          severity: "error"
        });
      }
    }
    // Type / range / enum checks driven by the spec.
    if (spec.type === "number") {
      const num = Number(value);
      if (isNaN(num)) {
        issues.push({
          param: key,
          value,
          message: `"${key}" should be a number, got "${value}".`,
          severity: "error"
        });
        continue;
      }
      if (spec.min !== void 0 && num < spec.min) {
        issues.push({
          param: key,
          value,
          message: `"${key}" must be >= ${spec.min}, got ${num}.`,
          severity: "error"
        });
      }
      if (spec.max !== void 0 && num > spec.max) {
        issues.push({
          param: key,
          value,
          message: `"${key}" must be <= ${spec.max}, got ${num}.`,
          severity: "error"
        });
      }
    }
    if (spec.type === "boolean") {
      if (!["true", "false", "0", "1"].includes(value)) {
        issues.push({
          param: key,
          value,
          message: `"${key}" should be a boolean (true/false), got "${value}".`,
          severity: "error"
        });
      }
    }
    if (spec.type === "string" && spec.values) {
      if (!spec.values.includes(value)) {
        issues.push({
          param: key,
          value,
          message: `"${key}" must be one of [${spec.values.join(", ")}], got "${value}".`,
          severity: "error"
        });
      }
    }
  }
  return issues;
}
663
// Annotate the CommonJS export names for ESM import in node:
// (never evaluated at runtime — the `0 &&` guard short-circuits; the
// literal exists only so Node can statically detect the named exports)
0 && (module.exports = {
  validate
});
10
667
  //# sourceMappingURL=validate.cjs.map