llm-strings 1.0.1 → 1.1.0

Files changed (54)
  1. package/README.md +87 -12
  2. package/dist/chunk-6P5GSSNW.js +176 -0
  3. package/dist/chunk-6P5GSSNW.js.map +1 -0
  4. package/dist/chunk-FCEV23OT.js +37 -0
  5. package/dist/chunk-FCEV23OT.js.map +1 -0
  6. package/dist/chunk-MGWGNZDJ.cjs +116 -0
  7. package/dist/chunk-MGWGNZDJ.cjs.map +1 -0
  8. package/dist/chunk-MPIHGH6L.js +116 -0
  9. package/dist/chunk-MPIHGH6L.js.map +1 -0
  10. package/dist/chunk-N6NVBE43.cjs +37 -0
  11. package/dist/chunk-N6NVBE43.cjs.map +1 -0
  12. package/dist/chunk-NSCBY4VD.cjs +370 -0
  13. package/dist/chunk-NSCBY4VD.cjs.map +1 -0
  14. package/dist/chunk-RR3VXIW2.cjs +176 -0
  15. package/dist/chunk-RR3VXIW2.cjs.map +1 -0
  16. package/dist/chunk-RSUXM42X.cjs +180 -0
  17. package/dist/chunk-RSUXM42X.cjs.map +1 -0
  18. package/dist/chunk-UYMVUTLV.js +180 -0
  19. package/dist/chunk-UYMVUTLV.js.map +1 -0
  20. package/dist/chunk-XID353H7.js +370 -0
  21. package/dist/chunk-XID353H7.js.map +1 -0
  22. package/dist/index.cjs +46 -811
  23. package/dist/index.cjs.map +1 -1
  24. package/dist/index.d.cts +5 -80
  25. package/dist/index.d.ts +5 -80
  26. package/dist/index.js +29 -753
  27. package/dist/index.js.map +1 -1
  28. package/dist/normalize.cjs +8 -0
  29. package/dist/normalize.cjs.map +1 -0
  30. package/dist/normalize.d.cts +33 -0
  31. package/dist/normalize.d.ts +33 -0
  32. package/dist/normalize.js +8 -0
  33. package/dist/normalize.js.map +1 -0
  34. package/dist/parse.cjs +9 -0
  35. package/dist/parse.cjs.map +1 -0
  36. package/dist/parse.d.cts +32 -0
  37. package/dist/parse.d.ts +32 -0
  38. package/dist/parse.js +9 -0
  39. package/dist/parse.js.map +1 -0
  40. package/dist/provider-core-BUaKKLpd.d.cts +53 -0
  41. package/dist/provider-core-BUaKKLpd.d.ts +53 -0
  42. package/dist/providers.cjs +40 -560
  43. package/dist/providers.cjs.map +1 -1
  44. package/dist/providers.d.cts +4 -42
  45. package/dist/providers.d.ts +4 -42
  46. package/dist/providers.js +23 -504
  47. package/dist/providers.js.map +1 -1
  48. package/dist/validate.cjs +10 -0
  49. package/dist/validate.cjs.map +1 -0
  50. package/dist/validate.d.cts +21 -0
  51. package/dist/validate.d.ts +21 -0
  52. package/dist/validate.js +10 -0
  53. package/dist/validate.js.map +1 -0
  54. package/package.json +33 -1
package/README.md CHANGED
@@ -289,7 +289,7 @@ const issues = validate("llm://openrouter.ai/openai/o3?temp=0.7");
  | Cohere | `api.cohere.com` | snake_case |
  | AWS Bedrock | `bedrock-runtime.{region}.amazonaws.com` | camelCase |
  | OpenRouter | `openrouter.ai` | snake_case |
- | Vercel AI | `gateway.ai.vercel.sh` | snake_case |
+ | Vercel AI | `gateway.ai.vercel.app` | snake_case |

  Gateways like OpenRouter and Vercel route to any upstream provider. Bedrock hosts models from multiple families (Anthropic, Meta, Amazon, Mistral, Cohere, AI21) with cross-region inference support. Each provider's parameter names differ — normalization handles the translation automatically.

@@ -297,17 +297,32 @@ Gateways like OpenRouter and Vercel route to any upstream provider. Bedrock host

  Use these shortcuts in your connection strings — they expand automatically during normalization:

- | Shorthand | Canonical |
- | ------------------------------------------ | -------------------- |
- | `temp` | `temperature` |
- | `max`, `max_out`, `maxTokens` | `max_tokens` |
- | `topp`, `topP`, `nucleus` | `top_p` |
- | `topk`, `topK` | `top_k` |
- | `freq`, `freq_penalty` | `frequency_penalty` |
- | `pres`, `pres_penalty` | `presence_penalty` |
- | `stop_sequences`, `stopSequences` | `stop` |
- | `reasoning`, `reasoning_effort` | `effort` |
- | `cache_control`, `cacheControl` | `cache` |
+ | Shorthand | Canonical |
+ | -------------------------------------------------------------------- | -------------------- |
+ | `temp` | `temperature` |
+ | `max`, `max_out`, `max_output`, `max_output_tokens`, `maxTokens`, `maxOutputTokens`, `max_completion_tokens` | `max_tokens` |
+ | `topp`, `topP`, `nucleus` | `top_p` |
+ | `topk`, `topK` | `top_k` |
+ | `freq`, `freq_penalty`, `frequencyPenalty`, `repetition_penalty` | `frequency_penalty` |
+ | `pres`, `pres_penalty`, `presencePenalty` | `presence_penalty` |
+ | `stop_sequences`, `stopSequences`, `stop_sequence` | `stop` |
+ | `random_seed`, `randomSeed` | `seed` |
+ | `candidateCount`, `candidate_count`, `num_completions` | `n` |
+ | `reasoning`, `reasoning_effort` | `effort` |
+ | `cache_control`, `cacheControl`, `cachePoint`, `cache_point` | `cache` |
+
+ ## Sub-path Imports
+
+ For smaller bundles, import only what you need:
+
+ ```ts
+ import { parse, build } from "llm-strings/parse";
+ import { normalize } from "llm-strings/normalize";
+ import { validate } from "llm-strings/validate";
+ import { detectProvider, ALIASES, PROVIDER_PARAMS, PARAM_SPECS } from "llm-strings/providers";
+ ```
+
+ All sub-paths ship ESM + CJS with full type declarations.

  ## API Reference

@@ -355,6 +370,38 @@ Identifies the provider from a hostname string.

  Identifies the model family (anthropic, meta, amazon, mistral, cohere, ai21) from a Bedrock model ID. Handles cross-region (`us.`, `eu.`, `apac.`) and global inference profiles.

+ ### `detectGatewaySubProvider(model): Provider | undefined`
+
+ Extracts the underlying provider from a gateway model string (e.g. `"anthropic/claude-sonnet-4-5"` → `"anthropic"`). Returns `undefined` for unknown prefixes or models without a `/`.
+
+ ### `isReasoningModel(model): boolean`
+
+ Returns `true` for OpenAI reasoning models (o1, o3, o4 families). Handles gateway prefixes like `"openai/o3"`.
+
+ ### `isGatewayProvider(provider): boolean`
+
+ Returns `true` for gateway providers (`openrouter`, `vercel`) that proxy to other providers.
+
+ ### `canHostOpenAIModels(provider): boolean`
+
+ Returns `true` for providers that can route to OpenAI models and need reasoning-model checks (`openai`, `openrouter`, `vercel`).
+
+ ### `bedrockSupportsCaching(model): boolean`
+
+ Returns `true` if the Bedrock model supports prompt caching (Claude and Nova models only).
+
+ ### Constants
+
+ | Export | Description |
+ | --- | --- |
+ | `ALIASES` | Shorthand → canonical param name mapping |
+ | `PROVIDER_PARAMS` | Canonical → provider-specific param names, per provider |
+ | `PARAM_SPECS` | Validation rules (type, min/max, enum) per provider, keyed by provider-specific param name |
+ | `REASONING_MODEL_UNSUPPORTED` | Set of canonical params unsupported by reasoning models |
+ | `PROVIDER_META` | Array of provider metadata (id, name, host, brand color) for UI integrations |
+ | `MODELS` | Suggested model IDs per provider |
+ | `CANONICAL_PARAM_SPECS` | Canonical param specs per provider with descriptions — useful for building UIs |
+

  ## TypeScript

  Full type definitions ship with the package:
@@ -369,9 +416,37 @@ import type {
  ValidationIssue,
  Provider,
  BedrockModelFamily,
+ ParamSpec,
+ ProviderMeta,
+ CanonicalParamSpec,
  } from "llm-strings";
  ```

+ ## Provider Metadata (for UI integrations)
+
+ The library exports metadata useful for building UIs — provider names, brand colors, suggested models, and canonical parameter specs:
+
+ ```ts
+ import { PROVIDER_META, MODELS, CANONICAL_PARAM_SPECS } from "llm-strings";
+
+ // Provider display info
+ PROVIDER_META.forEach((p) => console.log(`${p.name}: ${p.host} (${p.color})`));
+ // OpenAI: api.openai.com (#10a37f)
+ // Anthropic: api.anthropic.com (#e8956a)
+ // ...
+
+ // Suggested models per provider
+ MODELS.openai; // → ["gpt-5.2", "gpt-5.2-pro", "gpt-4.1", "gpt-4.1-mini", ...]
+ MODELS.anthropic; // → ["claude-opus-4-6", "claude-sonnet-4-6", "claude-sonnet-4-5", ...]
+
+ // Canonical param specs — useful for building config forms
+ CANONICAL_PARAM_SPECS.openai.temperature;
+ // → { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" }
+
+ CANONICAL_PARAM_SPECS.anthropic.effort;
+ // → { type: "enum", values: ["low", "medium", "high", "max"], default: "medium", description: "Thinking effort" }
+ ```
+

  ## Contributing

  Contributions are welcome! Please feel free to submit a Pull Request.
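
The gateway helpers documented above compose naturally with `parse`. A minimal sketch, assuming the signatures listed in the API reference; the connection string reuses the README's own `validate` example:

```ts
import { parse } from "llm-strings/parse";
import {
  detectProvider,
  detectGatewaySubProvider,
  isGatewayProvider,
  isReasoningModel,
} from "llm-strings";

// An OpenAI reasoning model routed through the OpenRouter gateway.
const config = parse("llm://openrouter.ai/openai/o3?temp=0.7");

const provider = detectProvider(config.host); // "openrouter"
if (provider && isGatewayProvider(provider)) {
  // Gateway model strings carry the upstream provider as a prefix.
  console.log(detectGatewaySubProvider(config.model)); // "openai"
}

// Reasoning-model detection sees through gateway prefixes too.
console.log(isReasoningModel(config.model)); // true
```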
package/dist/chunk-6P5GSSNW.js ADDED
@@ -0,0 +1,176 @@
+ // src/provider-meta.ts
+ var PROVIDER_META = [
+   { id: "openai", name: "OpenAI", host: "api.openai.com", color: "#10a37f" },
+   { id: "anthropic", name: "Anthropic", host: "api.anthropic.com", color: "#e8956a" },
+   { id: "google", name: "Google", host: "generativelanguage.googleapis.com", color: "#4285f4" },
+   { id: "mistral", name: "Mistral", host: "api.mistral.ai", color: "#ff7000" },
+   { id: "cohere", name: "Cohere", host: "api.cohere.com", color: "#39594d" },
+   { id: "bedrock", name: "Bedrock", host: "bedrock-runtime.us-east-1.amazonaws.com", color: "#ff9900" },
+   { id: "openrouter", name: "OpenRouter", host: "openrouter.ai", color: "#818cf8" },
+   { id: "vercel", name: "Vercel", host: "gateway.ai.vercel.app", color: "#ededed" }
+ ];
+ var MODELS = {
+   openai: [
+     "gpt-5.2",
+     "gpt-5.2-pro",
+     "gpt-4.1",
+     "gpt-4.1-mini",
+     "gpt-4.1-nano",
+     "o3",
+     "o3-mini",
+     "o4-mini",
+     "o1-pro"
+   ],
+   anthropic: [
+     "claude-opus-4-6",
+     "claude-sonnet-4-6",
+     "claude-sonnet-4-5",
+     "claude-haiku-4-5"
+   ],
+   google: [
+     "gemini-3-pro-preview",
+     "gemini-3-flash-preview",
+     "gemini-2.5-pro",
+     "gemini-2.5-flash"
+   ],
+   mistral: [
+     "mistral-large-latest",
+     "mistral-medium-latest",
+     "mistral-small-latest",
+     "codestral-latest",
+     "magistral-medium-latest"
+   ],
+   cohere: [
+     "command-a-03-2025",
+     "command-r-plus-08-2024",
+     "command-r-08-2024",
+     "command-r7b-12-2024"
+   ],
+   bedrock: [
+     "anthropic.claude-opus-4-6-v1",
+     "anthropic.claude-sonnet-4-6-v1",
+     "anthropic.claude-haiku-4-5-v1",
+     "amazon.nova-pro-v1",
+     "amazon.nova-lite-v1",
+     "meta.llama3-70b-instruct-v1:0"
+   ],
+   openrouter: [
+     "openai/gpt-5.2",
+     "anthropic/claude-opus-4-6",
+     "google/gemini-2.5-pro",
+     "mistral/mistral-large-latest"
+   ],
+   vercel: [
+     "openai/gpt-5.2",
+     "anthropic/claude-opus-4-6",
+     "google/gemini-2.5-pro",
+     "google/gemini-3-pro-preview",
+     "google/gemini-3-flash-preview",
+     "mistral/mistral-large-latest",
+     "qwen/qwen2.5-pro"
+   ]
+ };
+ var CANONICAL_PARAM_SPECS = {
+   openai: {
+     temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
+     max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+     top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
+     frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
+     presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
+     stop: { type: "string", default: "", description: "Stop sequences" },
+     n: { type: "number", min: 1, default: 1, description: "Completions count" },
+     seed: { type: "number", default: "", description: "Random seed" },
+     stream: { type: "boolean", default: false, description: "Stream response" },
+     effort: { type: "enum", values: ["none", "minimal", "low", "medium", "high", "xhigh"], default: "medium", description: "Reasoning effort" }
+   },
+   anthropic: {
+     temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
+     max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+     top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
+     top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
+     stop: { type: "string", default: "", description: "Stop sequences" },
+     stream: { type: "boolean", default: false, description: "Stream response" },
+     effort: { type: "enum", values: ["low", "medium", "high", "max"], default: "medium", description: "Thinking effort" },
+     cache: { type: "enum", values: ["ephemeral"], default: "ephemeral", description: "Cache control" },
+     cache_ttl: { type: "enum", values: ["5m", "1h"], default: "5m", description: "Cache TTL" }
+   },
+   google: {
+     temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
+     max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+     top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
+     top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
+     frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
+     presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
+     stop: { type: "string", default: "", description: "Stop sequences" },
+     n: { type: "number", min: 1, default: 1, description: "Candidate count" },
+     stream: { type: "boolean", default: false, description: "Stream response" },
+     seed: { type: "number", default: "", description: "Random seed" }
+   },
+   mistral: {
+     temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
+     max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+     top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
+     frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
+     presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
+     stop: { type: "string", default: "", description: "Stop sequences" },
+     n: { type: "number", min: 1, default: 1, description: "Completions count" },
+     seed: { type: "number", default: "", description: "Random seed" },
+     stream: { type: "boolean", default: false, description: "Stream response" },
+     safe_prompt: { type: "boolean", default: false, description: "Enable safe prompt" },
+     min_tokens: { type: "number", min: 0, default: 0, description: "Minimum tokens" }
+   },
+   cohere: {
+     temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
+     max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+     top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling (p)" },
+     top_k: { type: "number", min: 0, max: 500, default: 40, description: "Top-K sampling (k)" },
+     frequency_penalty: { type: "number", min: 0, max: 1, default: 0, description: "Penalize frequent tokens" },
+     presence_penalty: { type: "number", min: 0, max: 1, default: 0, description: "Penalize repeated topics" },
+     stop: { type: "string", default: "", description: "Stop sequences" },
+     stream: { type: "boolean", default: false, description: "Stream response" },
+     seed: { type: "number", default: "", description: "Random seed" }
+   },
+   bedrock: {
+     temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
+     max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+     top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
+     top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
+     stop: { type: "string", default: "", description: "Stop sequences" },
+     stream: { type: "boolean", default: false, description: "Stream response" },
+     cache: { type: "enum", values: ["ephemeral"], default: "ephemeral", description: "Cache control" },
+     cache_ttl: { type: "enum", values: ["5m", "1h"], default: "5m", description: "Cache TTL" }
+   },
+   openrouter: {
+     temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
+     max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+     top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
+     top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
+     frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
+     presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
+     stop: { type: "string", default: "", description: "Stop sequences" },
+     n: { type: "number", min: 1, default: 1, description: "Completions count" },
+     seed: { type: "number", default: "", description: "Random seed" },
+     stream: { type: "boolean", default: false, description: "Stream response" },
+     effort: { type: "enum", values: ["none", "minimal", "low", "medium", "high", "xhigh"], default: "medium", description: "Reasoning effort" }
+   },
+   vercel: {
+     temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
+     max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+     top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
+     top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
+     frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
+     presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
+     stop: { type: "string", default: "", description: "Stop sequences" },
+     n: { type: "number", min: 1, default: 1, description: "Completions count" },
+     seed: { type: "number", default: "", description: "Random seed" },
+     stream: { type: "boolean", default: false, description: "Stream response" },
+     effort: { type: "enum", values: ["none", "minimal", "low", "medium", "high", "xhigh"], default: "medium", description: "Reasoning effort" }
+   }
+ };
+
+ export {
+   PROVIDER_META,
+   MODELS,
+   CANONICAL_PARAM_SPECS
+ };
+ //# sourceMappingURL=chunk-6P5GSSNW.js.map
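
For UI work, the `CANONICAL_PARAM_SPECS` table above is enough to drive a settings form. A sketch under that assumption; `toFormField` and the descriptor shape it returns are hypothetical, not part of the package:

```ts
import { CANONICAL_PARAM_SPECS } from "llm-strings";
import type { CanonicalParamSpec } from "llm-strings";

// Hypothetical helper: map one canonical spec to a generic form-field descriptor.
function toFormField(name: string, spec: CanonicalParamSpec) {
  switch (spec.type) {
    case "enum":
      return { name, widget: "select", options: spec.values ?? [], initial: spec.default };
    case "boolean":
      return { name, widget: "checkbox", initial: spec.default };
    case "number":
      return { name, widget: "number", min: spec.min, max: spec.max, initial: spec.default };
    default:
      return { name, widget: "text", initial: spec.default };
  }
}

// Build the settings form for Anthropic from the shipped specs.
const anthropicFields = Object.entries(CANONICAL_PARAM_SPECS.anthropic).map(
  ([name, spec]) => toFormField(name, spec),
);
// e.g. { name: "effort", widget: "select", options: ["low", "medium", "high", "max"], initial: "medium" }
```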
package/dist/chunk-6P5GSSNW.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/provider-meta.ts"],"sourcesContent":["import type { Provider } from \"./provider-core.js\";\n\n/* ------------------------------------------------------------------ */\n/* UI-consumable metadata for 3rd-party integrations */\n/* ------------------------------------------------------------------ */\n\nexport interface ProviderMeta {\n /** Provider identifier — matches the Provider union type. */\n id: Provider;\n /** Human-readable display name. */\n name: string;\n /** Default / canonical API hostname. */\n host: string;\n /** Brand color as a CSS hex value. */\n color: string;\n}\n\nexport const PROVIDER_META: ProviderMeta[] = [\n { id: \"openai\", name: \"OpenAI\", host: \"api.openai.com\", color: \"#10a37f\" },\n { id: \"anthropic\", name: \"Anthropic\", host: \"api.anthropic.com\", color: \"#e8956a\" },\n { id: \"google\", name: \"Google\", host: \"generativelanguage.googleapis.com\", color: \"#4285f4\" },\n { id: \"mistral\", name: \"Mistral\", host: \"api.mistral.ai\", color: \"#ff7000\" },\n { id: \"cohere\", name: \"Cohere\", host: \"api.cohere.com\", color: \"#39594d\" },\n { id: \"bedrock\", name: \"Bedrock\", host: \"bedrock-runtime.us-east-1.amazonaws.com\", color: \"#ff9900\" },\n { id: \"openrouter\", name: \"OpenRouter\", host: \"openrouter.ai\", color: \"#818cf8\" },\n { id: \"vercel\", name: \"Vercel\", host: \"gateway.ai.vercel.app\", color: \"#ededed\" },\n];\n\n/**\n * Suggested / common model IDs per provider, ordered by recency.\n * Not exhaustive — providers add models frequently.\n */\nexport const MODELS: Record<Provider, string[]> = {\n openai: [\n \"gpt-5.2\", \"gpt-5.2-pro\",\n \"gpt-4.1\", \"gpt-4.1-mini\", \"gpt-4.1-nano\",\n \"o3\", \"o3-mini\", \"o4-mini\", \"o1-pro\",\n ],\n anthropic: [\n \"claude-opus-4-6\", \"claude-sonnet-4-6\",\n \"claude-sonnet-4-5\", \"claude-haiku-4-5\",\n ],\n google: [\n \"gemini-3-pro-preview\", \"gemini-3-flash-preview\",\n \"gemini-2.5-pro\", \"gemini-2.5-flash\",\n ],\n mistral: [\n \"mistral-large-latest\", \"mistral-medium-latest\",\n \"mistral-small-latest\", \"codestral-latest\",\n \"magistral-medium-latest\",\n ],\n cohere: [\n \"command-a-03-2025\",\n \"command-r-plus-08-2024\", \"command-r-08-2024\",\n \"command-r7b-12-2024\",\n ],\n bedrock: [\n \"anthropic.claude-opus-4-6-v1\", \"anthropic.claude-sonnet-4-6-v1\",\n \"anthropic.claude-haiku-4-5-v1\",\n \"amazon.nova-pro-v1\", \"amazon.nova-lite-v1\",\n \"meta.llama3-70b-instruct-v1:0\",\n ],\n openrouter: [\n \"openai/gpt-5.2\", \"anthropic/claude-opus-4-6\",\n \"google/gemini-2.5-pro\", \"mistral/mistral-large-latest\",\n ],\n vercel: [\n \"openai/gpt-5.2\", \"anthropic/claude-opus-4-6\",\n \"google/gemini-2.5-pro\", \"google/gemini-3-pro-preview\",\n \"google/gemini-3-flash-preview\", \"mistral/mistral-large-latest\",\n \"qwen/qwen2.5-pro\",\n ],\n};\n\n/**\n * Canonical parameter spec — keyed by canonical (snake_case) param names\n * with defaults and descriptions for UI consumption.\n */\nexport interface CanonicalParamSpec {\n type: \"number\" | \"string\" | \"boolean\" | \"enum\";\n min?: number;\n max?: number;\n values?: string[];\n default?: string | number | boolean;\n description?: string;\n}\n\nexport const CANONICAL_PARAM_SPECS: Record<Provider, Record<string, CanonicalParamSpec>> = {\n openai: {\n temperature: { type: \"number\", min: 0, max: 2, default: 0.7, description: \"Controls randomness\" },\n max_tokens: { type: \"number\", min: 1, default: 4096, description: \"Maximum output tokens\" },\n top_p: { type: 
\"number\", min: 0, max: 1, default: 1, description: \"Nucleus sampling\" },\n frequency_penalty: { type: \"number\", min: -2, max: 2, default: 0, description: \"Penalize frequent tokens\" },\n presence_penalty: { type: \"number\", min: -2, max: 2, default: 0, description: \"Penalize repeated topics\" },\n stop: { type: \"string\", default: \"\", description: \"Stop sequences\" },\n n: { type: \"number\", min: 1, default: 1, description: \"Completions count\" },\n seed: { type: \"number\", default: \"\", description: \"Random seed\" },\n stream: { type: \"boolean\", default: false, description: \"Stream response\" },\n effort: { type: \"enum\", values: [\"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\"], default: \"medium\", description: \"Reasoning effort\" },\n },\n anthropic: {\n temperature: { type: \"number\", min: 0, max: 1, default: 0.7, description: \"Controls randomness\" },\n max_tokens: { type: \"number\", min: 1, default: 4096, description: \"Maximum output tokens\" },\n top_p: { type: \"number\", min: 0, max: 1, default: 1, description: \"Nucleus sampling\" },\n top_k: { type: \"number\", min: 0, default: 40, description: \"Top-K sampling\" },\n stop: { type: \"string\", default: \"\", description: \"Stop sequences\" },\n stream: { type: \"boolean\", default: false, description: \"Stream response\" },\n effort: { type: \"enum\", values: [\"low\", \"medium\", \"high\", \"max\"], default: \"medium\", description: \"Thinking effort\" },\n cache: { type: \"enum\", values: [\"ephemeral\"], default: \"ephemeral\", description: \"Cache control\" },\n cache_ttl: { type: \"enum\", values: [\"5m\", \"1h\"], default: \"5m\", description: \"Cache TTL\" },\n },\n google: {\n temperature: { type: \"number\", min: 0, max: 2, default: 0.7, description: \"Controls randomness\" },\n max_tokens: { type: \"number\", min: 1, default: 4096, description: \"Maximum output tokens\" },\n top_p: { type: \"number\", min: 0, max: 1, default: 1, description: \"Nucleus sampling\" },\n top_k: { type: \"number\", min: 0, default: 40, description: \"Top-K sampling\" },\n frequency_penalty: { type: \"number\", min: -2, max: 2, default: 0, description: \"Penalize frequent tokens\" },\n presence_penalty: { type: \"number\", min: -2, max: 2, default: 0, description: \"Penalize repeated topics\" },\n stop: { type: \"string\", default: \"\", description: \"Stop sequences\" },\n n: { type: \"number\", min: 1, default: 1, description: \"Candidate count\" },\n stream: { type: \"boolean\", default: false, description: \"Stream response\" },\n seed: { type: \"number\", default: \"\", description: \"Random seed\" },\n },\n mistral: {\n temperature: { type: \"number\", min: 0, max: 1, default: 0.7, description: \"Controls randomness\" },\n max_tokens: { type: \"number\", min: 1, default: 4096, description: \"Maximum output tokens\" },\n top_p: { type: \"number\", min: 0, max: 1, default: 1, description: \"Nucleus sampling\" },\n frequency_penalty: { type: \"number\", min: -2, max: 2, default: 0, description: \"Penalize frequent tokens\" },\n presence_penalty: { type: \"number\", min: -2, max: 2, default: 0, description: \"Penalize repeated topics\" },\n stop: { type: \"string\", default: \"\", description: \"Stop sequences\" },\n n: { type: \"number\", min: 1, default: 1, description: \"Completions count\" },\n seed: { type: \"number\", default: \"\", description: \"Random seed\" },\n stream: { type: \"boolean\", default: false, description: \"Stream response\" },\n safe_prompt: { type: \"boolean\", default: 
false, description: \"Enable safe prompt\" },\n min_tokens: { type: \"number\", min: 0, default: 0, description: \"Minimum tokens\" },\n },\n cohere: {\n temperature: { type: \"number\", min: 0, max: 1, default: 0.7, description: \"Controls randomness\" },\n max_tokens: { type: \"number\", min: 1, default: 4096, description: \"Maximum output tokens\" },\n top_p: { type: \"number\", min: 0, max: 1, default: 1, description: \"Nucleus sampling (p)\" },\n top_k: { type: \"number\", min: 0, max: 500, default: 40, description: \"Top-K sampling (k)\" },\n frequency_penalty: { type: \"number\", min: 0, max: 1, default: 0, description: \"Penalize frequent tokens\" },\n presence_penalty: { type: \"number\", min: 0, max: 1, default: 0, description: \"Penalize repeated topics\" },\n stop: { type: \"string\", default: \"\", description: \"Stop sequences\" },\n stream: { type: \"boolean\", default: false, description: \"Stream response\" },\n seed: { type: \"number\", default: \"\", description: \"Random seed\" },\n },\n bedrock: {\n temperature: { type: \"number\", min: 0, max: 1, default: 0.7, description: \"Controls randomness\" },\n max_tokens: { type: \"number\", min: 1, default: 4096, description: \"Maximum output tokens\" },\n top_p: { type: \"number\", min: 0, max: 1, default: 1, description: \"Nucleus sampling\" },\n top_k: { type: \"number\", min: 0, default: 40, description: \"Top-K sampling\" },\n stop: { type: \"string\", default: \"\", description: \"Stop sequences\" },\n stream: { type: \"boolean\", default: false, description: \"Stream response\" },\n cache: { type: \"enum\", values: [\"ephemeral\"], default: \"ephemeral\", description: \"Cache control\" },\n cache_ttl: { type: \"enum\", values: [\"5m\", \"1h\"], default: \"5m\", description: \"Cache TTL\" },\n },\n openrouter: {\n temperature: { type: \"number\", min: 0, max: 2, default: 0.7, description: \"Controls randomness\" },\n max_tokens: { type: \"number\", min: 1, default: 4096, description: \"Maximum output tokens\" },\n top_p: { type: \"number\", min: 0, max: 1, default: 1, description: \"Nucleus sampling\" },\n top_k: { type: \"number\", min: 0, default: 40, description: \"Top-K sampling\" },\n frequency_penalty: { type: \"number\", min: -2, max: 2, default: 0, description: \"Penalize frequent tokens\" },\n presence_penalty: { type: \"number\", min: -2, max: 2, default: 0, description: \"Penalize repeated topics\" },\n stop: { type: \"string\", default: \"\", description: \"Stop sequences\" },\n n: { type: \"number\", min: 1, default: 1, description: \"Completions count\" },\n seed: { type: \"number\", default: \"\", description: \"Random seed\" },\n stream: { type: \"boolean\", default: false, description: \"Stream response\" },\n effort: { type: \"enum\", values: [\"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\"], default: \"medium\", description: \"Reasoning effort\" },\n },\n vercel: {\n temperature: { type: \"number\", min: 0, max: 2, default: 0.7, description: \"Controls randomness\" },\n max_tokens: { type: \"number\", min: 1, default: 4096, description: \"Maximum output tokens\" },\n top_p: { type: \"number\", min: 0, max: 1, default: 1, description: \"Nucleus sampling\" },\n top_k: { type: \"number\", min: 0, default: 40, description: \"Top-K sampling\" },\n frequency_penalty: { type: \"number\", min: -2, max: 2, default: 0, description: \"Penalize frequent tokens\" },\n presence_penalty: { type: \"number\", min: -2, max: 2, default: 0, description: \"Penalize repeated topics\" },\n stop: { type: 
\"string\", default: \"\", description: \"Stop sequences\" },\n n: { type: \"number\", min: 1, default: 1, description: \"Completions count\" },\n seed: { type: \"number\", default: \"\", description: \"Random seed\" },\n stream: { type: \"boolean\", default: false, description: \"Stream response\" },\n effort: { type: \"enum\", values: [\"none\", \"minimal\", \"low\", \"medium\", \"high\", \"xhigh\"], default: \"medium\", description: \"Reasoning effort\" },\n },\n};\n"],"mappings":";AAiBO,IAAM,gBAAgC;AAAA,EAC3C,EAAE,IAAI,UAAc,MAAM,UAAc,MAAM,kBAA2C,OAAO,UAAU;AAAA,EAC1G,EAAE,IAAI,aAAc,MAAM,aAAe,MAAM,qBAA2C,OAAO,UAAU;AAAA,EAC3G,EAAE,IAAI,UAAc,MAAM,UAAe,MAAM,qCAA4C,OAAO,UAAU;AAAA,EAC5G,EAAE,IAAI,WAAc,MAAM,WAAe,MAAM,kBAA2C,OAAO,UAAU;AAAA,EAC3G,EAAE,IAAI,UAAc,MAAM,UAAe,MAAM,kBAA2C,OAAO,UAAU;AAAA,EAC3G,EAAE,IAAI,WAAc,MAAM,WAAe,MAAM,2CAA2C,OAAO,UAAU;AAAA,EAC3G,EAAE,IAAI,cAAc,MAAM,cAAe,MAAM,iBAA2C,OAAO,UAAU;AAAA,EAC3G,EAAE,IAAI,UAAc,MAAM,UAAe,MAAM,yBAA2C,OAAO,UAAU;AAC7G;AAMO,IAAM,SAAqC;AAAA,EAChD,QAAQ;AAAA,IACN;AAAA,IAAW;AAAA,IACX;AAAA,IAAW;AAAA,IAAgB;AAAA,IAC3B;AAAA,IAAM;AAAA,IAAW;AAAA,IAAW;AAAA,EAC9B;AAAA,EACA,WAAW;AAAA,IACT;AAAA,IAAmB;AAAA,IACnB;AAAA,IAAqB;AAAA,EACvB;AAAA,EACA,QAAQ;AAAA,IACN;AAAA,IAAwB;AAAA,IACxB;AAAA,IAAkB;AAAA,EACpB;AAAA,EACA,SAAS;AAAA,IACP;AAAA,IAAwB;AAAA,IACxB;AAAA,IAAwB;AAAA,IACxB;AAAA,EACF;AAAA,EACA,QAAQ;AAAA,IACN;AAAA,IACA;AAAA,IAA0B;AAAA,IAC1B;AAAA,EACF;AAAA,EACA,SAAS;AAAA,IACP;AAAA,IAAgC;AAAA,IAChC;AAAA,IACA;AAAA,IAAsB;AAAA,IACtB;AAAA,EACF;AAAA,EACA,YAAY;AAAA,IACV;AAAA,IAAkB;AAAA,IAClB;AAAA,IAAyB;AAAA,EAC3B;AAAA,EACA,QAAQ;AAAA,IACN;AAAA,IAAkB;AAAA,IAClB;AAAA,IAAyB;AAAA,IACzB;AAAA,IAAiC;AAAA,IACjC;AAAA,EACF;AACF;AAeO,IAAM,wBAA8E;AAAA,EACzF,QAAQ;AAAA,IACN,aAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAI,SAAS,KAAM,aAAa,sBAAsB;AAAA,IACzG,YAAmB,EAAE,MAAM,UAAW,KAAK,GAAY,SAAS,MAAM,aAAa,wBAAwB;AAAA,IAC3G,OAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAI,SAAS,GAAM,aAAa,mBAAmB;AAAA,IACtG,mBAAmB,EAAE,MAAM,UAAW,KAAK,IAAI,KAAK,GAAG,SAAS,GAAM,aAAa,2BAA2B;AAAA,IAC9G,kBAAmB,EAAE,MAAM,UAAW,KAAK,IAAI,KAAK,GAAG,SAAS,GAAM,aAAa,2BAA2B;AAAA,IAC9G,MAAmB,EAAE,MAAM,UAA6B,SAAS,IAAM,aAAa,iBAAiB;AAAA,IACrG,GAAmB,EAAE,MAAM,UAAW,KAAK,GAAa,SAAS,GAAM,aAAa,oBAAoB;AAAA,IACxG,MAAmB,EAAE,MAAM,UAA6B,SAAS,IAAM,aAAa,cAAc;AAAA,IAClG,QAAmB,EAAE,MAAM,WAA6B,SAAS,OAAO,aAAa,kBAAkB;AAAA,IACvG,QAAmB,EAAE,MAAM,QAAQ,QAAQ,CAAC,QAAQ,WAAW,OAAO,UAAU,QAAQ,OAAO,GAAG,SAAS,UAAU,aAAa,mBAAmB;AAAA,EACvJ;AAAA,EACA,WAAW;AAAA,IACT,aAAa,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAG,SAAS,KAAM,aAAa,sBAAsB;AAAA,IAClG,YAAa,EAAE,MAAM,UAAW,KAAK,GAAW,SAAS,MAAM,aAAa,wBAAwB;AAAA,IACpG,OAAa,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAG,SAAS,GAAM,aAAa,mBAAmB;AAAA,IAC/F,OAAa,EAAE,MAAM,UAAW,KAAK,GAAW,SAAS,IAAM,aAAa,iBAAiB;AAAA,IAC7F,MAAa,EAAE,MAAM,UAA4B,SAAS,IAAM,aAAa,iBAAiB;AAAA,IAC9F,QAAa,EAAE,MAAM,WAA4B,SAAS,OAAO,aAAa,kBAAkB;AAAA,IAChG,QAAa,EAAE,MAAM,QAAQ,QAAQ,CAAC,OAAO,UAAU,QAAQ,KAAK,GAAG,SAAS,UAAU,aAAa,kBAAkB;AAAA,IACzH,OAAa,EAAE,MAAM,QAAQ,QAAQ,CAAC,WAAW,GAAK,SAAS,aAAa,aAAa,gBAAgB;AAAA,IACzG,WAAa,EAAE,MAAM,QAAQ,QAAQ,CAAC,MAAM,IAAI,GAAM,SAAS,MAAa,aAAa,YAAY;AAAA,EACvG;AAAA,EACA,QAAQ;AAAA,IACN,aAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAI,SAAS,KAAM,aAAa,sBAAsB;AAAA,IACzG,YAAmB,EAAE,MAAM,UAAW,KAAK,GAAY,SAAS,MAAM,aAAa,wBAAwB;AAAA,IAC3G,OAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAI,SAAS,GAAM,aAAa,mBAAmB;AAAA,IACtG,OAAmB,EAAE,MAAM,UAAW,KAAK,GAAY,SAAS,IAAM,aAAa,iBAAiB;AAAA,IACpG,mBAAmB,EAAE,MAAM,UAAW,KAAK,IAAI,KAAK,GAAG,SAAS,GAAM,aAAa,2BAA2B;AAAA,IAC9G,kBAAmB,EAAE,MAAM,UAAW,KAAK,IAAI,KAAK,GAAG,SAAS,GAAM,aAAa,2BAA2B;AAAA,IAC9G,MAAmB,EAAE,MAAM,UAA6B,SAAS,IAAM,aAAa,iBAAiB;AAAA,IACrG,GAAmB,EAAE,MAAM
,UAAW,KAAK,GAAa,SAAS,GAAM,aAAa,kBAAkB;AAAA,IACtG,QAAmB,EAAE,MAAM,WAA6B,SAAS,OAAO,aAAa,kBAAkB;AAAA,IACvG,MAAmB,EAAE,MAAM,UAA6B,SAAS,IAAM,aAAa,cAAc;AAAA,EACpG;AAAA,EACA,SAAS;AAAA,IACP,aAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAI,SAAS,KAAM,aAAa,sBAAsB;AAAA,IACzG,YAAmB,EAAE,MAAM,UAAW,KAAK,GAAY,SAAS,MAAM,aAAa,wBAAwB;AAAA,IAC3G,OAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAI,SAAS,GAAM,aAAa,mBAAmB;AAAA,IACtG,mBAAmB,EAAE,MAAM,UAAW,KAAK,IAAI,KAAK,GAAG,SAAS,GAAM,aAAa,2BAA2B;AAAA,IAC9G,kBAAmB,EAAE,MAAM,UAAW,KAAK,IAAI,KAAK,GAAG,SAAS,GAAM,aAAa,2BAA2B;AAAA,IAC9G,MAAmB,EAAE,MAAM,UAA6B,SAAS,IAAM,aAAa,iBAAiB;AAAA,IACrG,GAAmB,EAAE,MAAM,UAAW,KAAK,GAAa,SAAS,GAAM,aAAa,oBAAoB;AAAA,IACxG,MAAmB,EAAE,MAAM,UAA6B,SAAS,IAAM,aAAa,cAAc;AAAA,IAClG,QAAmB,EAAE,MAAM,WAA6B,SAAS,OAAO,aAAa,kBAAkB;AAAA,IACvG,aAAmB,EAAE,MAAM,WAA6B,SAAS,OAAO,aAAa,qBAAqB;AAAA,IAC1G,YAAmB,EAAE,MAAM,UAAW,KAAK,GAAY,SAAS,GAAM,aAAa,iBAAiB;AAAA,EACtG;AAAA,EACA,QAAQ;AAAA,IACN,aAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAS,SAAS,KAAM,aAAa,sBAAsB;AAAA,IAC9G,YAAmB,EAAE,MAAM,UAAW,KAAK,GAAiB,SAAS,MAAM,aAAa,wBAAwB;AAAA,IAChH,OAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAS,SAAS,GAAM,aAAa,uBAAuB;AAAA,IAC/G,OAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,KAAS,SAAS,IAAM,aAAa,qBAAqB;AAAA,IAC7G,mBAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAS,SAAS,GAAM,aAAa,2BAA2B;AAAA,IACnH,kBAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAS,SAAS,GAAM,aAAa,2BAA2B;AAAA,IACnH,MAAmB,EAAE,MAAM,UAAkC,SAAS,IAAM,aAAa,iBAAiB;AAAA,IAC1G,QAAmB,EAAE,MAAM,WAAkC,SAAS,OAAO,aAAa,kBAAkB;AAAA,IAC5G,MAAmB,EAAE,MAAM,UAAkC,SAAS,IAAM,aAAa,cAAc;AAAA,EACzG;AAAA,EACA,SAAS;AAAA,IACP,aAAa,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAG,SAAS,KAAM,aAAa,sBAAsB;AAAA,IAClG,YAAa,EAAE,MAAM,UAAW,KAAK,GAAW,SAAS,MAAM,aAAa,wBAAwB;AAAA,IACpG,OAAa,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAG,SAAS,GAAM,aAAa,mBAAmB;AAAA,IAC/F,OAAa,EAAE,MAAM,UAAW,KAAK,GAAW,SAAS,IAAM,aAAa,iBAAiB;AAAA,IAC7F,MAAa,EAAE,MAAM,UAA4B,SAAS,IAAM,aAAa,iBAAiB;AAAA,IAC9F,QAAa,EAAE,MAAM,WAA4B,SAAS,OAAO,aAAa,kBAAkB;AAAA,IAChG,OAAa,EAAE,MAAM,QAAQ,QAAQ,CAAC,WAAW,GAAK,SAAS,aAAa,aAAa,gBAAgB;AAAA,IACzG,WAAa,EAAE,MAAM,QAAQ,QAAQ,CAAC,MAAM,IAAI,GAAM,SAAS,MAAa,aAAa,YAAY;AAAA,EACvG;AAAA,EACA,YAAY;AAAA,IACV,aAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAI,SAAS,KAAM,aAAa,sBAAsB;AAAA,IACzG,YAAmB,EAAE,MAAM,UAAW,KAAK,GAAY,SAAS,MAAM,aAAa,wBAAwB;AAAA,IAC3G,OAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAI,SAAS,GAAM,aAAa,mBAAmB;AAAA,IACtG,OAAmB,EAAE,MAAM,UAAW,KAAK,GAAY,SAAS,IAAM,aAAa,iBAAiB;AAAA,IACpG,mBAAmB,EAAE,MAAM,UAAW,KAAK,IAAI,KAAK,GAAG,SAAS,GAAM,aAAa,2BAA2B;AAAA,IAC9G,kBAAmB,EAAE,MAAM,UAAW,KAAK,IAAI,KAAK,GAAG,SAAS,GAAM,aAAa,2BAA2B;AAAA,IAC9G,MAAmB,EAAE,MAAM,UAA6B,SAAS,IAAM,aAAa,iBAAiB;AAAA,IACrG,GAAmB,EAAE,MAAM,UAAW,KAAK,GAAa,SAAS,GAAM,aAAa,oBAAoB;AAAA,IACxG,MAAmB,EAAE,MAAM,UAA6B,SAAS,IAAM,aAAa,cAAc;AAAA,IAClG,QAAmB,EAAE,MAAM,WAA6B,SAAS,OAAO,aAAa,kBAAkB;AAAA,IACvG,QAAmB,EAAE,MAAM,QAAQ,QAAQ,CAAC,QAAQ,WAAW,OAAO,UAAU,QAAQ,OAAO,GAAG,SAAS,UAAU,aAAa,mBAAmB;AAAA,EACvJ;AAAA,EACA,QAAQ;AAAA,IACN,aAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAI,SAAS,KAAM,aAAa,sBAAsB;AAAA,IACzG,YAAmB,EAAE,MAAM,UAAW,KAAK,GAAY,SAAS,MAAM,aAAa,wBAAwB;AAAA,IAC3G,OAAmB,EAAE,MAAM,UAAW,KAAK,GAAG,KAAK,GAAI,SAAS,GAAM,aAAa,mBAAmB;AAAA,IACtG,OAAmB,EAAE,MAAM,UAAW,KAAK,GAAY,SAAS,IAAM,aAAa,iBAAiB;AAAA,IACpG,mBAAmB,EAAE,MAAM,UAAW,KAAK,IAAI,KAAK,GAAG,SAAS,GAAM,aAAa,2BAA2B;AAAA,IAC9G,kBAAmB,EAAE,MAAM,UAAW,KAAK,IAAI,KAAK,GAAG,SAAS,GAAM,aAAa,2BAA2B;AAAA,IAC9G,MAAmB,EAAE,MAAM,UAA6B,SAAS,IAAM,aAAa,iBAAiB;AAAA,IACrG,GAAmB,EAAE,MAAM,UAAW,KAAK,GAAa,SAAS,GAAM,aAAa,oBAAoB;AAAA,IACxG,MAAmB,EAAE,MAAM,UAA6B,SAAS,IAAM,aAAa,cAAc;AAAA,IAClG,QAAmB,EAAE,MAAM,WAA6B,SAAS,OAAO,aAAa,kBAAkB;AAAA,IACvG,QAAmB,EAAE,MAAM,QAAQ,QAAQ,CAAC,
QAAQ,WAAW,OAAO,UAAU,QAAQ,OAAO,GAAG,SAAS,UAAU,aAAa,mBAAmB;AAAA,EACvJ;AACF;","names":[]}
package/dist/chunk-FCEV23OT.js ADDED
@@ -0,0 +1,37 @@
+ // src/parse.ts
+ function parse(connectionString) {
+   const url = new URL(connectionString);
+   if (url.protocol !== "llm:") {
+     throw new Error(
+       `Invalid scheme: expected "llm://", got "${url.protocol}//"`
+     );
+   }
+   const host = url.hostname;
+   const model = url.pathname.replace(/^\//, "");
+   const label = url.username || void 0;
+   const apiKey = url.password || void 0;
+   const params = {};
+   for (const [key, value] of url.searchParams) {
+     params[key] = value;
+   }
+   return {
+     raw: connectionString,
+     host,
+     model,
+     label,
+     apiKey,
+     params
+   };
+ }
+ function build(config) {
+   const auth = config.label || config.apiKey ? `${config.label ?? ""}${config.apiKey ? `:${config.apiKey}` : ""}@` : "";
+   const query = new URLSearchParams(config.params).toString();
+   const qs = query ? `?${query}` : "";
+   return `llm://${auth}${config.host}/${config.model}${qs}`;
+ }
+
+ export {
+   parse,
+   build
+ };
+ //# sourceMappingURL=chunk-FCEV23OT.js.map
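
The `parse`/`build` pair above round-trips cleanly. A quick sketch using the example string from the source's own JSDoc:

```ts
import { parse, build } from "llm-strings/parse";

const cfg = parse("llm://app-name:sk-proj-123456@api.openai.com/gpt-5.2?temp=0.7");
// cfg.label  → "app-name"
// cfg.apiKey → "sk-proj-123456"
// cfg.host   → "api.openai.com"
// cfg.model  → "gpt-5.2"
// cfg.params → { temp: "0.7" }

// build() accepts everything except `raw` and reassembles the string.
const { raw, ...rest } = cfg;
console.log(build(rest)); // "llm://app-name:sk-proj-123456@api.openai.com/gpt-5.2?temp=0.7"
```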
package/dist/chunk-FCEV23OT.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/parse.ts"],"sourcesContent":["export interface LlmConnectionConfig {\n /** The original connection string */\n raw: string;\n /** Provider's API base URL (e.g. \"api.openai.com\") */\n host: string;\n /** Model name (e.g. \"gpt-5.2\") */\n model: string;\n /** Optional label or app name */\n label?: string;\n /** Optional API key or password */\n apiKey?: string;\n /** Additional config parameters (temp, max_tokens, etc.) */\n params: Record<string, string>;\n}\n\n/**\n * Parse an LLM connection string into its component parts.\n *\n * Format: `llm://[label[:apiKey]@]host/model[?key=value&...]`\n *\n * @example\n * ```ts\n * parse(\"llm://api.openai.com/gpt-5.2?temp=0.7&max_tokens=1500\")\n * parse(\"llm://app-name:sk-proj-123456@api.openai.com/gpt-5.2?temp=0.7\")\n * ```\n */\nexport function parse(connectionString: string): LlmConnectionConfig {\n const url = new URL(connectionString);\n\n if (url.protocol !== \"llm:\") {\n throw new Error(\n `Invalid scheme: expected \"llm://\", got \"${url.protocol}//\"`,\n );\n }\n\n const host = url.hostname;\n const model = url.pathname.replace(/^\\//, \"\");\n const label = url.username || undefined;\n const apiKey = url.password || undefined;\n\n const params: Record<string, string> = {};\n for (const [key, value] of url.searchParams) {\n params[key] = value;\n }\n\n return {\n raw: connectionString,\n host,\n model,\n label,\n apiKey,\n params,\n };\n}\n\n/**\n * Build an LLM connection string from a config object.\n */\nexport function build(config: Omit<LlmConnectionConfig, \"raw\">): string {\n const auth =\n config.label || config.apiKey\n ? `${config.label ?? \"\"}${config.apiKey ? `:${config.apiKey}` : \"\"}@`\n : \"\";\n\n const query = new URLSearchParams(config.params).toString();\n const qs = query ? `?${query}` : \"\";\n\n return `llm://${auth}${config.host}/${config.model}${qs}`;\n}\n"],"mappings":";AA0BO,SAAS,MAAM,kBAA+C;AACnE,QAAM,MAAM,IAAI,IAAI,gBAAgB;AAEpC,MAAI,IAAI,aAAa,QAAQ;AAC3B,UAAM,IAAI;AAAA,MACR,2CAA2C,IAAI,QAAQ;AAAA,IACzD;AAAA,EACF;AAEA,QAAM,OAAO,IAAI;AACjB,QAAM,QAAQ,IAAI,SAAS,QAAQ,OAAO,EAAE;AAC5C,QAAM,QAAQ,IAAI,YAAY;AAC9B,QAAM,SAAS,IAAI,YAAY;AAE/B,QAAM,SAAiC,CAAC;AACxC,aAAW,CAAC,KAAK,KAAK,KAAK,IAAI,cAAc;AAC3C,WAAO,GAAG,IAAI;AAAA,EAChB;AAEA,SAAO;AAAA,IACL,KAAK;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAKO,SAAS,MAAM,QAAkD;AACtE,QAAM,OACJ,OAAO,SAAS,OAAO,SACnB,GAAG,OAAO,SAAS,EAAE,GAAG,OAAO,SAAS,IAAI,OAAO,MAAM,KAAK,EAAE,MAChE;AAEN,QAAM,QAAQ,IAAI,gBAAgB,OAAO,MAAM,EAAE,SAAS;AAC1D,QAAM,KAAK,QAAQ,IAAI,KAAK,KAAK;AAEjC,SAAO,SAAS,IAAI,GAAG,OAAO,IAAI,IAAI,OAAO,KAAK,GAAG,EAAE;AACzD;","names":[]}
package/dist/chunk-MGWGNZDJ.cjs ADDED
@@ -0,0 +1,116 @@
+ "use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _nullishCoalesce(lhs, rhsFn) { if (lhs != null) { return lhs; } else { return rhsFn(); } } function _optionalChain(ops) { let lastAccessLHS = undefined; let value = ops[0]; let i = 1; while (i < ops.length) { const op = ops[i]; const fn = ops[i + 1]; i += 2; if ((op === 'optionalAccess' || op === 'optionalCall') && value == null) { return undefined; } if (op === 'access' || op === 'optionalAccess') { lastAccessLHS = value; value = fn(value); } else if (op === 'call' || op === 'optionalCall') { value = fn((...args) => value.call(lastAccessLHS, ...args)); lastAccessLHS = undefined; } } return value; }
+
+
+
+
+
+
+
+
+
+
+
+ var _chunkNSCBY4VDcjs = require('./chunk-NSCBY4VD.cjs');
+
+ // src/normalize.ts
+ function normalize(config, options = {}) {
+   const provider = _chunkNSCBY4VDcjs.detectProvider.call(void 0, config.host);
+   const subProvider = provider && _chunkNSCBY4VDcjs.isGatewayProvider.call(void 0, provider) ? _chunkNSCBY4VDcjs.detectGatewaySubProvider.call(void 0, config.model) : void 0;
+   const changes = [];
+   const params = {};
+   for (const [rawKey, value] of Object.entries(config.params)) {
+     let key = rawKey;
+     if (_chunkNSCBY4VDcjs.ALIASES[key]) {
+       const canonical = _chunkNSCBY4VDcjs.ALIASES[key];
+       if (options.verbose) {
+         changes.push({
+           from: key,
+           to: canonical,
+           value,
+           reason: `alias: "${key}" \u2192 "${canonical}"`
+         });
+       }
+       key = canonical;
+     }
+     if (key === "cache" && provider) {
+       let cacheValue = _chunkNSCBY4VDcjs.CACHE_VALUES[provider];
+       if (provider === "bedrock" && !_chunkNSCBY4VDcjs.bedrockSupportsCaching.call(void 0, config.model)) {
+         cacheValue = void 0;
+       }
+       if (!cacheValue) {
+         if (options.verbose) {
+           changes.push({
+             from: "cache",
+             to: "(dropped)",
+             value,
+             reason: `${provider} does not use a cache param for this model (caching is automatic or unsupported)`
+           });
+         }
+         continue;
+       }
+       const isBool = value === "true" || value === "1" || value === "yes";
+       const isDuration = _chunkNSCBY4VDcjs.DURATION_RE.test(value);
+       if (isBool || isDuration) {
+         const providerKey = _nullishCoalesce(_optionalChain([_chunkNSCBY4VDcjs.PROVIDER_PARAMS, 'access', _ => _[provider], 'optionalAccess', _2 => _2["cache"]]), () => ( "cache"));
+         if (options.verbose) {
+           changes.push({
+             from: "cache",
+             to: providerKey,
+             value: cacheValue,
+             reason: `cache=${value} \u2192 ${providerKey}=${cacheValue} for ${provider}`
+           });
+         }
+         params[providerKey] = cacheValue;
+         if (isDuration && _chunkNSCBY4VDcjs.CACHE_TTLS[provider]) {
+           if (options.verbose) {
+             changes.push({
+               from: "cache",
+               to: "cache_ttl",
+               value,
+               reason: `cache=${value} \u2192 cache_ttl=${value} for ${provider}`
+             });
+           }
+           params["cache_ttl"] = value;
+         }
+         continue;
+       }
+     }
+     if (provider && _chunkNSCBY4VDcjs.PROVIDER_PARAMS[provider]) {
+       const providerKey = _chunkNSCBY4VDcjs.PROVIDER_PARAMS[provider][key];
+       if (providerKey && providerKey !== key) {
+         if (options.verbose) {
+           changes.push({
+             from: key,
+             to: providerKey,
+             value,
+             reason: `${provider} uses "${providerKey}" instead of "${key}"`
+           });
+         }
+         key = providerKey;
+       }
+     }
+     if (provider && _chunkNSCBY4VDcjs.canHostOpenAIModels.call(void 0, provider) && _chunkNSCBY4VDcjs.isReasoningModel.call(void 0, config.model) && key === "max_tokens") {
+       if (options.verbose) {
+         changes.push({
+           from: "max_tokens",
+           to: "max_completion_tokens",
+           value,
+           reason: "OpenAI reasoning models use max_completion_tokens instead of max_tokens"
+         });
+       }
+       key = "max_completion_tokens";
+     }
+     params[key] = value;
+   }
+   return {
+     config: { ...config, params },
+     provider,
+     subProvider,
+     changes
+   };
+ }
+
+
+
+ exports.normalize = normalize;
+ //# sourceMappingURL=chunk-MGWGNZDJ.cjs.map
package/dist/chunk-MGWGNZDJ.cjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["/Users/dan/code/oss/llm-strings/dist/chunk-MGWGNZDJ.cjs","../src/normalize.ts"],"names":[],"mappings":"AAAA;AACE;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACF,wDAA6B;AAC7B;AACA;ACgCO,SAAS,SAAA,CACd,MAAA,EACA,QAAA,EAA4B,CAAC,CAAA,EACZ;AACjB,EAAA,MAAM,SAAA,EAAW,8CAAA,MAAe,CAAO,IAAI,CAAA;AAC3C,EAAA,MAAM,YAAA,EACJ,SAAA,GAAY,iDAAA,QAA0B,EAAA,EAClC,wDAAA,MAAyB,CAAO,KAAK,EAAA,EACrC,KAAA,CAAA;AACN,EAAA,MAAM,QAAA,EAA6B,CAAC,CAAA;AACpC,EAAA,MAAM,OAAA,EAAiC,CAAC,CAAA;AAExC,EAAA,IAAA,CAAA,MAAW,CAAC,MAAA,EAAQ,KAAK,EAAA,GAAK,MAAA,CAAO,OAAA,CAAQ,MAAA,CAAO,MAAM,CAAA,EAAG;AAC3D,IAAA,IAAI,IAAA,EAAM,MAAA;AAGV,IAAA,GAAA,CAAI,yBAAA,CAAQ,GAAG,CAAA,EAAG;AAChB,MAAA,MAAM,UAAA,EAAY,yBAAA,CAAQ,GAAG,CAAA;AAC7B,MAAA,GAAA,CAAI,OAAA,CAAQ,OAAA,EAAS;AACnB,QAAA,OAAA,CAAQ,IAAA,CAAK;AAAA,UACX,IAAA,EAAM,GAAA;AAAA,UACN,EAAA,EAAI,SAAA;AAAA,UACJ,KAAA;AAAA,UACA,MAAA,EAAQ,CAAA,QAAA,EAAW,GAAG,CAAA,UAAA,EAAQ,SAAS,CAAA,CAAA;AAAA,QACzC,CAAC,CAAA;AAAA,MACH;AACA,MAAA,IAAA,EAAM,SAAA;AAAA,IACR;AAGA,IAAA,GAAA,CAAI,IAAA,IAAQ,QAAA,GAAW,QAAA,EAAU;AAC/B,MAAA,IAAI,WAAA,EAAa,8BAAA,CAAa,QAAQ,CAAA;AAGtC,MAAA,GAAA,CAAI,SAAA,IAAa,UAAA,GAAa,CAAC,sDAAA,MAAuB,CAAO,KAAK,CAAA,EAAG;AACnE,QAAA,WAAA,EAAa,KAAA,CAAA;AAAA,MACf;AAGA,MAAA,GAAA,CAAI,CAAC,UAAA,EAAY;AACf,QAAA,GAAA,CAAI,OAAA,CAAQ,OAAA,EAAS;AACnB,UAAA,OAAA,CAAQ,IAAA,CAAK;AAAA,YACX,IAAA,EAAM,OAAA;AAAA,YACN,EAAA,EAAI,WAAA;AAAA,YACJ,KAAA;AAAA,YACA,MAAA,EAAQ,CAAA,EAAA;AACT,UAAA;AACH,QAAA;AACA,QAAA;AACF,MAAA;AAEe,MAAA;AACT,MAAA;AAEQ,MAAA;AACN,QAAA;AAEM,QAAA;AACF,UAAA;AACA,YAAA;AACF,YAAA;AACG,YAAA;AACC,YAAA;AACT,UAAA;AACH,QAAA;AACO,QAAA;AAGH,QAAA;AACE,UAAA;AACM,YAAA;AACA,cAAA;AACF,cAAA;AACJ,cAAA;AACA,cAAA;AACD,YAAA;AACH,UAAA;AACO,UAAA;AACT,QAAA;AACA,QAAA;AACF,MAAA;AACF,IAAA;AAGgB,IAAA;AACR,MAAA;AACF,MAAA;AACU,QAAA;AACF,UAAA;AACA,YAAA;AACF,YAAA;AACJ,YAAA;AACQ,YAAA;AACT,UAAA;AACH,QAAA;AACM,QAAA;AACR,MAAA;AACF,IAAA;AAKE,IAAA;AAIY,MAAA;AACG,QAAA;AACL,UAAA;AACF,UAAA;AACJ,UAAA;AAEE,UAAA;AACH,QAAA;AACH,MAAA;AACM,MAAA;AACR,IAAA;AAEc,IAAA;AAChB,EAAA;AAEO,EAAA;AACQ,IAAA;AACb,IAAA;AACA,IAAA;AACA,IAAA;AACF,EAAA;AACF;AD9DqB;AACA;AACA;AACA","file":"/Users/dan/code/oss/llm-strings/dist/chunk-MGWGNZDJ.cjs","sourcesContent":[null,"import type { LlmConnectionConfig } from \"./parse.js\";\nimport {\n ALIASES,\n CACHE_TTLS,\n CACHE_VALUES,\n DURATION_RE,\n PROVIDER_PARAMS,\n bedrockSupportsCaching,\n canHostOpenAIModels,\n detectGatewaySubProvider,\n detectProvider,\n isGatewayProvider,\n isReasoningModel,\n type Provider,\n} from \"./provider-core.js\";\n\nexport interface NormalizeChange {\n from: string;\n to: string;\n value: string;\n reason: string;\n}\n\nexport interface NormalizeResult {\n config: LlmConnectionConfig;\n provider: Provider | undefined;\n /** Underlying provider extracted from gateway model prefix (e.g. \"anthropic\" from \"anthropic/claude-sonnet-4-5\"). */\n subProvider: Provider | undefined;\n changes: NormalizeChange[];\n}\n\nexport interface NormalizeOptions {\n /** Include detailed change log in the result. */\n verbose?: boolean;\n}\n\n/**\n * Normalize an LLM connection config's params for its target provider.\n *\n * 1. Expands shorthand aliases (e.g. `temp` → `temperature`)\n * 2. Maps canonical param names to provider-specific names\n * (e.g. `max_tokens` → `maxOutputTokens` for Google)\n * 3. Normalizes special values (e.g. `cache=true` → `cache_control=ephemeral` for Anthropic)\n * 4. 
For OpenAI reasoning models, remaps `max_tokens` → `max_completion_tokens`\n * and warns about unsupported sampling params\n */\nexport function normalize(\n config: LlmConnectionConfig,\n options: NormalizeOptions = {},\n): NormalizeResult {\n const provider = detectProvider(config.host);\n const subProvider =\n provider && isGatewayProvider(provider)\n ? detectGatewaySubProvider(config.model)\n : undefined;\n const changes: NormalizeChange[] = [];\n const params: Record<string, string> = {};\n\n for (const [rawKey, value] of Object.entries(config.params)) {\n let key = rawKey;\n\n // Step 1: Expand aliases to canonical name\n if (ALIASES[key]) {\n const canonical = ALIASES[key];\n if (options.verbose) {\n changes.push({\n from: key,\n to: canonical,\n value,\n reason: `alias: \"${key}\" → \"${canonical}\"`,\n });\n }\n key = canonical;\n }\n\n // Step 2: Handle special \"cache\" param\n if (key === \"cache\" && provider) {\n let cacheValue = CACHE_VALUES[provider];\n\n // Bedrock supports cache for Anthropic Claude and Amazon Nova models\n if (provider === \"bedrock\" && !bedrockSupportsCaching(config.model)) {\n cacheValue = undefined;\n }\n\n // Provider/model doesn't support cache — drop it\n if (!cacheValue) {\n if (options.verbose) {\n changes.push({\n from: \"cache\",\n to: \"(dropped)\",\n value,\n reason: `${provider} does not use a cache param for this model (caching is automatic or unsupported)`,\n });\n }\n continue;\n }\n\n const isBool = value === \"true\" || value === \"1\" || value === \"yes\";\n const isDuration = DURATION_RE.test(value);\n\n if (isBool || isDuration) {\n const providerKey =\n PROVIDER_PARAMS[provider]?.[\"cache\"] ?? \"cache\";\n if (options.verbose) {\n changes.push({\n from: \"cache\",\n to: providerKey,\n value: cacheValue,\n reason: `cache=${value} → ${providerKey}=${cacheValue} for ${provider}`,\n });\n }\n params[providerKey] = cacheValue;\n\n // Emit cache_ttl when a duration is specified\n if (isDuration && CACHE_TTLS[provider]) {\n if (options.verbose) {\n changes.push({\n from: \"cache\",\n to: \"cache_ttl\",\n value,\n reason: `cache=${value} → cache_ttl=${value} for ${provider}`,\n });\n }\n params[\"cache_ttl\"] = value;\n }\n continue;\n }\n }\n\n // Step 3: Map canonical → provider-specific param name\n if (provider && PROVIDER_PARAMS[provider]) {\n const providerKey = PROVIDER_PARAMS[provider][key];\n if (providerKey && providerKey !== key) {\n if (options.verbose) {\n changes.push({\n from: key,\n to: providerKey,\n value,\n reason: `${provider} uses \"${providerKey}\" instead of \"${key}\"`,\n });\n }\n key = providerKey;\n }\n }\n\n // Step 4: OpenAI reasoning model adjustments (direct or via gateway)\n if (\n provider &&\n canHostOpenAIModels(provider) &&\n isReasoningModel(config.model) &&\n key === \"max_tokens\"\n ) {\n if (options.verbose) {\n changes.push({\n from: \"max_tokens\",\n to: \"max_completion_tokens\",\n value,\n reason:\n \"OpenAI reasoning models use max_completion_tokens instead of max_tokens\",\n });\n }\n key = \"max_completion_tokens\";\n }\n\n params[key] = value;\n }\n\n return {\n config: { ...config, params },\n provider,\n subProvider,\n changes,\n };\n}\n"]}
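
The cache branch in the normalize chunk above folds a single `cache` param into a provider-specific key plus an optional TTL. An illustrative run against Anthropic; the JSDoc in the sourcemap gives `cache=true` → `cache_control=ephemeral`, and a duration value also emits `cache_ttl`:

```ts
import { parse } from "llm-strings/parse";
import { normalize } from "llm-strings/normalize";

// A duration value both enables caching and sets a TTL.
const cfg = parse("llm://api.anthropic.com/claude-sonnet-4-5?cache=1h");
const { config } = normalize(cfg);
console.log(config.params);
// e.g. { cache_control: "ephemeral", cache_ttl: "1h" }
```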
package/dist/chunk-MPIHGH6L.js ADDED
@@ -0,0 +1,116 @@
+ import {
+   ALIASES,
+   CACHE_TTLS,
+   CACHE_VALUES,
+   DURATION_RE,
+   PROVIDER_PARAMS,
+   bedrockSupportsCaching,
+   canHostOpenAIModels,
+   detectGatewaySubProvider,
+   detectProvider,
+   isGatewayProvider,
+   isReasoningModel
+ } from "./chunk-XID353H7.js";
+
+ // src/normalize.ts
+ function normalize(config, options = {}) {
+   const provider = detectProvider(config.host);
+   const subProvider = provider && isGatewayProvider(provider) ? detectGatewaySubProvider(config.model) : void 0;
+   const changes = [];
+   const params = {};
+   for (const [rawKey, value] of Object.entries(config.params)) {
+     let key = rawKey;
+     if (ALIASES[key]) {
+       const canonical = ALIASES[key];
+       if (options.verbose) {
+         changes.push({
+           from: key,
+           to: canonical,
+           value,
+           reason: `alias: "${key}" \u2192 "${canonical}"`
+         });
+       }
+       key = canonical;
+     }
+     if (key === "cache" && provider) {
+       let cacheValue = CACHE_VALUES[provider];
+       if (provider === "bedrock" && !bedrockSupportsCaching(config.model)) {
+         cacheValue = void 0;
+       }
+       if (!cacheValue) {
+         if (options.verbose) {
+           changes.push({
+             from: "cache",
+             to: "(dropped)",
+             value,
+             reason: `${provider} does not use a cache param for this model (caching is automatic or unsupported)`
+           });
+         }
+         continue;
+       }
+       const isBool = value === "true" || value === "1" || value === "yes";
+       const isDuration = DURATION_RE.test(value);
+       if (isBool || isDuration) {
+         const providerKey = PROVIDER_PARAMS[provider]?.["cache"] ?? "cache";
+         if (options.verbose) {
+           changes.push({
+             from: "cache",
+             to: providerKey,
+             value: cacheValue,
+             reason: `cache=${value} \u2192 ${providerKey}=${cacheValue} for ${provider}`
+           });
+         }
+         params[providerKey] = cacheValue;
+         if (isDuration && CACHE_TTLS[provider]) {
+           if (options.verbose) {
+             changes.push({
+               from: "cache",
+               to: "cache_ttl",
+               value,
+               reason: `cache=${value} \u2192 cache_ttl=${value} for ${provider}`
+             });
+           }
+           params["cache_ttl"] = value;
+         }
+         continue;
+       }
+     }
+     if (provider && PROVIDER_PARAMS[provider]) {
+       const providerKey = PROVIDER_PARAMS[provider][key];
+       if (providerKey && providerKey !== key) {
+         if (options.verbose) {
+           changes.push({
+             from: key,
+             to: providerKey,
+             value,
+             reason: `${provider} uses "${providerKey}" instead of "${key}"`
+           });
+         }
+         key = providerKey;
+       }
+     }
+     if (provider && canHostOpenAIModels(provider) && isReasoningModel(config.model) && key === "max_tokens") {
+       if (options.verbose) {
+         changes.push({
+           from: "max_tokens",
+           to: "max_completion_tokens",
+           value,
+           reason: "OpenAI reasoning models use max_completion_tokens instead of max_tokens"
+         });
+       }
+       key = "max_completion_tokens";
+     }
+     params[key] = value;
+   }
+   return {
+     config: { ...config, params },
+     provider,
+     subProvider,
+     changes
+   };
+ }
+
+ export {
+   normalize
+ };
+ //# sourceMappingURL=chunk-MPIHGH6L.js.map
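
To watch the normalization steps above fire in order, pass `verbose: true` and inspect `changes`. A sketch for Google, whose camelCase mapping the sourcemap JSDoc calls out (`max_tokens` → `maxOutputTokens`); exact reason strings follow the templates in the chunk:

```ts
import { parse } from "llm-strings/parse";
import { normalize } from "llm-strings/normalize";

const cfg = parse("llm://generativelanguage.googleapis.com/gemini-2.5-pro?temp=0.9&max=2048");
const { config, provider, changes } = normalize(cfg, { verbose: true });

console.log(provider);      // "google"
console.log(config.params); // e.g. { temperature: "0.9", maxOutputTokens: "2048" }

for (const c of changes) {
  console.log(`${c.from} -> ${c.to}: ${c.reason}`);
}
// alias expansion first (temp -> temperature, max -> max_tokens),
// then the canonical -> provider-specific rename (max_tokens -> maxOutputTokens)
```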