llm-strings 1.0.1 → 1.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/README.md +95 -14
  2. package/dist/chunk-FCEV23OT.js +37 -0
  3. package/dist/chunk-FCEV23OT.js.map +1 -0
  4. package/dist/chunk-MGWGNZDJ.cjs +116 -0
  5. package/dist/chunk-MGWGNZDJ.cjs.map +1 -0
  6. package/dist/chunk-MPIHGH6L.js +116 -0
  7. package/dist/chunk-MPIHGH6L.js.map +1 -0
  8. package/dist/chunk-N6NVBE43.cjs +37 -0
  9. package/dist/chunk-N6NVBE43.cjs.map +1 -0
  10. package/dist/chunk-NSCBY4VD.cjs +370 -0
  11. package/dist/chunk-NSCBY4VD.cjs.map +1 -0
  12. package/dist/chunk-RSUXM42X.cjs +180 -0
  13. package/dist/chunk-RSUXM42X.cjs.map +1 -0
  14. package/dist/chunk-UYMVUTLV.js +180 -0
  15. package/dist/chunk-UYMVUTLV.js.map +1 -0
  16. package/dist/chunk-XID353H7.js +370 -0
  17. package/dist/chunk-XID353H7.js.map +1 -0
  18. package/dist/index.cjs +12 -806
  19. package/dist/index.cjs.map +1 -1
  20. package/dist/index.d.cts +4 -80
  21. package/dist/index.d.ts +4 -80
  22. package/dist/index.js +11 -764
  23. package/dist/index.js.map +1 -1
  24. package/dist/normalize.cjs +8 -0
  25. package/dist/normalize.cjs.map +1 -0
  26. package/dist/normalize.d.cts +33 -0
  27. package/dist/normalize.d.ts +33 -0
  28. package/dist/normalize.js +8 -0
  29. package/dist/normalize.js.map +1 -0
  30. package/dist/parse.cjs +9 -0
  31. package/dist/parse.cjs.map +1 -0
  32. package/dist/parse.d.cts +32 -0
  33. package/dist/parse.d.ts +32 -0
  34. package/dist/parse.js +9 -0
  35. package/dist/parse.js.map +1 -0
  36. package/dist/provider-core-DinpG40u.d.cts +53 -0
  37. package/dist/provider-core-DinpG40u.d.ts +53 -0
  38. package/dist/providers.cjs +37 -392
  39. package/dist/providers.cjs.map +1 -1
  40. package/dist/providers.d.cts +4 -42
  41. package/dist/providers.d.ts +4 -42
  42. package/dist/providers.js +20 -336
  43. package/dist/providers.js.map +1 -1
  44. package/dist/validate.cjs +10 -0
  45. package/dist/validate.cjs.map +1 -0
  46. package/dist/validate.d.cts +21 -0
  47. package/dist/validate.d.ts +21 -0
  48. package/dist/validate.js +10 -0
  49. package/dist/validate.js.map +1 -0
  50. package/package.json +33 -1
package/README.md CHANGED
@@ -247,7 +247,8 @@ const url = build({
  ### AWS Bedrock with cross-region inference

  ```ts
- import { parse, normalize, detectBedrockModelFamily } from "llm-strings";
+ import { parse, normalize } from "llm-strings";
+ import { detectBedrockModelFamily } from "llm-strings/providers";

  const config = parse(
  "llm://bedrock-runtime.us-east-1.amazonaws.com/us.anthropic.claude-sonnet-4-5-20250929-v1:0?temp=0.5&max=4096"
@@ -289,7 +290,7 @@ const issues = validate("llm://openrouter.ai/openai/o3?temp=0.7");
  | Cohere | `api.cohere.com` | snake_case |
  | AWS Bedrock | `bedrock-runtime.{region}.amazonaws.com` | camelCase |
  | OpenRouter | `openrouter.ai` | snake_case |
- | Vercel AI | `gateway.ai.vercel.sh` | snake_case |
+ | Vercel AI | `gateway.ai.vercel.app` | snake_case |

  Gateways like OpenRouter and Vercel route to any upstream provider. Bedrock hosts models from multiple families (Anthropic, Meta, Amazon, Mistral, Cohere, AI21) with cross-region inference support. Each provider's parameter names differ — normalization handles the translation automatically.

@@ -297,17 +298,32 @@ Gateways like OpenRouter and Vercel route to any upstream provider. Bedrock host

  Use these shortcuts in your connection strings — they expand automatically during normalization:

- | Shorthand | Canonical |
- | ------------------------------------------ | -------------------- |
- | `temp` | `temperature` |
- | `max`, `max_out`, `maxTokens` | `max_tokens` |
- | `topp`, `topP`, `nucleus` | `top_p` |
- | `topk`, `topK` | `top_k` |
- | `freq`, `freq_penalty` | `frequency_penalty` |
- | `pres`, `pres_penalty` | `presence_penalty` |
- | `stop_sequences`, `stopSequences` | `stop` |
- | `reasoning`, `reasoning_effort` | `effort` |
- | `cache_control`, `cacheControl` | `cache` |
+ | Shorthand | Canonical |
+ | -------------------------------------------------------------------- | -------------------- |
+ | `temp` | `temperature` |
+ | `max`, `max_out`, `max_output`, `max_output_tokens`, `maxTokens`, `maxOutputTokens`, `max_completion_tokens` | `max_tokens` |
+ | `topp`, `topP`, `nucleus` | `top_p` |
+ | `topk`, `topK` | `top_k` |
+ | `freq`, `freq_penalty`, `frequencyPenalty`, `repetition_penalty` | `frequency_penalty` |
+ | `pres`, `pres_penalty`, `presencePenalty` | `presence_penalty` |
+ | `stop_sequences`, `stopSequences`, `stop_sequence` | `stop` |
+ | `random_seed`, `randomSeed` | `seed` |
+ | `candidateCount`, `candidate_count`, `num_completions` | `n` |
+ | `reasoning`, `reasoning_effort` | `effort` |
+ | `cache_control`, `cacheControl`, `cachePoint`, `cache_point` | `cache` |
+
+ ## Sub-path Imports
+
+ For smaller bundles, import only what you need:
+
+ ```ts
+ import { parse, build } from "llm-strings/parse";
+ import { normalize } from "llm-strings/normalize";
+ import { validate } from "llm-strings/validate";
+ import { detectProvider, ALIASES, PROVIDER_PARAMS, PARAM_SPECS } from "llm-strings/providers";
+ ```
+
+ All sub-paths ship ESM + CJS with full type declarations.

  ## API Reference

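To illustrate the expanded alias table and the new sub-path entry points together, here is a small, hedged sketch; the printed values are expectations based on the table above and the `NormalizeResult` shape shown later in this diff:

```ts
import { parse } from "llm-strings/parse";
import { normalize } from "llm-strings/normalize";

// `temp` and `max` are shorthands from the alias table above.
const config = parse("llm://api.openai.com/gpt-4.1?temp=0.7&max=1500");

// verbose: true records every rename in `changes`.
const { config: normalized, provider, changes } = normalize(config, { verbose: true });

console.log(provider);          // expected: "openai"
console.log(normalized.params); // expected: { temperature: "0.7", max_tokens: "1500" }
changes.forEach((c) => console.log(`${c.from} -> ${c.to} (${c.reason})`));
```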
@@ -355,11 +371,44 @@ Identifies the provider from a hostname string.

  Identifies the model family (anthropic, meta, amazon, mistral, cohere, ai21) from a Bedrock model ID. Handles cross-region (`us.`, `eu.`, `apac.`) and global inference profiles.

+ ### `detectGatewaySubProvider(model): Provider | undefined`
+
+ Extracts the underlying provider from a gateway model string (e.g. `"anthropic/claude-sonnet-4-5"` → `"anthropic"`). Returns `undefined` for unknown prefixes or models without a `/`.
+
+ ### `isReasoningModel(model): boolean`
+
+ Returns `true` for OpenAI reasoning models (o1, o3, o4 families). Handles gateway prefixes like `"openai/o3"`.
+
+ ### `isGatewayProvider(provider): boolean`
+
+ Returns `true` for gateway providers (`openrouter`, `vercel`) that proxy to other providers.
+
+ ### `canHostOpenAIModels(provider): boolean`
+
+ Returns `true` for providers that can route to OpenAI models and need reasoning-model checks (`openai`, `openrouter`, `vercel`).
+
+ ### `bedrockSupportsCaching(model): boolean`
+
+ Returns `true` if the Bedrock model supports prompt caching (Claude and Nova models only).
+
+ ### Constants
+
+ | Export | Description |
+ | --- | --- |
+ | `ALIASES` | Shorthand → canonical param name mapping |
+ | `PROVIDER_PARAMS` | Canonical → provider-specific param names, per provider |
+ | `PARAM_SPECS` | Validation rules (type, min/max, enum) per provider, keyed by provider-specific param name |
+ | `REASONING_MODEL_UNSUPPORTED` | Set of canonical params unsupported by reasoning models |
+ | `PROVIDER_META` | Array of provider metadata (id, name, host, brand color) for UI integrations |
+ | `MODELS` | Suggested model IDs per provider |
+ | `CANONICAL_PARAM_SPECS` | Canonical param specs per provider with descriptions — useful for building UIs |
+
  ## TypeScript

  Full type definitions ship with the package:

  ```ts
+ // Core types from the main entry
  import type {
  LlmConnectionConfig,
  NormalizeResult,
@@ -367,9 +416,41 @@ import type {
  NormalizeOptions,
  ValidateOptions,
  ValidationIssue,
+ } from "llm-strings";
+
+ // Provider types from the providers sub-path
+ import type {
  Provider,
  BedrockModelFamily,
- } from "llm-strings";
+   ParamSpec,
+   ProviderMeta,
+   CanonicalParamSpec,
+ } from "llm-strings/providers";
+ ```
+
+ ## Provider Metadata (for UI integrations)
+
+ The library exports metadata useful for building UIs — provider names, brand colors, suggested models, and canonical parameter specs:
+
+ ```ts
+ import { PROVIDER_META, MODELS, CANONICAL_PARAM_SPECS } from "llm-strings/providers";
+
+ // Provider display info
+ PROVIDER_META.forEach((p) => console.log(`${p.name}: ${p.host} (${p.color})`));
+ // OpenAI: api.openai.com (#10a37f)
+ // Anthropic: api.anthropic.com (#e8956a)
+ // ...
+
+ // Suggested models per provider
+ MODELS.openai; // → ["gpt-5.2", "gpt-5.2-pro", "gpt-4.1", "gpt-4.1-mini", ...]
+ MODELS.anthropic; // → ["claude-opus-4-6", "claude-sonnet-4-6", "claude-sonnet-4-5", ...]
+
+ // Canonical param specs — useful for building config forms
+ CANONICAL_PARAM_SPECS.openai.temperature;
+ // → { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" }
+
+ CANONICAL_PARAM_SPECS.anthropic.effort;
+ // → { type: "enum", values: ["low", "medium", "high", "max"], default: "medium", description: "Thinking effort" }
  ```

  ## Contributing
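A brief, hedged sketch exercising the helpers documented in the hunk above; the expected values simply restate the behavior described in the API reference:

```ts
import {
  detectGatewaySubProvider,
  isGatewayProvider,
  isReasoningModel,
  bedrockSupportsCaching,
} from "llm-strings/providers";

// Gateway model strings carry the upstream provider as a prefix.
detectGatewaySubProvider("anthropic/claude-sonnet-4-5"); // expected: "anthropic"
detectGatewaySubProvider("claude-sonnet-4-5");           // expected: undefined (no "/")

// Reasoning-model detection works with or without a gateway prefix.
isReasoningModel("openai/o3"); // expected: true
isReasoningModel("gpt-4.1");   // expected: false

// Gateways proxy to other providers; Bedrock caching is model-dependent.
isGatewayProvider("openrouter");                                        // expected: true
bedrockSupportsCaching("us.anthropic.claude-sonnet-4-5-20250929-v1:0"); // expected: true (Claude)
```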
package/dist/chunk-FCEV23OT.js ADDED
@@ -0,0 +1,37 @@
+ // src/parse.ts
+ function parse(connectionString) {
+   const url = new URL(connectionString);
+   if (url.protocol !== "llm:") {
+     throw new Error(
+       `Invalid scheme: expected "llm://", got "${url.protocol}//"`
+     );
+   }
+   const host = url.hostname;
+   const model = url.pathname.replace(/^\//, "");
+   const label = url.username || void 0;
+   const apiKey = url.password || void 0;
+   const params = {};
+   for (const [key, value] of url.searchParams) {
+     params[key] = value;
+   }
+   return {
+     raw: connectionString,
+     host,
+     model,
+     label,
+     apiKey,
+     params
+   };
+ }
+ function build(config) {
+   const auth = config.label || config.apiKey ? `${config.label ?? ""}${config.apiKey ? `:${config.apiKey}` : ""}@` : "";
+   const query = new URLSearchParams(config.params).toString();
+   const qs = query ? `?${query}` : "";
+   return `llm://${auth}${config.host}/${config.model}${qs}`;
+ }
+
+ export {
+   parse,
+   build
+ };
+ //# sourceMappingURL=chunk-FCEV23OT.js.map
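A hedged round-trip sketch of the chunk above, using the example string from the package's own JSDoc; `build()` accepts the parsed fields minus `raw` and should reproduce an equivalent string:

```ts
import { parse, build } from "llm-strings/parse";

const config = parse("llm://app-name:sk-proj-123456@api.openai.com/gpt-5.2?temp=0.7");
// expected: host "api.openai.com", model "gpt-5.2", label "app-name",
//           apiKey "sk-proj-123456", params { temp: "0.7" }

const { raw, ...rest } = config;
const rebuilt = build(rest);
// expected: "llm://app-name:sk-proj-123456@api.openai.com/gpt-5.2?temp=0.7"
console.log(rebuilt === raw); // expected: true
```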
package/dist/chunk-FCEV23OT.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/parse.ts"],"sourcesContent":["export interface LlmConnectionConfig {\n /** The original connection string */\n raw: string;\n /** Provider's API base URL (e.g. \"api.openai.com\") */\n host: string;\n /** Model name (e.g. \"gpt-5.2\") */\n model: string;\n /** Optional label or app name */\n label?: string;\n /** Optional API key or password */\n apiKey?: string;\n /** Additional config parameters (temp, max_tokens, etc.) */\n params: Record<string, string>;\n}\n\n/**\n * Parse an LLM connection string into its component parts.\n *\n * Format: `llm://[label[:apiKey]@]host/model[?key=value&...]`\n *\n * @example\n * ```ts\n * parse(\"llm://api.openai.com/gpt-5.2?temp=0.7&max_tokens=1500\")\n * parse(\"llm://app-name:sk-proj-123456@api.openai.com/gpt-5.2?temp=0.7\")\n * ```\n */\nexport function parse(connectionString: string): LlmConnectionConfig {\n const url = new URL(connectionString);\n\n if (url.protocol !== \"llm:\") {\n throw new Error(\n `Invalid scheme: expected \"llm://\", got \"${url.protocol}//\"`,\n );\n }\n\n const host = url.hostname;\n const model = url.pathname.replace(/^\\//, \"\");\n const label = url.username || undefined;\n const apiKey = url.password || undefined;\n\n const params: Record<string, string> = {};\n for (const [key, value] of url.searchParams) {\n params[key] = value;\n }\n\n return {\n raw: connectionString,\n host,\n model,\n label,\n apiKey,\n params,\n };\n}\n\n/**\n * Build an LLM connection string from a config object.\n */\nexport function build(config: Omit<LlmConnectionConfig, \"raw\">): string {\n const auth =\n config.label || config.apiKey\n ? `${config.label ?? \"\"}${config.apiKey ? `:${config.apiKey}` : \"\"}@`\n : \"\";\n\n const query = new URLSearchParams(config.params).toString();\n const qs = query ? `?${query}` : \"\";\n\n return `llm://${auth}${config.host}/${config.model}${qs}`;\n}\n"],"mappings":";AA0BO,SAAS,MAAM,kBAA+C;AACnE,QAAM,MAAM,IAAI,IAAI,gBAAgB;AAEpC,MAAI,IAAI,aAAa,QAAQ;AAC3B,UAAM,IAAI;AAAA,MACR,2CAA2C,IAAI,QAAQ;AAAA,IACzD;AAAA,EACF;AAEA,QAAM,OAAO,IAAI;AACjB,QAAM,QAAQ,IAAI,SAAS,QAAQ,OAAO,EAAE;AAC5C,QAAM,QAAQ,IAAI,YAAY;AAC9B,QAAM,SAAS,IAAI,YAAY;AAE/B,QAAM,SAAiC,CAAC;AACxC,aAAW,CAAC,KAAK,KAAK,KAAK,IAAI,cAAc;AAC3C,WAAO,GAAG,IAAI;AAAA,EAChB;AAEA,SAAO;AAAA,IACL,KAAK;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAKO,SAAS,MAAM,QAAkD;AACtE,QAAM,OACJ,OAAO,SAAS,OAAO,SACnB,GAAG,OAAO,SAAS,EAAE,GAAG,OAAO,SAAS,IAAI,OAAO,MAAM,KAAK,EAAE,MAChE;AAEN,QAAM,QAAQ,IAAI,gBAAgB,OAAO,MAAM,EAAE,SAAS;AAC1D,QAAM,KAAK,QAAQ,IAAI,KAAK,KAAK;AAEjC,SAAO,SAAS,IAAI,GAAG,OAAO,IAAI,IAAI,OAAO,KAAK,GAAG,EAAE;AACzD;","names":[]}
package/dist/chunk-MGWGNZDJ.cjs ADDED
@@ -0,0 +1,116 @@
+ "use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _nullishCoalesce(lhs, rhsFn) { if (lhs != null) { return lhs; } else { return rhsFn(); } } function _optionalChain(ops) { let lastAccessLHS = undefined; let value = ops[0]; let i = 1; while (i < ops.length) { const op = ops[i]; const fn = ops[i + 1]; i += 2; if ((op === 'optionalAccess' || op === 'optionalCall') && value == null) { return undefined; } if (op === 'access' || op === 'optionalAccess') { lastAccessLHS = value; value = fn(value); } else if (op === 'call' || op === 'optionalCall') { value = fn((...args) => value.call(lastAccessLHS, ...args)); lastAccessLHS = undefined; } } return value; }
+
+
+
+
+
+
+
+
+
+
+
+ var _chunkNSCBY4VDcjs = require('./chunk-NSCBY4VD.cjs');
+
+ // src/normalize.ts
+ function normalize(config, options = {}) {
+   const provider = _chunkNSCBY4VDcjs.detectProvider.call(void 0, config.host);
+   const subProvider = provider && _chunkNSCBY4VDcjs.isGatewayProvider.call(void 0, provider) ? _chunkNSCBY4VDcjs.detectGatewaySubProvider.call(void 0, config.model) : void 0;
+   const changes = [];
+   const params = {};
+   for (const [rawKey, value] of Object.entries(config.params)) {
+     let key = rawKey;
+     if (_chunkNSCBY4VDcjs.ALIASES[key]) {
+       const canonical = _chunkNSCBY4VDcjs.ALIASES[key];
+       if (options.verbose) {
+         changes.push({
+           from: key,
+           to: canonical,
+           value,
+           reason: `alias: "${key}" \u2192 "${canonical}"`
+         });
+       }
+       key = canonical;
+     }
+     if (key === "cache" && provider) {
+       let cacheValue = _chunkNSCBY4VDcjs.CACHE_VALUES[provider];
+       if (provider === "bedrock" && !_chunkNSCBY4VDcjs.bedrockSupportsCaching.call(void 0, config.model)) {
+         cacheValue = void 0;
+       }
+       if (!cacheValue) {
+         if (options.verbose) {
+           changes.push({
+             from: "cache",
+             to: "(dropped)",
+             value,
+             reason: `${provider} does not use a cache param for this model (caching is automatic or unsupported)`
+           });
+         }
+         continue;
+       }
+       const isBool = value === "true" || value === "1" || value === "yes";
+       const isDuration = _chunkNSCBY4VDcjs.DURATION_RE.test(value);
+       if (isBool || isDuration) {
+         const providerKey = _nullishCoalesce(_optionalChain([_chunkNSCBY4VDcjs.PROVIDER_PARAMS, 'access', _ => _[provider], 'optionalAccess', _2 => _2["cache"]]), () => ( "cache"));
+         if (options.verbose) {
+           changes.push({
+             from: "cache",
+             to: providerKey,
+             value: cacheValue,
+             reason: `cache=${value} \u2192 ${providerKey}=${cacheValue} for ${provider}`
+           });
+         }
+         params[providerKey] = cacheValue;
+         if (isDuration && _chunkNSCBY4VDcjs.CACHE_TTLS[provider]) {
+           if (options.verbose) {
+             changes.push({
+               from: "cache",
+               to: "cache_ttl",
+               value,
+               reason: `cache=${value} \u2192 cache_ttl=${value} for ${provider}`
+             });
+           }
+           params["cache_ttl"] = value;
+         }
+         continue;
+       }
+     }
+     if (provider && _chunkNSCBY4VDcjs.PROVIDER_PARAMS[provider]) {
+       const providerKey = _chunkNSCBY4VDcjs.PROVIDER_PARAMS[provider][key];
+       if (providerKey && providerKey !== key) {
+         if (options.verbose) {
+           changes.push({
+             from: key,
+             to: providerKey,
+             value,
+             reason: `${provider} uses "${providerKey}" instead of "${key}"`
+           });
+         }
+         key = providerKey;
+       }
+     }
+     if (provider && _chunkNSCBY4VDcjs.canHostOpenAIModels.call(void 0, provider) && _chunkNSCBY4VDcjs.isReasoningModel.call(void 0, config.model) && key === "max_tokens") {
+       if (options.verbose) {
+         changes.push({
+           from: "max_tokens",
+           to: "max_completion_tokens",
+           value,
+           reason: "OpenAI reasoning models use max_completion_tokens instead of max_tokens"
+         });
+       }
+       key = "max_completion_tokens";
+     }
+     params[key] = value;
+   }
+   return {
+     config: { ...config, params },
+     provider,
+     subProvider,
+     changes
+   };
+ }
+
+
+
+ exports.normalize = normalize;
+ //# sourceMappingURL=chunk-MGWGNZDJ.cjs.map
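A hedged sketch of the cache branch in the chunk above: per the normalize() docstring, `cache=true` on an Anthropic host should be rewritten to `cache_control=ephemeral`. The exact key and value come from internal tables (CACHE_VALUES / PROVIDER_PARAMS) that live in a different chunk, so treat the printed output as an expectation:

```ts
import { parse } from "llm-strings/parse";
import { normalize } from "llm-strings/normalize";

const { config, changes } = normalize(
  parse("llm://api.anthropic.com/claude-sonnet-4-5?cache=true&temp=0.5"),
  { verbose: true }
);

console.log(config.params);
// expected to include something like: { cache_control: "ephemeral", temperature: "0.5" }
changes.forEach((c) => console.log(c.reason));
```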
package/dist/chunk-MGWGNZDJ.cjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["/Users/dan/code/oss/llm-strings/dist/chunk-MGWGNZDJ.cjs","../src/normalize.ts"],"names":[],"mappings":"AAAA;AACE;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACF,wDAA6B;AAC7B;AACA;ACgCO,SAAS,SAAA,CACd,MAAA,EACA,QAAA,EAA4B,CAAC,CAAA,EACZ;AACjB,EAAA,MAAM,SAAA,EAAW,8CAAA,MAAe,CAAO,IAAI,CAAA;AAC3C,EAAA,MAAM,YAAA,EACJ,SAAA,GAAY,iDAAA,QAA0B,EAAA,EAClC,wDAAA,MAAyB,CAAO,KAAK,EAAA,EACrC,KAAA,CAAA;AACN,EAAA,MAAM,QAAA,EAA6B,CAAC,CAAA;AACpC,EAAA,MAAM,OAAA,EAAiC,CAAC,CAAA;AAExC,EAAA,IAAA,CAAA,MAAW,CAAC,MAAA,EAAQ,KAAK,EAAA,GAAK,MAAA,CAAO,OAAA,CAAQ,MAAA,CAAO,MAAM,CAAA,EAAG;AAC3D,IAAA,IAAI,IAAA,EAAM,MAAA;AAGV,IAAA,GAAA,CAAI,yBAAA,CAAQ,GAAG,CAAA,EAAG;AAChB,MAAA,MAAM,UAAA,EAAY,yBAAA,CAAQ,GAAG,CAAA;AAC7B,MAAA,GAAA,CAAI,OAAA,CAAQ,OAAA,EAAS;AACnB,QAAA,OAAA,CAAQ,IAAA,CAAK;AAAA,UACX,IAAA,EAAM,GAAA;AAAA,UACN,EAAA,EAAI,SAAA;AAAA,UACJ,KAAA;AAAA,UACA,MAAA,EAAQ,CAAA,QAAA,EAAW,GAAG,CAAA,UAAA,EAAQ,SAAS,CAAA,CAAA;AAAA,QACzC,CAAC,CAAA;AAAA,MACH;AACA,MAAA,IAAA,EAAM,SAAA;AAAA,IACR;AAGA,IAAA,GAAA,CAAI,IAAA,IAAQ,QAAA,GAAW,QAAA,EAAU;AAC/B,MAAA,IAAI,WAAA,EAAa,8BAAA,CAAa,QAAQ,CAAA;AAGtC,MAAA,GAAA,CAAI,SAAA,IAAa,UAAA,GAAa,CAAC,sDAAA,MAAuB,CAAO,KAAK,CAAA,EAAG;AACnE,QAAA,WAAA,EAAa,KAAA,CAAA;AAAA,MACf;AAGA,MAAA,GAAA,CAAI,CAAC,UAAA,EAAY;AACf,QAAA,GAAA,CAAI,OAAA,CAAQ,OAAA,EAAS;AACnB,UAAA,OAAA,CAAQ,IAAA,CAAK;AAAA,YACX,IAAA,EAAM,OAAA;AAAA,YACN,EAAA,EAAI,WAAA;AAAA,YACJ,KAAA;AAAA,YACA,MAAA,EAAQ,CAAA,EAAA;AACT,UAAA;AACH,QAAA;AACA,QAAA;AACF,MAAA;AAEe,MAAA;AACT,MAAA;AAEQ,MAAA;AACN,QAAA;AAEM,QAAA;AACF,UAAA;AACA,YAAA;AACF,YAAA;AACG,YAAA;AACC,YAAA;AACT,UAAA;AACH,QAAA;AACO,QAAA;AAGH,QAAA;AACE,UAAA;AACM,YAAA;AACA,cAAA;AACF,cAAA;AACJ,cAAA;AACA,cAAA;AACD,YAAA;AACH,UAAA;AACO,UAAA;AACT,QAAA;AACA,QAAA;AACF,MAAA;AACF,IAAA;AAGgB,IAAA;AACR,MAAA;AACF,MAAA;AACU,QAAA;AACF,UAAA;AACA,YAAA;AACF,YAAA;AACJ,YAAA;AACQ,YAAA;AACT,UAAA;AACH,QAAA;AACM,QAAA;AACR,MAAA;AACF,IAAA;AAKE,IAAA;AAIY,MAAA;AACG,QAAA;AACL,UAAA;AACF,UAAA;AACJ,UAAA;AAEE,UAAA;AACH,QAAA;AACH,MAAA;AACM,MAAA;AACR,IAAA;AAEc,IAAA;AAChB,EAAA;AAEO,EAAA;AACQ,IAAA;AACb,IAAA;AACA,IAAA;AACA,IAAA;AACF,EAAA;AACF;AD9DqB;AACA;AACA;AACA","file":"/Users/dan/code/oss/llm-strings/dist/chunk-MGWGNZDJ.cjs","sourcesContent":[null,"import type { LlmConnectionConfig } from \"./parse.js\";\nimport {\n ALIASES,\n CACHE_TTLS,\n CACHE_VALUES,\n DURATION_RE,\n PROVIDER_PARAMS,\n bedrockSupportsCaching,\n canHostOpenAIModels,\n detectGatewaySubProvider,\n detectProvider,\n isGatewayProvider,\n isReasoningModel,\n type Provider,\n} from \"./provider-core.js\";\n\nexport interface NormalizeChange {\n from: string;\n to: string;\n value: string;\n reason: string;\n}\n\nexport interface NormalizeResult {\n config: LlmConnectionConfig;\n provider: Provider | undefined;\n /** Underlying provider extracted from gateway model prefix (e.g. \"anthropic\" from \"anthropic/claude-sonnet-4-5\"). */\n subProvider: Provider | undefined;\n changes: NormalizeChange[];\n}\n\nexport interface NormalizeOptions {\n /** Include detailed change log in the result. */\n verbose?: boolean;\n}\n\n/**\n * Normalize an LLM connection config's params for its target provider.\n *\n * 1. Expands shorthand aliases (e.g. `temp` → `temperature`)\n * 2. Maps canonical param names to provider-specific names\n * (e.g. `max_tokens` → `maxOutputTokens` for Google)\n * 3. Normalizes special values (e.g. `cache=true` → `cache_control=ephemeral` for Anthropic)\n * 4. 
For OpenAI reasoning models, remaps `max_tokens` → `max_completion_tokens`\n * and warns about unsupported sampling params\n */\nexport function normalize(\n config: LlmConnectionConfig,\n options: NormalizeOptions = {},\n): NormalizeResult {\n const provider = detectProvider(config.host);\n const subProvider =\n provider && isGatewayProvider(provider)\n ? detectGatewaySubProvider(config.model)\n : undefined;\n const changes: NormalizeChange[] = [];\n const params: Record<string, string> = {};\n\n for (const [rawKey, value] of Object.entries(config.params)) {\n let key = rawKey;\n\n // Step 1: Expand aliases to canonical name\n if (ALIASES[key]) {\n const canonical = ALIASES[key];\n if (options.verbose) {\n changes.push({\n from: key,\n to: canonical,\n value,\n reason: `alias: \"${key}\" → \"${canonical}\"`,\n });\n }\n key = canonical;\n }\n\n // Step 2: Handle special \"cache\" param\n if (key === \"cache\" && provider) {\n let cacheValue = CACHE_VALUES[provider];\n\n // Bedrock supports cache for Anthropic Claude and Amazon Nova models\n if (provider === \"bedrock\" && !bedrockSupportsCaching(config.model)) {\n cacheValue = undefined;\n }\n\n // Provider/model doesn't support cache — drop it\n if (!cacheValue) {\n if (options.verbose) {\n changes.push({\n from: \"cache\",\n to: \"(dropped)\",\n value,\n reason: `${provider} does not use a cache param for this model (caching is automatic or unsupported)`,\n });\n }\n continue;\n }\n\n const isBool = value === \"true\" || value === \"1\" || value === \"yes\";\n const isDuration = DURATION_RE.test(value);\n\n if (isBool || isDuration) {\n const providerKey =\n PROVIDER_PARAMS[provider]?.[\"cache\"] ?? \"cache\";\n if (options.verbose) {\n changes.push({\n from: \"cache\",\n to: providerKey,\n value: cacheValue,\n reason: `cache=${value} → ${providerKey}=${cacheValue} for ${provider}`,\n });\n }\n params[providerKey] = cacheValue;\n\n // Emit cache_ttl when a duration is specified\n if (isDuration && CACHE_TTLS[provider]) {\n if (options.verbose) {\n changes.push({\n from: \"cache\",\n to: \"cache_ttl\",\n value,\n reason: `cache=${value} → cache_ttl=${value} for ${provider}`,\n });\n }\n params[\"cache_ttl\"] = value;\n }\n continue;\n }\n }\n\n // Step 3: Map canonical → provider-specific param name\n if (provider && PROVIDER_PARAMS[provider]) {\n const providerKey = PROVIDER_PARAMS[provider][key];\n if (providerKey && providerKey !== key) {\n if (options.verbose) {\n changes.push({\n from: key,\n to: providerKey,\n value,\n reason: `${provider} uses \"${providerKey}\" instead of \"${key}\"`,\n });\n }\n key = providerKey;\n }\n }\n\n // Step 4: OpenAI reasoning model adjustments (direct or via gateway)\n if (\n provider &&\n canHostOpenAIModels(provider) &&\n isReasoningModel(config.model) &&\n key === \"max_tokens\"\n ) {\n if (options.verbose) {\n changes.push({\n from: \"max_tokens\",\n to: \"max_completion_tokens\",\n value,\n reason:\n \"OpenAI reasoning models use max_completion_tokens instead of max_tokens\",\n });\n }\n key = \"max_completion_tokens\";\n }\n\n params[key] = value;\n }\n\n return {\n config: { ...config, params },\n provider,\n subProvider,\n changes,\n };\n}\n"]}
package/dist/chunk-MPIHGH6L.js ADDED
@@ -0,0 +1,116 @@
+ import {
+   ALIASES,
+   CACHE_TTLS,
+   CACHE_VALUES,
+   DURATION_RE,
+   PROVIDER_PARAMS,
+   bedrockSupportsCaching,
+   canHostOpenAIModels,
+   detectGatewaySubProvider,
+   detectProvider,
+   isGatewayProvider,
+   isReasoningModel
+ } from "./chunk-XID353H7.js";
+
+ // src/normalize.ts
+ function normalize(config, options = {}) {
+   const provider = detectProvider(config.host);
+   const subProvider = provider && isGatewayProvider(provider) ? detectGatewaySubProvider(config.model) : void 0;
+   const changes = [];
+   const params = {};
+   for (const [rawKey, value] of Object.entries(config.params)) {
+     let key = rawKey;
+     if (ALIASES[key]) {
+       const canonical = ALIASES[key];
+       if (options.verbose) {
+         changes.push({
+           from: key,
+           to: canonical,
+           value,
+           reason: `alias: "${key}" \u2192 "${canonical}"`
+         });
+       }
+       key = canonical;
+     }
+     if (key === "cache" && provider) {
+       let cacheValue = CACHE_VALUES[provider];
+       if (provider === "bedrock" && !bedrockSupportsCaching(config.model)) {
+         cacheValue = void 0;
+       }
+       if (!cacheValue) {
+         if (options.verbose) {
+           changes.push({
+             from: "cache",
+             to: "(dropped)",
+             value,
+             reason: `${provider} does not use a cache param for this model (caching is automatic or unsupported)`
+           });
+         }
+         continue;
+       }
+       const isBool = value === "true" || value === "1" || value === "yes";
+       const isDuration = DURATION_RE.test(value);
+       if (isBool || isDuration) {
+         const providerKey = PROVIDER_PARAMS[provider]?.["cache"] ?? "cache";
+         if (options.verbose) {
+           changes.push({
+             from: "cache",
+             to: providerKey,
+             value: cacheValue,
+             reason: `cache=${value} \u2192 ${providerKey}=${cacheValue} for ${provider}`
+           });
+         }
+         params[providerKey] = cacheValue;
+         if (isDuration && CACHE_TTLS[provider]) {
+           if (options.verbose) {
+             changes.push({
+               from: "cache",
+               to: "cache_ttl",
+               value,
+               reason: `cache=${value} \u2192 cache_ttl=${value} for ${provider}`
+             });
+           }
+           params["cache_ttl"] = value;
+         }
+         continue;
+       }
+     }
+     if (provider && PROVIDER_PARAMS[provider]) {
+       const providerKey = PROVIDER_PARAMS[provider][key];
+       if (providerKey && providerKey !== key) {
+         if (options.verbose) {
+           changes.push({
+             from: key,
+             to: providerKey,
+             value,
+             reason: `${provider} uses "${providerKey}" instead of "${key}"`
+           });
+         }
+         key = providerKey;
+       }
+     }
+     if (provider && canHostOpenAIModels(provider) && isReasoningModel(config.model) && key === "max_tokens") {
+       if (options.verbose) {
+         changes.push({
+           from: "max_tokens",
+           to: "max_completion_tokens",
+           value,
+           reason: "OpenAI reasoning models use max_completion_tokens instead of max_tokens"
+         });
+       }
+       key = "max_completion_tokens";
+     }
+     params[key] = value;
+   }
+   return {
+     config: { ...config, params },
+     provider,
+     subProvider,
+     changes
+   };
+ }
+
+ export {
+   normalize
+ };
+ //# sourceMappingURL=chunk-MPIHGH6L.js.map
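And a hedged sketch of the reasoning-model step in the same function: for an OpenAI reasoning model such as o3, the `max` shorthand should end up as `max_completion_tokens`, as the docstring and the README describe; the values below are expectations, not verified output:

```ts
import { parse } from "llm-strings/parse";
import { normalize } from "llm-strings/normalize";

const { config, provider, changes } = normalize(
  parse("llm://api.openai.com/o3?max=4096&temp=0.7"),
  { verbose: true }
);

console.log(provider);      // expected: "openai"
console.log(config.params); // expected to include max_completion_tokens: "4096"
console.log(changes.some((c) => c.to === "max_completion_tokens")); // expected: true
```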
package/dist/chunk-MPIHGH6L.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/normalize.ts"],"sourcesContent":["import type { LlmConnectionConfig } from \"./parse.js\";\nimport {\n ALIASES,\n CACHE_TTLS,\n CACHE_VALUES,\n DURATION_RE,\n PROVIDER_PARAMS,\n bedrockSupportsCaching,\n canHostOpenAIModels,\n detectGatewaySubProvider,\n detectProvider,\n isGatewayProvider,\n isReasoningModel,\n type Provider,\n} from \"./provider-core.js\";\n\nexport interface NormalizeChange {\n from: string;\n to: string;\n value: string;\n reason: string;\n}\n\nexport interface NormalizeResult {\n config: LlmConnectionConfig;\n provider: Provider | undefined;\n /** Underlying provider extracted from gateway model prefix (e.g. \"anthropic\" from \"anthropic/claude-sonnet-4-5\"). */\n subProvider: Provider | undefined;\n changes: NormalizeChange[];\n}\n\nexport interface NormalizeOptions {\n /** Include detailed change log in the result. */\n verbose?: boolean;\n}\n\n/**\n * Normalize an LLM connection config's params for its target provider.\n *\n * 1. Expands shorthand aliases (e.g. `temp` → `temperature`)\n * 2. Maps canonical param names to provider-specific names\n * (e.g. `max_tokens` → `maxOutputTokens` for Google)\n * 3. Normalizes special values (e.g. `cache=true` → `cache_control=ephemeral` for Anthropic)\n * 4. For OpenAI reasoning models, remaps `max_tokens` → `max_completion_tokens`\n * and warns about unsupported sampling params\n */\nexport function normalize(\n config: LlmConnectionConfig,\n options: NormalizeOptions = {},\n): NormalizeResult {\n const provider = detectProvider(config.host);\n const subProvider =\n provider && isGatewayProvider(provider)\n ? detectGatewaySubProvider(config.model)\n : undefined;\n const changes: NormalizeChange[] = [];\n const params: Record<string, string> = {};\n\n for (const [rawKey, value] of Object.entries(config.params)) {\n let key = rawKey;\n\n // Step 1: Expand aliases to canonical name\n if (ALIASES[key]) {\n const canonical = ALIASES[key];\n if (options.verbose) {\n changes.push({\n from: key,\n to: canonical,\n value,\n reason: `alias: \"${key}\" → \"${canonical}\"`,\n });\n }\n key = canonical;\n }\n\n // Step 2: Handle special \"cache\" param\n if (key === \"cache\" && provider) {\n let cacheValue = CACHE_VALUES[provider];\n\n // Bedrock supports cache for Anthropic Claude and Amazon Nova models\n if (provider === \"bedrock\" && !bedrockSupportsCaching(config.model)) {\n cacheValue = undefined;\n }\n\n // Provider/model doesn't support cache — drop it\n if (!cacheValue) {\n if (options.verbose) {\n changes.push({\n from: \"cache\",\n to: \"(dropped)\",\n value,\n reason: `${provider} does not use a cache param for this model (caching is automatic or unsupported)`,\n });\n }\n continue;\n }\n\n const isBool = value === \"true\" || value === \"1\" || value === \"yes\";\n const isDuration = DURATION_RE.test(value);\n\n if (isBool || isDuration) {\n const providerKey =\n PROVIDER_PARAMS[provider]?.[\"cache\"] ?? 
\"cache\";\n if (options.verbose) {\n changes.push({\n from: \"cache\",\n to: providerKey,\n value: cacheValue,\n reason: `cache=${value} → ${providerKey}=${cacheValue} for ${provider}`,\n });\n }\n params[providerKey] = cacheValue;\n\n // Emit cache_ttl when a duration is specified\n if (isDuration && CACHE_TTLS[provider]) {\n if (options.verbose) {\n changes.push({\n from: \"cache\",\n to: \"cache_ttl\",\n value,\n reason: `cache=${value} → cache_ttl=${value} for ${provider}`,\n });\n }\n params[\"cache_ttl\"] = value;\n }\n continue;\n }\n }\n\n // Step 3: Map canonical → provider-specific param name\n if (provider && PROVIDER_PARAMS[provider]) {\n const providerKey = PROVIDER_PARAMS[provider][key];\n if (providerKey && providerKey !== key) {\n if (options.verbose) {\n changes.push({\n from: key,\n to: providerKey,\n value,\n reason: `${provider} uses \"${providerKey}\" instead of \"${key}\"`,\n });\n }\n key = providerKey;\n }\n }\n\n // Step 4: OpenAI reasoning model adjustments (direct or via gateway)\n if (\n provider &&\n canHostOpenAIModels(provider) &&\n isReasoningModel(config.model) &&\n key === \"max_tokens\"\n ) {\n if (options.verbose) {\n changes.push({\n from: \"max_tokens\",\n to: \"max_completion_tokens\",\n value,\n reason:\n \"OpenAI reasoning models use max_completion_tokens instead of max_tokens\",\n });\n }\n key = \"max_completion_tokens\";\n }\n\n params[key] = value;\n }\n\n return {\n config: { ...config, params },\n provider,\n subProvider,\n changes,\n };\n}\n"],"mappings":";;;;;;;;;;;;;;;AA8CO,SAAS,UACd,QACA,UAA4B,CAAC,GACZ;AACjB,QAAM,WAAW,eAAe,OAAO,IAAI;AAC3C,QAAM,cACJ,YAAY,kBAAkB,QAAQ,IAClC,yBAAyB,OAAO,KAAK,IACrC;AACN,QAAM,UAA6B,CAAC;AACpC,QAAM,SAAiC,CAAC;AAExC,aAAW,CAAC,QAAQ,KAAK,KAAK,OAAO,QAAQ,OAAO,MAAM,GAAG;AAC3D,QAAI,MAAM;AAGV,QAAI,QAAQ,GAAG,GAAG;AAChB,YAAM,YAAY,QAAQ,GAAG;AAC7B,UAAI,QAAQ,SAAS;AACnB,gBAAQ,KAAK;AAAA,UACX,MAAM;AAAA,UACN,IAAI;AAAA,UACJ;AAAA,UACA,QAAQ,WAAW,GAAG,aAAQ,SAAS;AAAA,QACzC,CAAC;AAAA,MACH;AACA,YAAM;AAAA,IACR;AAGA,QAAI,QAAQ,WAAW,UAAU;AAC/B,UAAI,aAAa,aAAa,QAAQ;AAGtC,UAAI,aAAa,aAAa,CAAC,uBAAuB,OAAO,KAAK,GAAG;AACnE,qBAAa;AAAA,MACf;AAGA,UAAI,CAAC,YAAY;AACf,YAAI,QAAQ,SAAS;AACnB,kBAAQ,KAAK;AAAA,YACX,MAAM;AAAA,YACN,IAAI;AAAA,YACJ;AAAA,YACA,QAAQ,GAAG,QAAQ;AAAA,UACrB,CAAC;AAAA,QACH;AACA;AAAA,MACF;AAEA,YAAM,SAAS,UAAU,UAAU,UAAU,OAAO,UAAU;AAC9D,YAAM,aAAa,YAAY,KAAK,KAAK;AAEzC,UAAI,UAAU,YAAY;AACxB,cAAM,cACJ,gBAAgB,QAAQ,IAAI,OAAO,KAAK;AAC1C,YAAI,QAAQ,SAAS;AACnB,kBAAQ,KAAK;AAAA,YACX,MAAM;AAAA,YACN,IAAI;AAAA,YACJ,OAAO;AAAA,YACP,QAAQ,SAAS,KAAK,WAAM,WAAW,IAAI,UAAU,QAAQ,QAAQ;AAAA,UACvE,CAAC;AAAA,QACH;AACA,eAAO,WAAW,IAAI;AAGtB,YAAI,cAAc,WAAW,QAAQ,GAAG;AACtC,cAAI,QAAQ,SAAS;AACnB,oBAAQ,KAAK;AAAA,cACX,MAAM;AAAA,cACN,IAAI;AAAA,cACJ;AAAA,cACA,QAAQ,SAAS,KAAK,qBAAgB,KAAK,QAAQ,QAAQ;AAAA,YAC7D,CAAC;AAAA,UACH;AACA,iBAAO,WAAW,IAAI;AAAA,QACxB;AACA;AAAA,MACF;AAAA,IACF;AAGA,QAAI,YAAY,gBAAgB,QAAQ,GAAG;AACzC,YAAM,cAAc,gBAAgB,QAAQ,EAAE,GAAG;AACjD,UAAI,eAAe,gBAAgB,KAAK;AACtC,YAAI,QAAQ,SAAS;AACnB,kBAAQ,KAAK;AAAA,YACX,MAAM;AAAA,YACN,IAAI;AAAA,YACJ;AAAA,YACA,QAAQ,GAAG,QAAQ,UAAU,WAAW,iBAAiB,GAAG;AAAA,UAC9D,CAAC;AAAA,QACH;AACA,cAAM;AAAA,MACR;AAAA,IACF;AAGA,QACE,YACA,oBAAoB,QAAQ,KAC5B,iBAAiB,OAAO,KAAK,KAC7B,QAAQ,cACR;AACA,UAAI,QAAQ,SAAS;AACnB,gBAAQ,KAAK;AAAA,UACX,MAAM;AAAA,UACN,IAAI;AAAA,UACJ;AAAA,UACA,QACE;AAAA,QACJ,CAAC;AAAA,MACH;AACA,YAAM;AAAA,IACR;AAEA,WAAO,GAAG,IAAI;AAAA,EAChB;AAEA,SAAO;AAAA,IACL,QAAQ,EAAE,GAAG,QAAQ,OAAO;AAAA,IAC5B;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;","names":[]}
package/dist/chunk-N6NVBE43.cjs ADDED
@@ -0,0 +1,37 @@
+ "use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _nullishCoalesce(lhs, rhsFn) { if (lhs != null) { return lhs; } else { return rhsFn(); } }// src/parse.ts
+ function parse(connectionString) {
+   const url = new URL(connectionString);
+   if (url.protocol !== "llm:") {
+     throw new Error(
+       `Invalid scheme: expected "llm://", got "${url.protocol}//"`
+     );
+   }
+   const host = url.hostname;
+   const model = url.pathname.replace(/^\//, "");
+   const label = url.username || void 0;
+   const apiKey = url.password || void 0;
+   const params = {};
+   for (const [key, value] of url.searchParams) {
+     params[key] = value;
+   }
+   return {
+     raw: connectionString,
+     host,
+     model,
+     label,
+     apiKey,
+     params
+   };
+ }
+ function build(config) {
+   const auth = config.label || config.apiKey ? `${_nullishCoalesce(config.label, () => ( ""))}${config.apiKey ? `:${config.apiKey}` : ""}@` : "";
+   const query = new URLSearchParams(config.params).toString();
+   const qs = query ? `?${query}` : "";
+   return `llm://${auth}${config.host}/${config.model}${qs}`;
+ }
+
+
+
+
+ exports.parse = parse; exports.build = build;
+ //# sourceMappingURL=chunk-N6NVBE43.cjs.map
package/dist/chunk-N6NVBE43.cjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["/Users/dan/code/oss/llm-strings/dist/chunk-N6NVBE43.cjs","../src/parse.ts"],"names":[],"mappings":"AAAA;AC0BO,SAAS,KAAA,CAAM,gBAAA,EAA+C;AACnE,EAAA,MAAM,IAAA,EAAM,IAAI,GAAA,CAAI,gBAAgB,CAAA;AAEpC,EAAA,GAAA,CAAI,GAAA,CAAI,SAAA,IAAa,MAAA,EAAQ;AAC3B,IAAA,MAAM,IAAI,KAAA;AAAA,MACR,CAAA,wCAAA,EAA2C,GAAA,CAAI,QAAQ,CAAA,GAAA;AAAA,IACzD,CAAA;AAAA,EACF;AAEA,EAAA,MAAM,KAAA,EAAO,GAAA,CAAI,QAAA;AACjB,EAAA,MAAM,MAAA,EAAQ,GAAA,CAAI,QAAA,CAAS,OAAA,CAAQ,KAAA,EAAO,EAAE,CAAA;AAC5C,EAAA,MAAM,MAAA,EAAQ,GAAA,CAAI,SAAA,GAAY,KAAA,CAAA;AAC9B,EAAA,MAAM,OAAA,EAAS,GAAA,CAAI,SAAA,GAAY,KAAA,CAAA;AAE/B,EAAA,MAAM,OAAA,EAAiC,CAAC,CAAA;AACxC,EAAA,IAAA,CAAA,MAAW,CAAC,GAAA,EAAK,KAAK,EAAA,GAAK,GAAA,CAAI,YAAA,EAAc;AAC3C,IAAA,MAAA,CAAO,GAAG,EAAA,EAAI,KAAA;AAAA,EAChB;AAEA,EAAA,OAAO;AAAA,IACL,GAAA,EAAK,gBAAA;AAAA,IACL,IAAA;AAAA,IACA,KAAA;AAAA,IACA,KAAA;AAAA,IACA,MAAA;AAAA,IACA;AAAA,EACF,CAAA;AACF;AAKO,SAAS,KAAA,CAAM,MAAA,EAAkD;AACtE,EAAA,MAAM,KAAA,EACJ,MAAA,CAAO,MAAA,GAAS,MAAA,CAAO,OAAA,EACnB,CAAA,mBAAA;AAGmC,EAAA;AACR,EAAA;AAEY,EAAA;AAC/C;ADrCgD;AACA;AACA;AACA;AACA","file":"/Users/dan/code/oss/llm-strings/dist/chunk-N6NVBE43.cjs","sourcesContent":[null,"export interface LlmConnectionConfig {\n /** The original connection string */\n raw: string;\n /** Provider's API base URL (e.g. \"api.openai.com\") */\n host: string;\n /** Model name (e.g. \"gpt-5.2\") */\n model: string;\n /** Optional label or app name */\n label?: string;\n /** Optional API key or password */\n apiKey?: string;\n /** Additional config parameters (temp, max_tokens, etc.) */\n params: Record<string, string>;\n}\n\n/**\n * Parse an LLM connection string into its component parts.\n *\n * Format: `llm://[label[:apiKey]@]host/model[?key=value&...]`\n *\n * @example\n * ```ts\n * parse(\"llm://api.openai.com/gpt-5.2?temp=0.7&max_tokens=1500\")\n * parse(\"llm://app-name:sk-proj-123456@api.openai.com/gpt-5.2?temp=0.7\")\n * ```\n */\nexport function parse(connectionString: string): LlmConnectionConfig {\n const url = new URL(connectionString);\n\n if (url.protocol !== \"llm:\") {\n throw new Error(\n `Invalid scheme: expected \"llm://\", got \"${url.protocol}//\"`,\n );\n }\n\n const host = url.hostname;\n const model = url.pathname.replace(/^\\//, \"\");\n const label = url.username || undefined;\n const apiKey = url.password || undefined;\n\n const params: Record<string, string> = {};\n for (const [key, value] of url.searchParams) {\n params[key] = value;\n }\n\n return {\n raw: connectionString,\n host,\n model,\n label,\n apiKey,\n params,\n };\n}\n\n/**\n * Build an LLM connection string from a config object.\n */\nexport function build(config: Omit<LlmConnectionConfig, \"raw\">): string {\n const auth =\n config.label || config.apiKey\n ? `${config.label ?? \"\"}${config.apiKey ? `:${config.apiKey}` : \"\"}@`\n : \"\";\n\n const query = new URLSearchParams(config.params).toString();\n const qs = query ? `?${query}` : \"\";\n\n return `llm://${auth}${config.host}/${config.model}${qs}`;\n}\n"]}