llm-strings 0.0.1

This diff shows the content of a publicly available package version as released to a supported registry. It is provided for informational purposes only and reflects the package as it appears in its public registry.
package/dist/index.cjs ADDED
@@ -0,0 +1,631 @@
+ "use strict";
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+ for (var name in all)
+ __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+ if (from && typeof from === "object" || typeof from === "function") {
+ for (let key of __getOwnPropNames(from))
+ if (!__hasOwnProp.call(to, key) && key !== except)
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+ }
+ return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/index.ts
+ var index_exports = {};
+ __export(index_exports, {
+ ALIASES: () => ALIASES,
+ PROVIDER_PARAMS: () => PROVIDER_PARAMS,
+ build: () => build,
+ detectBedrockModelFamily: () => detectBedrockModelFamily,
+ detectProvider: () => detectProvider,
+ normalize: () => normalize,
+ parse: () => parse,
+ validate: () => validate
+ });
+ module.exports = __toCommonJS(index_exports);
+
+ // src/parse.ts
+ function parse(connectionString) {
+ const url = new URL(connectionString);
+ if (url.protocol !== "llm:") {
+ throw new Error(
+ `Invalid scheme: expected "llm://", got "${url.protocol}//"`
+ );
+ }
+ const host = url.hostname;
+ const model = url.pathname.replace(/^\//, "");
+ const label = url.username || void 0;
+ const apiKey = url.password || void 0;
+ const params = {};
+ for (const [key, value] of url.searchParams) {
+ params[key] = value;
+ }
+ return {
+ raw: connectionString,
+ host,
+ model,
+ label,
+ apiKey,
+ params
+ };
+ }
+ function build(config) {
+ const auth = config.label || config.apiKey ? `${config.label ?? ""}${config.apiKey ? `:${config.apiKey}` : ""}@` : "";
+ const query = new URLSearchParams(config.params).toString();
+ const qs = query ? `?${query}` : "";
+ return `llm://${auth}${config.host}/${config.model}${qs}`;
+ }
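
For orientation, a minimal sketch of the parse/build round trip. It assumes the package is required under its published name; the connection string (host, label, key, model) is purely illustrative. The label and apiKey come from the URL's username and password slots.

    const { parse, build } = require("llm-strings");

    const config = parse("llm://prod:sk-example@api.openai.com/gpt-4o?temp=0.2");
    // config.host   === "api.openai.com"
    // config.model  === "gpt-4o"
    // config.label  === "prod"       (URL username)
    // config.apiKey === "sk-example" (URL password)
    // config.params => { temp: "0.2" } (values stay strings)

    build(config); // "llm://prod:sk-example@api.openai.com/gpt-4o?temp=0.2"
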
+
+ // src/providers.ts
+ function detectProvider(host) {
+ if (host.includes("openrouter")) return "openrouter";
+ if (host.includes("gateway.ai.vercel")) return "vercel";
+ if (host.includes("amazonaws") || host.includes("bedrock")) return "bedrock";
+ if (host.includes("openai")) return "openai";
+ if (host.includes("anthropic") || host.includes("claude")) return "anthropic";
+ if (host.includes("googleapis") || host.includes("google")) return "google";
+ if (host.includes("mistral")) return "mistral";
+ if (host.includes("cohere")) return "cohere";
+ return void 0;
+ }
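
Detection is a substring match on the host, checked in the order above (gateways before vendors), so vendor API hosts and gateway hosts both resolve. A few illustrative hosts, assuming the same require:

    const { detectProvider } = require("llm-strings");

    detectProvider("api.openai.com");                          // "openai"
    detectProvider("bedrock-runtime.us-east-1.amazonaws.com"); // "bedrock"
    detectProvider("openrouter.ai");                           // "openrouter"
    detectProvider("example.invalid");                         // undefined
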
+ var ALIASES = {
+ // temperature
+ temp: "temperature",
+ // max_tokens
+ max: "max_tokens",
+ max_out: "max_tokens",
+ max_output: "max_tokens",
+ max_output_tokens: "max_tokens",
+ max_completion_tokens: "max_tokens",
+ maxOutputTokens: "max_tokens",
+ maxTokens: "max_tokens",
+ // top_p
+ topp: "top_p",
+ topP: "top_p",
+ nucleus: "top_p",
+ // top_k
+ topk: "top_k",
+ topK: "top_k",
+ // frequency_penalty
+ freq: "frequency_penalty",
+ freq_penalty: "frequency_penalty",
+ frequencyPenalty: "frequency_penalty",
+ repetition_penalty: "frequency_penalty",
+ // presence_penalty
+ pres: "presence_penalty",
+ pres_penalty: "presence_penalty",
+ presencePenalty: "presence_penalty",
+ // stop
+ stop_sequences: "stop",
+ stopSequences: "stop",
+ stop_sequence: "stop",
+ // seed
+ random_seed: "seed",
+ randomSeed: "seed",
+ // n (completions count)
+ candidateCount: "n",
+ candidate_count: "n",
+ num_completions: "n",
+ // effort / reasoning
+ reasoning_effort: "effort",
+ reasoning: "effort",
+ // cache
+ cache_control: "cache",
+ cacheControl: "cache",
+ cachePoint: "cache",
+ cache_point: "cache"
+ };
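
ALIASES is exported, so the canonical spelling behind any accepted alias can be read off the table directly:

    const { ALIASES } = require("llm-strings");

    ALIASES.temp;            // "temperature"
    ALIASES.maxOutputTokens; // "max_tokens"
    ALIASES.reasoning;       // "effort"
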
+ var PROVIDER_PARAMS = {
+ openai: {
+ temperature: "temperature",
+ max_tokens: "max_tokens",
+ top_p: "top_p",
+ frequency_penalty: "frequency_penalty",
+ presence_penalty: "presence_penalty",
+ stop: "stop",
+ n: "n",
+ seed: "seed",
+ stream: "stream",
+ effort: "reasoning_effort"
+ },
+ anthropic: {
+ temperature: "temperature",
+ max_tokens: "max_tokens",
+ top_p: "top_p",
+ top_k: "top_k",
+ stop: "stop_sequences",
+ stream: "stream",
+ effort: "effort",
+ cache: "cache_control",
+ cache_ttl: "cache_ttl"
+ },
+ google: {
+ temperature: "temperature",
+ max_tokens: "maxOutputTokens",
+ top_p: "topP",
+ top_k: "topK",
+ frequency_penalty: "frequencyPenalty",
+ presence_penalty: "presencePenalty",
+ stop: "stopSequences",
+ n: "candidateCount",
+ stream: "stream",
+ seed: "seed",
+ responseMimeType: "responseMimeType",
+ responseSchema: "responseSchema"
+ },
+ mistral: {
+ temperature: "temperature",
+ max_tokens: "max_tokens",
+ top_p: "top_p",
+ frequency_penalty: "frequency_penalty",
+ presence_penalty: "presence_penalty",
+ stop: "stop",
+ n: "n",
+ seed: "random_seed",
+ stream: "stream",
+ safe_prompt: "safe_prompt",
+ min_tokens: "min_tokens"
+ },
+ cohere: {
+ temperature: "temperature",
+ max_tokens: "max_tokens",
+ top_p: "p",
+ top_k: "k",
+ frequency_penalty: "frequency_penalty",
+ presence_penalty: "presence_penalty",
+ stop: "stop_sequences",
+ stream: "stream",
+ seed: "seed"
+ },
+ bedrock: {
+ // Bedrock Converse API uses camelCase
+ temperature: "temperature",
+ max_tokens: "maxTokens",
+ top_p: "topP",
+ top_k: "topK",
+ // Claude models via additionalModelRequestFields
+ stop: "stopSequences",
+ stream: "stream",
+ cache: "cache_control",
+ cache_ttl: "cache_ttl"
+ },
+ openrouter: {
+ // OpenAI-compatible API with extra routing params
+ temperature: "temperature",
+ max_tokens: "max_tokens",
+ top_p: "top_p",
+ top_k: "top_k",
+ frequency_penalty: "frequency_penalty",
+ presence_penalty: "presence_penalty",
+ stop: "stop",
+ n: "n",
+ seed: "seed",
+ stream: "stream",
+ effort: "reasoning_effort"
+ },
+ vercel: {
+ // OpenAI-compatible gateway
+ temperature: "temperature",
+ max_tokens: "max_tokens",
+ top_p: "top_p",
+ top_k: "top_k",
+ frequency_penalty: "frequency_penalty",
+ presence_penalty: "presence_penalty",
+ stop: "stop",
+ n: "n",
+ seed: "seed",
+ stream: "stream",
+ effort: "reasoning_effort"
+ }
+ };
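
The table reads as canonical key to provider wire name, so one canonical name fans out differently per provider. Reading the exported table directly:

    const { PROVIDER_PARAMS } = require("llm-strings");

    PROVIDER_PARAMS.google.max_tokens;  // "maxOutputTokens"
    PROVIDER_PARAMS.bedrock.max_tokens; // "maxTokens"
    PROVIDER_PARAMS.anthropic.stop;     // "stop_sequences"
    PROVIDER_PARAMS.cohere.top_p;       // "p"
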
+ var PARAM_SPECS = {
+ openai: {
+ temperature: { type: "number", min: 0, max: 2 },
+ max_tokens: { type: "number", min: 1 },
+ top_p: { type: "number", min: 0, max: 1 },
+ frequency_penalty: { type: "number", min: -2, max: 2 },
+ presence_penalty: { type: "number", min: -2, max: 2 },
+ stop: { type: "string" },
+ n: { type: "number", min: 1 },
+ seed: { type: "number" },
+ stream: { type: "boolean" },
+ reasoning_effort: {
+ type: "string",
+ values: ["none", "minimal", "low", "medium", "high", "xhigh"]
+ }
+ },
+ anthropic: {
+ temperature: { type: "number", min: 0, max: 1 },
+ max_tokens: { type: "number", min: 1 },
+ top_p: { type: "number", min: 0, max: 1 },
+ top_k: { type: "number", min: 0 },
+ stop_sequences: { type: "string" },
+ stream: { type: "boolean" },
+ effort: { type: "string", values: ["low", "medium", "high", "max"] },
+ cache_control: { type: "string", values: ["ephemeral"] },
+ cache_ttl: { type: "string", values: ["5m", "1h"] }
+ },
+ google: {
+ temperature: { type: "number", min: 0, max: 2 },
+ maxOutputTokens: { type: "number", min: 1 },
+ topP: { type: "number", min: 0, max: 1 },
+ topK: { type: "number", min: 0 },
+ frequencyPenalty: { type: "number", min: -2, max: 2 },
+ presencePenalty: { type: "number", min: -2, max: 2 },
+ stopSequences: { type: "string" },
+ candidateCount: { type: "number", min: 1 },
+ stream: { type: "boolean" },
+ seed: { type: "number" },
+ responseMimeType: { type: "string" },
+ responseSchema: { type: "string" }
+ },
+ mistral: {
+ temperature: { type: "number", min: 0, max: 1 },
+ max_tokens: { type: "number", min: 1 },
+ top_p: { type: "number", min: 0, max: 1 },
+ frequency_penalty: { type: "number", min: -2, max: 2 },
+ presence_penalty: { type: "number", min: -2, max: 2 },
+ stop: { type: "string" },
+ n: { type: "number", min: 1 },
+ random_seed: { type: "number" },
+ stream: { type: "boolean" },
+ safe_prompt: { type: "boolean" },
+ min_tokens: { type: "number", min: 0 }
+ },
+ cohere: {
+ temperature: { type: "number", min: 0, max: 1 },
+ max_tokens: { type: "number", min: 1 },
+ p: { type: "number", min: 0, max: 1 },
+ k: { type: "number", min: 0, max: 500 },
+ frequency_penalty: { type: "number", min: 0, max: 1 },
+ presence_penalty: { type: "number", min: 0, max: 1 },
+ stop_sequences: { type: "string" },
+ stream: { type: "boolean" },
+ seed: { type: "number" }
+ },
+ bedrock: {
+ // Converse API inferenceConfig params
+ temperature: { type: "number", min: 0, max: 1 },
+ maxTokens: { type: "number", min: 1 },
+ topP: { type: "number", min: 0, max: 1 },
+ topK: { type: "number", min: 0 },
+ stopSequences: { type: "string" },
+ stream: { type: "boolean" },
+ cache_control: { type: "string", values: ["ephemeral"] },
+ cache_ttl: { type: "string", values: ["5m", "1h"] }
+ },
+ openrouter: {
+ // Loose validation — proxies to many providers with varying ranges
+ temperature: { type: "number", min: 0, max: 2 },
+ max_tokens: { type: "number", min: 1 },
+ top_p: { type: "number", min: 0, max: 1 },
+ top_k: { type: "number", min: 0 },
+ frequency_penalty: { type: "number", min: -2, max: 2 },
+ presence_penalty: { type: "number", min: -2, max: 2 },
+ stop: { type: "string" },
+ n: { type: "number", min: 1 },
+ seed: { type: "number" },
+ stream: { type: "boolean" },
+ reasoning_effort: {
+ type: "string",
+ values: ["none", "minimal", "low", "medium", "high", "xhigh"]
+ }
+ },
+ vercel: {
+ // Loose validation — proxies to many providers with varying ranges
+ temperature: { type: "number", min: 0, max: 2 },
+ max_tokens: { type: "number", min: 1 },
+ top_p: { type: "number", min: 0, max: 1 },
+ top_k: { type: "number", min: 0 },
+ frequency_penalty: { type: "number", min: -2, max: 2 },
+ presence_penalty: { type: "number", min: -2, max: 2 },
+ stop: { type: "string" },
+ n: { type: "number", min: 1 },
+ seed: { type: "number" },
+ stream: { type: "boolean" },
+ reasoning_effort: {
+ type: "string",
+ values: ["none", "minimal", "low", "medium", "high", "xhigh"]
+ }
+ }
+ };
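
PARAM_SPECS itself is not exported; its per-provider ranges surface through validate(). A sketch of the differing temperature ceilings (model names illustrative):

    const { validate } = require("llm-strings");

    validate("llm://api.openai.com/gpt-4o?temperature=1.5");
    // [] (within openai's 0..2 range)

    validate("llm://api.anthropic.com/claude-sonnet-4?temperature=1.5");
    // [{ param: "temperature", value: "1.5",
    //    message: '"temperature" must be <= 1, got 1.5.', severity: "error" }]
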
+ function isReasoningModel(model) {
+ const name = model.includes("/") ? model.split("/").pop() : model;
+ return /^o[134]/.test(name);
+ }
+ function canHostOpenAIModels(provider) {
+ return provider === "openai" || provider === "openrouter" || provider === "vercel";
+ }
+ var REASONING_MODEL_UNSUPPORTED = /* @__PURE__ */ new Set([
+ "temperature",
+ "top_p",
+ "frequency_penalty",
+ "presence_penalty",
+ "n"
+ ]);
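
isReasoningModel and REASONING_MODEL_UNSUPPORTED are internal. The regex matches the o1/o3/o4 model families after stripping any gateway routing prefix, and the set lists the sampler params those models reject. The check, inlined for illustration:

    /^o[134]/.test("o3-mini");                    // true
    /^o[134]/.test("gpt-4o");                     // false
    /^o[134]/.test("openai/o1".split("/").pop()); // true (routing prefix stripped)
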
+ function detectBedrockModelFamily(model) {
+ const parts = model.split(".");
+ let prefix = parts[0];
+ if (["us", "eu", "apac", "global"].includes(prefix) && parts.length > 1) {
+ prefix = parts[1];
+ }
+ const families = [
+ "anthropic",
+ "meta",
+ "amazon",
+ "mistral",
+ "cohere",
+ "ai21"
+ ];
+ return families.find((f) => prefix === f);
+ }
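
detectBedrockModelFamily is exported and skips the cross-region inference prefixes (us, eu, apac, global) that Bedrock prepends to model IDs. Illustrative IDs:

    const { detectBedrockModelFamily } = require("llm-strings");

    detectBedrockModelFamily("anthropic.claude-3-5-sonnet-20241022-v2:0");    // "anthropic"
    detectBedrockModelFamily("us.anthropic.claude-3-5-sonnet-20241022-v2:0"); // "anthropic"
    detectBedrockModelFamily("us.amazon.nova-pro-v1:0");                      // "amazon"
    detectBedrockModelFamily("unknown.model");                                // undefined
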
+ function bedrockSupportsCaching(model) {
+ const family = detectBedrockModelFamily(model);
+ if (family === "anthropic") return true;
+ if (family === "amazon" && model.includes("nova")) return true;
+ return false;
+ }
+ var CACHE_VALUES = {
+ openai: void 0,
+ // OpenAI auto-caches; no explicit param
+ anthropic: "ephemeral",
+ google: void 0,
+ // Google uses explicit caching API, not a param
+ mistral: void 0,
+ cohere: void 0,
+ bedrock: "ephemeral",
+ // Supported for Claude models on Bedrock
+ openrouter: void 0,
+ // Depends on underlying provider
+ vercel: void 0
+ // Depends on underlying provider
+ };
+ var CACHE_TTLS = {
+ openai: void 0,
+ anthropic: ["5m", "1h"],
+ google: void 0,
+ mistral: void 0,
+ cohere: void 0,
+ bedrock: ["5m", "1h"],
+ // Claude on Bedrock uses same TTLs as direct Anthropic
+ openrouter: void 0,
+ vercel: void 0
+ };
+ var DURATION_RE = /^\d+[mh]$/;
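
Putting the cache machinery together: for a provider with an explicit cache param, normalize() rewrites a boolean or duration value of the generic cache key into the provider's form, and a duration additionally becomes cache_ttl; where CACHE_VALUES is undefined the key is dropped. A sketch (hosts and models illustrative):

    const { parse, normalize } = require("llm-strings");

    // anthropic: cache=5m becomes cache_control=ephemeral plus cache_ttl=5m
    normalize(parse("llm://api.anthropic.com/claude-sonnet-4?cache=5m")).config.params;
    // => { cache_control: "ephemeral", cache_ttl: "5m" }

    // openai: caching is automatic, so the key is dropped
    normalize(parse("llm://api.openai.com/gpt-4o?cache=true")).config.params;
    // => {}
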
+
+ // src/normalize.ts
+ function normalize(config, options = {}) {
+ const provider = detectProvider(config.host);
+ const changes = [];
+ const params = {};
+ for (const [rawKey, value] of Object.entries(config.params)) {
+ let key = rawKey;
+ if (ALIASES[key]) {
+ const canonical = ALIASES[key];
+ if (options.verbose) {
+ changes.push({
+ from: key,
+ to: canonical,
+ value,
+ reason: `alias: "${key}" \u2192 "${canonical}"`
+ });
+ }
+ key = canonical;
+ }
+ if (key === "cache" && provider) {
+ let cacheValue = CACHE_VALUES[provider];
+ if (provider === "bedrock" && !bedrockSupportsCaching(config.model)) {
+ cacheValue = void 0;
+ }
+ if (!cacheValue) {
+ if (options.verbose) {
+ changes.push({
+ from: "cache",
+ to: "(dropped)",
+ value,
+ reason: `${provider} does not use a cache param for this model (caching is automatic or unsupported)`
+ });
+ }
+ continue;
+ }
+ const isBool = value === "true" || value === "1" || value === "yes";
+ const isDuration = DURATION_RE.test(value);
+ if (isBool || isDuration) {
+ const providerKey = PROVIDER_PARAMS[provider]?.["cache"] ?? "cache";
+ if (options.verbose) {
+ changes.push({
+ from: "cache",
+ to: providerKey,
+ value: cacheValue,
+ reason: `cache=${value} \u2192 ${providerKey}=${cacheValue} for ${provider}`
+ });
+ }
+ params[providerKey] = cacheValue;
+ if (isDuration && CACHE_TTLS[provider]) {
+ if (options.verbose) {
+ changes.push({
+ from: "cache",
+ to: "cache_ttl",
+ value,
+ reason: `cache=${value} \u2192 cache_ttl=${value} for ${provider}`
+ });
+ }
+ params["cache_ttl"] = value;
+ }
+ continue;
+ }
+ }
+ if (provider && PROVIDER_PARAMS[provider]) {
+ const providerKey = PROVIDER_PARAMS[provider][key];
+ if (providerKey && providerKey !== key) {
+ if (options.verbose) {
+ changes.push({
+ from: key,
+ to: providerKey,
+ value,
+ reason: `${provider} uses "${providerKey}" instead of "${key}"`
+ });
+ }
+ key = providerKey;
+ }
+ }
+ if (provider && canHostOpenAIModels(provider) && isReasoningModel(config.model) && key === "max_tokens") {
+ if (options.verbose) {
+ changes.push({
+ from: "max_tokens",
+ to: "max_completion_tokens",
+ value,
+ reason: "OpenAI reasoning models use max_completion_tokens instead of max_tokens"
+ });
+ }
+ key = "max_completion_tokens";
+ }
+ params[key] = value;
+ }
+ return {
+ config: { ...config, params },
+ provider,
+ changes
+ };
+ }
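
End to end, normalize() resolves aliases, renames keys to the provider's wire names, and applies the reasoning-model override, recording each rewrite in changes when verbose is set. A sketch against a Google host (model name illustrative):

    const { parse, normalize } = require("llm-strings");

    const parsed = parse("llm://generativelanguage.googleapis.com/gemini-2.0-flash?temp=0.3&max=1024");
    const { config, provider, changes } = normalize(parsed, { verbose: true });

    provider;      // "google"
    config.params; // { temperature: "0.3", maxOutputTokens: "1024" }
    changes.map((c) => `${c.from} -> ${c.to}`);
    // ["temp -> temperature", "max -> max_tokens", "max_tokens -> maxOutputTokens"]
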
+
+ // src/validate.ts
+ function validate(connectionString) {
+ const parsed = parse(connectionString);
+ const { config } = normalize(parsed);
+ const provider = detectProvider(config.host);
+ const issues = [];
+ if (!provider) {
+ issues.push({
+ param: "host",
+ value: config.host,
+ message: `Unknown provider for host "${config.host}". Validation skipped.`,
+ severity: "warning"
+ });
+ return issues;
+ }
+ const specs = PARAM_SPECS[provider];
+ const knownParams = new Set(Object.values(PROVIDER_PARAMS[provider]));
+ for (const [key, value] of Object.entries(config.params)) {
+ if (canHostOpenAIModels(provider) && isReasoningModel(config.model) && REASONING_MODEL_UNSUPPORTED.has(key)) {
+ issues.push({
+ param: key,
+ value,
+ message: `"${key}" is not supported by OpenAI reasoning model "${config.model}". Use "reasoning_effort" instead of temperature for controlling output.`,
+ severity: "error"
+ });
+ continue;
+ }
+ if (provider === "bedrock") {
+ const family = detectBedrockModelFamily(config.model);
+ if (key === "topK" && family && family !== "anthropic" && family !== "cohere" && family !== "mistral") {
+ issues.push({
+ param: key,
+ value,
+ message: `"topK" is not supported by ${family} models on Bedrock.`,
+ severity: "error"
+ });
+ continue;
+ }
+ if (key === "cache_control" && !bedrockSupportsCaching(config.model)) {
+ issues.push({
+ param: key,
+ value,
+ message: `Prompt caching is only supported for Anthropic Claude and Amazon Nova models on Bedrock, not ${family ?? "unknown"} models.`,
+ severity: "error"
+ });
+ continue;
+ }
+ }
+ if (!knownParams.has(key) && !specs[key]) {
+ issues.push({
+ param: key,
+ value,
+ message: `Unknown param "${key}" for ${provider}.`,
+ severity: "warning"
+ });
+ continue;
+ }
+ const spec = specs[key];
+ if (!spec) continue;
+ if ((provider === "anthropic" || provider === "bedrock" && detectBedrockModelFamily(config.model) === "anthropic") && (key === "temperature" || key === "top_p" || key === "topP")) {
+ const otherKey = key === "temperature" ? provider === "bedrock" ? "topP" : "top_p" : "temperature";
+ if (key === "temperature" && config.params[otherKey] !== void 0) {
+ issues.push({
+ param: key,
+ value,
+ message: `Cannot specify both "temperature" and "${otherKey}" for Anthropic models.`,
+ severity: "error"
+ });
+ }
+ }
+ if (spec.type === "number") {
+ const num = Number(value);
+ if (isNaN(num)) {
+ issues.push({
+ param: key,
+ value,
+ message: `"${key}" should be a number, got "${value}".`,
+ severity: "error"
+ });
+ continue;
+ }
+ if (spec.min !== void 0 && num < spec.min) {
+ issues.push({
+ param: key,
+ value,
+ message: `"${key}" must be >= ${spec.min}, got ${num}.`,
+ severity: "error"
+ });
+ }
+ if (spec.max !== void 0 && num > spec.max) {
+ issues.push({
+ param: key,
+ value,
+ message: `"${key}" must be <= ${spec.max}, got ${num}.`,
+ severity: "error"
+ });
+ }
+ }
+ if (spec.type === "boolean") {
+ if (!["true", "false", "0", "1"].includes(value)) {
+ issues.push({
+ param: key,
+ value,
+ message: `"${key}" should be a boolean (true/false), got "${value}".`,
+ severity: "error"
+ });
+ }
+ }
+ if (spec.type === "string" && spec.values) {
+ if (!spec.values.includes(value)) {
+ issues.push({
+ param: key,
+ value,
+ message: `"${key}" must be one of [${spec.values.join(", ")}], got "${value}".`,
+ severity: "error"
+ });
+ }
+ }
+ }
+ return issues;
+ }
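
validate() parses and normalizes first, so its checks run against the provider-shaped params; unknown hosts short-circuit with a warning, and OpenAI reasoning models reject sampler params outright. Sketch (hosts and models illustrative):

    const { validate } = require("llm-strings");

    validate("llm://api.openai.com/o3-mini?temperature=0.7");
    // [{ param: "temperature", value: "0.7", severity: "error",
    //    message: '"temperature" is not supported by OpenAI reasoning model "o3-mini". ...' }]

    validate("llm://my-gateway.internal/some-model?temperature=0.7");
    // [{ param: "host", severity: "warning", ... }] (unknown provider, validation skipped)
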
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+ ALIASES,
+ PROVIDER_PARAMS,
+ build,
+ detectBedrockModelFamily,
+ detectProvider,
+ normalize,
+ parse,
+ validate
+ });
+ //# sourceMappingURL=index.cjs.map