llm-strings 1.1.1 → 1.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +673 -12
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts +4 -4
- package/dist/normalize.cjs +353 -4
- package/dist/normalize.cjs.map +1 -1
- package/dist/normalize.d.ts +2 -2
- package/dist/parse.cjs +60 -7
- package/dist/parse.cjs.map +1 -1
- package/dist/providers.cjs +414 -35
- package/dist/providers.cjs.map +1 -1
- package/dist/providers.d.ts +2 -2
- package/dist/validate.cjs +663 -6
- package/dist/validate.cjs.map +1 -1
- package/package.json +4 -3
- package/dist/chunk-MGWGNZDJ.cjs +0 -116
- package/dist/chunk-MGWGNZDJ.cjs.map +0 -1
- package/dist/chunk-N6NVBE43.cjs +0 -37
- package/dist/chunk-N6NVBE43.cjs.map +0 -1
- package/dist/chunk-NSCBY4VD.cjs +0 -370
- package/dist/chunk-NSCBY4VD.cjs.map +0 -1
- package/dist/chunk-RSUXM42X.cjs +0 -180
- package/dist/chunk-RSUXM42X.cjs.map +0 -1
package/dist/parse.cjs
CHANGED
@@ -1,9 +1,62 @@
-"use strict";
+"use strict";
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
 
-
-var
-
-
-
-
+// src/parse.ts
+var parse_exports = {};
+__export(parse_exports, {
+  build: () => build,
+  parse: () => parse
+});
+module.exports = __toCommonJS(parse_exports);
+function parse(connectionString) {
+  const url = new URL(connectionString);
+  if (url.protocol !== "llm:") {
+    throw new Error(
+      `Invalid scheme: expected "llm://", got "${url.protocol}//"`
+    );
+  }
+  const host = url.hostname;
+  const model = url.pathname.replace(/^\//, "");
+  const label = url.username || void 0;
+  const apiKey = url.password || void 0;
+  const params = {};
+  for (const [key, value] of url.searchParams) {
+    params[key] = value;
+  }
+  return {
+    raw: connectionString,
+    host,
+    model,
+    label,
+    apiKey,
+    params
+  };
+}
+function build(config) {
+  const auth = config.label || config.apiKey ? `${config.label ?? ""}${config.apiKey ? `:${config.apiKey}` : ""}@` : "";
+  const query = new URLSearchParams(config.params).toString();
+  const qs = query ? `?${query}` : "";
+  return `llm://${auth}${config.host}/${config.model}${qs}`;
+}
+// Annotate the CommonJS export names for ESM import in node:
+0 && (module.exports = {
+  build,
+  parse
+});
 //# sourceMappingURL=parse.cjs.map
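The rebuilt parse entry bundles parse and build for the connection-string format documented in its source: llm://[label[:apiKey]@]host/model[?key=value&...]. A minimal usage sketch in TypeScript, assuming the functions are importable from the package root (the exact import specifier depends on the package's export map, which this diff does not show):

import { parse, build } from "llm-strings"; // import specifier is an assumption

// Split a connection string into its parts; query values stay strings.
const config = parse("llm://app-name:sk-123@api.openai.com/gpt-5.2?temp=0.7");
// config.host === "api.openai.com", config.model === "gpt-5.2"
// config.label === "app-name", config.apiKey === "sk-123"
// config.params.temp === "0.7"

// build() is the inverse, taking the same shape minus the `raw` field.
const roundTripped = build({
  host: config.host,
  model: config.model,
  label: config.label,
  apiKey: config.apiKey,
  params: config.params,
});
// roundTripped === "llm://app-name:sk-123@api.openai.com/gpt-5.2?temp=0.7"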
package/dist/parse.cjs.map
CHANGED
@@ -1 +1 @@
-{"version":3,"sources":["/
+
{"version":3,"sources":["../src/parse.ts"],"sourcesContent":["export interface LlmConnectionConfig {\n /** The original connection string */\n raw: string;\n /** Provider's API base URL (e.g. \"api.openai.com\") */\n host: string;\n /** Model name (e.g. \"gpt-5.2\") */\n model: string;\n /** Optional label or app name */\n label?: string;\n /** Optional API key or password */\n apiKey?: string;\n /** Additional config parameters (temp, max_tokens, etc.) */\n params: Record<string, string>;\n}\n\n/**\n * Parse an LLM connection string into its component parts.\n *\n * Format: `llm://[label[:apiKey]@]host/model[?key=value&...]`\n *\n * @example\n * ```ts\n * parse(\"llm://api.openai.com/gpt-5.2?temp=0.7&max_tokens=1500\")\n * parse(\"llm://app-name:sk-proj-123456@api.openai.com/gpt-5.2?temp=0.7\")\n * ```\n */\nexport function parse(connectionString: string): LlmConnectionConfig {\n const url = new URL(connectionString);\n\n if (url.protocol !== \"llm:\") {\n throw new Error(\n `Invalid scheme: expected \"llm://\", got \"${url.protocol}//\"`,\n );\n }\n\n const host = url.hostname;\n const model = url.pathname.replace(/^\\//, \"\");\n const label = url.username || undefined;\n const apiKey = url.password || undefined;\n\n const params: Record<string, string> = {};\n for (const [key, value] of url.searchParams) {\n params[key] = value;\n }\n\n return {\n raw: connectionString,\n host,\n model,\n label,\n apiKey,\n params,\n };\n}\n\n/**\n * Build an LLM connection string from a config object.\n */\nexport function build(config: Omit<LlmConnectionConfig, \"raw\">): string {\n const auth =\n config.label || config.apiKey\n ? `${config.label ?? \"\"}${config.apiKey ? `:${config.apiKey}` : \"\"}@`\n : \"\";\n\n const query = new URLSearchParams(config.params).toString();\n const qs = query ? `?${query}` : \"\";\n\n return `llm://${auth}${config.host}/${config.model}${qs}`;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA0BO,SAAS,MAAM,kBAA+C;AACnE,QAAM,MAAM,IAAI,IAAI,gBAAgB;AAEpC,MAAI,IAAI,aAAa,QAAQ;AAC3B,UAAM,IAAI;AAAA,MACR,2CAA2C,IAAI,QAAQ;AAAA,IACzD;AAAA,EACF;AAEA,QAAM,OAAO,IAAI;AACjB,QAAM,QAAQ,IAAI,SAAS,QAAQ,OAAO,EAAE;AAC5C,QAAM,QAAQ,IAAI,YAAY;AAC9B,QAAM,SAAS,IAAI,YAAY;AAE/B,QAAM,SAAiC,CAAC;AACxC,aAAW,CAAC,KAAK,KAAK,KAAK,IAAI,cAAc;AAC3C,WAAO,GAAG,IAAI;AAAA,EAChB;AAEA,SAAO;AAAA,IACL,KAAK;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAKO,SAAS,MAAM,QAAkD;AACtE,QAAM,OACJ,OAAO,SAAS,OAAO,SACnB,GAAG,OAAO,SAAS,EAAE,GAAG,OAAO,SAAS,IAAI,OAAO,MAAM,KAAK,EAAE,MAChE;AAEN,QAAM,QAAQ,IAAI,gBAAgB,OAAO,MAAM,EAAE,SAAS;AAC1D,QAAM,KAAK,QAAQ,IAAI,KAAK,KAAK;AAEjC,SAAO,SAAS,IAAI,GAAG,OAAO,IAAI,IAAI,OAAO,KAAK,GAAG,EAAE;AACzD;","names":[]}
package/dist/providers.cjs
CHANGED
@@ -1,19 +1,397 @@
-"use strict";
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+"use strict";
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+// src/providers.ts
+var providers_exports = {};
+__export(providers_exports, {
+  ALIASES: () => ALIASES,
+  CACHE_TTLS: () => CACHE_TTLS,
+  CACHE_VALUES: () => CACHE_VALUES,
+  CANONICAL_PARAM_SPECS: () => CANONICAL_PARAM_SPECS,
+  DURATION_RE: () => DURATION_RE,
+  MODELS: () => MODELS,
+  PARAM_SPECS: () => PARAM_SPECS,
+  PROVIDER_META: () => PROVIDER_META,
+  PROVIDER_PARAMS: () => PROVIDER_PARAMS,
+  REASONING_MODEL_UNSUPPORTED: () => REASONING_MODEL_UNSUPPORTED,
+  bedrockSupportsCaching: () => bedrockSupportsCaching,
+  canHostOpenAIModels: () => canHostOpenAIModels,
+  detectBedrockModelFamily: () => detectBedrockModelFamily,
+  detectGatewaySubProvider: () => detectGatewaySubProvider,
+  detectProvider: () => detectProvider,
+  isGatewayProvider: () => isGatewayProvider,
+  isReasoningModel: () => isReasoningModel
+});
+module.exports = __toCommonJS(providers_exports);
+
+// src/provider-core.ts
+function detectProvider(host) {
+  if (host.includes("openrouter")) return "openrouter";
+  if (host.includes("gateway.ai.vercel")) return "vercel";
+  if (host.includes("amazonaws") || host.includes("bedrock")) return "bedrock";
+  if (host.includes("openai")) return "openai";
+  if (host.includes("anthropic") || host.includes("claude")) return "anthropic";
+  if (host.includes("googleapis") || host.includes("google")) return "google";
+  if (host.includes("mistral")) return "mistral";
+  if (host.includes("cohere")) return "cohere";
+  return void 0;
+}
+var ALIASES = {
+  // temperature
+  temp: "temperature",
+  // max_tokens
+  max: "max_tokens",
+  max_out: "max_tokens",
+  max_output: "max_tokens",
+  max_output_tokens: "max_tokens",
+  max_completion_tokens: "max_tokens",
+  maxOutputTokens: "max_tokens",
+  maxTokens: "max_tokens",
+  // top_p
+  topp: "top_p",
+  topP: "top_p",
+  nucleus: "top_p",
+  // top_k
+  topk: "top_k",
+  topK: "top_k",
+  // frequency_penalty
+  freq: "frequency_penalty",
+  freq_penalty: "frequency_penalty",
+  frequencyPenalty: "frequency_penalty",
+  repetition_penalty: "frequency_penalty",
+  // presence_penalty
+  pres: "presence_penalty",
+  pres_penalty: "presence_penalty",
+  presencePenalty: "presence_penalty",
+  // stop
+  stop_sequences: "stop",
+  stopSequences: "stop",
+  stop_sequence: "stop",
+  // seed
+  random_seed: "seed",
+  randomSeed: "seed",
+  // n (completions count)
+  candidateCount: "n",
+  candidate_count: "n",
+  num_completions: "n",
+  // effort / reasoning
+  reasoning_effort: "effort",
+  reasoning: "effort",
+  // cache
+  cache_control: "cache",
+  cacheControl: "cache",
+  cachePoint: "cache",
+  cache_point: "cache"
+};
+var PROVIDER_PARAMS = {
+  openai: {
+    temperature: "temperature",
+    max_tokens: "max_tokens",
+    top_p: "top_p",
+    frequency_penalty: "frequency_penalty",
+    presence_penalty: "presence_penalty",
+    stop: "stop",
+    n: "n",
+    seed: "seed",
+    stream: "stream",
+    effort: "reasoning_effort"
+  },
+  anthropic: {
+    temperature: "temperature",
+    max_tokens: "max_tokens",
+    top_p: "top_p",
+    top_k: "top_k",
+    stop: "stop_sequences",
+    stream: "stream",
+    effort: "effort",
+    cache: "cache_control",
+    cache_ttl: "cache_ttl"
+  },
+  google: {
+    temperature: "temperature",
+    max_tokens: "maxOutputTokens",
+    top_p: "topP",
+    top_k: "topK",
+    frequency_penalty: "frequencyPenalty",
+    presence_penalty: "presencePenalty",
+    stop: "stopSequences",
+    n: "candidateCount",
+    stream: "stream",
+    seed: "seed",
+    responseMimeType: "responseMimeType",
+    responseSchema: "responseSchema"
+  },
+  mistral: {
+    temperature: "temperature",
+    max_tokens: "max_tokens",
+    top_p: "top_p",
+    frequency_penalty: "frequency_penalty",
+    presence_penalty: "presence_penalty",
+    stop: "stop",
+    n: "n",
+    seed: "random_seed",
+    stream: "stream",
+    safe_prompt: "safe_prompt",
+    min_tokens: "min_tokens"
+  },
+  cohere: {
+    temperature: "temperature",
+    max_tokens: "max_tokens",
+    top_p: "p",
+    top_k: "k",
+    frequency_penalty: "frequency_penalty",
+    presence_penalty: "presence_penalty",
+    stop: "stop_sequences",
+    stream: "stream",
+    seed: "seed"
+  },
+  bedrock: {
+    // Bedrock Converse API uses camelCase
+    temperature: "temperature",
+    max_tokens: "maxTokens",
+    top_p: "topP",
+    top_k: "topK",
+    // Claude models via additionalModelRequestFields
+    stop: "stopSequences",
+    stream: "stream",
+    cache: "cache_control",
+    cache_ttl: "cache_ttl"
+  },
+  openrouter: {
+    // OpenAI-compatible API with extra routing params
+    temperature: "temperature",
+    max_tokens: "max_tokens",
+    top_p: "top_p",
+    top_k: "top_k",
+    frequency_penalty: "frequency_penalty",
+    presence_penalty: "presence_penalty",
+    stop: "stop",
+    n: "n",
+    seed: "seed",
+    stream: "stream",
+    effort: "reasoning_effort"
+  },
+  vercel: {
+    // OpenAI-compatible gateway
+    temperature: "temperature",
+    max_tokens: "max_tokens",
+    top_p: "top_p",
+    top_k: "top_k",
+    frequency_penalty: "frequency_penalty",
+    presence_penalty: "presence_penalty",
+    stop: "stop",
+    n: "n",
+    seed: "seed",
+    stream: "stream",
+    effort: "reasoning_effort"
+  }
+};
+var PARAM_SPECS = {
+  openai: {
+    temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
+    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
+    frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
+    presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
+    stop: { type: "string", description: "Stop sequences" },
+    n: { type: "number", min: 1, default: 1, description: "Completions count" },
+    seed: { type: "number", description: "Random seed" },
+    stream: { type: "boolean", default: false, description: "Stream response" },
+    reasoning_effort: {
+      type: "string",
+      values: ["none", "minimal", "low", "medium", "high", "xhigh"],
+      default: "medium",
+      description: "Reasoning effort"
+    }
+  },
+  anthropic: {
+    temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
+    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
+    top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
+    stop_sequences: { type: "string", description: "Stop sequences" },
+    stream: { type: "boolean", default: false, description: "Stream response" },
+    effort: { type: "string", values: ["low", "medium", "high", "max"], default: "medium", description: "Thinking effort" },
+    cache_control: { type: "string", values: ["ephemeral"], default: "ephemeral", description: "Cache control" },
+    cache_ttl: { type: "string", values: ["5m", "1h"], default: "5m", description: "Cache TTL" }
+  },
+  google: {
+    temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
+    maxOutputTokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+    topP: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
+    topK: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
+    frequencyPenalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
+    presencePenalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
+    stopSequences: { type: "string", description: "Stop sequences" },
+    candidateCount: { type: "number", min: 1, default: 1, description: "Candidate count" },
+    stream: { type: "boolean", default: false, description: "Stream response" },
+    seed: { type: "number", description: "Random seed" },
+    responseMimeType: { type: "string", description: "Response MIME type" },
+    responseSchema: { type: "string", description: "Response schema" }
+  },
+  mistral: {
+    temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
+    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
+    frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
+    presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
+    stop: { type: "string", description: "Stop sequences" },
+    n: { type: "number", min: 1, default: 1, description: "Completions count" },
+    random_seed: { type: "number", description: "Random seed" },
+    stream: { type: "boolean", default: false, description: "Stream response" },
+    safe_prompt: { type: "boolean", default: false, description: "Enable safe prompt" },
+    min_tokens: { type: "number", min: 0, default: 0, description: "Minimum tokens" }
+  },
+  cohere: {
+    temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
+    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+    p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling (p)" },
+    k: { type: "number", min: 0, max: 500, default: 40, description: "Top-K sampling (k)" },
+    frequency_penalty: { type: "number", min: 0, max: 1, default: 0, description: "Penalize frequent tokens" },
+    presence_penalty: { type: "number", min: 0, max: 1, default: 0, description: "Penalize repeated topics" },
+    stop_sequences: { type: "string", description: "Stop sequences" },
+    stream: { type: "boolean", default: false, description: "Stream response" },
+    seed: { type: "number", description: "Random seed" }
+  },
+  bedrock: {
+    // Converse API inferenceConfig params
+    temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
+    maxTokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+    topP: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
+    topK: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
+    stopSequences: { type: "string", description: "Stop sequences" },
+    stream: { type: "boolean", default: false, description: "Stream response" },
+    cache_control: { type: "string", values: ["ephemeral"], default: "ephemeral", description: "Cache control" },
+    cache_ttl: { type: "string", values: ["5m", "1h"], default: "5m", description: "Cache TTL" }
+  },
+  openrouter: {
+    // Loose validation — proxies to many providers with varying ranges
+    temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
+    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
+    top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
+    frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
+    presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
+    stop: { type: "string", description: "Stop sequences" },
+    n: { type: "number", min: 1, default: 1, description: "Completions count" },
+    seed: { type: "number", description: "Random seed" },
+    stream: { type: "boolean", default: false, description: "Stream response" },
+    reasoning_effort: {
+      type: "string",
+      values: ["none", "minimal", "low", "medium", "high", "xhigh"],
+      default: "medium",
+      description: "Reasoning effort"
+    }
+  },
+  vercel: {
+    // Loose validation — proxies to many providers with varying ranges
+    temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
+    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
+    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
+    top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
+    frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
+    presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
+    stop: { type: "string", description: "Stop sequences" },
+    n: { type: "number", min: 1, default: 1, description: "Completions count" },
+    seed: { type: "number", description: "Random seed" },
+    stream: { type: "boolean", default: false, description: "Stream response" },
+    reasoning_effort: {
+      type: "string",
+      values: ["none", "minimal", "low", "medium", "high", "xhigh"],
+      default: "medium",
+      description: "Reasoning effort"
+    }
+  }
+};
+function isReasoningModel(model) {
+  const name = model.includes("/") ? model.split("/").pop() : model;
+  return /^o[134]/.test(name);
+}
+function canHostOpenAIModels(provider) {
+  return provider === "openai" || provider === "openrouter" || provider === "vercel";
+}
+function isGatewayProvider(provider) {
+  return provider === "openrouter" || provider === "vercel";
+}
+function detectGatewaySubProvider(model) {
+  const slash = model.indexOf("/");
+  if (slash < 1) return void 0;
+  const prefix = model.slice(0, slash);
+  const direct = ["openai", "anthropic", "google", "mistral", "cohere"];
+  return direct.find((p) => p === prefix);
+}
+var REASONING_MODEL_UNSUPPORTED = /* @__PURE__ */ new Set([
+  "temperature",
+  "top_p",
+  "frequency_penalty",
+  "presence_penalty",
+  "n"
+]);
+function detectBedrockModelFamily(model) {
+  const parts = model.split(".");
+  let prefix = parts[0];
+  if (["us", "eu", "apac", "global"].includes(prefix) && parts.length > 1) {
+    prefix = parts[1];
+  }
+  const families = [
+    "anthropic",
+    "meta",
+    "amazon",
+    "mistral",
+    "cohere",
+    "ai21"
+  ];
+  return families.find((f) => prefix === f);
+}
+function bedrockSupportsCaching(model) {
+  const family = detectBedrockModelFamily(model);
+  if (family === "anthropic") return true;
+  if (family === "amazon" && model.includes("nova")) return true;
+  return false;
+}
+var CACHE_VALUES = {
+  openai: void 0,
+  // OpenAI auto-caches; no explicit param
+  anthropic: "ephemeral",
+  google: void 0,
+  // Google uses explicit caching API, not a param
+  mistral: void 0,
+  cohere: void 0,
+  bedrock: "ephemeral",
+  // Supported for Claude models on Bedrock
+  openrouter: void 0,
+  // Depends on underlying provider
+  vercel: void 0
+  // Depends on underlying provider
+};
+var CACHE_TTLS = {
+  openai: void 0,
+  anthropic: ["5m", "1h"],
+  google: void 0,
+  mistral: void 0,
+  cohere: void 0,
+  bedrock: ["5m", "1h"],
+  // Claude on Bedrock uses same TTLs as direct Anthropic
+  openrouter: void 0,
+  vercel: void 0
+};
+var DURATION_RE = /^\d+[mh]$/;
 
 // src/provider-meta.ts
 var PROVIDER_META = [
@@ -184,23 +562,24 @@ var CANONICAL_PARAM_SPECS = {
     effort: { type: "enum", values: ["none", "minimal", "low", "medium", "high", "xhigh"], default: "medium", description: "Reasoning effort" }
   }
 };
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+// Annotate the CommonJS export names for ESM import in node:
+0 && (module.exports = {
+  ALIASES,
+  CACHE_TTLS,
+  CACHE_VALUES,
+  CANONICAL_PARAM_SPECS,
+  DURATION_RE,
+  MODELS,
+  PARAM_SPECS,
+  PROVIDER_META,
+  PROVIDER_PARAMS,
+  REASONING_MODEL_UNSUPPORTED,
+  bedrockSupportsCaching,
+  canHostOpenAIModels,
+  detectBedrockModelFamily,
+  detectGatewaySubProvider,
+  detectProvider,
+  isGatewayProvider,
+  isReasoningModel
+});
 //# sourceMappingURL=providers.cjs.map
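The rebuilt providers entry inlines the detection helpers that 1.1.1 appears to have loaded from the now-removed chunk-*.cjs files. A short TypeScript sketch of their observable behavior, based only on the bundled output above; the "llm-strings/providers" subpath import is an assumption this diff does not confirm:

import {
  detectProvider,
  detectGatewaySubProvider,
  detectBedrockModelFamily,
  bedrockSupportsCaching,
  isReasoningModel,
} from "llm-strings/providers"; // subpath specifier is an assumption

detectProvider("api.openai.com");                          // "openai"
detectProvider("bedrock-runtime.us-east-1.amazonaws.com"); // "bedrock" (matches "amazonaws")
detectProvider("example.com");                             // undefined

detectGatewaySubProvider("anthropic/claude-3-5-sonnet");   // "anthropic"
detectGatewaySubProvider("gpt-5.2");                       // undefined (no gateway prefix)

detectBedrockModelFamily("us.anthropic.claude-3-5-sonnet-20241022-v2:0"); // "anthropic" (region prefix skipped)
bedrockSupportsCaching("amazon.nova-pro-v1:0");            // true (Nova models cache)
isReasoningModel("openai/o3-mini");                        // true (bare name matches /^o[134]/)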