llm-strings 1.0.0 → 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.cts +4 -64
- package/dist/index.d.ts +4 -64
- package/dist/providers.cjs +561 -0
- package/dist/providers.cjs.map +1 -0
- package/dist/providers.d.cts +72 -0
- package/dist/providers.d.ts +72 -0
- package/dist/providers.js +522 -0
- package/dist/providers.js.map +1 -0
- package/package.json +1 -1
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
// Bundled declaration file for dist/providers (CJS build).
// NOTE(review): presumably generated from src/providers.ts by the bundler —
// edit the source, not this artifact.
type Provider = "openai" | "anthropic" | "google" | "mistral" | "cohere" | "bedrock" | "openrouter" | "vercel";
// Maps an API hostname to a known provider id; undefined when unrecognized.
declare function detectProvider(host: string): Provider | undefined;
/**
 * Shorthand aliases → canonical param name.
 * Canonical names use snake_case and follow OpenAI conventions where possible.
 */
declare const ALIASES: Record<string, string>;
/**
 * Canonical param name → provider-specific API param name.
 * Only includes params the provider actually supports.
 */
declare const PROVIDER_PARAMS: Record<Provider, Record<string, string>>;
/**
 * Validation specs per provider, keyed by provider-specific param name.
 */
interface ParamSpec {
    type: "number" | "string" | "boolean";
    min?: number;
    max?: number;
    values?: string[];
}
declare const PARAM_SPECS: Record<Provider, Record<string, ParamSpec>>;
/** OpenAI reasoning models don't support standard sampling params. */
declare function isReasoningModel(model: string): boolean;
/** Providers that can route to OpenAI models (and need reasoning-model checks). */
declare function canHostOpenAIModels(provider: Provider): boolean;
// Canonical param names rejected by OpenAI reasoning models.
declare const REASONING_MODEL_UNSUPPORTED: Set<string>;
/**
 * Bedrock model IDs are prefixed with the vendor name.
 * e.g. "anthropic.claude-sonnet-4-5-20250929-v1:0"
 */
type BedrockModelFamily = "anthropic" | "meta" | "amazon" | "mistral" | "cohere" | "ai21";
declare function detectBedrockModelFamily(model: string): BedrockModelFamily | undefined;
/** Whether a Bedrock model supports prompt caching (Claude and Nova only). */
declare function bedrockSupportsCaching(model: string): boolean;
/** Cache value normalization per provider. */
declare const CACHE_VALUES: Record<Provider, string | undefined>;
/** Valid cache TTL values per provider. */
declare const CACHE_TTLS: Record<Provider, string[] | undefined>;
/** Match a duration expression like "5m", "1h", "30m". */
declare const DURATION_RE: RegExp;
interface ProviderMeta {
    /** Provider identifier — matches the Provider union type. */
    id: Provider;
    /** Human-readable display name. */
    name: string;
    /** Default / canonical API hostname. */
    host: string;
    /** Brand color as a CSS hex value. */
    color: string;
}
declare const PROVIDER_META: ProviderMeta[];
/**
 * Suggested / common model IDs per provider, ordered by recency.
 * Not exhaustive — providers add models frequently.
 */
declare const MODELS: Record<Provider, string[]>;
/**
 * Canonical parameter spec — keyed by canonical (snake_case) param names
 * with defaults and descriptions for UI consumption.
 */
interface CanonicalParamSpec {
    type: "number" | "string" | "boolean" | "enum";
    min?: number;
    max?: number;
    values?: string[];
    default?: string | number | boolean;
    description?: string;
}
declare const CANONICAL_PARAM_SPECS: Record<Provider, Record<string, CanonicalParamSpec>>;

export { ALIASES, type BedrockModelFamily, CACHE_TTLS, CACHE_VALUES, CANONICAL_PARAM_SPECS, type CanonicalParamSpec, DURATION_RE, MODELS, PARAM_SPECS, PROVIDER_META, PROVIDER_PARAMS, type ParamSpec, type Provider, type ProviderMeta, REASONING_MODEL_UNSUPPORTED, bedrockSupportsCaching, canHostOpenAIModels, detectBedrockModelFamily, detectProvider, isReasoningModel };
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
// Bundled declaration file for dist/providers (ESM build).
// NOTE(review): presumably generated from src/providers.ts by the bundler —
// edit the source, not this artifact.
type Provider = "openai" | "anthropic" | "google" | "mistral" | "cohere" | "bedrock" | "openrouter" | "vercel";
// Maps an API hostname to a known provider id; undefined when unrecognized.
declare function detectProvider(host: string): Provider | undefined;
/**
 * Shorthand aliases → canonical param name.
 * Canonical names use snake_case and follow OpenAI conventions where possible.
 */
declare const ALIASES: Record<string, string>;
/**
 * Canonical param name → provider-specific API param name.
 * Only includes params the provider actually supports.
 */
declare const PROVIDER_PARAMS: Record<Provider, Record<string, string>>;
/**
 * Validation specs per provider, keyed by provider-specific param name.
 */
interface ParamSpec {
    type: "number" | "string" | "boolean";
    min?: number;
    max?: number;
    values?: string[];
}
declare const PARAM_SPECS: Record<Provider, Record<string, ParamSpec>>;
/** OpenAI reasoning models don't support standard sampling params. */
declare function isReasoningModel(model: string): boolean;
/** Providers that can route to OpenAI models (and need reasoning-model checks). */
declare function canHostOpenAIModels(provider: Provider): boolean;
// Canonical param names rejected by OpenAI reasoning models.
declare const REASONING_MODEL_UNSUPPORTED: Set<string>;
/**
 * Bedrock model IDs are prefixed with the vendor name.
 * e.g. "anthropic.claude-sonnet-4-5-20250929-v1:0"
 */
type BedrockModelFamily = "anthropic" | "meta" | "amazon" | "mistral" | "cohere" | "ai21";
declare function detectBedrockModelFamily(model: string): BedrockModelFamily | undefined;
/** Whether a Bedrock model supports prompt caching (Claude and Nova only). */
declare function bedrockSupportsCaching(model: string): boolean;
/** Cache value normalization per provider. */
declare const CACHE_VALUES: Record<Provider, string | undefined>;
/** Valid cache TTL values per provider. */
declare const CACHE_TTLS: Record<Provider, string[] | undefined>;
/** Match a duration expression like "5m", "1h", "30m". */
declare const DURATION_RE: RegExp;
interface ProviderMeta {
    /** Provider identifier — matches the Provider union type. */
    id: Provider;
    /** Human-readable display name. */
    name: string;
    /** Default / canonical API hostname. */
    host: string;
    /** Brand color as a CSS hex value. */
    color: string;
}
declare const PROVIDER_META: ProviderMeta[];
/**
 * Suggested / common model IDs per provider, ordered by recency.
 * Not exhaustive — providers add models frequently.
 */
declare const MODELS: Record<Provider, string[]>;
/**
 * Canonical parameter spec — keyed by canonical (snake_case) param names
 * with defaults and descriptions for UI consumption.
 */
interface CanonicalParamSpec {
    type: "number" | "string" | "boolean" | "enum";
    min?: number;
    max?: number;
    values?: string[];
    default?: string | number | boolean;
    description?: string;
}
declare const CANONICAL_PARAM_SPECS: Record<Provider, Record<string, CanonicalParamSpec>>;

export { ALIASES, type BedrockModelFamily, CACHE_TTLS, CACHE_VALUES, CANONICAL_PARAM_SPECS, type CanonicalParamSpec, DURATION_RE, MODELS, PARAM_SPECS, PROVIDER_META, PROVIDER_PARAMS, type ParamSpec, type Provider, type ProviderMeta, REASONING_MODEL_UNSUPPORTED, bedrockSupportsCaching, canHostOpenAIModels, detectBedrockModelFamily, detectProvider, isReasoningModel };
|
|
@@ -0,0 +1,522 @@
|
|
|
1
|
+
// src/providers.ts
|
|
2
|
+
// Map an API hostname to a known provider id; undefined when unrecognized.
// Checked in priority order — aggregator/gateway hosts are tested before the
// individual vendors, matching the original if-chain exactly.
function detectProvider(host) {
  const matchers = [
    ["openrouter", "openrouter"],
    ["gateway.ai.vercel", "vercel"],
    ["amazonaws", "bedrock"],
    ["bedrock", "bedrock"],
    ["openai", "openai"],
    ["anthropic", "anthropic"],
    ["claude", "anthropic"],
    ["googleapis", "google"],
    ["google", "google"],
    ["mistral", "mistral"],
    ["cohere", "cohere"]
  ];
  const hit = matchers.find(([needle]) => host.includes(needle));
  return hit ? hit[1] : void 0;
}
|
|
13
|
+
// Shorthand / camelCase / vendor-specific aliases → canonical snake_case
// param name. Lookup data only; consumed by whatever normalizes user params.
var ALIASES = {
  // temperature
  temp: "temperature",
  // max_tokens
  max: "max_tokens",
  max_out: "max_tokens",
  max_output: "max_tokens",
  max_output_tokens: "max_tokens",
  max_completion_tokens: "max_tokens",
  maxOutputTokens: "max_tokens",
  maxTokens: "max_tokens",
  // top_p
  topp: "top_p",
  topP: "top_p",
  nucleus: "top_p",
  // top_k
  topk: "top_k",
  topK: "top_k",
  // frequency_penalty
  freq: "frequency_penalty",
  freq_penalty: "frequency_penalty",
  frequencyPenalty: "frequency_penalty",
  // NOTE(review): repetition_penalty is mapped onto frequency_penalty even
  // though some APIs treat them as distinct (multiplicative vs additive) —
  // confirm this conflation is intended.
  repetition_penalty: "frequency_penalty",
  // presence_penalty
  pres: "presence_penalty",
  pres_penalty: "presence_penalty",
  presencePenalty: "presence_penalty",
  // stop
  stop_sequences: "stop",
  stopSequences: "stop",
  stop_sequence: "stop",
  // seed
  random_seed: "seed",
  randomSeed: "seed",
  // n (completions count)
  candidateCount: "n",
  candidate_count: "n",
  num_completions: "n",
  // effort / reasoning
  reasoning_effort: "effort",
  reasoning: "effort",
  // cache
  cache_control: "cache",
  cacheControl: "cache",
  cachePoint: "cache",
  cache_point: "cache"
};
|
|
60
|
+
// Canonical (snake_case) param name → the name the provider's API actually
// accepts. A key that is absent means the provider does not support that
// param. Pure lookup data.
var PROVIDER_PARAMS = {
  openai: {
    temperature: "temperature",
    max_tokens: "max_tokens",
    top_p: "top_p",
    frequency_penalty: "frequency_penalty",
    presence_penalty: "presence_penalty",
    stop: "stop",
    n: "n",
    seed: "seed",
    stream: "stream",
    effort: "reasoning_effort"
  },
  anthropic: {
    temperature: "temperature",
    max_tokens: "max_tokens",
    top_p: "top_p",
    top_k: "top_k",
    stop: "stop_sequences",
    stream: "stream",
    effort: "effort",
    cache: "cache_control",
    cache_ttl: "cache_ttl"
  },
  google: {
    // Gemini generationConfig uses camelCase
    temperature: "temperature",
    max_tokens: "maxOutputTokens",
    top_p: "topP",
    top_k: "topK",
    frequency_penalty: "frequencyPenalty",
    presence_penalty: "presencePenalty",
    stop: "stopSequences",
    n: "candidateCount",
    stream: "stream",
    seed: "seed",
    responseMimeType: "responseMimeType",
    responseSchema: "responseSchema"
  },
  mistral: {
    temperature: "temperature",
    max_tokens: "max_tokens",
    top_p: "top_p",
    frequency_penalty: "frequency_penalty",
    presence_penalty: "presence_penalty",
    stop: "stop",
    n: "n",
    seed: "random_seed",
    stream: "stream",
    safe_prompt: "safe_prompt",
    min_tokens: "min_tokens"
  },
  cohere: {
    temperature: "temperature",
    max_tokens: "max_tokens",
    top_p: "p",
    top_k: "k",
    frequency_penalty: "frequency_penalty",
    presence_penalty: "presence_penalty",
    stop: "stop_sequences",
    stream: "stream",
    seed: "seed"
  },
  bedrock: {
    // Bedrock Converse API uses camelCase
    temperature: "temperature",
    max_tokens: "maxTokens",
    top_p: "topP",
    top_k: "topK",
    // Claude models via additionalModelRequestFields
    stop: "stopSequences",
    stream: "stream",
    cache: "cache_control",
    cache_ttl: "cache_ttl"
  },
  openrouter: {
    // OpenAI-compatible API with extra routing params
    temperature: "temperature",
    max_tokens: "max_tokens",
    top_p: "top_p",
    top_k: "top_k",
    frequency_penalty: "frequency_penalty",
    presence_penalty: "presence_penalty",
    stop: "stop",
    n: "n",
    seed: "seed",
    stream: "stream",
    effort: "reasoning_effort"
  },
  vercel: {
    // OpenAI-compatible gateway
    temperature: "temperature",
    max_tokens: "max_tokens",
    top_p: "top_p",
    top_k: "top_k",
    frequency_penalty: "frequency_penalty",
    presence_penalty: "presence_penalty",
    stop: "stop",
    n: "n",
    seed: "seed",
    stream: "stream",
    effort: "reasoning_effort"
  }
};
|
|
163
|
+
// Validation specs per provider, keyed by the PROVIDER-SPECIFIC param name
// (i.e. the values of PROVIDER_PARAMS, not the canonical names). Pure data.
var PARAM_SPECS = {
  openai: {
    temperature: { type: "number", min: 0, max: 2 },
    max_tokens: { type: "number", min: 1 },
    top_p: { type: "number", min: 0, max: 1 },
    frequency_penalty: { type: "number", min: -2, max: 2 },
    presence_penalty: { type: "number", min: -2, max: 2 },
    stop: { type: "string" },
    n: { type: "number", min: 1 },
    seed: { type: "number" },
    stream: { type: "boolean" },
    reasoning_effort: {
      type: "string",
      values: ["none", "minimal", "low", "medium", "high", "xhigh"]
    }
  },
  anthropic: {
    temperature: { type: "number", min: 0, max: 1 },
    max_tokens: { type: "number", min: 1 },
    top_p: { type: "number", min: 0, max: 1 },
    top_k: { type: "number", min: 0 },
    stop_sequences: { type: "string" },
    stream: { type: "boolean" },
    effort: { type: "string", values: ["low", "medium", "high", "max"] },
    cache_control: { type: "string", values: ["ephemeral"] },
    cache_ttl: { type: "string", values: ["5m", "1h"] }
  },
  google: {
    temperature: { type: "number", min: 0, max: 2 },
    maxOutputTokens: { type: "number", min: 1 },
    topP: { type: "number", min: 0, max: 1 },
    topK: { type: "number", min: 0 },
    frequencyPenalty: { type: "number", min: -2, max: 2 },
    presencePenalty: { type: "number", min: -2, max: 2 },
    stopSequences: { type: "string" },
    candidateCount: { type: "number", min: 1 },
    stream: { type: "boolean" },
    seed: { type: "number" },
    responseMimeType: { type: "string" },
    responseSchema: { type: "string" }
  },
  mistral: {
    temperature: { type: "number", min: 0, max: 1 },
    max_tokens: { type: "number", min: 1 },
    top_p: { type: "number", min: 0, max: 1 },
    frequency_penalty: { type: "number", min: -2, max: 2 },
    presence_penalty: { type: "number", min: -2, max: 2 },
    stop: { type: "string" },
    n: { type: "number", min: 1 },
    random_seed: { type: "number" },
    stream: { type: "boolean" },
    safe_prompt: { type: "boolean" },
    min_tokens: { type: "number", min: 0 }
  },
  cohere: {
    temperature: { type: "number", min: 0, max: 1 },
    max_tokens: { type: "number", min: 1 },
    p: { type: "number", min: 0, max: 1 },
    k: { type: "number", min: 0, max: 500 },
    frequency_penalty: { type: "number", min: 0, max: 1 },
    presence_penalty: { type: "number", min: 0, max: 1 },
    stop_sequences: { type: "string" },
    stream: { type: "boolean" },
    seed: { type: "number" }
  },
  bedrock: {
    // Converse API inferenceConfig params
    temperature: { type: "number", min: 0, max: 1 },
    maxTokens: { type: "number", min: 1 },
    topP: { type: "number", min: 0, max: 1 },
    topK: { type: "number", min: 0 },
    stopSequences: { type: "string" },
    stream: { type: "boolean" },
    cache_control: { type: "string", values: ["ephemeral"] },
    cache_ttl: { type: "string", values: ["5m", "1h"] }
  },
  openrouter: {
    // Loose validation — proxies to many providers with varying ranges
    temperature: { type: "number", min: 0, max: 2 },
    max_tokens: { type: "number", min: 1 },
    top_p: { type: "number", min: 0, max: 1 },
    top_k: { type: "number", min: 0 },
    frequency_penalty: { type: "number", min: -2, max: 2 },
    presence_penalty: { type: "number", min: -2, max: 2 },
    stop: { type: "string" },
    n: { type: "number", min: 1 },
    seed: { type: "number" },
    stream: { type: "boolean" },
    reasoning_effort: {
      type: "string",
      values: ["none", "minimal", "low", "medium", "high", "xhigh"]
    }
  },
  vercel: {
    // Loose validation — proxies to many providers with varying ranges
    temperature: { type: "number", min: 0, max: 2 },
    max_tokens: { type: "number", min: 1 },
    top_p: { type: "number", min: 0, max: 1 },
    top_k: { type: "number", min: 0 },
    frequency_penalty: { type: "number", min: -2, max: 2 },
    presence_penalty: { type: "number", min: -2, max: 2 },
    stop: { type: "string" },
    n: { type: "number", min: 1 },
    seed: { type: "number" },
    stream: { type: "boolean" },
    reasoning_effort: {
      type: "string",
      values: ["none", "minimal", "low", "medium", "high", "xhigh"]
    }
  }
};
|
|
274
|
+
// True when the bare model name (after any "vendor/" routing prefix) starts
// with "o1", "o3", or "o4" — OpenAI's reasoning-model series.
function isReasoningModel(model) {
  const slash = model.lastIndexOf("/");
  const bare = slash === -1 ? model : model.slice(slash + 1);
  return /^o[134]/.test(bare);
}
|
|
278
|
+
// Providers whose catalog can include OpenAI models, and therefore need the
// reasoning-model param restrictions applied.
function canHostOpenAIModels(provider) {
  return ["openai", "openrouter", "vercel"].includes(provider);
}
|
|
281
|
+
// Canonical param names that OpenAI reasoning models reject.
var REASONING_MODEL_UNSUPPORTED = /* @__PURE__ */ new Set(
  ["temperature", "top_p", "frequency_penalty", "presence_penalty", "n"]
);
|
|
288
|
+
// Extract the vendor family from a Bedrock model ID such as
// "anthropic.claude-sonnet-4-5-20250929-v1:0". Cross-region inference
// profiles carry a region prefix ("us.", "eu.", "apac.", "global.") that is
// skipped before the vendor segment. Returns undefined for unknown vendors.
function detectBedrockModelFamily(model) {
  const segments = model.split(".");
  const regionPrefixes = ["us", "eu", "apac", "global"];
  const vendor =
    segments.length > 1 && regionPrefixes.includes(segments[0])
      ? segments[1]
      : segments[0];
  switch (vendor) {
    case "anthropic":
    case "meta":
    case "amazon":
    case "mistral":
    case "cohere":
    case "ai21":
      return vendor;
    default:
      return undefined;
  }
}
|
|
304
|
+
// Prompt caching on Bedrock is available only for Anthropic Claude models
// and for Amazon Nova models; everything else returns false.
function bedrockSupportsCaching(model) {
  switch (detectBedrockModelFamily(model)) {
    case "anthropic":
      return true;
    case "amazon":
      return model.includes("nova");
    default:
      return false;
  }
}
|
|
310
|
+
// Per-provider value to send for the canonical "cache" param; undefined means
// the provider has no explicit cache param. Pure data.
var CACHE_VALUES = {
  openai: void 0,
  // OpenAI auto-caches; no explicit param
  anthropic: "ephemeral",
  google: void 0,
  // Google uses explicit caching API, not a param
  mistral: void 0,
  cohere: void 0,
  bedrock: "ephemeral",
  // Supported for Claude models on Bedrock
  openrouter: void 0,
  // Depends on underlying provider
  vercel: void 0
  // Depends on underlying provider
};
|
|
325
|
+
// Valid cache TTL strings per provider; undefined means TTL is not
// configurable for that provider. Pure data.
var CACHE_TTLS = {
  openai: void 0,
  anthropic: ["5m", "1h"],
  google: void 0,
  mistral: void 0,
  cohere: void 0,
  bedrock: ["5m", "1h"],
  // Claude on Bedrock uses same TTLs as direct Anthropic
  openrouter: void 0,
  vercel: void 0
};
|
|
336
|
+
// Whole-string match for duration expressions: one or more digits followed
// by a minutes/hours unit, e.g. "5m", "1h", "30m".
var DURATION_RE = /^[0-9]+[mh]$/;
|
|
337
|
+
// Display metadata per provider: id, human name, canonical API host, and a
// brand color hex value (UI consumption). Pure data.
var PROVIDER_META = [
  { id: "openai", name: "OpenAI", host: "api.openai.com", color: "#10a37f" },
  { id: "anthropic", name: "Anthropic", host: "api.anthropic.com", color: "#e8956a" },
  { id: "google", name: "Google", host: "generativelanguage.googleapis.com", color: "#4285f4" },
  { id: "mistral", name: "Mistral", host: "api.mistral.ai", color: "#ff7000" },
  { id: "cohere", name: "Cohere", host: "api.cohere.com", color: "#39594d" },
  { id: "bedrock", name: "Bedrock", host: "bedrock-runtime.us-east-1.amazonaws.com", color: "#ff9900" },
  { id: "openrouter", name: "OpenRouter", host: "openrouter.ai", color: "#818cf8" },
  { id: "vercel", name: "Vercel", host: "gateway.ai.vercel.app", color: "#ededed" }
];
|
|
347
|
+
// Suggested / common model IDs per provider, ordered by recency.
// Not exhaustive — providers add models frequently. Pure data.
var MODELS = {
  openai: [
    "gpt-5.2",
    "gpt-5.2-pro",
    "gpt-4.1",
    "gpt-4.1-mini",
    "gpt-4.1-nano",
    "o3",
    "o3-mini",
    "o4-mini",
    "o1-pro"
  ],
  anthropic: [
    "claude-opus-4-6",
    "claude-sonnet-4-6",
    "claude-sonnet-4-5",
    "claude-haiku-4-5"
  ],
  google: [
    "gemini-3-pro-preview",
    "gemini-3-flash-preview",
    "gemini-2.5-pro",
    "gemini-2.5-flash"
  ],
  mistral: [
    "mistral-large-latest",
    "mistral-medium-latest",
    "mistral-small-latest",
    "codestral-latest",
    "magistral-medium-latest"
  ],
  cohere: [
    "command-a-03-2025",
    "command-r-plus-08-2024",
    "command-r-08-2024",
    "command-r7b-12-2024"
  ],
  bedrock: [
    "anthropic.claude-opus-4-6-v1",
    "anthropic.claude-sonnet-4-6-v1",
    "anthropic.claude-haiku-4-5-v1",
    "amazon.nova-pro-v1",
    "amazon.nova-lite-v1",
    "meta.llama3-70b-instruct-v1:0"
  ],
  openrouter: [
    "openai/gpt-5.2",
    "anthropic/claude-opus-4-6",
    "google/gemini-2.5-pro",
    "mistral/mistral-large-latest"
  ],
  vercel: [
    "openai/gpt-5.2",
    "anthropic/claude-opus-4-6",
    "google/gemini-2.5-pro",
    "google/gemini-3-pro-preview",
    "google/gemini-3-flash-preview",
    "mistral/mistral-large-latest",
    "qwen/qwen2.5-pro"
  ]
};
|
|
408
|
+
// Canonical parameter specs per provider, keyed by canonical (snake_case)
// param names, with defaults and descriptions for UI consumption.
// NOTE(review): several number-typed params use default: "" — presumably the
// UI treats empty string as "unset"; confirm before tightening the types.
var CANONICAL_PARAM_SPECS = {
  openai: {
    temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
    frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
    presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
    stop: { type: "string", default: "", description: "Stop sequences" },
    n: { type: "number", min: 1, default: 1, description: "Completions count" },
    seed: { type: "number", default: "", description: "Random seed" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    effort: { type: "enum", values: ["none", "minimal", "low", "medium", "high", "xhigh"], default: "medium", description: "Reasoning effort" }
  },
  anthropic: {
    temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
    top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
    stop: { type: "string", default: "", description: "Stop sequences" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    effort: { type: "enum", values: ["low", "medium", "high", "max"], default: "medium", description: "Thinking effort" },
    cache: { type: "enum", values: ["ephemeral"], default: "ephemeral", description: "Cache control" },
    cache_ttl: { type: "enum", values: ["5m", "1h"], default: "5m", description: "Cache TTL" }
  },
  google: {
    temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
    top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
    frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
    presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
    stop: { type: "string", default: "", description: "Stop sequences" },
    n: { type: "number", min: 1, default: 1, description: "Candidate count" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    seed: { type: "number", default: "", description: "Random seed" }
  },
  mistral: {
    temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
    frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
    presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
    stop: { type: "string", default: "", description: "Stop sequences" },
    n: { type: "number", min: 1, default: 1, description: "Completions count" },
    seed: { type: "number", default: "", description: "Random seed" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    safe_prompt: { type: "boolean", default: false, description: "Enable safe prompt" },
    min_tokens: { type: "number", min: 0, default: 0, description: "Minimum tokens" }
  },
  cohere: {
    temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling (p)" },
    top_k: { type: "number", min: 0, max: 500, default: 40, description: "Top-K sampling (k)" },
    frequency_penalty: { type: "number", min: 0, max: 1, default: 0, description: "Penalize frequent tokens" },
    presence_penalty: { type: "number", min: 0, max: 1, default: 0, description: "Penalize repeated topics" },
    stop: { type: "string", default: "", description: "Stop sequences" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    seed: { type: "number", default: "", description: "Random seed" }
  },
  bedrock: {
    temperature: { type: "number", min: 0, max: 1, default: 0.7, description: "Controls randomness" },
    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
    top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
    stop: { type: "string", default: "", description: "Stop sequences" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    cache: { type: "enum", values: ["ephemeral"], default: "ephemeral", description: "Cache control" },
    cache_ttl: { type: "enum", values: ["5m", "1h"], default: "5m", description: "Cache TTL" }
  },
  openrouter: {
    temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
    top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
    frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
    presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
    stop: { type: "string", default: "", description: "Stop sequences" },
    n: { type: "number", min: 1, default: 1, description: "Completions count" },
    seed: { type: "number", default: "", description: "Random seed" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    effort: { type: "enum", values: ["none", "minimal", "low", "medium", "high", "xhigh"], default: "medium", description: "Reasoning effort" }
  },
  vercel: {
    temperature: { type: "number", min: 0, max: 2, default: 0.7, description: "Controls randomness" },
    max_tokens: { type: "number", min: 1, default: 4096, description: "Maximum output tokens" },
    top_p: { type: "number", min: 0, max: 1, default: 1, description: "Nucleus sampling" },
    top_k: { type: "number", min: 0, default: 40, description: "Top-K sampling" },
    frequency_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize frequent tokens" },
    presence_penalty: { type: "number", min: -2, max: 2, default: 0, description: "Penalize repeated topics" },
    stop: { type: "string", default: "", description: "Stop sequences" },
    n: { type: "number", min: 1, default: 1, description: "Completions count" },
    seed: { type: "number", default: "", description: "Random seed" },
    stream: { type: "boolean", default: false, description: "Stream response" },
    effort: { type: "enum", values: ["none", "minimal", "low", "medium", "high", "xhigh"], default: "medium", description: "Reasoning effort" }
  }
};
|
|
505
|
+
// Public surface of the ESM bundle — mirrors the names declared in
// dist/providers.d.ts.
export {
  ALIASES,
  CACHE_TTLS,
  CACHE_VALUES,
  CANONICAL_PARAM_SPECS,
  DURATION_RE,
  MODELS,
  PARAM_SPECS,
  PROVIDER_META,
  PROVIDER_PARAMS,
  REASONING_MODEL_UNSUPPORTED,
  bedrockSupportsCaching,
  canHostOpenAIModels,
  detectBedrockModelFamily,
  detectProvider,
  isReasoningModel
};
//# sourceMappingURL=providers.js.map
|