190proof 1.0.88 → 1.0.89

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -12,6 +12,7 @@ Fully-local unified interface across multiple AI providers that includes:
12
12
  - 🔄 Automatic retries with configurable attempts
13
13
  - 📡 Streaming by default
14
14
  - ☁️ Cloud service providers supported (Azure, AWS Bedrock)
15
+ - 🔌 Provider prefix strings for any model without waiting for package updates
15
16
 
16
17
  ## Installation
17
18
 
@@ -23,12 +24,13 @@ npm install 190proof
23
24
 
24
25
  ### Basic Example
25
26
 
27
+ Use any model from any provider with the `provider:model-id` format:
28
+
26
29
  ```typescript
27
- import { callWithRetries } from "190proof";
28
- import { GPTModel, GenericPayload } from "190proof/interfaces";
30
+ import { callWithRetries, GenericPayload } from "190proof";
29
31
 
30
32
  const payload: GenericPayload = {
31
- model: GPTModel.GPT4O_MINI,
33
+ model: "openai:gpt-4o-mini",
32
34
  messages: [
33
35
  {
34
36
  role: "user",
@@ -44,36 +46,35 @@ console.log(response.content);
44
46
  ### Using Different Providers
45
47
 
46
48
  ```typescript
47
- import { callWithRetries } from "190proof";
48
- import {
49
- ClaudeModel,
50
- GeminiModel,
51
- GroqModel,
52
- OpenRouterModel,
53
- GenericPayload,
54
- } from "190proof/interfaces";
49
+ import { callWithRetries, GenericPayload } from "190proof";
50
+
51
+ // OpenAI
52
+ const openaiPayload: GenericPayload = {
53
+ model: "openai:gpt-5",
54
+ messages: [{ role: "user", content: "Hello!" }],
55
+ };
55
56
 
56
57
  // Anthropic
57
58
  const claudePayload: GenericPayload = {
58
- model: ClaudeModel.SONNET_4,
59
+ model: "anthropic:claude-sonnet-4-5",
59
60
  messages: [{ role: "user", content: "Hello!" }],
60
61
  };
61
62
 
62
63
  // Google
63
64
  const geminiPayload: GenericPayload = {
64
- model: GeminiModel.GEMINI_2_0_FLASH,
65
+ model: "google:gemini-2.0-flash",
65
66
  messages: [{ role: "user", content: "Hello!" }],
66
67
  };
67
68
 
68
69
  // Groq
69
70
  const groqPayload: GenericPayload = {
70
- model: GroqModel.LLAMA_3_70B_8192,
71
+ model: "groq:llama-3.3-70b-versatile",
71
72
  messages: [{ role: "user", content: "Hello!" }],
72
73
  };
73
74
 
74
75
  // OpenRouter
75
76
  const openRouterPayload: GenericPayload = {
76
- model: OpenRouterModel.QWEN3_6_PLUS_FREE,
77
+ model: "openrouter:google/gemma-4-31b-it:free",
77
78
  messages: [{ role: "user", content: "Hello!" }],
78
79
  };
79
80
 
@@ -84,7 +85,7 @@ const response = await callWithRetries("request-id", claudePayload);
84
85
 
85
86
  ```typescript
86
87
  const payload: GenericPayload = {
87
- model: GPTModel.GPT4O,
88
+ model: "openai:gpt-4o",
88
89
  messages: [
89
90
  {
90
91
  role: "user",
@@ -117,7 +118,7 @@ const response = await callWithRetries("function-call-example", payload);
117
118
 
118
119
  ```typescript
119
120
  const payload: GenericPayload = {
120
- model: ClaudeModel.SONNET_4,
121
+ model: "anthropic:claude-sonnet-4-5",
121
122
  messages: [
122
123
  {
123
124
  role: "user",
@@ -139,7 +140,7 @@ const response = await callWithRetries("image-example", payload);
139
140
 
140
141
  ```typescript
141
142
  const payload: GenericPayload = {
142
- model: GeminiModel.GEMINI_2_0_FLASH,
143
+ model: "google:gemini-2.0-flash",
143
144
  messages: [
144
145
  {
145
146
  role: "system",
@@ -155,61 +156,83 @@ const payload: GenericPayload = {
155
156
  const response = await callWithRetries("system-message-example", payload);
156
157
  ```
157
158
 
159
+ ### Inspecting Model Routing
160
+
161
+ Use `parseModelString` to see how a model string will be routed:
162
+
163
+ ```typescript
164
+ import { parseModelString } from "190proof";
165
+
166
+ parseModelString("openai:gpt-7");
167
+ // → { provider: "openai", modelId: "gpt-7" }
168
+
169
+ parseModelString("openrouter:org/model-name:free");
170
+ // → { provider: "openrouter", modelId: "org/model-name:free" }
171
+
172
+ ```
173
+
174
+ ## Provider Prefix Format
175
+
176
+ The model string format is `provider:model-id`, where the provider prefix is one of:
177
+
178
+ | Prefix | Provider |
179
+ |---|---|
180
+ | `openai` | OpenAI |
181
+ | `anthropic` | Anthropic |
182
+ | `google` | Google (Gemini) |
183
+ | `groq` | Groq |
184
+ | `openrouter` | OpenRouter |
185
+
186
+ The prefix is stripped before sending to the API, so the model ID should be exactly what the provider expects (e.g. `"openai:gpt-4o"` sends `"gpt-4o"` to OpenAI).
187
+
158
188
  ## Supported Models
159
189
 
160
- ### OpenAI Models
161
-
162
- - `gpt-3.5-turbo-0613`
163
- - `gpt-3.5-turbo-16k-0613`
164
- - `gpt-3.5-turbo-0125`
165
- - `gpt-4-1106-preview`
166
- - `gpt-4-0125-preview`
167
- - `gpt-4-turbo-2024-04-09`
168
- - `gpt-4o`
169
- - `gpt-4o-mini`
170
- - `o1-preview`
171
- - `o1-mini`
172
- - `o3-mini`
173
- - `gpt-4.1`
174
- - `gpt-4.1-mini`
175
- - `gpt-4.1-nano`
176
- - `gpt-5`
177
- - `gpt-5-mini`
178
-
179
- ### Anthropic Models
180
-
181
- - `claude-3-haiku-20240307`
182
- - `claude-3-sonnet-20240229`
183
- - `claude-3-opus-20240229`
184
- - `claude-3-5-haiku-20241022`
185
- - `claude-3-5-sonnet-20241022`
186
- - `claude-sonnet-4-20250514`
187
- - `claude-opus-4-20250514`
188
- - `claude-opus-4-1`
189
- - `claude-haiku-4-5`
190
- - `claude-sonnet-4-5`
191
- - `claude-opus-4-5`
192
-
193
- ### Google Models
194
-
195
- - `gemini-1.5-pro-latest`
196
- - `gemini-exp-1206`
197
- - `gemini-2.0-flash`
198
- - `gemini-2.0-flash-exp-image-generation`
199
- - `gemini-2.0-flash-thinking-exp`
200
- - `gemini-2.0-flash-thinking-exp-01-21`
201
- - `gemini-2.5-flash-preview-04-17`
202
- - `gemini-3-flash-preview`
203
- - `gemini-3.1-flash-lite-preview`
204
-
205
- ### Groq Models
206
-
207
- - `llama3-70b-8192`
208
- - `deepseek-r1-distill-llama-70b`
209
-
210
- ### OpenRouter Models
211
-
212
- - `qwen/qwen3.6-plus:free`
190
+ These models are tested. You can use any model with the `provider:model-id` format.
191
+
192
+ ### OpenAI
193
+
194
+ - `openai:gpt-5`
195
+ - `openai:gpt-5-mini`
196
+ - `openai:gpt-4.1`
197
+ - `openai:gpt-4.1-mini`
198
+ - `openai:gpt-4.1-nano`
199
+ - `openai:gpt-4o`
200
+ - `openai:gpt-4o-mini`
201
+ - `openai:o3-mini`
202
+ - `openai:o1-preview`
203
+ - `openai:o1-mini`
204
+
205
+ ### Anthropic
206
+
207
+ - `anthropic:claude-opus-4-5`
208
+ - `anthropic:claude-sonnet-4-5`
209
+ - `anthropic:claude-haiku-4-5`
210
+ - `anthropic:claude-opus-4-1`
211
+ - `anthropic:claude-opus-4-20250514`
212
+ - `anthropic:claude-sonnet-4-20250514`
213
+ - `anthropic:claude-3-5-sonnet-20241022`
214
+ - `anthropic:claude-3-5-haiku-20241022`
215
+
216
+ ### Google
217
+
218
+ - `google:gemini-3.1-flash-lite-preview`
219
+ - `google:gemini-3-flash-preview`
220
+ - `google:gemini-2.5-flash-preview-04-17`
221
+ - `google:gemini-2.0-flash`
222
+ - `google:gemini-2.0-flash-exp-image-generation`
223
+ - `google:gemini-1.5-pro-latest`
224
+
225
+ ### Groq
226
+
227
+ - `groq:llama-3.3-70b-versatile`
228
+ - `groq:llama3-70b-8192`
229
+ - `groq:qwen/qwen3-32b`
230
+ - `groq:deepseek-r1-distill-llama-70b`
231
+
232
+ ### OpenRouter
233
+
234
+ - `openrouter:google/gemma-4-31b-it:free`
235
+ - `openrouter:google/gemma-4-31b-it`
213
236
 
214
237
  ## Environment Variables
215
238
 
@@ -269,6 +292,18 @@ interface ParsedResponseMessage {
269
292
  }
270
293
  ```
271
294
 
295
+ ### `parseModelString(model)`
296
+
297
+ Parses a model string into its provider and model ID components.
298
+
299
+ #### Parameters
300
+
301
+ - `model`: `string` - A model string in `"provider:model-id"` format
302
+
303
+ #### Returns
304
+
305
+ `{ provider: Provider, modelId: string }`
306
+
272
307
  ### Configuration Options
273
308
 
274
309
  #### OpenAI Config
@@ -280,7 +315,7 @@ interface OpenAIConfig {
280
315
  baseUrl: string;
281
316
  orgId?: string;
282
317
  modelConfigMap?: Record<
283
- GPTModel,
318
+ string,
284
319
  {
285
320
  resource: string;
286
321
  deployment: string;
package/dist/index.d.mts CHANGED
@@ -1,3 +1,4 @@
1
+ /** @deprecated Use provider prefix strings instead, e.g. `"anthropic:claude-sonnet-4-5"` */
1
2
  declare enum ClaudeModel {
2
3
  HAIKU_3 = "claude-3-haiku-20240307",
3
4
  SONNET_3 = "claude-3-sonnet-20240229",
@@ -11,6 +12,7 @@ declare enum ClaudeModel {
11
12
  SONNET_4_5 = "claude-sonnet-4-5",
12
13
  OPUS_4_5 = "claude-opus-4-5"
13
14
  }
15
+ /** @deprecated Use provider prefix strings instead, e.g. `"openai:gpt-4o"` */
14
16
  declare enum GPTModel {
15
17
  GPT35_0613 = "gpt-3.5-turbo-0613",
16
18
  GPT35_0613_16K = "gpt-3.5-turbo-16k-0613",
@@ -29,15 +31,19 @@ declare enum GPTModel {
29
31
  GPT5 = "gpt-5",
30
32
  GPT5_MINI = "gpt-5-mini"
31
33
  }
34
+ /** @deprecated Use provider prefix strings instead, e.g. `"groq:llama-3.3-70b-versatile"` */
32
35
  declare enum GroqModel {
33
36
  LLAMA_3_70B_8192 = "llama3-70b-8192",
34
37
  LLAMA_3_3_70B_VERSATILE = "llama-3.3-70b-versatile",
35
38
  QWEN3_32B = "qwen/qwen3-32b",
36
39
  DEEPSEEK_R1_DISTILL_LLAMA_70B = "deepseek-r1-distill-llama-70b"
37
40
  }
41
+ /** @deprecated Use provider prefix strings instead, e.g. `"openrouter:google/gemma-4-31b-it:free"` */
38
42
  declare enum OpenRouterModel {
39
- QWEN3_6_PLUS_FREE = "qwen/qwen3.6-plus:free"
43
+ GEMMA_4_31B_IT_FREE = "google/gemma-4-31b-it:free",
44
+ GEMMA_4_31B_IT = "google/gemma-4-31b-it"
40
45
  }
46
+ /** @deprecated Use provider prefix strings instead, e.g. `"google:gemini-2.0-flash"` */
41
47
  declare enum GeminiModel {
42
48
  GEMINI_1_5_PRO = "gemini-1.5-pro-latest",
43
49
  GEMINI_EXP_1206 = "gemini-exp-1206",
@@ -102,7 +108,8 @@ interface FunctionDefinition {
102
108
  description?: string;
103
109
  parameters: Record<string, any>;
104
110
  }
105
- type AnyModel = GPTModel | ClaudeModel | GroqModel | GeminiModel | OpenRouterModel;
111
+ type Provider = "openai" | "anthropic" | "google" | "groq" | "openrouter";
112
+ type AnyModel = GPTModel | ClaudeModel | GroqModel | GeminiModel | OpenRouterModel | (string & {});
106
113
  interface GenericPayload {
107
114
  model: AnyModel;
108
115
  messages: GenericMessage[];
@@ -114,6 +121,10 @@ interface GenericPayload {
114
121
  fallbackModel?: AnyModel;
115
122
  }
116
123
 
124
+ declare function parseModelString(model: string): {
125
+ provider: Provider;
126
+ modelId: string;
127
+ };
117
128
  declare function callWithRetries(id: string | string[], aiPayload: GenericPayload, aiConfig?: OpenAIConfig | AnthropicAIConfig, retries?: number, chunkTimeoutMs?: number): Promise<ParsedResponseMessage>;
118
129
 
119
- export { type AnyModel, ClaudeModel, type FunctionDefinition, GPTModel, GeminiModel, type GenericMessage, type GenericPayload, GroqModel, type OpenAIConfig, OpenRouterModel, callWithRetries };
130
+ export { type AnyModel, ClaudeModel, type FunctionDefinition, GPTModel, GeminiModel, type GenericMessage, type GenericPayload, GroqModel, type OpenAIConfig, OpenRouterModel, type Provider, callWithRetries, parseModelString };
package/dist/index.d.ts CHANGED
@@ -1,3 +1,4 @@
1
+ /** @deprecated Use provider prefix strings instead, e.g. `"anthropic:claude-sonnet-4-5"` */
1
2
  declare enum ClaudeModel {
2
3
  HAIKU_3 = "claude-3-haiku-20240307",
3
4
  SONNET_3 = "claude-3-sonnet-20240229",
@@ -11,6 +12,7 @@ declare enum ClaudeModel {
11
12
  SONNET_4_5 = "claude-sonnet-4-5",
12
13
  OPUS_4_5 = "claude-opus-4-5"
13
14
  }
15
+ /** @deprecated Use provider prefix strings instead, e.g. `"openai:gpt-4o"` */
14
16
  declare enum GPTModel {
15
17
  GPT35_0613 = "gpt-3.5-turbo-0613",
16
18
  GPT35_0613_16K = "gpt-3.5-turbo-16k-0613",
@@ -29,15 +31,19 @@ declare enum GPTModel {
29
31
  GPT5 = "gpt-5",
30
32
  GPT5_MINI = "gpt-5-mini"
31
33
  }
34
+ /** @deprecated Use provider prefix strings instead, e.g. `"groq:llama-3.3-70b-versatile"` */
32
35
  declare enum GroqModel {
33
36
  LLAMA_3_70B_8192 = "llama3-70b-8192",
34
37
  LLAMA_3_3_70B_VERSATILE = "llama-3.3-70b-versatile",
35
38
  QWEN3_32B = "qwen/qwen3-32b",
36
39
  DEEPSEEK_R1_DISTILL_LLAMA_70B = "deepseek-r1-distill-llama-70b"
37
40
  }
41
+ /** @deprecated Use provider prefix strings instead, e.g. `"openrouter:google/gemma-4-31b-it:free"` */
38
42
  declare enum OpenRouterModel {
39
- QWEN3_6_PLUS_FREE = "qwen/qwen3.6-plus:free"
43
+ GEMMA_4_31B_IT_FREE = "google/gemma-4-31b-it:free",
44
+ GEMMA_4_31B_IT = "google/gemma-4-31b-it"
40
45
  }
46
+ /** @deprecated Use provider prefix strings instead, e.g. `"google:gemini-2.0-flash"` */
41
47
  declare enum GeminiModel {
42
48
  GEMINI_1_5_PRO = "gemini-1.5-pro-latest",
43
49
  GEMINI_EXP_1206 = "gemini-exp-1206",
@@ -102,7 +108,8 @@ interface FunctionDefinition {
102
108
  description?: string;
103
109
  parameters: Record<string, any>;
104
110
  }
105
- type AnyModel = GPTModel | ClaudeModel | GroqModel | GeminiModel | OpenRouterModel;
111
+ type Provider = "openai" | "anthropic" | "google" | "groq" | "openrouter";
112
+ type AnyModel = GPTModel | ClaudeModel | GroqModel | GeminiModel | OpenRouterModel | (string & {});
106
113
  interface GenericPayload {
107
114
  model: AnyModel;
108
115
  messages: GenericMessage[];
@@ -114,6 +121,10 @@ interface GenericPayload {
114
121
  fallbackModel?: AnyModel;
115
122
  }
116
123
 
124
+ declare function parseModelString(model: string): {
125
+ provider: Provider;
126
+ modelId: string;
127
+ };
117
128
  declare function callWithRetries(id: string | string[], aiPayload: GenericPayload, aiConfig?: OpenAIConfig | AnthropicAIConfig, retries?: number, chunkTimeoutMs?: number): Promise<ParsedResponseMessage>;
118
129
 
119
- export { type AnyModel, ClaudeModel, type FunctionDefinition, GPTModel, GeminiModel, type GenericMessage, type GenericPayload, GroqModel, type OpenAIConfig, OpenRouterModel, callWithRetries };
130
+ export { type AnyModel, ClaudeModel, type FunctionDefinition, GPTModel, GeminiModel, type GenericMessage, type GenericPayload, GroqModel, type OpenAIConfig, OpenRouterModel, type Provider, callWithRetries, parseModelString };
package/dist/index.js CHANGED
@@ -35,7 +35,8 @@ __export(proof_exports, {
35
35
  GeminiModel: () => GeminiModel,
36
36
  GroqModel: () => GroqModel,
37
37
  OpenRouterModel: () => OpenRouterModel,
38
- callWithRetries: () => callWithRetries
38
+ callWithRetries: () => callWithRetries,
39
+ parseModelString: () => parseModelString
39
40
  });
40
41
  module.exports = __toCommonJS(proof_exports);
41
42
 
@@ -81,7 +82,8 @@ var GroqModel = /* @__PURE__ */ ((GroqModel2) => {
81
82
  return GroqModel2;
82
83
  })(GroqModel || {});
83
84
  var OpenRouterModel = /* @__PURE__ */ ((OpenRouterModel2) => {
84
- OpenRouterModel2["QWEN3_6_PLUS_FREE"] = "qwen/qwen3.6-plus:free";
85
+ OpenRouterModel2["GEMMA_4_31B_IT_FREE"] = "google/gemma-4-31b-it:free";
86
+ OpenRouterModel2["GEMMA_4_31B_IT"] = "google/gemma-4-31b-it";
85
87
  return OpenRouterModel2;
86
88
  })(OpenRouterModel || {});
87
89
  var GeminiModel = /* @__PURE__ */ ((GeminiModel2) => {
@@ -503,7 +505,8 @@ async function callOpenAiWithRetries(id, openAiPayload, openAiConfig, retries =
503
505
  openAiConfig == null ? void 0 : openAiConfig.service,
504
506
  openAiPayload.model
505
507
  );
506
- const useStreaming = openAiPayload.model !== "o1-mini" /* O1_MINI */ && openAiPayload.model !== "o1-preview" /* O1_PREVIEW */;
508
+ const modelStr = openAiPayload.model;
509
+ const useStreaming = modelStr !== "o1-mini" /* O1_MINI */ && modelStr !== "o1-preview" /* O1_PREVIEW */ && !modelStr.startsWith("o1");
507
510
  return withRetries(
508
511
  id,
509
512
  "OpenAI",
@@ -1092,62 +1095,82 @@ async function callOpenRouter(id, payload) {
1092
1095
  async function callOpenRouterWithRetries(id, payload, retries = 5) {
1093
1096
  return withRetries(id, "OpenRouter", () => callOpenRouter(id, payload), { retries });
1094
1097
  }
1095
- function isAnthropicPayload(payload) {
1096
- return Object.values(ClaudeModel).includes(payload.model);
1097
- }
1098
- function isOpenAiPayload(payload) {
1099
- return Object.values(GPTModel).includes(payload.model);
1100
- }
1101
- function isGroqPayload(payload) {
1102
- return Object.values(GroqModel).includes(payload.model);
1103
- }
1104
- function isGoogleAIPayload(payload) {
1105
- return Object.values(GeminiModel).includes(payload.model);
1106
- }
1107
- function isOpenRouterPayload(payload) {
1108
- return Object.values(OpenRouterModel).includes(payload.model);
1098
+ var VALID_PROVIDERS = ["openai", "anthropic", "google", "groq", "openrouter"];
1099
+ var ENUM_PROVIDER_MAP = [
1100
+ { values: new Set(Object.values(GPTModel)), provider: "openai" },
1101
+ { values: new Set(Object.values(ClaudeModel)), provider: "anthropic" },
1102
+ { values: new Set(Object.values(GeminiModel)), provider: "google" },
1103
+ { values: new Set(Object.values(GroqModel)), provider: "groq" },
1104
+ { values: new Set(Object.values(OpenRouterModel)), provider: "openrouter" }
1105
+ ];
1106
+ function parseModelString(model) {
1107
+ const colonIndex = model.indexOf(":");
1108
+ if (colonIndex !== -1) {
1109
+ const prefix = model.substring(0, colonIndex);
1110
+ if (VALID_PROVIDERS.includes(prefix)) {
1111
+ const modelId = model.substring(colonIndex + 1);
1112
+ if (!modelId) {
1113
+ throw new Error(
1114
+ `Empty model ID in model string '${model}'. Expected format: 'provider:model-id'`
1115
+ );
1116
+ }
1117
+ return { provider: prefix, modelId };
1118
+ }
1119
+ }
1120
+ for (const { values, provider } of ENUM_PROVIDER_MAP) {
1121
+ if (values.has(model)) {
1122
+ return { provider, modelId: model };
1123
+ }
1124
+ }
1125
+ if (colonIndex !== -1) {
1126
+ const prefix = model.substring(0, colonIndex);
1127
+ throw new Error(
1128
+ `Unknown provider '${prefix}' in model string '${model}'. Valid providers: ${VALID_PROVIDERS.join(", ")}`
1129
+ );
1130
+ }
1131
+ throw new Error(
1132
+ `Unable to determine provider for model '${model}'. Use a provider prefix (e.g. 'openai:${model}') or a known model enum value. Valid providers: ${VALID_PROVIDERS.join(", ")}`
1133
+ );
1109
1134
  }
1110
1135
  async function callWithRetries(id, aiPayload, aiConfig, retries = 5, chunkTimeoutMs = 15e3) {
1111
1136
  try {
1112
- if (isAnthropicPayload(aiPayload)) {
1113
- return await callAnthropicWithRetries(
1114
- id,
1115
- await prepareAnthropicPayload(id, aiPayload),
1116
- aiConfig,
1117
- retries
1118
- );
1119
- }
1120
- if (isOpenAiPayload(aiPayload)) {
1121
- return await callOpenAiWithRetries(
1122
- id,
1123
- await prepareOpenAIPayload(id, aiPayload),
1124
- aiConfig,
1125
- retries,
1126
- chunkTimeoutMs
1127
- );
1128
- }
1129
- if (isGroqPayload(aiPayload)) {
1130
- return await callGroqWithRetries(
1131
- id,
1132
- prepareGroqPayload(aiPayload),
1133
- retries
1134
- );
1135
- }
1136
- if (isGoogleAIPayload(aiPayload)) {
1137
- return await callGoogleAIWithRetries(
1138
- id,
1139
- await prepareGoogleAIPayload(id, aiPayload),
1140
- retries
1141
- );
1142
- }
1143
- if (isOpenRouterPayload(aiPayload)) {
1144
- return await callOpenRouterWithRetries(
1145
- id,
1146
- prepareOpenRouterPayload(aiPayload),
1147
- retries
1148
- );
1137
+ const { provider, modelId } = parseModelString(aiPayload.model);
1138
+ const routingPayload = { ...aiPayload, model: modelId };
1139
+ switch (provider) {
1140
+ case "anthropic":
1141
+ return await callAnthropicWithRetries(
1142
+ id,
1143
+ await prepareAnthropicPayload(id, routingPayload),
1144
+ aiConfig,
1145
+ retries
1146
+ );
1147
+ case "openai":
1148
+ return await callOpenAiWithRetries(
1149
+ id,
1150
+ await prepareOpenAIPayload(id, routingPayload),
1151
+ aiConfig,
1152
+ retries,
1153
+ chunkTimeoutMs
1154
+ );
1155
+ case "groq":
1156
+ return await callGroqWithRetries(
1157
+ id,
1158
+ prepareGroqPayload(routingPayload),
1159
+ retries
1160
+ );
1161
+ case "google":
1162
+ return await callGoogleAIWithRetries(
1163
+ id,
1164
+ await prepareGoogleAIPayload(id, routingPayload),
1165
+ retries
1166
+ );
1167
+ case "openrouter":
1168
+ return await callOpenRouterWithRetries(
1169
+ id,
1170
+ prepareOpenRouterPayload(routingPayload),
1171
+ retries
1172
+ );
1149
1173
  }
1150
- throw new Error("Invalid AI payload: Unknown model type.");
1151
1174
  } catch (error2) {
1152
1175
  if (aiPayload.fallbackModel) {
1153
1176
  logger_default.error(
@@ -1180,6 +1203,7 @@ async function callWithRetries(id, aiPayload, aiConfig, retries = 5, chunkTimeou
1180
1203
  GeminiModel,
1181
1204
  GroqModel,
1182
1205
  OpenRouterModel,
1183
- callWithRetries
1206
+ callWithRetries,
1207
+ parseModelString
1184
1208
  });
1185
1209
  //# sourceMappingURL=index.js.map