@oai2lmapi/opencode-provider 0.2.1 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/plugin.js ADDED
@@ -0,0 +1,835 @@
1
+ // src/config.ts
2
+ import { readFileSync, existsSync } from "node:fs";
3
+ import { homedir } from "node:os";
4
+ import { join, isAbsolute } from "node:path";
5
// Resolve opencode's data directory.
// Honors XDG_DATA_HOME only when it is an absolute path (the XDG Base
// Directory spec says relative values must be ignored); otherwise falls
// back to ~/.local/share/opencode.
function getDataDir() {
  const xdg = process.env["XDG_DATA_HOME"];
  return xdg && isAbsolute(xdg)
    ? join(xdg, "opencode")
    : join(homedir(), ".local", "share", "opencode");
}
12
// Resolve opencode's config directory.
// Honors XDG_CONFIG_HOME only when it is an absolute path (relative XDG
// values are ignored per the spec); otherwise falls back to
// ~/.config/opencode.
function getConfigDir() {
  const xdg = process.env["XDG_CONFIG_HOME"];
  return xdg && isAbsolute(xdg)
    ? join(xdg, "opencode")
    : join(homedir(), ".config", "opencode");
}
19
// Name of the plugin's config file, looked up in both the data and config dirs.
var CONFIG_FILENAME = "oai2lm.json";
20
// Read and parse a JSON file, returning undefined on any failure.
// A missing file is treated as "no config" and is silent; parse errors and
// other I/O errors are logged as warnings.
// Fix: the original checked existsSync() before readFileSync(), which is a
// TOCTOU race (the file can vanish between the check and the read) and
// would then warn on a merely-missing file. Reading directly and treating
// ENOENT as "absent" removes the race while preserving behavior.
function readJsonFile(filepath) {
  try {
    const content = readFileSync(filepath, "utf-8");
    return JSON.parse(content);
  } catch (error) {
    if (error && error.code === "ENOENT") {
      // Missing config file is expected — stay silent, like the old
      // existsSync() guard did.
      return void 0;
    }
    if (error instanceof SyntaxError) {
      console.warn(
        `Failed to parse JSON in config file ${filepath}: ${error.message}`
      );
    } else {
      console.warn(`Failed to read config file ${filepath}:`, error);
    }
    return void 0;
  }
}
38
// Load the plugin config, merging the data-dir and config-dir copies.
// When both files exist, data-dir values win at the top level, and the
// `headers` and `modelOverrides` maps are merged one level deep (again
// data-dir wins per key). When at most one exists, it is returned as-is
// (undefined when neither exists).
function loadConfig() {
  const dataConfig = readJsonFile(join(getDataDir(), CONFIG_FILENAME));
  const configDirConfig = readJsonFile(join(getConfigDir(), CONFIG_FILENAME));
  if (!dataConfig || !configDirConfig) {
    return dataConfig || configDirConfig;
  }
  return {
    ...configDirConfig,
    ...dataConfig,
    headers: {
      ...configDirConfig.headers,
      ...dataConfig.headers
    },
    modelOverrides: {
      ...configDirConfig.modelOverrides,
      ...dataConfig.modelOverrides
    }
  };
}
61
// Resolve the API key from config, supporting two indirection syntaxes:
//   "{env:VAR_NAME}"  -> value of that environment variable
//   "{file:/path}"    -> trimmed contents of that file (undefined if unreadable)
// Any other non-empty config.apiKey is returned verbatim; with no
// config.apiKey the OAI2LM_API_KEY environment variable is the fallback.
// Fix: the original expanded any path starting with "~", so "~otheruser/key"
// was wrongly rewritten into the *current* user's home directory. Only "~"
// and "~/..." refer to this user's home, so expansion is now limited to those.
function resolveApiKey(config) {
  if (config.apiKey) {
    const envMatch = config.apiKey.match(/^\{env:(\w+)\}$/);
    if (envMatch) {
      return process.env[envMatch[1]];
    }
    const fileMatch = config.apiKey.match(/^\{file:(.+)\}$/);
    if (fileMatch) {
      try {
        let filePath = fileMatch[1];
        // Expand only "~" and "~/..." — "~otheruser/..." is a different
        // user's home and must be left untouched.
        if (filePath === "~" || filePath.startsWith("~/")) {
          filePath = join(homedir(), filePath.slice(1));
        }
        return readFileSync(filePath, "utf-8").trim();
      } catch {
        // Unreadable key file -> behave as if no key was configured.
        return void 0;
      }
    }
    return config.apiKey;
  }
  return process.env["OAI2LM_API_KEY"];
}
83
+
84
+ // src/discover.ts
85
// Fetch the provider's /models listing and normalize each entry.
// @param {string} baseURL - OpenAI-compatible API base (e.g. "https://host/v1").
// @param {string} apiKey  - bearer token sent as Authorization header.
// @param {object} [headers] - extra headers merged over the defaults.
// @returns {Promise<Array>} models with id/name/object/created/owned_by and
//   capability metadata from extractMetadataFromModel.
// @throws {Error} when the HTTP response is not ok.
// Fix: a baseURL with a trailing slash produced ".../v1//models"; trailing
// slashes are now stripped before appending the path.
async function discoverModels(baseURL, apiKey, headers) {
  const url = `${baseURL.replace(/\/+$/, "")}/models`;
  const response = await fetch(url, {
    method: "GET",
    headers: {
      Authorization: `Bearer ${apiKey}`,
      "Content-Type": "application/json",
      ...headers
    }
  });
  if (!response.ok) {
    throw new Error(
      `Failed to fetch models from ${url}: ${response.status} ${response.statusText}`
    );
  }
  const data = await response.json();
  const models = data.data || [];
  return models.map((model) => ({
    id: model.id,
    name: model.name,
    object: model.object,
    created: model.created,
    owned_by: model.owned_by,
    metadata: extractMetadataFromModel(model)
  }));
}
111
// Derive capability metadata from a raw /models entry. Only fields the
// provider actually reported are set; everything else is left absent.
function extractMetadataFromModel(model) {
  const metadata = {};
  // Token limits: the explicit max_input_tokens / max_output_tokens fields
  // take precedence over the generic context_length / max_tokens pair.
  const inputLimit = model.max_input_tokens || model.context_length;
  if (inputLimit) {
    metadata.maxInputTokens = inputLimit;
  }
  const outputLimit = model.max_output_tokens || model.max_tokens;
  if (outputLimit) {
    metadata.maxOutputTokens = outputLimit;
  }
  // Tool calling: first boolean-valued flag wins, in this priority order.
  for (const flag of ["function_call", "supports_function_calling", "supports_tools"]) {
    if (typeof model[flag] === "boolean") {
      metadata.supportsToolCalling = model[flag];
      break;
    }
  }
  // Image input: explicit boolean flags win; otherwise a "vision"/"image"
  // modality implies support.
  if (typeof model.vision === "boolean") {
    metadata.supportsImageInput = model.vision;
  } else if (typeof model.supports_vision === "boolean") {
    metadata.supportsImageInput = model.supports_vision;
  } else if (model.modalities?.includes("vision") || model.modalities?.includes("image")) {
    metadata.supportsImageInput = true;
  }
  return metadata;
}
143
+
144
+ // ../model-metadata/dist/esm/index.mjs
145
// Conservative default capability metadata — presumably the fallback when a
// model id matches no family pattern below; verify against the lookup code.
var DEFAULT_MODEL_METADATA = {
  maxInputTokens: 8192,
  maxOutputTokens: 4096,
  supportsToolCalling: false,
  supportsImageInput: false,
  modelType: "llm"
};
152
// Shorthand constructor for model metadata records; modelType defaults to "llm".
var md = (maxInputTokens, maxOutputTokens, supportsToolCalling, supportsImageInput, modelType = "llm") => {
  return {
    maxInputTokens,
    maxOutputTokens,
    supportsToolCalling,
    supportsImageInput,
    modelType
  };
};
159
+ var MODEL_FAMILY_PATTERNS = [
160
+ // ============== ByteDance Seed Family ==============
161
+ {
162
+ pattern: /(doubao-)?seed/i,
163
+ metadata: md(262144, 32768, true, true),
164
+ subPatterns: [
165
+ { pattern: /seed-1\.6-flash/i, metadata: md(262144, 16384, true, true) },
166
+ { pattern: /seed-1\.6/i, metadata: md(262144, 32768, true, true) }
167
+ ]
168
+ },
169
+ // ============== OpenAI GPT-5 Family ==============
170
+ {
171
+ pattern: /gpt-5/i,
172
+ metadata: md(4e5, 128e3, true, true),
173
+ subPatterns: [
174
+ { pattern: /gpt-5\.2-pro/i, metadata: md(4e5, 128e3, true, true) },
175
+ { pattern: /gpt-5\.2-chat/i, metadata: md(128e3, 16384, true, true) },
176
+ { pattern: /gpt-5\.2/i, metadata: md(4e5, 128e3, true, true) },
177
+ { pattern: /gpt-5\.1-codex-max/i, metadata: md(4e5, 128e3, true, true) },
178
+ { pattern: /gpt-5\.1-codex-mini/i, metadata: md(4e5, 1e5, true, true) },
179
+ { pattern: /gpt-5\.1-codex/i, metadata: md(4e5, 128e3, true, true) },
180
+ { pattern: /gpt-5\.1-chat/i, metadata: md(128e3, 16384, true, true) },
181
+ { pattern: /gpt-5\.1/i, metadata: md(4e5, 128e3, true, true) },
182
+ { pattern: /gpt-5-image-mini/i, metadata: md(4e5, 128e3, true, true) },
183
+ { pattern: /gpt-5-image/i, metadata: md(4e5, 128e3, true, true) },
184
+ { pattern: /gpt-5-pro/i, metadata: md(4e5, 128e3, true, true) },
185
+ { pattern: /gpt-5-chat/i, metadata: md(128e3, 16384, true, true) },
186
+ { pattern: /gpt-5-nano/i, metadata: md(4e5, 128e3, true, true) },
187
+ { pattern: /gpt-5-mini/i, metadata: md(4e5, 128e3, true, true) },
188
+ { pattern: /gpt-5-codex/i, metadata: md(4e5, 128e3, true, true) }
189
+ ]
190
+ },
191
+ // ============== OpenAI GPT-4.1 Family ==============
192
+ {
193
+ pattern: /gpt-4\.1/i,
194
+ metadata: md(1047576, 32768, true, true),
195
+ subPatterns: [
196
+ { pattern: /gpt-4\.1-nano/i, metadata: md(1047576, 32768, true, true) },
197
+ { pattern: /gpt-4\.1-mini/i, metadata: md(1047576, 32768, true, true) }
198
+ ]
199
+ },
200
+ // ============== OpenAI o3/o4 Reasoning Models ==============
201
+ {
202
+ pattern: /o[34]/i,
203
+ metadata: md(2e5, 1e5, true, true),
204
+ subPatterns: [
205
+ { pattern: /o4-mini-high/i, metadata: md(2e5, 1e5, true, true) },
206
+ { pattern: /o4-mini-deep-research/i, metadata: md(2e5, 1e5, true, true) },
207
+ { pattern: /o4-mini/i, metadata: md(2e5, 1e5, true, true) },
208
+ { pattern: /o3-pro/i, metadata: md(2e5, 1e5, true, true) },
209
+ { pattern: /o3-mini-high/i, metadata: md(2e5, 1e5, true, false) },
210
+ { pattern: /o3-mini/i, metadata: md(2e5, 1e5, true, false) },
211
+ { pattern: /o3-deep-research/i, metadata: md(2e5, 1e5, true, true) }
212
+ ]
213
+ },
214
+ // ============== OpenAI GPT-4o Family ==============
215
+ {
216
+ pattern: /gpt-4o/i,
217
+ metadata: md(128e3, 16384, true, true),
218
+ subPatterns: [
219
+ { pattern: /gpt-4o-mini/i, metadata: md(128e3, 16384, true, true) },
220
+ { pattern: /chatgpt-4o/i, metadata: md(128e3, 16384, true, true) }
221
+ ]
222
+ },
223
+ // ============== OpenAI GPT-4 Turbo Family ==============
224
+ {
225
+ pattern: /gpt-4-turbo/i,
226
+ metadata: md(128e3, 4096, true, true),
227
+ subPatterns: [
228
+ { pattern: /gpt-4-turbo-preview/i, metadata: md(128e3, 4096, true, false) }
229
+ ]
230
+ },
231
+ // ============== OpenAI GPT-4 Family ==============
232
+ {
233
+ pattern: /gpt-4/i,
234
+ metadata: md(8192, 4096, true, false),
235
+ subPatterns: [
236
+ { pattern: /gpt-4-1106-preview/i, metadata: md(128e3, 4096, true, false) },
237
+ { pattern: /gpt-4-0314/i, metadata: md(8192, 4096, true, false) }
238
+ ]
239
+ },
240
+ // ============== OpenAI Codex Family ==============
241
+ { pattern: /codex-mini/i, metadata: md(2e5, 1e5, true, true) },
242
+ // ============== OpenAI GPT-OSS Models ==============
243
+ {
244
+ pattern: /gpt-oss/i,
245
+ metadata: md(131072, 65536, true, false),
246
+ subPatterns: [
247
+ { pattern: /gpt-oss-safeguard-20b/i, metadata: md(131072, 65536, true, false) },
248
+ { pattern: /gpt-oss-120b/i, metadata: md(131072, 65536, true, false) },
249
+ { pattern: /gpt-oss-20b/i, metadata: md(131072, 65536, true, false) }
250
+ ]
251
+ },
252
+ // ============== Anthropic Claude 4 Family ==============
253
+ {
254
+ pattern: /claude-(opus|sonnet|haiku)-4/i,
255
+ metadata: { maxInputTokens: 2e5, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
256
+ subPatterns: [
257
+ { pattern: /claude-opus-4\.5/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
258
+ { pattern: /claude-sonnet-4\.5/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
259
+ { pattern: /claude-haiku-4\.5/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
260
+ { pattern: /claude-opus-4\.1/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
261
+ { pattern: /claude-opus-4(?![.\d])/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
262
+ { pattern: /claude-sonnet-4(?![.\d])/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
263
+ ]
264
+ },
265
+ // ============== Anthropic Claude 3.x Family ==============
266
+ {
267
+ pattern: /claude-3/i,
268
+ metadata: md(2e5, 4096, true, true),
269
+ subPatterns: [
270
+ { pattern: /claude-3\.7-sonnet/i, metadata: md(2e5, 128e3, true, true) },
271
+ { pattern: /claude-3\.5-sonnet/i, metadata: md(2e5, 8192, true, true) },
272
+ { pattern: /claude-3\.5-haiku/i, metadata: md(2e5, 8192, true, true) },
273
+ { pattern: /claude-3-opus/i, metadata: md(2e5, 4096, true, true) },
274
+ { pattern: /claude-3-haiku/i, metadata: md(2e5, 4096, true, true) }
275
+ ]
276
+ },
277
+ // ============== Google Gemini 3 Family ==============
278
+ {
279
+ pattern: /gemini-3/i,
280
+ metadata: { maxInputTokens: 1048576, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
281
+ subPatterns: [
282
+ { pattern: /gemini-3-pro-image/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 32768, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
283
+ { pattern: /gemini-3-pro/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
284
+ { pattern: /gemini-3-flash/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
285
+ ]
286
+ },
287
+ // ============== Google Gemini 2.5 Family ==============
288
+ {
289
+ pattern: /gemini-2\.5/i,
290
+ metadata: { maxInputTokens: 1048576, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
291
+ subPatterns: [
292
+ { pattern: /gemini-2\.5-pro/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
293
+ { pattern: /gemini-2\.5-flash-image/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
294
+ { pattern: /gemini-2\.5-flash-lite/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
295
+ { pattern: /gemini-2\.5-flash/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
296
+ ]
297
+ },
298
+ // ============== Google Gemini 2.0 Family ==============
299
+ { pattern: /gemini-2\.0/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
300
+ // ============== Google Gemma Family ==============
301
+ {
302
+ pattern: /gemma/i,
303
+ metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" },
304
+ subPatterns: [
305
+ { pattern: /gemma-2-27b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
306
+ { pattern: /gemma-2-9b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
307
+ { pattern: /gemma-3n/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } }
308
+ ]
309
+ },
310
+ // ============== Qwen3 Family ==============
311
+ {
312
+ pattern: /qwen3/i,
313
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
314
+ subPatterns: [
315
+ { pattern: /qwen3-coder-(480b|plus)/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
316
+ { pattern: /qwen3-coder-(30b|flash)/i, metadata: { maxInputTokens: 16e4, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
317
+ { pattern: /qwen3-coder/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
318
+ { pattern: /qwen3-vl-235b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
319
+ { pattern: /qwen3-vl-32b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
320
+ { pattern: /qwen3-vl-30b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
321
+ { pattern: /qwen3-vl-8b/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
322
+ { pattern: /qwen3-vl/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
323
+ { pattern: /qwen3-max/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
324
+ { pattern: /qwen3-next/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
325
+ { pattern: /qwen3-235b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
326
+ { pattern: /qwen3-32b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
327
+ { pattern: /qwen3-30b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
328
+ { pattern: /qwen3-14b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
329
+ { pattern: /qwen3-8b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 2e4, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
330
+ { pattern: /qwen3-4b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
331
+ ]
332
+ },
333
+ // ============== Qwen2.5 Family ==============
334
+ {
335
+ pattern: /qwen2\.5/i,
336
+ metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
337
+ subPatterns: [
338
+ { pattern: /qwen2\.5-72b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
339
+ { pattern: /qwen2\.5-vl/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
340
+ { pattern: /qwen2\.5-coder/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
341
+ ]
342
+ },
343
+ // ============== Qwen Family ==============
344
+ {
345
+ pattern: /qwen/i,
346
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
347
+ subPatterns: [
348
+ { pattern: /qwen-coder-(480b|plus)/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
349
+ { pattern: /qwen-coder-(30b|flash)/i, metadata: { maxInputTokens: 16e4, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
350
+ { pattern: /qwen-coder/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
351
+ { pattern: /qwen-vl-235b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
352
+ { pattern: /qwen-vl-32b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
353
+ { pattern: /qwen-vl-30b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
354
+ { pattern: /qwen-vl-8b/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
355
+ { pattern: /qwen-vl/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
356
+ { pattern: /qwen-max/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
357
+ { pattern: /qwen-next/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
358
+ { pattern: /qwen-235b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
359
+ { pattern: /qwen-32b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
360
+ { pattern: /qwen-30b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
361
+ { pattern: /qwen-14b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
362
+ { pattern: /qwen-8b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 2e4, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
363
+ { pattern: /qwen-4b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
364
+ ]
365
+ },
366
+ // ============== QwQ/QvQ Reasoning Models ==============
367
+ { pattern: /qwq-32b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
368
+ { pattern: /qwq/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
369
+ { pattern: /qvq/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 16384, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
370
+ // ============== DeepSeek Family ==============
371
+ {
372
+ pattern: /deepseek/i,
373
+ metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
374
+ subPatterns: [
375
+ { pattern: /deepseek-v3\.2-exp/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
376
+ { pattern: /deepseek-v3\.2-speciale/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
377
+ { pattern: /deepseek-v3\.2/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
378
+ { pattern: /deepseek-v3\.1-terminus/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
379
+ { pattern: /deepseek-v3\.1-nex-n1/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
380
+ { pattern: /deepseek-(v3\.1|chat-v3\.1)/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
381
+ { pattern: /deepseek-r1-distill-llama-70b/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
382
+ { pattern: /deepseek-r1-distill-qwen-32b/i, metadata: { maxInputTokens: 64e3, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
383
+ { pattern: /deepseek-r1-distill-qwen-14b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
384
+ { pattern: /deepseek-r1-0528-qwen3-8b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
385
+ { pattern: /deepseek-r1-0528/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
386
+ { pattern: /deepseek-r1/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
387
+ { pattern: /deepseek-prover-v2/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
388
+ { pattern: /deepseek-prover/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
389
+ { pattern: /deepseek-chat-v3-0324/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
390
+ { pattern: /deepseek-chat/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
391
+ ]
392
+ },
393
+ // ============== Meta Llama 4 Family ==============
394
+ {
395
+ pattern: /llama-4/i,
396
+ metadata: { maxInputTokens: 1048576, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
397
+ subPatterns: [
398
+ { pattern: /llama-4-maverick/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
399
+ { pattern: /llama-4-scout/i, metadata: { maxInputTokens: 1e7, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
400
+ ]
401
+ },
402
+ // ============== Meta Llama 3.x Family ==============
403
+ {
404
+ pattern: /llama-3(\.[123])?/i,
405
+ metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
406
+ subPatterns: [
407
+ { pattern: /llama-3\.3-70b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
408
+ { pattern: /llama-3\.2-90b-vision/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
409
+ { pattern: /llama-3\.2-11b-vision/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
410
+ { pattern: /llama-3\.2-1b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
411
+ { pattern: /llama-3\.1-405b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
412
+ { pattern: /llama-3\.1-70b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
413
+ { pattern: /llama-3\.1-8b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
414
+ { pattern: /llama-3-70b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
415
+ { pattern: /llama-3-8b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
416
+ { pattern: /llama-3\.3-nemotron/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
417
+ ]
418
+ },
419
+ // ============== Meta Llama Guard Family ==============
420
+ {
421
+ pattern: /llama-guard/i,
422
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "other" },
423
+ subPatterns: [
424
+ { pattern: /llama-guard-4-12b/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "other" } },
425
+ { pattern: /llama-guard-3-8b/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "other" } },
426
+ { pattern: /llama-guard-2-8b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "other" } }
427
+ ]
428
+ },
429
+ // ============== Mistral Family ==============
430
+ {
431
+ pattern: /(mistral|mixtral|pixtral|codestral|devstral)/i,
432
+ metadata: md(32768, 8192, true, false),
433
+ subPatterns: [
434
+ { pattern: /mistral-large/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
435
+ { pattern: /mistral-nemo/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
436
+ { pattern: /devstral/i, metadata: md(262144, 32768, true, false) },
437
+ { pattern: /pixtral/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
438
+ { pattern: /mistral-small/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
439
+ { pattern: /mixtral-8x22b/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
440
+ { pattern: /mixtral-8x7b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
441
+ { pattern: /mistral-7b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
442
+ ]
443
+ },
444
+ // ============== xAI Grok Family ==============
445
+ {
446
+ pattern: /grok/i,
447
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
448
+ subPatterns: [
449
+ { pattern: /grok-4\.1-fast/i, metadata: { maxInputTokens: 2e6, maxOutputTokens: 3e4, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
450
+ { pattern: /grok-4-fast/i, metadata: { maxInputTokens: 2e6, maxOutputTokens: 3e4, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
451
+ { pattern: /grok-4/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
452
+ { pattern: /grok-3-mini/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
453
+ { pattern: /grok-3/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
454
+ { pattern: /grok-code-fast/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 1e4, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
455
+ ]
456
+ },
457
+ // ============== Moonshot/Kimi Family ==============
458
+ {
459
+ pattern: /(kimi|moonshot)/i,
460
+ metadata: md(262144, 262144, true, false),
461
+ subPatterns: [
462
+ { pattern: /kimi-k2/i, metadata: md(262144, 262144, true, false) }
463
+ ]
464
+ },
465
+ // ============== Amazon Nova Family ==============
466
+ {
467
+ pattern: /nova/i,
468
+ metadata: { maxInputTokens: 1e6, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
469
+ subPatterns: [
470
+ { pattern: /nova-premier/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
471
+ { pattern: /nova-2-lite/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
472
+ { pattern: /nova-lite/i, metadata: { maxInputTokens: 3e5, maxOutputTokens: 5120, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
473
+ { pattern: /nova-micro/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 5120, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
474
+ { pattern: /nova-pro/i, metadata: { maxInputTokens: 3e5, maxOutputTokens: 5120, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
475
+ ]
476
+ },
477
+ // ============== Cohere Command Family ==============
478
+ {
479
+ pattern: /command/i,
480
+ metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
481
+ subPatterns: [
482
+ { pattern: /command-r-plus/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
483
+ { pattern: /command-r/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
484
+ ]
485
+ },
486
+ // ============== NVIDIA Nemotron Family ==============
487
+ {
488
+ pattern: /nemotron/i,
489
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
490
+ subPatterns: [
491
+ { pattern: /nemotron-3-nano/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
492
+ { pattern: /nemotron-nano.*vl/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
493
+ { pattern: /nemotron-nano/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
494
+ { pattern: /llama.*nemotron/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
495
+ ]
496
+ },
497
+ // ============== MiniMax Family ==============
498
+ { pattern: /minimax/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
499
+ // ============== Z.AI GLM Family ==============
500
+ {
501
+ pattern: /glm/i,
502
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
503
+ subPatterns: [
504
+ { pattern: /glm-4\.6v/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 24e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
505
+ { pattern: /glm-4\.6/i, metadata: { maxInputTokens: 204800, maxOutputTokens: 204800, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
506
+ { pattern: /glm-4\.5v/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
507
+ { pattern: /glm-4\.5/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
508
+ { pattern: /glm-4/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
509
+ ]
510
+ },
511
+ // ============== THUDM GLM Family ==============
512
+ // Note: This pattern is now subsumed by the Z.AI GLM pattern above
513
+ // ============== Baidu ERNIE Family ==============
514
+ { pattern: /ernie/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
515
+ // ============== Tencent Hunyuan Family ==============
516
+ { pattern: /hunyuan/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
517
+ // ============== AI21 Jamba Family ==============
518
+ { pattern: /jamba/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
519
+ // ============== Perplexity Sonar Family ==============
520
+ { pattern: /sonar/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 8e3, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
521
+ // ============== Microsoft Phi Family ==============
522
+ {
523
+ pattern: /phi-3/i,
524
+ metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
525
+ subPatterns: [
526
+ { pattern: /phi-3\.5-mini/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
527
+ { pattern: /phi-3-mini/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
528
+ { pattern: /phi-3-medium/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
529
+ ]
530
+ },
531
+ // ============== IBM Granite Family ==============
532
+ { pattern: /granite/i, metadata: { maxInputTokens: 131e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
533
+ // ============== Nous Research Hermes Family ==============
534
+ { pattern: /hermes/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
535
+ { pattern: /deephermes/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
536
+ // ============== Inception Mercury Family ==============
537
+ { pattern: /mercury/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
538
+ // ============== StepFun Step Family ==============
539
+ { pattern: /step/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
540
+ // ============== Deep Cogito Family ==============
541
+ { pattern: /cogito/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
542
+ // ============== Prime Intellect Family ==============
543
+ { pattern: /intellect/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
544
+ // ============== AllenAI Olmo Family ==============
545
+ { pattern: /olmo/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
546
+ // ============== Arcee AI Family ==============
547
+ // Match model series names: trinity, virtuoso, coder, maestro, spotlight
548
+ {
549
+ pattern: /(trinity|virtuoso|maestro|spotlight)/i,
550
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
551
+ subPatterns: [
552
+ { pattern: /virtuoso-large/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
553
+ { pattern: /maestro-reasoning/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
554
+ { pattern: /coder-large/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
555
+ { pattern: /spotlight/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65537, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
556
+ { pattern: /trinity-mini/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
557
+ ]
558
+ },
559
+ // ============== Meituan LongCat Family ==============
560
+ { pattern: /longcat/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
561
+ // ============== Morph Family ==============
562
+ { pattern: /morph/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
563
+ // ============== Relace Family ==============
564
+ { pattern: /relace/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 128e3, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
565
+ // ============== TNG Chimera Family ==============
566
+ { pattern: /tng.*chimera/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
567
+ // ============== Xiaomi MiMo Family ==============
568
+ { pattern: /mimo/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
569
+ // ============== Alibaba Tongyi Family ==============
570
+ { pattern: /tongyi/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
571
+ // ============== KwaiPilot KAT Family ==============
572
+ { pattern: /(kwaipilot.*kat|kat-coder)/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
573
+ // ============== Liquid AI LFM Family ==============
574
+ { pattern: /lfm/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
575
+ // ============== OpenGVLab InternVL Family ==============
576
+ { pattern: /internvl/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
577
+ // ============== ByteDance UI-TARS Family ==============
578
+ { pattern: /ui-tars/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 2048, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
579
+ // ============== Aion Labs Family ==============
580
+ { pattern: /aion/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
581
+ // ============== Essential AI Rnj Family ==============
582
+ { pattern: /rnj/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
583
+ // ============== Switchpoint Router ==============
584
+ { pattern: /switchpoint/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
585
+ ];
586
// Patterns identifying models that are not chat LLMs (embeddings, rerankers,
// image/audio generators, moderation). Checked by getModelMetadata only after
// the LLM family patterns fail to match. First hit wins, so order matters.
var NON_LLM_PATTERNS = [
  // Embedding models
  { pattern: /embed(ding)?/i, modelType: "embedding" },
  { pattern: /text-embed/i, modelType: "embedding" },
  { pattern: /bge-/i, modelType: "embedding" },
  { pattern: /e5-/i, modelType: "embedding" },
  { pattern: /gte-/i, modelType: "embedding" },
  { pattern: /sentence-/i, modelType: "embedding" },
  { pattern: /all-minilm/i, modelType: "embedding" },
  { pattern: /nomic-embed/i, modelType: "embedding" },
  { pattern: /jina-embed/i, modelType: "embedding" },
  { pattern: /voyage-/i, modelType: "embedding" },
  // Rerank models
  { pattern: /rerank/i, modelType: "rerank" },
  { pattern: /ranker/i, modelType: "rerank" },
  { pattern: /jina-reranker/i, modelType: "rerank" },
  // Image generation models
  { pattern: /dall-e/i, modelType: "image" },
  { pattern: /stable-diffusion/i, modelType: "image" },
  { pattern: /sdxl/i, modelType: "image" },
  { pattern: /midjourney/i, modelType: "image" },
  { pattern: /imagen/i, modelType: "image" },
  { pattern: /flux-\d/i, modelType: "image" },
  { pattern: /playground-v/i, modelType: "image" },
  { pattern: /ideogram/i, modelType: "image" },
  { pattern: /recraft/i, modelType: "image" },
  // Audio models
  { pattern: /whisper/i, modelType: "audio" },
  { pattern: /tts-/i, modelType: "audio" },
  { pattern: /^speech[-_]/i, modelType: "audio" },
  { pattern: /voxtral/i, modelType: "audio" },
  // Moderation models
  // NOTE(review): /guard/ is broad — ids like "llama-guard" only reach this
  // list if no LLM family pattern matched first; verify ordering is intended.
  { pattern: /moderation/i, modelType: "other" },
  { pattern: /content-filter/i, modelType: "other" },
  { pattern: /guard/i, modelType: "other" },
  { pattern: /safeguard/i, modelType: "other" }
];
623
/**
 * Normalize a model id for pattern matching: lowercase it, strip a known
 * provider-namespace prefix, then peel common variant/version suffixes.
 * Each suffix regex is applied once, in a fixed order.
 * @param {string} modelId raw model id as reported by the API
 * @returns {string} normalized id
 */
function normalizeModelId(modelId) {
  const PROVIDER_PREFIX = /^(openai\/|anthropic\/|google\/|meta-llama\/|mistralai\/|cohere\/|qwen\/|deepseek\/|deepseek-ai\/|microsoft\/|nvidia\/|x-ai\/|amazon\/|ai21\/|perplexity\/|ibm-granite\/|z-ai\/|thudm\/|baidu\/|tencent\/|moonshotai\/|stepfun-ai\/|nousresearch\/|prime-intellect\/|allenai\/|arcee-ai\/|meituan\/|morph\/|relace\/|inception\/|minimax\/|opengvlab\/|bytedance\/|liquid\/|tngtech\/|xiaomi\/|alibaba\/|kwaipilot\/|deepcogito\/|essentialai\/)/;
  const SUFFIXES = [
    /-instruct$/,
    /-chat$/,
    /-preview$/,
    /-latest$/,
    /:free$/,
    /:extended$/,
    /:exacto$/,
    /:thinking$/,
    /@\d{4}-\d{2}-\d{2}$/,
    /[-_](\d{8})$/
  ];
  let id = modelId.toLowerCase().replace(PROVIDER_PREFIX, "");
  for (const suffix of SUFFIXES) {
    id = id.replace(suffix, "");
  }
  return id;
}
626
/**
 * Test a model id against a single pattern.
 * RegExp patterns are tested directly; string patterns do a
 * case-insensitive prefix match.
 * @param {string} modelId model id to test
 * @param {RegExp|string} pattern pattern to match against
 * @returns {boolean} true when the pattern matches
 */
function matchPattern(modelId, pattern) {
  return pattern instanceof RegExp
    ? pattern.test(modelId)
    : modelId.toLowerCase().startsWith(pattern.toLowerCase());
}
632
/**
 * Resolve metadata for a model id from a hierarchical pattern list.
 * The first family whose pattern matches wins; within it, the first
 * matching sub-pattern refines the result (recursing for nested
 * sub-patterns). Falls back to the family metadata when no sub-pattern
 * matches, and to null when no family matches at all.
 * @param {string} modelId model id to classify
 * @param {Array} patterns family pattern entries ({pattern, metadata, subPatterns?})
 * @returns {object|null} matched metadata, or null
 */
function matchHierarchicalPattern(modelId, patterns) {
  for (const family of patterns) {
    if (!matchPattern(modelId, family.pattern)) {
      continue;
    }
    for (const sub of family.subPatterns ?? []) {
      if (!matchPattern(modelId, sub.pattern)) {
        continue;
      }
      if (sub.subPatterns) {
        // Recurse one level down by treating the sub-pattern as a family.
        const nested = matchHierarchicalPattern(modelId, [sub]);
        if (nested) {
          return nested;
        }
      }
      return sub.metadata;
    }
    return family.metadata;
  }
  return null;
}
653
/**
 * Look up metadata for a model id.
 * Resolution order: raw id against the LLM family patterns, then the
 * normalized id (provider prefix / suffixes stripped), then the non-LLM
 * patterns (which only override modelType), finally the defaults.
 * @param {unknown} modelId model id; anything but a non-empty string yields defaults
 * @returns {object} resolved model metadata
 */
function getModelMetadata(modelId) {
  // Guard: non-strings and the empty string get the fallback metadata.
  if (typeof modelId !== "string" || modelId === "") {
    return DEFAULT_MODEL_METADATA;
  }
  const direct = matchHierarchicalPattern(modelId, MODEL_FAMILY_PATTERNS);
  if (direct) {
    return direct;
  }
  const normalized = matchHierarchicalPattern(normalizeModelId(modelId), MODEL_FAMILY_PATTERNS);
  if (normalized) {
    return normalized;
  }
  // Known non-LLM families (embeddings, rerankers, image/audio, moderation).
  const nonLlm = NON_LLM_PATTERNS.find(({ pattern }) => pattern.test(modelId));
  if (nonLlm) {
    return {
      ...DEFAULT_MODEL_METADATA,
      modelType: nonLlm.modelType
    };
  }
  return DEFAULT_MODEL_METADATA;
}
676
// Back-compat alias for getModelMetadata, kept on the public surface.
function getModelMetadataFromPatterns(modelId) {
  return getModelMetadata(modelId);
}
679
+
680
+ // src/metadata.ts
681
// Bundler-renamed re-export of getModelMetadataFromPatterns (from src/metadata.ts).
function getModelMetadataFromPatterns2(modelId) {
  return getModelMetadataFromPatterns(modelId);
}
684
/**
 * Overlay API-reported metadata on top of pattern-derived metadata.
 * API values win field-by-field when present (nullish coalescing), except
 * modelType, which always comes from the pattern classification.
 * @param {object|null|undefined} apiMetadata metadata reported by the API, if any
 * @param {object} patternMetadata metadata derived from id patterns
 * @returns {object} merged metadata
 */
function mergeMetadata(apiMetadata, patternMetadata) {
  if (!apiMetadata) {
    return patternMetadata;
  }
  const pick = (apiValue, patternValue) => apiValue ?? patternValue;
  return {
    maxInputTokens: pick(apiMetadata.maxInputTokens, patternMetadata.maxInputTokens),
    maxOutputTokens: pick(apiMetadata.maxOutputTokens, patternMetadata.maxOutputTokens),
    supportsToolCalling: pick(apiMetadata.supportsToolCalling, patternMetadata.supportsToolCalling),
    supportsImageInput: pick(apiMetadata.supportsImageInput, patternMetadata.supportsImageInput),
    modelType: patternMetadata.modelType
  };
}
696
+
697
+ // src/plugin.ts
698
/**
 * Build a ready-to-paste opencode.json snippet for the discovered models.
 * Pattern-derived metadata fills any gaps the API response left, and the
 * baseURL/apiKey fields are emitted as placeholders for the user to edit.
 * @param {Array<{id: string, name?: string, metadata?: object}>} models discovered models
 * @param {string} [providerName="custom-provider"] provider key in the config
 * @returns {string} pretty-printed JSON configuration
 */
function generateModelsConfig(models, providerName = "custom-provider") {
  const modelEntries = {};
  for (const model of models) {
    const fromPatterns = getModelMetadataFromPatterns2(model.id);
    const merged = mergeMetadata(model.metadata, fromPatterns);
    modelEntries[model.id] = {
      name: model.name || model.id,
      tool_call: merged.supportsToolCalling ?? true,
      attachment: merged.supportsImageInput ?? false,
      limit: {
        // Conservative fallbacks when neither API nor patterns gave limits.
        context: merged.maxInputTokens || 128e3,
        output: merged.maxOutputTokens || 16384
      }
    };
  }
  return JSON.stringify(
    {
      provider: {
        [providerName]: {
          name: providerName,
          npm: "@oai2lmapi/opencode-provider",
          options: {
            baseURL: "YOUR_API_BASE_URL",
            apiKey: "{env:YOUR_API_KEY_ENV}"
          },
          models: modelEntries
        }
      }
    },
    null,
    2
  );
}
728
/**
 * Render the discovery result as markdown: a header, a fenced JSON block
 * with the generated opencode.json snippet, and a per-model summary of
 * limits and capabilities.
 * @param {Array<{id: string, name?: string, metadata?: object}>} models discovered models
 * @param {string} providerName provider key used in the generated config
 * @returns {string} markdown text
 */
function formatModelsForDisplay(models, providerName) {
  const out = [];
  out.push(`# Discovered ${models.length} models`, "");
  out.push("## Quick Setup", "");
  out.push("Add the following to your opencode.json:", "");
  out.push("```json", generateModelsConfig(models, providerName), "```", "");
  out.push("## Model Details", "");
  for (const model of models) {
    const merged = mergeMetadata(model.metadata, getModelMetadataFromPatterns2(model.id));
    out.push(`### ${model.id}`);
    // Only show the display name when it differs from the id.
    if (model.name && model.name !== model.id) {
      out.push(`- Name: ${model.name}`);
    }
    out.push(
      `- Context: ${merged.maxInputTokens || "unknown"} tokens`,
      `- Output: ${merged.maxOutputTokens || "unknown"} tokens`,
      `- Tool Calling: ${merged.supportsToolCalling ? "Yes" : "No"}`,
      `- Vision: ${merged.supportsImageInput ? "Yes" : "No"}`,
      ""
    );
  }
  return out.join("\n");
}
758
// Plugin entry point: registers the `oai2lm_discover` tool with the host.
// All failure modes are reported as human-readable strings rather than
// thrown errors (see the catch at the bottom of execute).
async function oai2lmPlugin(_input) {
  return {
    tool: {
      oai2lm_discover: {
        description: `Discover available models from an OpenAI-compatible API and generate opencode.json configuration.

This tool will:
1. Connect to the specified API endpoint
2. Fetch all available models
3. Enrich them with metadata (token limits, capabilities)
4. Generate ready-to-use opencode.json configuration

Use this when you want to add a new OpenAI-compatible provider to OpenCode.`,
        // JSON-schema style parameter declaration; every field is optional
        // because values can also come from the oai2lm.json config file.
        parameters: {
          type: "object",
          properties: {
            baseURL: {
              type: "string",
              description: "Base URL of the OpenAI-compatible API (e.g., https://api.example.com/v1). If not provided, will try to load from oai2lm.json config."
            },
            apiKey: {
              type: "string",
              description: "API key for authentication. If not provided, will try to load from config or environment."
            },
            providerName: {
              type: "string",
              description: "Name for the provider in opencode.json (e.g., 'my-api'). Defaults to 'custom-provider'."
            },
            filter: {
              type: "string",
              description: "Optional regex pattern to filter models (e.g., 'gpt|claude')"
            }
          },
          required: []
        },
        execute: async (args) => {
          try {
            // Explicit tool arguments win over values from the config file.
            const config = loadConfig();
            const baseURL = args.baseURL || config?.baseURL || "";
            const apiKey = args.apiKey || (config ? resolveApiKey(config) : "") || "";
            const providerName = args.providerName || config?.name || "custom-provider";
            const filter = args.filter || config?.modelFilter;
            if (!baseURL) {
              return `Error: No baseURL provided. Either:
1. Pass it as an argument: oai2lm_discover(baseURL: "https://api.example.com/v1")
2. Or create an oai2lm.json config file with baseURL

Example oai2lm.json:
{
  "baseURL": "https://api.example.com/v1",
  "apiKey": "your-api-key"
}`;
            }
            let models = await discoverModels(baseURL, apiKey, config?.headers);
            if (filter) {
              // User-supplied filter is compiled as a case-insensitive regex
              // and applied to model ids.
              const filterRegex = new RegExp(filter, "i");
              models = models.filter((m) => filterRegex.test(m.id));
            }
            if (models.length === 0) {
              return `No models found at ${baseURL}/models. Check your API key and endpoint.`;
            }
            return formatModelsForDisplay(models, providerName);
          } catch (error) {
            // Surface errors as a string result instead of throwing into the host.
            const message = error instanceof Error ? error.message : String(error);
            return `Error discovering models: ${message}`;
          }
        }
      }
    }
  };
}
829
// Module exports: the plugin factory as both default and named export, plus
// generateModelsConfig for programmatic use. Sourcemap comment is bundler output.
var plugin_default = oai2lmPlugin;
export {
  plugin_default as default,
  generateModelsConfig,
  oai2lmPlugin
};
//# sourceMappingURL=plugin.js.map