@oai2lmapi/opencode-provider 0.2.1 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js ADDED
@@ -0,0 +1,888 @@
1
+ #!/usr/bin/env node
2
+
3
+ // src/config.ts
4
+ import { readFileSync, existsSync } from "node:fs";
5
+ import { homedir } from "node:os";
6
+ import { join, isAbsolute } from "node:path";
7
// Resolve the opencode data directory. Honors XDG_DATA_HOME when it is
// set to an absolute path; otherwise falls back to ~/.local/share/opencode.
function getDataDir() {
  const xdg = process.env["XDG_DATA_HOME"];
  return xdg && isAbsolute(xdg)
    ? join(xdg, "opencode")
    : join(homedir(), ".local", "share", "opencode");
}
14
// Resolve the opencode config directory. Honors XDG_CONFIG_HOME when it
// is set to an absolute path; otherwise falls back to ~/.config/opencode.
function getConfigDir() {
  const xdg = process.env["XDG_CONFIG_HOME"];
  return xdg && isAbsolute(xdg)
    ? join(xdg, "opencode")
    : join(homedir(), ".config", "opencode");
}
21
// Name of the config file looked up in both the data dir and config dir.
var CONFIG_FILENAME = "oai2lm.json";
22
// Read and parse a JSON file, returning undefined when the file is
// missing, unreadable, or not valid JSON. Failures other than a missing
// file are reported via console.warn so a corrupt config never crashes
// startup.
function readJsonFile(filepath) {
  let content;
  try {
    // Read directly instead of existsSync + readFileSync: avoids a
    // check-then-use race if the file disappears between the two calls.
    content = readFileSync(filepath, "utf-8");
  } catch (error) {
    // A missing file is the normal "no config present" case -- stay silent.
    if (error && error.code === "ENOENT") {
      return void 0;
    }
    console.warn(`Failed to read config file ${filepath}:`, error);
    return void 0;
  }
  try {
    return JSON.parse(content);
  } catch (error) {
    console.warn(
      `Failed to parse JSON in config file ${filepath}: ${error.message}`
    );
    return void 0;
  }
}
40
// Load oai2lm.json from the data dir and the config dir. When both
// exist, data-dir values win at the top level, and the `headers` /
// `modelOverrides` maps are merged key-by-key (data dir winning again).
// When only one exists, it is returned as-is; when neither does, the
// result is undefined.
function loadConfig() {
  const fromData = readJsonFile(join(getDataDir(), CONFIG_FILENAME));
  const fromConfig = readJsonFile(join(getConfigDir(), CONFIG_FILENAME));
  if (!fromData || !fromConfig) {
    return fromData || fromConfig;
  }
  return {
    ...fromConfig,
    ...fromData,
    headers: {
      ...fromConfig.headers,
      ...fromData.headers
    },
    modelOverrides: {
      ...fromConfig.modelOverrides,
      ...fromData.modelOverrides
    }
  };
}
63
// Resolve the configured API key. Supports three forms:
//   "{env:NAME}"  -> read from the named environment variable
//   "{file:path}" -> read and trim the file contents; a leading ~ expands
//                    to the home directory
//   anything else -> returned literally
// Falls back to the OAI2LM_API_KEY environment variable when no key is
// configured; an unreadable key file yields undefined.
function resolveApiKey(config) {
  const key = config.apiKey;
  if (!key) {
    return process.env["OAI2LM_API_KEY"];
  }
  const fromEnv = /^\{env:(\w+)\}$/.exec(key);
  if (fromEnv) {
    return process.env[fromEnv[1]];
  }
  const fromFile = /^\{file:(.+)\}$/.exec(key);
  if (fromFile) {
    try {
      const raw = fromFile[1];
      const resolved = raw.startsWith("~")
        ? join(homedir(), raw.slice(1))
        : raw;
      return readFileSync(resolved, "utf-8").trim();
    } catch {
      // Unreadable key file -> behave as if no key were configured.
      return void 0;
    }
  }
  return key;
}
85
+
86
+ // src/discover.ts
87
// GET {baseURL}/models and normalize each returned entry into the
// provider's model shape, deriving capability metadata from whatever
// hints the endpoint exposes. Extra `headers` are spread last so callers
// can override Authorization / Content-Type. Throws on a non-2xx status.
async function discoverModels(baseURL, apiKey, headers) {
  const url = `${baseURL}/models`;
  const requestHeaders = {
    Authorization: `Bearer ${apiKey}`,
    "Content-Type": "application/json",
    ...headers
  };
  const response = await fetch(url, { method: "GET", headers: requestHeaders });
  if (!response.ok) {
    throw new Error(
      `Failed to fetch models from ${url}: ${response.status} ${response.statusText}`
    );
  }
  const payload = await response.json();
  return (payload.data || []).map((model) => ({
    id: model.id,
    name: model.name,
    object: model.object,
    created: model.created,
    owned_by: model.owned_by,
    metadata: extractMetadataFromModel(model)
  }));
}
113
// Derive capability metadata from whatever hints a /models entry carries.
// Fields that cannot be determined are simply omitted from the result.
function extractMetadataFromModel(model) {
  const metadata = {};
  // Token limits: the generic context_length/max_tokens hints are read
  // first, then overridden by the more specific max_input/output fields.
  if (model.context_length) metadata.maxInputTokens = model.context_length;
  if (model.max_tokens) metadata.maxOutputTokens = model.max_tokens;
  if (model.max_input_tokens) metadata.maxInputTokens = model.max_input_tokens;
  if (model.max_output_tokens) metadata.maxOutputTokens = model.max_output_tokens;
  // Tool calling: the first explicitly-boolean flag wins, in priority order.
  const toolFlags = [
    model.function_call,
    model.supports_function_calling,
    model.supports_tools
  ];
  for (const flag of toolFlags) {
    if (typeof flag === "boolean") {
      metadata.supportsToolCalling = flag;
      break;
    }
  }
  // Vision: explicit booleans take priority over modality lists.
  if (typeof model.vision === "boolean") {
    metadata.supportsImageInput = model.vision;
  } else if (typeof model.supports_vision === "boolean") {
    metadata.supportsImageInput = model.supports_vision;
  } else if (
    model.modalities?.includes("vision") ||
    model.modalities?.includes("image")
  ) {
    metadata.supportsImageInput = true;
  }
  return metadata;
}
145
+
146
+ // ../model-metadata/dist/esm/index.mjs
147
// Conservative fallback metadata applied when a model id matches no known
// family pattern and the endpoint supplied no capability hints.
var DEFAULT_MODEL_METADATA = {
  maxInputTokens: 8192,
  maxOutputTokens: 4096,
  supportsToolCalling: false,
  supportsImageInput: false,
  modelType: "llm"
};
154
// Shorthand factory for a model-metadata record; modelType defaults to "llm".
var md = (input, output, tools, vision, kind = "llm") => ({
  maxInputTokens: input,
  maxOutputTokens: output,
  supportsToolCalling: tools,
  supportsImageInput: vision,
  modelType: kind
});
161
+ var MODEL_FAMILY_PATTERNS = [
162
+ // ============== ByteDance Seed Family ==============
163
+ {
164
+ pattern: /(doubao-)?seed/i,
165
+ metadata: md(262144, 32768, true, true),
166
+ subPatterns: [
167
+ { pattern: /seed-1\.6-flash/i, metadata: md(262144, 16384, true, true) },
168
+ { pattern: /seed-1\.6/i, metadata: md(262144, 32768, true, true) }
169
+ ]
170
+ },
171
+ // ============== OpenAI GPT-5 Family ==============
172
+ {
173
+ pattern: /gpt-5/i,
174
+ metadata: md(4e5, 128e3, true, true),
175
+ subPatterns: [
176
+ { pattern: /gpt-5\.2-pro/i, metadata: md(4e5, 128e3, true, true) },
177
+ { pattern: /gpt-5\.2-chat/i, metadata: md(128e3, 16384, true, true) },
178
+ { pattern: /gpt-5\.2/i, metadata: md(4e5, 128e3, true, true) },
179
+ { pattern: /gpt-5\.1-codex-max/i, metadata: md(4e5, 128e3, true, true) },
180
+ { pattern: /gpt-5\.1-codex-mini/i, metadata: md(4e5, 1e5, true, true) },
181
+ { pattern: /gpt-5\.1-codex/i, metadata: md(4e5, 128e3, true, true) },
182
+ { pattern: /gpt-5\.1-chat/i, metadata: md(128e3, 16384, true, true) },
183
+ { pattern: /gpt-5\.1/i, metadata: md(4e5, 128e3, true, true) },
184
+ { pattern: /gpt-5-image-mini/i, metadata: md(4e5, 128e3, true, true) },
185
+ { pattern: /gpt-5-image/i, metadata: md(4e5, 128e3, true, true) },
186
+ { pattern: /gpt-5-pro/i, metadata: md(4e5, 128e3, true, true) },
187
+ { pattern: /gpt-5-chat/i, metadata: md(128e3, 16384, true, true) },
188
+ { pattern: /gpt-5-nano/i, metadata: md(4e5, 128e3, true, true) },
189
+ { pattern: /gpt-5-mini/i, metadata: md(4e5, 128e3, true, true) },
190
+ { pattern: /gpt-5-codex/i, metadata: md(4e5, 128e3, true, true) }
191
+ ]
192
+ },
193
+ // ============== OpenAI GPT-4.1 Family ==============
194
+ {
195
+ pattern: /gpt-4\.1/i,
196
+ metadata: md(1047576, 32768, true, true),
197
+ subPatterns: [
198
+ { pattern: /gpt-4\.1-nano/i, metadata: md(1047576, 32768, true, true) },
199
+ { pattern: /gpt-4\.1-mini/i, metadata: md(1047576, 32768, true, true) }
200
+ ]
201
+ },
202
+ // ============== OpenAI o3/o4 Reasoning Models ==============
203
+ {
204
+ pattern: /o[34]/i,
205
+ metadata: md(2e5, 1e5, true, true),
206
+ subPatterns: [
207
+ { pattern: /o4-mini-high/i, metadata: md(2e5, 1e5, true, true) },
208
+ { pattern: /o4-mini-deep-research/i, metadata: md(2e5, 1e5, true, true) },
209
+ { pattern: /o4-mini/i, metadata: md(2e5, 1e5, true, true) },
210
+ { pattern: /o3-pro/i, metadata: md(2e5, 1e5, true, true) },
211
+ { pattern: /o3-mini-high/i, metadata: md(2e5, 1e5, true, false) },
212
+ { pattern: /o3-mini/i, metadata: md(2e5, 1e5, true, false) },
213
+ { pattern: /o3-deep-research/i, metadata: md(2e5, 1e5, true, true) }
214
+ ]
215
+ },
216
+ // ============== OpenAI GPT-4o Family ==============
217
+ {
218
+ pattern: /gpt-4o/i,
219
+ metadata: md(128e3, 16384, true, true),
220
+ subPatterns: [
221
+ { pattern: /gpt-4o-mini/i, metadata: md(128e3, 16384, true, true) },
222
+ { pattern: /chatgpt-4o/i, metadata: md(128e3, 16384, true, true) }
223
+ ]
224
+ },
225
+ // ============== OpenAI GPT-4 Turbo Family ==============
226
+ {
227
+ pattern: /gpt-4-turbo/i,
228
+ metadata: md(128e3, 4096, true, true),
229
+ subPatterns: [
230
+ { pattern: /gpt-4-turbo-preview/i, metadata: md(128e3, 4096, true, false) }
231
+ ]
232
+ },
233
+ // ============== OpenAI GPT-4 Family ==============
234
+ {
235
+ pattern: /gpt-4/i,
236
+ metadata: md(8192, 4096, true, false),
237
+ subPatterns: [
238
+ { pattern: /gpt-4-1106-preview/i, metadata: md(128e3, 4096, true, false) },
239
+ { pattern: /gpt-4-0314/i, metadata: md(8192, 4096, true, false) }
240
+ ]
241
+ },
242
+ // ============== OpenAI Codex Family ==============
243
+ { pattern: /codex-mini/i, metadata: md(2e5, 1e5, true, true) },
244
+ // ============== OpenAI GPT-OSS Models ==============
245
+ {
246
+ pattern: /gpt-oss/i,
247
+ metadata: md(131072, 65536, true, false),
248
+ subPatterns: [
249
+ { pattern: /gpt-oss-safeguard-20b/i, metadata: md(131072, 65536, true, false) },
250
+ { pattern: /gpt-oss-120b/i, metadata: md(131072, 65536, true, false) },
251
+ { pattern: /gpt-oss-20b/i, metadata: md(131072, 65536, true, false) }
252
+ ]
253
+ },
254
+ // ============== Anthropic Claude 4 Family ==============
255
+ {
256
+ pattern: /claude-(opus|sonnet|haiku)-4/i,
257
+ metadata: { maxInputTokens: 2e5, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
258
+ subPatterns: [
259
+ { pattern: /claude-opus-4\.5/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
260
+ { pattern: /claude-sonnet-4\.5/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
261
+ { pattern: /claude-haiku-4\.5/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
262
+ { pattern: /claude-opus-4\.1/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
263
+ { pattern: /claude-opus-4(?![.\d])/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
264
+ { pattern: /claude-sonnet-4(?![.\d])/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
265
+ ]
266
+ },
267
+ // ============== Anthropic Claude 3.x Family ==============
268
+ {
269
+ pattern: /claude-3/i,
270
+ metadata: md(2e5, 4096, true, true),
271
+ subPatterns: [
272
+ { pattern: /claude-3\.7-sonnet/i, metadata: md(2e5, 128e3, true, true) },
273
+ { pattern: /claude-3\.5-sonnet/i, metadata: md(2e5, 8192, true, true) },
274
+ { pattern: /claude-3\.5-haiku/i, metadata: md(2e5, 8192, true, true) },
275
+ { pattern: /claude-3-opus/i, metadata: md(2e5, 4096, true, true) },
276
+ { pattern: /claude-3-haiku/i, metadata: md(2e5, 4096, true, true) }
277
+ ]
278
+ },
279
+ // ============== Google Gemini 3 Family ==============
280
+ {
281
+ pattern: /gemini-3/i,
282
+ metadata: { maxInputTokens: 1048576, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
283
+ subPatterns: [
284
+ { pattern: /gemini-3-pro-image/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 32768, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
285
+ { pattern: /gemini-3-pro/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
286
+ { pattern: /gemini-3-flash/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
287
+ ]
288
+ },
289
+ // ============== Google Gemini 2.5 Family ==============
290
+ {
291
+ pattern: /gemini-2\.5/i,
292
+ metadata: { maxInputTokens: 1048576, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
293
+ subPatterns: [
294
+ { pattern: /gemini-2\.5-pro/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
295
+ { pattern: /gemini-2\.5-flash-image/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
296
+ { pattern: /gemini-2\.5-flash-lite/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
297
+ { pattern: /gemini-2\.5-flash/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
298
+ ]
299
+ },
300
+ // ============== Google Gemini 2.0 Family ==============
301
+ { pattern: /gemini-2\.0/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
302
+ // ============== Google Gemma Family ==============
303
+ {
304
+ pattern: /gemma/i,
305
+ metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" },
306
+ subPatterns: [
307
+ { pattern: /gemma-2-27b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
308
+ { pattern: /gemma-2-9b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
309
+ { pattern: /gemma-3n/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } }
310
+ ]
311
+ },
312
+ // ============== Qwen3 Family ==============
313
+ {
314
+ pattern: /qwen3/i,
315
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
316
+ subPatterns: [
317
+ { pattern: /qwen3-coder-(480b|plus)/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
318
+ { pattern: /qwen3-coder-(30b|flash)/i, metadata: { maxInputTokens: 16e4, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
319
+ { pattern: /qwen3-coder/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
320
+ { pattern: /qwen3-vl-235b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
321
+ { pattern: /qwen3-vl-32b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
322
+ { pattern: /qwen3-vl-30b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
323
+ { pattern: /qwen3-vl-8b/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
324
+ { pattern: /qwen3-vl/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
325
+ { pattern: /qwen3-max/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
326
+ { pattern: /qwen3-next/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
327
+ { pattern: /qwen3-235b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
328
+ { pattern: /qwen3-32b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
329
+ { pattern: /qwen3-30b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
330
+ { pattern: /qwen3-14b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
331
+ { pattern: /qwen3-8b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 2e4, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
332
+ { pattern: /qwen3-4b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
333
+ ]
334
+ },
335
+ // ============== Qwen2.5 Family ==============
336
+ {
337
+ pattern: /qwen2\.5/i,
338
+ metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
339
+ subPatterns: [
340
+ { pattern: /qwen2\.5-72b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
341
+ { pattern: /qwen2\.5-vl/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
342
+ { pattern: /qwen2\.5-coder/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
343
+ ]
344
+ },
345
+ // ============== Qwen Family ==============
346
+ {
347
+ pattern: /qwen/i,
348
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
349
+ subPatterns: [
350
+ { pattern: /qwen-coder-(480b|plus)/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
351
+ { pattern: /qwen-coder-(30b|flash)/i, metadata: { maxInputTokens: 16e4, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
352
+ { pattern: /qwen-coder/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
353
+ { pattern: /qwen-vl-235b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
354
+ { pattern: /qwen-vl-32b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
355
+ { pattern: /qwen-vl-30b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
356
+ { pattern: /qwen-vl-8b/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
357
+ { pattern: /qwen-vl/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
358
+ { pattern: /qwen-max/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
359
+ { pattern: /qwen-next/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
360
+ { pattern: /qwen-235b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
361
+ { pattern: /qwen-32b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
362
+ { pattern: /qwen-30b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
363
+ { pattern: /qwen-14b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
364
+ { pattern: /qwen-8b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 2e4, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
365
+ { pattern: /qwen-4b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
366
+ ]
367
+ },
368
+ // ============== QwQ/QvQ Reasoning Models ==============
369
+ { pattern: /qwq-32b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
370
+ { pattern: /qwq/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
371
+ { pattern: /qvq/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 16384, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
372
+ // ============== DeepSeek Family ==============
373
+ {
374
+ pattern: /deepseek/i,
375
+ metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
376
+ subPatterns: [
377
+ { pattern: /deepseek-v3\.2-exp/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
378
+ { pattern: /deepseek-v3\.2-speciale/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
379
+ { pattern: /deepseek-v3\.2/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
380
+ { pattern: /deepseek-v3\.1-terminus/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
381
+ { pattern: /deepseek-v3\.1-nex-n1/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
382
+ { pattern: /deepseek-(v3\.1|chat-v3\.1)/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
383
+ { pattern: /deepseek-r1-distill-llama-70b/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
384
+ { pattern: /deepseek-r1-distill-qwen-32b/i, metadata: { maxInputTokens: 64e3, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
385
+ { pattern: /deepseek-r1-distill-qwen-14b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
386
+ { pattern: /deepseek-r1-0528-qwen3-8b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
387
+ { pattern: /deepseek-r1-0528/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
388
+ { pattern: /deepseek-r1/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
389
+ { pattern: /deepseek-prover-v2/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
390
+ { pattern: /deepseek-prover/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
391
+ { pattern: /deepseek-chat-v3-0324/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
392
+ { pattern: /deepseek-chat/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
393
+ ]
394
+ },
395
+ // ============== Meta Llama 4 Family ==============
396
+ {
397
+ pattern: /llama-4/i,
398
+ metadata: { maxInputTokens: 1048576, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
399
+ subPatterns: [
400
+ { pattern: /llama-4-maverick/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
401
+ { pattern: /llama-4-scout/i, metadata: { maxInputTokens: 1e7, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
402
+ ]
403
+ },
404
+ // ============== Meta Llama 3.x Family ==============
405
+ {
406
+ pattern: /llama-3(\.[123])?/i,
407
+ metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
408
+ subPatterns: [
409
+ { pattern: /llama-3\.3-70b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
410
+ { pattern: /llama-3\.2-90b-vision/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
411
+ { pattern: /llama-3\.2-11b-vision/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
412
+ { pattern: /llama-3\.2-1b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
413
+ { pattern: /llama-3\.1-405b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
414
+ { pattern: /llama-3\.1-70b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
415
+ { pattern: /llama-3\.1-8b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
416
+ { pattern: /llama-3-70b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
417
+ { pattern: /llama-3-8b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
418
+ { pattern: /llama-3\.3-nemotron/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
419
+ ]
420
+ },
421
+ // ============== Meta Llama Guard Family ==============
422
+ {
423
+ pattern: /llama-guard/i,
424
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "other" },
425
+ subPatterns: [
426
+ { pattern: /llama-guard-4-12b/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "other" } },
427
+ { pattern: /llama-guard-3-8b/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "other" } },
428
+ { pattern: /llama-guard-2-8b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "other" } }
429
+ ]
430
+ },
431
+ // ============== Mistral Family ==============
432
+ {
433
+ pattern: /(mistral|mixtral|pixtral|codestral|devstral)/i,
434
+ metadata: md(32768, 8192, true, false),
435
+ subPatterns: [
436
+ { pattern: /mistral-large/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
437
+ { pattern: /mistral-nemo/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
438
+ { pattern: /devstral/i, metadata: md(262144, 32768, true, false) },
439
+ { pattern: /pixtral/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
440
+ { pattern: /mistral-small/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
441
+ { pattern: /mixtral-8x22b/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
442
+ { pattern: /mixtral-8x7b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
443
+ { pattern: /mistral-7b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
444
+ ]
445
+ },
446
+ // ============== xAI Grok Family ==============
447
+ {
448
+ pattern: /grok/i,
449
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
450
+ subPatterns: [
451
+ { pattern: /grok-4\.1-fast/i, metadata: { maxInputTokens: 2e6, maxOutputTokens: 3e4, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
452
+ { pattern: /grok-4-fast/i, metadata: { maxInputTokens: 2e6, maxOutputTokens: 3e4, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
453
+ { pattern: /grok-4/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
454
+ { pattern: /grok-3-mini/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
455
+ { pattern: /grok-3/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
456
+ { pattern: /grok-code-fast/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 1e4, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
457
+ ]
458
+ },
459
+ // ============== Moonshot/Kimi Family ==============
460
+ {
461
+ pattern: /(kimi|moonshot)/i,
462
+ metadata: md(262144, 262144, true, false),
463
+ subPatterns: [
464
+ { pattern: /kimi-k2/i, metadata: md(262144, 262144, true, false) }
465
+ ]
466
+ },
467
+ // ============== Amazon Nova Family ==============
468
+ {
469
+ pattern: /nova/i,
470
+ metadata: { maxInputTokens: 1e6, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
471
+ subPatterns: [
472
+ { pattern: /nova-premier/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
473
+ { pattern: /nova-2-lite/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
474
+ { pattern: /nova-lite/i, metadata: { maxInputTokens: 3e5, maxOutputTokens: 5120, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
475
+ { pattern: /nova-micro/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 5120, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
476
+ { pattern: /nova-pro/i, metadata: { maxInputTokens: 3e5, maxOutputTokens: 5120, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
477
+ ]
478
+ },
479
+ // ============== Cohere Command Family ==============
480
+ {
481
+ pattern: /command/i,
482
+ metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
483
+ subPatterns: [
484
+ { pattern: /command-r-plus/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
485
+ { pattern: /command-r/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
486
+ ]
487
+ },
488
+ // ============== NVIDIA Nemotron Family ==============
489
+ {
490
+ pattern: /nemotron/i,
491
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
492
+ subPatterns: [
493
+ { pattern: /nemotron-3-nano/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
494
+ { pattern: /nemotron-nano.*vl/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
495
+ { pattern: /nemotron-nano/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
496
+ { pattern: /llama.*nemotron/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
497
+ ]
498
+ },
499
+ // ============== MiniMax Family ==============
500
+ { pattern: /minimax/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
501
+ // ============== Z.AI GLM Family ==============
502
+ {
503
+ pattern: /glm/i,
504
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
505
+ subPatterns: [
506
+ { pattern: /glm-4\.6v/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 24e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
507
+ { pattern: /glm-4\.6/i, metadata: { maxInputTokens: 204800, maxOutputTokens: 204800, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
508
+ { pattern: /glm-4\.5v/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
509
+ { pattern: /glm-4\.5/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
510
+ { pattern: /glm-4/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
511
+ ]
512
+ },
513
+ // ============== THUDM GLM Family ==============
514
+ // Note: This pattern is now subsumed by the Z.AI GLM pattern above
515
+ // ============== Baidu ERNIE Family ==============
516
+ { pattern: /ernie/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
517
+ // ============== Tencent Hunyuan Family ==============
518
+ { pattern: /hunyuan/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
519
+ // ============== AI21 Jamba Family ==============
520
+ { pattern: /jamba/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
521
+ // ============== Perplexity Sonar Family ==============
522
+ { pattern: /sonar/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 8e3, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
523
+ // ============== Microsoft Phi Family ==============
524
+ {
525
+ pattern: /phi-3/i,
526
+ metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
527
+ subPatterns: [
528
+ { pattern: /phi-3\.5-mini/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
529
+ { pattern: /phi-3-mini/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
530
+ { pattern: /phi-3-medium/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
531
+ ]
532
+ },
533
+ // ============== IBM Granite Family ==============
534
+ { pattern: /granite/i, metadata: { maxInputTokens: 131e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
535
+ // ============== Nous Research Hermes Family ==============
536
+ { pattern: /hermes/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
537
+ { pattern: /deephermes/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
538
+ // ============== Inception Mercury Family ==============
539
+ { pattern: /mercury/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
540
+ // ============== StepFun Step Family ==============
541
+ { pattern: /step/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
542
+ // ============== Deep Cogito Family ==============
543
+ { pattern: /cogito/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
544
+ // ============== Prime Intellect Family ==============
545
+ { pattern: /intellect/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
546
+ // ============== AllenAI Olmo Family ==============
547
+ { pattern: /olmo/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
548
+ // ============== Arcee AI Family ==============
549
+ // Match model series names: trinity, virtuoso, coder, maestro, spotlight
550
+ {
551
+ pattern: /(trinity|virtuoso|maestro|spotlight)/i,
552
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
553
+ subPatterns: [
554
+ { pattern: /virtuoso-large/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
555
+ { pattern: /maestro-reasoning/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
556
+ { pattern: /coder-large/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
557
+ { pattern: /spotlight/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65537, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
558
+ { pattern: /trinity-mini/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
559
+ ]
560
+ },
561
+ // ============== Meituan LongCat Family ==============
562
+ { pattern: /longcat/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
563
+ // ============== Morph Family ==============
564
+ { pattern: /morph/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
565
+ // ============== Relace Family ==============
566
+ { pattern: /relace/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 128e3, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
567
+ // ============== TNG Chimera Family ==============
568
+ { pattern: /tng.*chimera/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
569
+ // ============== Xiaomi MiMo Family ==============
570
+ { pattern: /mimo/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
571
+ // ============== Alibaba Tongyi Family ==============
572
+ { pattern: /tongyi/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
573
+ // ============== KwaiPilot KAT Family ==============
574
+ { pattern: /(kwaipilot.*kat|kat-coder)/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
575
+ // ============== Liquid AI LFM Family ==============
576
+ { pattern: /lfm/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
577
+ // ============== OpenGVLab InternVL Family ==============
578
+ { pattern: /internvl/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
579
+ // ============== ByteDance UI-TARS Family ==============
580
+ { pattern: /ui-tars/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 2048, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
581
+ // ============== Aion Labs Family ==============
582
+ { pattern: /aion/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
583
+ // ============== Essential AI Rnj Family ==============
584
+ { pattern: /rnj/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
585
+ // ============== Switchpoint Router ==============
586
+ { pattern: /switchpoint/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
587
+ ];
588
// Pattern table used by getModelMetadata() to classify ids that are not chat
// LLMs (embeddings, rerankers, image/audio generators, moderation endpoints).
// Order matters: getModelMetadata() iterates this list and returns the
// modelType of the FIRST entry whose regex tests true against the raw id.
var NON_LLM_PATTERNS = [
  // Embedding models
  { pattern: /embed(ding)?/i, modelType: "embedding" },
  { pattern: /text-embed/i, modelType: "embedding" },
  { pattern: /bge-/i, modelType: "embedding" },
  { pattern: /e5-/i, modelType: "embedding" },
  { pattern: /gte-/i, modelType: "embedding" },
  { pattern: /sentence-/i, modelType: "embedding" },
  { pattern: /all-minilm/i, modelType: "embedding" },
  { pattern: /nomic-embed/i, modelType: "embedding" },
  { pattern: /jina-embed/i, modelType: "embedding" },
  { pattern: /voyage-/i, modelType: "embedding" },
  // Rerank models
  { pattern: /rerank/i, modelType: "rerank" },
  { pattern: /ranker/i, modelType: "rerank" },
  // NOTE(review): unreachable — /rerank/ above already matches any id
  // containing "reranker"; confirm before removing.
  { pattern: /jina-reranker/i, modelType: "rerank" },
  // Image generation models
  { pattern: /dall-e/i, modelType: "image" },
  { pattern: /stable-diffusion/i, modelType: "image" },
  { pattern: /sdxl/i, modelType: "image" },
  { pattern: /midjourney/i, modelType: "image" },
  { pattern: /imagen/i, modelType: "image" },
  { pattern: /flux-\d/i, modelType: "image" },
  { pattern: /playground-v/i, modelType: "image" },
  { pattern: /ideogram/i, modelType: "image" },
  { pattern: /recraft/i, modelType: "image" },
  // Audio models (speech-to-text / text-to-speech)
  { pattern: /whisper/i, modelType: "audio" },
  { pattern: /tts-/i, modelType: "audio" },
  { pattern: /^speech[-_]/i, modelType: "audio" },
  { pattern: /voxtral/i, modelType: "audio" },
  // Moderation / safety models
  { pattern: /moderation/i, modelType: "other" },
  { pattern: /content-filter/i, modelType: "other" },
  { pattern: /guard/i, modelType: "other" },
  // NOTE(review): unreachable — /guard/ above already matches any id
  // containing "safeguard"; confirm before removing.
  { pattern: /safeguard/i, modelType: "other" }
];
625
/**
 * Canonicalizes a model id for pattern matching: lowercases it, strips a
 * single leading provider prefix (e.g. "openai/"), then strips trailing
 * variant/version suffixes one at a time in a fixed order.
 *
 * Each suffix pattern is applied at most once, in sequence, exactly as the
 * original replace chain did — so e.g. "deepseek-chat:free" still normalizes
 * to "deepseek-chat" (":free" is removed only after the "-chat" pass ran).
 *
 * @param {string} modelId - raw model identifier.
 * @returns {string} normalized identifier.
 */
function normalizeModelId(modelId) {
  const providerPrefix = /^(openai\/|anthropic\/|google\/|meta-llama\/|mistralai\/|cohere\/|qwen\/|deepseek\/|deepseek-ai\/|microsoft\/|nvidia\/|x-ai\/|amazon\/|ai21\/|perplexity\/|ibm-granite\/|z-ai\/|thudm\/|baidu\/|tencent\/|moonshotai\/|stepfun-ai\/|nousresearch\/|prime-intellect\/|allenai\/|arcee-ai\/|meituan\/|morph\/|relace\/|inception\/|minimax\/|opengvlab\/|bytedance\/|liquid\/|tngtech\/|xiaomi\/|alibaba\/|kwaipilot\/|deepcogito\/|essentialai\/)/;
  // Applied sequentially; order matches the original replace chain.
  const trailingSuffixes = [
    /-instruct$/,
    /-chat$/,
    /-preview$/,
    /-latest$/,
    /:free$/,
    /:extended$/,
    /:exacto$/,
    /:thinking$/,
    /@\d{4}-\d{2}-\d{2}$/, // dated version tag, e.g. "@2024-06-20"
    /[-_](\d{8})$/ // compact date suffix, e.g. "-20240620"
  ];
  let id = modelId.toLowerCase().replace(providerPrefix, "");
  for (const suffix of trailingSuffixes) {
    id = id.replace(suffix, "");
  }
  return id;
}
628
/**
 * Tests a model id against one pattern.
 * RegExp patterns are tested as-authored (substring semantics, original
 * flags); string patterns are treated as a case-insensitive prefix.
 *
 * @param {string} modelId - model identifier to test.
 * @param {RegExp|string} pattern - pattern to match against.
 * @returns {boolean} whether the id matches.
 */
function matchPattern(modelId, pattern) {
  return pattern instanceof RegExp
    ? pattern.test(modelId)
    : modelId.toLowerCase().startsWith(pattern.toLowerCase());
}
634
/**
 * Resolves metadata for a model id from a hierarchical pattern table.
 * The first matching family wins; within that family the first matching
 * subPattern refines the result (recursing into nested subPatterns when
 * present), otherwise the family-level metadata is used.
 *
 * @param {string} modelId - model identifier to classify.
 * @param {Array} patterns - family pattern entries ({pattern, metadata, subPatterns?}).
 * @returns {object|null} matched metadata, or null when nothing matches.
 */
function matchHierarchicalPattern(modelId, patterns) {
  for (const family of patterns) {
    if (!matchPattern(modelId, family.pattern)) {
      continue;
    }
    for (const sub of family.subPatterns ?? []) {
      if (!matchPattern(modelId, sub.pattern)) {
        continue;
      }
      if (sub.subPatterns) {
        // Recurse one level deeper; fall through to sub.metadata when the
        // nested table yields nothing.
        const nested = matchHierarchicalPattern(modelId, [sub]);
        if (nested) {
          return nested;
        }
      }
      return sub.metadata;
    }
    // Family matched but no subPattern did: return the generic family entry.
    return family.metadata;
  }
  return null;
}
655
/**
 * Looks up metadata for a model id.
 * Tries the LLM family table with the raw id, then with the normalized id,
 * then falls back to the non-LLM classification table; DEFAULT_MODEL_METADATA
 * is returned when nothing matches (or the id is not a non-empty string).
 *
 * @param {string} modelId - model identifier.
 * @returns {object} resolved model metadata.
 */
function getModelMetadata(modelId) {
  // Guard: non-string or empty ids get the defaults.
  if (typeof modelId !== "string" || !modelId) {
    return DEFAULT_MODEL_METADATA;
  }
  // Raw id first, normalized id second — same precedence as before.
  for (const candidate of [modelId, normalizeModelId(modelId)]) {
    const match = matchHierarchicalPattern(candidate, MODEL_FAMILY_PATTERNS);
    if (match) {
      return match;
    }
  }
  // Not a known LLM family: see whether it is an embedding/rerank/image/etc.
  // model (first matching entry wins; tested against the raw id).
  const nonLlm = NON_LLM_PATTERNS.find(({ pattern }) => pattern.test(modelId));
  if (nonLlm) {
    return {
      ...DEFAULT_MODEL_METADATA,
      modelType: nonLlm.modelType
    };
  }
  return DEFAULT_MODEL_METADATA;
}
678
// Thin alias for getModelMetadata — presumably retained so external callers
// keep a stable exported name; TODO confirm against the package's exports.
function getModelMetadataFromPatterns(modelId) {
  return getModelMetadata(modelId);
}
681
+
682
+ // src/metadata.ts
683
// Alias originating from src/metadata.ts; the "2" suffix was presumably added
// by the bundler to avoid a name collision with the src/model-patterns copy.
function getModelMetadataFromPatterns2(modelId) {
  return getModelMetadataFromPatterns(modelId);
}
686
/**
 * Merges metadata reported by the API with pattern-derived metadata.
 * API-supplied fields win field-by-field; nullish fields fall back to the
 * pattern values. modelType is always taken from the pattern metadata.
 *
 * @param {object|undefined} apiMetadata - partial metadata from the API (may be absent).
 * @param {object} patternMetadata - metadata resolved from the pattern tables.
 * @returns {object} merged metadata.
 */
function mergeMetadata(apiMetadata, patternMetadata) {
  // No API metadata at all: the pattern result stands unchanged.
  if (!apiMetadata) {
    return patternMetadata;
  }
  const pick = (field) => apiMetadata[field] ?? patternMetadata[field];
  return {
    maxInputTokens: pick("maxInputTokens"),
    maxOutputTokens: pick("maxOutputTokens"),
    supportsToolCalling: pick("supportsToolCalling"),
    supportsImageInput: pick("supportsImageInput"),
    modelType: patternMetadata.modelType
  };
}
698
+
699
+ // src/plugin.ts
700
/**
 * Renders an opencode provider configuration (pretty-printed JSON string)
 * for the discovered models. Per-model limits/capabilities come from the
 * pattern tables merged with any API-reported metadata; baseURL/apiKey are
 * emitted as placeholders for the user to fill in.
 *
 * @param {Array} models - discovered models ({id, name?, metadata?}).
 * @param {string} [providerName="custom-provider"] - key/name for the provider entry.
 * @returns {string} JSON config text.
 */
function generateModelsConfig(models, providerName = "custom-provider") {
  // One [id, entry] pair per model; later duplicates overwrite earlier ones,
  // matching plain object assignment.
  const modelEntries = models.map((model) => {
    const merged = mergeMetadata(
      model.metadata,
      getModelMetadataFromPatterns2(model.id)
    );
    return [
      model.id,
      {
        name: model.name || model.id,
        tool_call: merged.supportsToolCalling ?? true,
        attachment: merged.supportsImageInput ?? false,
        limit: {
          // Fallbacks for metadata that resolved to 0/undefined.
          context: merged.maxInputTokens || 128e3,
          output: merged.maxOutputTokens || 16384
        }
      }
    ];
  });
  const config = {
    provider: {
      [providerName]: {
        name: providerName,
        npm: "@oai2lmapi/opencode-provider",
        options: {
          // Placeholders — the user substitutes their real endpoint and key env var.
          baseURL: "YOUR_API_BASE_URL",
          apiKey: "{env:YOUR_API_KEY_ENV}"
        },
        models: Object.fromEntries(modelEntries)
      }
    }
  };
  return JSON.stringify(config, null, 2);
}
730
+
731
+ // src/cli.ts
732
/**
 * Parses raw CLI arguments into an options object.
 *
 * Value-taking flags consume the following argv entry (which may be
 * undefined when the flag is last); unrecognized tokens are ignored.
 * `--help`/`-h` prints usage and exits the process immediately.
 *
 * @param {string[]} args - argv entries after the node/script prefix.
 * @returns {{baseURL?: string, apiKey?: string, providerName?: string, filter?: string, output?: string, useConfig?: boolean}}
 */
function parseArgs(args) {
  // Map (not a plain object) so arbitrary tokens like "toString" never hit
  // inherited prototype properties.
  const valueFlags = new Map([
    ["--baseURL", "baseURL"],
    ["-b", "baseURL"],
    ["--apiKey", "apiKey"],
    ["-k", "apiKey"],
    ["--provider", "providerName"],
    ["-p", "providerName"],
    ["--filter", "filter"],
    ["-f", "filter"],
    ["--output", "output"],
    ["-o", "output"]
  ]);
  const options = {};
  for (let i = 0; i < args.length; i++) {
    const token = args[i];
    if (token === "--config" || token === "-c") {
      options.useConfig = true;
      continue;
    }
    if (token === "--help" || token === "-h") {
      printHelp();
      process.exit(0);
    }
    const key = valueFlags.get(token);
    if (key !== undefined) {
      options[key] = args[i + 1];
      i++; // skip the consumed value
    }
  }
  return options;
}
775
// Prints CLI usage to stdout and returns; callers decide whether to exit.
// The help text is a single template literal emitted verbatim.
// NOTE(review): column spacing below was reconstructed from a
// whitespace-collapsed diff — verify option/description alignment against
// the published package before relying on exact output bytes.
function printHelp() {
  console.log(`
oai2lm-discover - Discover models from OpenAI-compatible APIs

USAGE:
  oai2lm-discover [options]

OPTIONS:
  -b, --baseURL <url>     Base URL of the API (e.g., https://api.example.com/v1)
  -k, --apiKey <key>      API key for authentication
  -p, --provider <name>   Provider name for config (default: custom-provider)
  -f, --filter <regex>    Filter models by regex pattern
  -o, --output <format>   Output format: json, table, or config (default: config)
  -c, --config            Load settings from oai2lm.json
  -h, --help              Show this help message

EXAMPLES:
  # Discover models and generate opencode.json config
  oai2lm-discover -b https://api.example.com/v1 -k sk-xxx -p my-api

  # Use settings from oai2lm.json
  oai2lm-discover --config

  # Filter to specific models
  oai2lm-discover -b https://api.openai.com/v1 -k sk-xxx -f "gpt-4"

  # Output as JSON
  oai2lm-discover --config -o json
`);
}
805
/**
 * Prints the enriched model list to stdout as an aligned ASCII table:
 * header row, dashed separator, then one row per model.
 * Capability columns render "✓" for true and "-" for false.
 *
 * @param {Array<{id: string, name?: string, context: number, output: number, tools: boolean, vision: boolean}>} models
 */
function printTable(models) {
  const headers = ["ID", "Name", "Context", "Output", "Tools", "Vision"];
  const rows = models.map((m) => [
    m.id,
    m.name || "-",
    m.context.toString(),
    m.output.toString(),
    m.tools ? "\u2713" : "-",
    m.vision ? "\u2713" : "-"
  ]);
  // Each column is as wide as its widest cell (header included).
  const widths = headers.map((header, col) =>
    rows.reduce((w, row) => Math.max(w, row[col].length), header.length)
  );
  const render = (cells) =>
    cells.map((cell, col) => cell.padEnd(widths[col])).join(" | ");
  console.log(render(headers));
  console.log(widths.map((w) => "-".repeat(w)).join("-+-"));
  for (const row of rows) {
    console.log(render(row));
  }
}
826
/**
 * CLI entry point: resolves settings (flags first, oai2lm.json fallback),
 * discovers models from the target API, optionally filters them, and emits
 * the result in the requested format (json | table | config).
 * Progress/errors go to stderr so stdout stays machine-readable; exits
 * non-zero on missing baseURL, empty results, or any thrown error.
 */
async function main() {
  const cli = parseArgs(process.argv.slice(2));
  // Config file is consulted when explicitly requested, or as a fallback
  // when no baseURL was supplied on the command line.
  let fileConfig = cli.useConfig ? loadConfig() : void 0;
  if (!cli.baseURL && !fileConfig) {
    fileConfig = loadConfig();
  }
  const baseURL = cli.baseURL || fileConfig?.baseURL;
  const apiKey = cli.apiKey || (fileConfig ? resolveApiKey(fileConfig) : void 0);
  const providerName = cli.providerName || fileConfig?.name || "custom-provider";
  const filter = cli.filter || fileConfig?.modelFilter;
  const output = cli.output || "config";
  if (!baseURL) {
    console.error("Error: No baseURL provided.");
    console.error("Use --baseURL or create an oai2lm.json config file.");
    console.error("Run 'oai2lm-discover --help' for more information.");
    process.exit(1);
  }
  try {
    console.error(`Discovering models from ${baseURL}...`);
    let modelList = await discoverModels(baseURL, apiKey || "", fileConfig?.headers);
    if (filter) {
      // User-supplied pattern, matched case-insensitively against model ids.
      const filterRegex = new RegExp(filter, "i");
      modelList = modelList.filter((m) => filterRegex.test(m.id));
    }
    if (modelList.length === 0) {
      console.error("No models found.");
      process.exit(1);
    }
    console.error(`Found ${modelList.length} models.\n`);
    // Flatten merged metadata into the shape the json/table outputs use.
    const enrichedModels = modelList.map((model) => {
      const merged = mergeMetadata(
        model.metadata,
        getModelMetadataFromPatterns2(model.id)
      );
      return {
        id: model.id,
        name: model.name,
        context: merged.maxInputTokens || 128e3,
        output: merged.maxOutputTokens || 16384,
        tools: merged.supportsToolCalling ?? true,
        vision: merged.supportsImageInput ?? false
      };
    });
    if (output === "json") {
      console.log(JSON.stringify(enrichedModels, null, 2));
    } else if (output === "table") {
      printTable(enrichedModels);
    } else {
      // "config" and any unrecognized format fall through to config output.
      console.log(generateModelsConfig(modelList, providerName));
    }
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    console.error(`Error: ${message}`);
    process.exit(1);
  }
}
main();
888
+ //# sourceMappingURL=cli.js.map