@oai2lmapi/opencode-provider 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,22 +1,858 @@
1
- /**
2
- * @oai2lmapi/opencode-provider
3
- *
4
- * OpenAI-compatible provider for OpenCode with auto-discovery and advanced features
5
- *
6
- * Main exports:
7
- * - createOAI2LMProvider: Factory function to create a callable provider
8
- * - createOAI2LMProviderFromConfig: Factory function to create provider from config file
9
- *
10
- * For configuration utilities, import from './config.js'
11
- */
12
- // Main provider exports - these are the primary API
13
- export { createOAI2LMProvider, createOAI2LMProviderFromConfig, } from './provider.js';
14
- // Model metadata utilities (functions only, no constants that could be mistaken for plugins)
15
- export { getModelMetadataFromPatterns } from './modelMetadata.js';
16
- // Configuration utilities - export functions only from main entry
17
- // Constants are available via direct import from './config.js'
18
- export { loadConfig, createSettingsFromConfig, getConfigFilePath, getDataDir, getConfigDir, resolveApiKey, resolveBaseURL, } from './config.js';
19
- // DEFAULT_MODEL_METADATA is intentionally not exported from the main entry point
20
- // to avoid OpenCode's plugin loader attempting to call it as a function.
21
- // Users who need DEFAULT_MODEL_METADATA should import directly from './modelMetadata.js'
22
- //# sourceMappingURL=index.js.map
1
+ // src/provider.ts
2
+ import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
3
+
4
+ // ../model-metadata/dist/esm/index.mjs
5
+ var DEFAULT_MODEL_METADATA = {
6
+ maxInputTokens: 8192,
7
+ maxOutputTokens: 4096,
8
+ supportsToolCalling: false,
9
+ supportsImageInput: false,
10
+ modelType: "llm"
11
+ };
12
+ var md = (maxInputTokens, maxOutputTokens, supportsToolCalling2, supportsImageInput, modelType = "llm") => ({
13
+ maxInputTokens,
14
+ maxOutputTokens,
15
+ supportsToolCalling: supportsToolCalling2,
16
+ supportsImageInput,
17
+ modelType
18
+ });
19
+ var MODEL_FAMILY_PATTERNS = [
20
+ // ============== ByteDance Seed Family ==============
21
+ {
22
+ pattern: /(doubao-)?seed/i,
23
+ metadata: md(262144, 32768, true, true),
24
+ subPatterns: [
25
+ { pattern: /seed-1\.6-flash/i, metadata: md(262144, 16384, true, true) },
26
+ { pattern: /seed-1\.6/i, metadata: md(262144, 32768, true, true) }
27
+ ]
28
+ },
29
+ // ============== OpenAI GPT-5 Family ==============
30
+ {
31
+ pattern: /gpt-5/i,
32
+ metadata: md(4e5, 128e3, true, true),
33
+ subPatterns: [
34
+ { pattern: /gpt-5\.2-pro/i, metadata: md(4e5, 128e3, true, true) },
35
+ { pattern: /gpt-5\.2-chat/i, metadata: md(128e3, 16384, true, true) },
36
+ { pattern: /gpt-5\.2/i, metadata: md(4e5, 128e3, true, true) },
37
+ { pattern: /gpt-5\.1-codex-max/i, metadata: md(4e5, 128e3, true, true) },
38
+ { pattern: /gpt-5\.1-codex-mini/i, metadata: md(4e5, 1e5, true, true) },
39
+ { pattern: /gpt-5\.1-codex/i, metadata: md(4e5, 128e3, true, true) },
40
+ { pattern: /gpt-5\.1-chat/i, metadata: md(128e3, 16384, true, true) },
41
+ { pattern: /gpt-5\.1/i, metadata: md(4e5, 128e3, true, true) },
42
+ { pattern: /gpt-5-image-mini/i, metadata: md(4e5, 128e3, true, true) },
43
+ { pattern: /gpt-5-image/i, metadata: md(4e5, 128e3, true, true) },
44
+ { pattern: /gpt-5-pro/i, metadata: md(4e5, 128e3, true, true) },
45
+ { pattern: /gpt-5-chat/i, metadata: md(128e3, 16384, true, true) },
46
+ { pattern: /gpt-5-nano/i, metadata: md(4e5, 128e3, true, true) },
47
+ { pattern: /gpt-5-mini/i, metadata: md(4e5, 128e3, true, true) },
48
+ { pattern: /gpt-5-codex/i, metadata: md(4e5, 128e3, true, true) }
49
+ ]
50
+ },
51
+ // ============== OpenAI GPT-4.1 Family ==============
52
+ {
53
+ pattern: /gpt-4\.1/i,
54
+ metadata: md(1047576, 32768, true, true),
55
+ subPatterns: [
56
+ { pattern: /gpt-4\.1-nano/i, metadata: md(1047576, 32768, true, true) },
57
+ { pattern: /gpt-4\.1-mini/i, metadata: md(1047576, 32768, true, true) }
58
+ ]
59
+ },
60
+ // ============== OpenAI o3/o4 Reasoning Models ==============
61
+ {
62
+ pattern: /o[34]/i,
63
+ metadata: md(2e5, 1e5, true, true),
64
+ subPatterns: [
65
+ { pattern: /o4-mini-high/i, metadata: md(2e5, 1e5, true, true) },
66
+ { pattern: /o4-mini-deep-research/i, metadata: md(2e5, 1e5, true, true) },
67
+ { pattern: /o4-mini/i, metadata: md(2e5, 1e5, true, true) },
68
+ { pattern: /o3-pro/i, metadata: md(2e5, 1e5, true, true) },
69
+ { pattern: /o3-mini-high/i, metadata: md(2e5, 1e5, true, false) },
70
+ { pattern: /o3-mini/i, metadata: md(2e5, 1e5, true, false) },
71
+ { pattern: /o3-deep-research/i, metadata: md(2e5, 1e5, true, true) }
72
+ ]
73
+ },
74
+ // ============== OpenAI GPT-4o Family ==============
75
+ {
76
+ pattern: /gpt-4o/i,
77
+ metadata: md(128e3, 16384, true, true),
78
+ subPatterns: [
79
+ { pattern: /gpt-4o-mini/i, metadata: md(128e3, 16384, true, true) },
80
+ { pattern: /chatgpt-4o/i, metadata: md(128e3, 16384, true, true) }
81
+ ]
82
+ },
83
+ // ============== OpenAI GPT-4 Turbo Family ==============
84
+ {
85
+ pattern: /gpt-4-turbo/i,
86
+ metadata: md(128e3, 4096, true, true),
87
+ subPatterns: [
88
+ { pattern: /gpt-4-turbo-preview/i, metadata: md(128e3, 4096, true, false) }
89
+ ]
90
+ },
91
+ // ============== OpenAI GPT-4 Family ==============
92
+ {
93
+ pattern: /gpt-4/i,
94
+ metadata: md(8192, 4096, true, false),
95
+ subPatterns: [
96
+ { pattern: /gpt-4-1106-preview/i, metadata: md(128e3, 4096, true, false) },
97
+ { pattern: /gpt-4-0314/i, metadata: md(8192, 4096, true, false) }
98
+ ]
99
+ },
100
+ // ============== OpenAI Codex Family ==============
101
+ { pattern: /codex-mini/i, metadata: md(2e5, 1e5, true, true) },
102
+ // ============== OpenAI GPT-OSS Models ==============
103
+ {
104
+ pattern: /gpt-oss/i,
105
+ metadata: md(131072, 65536, true, false),
106
+ subPatterns: [
107
+ { pattern: /gpt-oss-safeguard-20b/i, metadata: md(131072, 65536, true, false) },
108
+ { pattern: /gpt-oss-120b/i, metadata: md(131072, 65536, true, false) },
109
+ { pattern: /gpt-oss-20b/i, metadata: md(131072, 65536, true, false) }
110
+ ]
111
+ },
112
+ // ============== Anthropic Claude 4 Family ==============
113
+ {
114
+ pattern: /claude-(opus|sonnet|haiku)-4/i,
115
+ metadata: { maxInputTokens: 2e5, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
116
+ subPatterns: [
117
+ { pattern: /claude-opus-4\.5/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
118
+ { pattern: /claude-sonnet-4\.5/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
119
+ { pattern: /claude-haiku-4\.5/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
120
+ { pattern: /claude-opus-4\.1/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
121
+ { pattern: /claude-opus-4(?![.\d])/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
122
+ { pattern: /claude-sonnet-4(?![.\d])/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
123
+ ]
124
+ },
125
+ // ============== Anthropic Claude 3.x Family ==============
126
+ {
127
+ pattern: /claude-3/i,
128
+ metadata: md(2e5, 4096, true, true),
129
+ subPatterns: [
130
+ { pattern: /claude-3\.7-sonnet/i, metadata: md(2e5, 128e3, true, true) },
131
+ { pattern: /claude-3\.5-sonnet/i, metadata: md(2e5, 8192, true, true) },
132
+ { pattern: /claude-3\.5-haiku/i, metadata: md(2e5, 8192, true, true) },
133
+ { pattern: /claude-3-opus/i, metadata: md(2e5, 4096, true, true) },
134
+ { pattern: /claude-3-haiku/i, metadata: md(2e5, 4096, true, true) }
135
+ ]
136
+ },
137
+ // ============== Google Gemini 3 Family ==============
138
+ {
139
+ pattern: /gemini-3/i,
140
+ metadata: { maxInputTokens: 1048576, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
141
+ subPatterns: [
142
+ { pattern: /gemini-3-pro-image/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 32768, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
143
+ { pattern: /gemini-3-pro/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
144
+ { pattern: /gemini-3-flash/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
145
+ ]
146
+ },
147
+ // ============== Google Gemini 2.5 Family ==============
148
+ {
149
+ pattern: /gemini-2\.5/i,
150
+ metadata: { maxInputTokens: 1048576, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
151
+ subPatterns: [
152
+ { pattern: /gemini-2\.5-pro/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
153
+ { pattern: /gemini-2\.5-flash-image/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
154
+ { pattern: /gemini-2\.5-flash-lite/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
155
+ { pattern: /gemini-2\.5-flash/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
156
+ ]
157
+ },
158
+ // ============== Google Gemini 2.0 Family ==============
159
+ { pattern: /gemini-2\.0/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
160
+ // ============== Google Gemma Family ==============
161
+ {
162
+ pattern: /gemma/i,
163
+ metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" },
164
+ subPatterns: [
165
+ { pattern: /gemma-2-27b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
166
+ { pattern: /gemma-2-9b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
167
+ { pattern: /gemma-3n/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } }
168
+ ]
169
+ },
170
+ // ============== Qwen3 Family ==============
171
+ {
172
+ pattern: /qwen3/i,
173
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
174
+ subPatterns: [
175
+ { pattern: /qwen3-coder-(480b|plus)/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
176
+ { pattern: /qwen3-coder-(30b|flash)/i, metadata: { maxInputTokens: 16e4, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
177
+ { pattern: /qwen3-coder/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
178
+ { pattern: /qwen3-vl-235b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
179
+ { pattern: /qwen3-vl-32b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
180
+ { pattern: /qwen3-vl-30b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
181
+ { pattern: /qwen3-vl-8b/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
182
+ { pattern: /qwen3-vl/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
183
+ { pattern: /qwen3-max/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
184
+ { pattern: /qwen3-next/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
185
+ { pattern: /qwen3-235b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
186
+ { pattern: /qwen3-32b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
187
+ { pattern: /qwen3-30b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
188
+ { pattern: /qwen3-14b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
189
+ { pattern: /qwen3-8b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 2e4, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
190
+ { pattern: /qwen3-4b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
191
+ ]
192
+ },
193
+ // ============== Qwen2.5 Family ==============
194
+ {
195
+ pattern: /qwen2\.5/i,
196
+ metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
197
+ subPatterns: [
198
+ { pattern: /qwen2\.5-72b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
199
+ { pattern: /qwen2\.5-vl/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
200
+ { pattern: /qwen2\.5-coder/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
201
+ ]
202
+ },
203
+ // ============== Qwen Family ==============
204
+ {
205
+ pattern: /qwen/i,
206
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
207
+ subPatterns: [
208
+ { pattern: /qwen-coder-(480b|plus)/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
209
+ { pattern: /qwen-coder-(30b|flash)/i, metadata: { maxInputTokens: 16e4, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
210
+ { pattern: /qwen-coder/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
211
+ { pattern: /qwen-vl-235b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
212
+ { pattern: /qwen-vl-32b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
213
+ { pattern: /qwen-vl-30b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
214
+ { pattern: /qwen-vl-8b/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
215
+ { pattern: /qwen-vl/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
216
+ { pattern: /qwen-max/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
217
+ { pattern: /qwen-next/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
218
+ { pattern: /qwen-235b/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 262144, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
219
+ { pattern: /qwen-32b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
220
+ { pattern: /qwen-30b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
221
+ { pattern: /qwen-14b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 40960, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
222
+ { pattern: /qwen-8b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 2e4, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
223
+ { pattern: /qwen-4b/i, metadata: { maxInputTokens: 40960, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
224
+ ]
225
+ },
226
+ // ============== QwQ/QvQ Reasoning Models ==============
227
+ { pattern: /qwq-32b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
228
+ { pattern: /qwq/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
229
+ { pattern: /qvq/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 16384, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
230
+ // ============== DeepSeek Family ==============
231
+ {
232
+ pattern: /deepseek/i,
233
+ metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
234
+ subPatterns: [
235
+ { pattern: /deepseek-v3\.2-exp/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
236
+ { pattern: /deepseek-v3\.2-speciale/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
237
+ { pattern: /deepseek-v3\.2/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
238
+ { pattern: /deepseek-v3\.1-terminus/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
239
+ { pattern: /deepseek-v3\.1-nex-n1/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
240
+ { pattern: /deepseek-(v3\.1|chat-v3\.1)/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
241
+ { pattern: /deepseek-r1-distill-llama-70b/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
242
+ { pattern: /deepseek-r1-distill-qwen-32b/i, metadata: { maxInputTokens: 64e3, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
243
+ { pattern: /deepseek-r1-distill-qwen-14b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
244
+ { pattern: /deepseek-r1-0528-qwen3-8b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
245
+ { pattern: /deepseek-r1-0528/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
246
+ { pattern: /deepseek-r1/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
247
+ { pattern: /deepseek-prover-v2/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
248
+ { pattern: /deepseek-prover/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
249
+ { pattern: /deepseek-chat-v3-0324/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
250
+ { pattern: /deepseek-chat/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
251
+ ]
252
+ },
253
+ // ============== Meta Llama 4 Family ==============
254
+ {
255
+ pattern: /llama-4/i,
256
+ metadata: { maxInputTokens: 1048576, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
257
+ subPatterns: [
258
+ { pattern: /llama-4-maverick/i, metadata: { maxInputTokens: 1048576, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
259
+ { pattern: /llama-4-scout/i, metadata: { maxInputTokens: 1e7, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
260
+ ]
261
+ },
262
+ // ============== Meta Llama 3.x Family ==============
263
+ {
264
+ pattern: /llama-3(\.[123])?/i,
265
+ metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
266
+ subPatterns: [
267
+ { pattern: /llama-3\.3-70b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
268
+ { pattern: /llama-3\.2-90b-vision/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
269
+ { pattern: /llama-3\.2-11b-vision/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
270
+ { pattern: /llama-3\.2-1b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
271
+ { pattern: /llama-3\.1-405b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
272
+ { pattern: /llama-3\.1-70b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
273
+ { pattern: /llama-3\.1-8b/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
274
+ { pattern: /llama-3-70b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
275
+ { pattern: /llama-3-8b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
276
+ { pattern: /llama-3\.3-nemotron/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
277
+ ]
278
+ },
279
+ // ============== Meta Llama Guard Family ==============
280
+ {
281
+ pattern: /llama-guard/i,
282
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "other" },
283
+ subPatterns: [
284
+ { pattern: /llama-guard-4-12b/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "other" } },
285
+ { pattern: /llama-guard-3-8b/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "other" } },
286
+ { pattern: /llama-guard-2-8b/i, metadata: { maxInputTokens: 8192, maxOutputTokens: 8192, supportsToolCalling: true, supportsImageInput: false, modelType: "other" } }
287
+ ]
288
+ },
289
+ // ============== Mistral Family ==============
290
+ {
291
+ pattern: /(mistral|mixtral|pixtral|codestral|devstral)/i,
292
+ metadata: md(32768, 8192, true, false),
293
+ subPatterns: [
294
+ { pattern: /mistral-large/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
295
+ { pattern: /mistral-nemo/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
296
+ { pattern: /devstral/i, metadata: md(262144, 32768, true, false) },
297
+ { pattern: /pixtral/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
298
+ { pattern: /mistral-small/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
299
+ { pattern: /mixtral-8x22b/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
300
+ { pattern: /mixtral-8x7b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
301
+ { pattern: /mistral-7b/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
302
+ ]
303
+ },
304
+ // ============== xAI Grok Family ==============
305
+ {
306
+ pattern: /grok/i,
307
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
308
+ subPatterns: [
309
+ { pattern: /grok-4\.1-fast/i, metadata: { maxInputTokens: 2e6, maxOutputTokens: 3e4, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
310
+ { pattern: /grok-4-fast/i, metadata: { maxInputTokens: 2e6, maxOutputTokens: 3e4, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
311
+ { pattern: /grok-4/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
312
+ { pattern: /grok-3-mini/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
313
+ { pattern: /grok-3/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
314
+ { pattern: /grok-code-fast/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 1e4, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
315
+ ]
316
+ },
317
+ // ============== Moonshot/Kimi Family ==============
318
+ {
319
+ pattern: /(kimi|moonshot)/i,
320
+ metadata: md(262144, 262144, true, false),
321
+ subPatterns: [
322
+ { pattern: /kimi-k2/i, metadata: md(262144, 262144, true, false) }
323
+ ]
324
+ },
325
+ // ============== Amazon Nova Family ==============
326
+ {
327
+ pattern: /nova/i,
328
+ metadata: { maxInputTokens: 1e6, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" },
329
+ subPatterns: [
330
+ { pattern: /nova-premier/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
331
+ { pattern: /nova-2-lite/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 65535, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
332
+ { pattern: /nova-lite/i, metadata: { maxInputTokens: 3e5, maxOutputTokens: 5120, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
333
+ { pattern: /nova-micro/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 5120, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
334
+ { pattern: /nova-pro/i, metadata: { maxInputTokens: 3e5, maxOutputTokens: 5120, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } }
335
+ ]
336
+ },
337
+ // ============== Cohere Command Family ==============
338
+ {
339
+ pattern: /command/i,
340
+ metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
341
+ subPatterns: [
342
+ { pattern: /command-r-plus/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
343
+ { pattern: /command-r/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
344
+ ]
345
+ },
346
+ // ============== NVIDIA Nemotron Family ==============
347
+ {
348
+ pattern: /nemotron/i,
349
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
350
+ subPatterns: [
351
+ { pattern: /nemotron-3-nano/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
352
+ { pattern: /nemotron-nano.*vl/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
353
+ { pattern: /nemotron-nano/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
354
+ { pattern: /llama.*nemotron/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
355
+ ]
356
+ },
357
+ // ============== MiniMax Family ==============
358
+ { pattern: /minimax/i, metadata: { maxInputTokens: 1e6, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
359
+ // ============== Z.AI GLM Family ==============
360
+ {
361
+ pattern: /glm/i,
362
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
363
+ subPatterns: [
364
+ { pattern: /glm-4\.6v/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 24e3, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
365
+ { pattern: /glm-4\.6/i, metadata: { maxInputTokens: 204800, maxOutputTokens: 204800, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
366
+ { pattern: /glm-4\.5v/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
367
+ { pattern: /glm-4\.5/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
368
+ { pattern: /glm-4/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
369
+ ]
370
+ },
371
+ // ============== THUDM GLM Family ==============
372
+ // Note: This pattern is now subsumed by the Z.AI GLM pattern above
373
+ // ============== Baidu ERNIE Family ==============
374
+ { pattern: /ernie/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
375
+ // ============== Tencent Hunyuan Family ==============
376
+ { pattern: /hunyuan/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
377
+ // ============== AI21 Jamba Family ==============
378
+ { pattern: /jamba/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
379
+ // ============== Perplexity Sonar Family ==============
380
+ { pattern: /sonar/i, metadata: { maxInputTokens: 2e5, maxOutputTokens: 8e3, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
381
+ // ============== Microsoft Phi Family ==============
382
+ {
383
+ pattern: /phi-3/i,
384
+ metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
385
+ subPatterns: [
386
+ { pattern: /phi-3\.5-mini/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
387
+ { pattern: /phi-3-mini/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
388
+ { pattern: /phi-3-medium/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 4096, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
389
+ ]
390
+ },
391
+ // ============== IBM Granite Family ==============
392
+ { pattern: /granite/i, metadata: { maxInputTokens: 131e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
393
+ // ============== Nous Research Hermes Family ==============
394
+ { pattern: /hermes/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
395
+ { pattern: /deephermes/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
396
+ // ============== Inception Mercury Family ==============
397
+ { pattern: /mercury/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
398
+ // ============== StepFun Step Family ==============
399
+ { pattern: /step/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: true, modelType: "llm" } },
400
+ // ============== Deep Cogito Family ==============
401
+ { pattern: /cogito/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
402
+ // ============== Prime Intellect Family ==============
403
+ { pattern: /intellect/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
404
+ // ============== AllenAI Olmo Family ==============
405
+ { pattern: /olmo/i, metadata: { maxInputTokens: 65536, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
406
+ // ============== Arcee AI Family ==============
407
+ // Match model series names: trinity, virtuoso, coder, maestro, spotlight
408
+ {
409
+ pattern: /(trinity|virtuoso|maestro|spotlight)/i,
410
+ metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" },
411
+ subPatterns: [
412
+ { pattern: /virtuoso-large/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 64e3, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
413
+ { pattern: /maestro-reasoning/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 32e3, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
414
+ { pattern: /coder-large/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
415
+ { pattern: /spotlight/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65537, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
416
+ { pattern: /trinity-mini/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
417
+ ]
418
+ },
419
+ // ============== Meituan LongCat Family ==============
420
+ { pattern: /longcat/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
421
+ // ============== Morph Family ==============
422
+ { pattern: /morph/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 131072, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
423
+ // ============== Relace Family ==============
424
+ { pattern: /relace/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 128e3, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
425
+ // ============== TNG Chimera Family ==============
426
+ { pattern: /tng.*chimera/i, metadata: { maxInputTokens: 163840, maxOutputTokens: 163840, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
427
+ // ============== Xiaomi MiMo Family ==============
428
+ { pattern: /mimo/i, metadata: { maxInputTokens: 262144, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
429
+ // ============== Alibaba Tongyi Family ==============
430
+ { pattern: /tongyi/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 131072, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
431
+ // ============== KwaiPilot KAT Family ==============
432
+ { pattern: /(kwaipilot.*kat|kat-coder)/i, metadata: { maxInputTokens: 256e3, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
433
+ // ============== Liquid AI LFM Family ==============
434
+ { pattern: /lfm/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: false, supportsImageInput: false, modelType: "llm" } },
435
+ // ============== OpenGVLab InternVL Family ==============
436
+ { pattern: /internvl/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 32768, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
437
+ // ============== ByteDance UI-TARS Family ==============
438
+ { pattern: /ui-tars/i, metadata: { maxInputTokens: 128e3, maxOutputTokens: 2048, supportsToolCalling: false, supportsImageInput: true, modelType: "llm" } },
439
+ // ============== Aion Labs Family ==============
440
+ { pattern: /aion/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 32768, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
441
+ // ============== Essential AI Rnj Family ==============
442
+ { pattern: /rnj/i, metadata: { maxInputTokens: 32768, maxOutputTokens: 16384, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } },
443
+ // ============== Switchpoint Router ==============
444
+ { pattern: /switchpoint/i, metadata: { maxInputTokens: 131072, maxOutputTokens: 65536, supportsToolCalling: true, supportsImageInput: false, modelType: "llm" } }
445
+ ];
446
// Patterns that classify non-chat model types (embedding, rerank, image,
// audio, moderation). getModelMetadata consults this list only after both
// the raw and the normalized model id fail to match every LLM family
// pattern, and the FIRST entry whose regex tests true wins — so order
// within this list matters.
var NON_LLM_PATTERNS = [
  // Embedding models
  { pattern: /embed(ding)?/i, modelType: "embedding" },
  { pattern: /text-embed/i, modelType: "embedding" },
  { pattern: /bge-/i, modelType: "embedding" },
  { pattern: /e5-/i, modelType: "embedding" },
  { pattern: /gte-/i, modelType: "embedding" },
  { pattern: /sentence-/i, modelType: "embedding" },
  { pattern: /all-minilm/i, modelType: "embedding" },
  { pattern: /nomic-embed/i, modelType: "embedding" },
  { pattern: /jina-embed/i, modelType: "embedding" },
  { pattern: /voyage-/i, modelType: "embedding" },
  // Rerank models
  { pattern: /rerank/i, modelType: "rerank" },
  { pattern: /ranker/i, modelType: "rerank" },
  { pattern: /jina-reranker/i, modelType: "rerank" }, // NOTE(review): unreachable — /rerank/ above already matches
  // Image generation models
  { pattern: /dall-e/i, modelType: "image" },
  { pattern: /stable-diffusion/i, modelType: "image" },
  { pattern: /sdxl/i, modelType: "image" },
  { pattern: /midjourney/i, modelType: "image" },
  { pattern: /imagen/i, modelType: "image" },
  { pattern: /flux-\d/i, modelType: "image" },
  { pattern: /playground-v/i, modelType: "image" },
  { pattern: /ideogram/i, modelType: "image" },
  { pattern: /recraft/i, modelType: "image" },
  // Audio models
  { pattern: /whisper/i, modelType: "audio" },
  { pattern: /tts-/i, modelType: "audio" },
  { pattern: /^speech[-_]/i, modelType: "audio" },
  { pattern: /voxtral/i, modelType: "audio" },
  // Moderation models (bucketed as "other")
  { pattern: /moderation/i, modelType: "other" },
  { pattern: /content-filter/i, modelType: "other" },
  { pattern: /guard/i, modelType: "other" }, // broad substring match, e.g. "*-guard" variants
  { pattern: /safeguard/i, modelType: "other" } // NOTE(review): unreachable — /guard/ above already matches
];
483
/**
 * Normalize a model id for pattern matching: lowercase it, strip a known
 * vendor prefix (e.g. "openai/"), then repeatedly strip known variant
 * suffixes ("-instruct", ":free", date stamps, ...) until the id is stable.
 *
 * Repeating the suffix pass fixes stacked suffixes such as
 * "llama-3-instruct:free": the previous single fixed-order pass stripped
 * only ":free" and left "llama-3-instruct" un-normalized.
 *
 * @param {string} modelId - Raw model id as reported by the API.
 * @returns {string} Normalized id suitable for family-pattern lookup.
 */
function normalizeModelId(modelId) {
  const vendorPrefix = /^(openai\/|anthropic\/|google\/|meta-llama\/|mistralai\/|cohere\/|qwen\/|deepseek\/|deepseek-ai\/|microsoft\/|nvidia\/|x-ai\/|amazon\/|ai21\/|perplexity\/|ibm-granite\/|z-ai\/|thudm\/|baidu\/|tencent\/|moonshotai\/|stepfun-ai\/|nousresearch\/|prime-intellect\/|allenai\/|arcee-ai\/|meituan\/|morph\/|relace\/|inception\/|minimax\/|opengvlab\/|bytedance\/|liquid\/|tngtech\/|xiaomi\/|alibaba\/|kwaipilot\/|deepcogito\/|essentialai\/)/;
  // Variant suffixes; each regex is end-anchored so only trailing tokens go.
  const suffixPatterns = [
    /-instruct$/,
    /-chat$/,
    /-preview$/,
    /-latest$/,
    /:free$/,
    /:extended$/,
    /:exacto$/,
    /:thinking$/,
    /@\d{4}-\d{2}-\d{2}$/, // "@YYYY-MM-DD" version stamp
    /[-_]\d{8}$/ // "-YYYYMMDD" / "_YYYYMMDD" date stamp
  ];
  let id = modelId.toLowerCase().replace(vendorPrefix, "");
  let previous;
  do {
    previous = id;
    for (const pattern of suffixPatterns) {
      id = id.replace(pattern, "");
    }
  } while (id !== previous);
  return id;
}
486
/**
 * Test a model id against a single family pattern.
 * RegExp patterns are applied directly; string patterns act as a
 * case-insensitive prefix of the model id.
 * @param {string} modelId
 * @param {RegExp|string} pattern
 * @returns {boolean}
 */
function matchPattern(modelId, pattern) {
  const isRegex = pattern instanceof RegExp;
  return isRegex
    ? pattern.test(modelId)
    : modelId.toLowerCase().startsWith(pattern.toLowerCase());
}
492
/**
 * Resolve metadata by walking the hierarchical pattern list.
 * The first matching family wins; within it, the first matching sub-pattern
 * refines the result (recursively, when it has sub-patterns of its own),
 * otherwise the family-level metadata is used.
 * @param {string} modelId
 * @param {Array} patterns - Entries of { pattern, metadata, subPatterns? }.
 * @returns {object|null} Matched metadata, or null when no family matches.
 */
function matchHierarchicalPattern(modelId, patterns) {
  for (const family of patterns) {
    if (!matchPattern(modelId, family.pattern)) {
      continue;
    }
    for (const sub of family.subPatterns ?? []) {
      if (!matchPattern(modelId, sub.pattern)) {
        continue;
      }
      if (sub.subPatterns) {
        // Descend one more level; fall back to sub.metadata if nothing deeper hits.
        const deeper = matchHierarchicalPattern(modelId, [sub]);
        if (deeper) {
          return deeper;
        }
      }
      return sub.metadata;
    }
    return family.metadata;
  }
  return null;
}
513
/**
 * Look up metadata for a model id.
 * Tries the raw id against the LLM family patterns, then a normalized form
 * (vendor prefix / variant suffixes stripped), then the non-LLM type
 * patterns; finally falls back to DEFAULT_MODEL_METADATA.
 * @param {string} modelId
 * @returns {object} Model metadata (never null).
 */
function getModelMetadata(modelId) {
  const candidates = [modelId, normalizeModelId(modelId)];
  for (const candidate of candidates) {
    const match = matchHierarchicalPattern(candidate, MODEL_FAMILY_PATTERNS);
    if (match) {
      return match;
    }
  }
  const nonLlm = NON_LLM_PATTERNS.find(({ pattern }) => pattern.test(modelId));
  if (nonLlm) {
    return { ...DEFAULT_MODEL_METADATA, modelType: nonLlm.modelType };
  }
  return DEFAULT_MODEL_METADATA;
}
533
/**
 * Public wrapper around getModelMetadata, exported under a name that makes
 * clear the result comes from static patterns (no API call involved).
 * @param {string} modelId
 * @returns {object} Pattern-derived model metadata.
 */
function getModelMetadataFromPatterns(modelId) {
  const metadata = getModelMetadata(modelId);
  return metadata;
}
536
/**
 * Merge metadata reported by the API over metadata derived from patterns.
 * An API field wins only when it is non-nullish, so explicit `false`/`0`
 * values from the API are preserved while missing fields fall back to the
 * pattern defaults.
 * @param {object|undefined} apiMetadata - Partial metadata from /models.
 * @param {object} patternMetadata - Complete pattern-derived metadata.
 * @returns {object} Fully-populated metadata record.
 */
function mergeMetadata(apiMetadata, patternMetadata) {
  const pick = (field) => apiMetadata?.[field] ?? patternMetadata[field];
  return {
    maxInputTokens: pick("maxInputTokens"),
    maxOutputTokens: pick("maxOutputTokens"),
    supportsToolCalling: pick("supportsToolCalling"),
    supportsImageInput: pick("supportsImageInput"),
    modelType: pick("modelType")
  };
}
545
+
546
// src/modelMetadata.ts
// Bundler-generated aliases: src/modelMetadata.ts re-exports these two
// functions from the inlined model-metadata package, so the bundle binds
// them under "2"-suffixed names.
var getModelMetadataFromPatterns2 = getModelMetadataFromPatterns;
var mergeMetadata2 = mergeMetadata;
549
+
550
+ // src/modelDiscovery.ts
551
/**
 * Discovers models from an OpenAI-compatible `/models` endpoint and enriches
 * each entry with metadata (pattern-derived, overridden by API-reported
 * fields). Successful results are cached for `cacheDuration` ms.
 */
var ModelDiscovery = class {
  /**
   * @param {string} baseURL - API base URL (e.g. "https://host/v1").
   * @param {string} apiKey - Bearer token for the Authorization header.
   * @param {object} headers - Extra headers; spread after Authorization, so
   *   they may override it.
   * @param {Function} [fetchFn=fetch] - Injectable fetch implementation.
   */
  constructor(baseURL, apiKey, headers, fetchFn = fetch) {
    this.baseURL = baseURL;
    this.apiKey = apiKey;
    this.headers = headers;
    this.fetchFn = fetchFn;
  }
  // Cache of model objects keyed by model id.
  modelsCache = /* @__PURE__ */ new Map();
  lastFetchTime = 0;
  cacheDuration = 5 * 60 * 1e3; // 5 minutes
  /**
   * Fetch models from the API /models endpoint, serving from the cache
   * while it is fresh. Returns [] on any HTTP or network failure (logged,
   * never thrown).
   * @returns {Promise<object[]>}
   */
  async fetchModels() {
    const startedAt = Date.now();
    const cacheIsFresh = this.modelsCache.size > 0 && startedAt - this.lastFetchTime < this.cacheDuration;
    if (cacheIsFresh) {
      return [...this.modelsCache.values()];
    }
    try {
      const url = `${this.baseURL}/models`;
      const response = await this.fetchFn(url, {
        method: "GET",
        headers: {
          Authorization: `Bearer ${this.apiKey}`,
          ...this.headers
        }
      });
      if (!response.ok) {
        console.warn(`Failed to fetch models from ${url}: ${response.status} ${response.statusText}`);
        return [];
      }
      const payload = await response.json();
      const models = payload.data || [];
      for (const model of models) {
        // API-reported fields win over pattern-derived defaults.
        model.metadata = mergeMetadata2(
          this.extractMetadataFromModel(model),
          getModelMetadataFromPatterns2(model.id)
        );
        this.modelsCache.set(model.id, model);
      }
      this.lastFetchTime = startedAt;
      return models;
    } catch (error) {
      console.error("Error fetching models:", error);
      return [];
    }
  }
  /**
   * Get metadata for a specific model: cached entry first, then a fresh
   * fetch, then the static pattern tables as a last resort.
   * @param {string} modelId
   * @returns {Promise<object>}
   */
  async getModelMetadata(modelId) {
    const fromCache = this.modelsCache.get(modelId)?.metadata;
    if (fromCache) {
      return fromCache;
    }
    await this.fetchModels();
    const afterFetch = this.modelsCache.get(modelId)?.metadata;
    if (afterFetch) {
      return afterFetch;
    }
    return getModelMetadataFromPatterns2(modelId);
  }
  /**
   * Translate a raw /models entry into partial metadata, covering the field
   * name variants used by different OpenAI-compatible servers.
   * @param {object} model - Raw model object from the API.
   * @returns {object} Partial metadata (only the fields the API reported).
   */
  extractMetadataFromModel(model) {
    const metadata = {};
    // Generic fields first; the more specific max_input_tokens /
    // max_output_tokens overwrite them when present.
    if (model.context_length) {
      metadata.maxInputTokens = model.context_length;
    }
    if (model.max_tokens) {
      metadata.maxOutputTokens = model.max_tokens;
    }
    if (model.max_input_tokens) {
      metadata.maxInputTokens = model.max_input_tokens;
    }
    if (model.max_output_tokens) {
      metadata.maxOutputTokens = model.max_output_tokens;
    }
    // First explicitly boolean tool-calling flag wins (false is meaningful).
    const toolFlag = [model.function_call, model.supports_function_calling, model.supports_tools].find(
      (flag) => typeof flag === "boolean"
    );
    if (toolFlag !== void 0) {
      metadata.supportsToolCalling = toolFlag;
    }
    if (typeof model.vision === "boolean") {
      metadata.supportsImageInput = model.vision;
    } else if (typeof model.supports_vision === "boolean") {
      metadata.supportsImageInput = model.supports_vision;
    } else if (model.modalities?.includes("vision") || model.modalities?.includes("image")) {
      metadata.supportsImageInput = true;
    }
    return metadata;
  }
  /** Drop all cached models and force the next fetchModels() to hit the API. */
  clearCache() {
    this.modelsCache.clear();
    this.lastFetchTime = 0;
  }
};
656
+
657
+ // src/utils.ts
658
/**
 * Compile a glob-style wildcard pattern into an anchored, case-insensitive
 * RegExp. `*` matches any run of characters, `?` matches one character;
 * all other regex metacharacters are treated literally.
 * @param {string} pattern
 * @returns {RegExp}
 */
function wildcardToRegex(pattern) {
  const source = pattern
    .replace(/[.+^${}()|[\]\\]/g, "\\$&") // escape regex metachars (not * or ?)
    .replace(/\*/g, ".*")
    .replace(/\?/g, ".");
  return new RegExp(`^${source}$`, "i");
}
663
/**
 * Check whether a model id matches a wildcard pattern (case-insensitive).
 * @param {string} modelId
 * @param {string} pattern - Glob-style pattern (see wildcardToRegex).
 * @returns {boolean}
 */
function matchesWildcard(modelId, pattern) {
  return wildcardToRegex(pattern).test(modelId);
}
667
/**
 * Pick the most specific wildcard pattern that matches a model id.
 * Exact (wildcard-free) patterns beat wildcard patterns; ties are broken by
 * preferring the longer pattern. Returns undefined when nothing matches.
 * @param {string} modelId
 * @param {string[]} patterns
 * @returns {string|undefined}
 */
function findBestMatch(modelId, patterns) {
  const hasWildcard = (p) => p.includes("*") || p.includes("?");
  const bySpecificity = (a, b) => {
    if (hasWildcard(a) !== hasWildcard(b)) {
      return hasWildcard(a) ? 1 : -1;
    }
    return b.length - a.length;
  };
  const candidates = patterns.filter((p) => matchesWildcard(modelId, p));
  if (candidates.length === 0) {
    return void 0;
  }
  return candidates.sort(bySpecificity)[0];
}
681
+
682
+ // src/config.ts
683
+ import { readFileSync, existsSync } from "node:fs";
684
+ import { homedir } from "node:os";
685
+ import { join, isAbsolute } from "node:path";
686
/**
 * Resolve the OpenCode data directory.
 * Honors XDG_DATA_HOME when it is set to an absolute path; otherwise falls
 * back to ~/.local/share/opencode.
 * @returns {string} Path of the data directory.
 */
function getDataDir() {
  const xdgDataHome = process.env["XDG_DATA_HOME"];
  if (!xdgDataHome || !isAbsolute(xdgDataHome)) {
    return join(homedir(), ".local", "share", "opencode");
  }
  return join(xdgDataHome, "opencode");
}
693
/**
 * Resolve the OpenCode config directory.
 * Honors XDG_CONFIG_HOME when it is set to an absolute path; otherwise falls
 * back to ~/.config/opencode.
 * @returns {string} Path of the config directory.
 */
function getConfigDir() {
  const xdgConfigHome = process.env["XDG_CONFIG_HOME"];
  if (!xdgConfigHome || !isAbsolute(xdgConfigHome)) {
    return join(homedir(), ".config", "opencode");
  }
  return join(xdgConfigHome, "opencode");
}
700
// Name of the provider config file looked up in the data and config dirs.
var CONFIG_FILENAME = "oai2lm.json";
701
/**
 * Read and parse a JSON file, returning undefined (never throwing) when the
 * file is missing, unreadable, or contains invalid JSON. Parse and I/O
 * failures are logged as warnings.
 * @param {string} filepath
 * @returns {object|undefined}
 */
function readJsonFile(filepath) {
  try {
    if (!existsSync(filepath)) {
      return void 0;
    }
    return JSON.parse(readFileSync(filepath, "utf-8"));
  } catch (error) {
    if (error instanceof SyntaxError) {
      console.warn(`Failed to parse JSON in config file ${filepath}: ${error.message}`);
    } else {
      console.warn(`Failed to read config file ${filepath}:`, error);
    }
    return void 0;
  }
}
718
/**
 * Load the first readable oai2lm.json, checking the data directory before
 * the config directory.
 * @returns {object|undefined} Parsed config, or undefined when none exists.
 */
function loadConfig() {
  const candidates = [getDataDir(), getConfigDir()].map((dir) => join(dir, CONFIG_FILENAME));
  for (const candidate of candidates) {
    const parsed = readJsonFile(candidate);
    if (parsed) {
      return parsed;
    }
  }
  return void 0;
}
731
/**
 * Resolve the API key by priority: explicit argument, the OAI2LM_API_KEY
 * environment variable, then the config file. Explicit/env values are
 * trimmed; blank values fall through to the next source.
 * @param {string|undefined} explicitKey
 * @param {object|undefined} config
 * @returns {string|undefined}
 */
function resolveApiKey(explicitKey, config) {
  for (const candidate of [explicitKey, process.env["OAI2LM_API_KEY"]]) {
    if (typeof candidate === "string" && candidate.trim() !== "") {
      return candidate.trim();
    }
  }
  return config?.apiKey;
}
741
/**
 * Resolve the API base URL by priority: explicit argument, the
 * OAI2LM_BASE_URL environment variable, the config file, then the OpenAI
 * default. Values are trimmed; blank values fall through.
 * @param {string|undefined} explicitURL
 * @param {object|undefined} config
 * @returns {string}
 */
function resolveBaseURL(explicitURL, config) {
  for (const candidate of [explicitURL, process.env["OAI2LM_BASE_URL"], config?.baseURL]) {
    if (typeof candidate === "string" && candidate.trim().length > 0) {
      return candidate.trim();
    }
  }
  return "https://api.openai.com/v1";
}
754
/**
 * Build complete provider settings by layering explicit overrides on top of
 * the config file and environment variables.
 * @param {object} [overrides] - Partial settings taking highest priority.
 * @returns {object} Fully-resolved settings.
 * @throws {Error} When no API key can be resolved from any source.
 */
function createSettingsFromConfig(overrides) {
  const config = loadConfig();
  const apiKey = resolveApiKey(overrides?.apiKey, config);
  if (!apiKey) {
    throw new Error(
      "API key not found. Please set OAI2LM_API_KEY environment variable, or add apiKey to " + join(getDataDir(), CONFIG_FILENAME) + " or " + join(getConfigDir(), CONFIG_FILENAME) + ", or pass apiKey in settings."
    );
  }
  // Maps merge key-by-key; explicit overrides win over config-file entries.
  const modelOverrides = {
    ...config?.modelOverrides ?? {},
    ...overrides?.modelOverrides ?? {}
  };
  return {
    apiKey,
    baseURL: resolveBaseURL(overrides?.baseURL, config),
    name: overrides?.name ?? config?.name ?? "oai2lm",
    headers: {
      ...config?.headers ?? {},
      ...overrides?.headers ?? {}
    },
    autoDiscoverModels: overrides?.autoDiscoverModels ?? config?.autoDiscoverModels ?? true,
    // Omit the key entirely when no overrides exist at all.
    modelOverrides: Object.keys(modelOverrides).length > 0 ? modelOverrides : void 0,
    fetch: overrides?.fetch
  };
}
782
/**
 * Default location of the oai2lm.json config file (in the data directory).
 * @returns {string}
 */
function getConfigFilePath() {
  const configFilePath = join(getDataDir(), CONFIG_FILENAME);
  return configFilePath;
}
785
+
786
+ // src/provider.ts
787
/**
 * Create a callable OpenCode provider backed by @ai-sdk/openai-compatible.
 * Calling the returned function with a model id yields a language model;
 * the function also carries the base provider's model factories (bound) and
 * discovery/override helpers, plus a read-only `providerName` property.
 *
 * Unless settings.autoDiscoverModels is explicitly false, a background
 * model-discovery fetch is kicked off immediately (failures only warn).
 *
 * @param {object} settings - Resolved settings (apiKey, baseURL, name,
 *   headers, modelOverrides, autoDiscoverModels, fetch).
 * @returns {Function} Callable provider with helper methods attached.
 */
function createOAI2LMProvider(settings) {
  const providerName = settings.name || "oai2lm";
  const overrides = settings.modelOverrides || {};
  const fetchImpl = settings.fetch || fetch;
  const baseProvider = createOpenAICompatible({
    baseURL: settings.baseURL,
    name: providerName,
    apiKey: settings.apiKey,
    headers: settings.headers,
    fetch: fetchImpl
  });
  const discovery = new ModelDiscovery(
    settings.baseURL,
    settings.apiKey,
    settings.headers || {},
    fetchImpl
  );
  const discoverModels = async () => discovery.fetchModels();
  const lookupMetadata = async (modelId) => discovery.getModelMetadata(modelId);
  const clearModelCache = () => {
    discovery.clearCache();
  };
  // Resolve the per-model override whose wildcard pattern best matches.
  const getModelOverride = (modelId) => {
    const winner = findBestMatch(modelId, Object.keys(overrides));
    return winner ? overrides[winner] : void 0;
  };
  if (settings.autoDiscoverModels !== false) {
    // Fire-and-forget cache warm-up; a failure must not break provider setup.
    discoverModels().catch((err) => {
      console.warn("Failed to auto-discover models:", err);
    });
  }
  const provider = function(modelId) {
    return baseProvider.languageModel(modelId);
  };
  Object.assign(provider, {
    languageModel: baseProvider.languageModel.bind(baseProvider),
    chatModel: baseProvider.chatModel.bind(baseProvider),
    completionModel: baseProvider.completionModel.bind(baseProvider),
    textEmbeddingModel: baseProvider.textEmbeddingModel.bind(baseProvider),
    imageModel: baseProvider.imageModel.bind(baseProvider),
    discoverModels,
    getModelMetadata: lookupMetadata,
    clearModelCache,
    getModelOverride
  });
  Object.defineProperty(provider, "providerName", {
    value: providerName,
    writable: false,
    enumerable: true
  });
  return provider;
}
842
/**
 * Convenience factory: resolve settings from the config file / environment
 * (see createSettingsFromConfig), then build the provider from them.
 * @param {object} [overrides] - Partial settings taking highest priority.
 * @returns {Function} Callable provider.
 * @throws {Error} When no API key can be resolved.
 */
function createOAI2LMProviderFromConfig(overrides) {
  return createOAI2LMProvider(createSettingsFromConfig(overrides));
}
846
+ export {
847
+ createOAI2LMProvider,
848
+ createOAI2LMProviderFromConfig,
849
+ createSettingsFromConfig,
850
+ getConfigDir,
851
+ getConfigFilePath,
852
+ getDataDir,
853
+ getModelMetadataFromPatterns2 as getModelMetadataFromPatterns,
854
+ loadConfig,
855
+ resolveApiKey,
856
+ resolveBaseURL
857
+ };
858
+ //# sourceMappingURL=index.js.map