@funkai/models 0.3.0 → 0.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. package/.turbo/turbo-build.log +34 -33
  2. package/CHANGELOG.md +36 -0
  3. package/dist/alibaba-B6q4Ng1R.mjs.map +1 -1
  4. package/dist/amazon-bedrock-Cv9AHQBH.mjs.map +1 -1
  5. package/dist/anthropic-yB7ST97_.mjs.map +1 -1
  6. package/dist/cerebras-COfl7XM-.mjs.map +1 -1
  7. package/dist/cohere-B7TgO0hT.mjs.map +1 -1
  8. package/dist/deepinfra-B0GxUwCG.mjs.map +1 -1
  9. package/dist/deepseek-D64ZEsvS.mjs.map +1 -1
  10. package/dist/fireworks-ai-DJYvdAi_.mjs.map +1 -1
  11. package/dist/google-BypRl349.mjs.map +1 -1
  12. package/dist/google-vertex-DbS-zTGD.mjs.map +1 -1
  13. package/dist/groq-ei_PerYi.mjs.map +1 -1
  14. package/dist/huggingface-DaM1EeLP.mjs.map +1 -1
  15. package/dist/inception-CspEzqNV.mjs.map +1 -1
  16. package/dist/index.d.mts +1 -1
  17. package/dist/index.d.mts.map +1 -1
  18. package/dist/index.mjs +14 -2
  19. package/dist/index.mjs.map +1 -1
  20. package/dist/llama-Cf3-koap.mjs.map +1 -1
  21. package/dist/mistral-BI9MdAO4.mjs.map +1 -1
  22. package/dist/nvidia-COHacuoa.mjs.map +1 -1
  23. package/dist/openai-C0nCfZUq.mjs.map +1 -1
  24. package/dist/openrouter-DSFzxKQb.mjs.map +1 -1
  25. package/dist/perplexity-zeZ2WlBU.mjs.map +1 -1
  26. package/dist/providers/alibaba.d.mts +1 -1
  27. package/dist/providers/amazon-bedrock.d.mts +1 -1
  28. package/dist/providers/anthropic.d.mts +1 -1
  29. package/dist/providers/cerebras.d.mts +1 -1
  30. package/dist/providers/cohere.d.mts +1 -1
  31. package/dist/providers/deepinfra.d.mts +1 -1
  32. package/dist/providers/deepseek.d.mts +1 -1
  33. package/dist/providers/fireworks-ai.d.mts +1 -1
  34. package/dist/providers/google-vertex.d.mts +1 -1
  35. package/dist/providers/google.d.mts +1 -1
  36. package/dist/providers/groq.d.mts +1 -1
  37. package/dist/providers/huggingface.d.mts +1 -1
  38. package/dist/providers/inception.d.mts +1 -1
  39. package/dist/providers/llama.d.mts +1 -1
  40. package/dist/providers/mistral.d.mts +1 -1
  41. package/dist/providers/nvidia.d.mts +1 -1
  42. package/dist/providers/openai.d.mts +1 -1
  43. package/dist/providers/openrouter.d.mts +1 -1
  44. package/dist/providers/perplexity.d.mts +1 -1
  45. package/dist/providers/togetherai.d.mts +1 -1
  46. package/dist/providers/xai.d.mts +1 -1
  47. package/dist/togetherai-BvcxUfPE.mjs.map +1 -1
  48. package/dist/{types-DjdaZckF.d.mts → types-DIzolT_s.d.mts} +61 -21
  49. package/dist/types-DIzolT_s.d.mts.map +1 -0
  50. package/dist/xai-fSuAkQJo.mjs.map +1 -1
  51. package/package.json +6 -3
  52. package/scripts/generate-models.ts +147 -56
  53. package/src/catalog/index.test.ts +8 -8
  54. package/src/catalog/index.ts +5 -1
  55. package/src/catalog/providers/alibaba.ts +91 -91
  56. package/src/catalog/providers/amazon-bedrock.ts +205 -185
  57. package/src/catalog/providers/anthropic.ts +87 -62
  58. package/src/catalog/providers/cerebras.ts +9 -9
  59. package/src/catalog/providers/cohere.ts +16 -16
  60. package/src/catalog/providers/deepinfra.ts +71 -71
  61. package/src/catalog/providers/deepseek.ts +3 -3
  62. package/src/catalog/providers/fireworks-ai.ts +36 -36
  63. package/src/catalog/providers/google-vertex.ts +62 -62
  64. package/src/catalog/providers/google.ts +69 -69
  65. package/src/catalog/providers/groq.ts +24 -24
  66. package/src/catalog/providers/huggingface.ts +52 -52
  67. package/src/catalog/providers/inception.ts +9 -9
  68. package/src/catalog/providers/index.ts +1 -0
  69. package/src/catalog/providers/llama.ts +7 -7
  70. package/src/catalog/providers/mistral.ts +60 -60
  71. package/src/catalog/providers/nvidia.ts +84 -84
  72. package/src/catalog/providers/openai.ts +115 -115
  73. package/src/catalog/providers/openrouter.ts +448 -433
  74. package/src/catalog/providers/perplexity.ts +9 -9
  75. package/src/catalog/providers/togetherai.ts +47 -47
  76. package/src/catalog/providers/xai.ts +49 -49
  77. package/src/catalog/types.ts +60 -20
  78. package/src/cost/calculate.test.ts +11 -11
  79. package/src/provider/registry.ts +21 -2
  80. package/src/provider/types.ts +1 -1
  81. package/tsconfig.json +2 -1
  82. package/tsdown.config.ts +7 -3
  83. package/dist/types-DjdaZckF.d.mts.map +0 -1
@@ -31,8 +31,8 @@ export const GROQ_MODELS = [
31
31
  provider: "groq",
32
32
  family: "qwen",
33
33
  pricing: { input: 2.9e-7, output: 3.9e-7 },
34
- contextWindow: 131072,
35
- maxOutput: 16384,
34
+ contextWindow: 131_072,
35
+ maxOutput: 16_384,
36
36
  modalities: { input: ["text"], output: ["text"] },
37
37
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: false },
38
38
  },
@@ -42,8 +42,8 @@ export const GROQ_MODELS = [
42
42
  provider: "groq",
43
43
  family: "llama",
44
44
  pricing: { input: 5e-8, output: 8e-8 },
45
- contextWindow: 131072,
46
- maxOutput: 131072,
45
+ contextWindow: 131_072,
46
+ maxOutput: 131_072,
47
47
  modalities: { input: ["text"], output: ["text"] },
48
48
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: false },
49
49
  },
@@ -64,7 +64,7 @@ export const GROQ_MODELS = [
64
64
  provider: "groq",
65
65
  family: "deepseek-thinking",
66
66
  pricing: { input: 7.5e-7, output: 9.9e-7 },
67
- contextWindow: 131072,
67
+ contextWindow: 131_072,
68
68
  maxOutput: 8192,
69
69
  modalities: { input: ["text"], output: ["text"] },
70
70
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: false },
@@ -86,8 +86,8 @@ export const GROQ_MODELS = [
86
86
  provider: "groq",
87
87
  family: "mistral",
88
88
  pricing: { input: 7.9e-7, output: 7.9e-7 },
89
- contextWindow: 32768,
90
- maxOutput: 32768,
89
+ contextWindow: 32_768,
90
+ maxOutput: 32_768,
91
91
  modalities: { input: ["text"], output: ["text"] },
92
92
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: false },
93
93
  },
@@ -97,8 +97,8 @@ export const GROQ_MODELS = [
97
97
  provider: "groq",
98
98
  family: "llama",
99
99
  pricing: { input: 5.9e-7, output: 7.9e-7 },
100
- contextWindow: 131072,
101
- maxOutput: 32768,
100
+ contextWindow: 131_072,
101
+ maxOutput: 32_768,
102
102
  modalities: { input: ["text"], output: ["text"] },
103
103
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: false },
104
104
  },
@@ -118,9 +118,9 @@ export const GROQ_MODELS = [
118
118
  name: "Kimi K2 Instruct",
119
119
  provider: "groq",
120
120
  family: "kimi",
121
- pricing: { input: 0.000001, output: 0.000003 },
122
- contextWindow: 131072,
123
- maxOutput: 16384,
121
+ pricing: { input: 0.000_001, output: 0.000_003 },
122
+ contextWindow: 131_072,
123
+ maxOutput: 16_384,
124
124
  modalities: { input: ["text"], output: ["text"] },
125
125
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: false },
126
126
  },
@@ -129,9 +129,9 @@ export const GROQ_MODELS = [
129
129
  name: "Kimi K2 Instruct 0905",
130
130
  provider: "groq",
131
131
  family: "kimi",
132
- pricing: { input: 0.000001, output: 0.000003 },
133
- contextWindow: 262144,
134
- maxOutput: 16384,
132
+ pricing: { input: 0.000_001, output: 0.000_003 },
133
+ contextWindow: 262_144,
134
+ maxOutput: 16_384,
135
135
  modalities: { input: ["text"], output: ["text"] },
136
136
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: true },
137
137
  },
@@ -141,8 +141,8 @@ export const GROQ_MODELS = [
141
141
  provider: "groq",
142
142
  family: "qwen",
143
143
  pricing: { input: 2.9e-7, output: 5.9e-7 },
144
- contextWindow: 131072,
145
- maxOutput: 16384,
144
+ contextWindow: 131_072,
145
+ maxOutput: 16_384,
146
146
  modalities: { input: ["text"], output: ["text"] },
147
147
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: false },
148
148
  },
@@ -152,7 +152,7 @@ export const GROQ_MODELS = [
152
152
  provider: "groq",
153
153
  family: "llama",
154
154
  pricing: { input: 1.1e-7, output: 3.4e-7 },
155
- contextWindow: 131072,
155
+ contextWindow: 131_072,
156
156
  maxOutput: 8192,
157
157
  modalities: { input: ["text", "image"], output: ["text"] },
158
158
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: true },
@@ -163,7 +163,7 @@ export const GROQ_MODELS = [
163
163
  provider: "groq",
164
164
  family: "llama",
165
165
  pricing: { input: 2e-7, output: 2e-7 },
166
- contextWindow: 131072,
166
+ contextWindow: 131_072,
167
167
  maxOutput: 1024,
168
168
  modalities: { input: ["text", "image"], output: ["text"] },
169
169
  capabilities: { reasoning: false, toolCall: false, attachment: false, structuredOutput: false },
@@ -174,7 +174,7 @@ export const GROQ_MODELS = [
174
174
  provider: "groq",
175
175
  family: "llama",
176
176
  pricing: { input: 2e-7, output: 6e-7 },
177
- contextWindow: 131072,
177
+ contextWindow: 131_072,
178
178
  maxOutput: 8192,
179
179
  modalities: { input: ["text", "image"], output: ["text"] },
180
180
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: true },
@@ -185,8 +185,8 @@ export const GROQ_MODELS = [
185
185
  provider: "groq",
186
186
  family: "gpt-oss",
187
187
  pricing: { input: 1.5e-7, output: 6e-7 },
188
- contextWindow: 131072,
189
- maxOutput: 65536,
188
+ contextWindow: 131_072,
189
+ maxOutput: 65_536,
190
190
  modalities: { input: ["text"], output: ["text"] },
191
191
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: true },
192
192
  },
@@ -196,8 +196,8 @@ export const GROQ_MODELS = [
196
196
  provider: "groq",
197
197
  family: "gpt-oss",
198
198
  pricing: { input: 7.5e-8, output: 3e-7 },
199
- contextWindow: 131072,
200
- maxOutput: 65536,
199
+ contextWindow: 131_072,
200
+ maxOutput: 65_536,
201
201
  modalities: { input: ["text"], output: ["text"] },
202
202
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: true },
203
203
  },
@@ -20,8 +20,8 @@ export const HUGGINGFACE_MODELS = [
20
20
  provider: "huggingface",
21
21
  family: "glm",
22
22
  pricing: { input: 0, output: 0 },
23
- contextWindow: 200000,
24
- maxOutput: 128000,
23
+ contextWindow: 200_000,
24
+ maxOutput: 128_000,
25
25
  modalities: { input: ["text"], output: ["text"] },
26
26
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: false },
27
27
  },
@@ -30,9 +30,9 @@ export const HUGGINGFACE_MODELS = [
30
30
  name: "GLM-4.7",
31
31
  provider: "huggingface",
32
32
  family: "glm",
33
- pricing: { input: 6e-7, output: 0.0000022, cacheRead: 1.1e-7 },
34
- contextWindow: 204800,
35
- maxOutput: 131072,
33
+ pricing: { input: 6e-7, output: 0.000_002_2, cacheRead: 1.1e-7 },
34
+ contextWindow: 204_800,
35
+ maxOutput: 131_072,
36
36
  modalities: { input: ["text"], output: ["text"] },
37
37
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: false },
38
38
  },
@@ -41,9 +41,9 @@ export const HUGGINGFACE_MODELS = [
41
41
  name: "GLM-5",
42
42
  provider: "huggingface",
43
43
  family: "glm",
44
- pricing: { input: 0.000001, output: 0.0000032, cacheRead: 2e-7 },
45
- contextWindow: 202752,
46
- maxOutput: 131072,
44
+ pricing: { input: 0.000_001, output: 0.000_003_2, cacheRead: 2e-7 },
45
+ contextWindow: 202_752,
46
+ maxOutput: 131_072,
47
47
  modalities: { input: ["text"], output: ["text"] },
48
48
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: false },
49
49
  },
@@ -53,7 +53,7 @@ export const HUGGINGFACE_MODELS = [
53
53
  provider: "huggingface",
54
54
  family: "mimo",
55
55
  pricing: { input: 1e-7, output: 3e-7 },
56
- contextWindow: 262144,
56
+ contextWindow: 262_144,
57
57
  maxOutput: 4096,
58
58
  modalities: { input: ["text"], output: ["text"] },
59
59
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: false },
@@ -63,9 +63,9 @@ export const HUGGINGFACE_MODELS = [
63
63
  name: "MiniMax-M2.5",
64
64
  provider: "huggingface",
65
65
  family: "minimax",
66
- pricing: { input: 3e-7, output: 0.0000012, cacheRead: 3e-8 },
67
- contextWindow: 204800,
68
- maxOutput: 131072,
66
+ pricing: { input: 3e-7, output: 0.000_001_2, cacheRead: 3e-8 },
67
+ contextWindow: 204_800,
68
+ maxOutput: 131_072,
69
69
  modalities: { input: ["text"], output: ["text"] },
70
70
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: false },
71
71
  },
@@ -74,9 +74,9 @@ export const HUGGINGFACE_MODELS = [
74
74
  name: "MiniMax-M2.1",
75
75
  provider: "huggingface",
76
76
  family: "minimax",
77
- pricing: { input: 3e-7, output: 0.0000012 },
78
- contextWindow: 204800,
79
- maxOutput: 131072,
77
+ pricing: { input: 3e-7, output: 0.000_001_2 },
78
+ contextWindow: 204_800,
79
+ maxOutput: 131_072,
80
80
  modalities: { input: ["text"], output: ["text"] },
81
81
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: false },
82
82
  },
@@ -85,9 +85,9 @@ export const HUGGINGFACE_MODELS = [
85
85
  name: "DeepSeek-R1-0528",
86
86
  provider: "huggingface",
87
87
  family: "deepseek-thinking",
88
- pricing: { input: 0.000003, output: 0.000005 },
89
- contextWindow: 163840,
90
- maxOutput: 163840,
88
+ pricing: { input: 0.000_003, output: 0.000_005 },
89
+ contextWindow: 163_840,
90
+ maxOutput: 163_840,
91
91
  modalities: { input: ["text"], output: ["text"] },
92
92
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: false },
93
93
  },
@@ -97,8 +97,8 @@ export const HUGGINGFACE_MODELS = [
97
97
  provider: "huggingface",
98
98
  family: "deepseek",
99
99
  pricing: { input: 2.8e-7, output: 4e-7 },
100
- contextWindow: 163840,
101
- maxOutput: 65536,
100
+ contextWindow: 163_840,
101
+ maxOutput: 65_536,
102
102
  modalities: { input: ["text"], output: ["text"] },
103
103
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: false },
104
104
  },
@@ -107,9 +107,9 @@ export const HUGGINGFACE_MODELS = [
107
107
  name: "Kimi-K2-Instruct",
108
108
  provider: "huggingface",
109
109
  family: "kimi",
110
- pricing: { input: 0.000001, output: 0.000003 },
111
- contextWindow: 131072,
112
- maxOutput: 16384,
110
+ pricing: { input: 0.000_001, output: 0.000_003 },
111
+ contextWindow: 131_072,
112
+ maxOutput: 16_384,
113
113
  modalities: { input: ["text"], output: ["text"] },
114
114
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: false },
115
115
  },
@@ -118,9 +118,9 @@ export const HUGGINGFACE_MODELS = [
118
118
  name: "Kimi-K2-Instruct-0905",
119
119
  provider: "huggingface",
120
120
  family: "kimi",
121
- pricing: { input: 0.000001, output: 0.000003 },
122
- contextWindow: 262144,
123
- maxOutput: 16384,
121
+ pricing: { input: 0.000_001, output: 0.000_003 },
122
+ contextWindow: 262_144,
123
+ maxOutput: 16_384,
124
124
  modalities: { input: ["text"], output: ["text"] },
125
125
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: false },
126
126
  },
@@ -129,9 +129,9 @@ export const HUGGINGFACE_MODELS = [
129
129
  name: "Kimi-K2.5",
130
130
  provider: "huggingface",
131
131
  family: "kimi",
132
- pricing: { input: 6e-7, output: 0.000003, cacheRead: 1e-7 },
133
- contextWindow: 262144,
134
- maxOutput: 262144,
132
+ pricing: { input: 6e-7, output: 0.000_003, cacheRead: 1e-7 },
133
+ contextWindow: 262_144,
134
+ maxOutput: 262_144,
135
135
  modalities: { input: ["text", "image", "video"], output: ["text"] },
136
136
  capabilities: { reasoning: true, toolCall: true, attachment: true, structuredOutput: false },
137
137
  },
@@ -140,9 +140,9 @@ export const HUGGINGFACE_MODELS = [
140
140
  name: "Kimi-K2-Thinking",
141
141
  provider: "huggingface",
142
142
  family: "kimi-thinking",
143
- pricing: { input: 6e-7, output: 0.0000025, cacheRead: 1.5e-7 },
144
- contextWindow: 262144,
145
- maxOutput: 262144,
143
+ pricing: { input: 6e-7, output: 0.000_002_5, cacheRead: 1.5e-7 },
144
+ contextWindow: 262_144,
145
+ maxOutput: 262_144,
146
146
  modalities: { input: ["text"], output: ["text"] },
147
147
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: false },
148
148
  },
@@ -151,9 +151,9 @@ export const HUGGINGFACE_MODELS = [
151
151
  name: "Qwen3-Next-80B-A3B-Instruct",
152
152
  provider: "huggingface",
153
153
  family: "qwen",
154
- pricing: { input: 2.5e-7, output: 0.000001 },
155
- contextWindow: 262144,
156
- maxOutput: 66536,
154
+ pricing: { input: 2.5e-7, output: 0.000_001 },
155
+ contextWindow: 262_144,
156
+ maxOutput: 66_536,
157
157
  modalities: { input: ["text"], output: ["text"] },
158
158
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: false },
159
159
  },
@@ -162,9 +162,9 @@ export const HUGGINGFACE_MODELS = [
162
162
  name: "Qwen3.5-397B-A17B",
163
163
  provider: "huggingface",
164
164
  family: "qwen",
165
- pricing: { input: 6e-7, output: 0.0000036 },
166
- contextWindow: 262144,
167
- maxOutput: 32768,
165
+ pricing: { input: 6e-7, output: 0.000_003_6 },
166
+ contextWindow: 262_144,
167
+ maxOutput: 32_768,
168
168
  modalities: { input: ["text", "image"], output: ["text"] },
169
169
  capabilities: { reasoning: true, toolCall: true, attachment: true, structuredOutput: false },
170
170
  },
@@ -173,9 +173,9 @@ export const HUGGINGFACE_MODELS = [
173
173
  name: "Qwen3-235B-A22B-Thinking-2507",
174
174
  provider: "huggingface",
175
175
  family: "qwen",
176
- pricing: { input: 3e-7, output: 0.000003 },
177
- contextWindow: 262144,
178
- maxOutput: 131072,
176
+ pricing: { input: 3e-7, output: 0.000_003 },
177
+ contextWindow: 262_144,
178
+ maxOutput: 131_072,
179
179
  modalities: { input: ["text"], output: ["text"] },
180
180
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: false },
181
181
  },
@@ -184,9 +184,9 @@ export const HUGGINGFACE_MODELS = [
184
184
  name: "Qwen3-Coder-Next",
185
185
  provider: "huggingface",
186
186
  family: "qwen",
187
- pricing: { input: 2e-7, output: 0.0000015 },
188
- contextWindow: 262144,
189
- maxOutput: 65536,
187
+ pricing: { input: 2e-7, output: 0.000_001_5 },
188
+ contextWindow: 262_144,
189
+ maxOutput: 65_536,
190
190
  modalities: { input: ["text"], output: ["text"] },
191
191
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: false },
192
192
  },
@@ -195,9 +195,9 @@ export const HUGGINGFACE_MODELS = [
195
195
  name: "Qwen3-Coder-480B-A35B-Instruct",
196
196
  provider: "huggingface",
197
197
  family: "qwen",
198
- pricing: { input: 0.000002, output: 0.000002 },
199
- contextWindow: 262144,
200
- maxOutput: 66536,
198
+ pricing: { input: 0.000_002, output: 0.000_002 },
199
+ contextWindow: 262_144,
200
+ maxOutput: 66_536,
201
201
  modalities: { input: ["text"], output: ["text"] },
202
202
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: false },
203
203
  },
@@ -207,7 +207,7 @@ export const HUGGINGFACE_MODELS = [
207
207
  provider: "huggingface",
208
208
  family: "qwen",
209
209
  pricing: { input: 1e-8, output: 0 },
210
- contextWindow: 32000,
210
+ contextWindow: 32_000,
211
211
  maxOutput: 2048,
212
212
  modalities: { input: ["text"], output: ["text"] },
213
213
  capabilities: { reasoning: false, toolCall: false, attachment: false, structuredOutput: false },
@@ -218,7 +218,7 @@ export const HUGGINGFACE_MODELS = [
218
218
  provider: "huggingface",
219
219
  family: "qwen",
220
220
  pricing: { input: 1e-8, output: 0 },
221
- contextWindow: 32000,
221
+ contextWindow: 32_000,
222
222
  maxOutput: 4096,
223
223
  modalities: { input: ["text"], output: ["text"] },
224
224
  capabilities: { reasoning: false, toolCall: false, attachment: false, structuredOutput: false },
@@ -228,9 +228,9 @@ export const HUGGINGFACE_MODELS = [
228
228
  name: "Qwen3-Next-80B-A3B-Thinking",
229
229
  provider: "huggingface",
230
230
  family: "qwen",
231
- pricing: { input: 3e-7, output: 0.000002 },
232
- contextWindow: 262144,
233
- maxOutput: 131072,
231
+ pricing: { input: 3e-7, output: 0.000_002 },
232
+ contextWindow: 262_144,
233
+ maxOutput: 131_072,
234
234
  modalities: { input: ["text"], output: ["text"] },
235
235
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: false },
236
236
  },
@@ -20,8 +20,8 @@ export const INCEPTION_MODELS = [
20
20
  provider: "inception",
21
21
  family: "mercury",
22
22
  pricing: { input: 2.5e-7, output: 7.5e-7, cacheRead: 2.5e-8 },
23
- contextWindow: 128000,
24
- maxOutput: 50000,
23
+ contextWindow: 128_000,
24
+ maxOutput: 50_000,
25
25
  modalities: { input: ["text"], output: ["text"] },
26
26
  capabilities: { reasoning: true, toolCall: true, attachment: false, structuredOutput: true },
27
27
  },
@@ -30,9 +30,9 @@ export const INCEPTION_MODELS = [
30
30
  name: "Mercury",
31
31
  provider: "inception",
32
32
  family: "mercury",
33
- pricing: { input: 2.5e-7, output: 0.000001, cacheRead: 2.5e-7, cacheWrite: 0.000001 },
34
- contextWindow: 128000,
35
- maxOutput: 16384,
33
+ pricing: { input: 2.5e-7, output: 0.000_001, cacheRead: 2.5e-7, cacheWrite: 0.000_001 },
34
+ contextWindow: 128_000,
35
+ maxOutput: 16_384,
36
36
  modalities: { input: ["text"], output: ["text"] },
37
37
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: false },
38
38
  },
@@ -42,7 +42,7 @@ export const INCEPTION_MODELS = [
42
42
  provider: "inception",
43
43
  family: "",
44
44
  pricing: { input: 2.5e-7, output: 7.5e-7, cacheRead: 2.5e-8 },
45
- contextWindow: 128000,
45
+ contextWindow: 128_000,
46
46
  maxOutput: 8192,
47
47
  modalities: { input: ["text"], output: ["text"] },
48
48
  capabilities: { reasoning: true, toolCall: false, attachment: false, structuredOutput: false },
@@ -52,9 +52,9 @@ export const INCEPTION_MODELS = [
52
52
  name: "Mercury Coder",
53
53
  provider: "inception",
54
54
  family: "mercury",
55
- pricing: { input: 2.5e-7, output: 0.000001, cacheRead: 2.5e-7, cacheWrite: 0.000001 },
56
- contextWindow: 128000,
57
- maxOutput: 16384,
55
+ pricing: { input: 2.5e-7, output: 0.000_001, cacheRead: 2.5e-7, cacheWrite: 0.000_001 },
56
+ contextWindow: 128_000,
57
+ maxOutput: 16_384,
58
58
  modalities: { input: ["text"], output: ["text"] },
59
59
  capabilities: { reasoning: false, toolCall: true, attachment: false, structuredOutput: false },
60
60
  },
@@ -11,6 +11,7 @@
11
11
  // Update: pnpm --filter=@funkai/models generate:models
12
12
  // ──────────────────────────────────────────────────────────────
13
13
 
14
+ // oxlint-disable eslint-plugin-import/max-dependencies
14
15
  import type { ModelDefinition } from "../types.js";
15
16
  import { ALIBABA_MODELS } from "./alibaba.js";
16
17
  import { AMAZON_BEDROCK_MODELS } from "./amazon-bedrock.js";
@@ -20,7 +20,7 @@ export const LLAMA_MODELS = [
20
20
  provider: "llama",
21
21
  family: "llama",
22
22
  pricing: { input: 0, output: 0 },
23
- contextWindow: 128000,
23
+ contextWindow: 128_000,
24
24
  maxOutput: 4096,
25
25
  modalities: { input: ["text"], output: ["text"] },
26
26
  capabilities: { reasoning: false, toolCall: true, attachment: true, structuredOutput: false },
@@ -31,7 +31,7 @@ export const LLAMA_MODELS = [
31
31
  provider: "llama",
32
32
  family: "llama",
33
33
  pricing: { input: 0, output: 0 },
34
- contextWindow: 128000,
34
+ contextWindow: 128_000,
35
35
  maxOutput: 4096,
36
36
  modalities: { input: ["text", "image"], output: ["text"] },
37
37
  capabilities: { reasoning: false, toolCall: true, attachment: true, structuredOutput: false },
@@ -42,7 +42,7 @@ export const LLAMA_MODELS = [
42
42
  provider: "llama",
43
43
  family: "llama",
44
44
  pricing: { input: 0, output: 0 },
45
- contextWindow: 128000,
45
+ contextWindow: 128_000,
46
46
  maxOutput: 4096,
47
47
  modalities: { input: ["text"], output: ["text"] },
48
48
  capabilities: { reasoning: false, toolCall: true, attachment: true, structuredOutput: false },
@@ -53,7 +53,7 @@ export const LLAMA_MODELS = [
53
53
  provider: "llama",
54
54
  family: "llama",
55
55
  pricing: { input: 0, output: 0 },
56
- contextWindow: 128000,
56
+ contextWindow: 128_000,
57
57
  maxOutput: 4096,
58
58
  modalities: { input: ["text"], output: ["text"] },
59
59
  capabilities: { reasoning: false, toolCall: true, attachment: true, structuredOutput: false },
@@ -64,7 +64,7 @@ export const LLAMA_MODELS = [
64
64
  provider: "llama",
65
65
  family: "llama",
66
66
  pricing: { input: 0, output: 0 },
67
- contextWindow: 128000,
67
+ contextWindow: 128_000,
68
68
  maxOutput: 4096,
69
69
  modalities: { input: ["text"], output: ["text"] },
70
70
  capabilities: { reasoning: false, toolCall: true, attachment: true, structuredOutput: false },
@@ -75,7 +75,7 @@ export const LLAMA_MODELS = [
75
75
  provider: "llama",
76
76
  family: "llama",
77
77
  pricing: { input: 0, output: 0 },
78
- contextWindow: 128000,
78
+ contextWindow: 128_000,
79
79
  maxOutput: 4096,
80
80
  modalities: { input: ["text"], output: ["text"] },
81
81
  capabilities: { reasoning: false, toolCall: true, attachment: true, structuredOutput: false },
@@ -86,7 +86,7 @@ export const LLAMA_MODELS = [
86
86
  provider: "llama",
87
87
  family: "llama",
88
88
  pricing: { input: 0, output: 0 },
89
- contextWindow: 128000,
89
+ contextWindow: 128_000,
90
90
  maxOutput: 4096,
91
91
  modalities: { input: ["text", "image"], output: ["text"] },
92
92
  capabilities: { reasoning: false, toolCall: true, attachment: true, structuredOutput: false },