free-coding-models 0.1.62 → 0.1.64
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +118 -28
- package/bin/free-coding-models.js +717 -115
- package/lib/config.js +33 -3
- package/lib/utils.js +4 -3
- package/package.json +1 -1
- package/sources.js +57 -4
package/lib/config.js
CHANGED
|
@@ -17,6 +17,10 @@
|
|
|
17
17
|
* "cerebras": "csk_xxx",
|
|
18
18
|
* "sambanova": "sn-xxx",
|
|
19
19
|
* "openrouter": "sk-or-xxx",
|
|
20
|
+
* "huggingface":"hf_xxx",
|
|
21
|
+
* "replicate": "r8_xxx",
|
|
22
|
+
* "deepinfra": "di_xxx",
|
|
23
|
+
* "fireworks": "fw_xxx",
|
|
20
24
|
* "codestral": "csk-xxx",
|
|
21
25
|
* "hyperbolic": "eyJ...",
|
|
22
26
|
* "scaleway": "scw-xxx",
|
|
@@ -28,10 +32,19 @@
|
|
|
28
32
|
* "cerebras": { "enabled": true },
|
|
29
33
|
* "sambanova": { "enabled": true },
|
|
30
34
|
* "openrouter": { "enabled": true },
|
|
35
|
+
* "huggingface":{ "enabled": true },
|
|
36
|
+
* "replicate": { "enabled": true },
|
|
37
|
+
* "deepinfra": { "enabled": true },
|
|
38
|
+
* "fireworks": { "enabled": true },
|
|
31
39
|
* "codestral": { "enabled": true },
|
|
32
40
|
* "hyperbolic": { "enabled": true },
|
|
33
41
|
* "scaleway": { "enabled": true },
|
|
34
42
|
* "googleai": { "enabled": true }
|
|
43
|
+
* },
|
|
44
|
+
* "telemetry": {
|
|
45
|
+
* "enabled": true,
|
|
46
|
+
* "consentVersion": 1,
|
|
47
|
+
* "anonymousId": "anon_550e8400-e29b-41d4-a716-446655440000"
|
|
35
48
|
* }
|
|
36
49
|
* }
|
|
37
50
|
*
|
|
@@ -69,6 +82,10 @@ const ENV_VARS = {
|
|
|
69
82
|
cerebras: 'CEREBRAS_API_KEY',
|
|
70
83
|
sambanova: 'SAMBANOVA_API_KEY',
|
|
71
84
|
openrouter: 'OPENROUTER_API_KEY',
|
|
85
|
+
huggingface:['HUGGINGFACE_API_KEY', 'HF_TOKEN'],
|
|
86
|
+
replicate: 'REPLICATE_API_TOKEN',
|
|
87
|
+
deepinfra: ['DEEPINFRA_API_KEY', 'DEEPINFRA_TOKEN'],
|
|
88
|
+
fireworks: 'FIREWORKS_API_KEY',
|
|
72
89
|
codestral: 'CODESTRAL_API_KEY',
|
|
73
90
|
hyperbolic: 'HYPERBOLIC_API_KEY',
|
|
74
91
|
scaleway: 'SCALEWAY_API_KEY',
|
|
@@ -86,7 +103,7 @@ const ENV_VARS = {
|
|
|
86
103
|
* 📖 The migration reads the old file as a plain nvidia API key and writes
|
|
87
104
|
* a proper JSON config. The old file is NOT deleted (safety first).
|
|
88
105
|
*
|
|
89
|
-
* @returns {{ apiKeys: Record<string,string>, providers: Record<string,{enabled:boolean}> }}
|
|
106
|
+
* @returns {{ apiKeys: Record<string,string>, providers: Record<string,{enabled:boolean}>, telemetry: { enabled: boolean | null, consentVersion: number, anonymousId: string | null } }}
|
|
90
107
|
*/
|
|
91
108
|
export function loadConfig() {
|
|
92
109
|
// 📖 Try new JSON config first
|
|
@@ -97,6 +114,10 @@ export function loadConfig() {
|
|
|
97
114
|
// 📖 Ensure the shape is always complete — fill missing sections with defaults
|
|
98
115
|
if (!parsed.apiKeys) parsed.apiKeys = {}
|
|
99
116
|
if (!parsed.providers) parsed.providers = {}
|
|
117
|
+
if (!parsed.telemetry || typeof parsed.telemetry !== 'object') parsed.telemetry = { enabled: null, consentVersion: 0, anonymousId: null }
|
|
118
|
+
if (typeof parsed.telemetry.enabled !== 'boolean') parsed.telemetry.enabled = null
|
|
119
|
+
if (typeof parsed.telemetry.consentVersion !== 'number') parsed.telemetry.consentVersion = 0
|
|
120
|
+
if (typeof parsed.telemetry.anonymousId !== 'string' || !parsed.telemetry.anonymousId.trim()) parsed.telemetry.anonymousId = null
|
|
100
121
|
return parsed
|
|
101
122
|
} catch {
|
|
102
123
|
// 📖 Corrupted JSON — return empty config (user will re-enter keys)
|
|
@@ -129,7 +150,7 @@ export function loadConfig() {
|
|
|
129
150
|
* 📖 Uses mode 0o600 so the file is only readable by the owning user (API keys!).
|
|
130
151
|
* 📖 Pretty-prints JSON for human readability.
|
|
131
152
|
*
|
|
132
|
-
* @param {{ apiKeys: Record<string,string>, providers: Record<string,{enabled:boolean}> }} config
|
|
153
|
+
* @param {{ apiKeys: Record<string,string>, providers: Record<string,{enabled:boolean}>, telemetry?: { enabled?: boolean | null, consentVersion?: number, anonymousId?: string | null } }} config
|
|
133
154
|
*/
|
|
134
155
|
export function saveConfig(config) {
|
|
135
156
|
try {
|
|
@@ -154,7 +175,10 @@ export function saveConfig(config) {
|
|
|
154
175
|
export function getApiKey(config, providerKey) {
|
|
155
176
|
// 📖 Env var override — takes precedence over everything
|
|
156
177
|
const envVar = ENV_VARS[providerKey]
|
|
157
|
-
|
|
178
|
+
const envCandidates = Array.isArray(envVar) ? envVar : [envVar]
|
|
179
|
+
for (const candidate of envCandidates) {
|
|
180
|
+
if (candidate && process.env[candidate]) return process.env[candidate]
|
|
181
|
+
}
|
|
158
182
|
|
|
159
183
|
// 📖 Config file value
|
|
160
184
|
const key = config?.apiKeys?.[providerKey]
|
|
@@ -184,5 +208,11 @@ function _emptyConfig() {
|
|
|
184
208
|
return {
|
|
185
209
|
apiKeys: {},
|
|
186
210
|
providers: {},
|
|
211
|
+
// 📖 Telemetry consent is explicit. null = not decided yet.
|
|
212
|
+
telemetry: {
|
|
213
|
+
enabled: null,
|
|
214
|
+
consentVersion: 0,
|
|
215
|
+
anonymousId: null,
|
|
216
|
+
},
|
|
187
217
|
}
|
|
188
218
|
}
|
package/lib/utils.js
CHANGED
|
@@ -277,11 +277,11 @@ export function findBestModel(results) {
|
|
|
277
277
|
//
|
|
278
278
|
// 📖 Argument types:
|
|
279
279
|
// - API key: first positional arg that doesn't start with "--" (e.g., "nvapi-xxx")
|
|
280
|
-
// - Boolean flags: --best, --fiable, --opencode, --opencode-desktop, --openclaw (case-insensitive)
|
|
280
|
+
// - Boolean flags: --best, --fiable, --opencode, --opencode-desktop, --openclaw, --no-telemetry (case-insensitive)
|
|
281
281
|
// - Value flag: --tier <letter> (the next non-flag arg is the tier value)
|
|
282
282
|
//
|
|
283
283
|
// 📖 Returns:
|
|
284
|
-
// { apiKey, bestMode, fiableMode, openCodeMode, openCodeDesktopMode, openClawMode, tierFilter }
|
|
284
|
+
// { apiKey, bestMode, fiableMode, openCodeMode, openCodeDesktopMode, openClawMode, noTelemetry, tierFilter }
|
|
285
285
|
//
|
|
286
286
|
// 📖 Note: apiKey may be null here — the main CLI falls back to env vars and saved config.
|
|
287
287
|
export function parseArgs(argv) {
|
|
@@ -310,8 +310,9 @@ export function parseArgs(argv) {
|
|
|
310
310
|
const openCodeMode = flags.includes('--opencode')
|
|
311
311
|
const openCodeDesktopMode = flags.includes('--opencode-desktop')
|
|
312
312
|
const openClawMode = flags.includes('--openclaw')
|
|
313
|
+
const noTelemetry = flags.includes('--no-telemetry')
|
|
313
314
|
|
|
314
315
|
let tierFilter = tierValueIdx !== -1 ? args[tierValueIdx].toUpperCase() : null
|
|
315
316
|
|
|
316
|
-
return { apiKey, bestMode, fiableMode, openCodeMode, openCodeDesktopMode, openClawMode, tierFilter }
|
|
317
|
+
return { apiKey, bestMode, fiableMode, openCodeMode, openCodeDesktopMode, openClawMode, noTelemetry, tierFilter }
|
|
317
318
|
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "free-coding-models",
|
|
3
|
-
"version": "0.1.62",
|
|
3
|
+
"version": "0.1.64",
|
|
4
4
|
"description": "Find the fastest coding LLM models in seconds — ping free models from multiple providers, pick the best one for OpenCode, Cursor, or any AI coding assistant.",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"nvidia",
|
package/sources.js
CHANGED
|
@@ -27,8 +27,8 @@
|
|
|
27
27
|
* 📖 Secondary: https://swe-rebench.com (independent evals, scores are lower)
|
|
28
28
|
* 📖 Leaderboard tracker: https://www.marc0.dev/en/leaderboard
|
|
29
29
|
*
|
|
30
|
-
* @exports nvidiaNim, groq, cerebras, sambanova, openrouter, codestral, hyperbolic, scaleway, googleai — model arrays per provider
|
|
31
|
-
* @exports sources — map of { nvidia, groq, cerebras, sambanova, openrouter, codestral, hyperbolic, scaleway, googleai } each with { name, url, models }
|
|
30
|
+
* @exports nvidiaNim, groq, cerebras, sambanova, openrouter, huggingface, replicate, deepinfra, fireworks, codestral, hyperbolic, scaleway, googleai — model arrays per provider
|
|
31
|
+
* @exports sources — map of { nvidia, groq, cerebras, sambanova, openrouter, huggingface, replicate, deepinfra, fireworks, codestral, hyperbolic, scaleway, googleai } each with { name, url, models }
|
|
32
32
|
* @exports MODELS — flat array of [modelId, label, tier, sweScore, ctx, providerKey]
|
|
33
33
|
*
|
|
34
34
|
* 📖 MODELS now includes providerKey as 6th element so ping() knows which
|
|
@@ -139,13 +139,17 @@ export const sambanova = [
|
|
|
139
139
|
['Meta-Llama-3.3-70B-Instruct', 'Llama 3.3 70B', 'A-', '39.5%', '128k'],
|
|
140
140
|
// ── B tier ──
|
|
141
141
|
['Meta-Llama-3.1-8B-Instruct', 'Llama 3.1 8B', 'B', '28.8%', '128k'],
|
|
142
|
+
// ── A tier — requested Llama3-Groq coding tuned family ──
|
|
143
|
+
['Llama-3-Groq-70B-Tool-Use', 'Llama3-Groq 70B', 'A', '43.0%', '128k'],
|
|
142
144
|
]
|
|
143
145
|
|
|
144
146
|
// 📖 OpenRouter source - https://openrouter.ai
|
|
145
147
|
// 📖 Free :free models with shared quota — 50 free req/day
|
|
146
|
-
// 📖 API keys at https://openrouter.ai/
|
|
148
|
+
// 📖 API keys at https://openrouter.ai/keys
|
|
147
149
|
export const openrouter = [
|
|
148
|
-
['qwen/qwen3-coder:free', 'Qwen3 Coder 480B', 'S+', '70.6%', '256k'],
|
|
150
|
+
['qwen/qwen3-coder:480b-free', 'Qwen3 Coder 480B', 'S+', '70.6%', '256k'],
|
|
151
|
+
['mistralai/devstral-2-free', 'Devstral 2', 'S+', '72.2%', '256k'],
|
|
152
|
+
['mimo-v2-flash-free', 'Mimo V2 Flash', 'A', '45.0%', '128k'],
|
|
149
153
|
['stepfun/step-3.5-flash:free', 'Step 3.5 Flash', 'S+', '74.4%', '256k'],
|
|
150
154
|
['deepseek/deepseek-r1-0528:free', 'DeepSeek R1 0528', 'S', '61.0%', '128k'],
|
|
151
155
|
['qwen/qwen3-next-80b-a3b-instruct:free', 'Qwen3 80B Instruct', 'S', '65.0%', '128k'],
|
|
@@ -155,6 +159,35 @@ export const openrouter = [
|
|
|
155
159
|
['meta-llama/llama-3.3-70b-instruct:free', 'Llama 3.3 70B', 'A-', '39.5%', '128k'],
|
|
156
160
|
]
|
|
157
161
|
|
|
162
|
+
// 📖 Hugging Face Inference source - https://huggingface.co
|
|
163
|
+
// 📖 OpenAI-compatible endpoint via router.huggingface.co/v1
|
|
164
|
+
// 📖 Free monthly credits on developer accounts (~$0.10) — token at https://huggingface.co/settings/tokens
|
|
165
|
+
export const huggingface = [
|
|
166
|
+
['deepseek-ai/DeepSeek-V3-Coder', 'DeepSeek V3 Coder', 'S', '62.0%', '128k'],
|
|
167
|
+
['bigcode/starcoder2-15b', 'StarCoder2 15B', 'B', '25.0%', '16k'],
|
|
168
|
+
]
|
|
169
|
+
|
|
170
|
+
// 📖 Replicate source - https://replicate.com
|
|
171
|
+
// 📖 Uses predictions endpoint (not OpenAI chat-completions) with token auth
|
|
172
|
+
export const replicate = [
|
|
173
|
+
['codellama/CodeLlama-70b-Instruct-hf', 'CodeLlama 70B', 'A-', '39.0%', '16k'],
|
|
174
|
+
]
|
|
175
|
+
|
|
176
|
+
// 📖 DeepInfra source - https://deepinfra.com
|
|
177
|
+
// 📖 OpenAI-compatible endpoint: https://api.deepinfra.com/v1/openai/chat/completions
|
|
178
|
+
export const deepinfra = [
|
|
179
|
+
['mistralai/Mixtral-8x22B-Instruct-v0.1', 'Mixtral Code', 'B+', '32.0%', '64k'],
|
|
180
|
+
['meta-llama/Meta-Llama-3.1-70B-Instruct', 'Llama 3.1 70B', 'A-', '39.5%', '128k'],
|
|
181
|
+
]
|
|
182
|
+
|
|
183
|
+
// 📖 Fireworks AI source - https://fireworks.ai
|
|
184
|
+
// 📖 OpenAI-compatible endpoint: https://api.fireworks.ai/inference/v1/chat/completions
|
|
185
|
+
// 📖 Free trial credits: $1 for new developers
|
|
186
|
+
export const fireworks = [
|
|
187
|
+
['accounts/fireworks/models/deepseek-v3', 'DeepSeek V3', 'S', '62.0%', '128k'],
|
|
188
|
+
['accounts/fireworks/models/deepseek-r1', 'DeepSeek R1', 'S', '61.0%', '128k'],
|
|
189
|
+
]
|
|
190
|
+
|
|
158
191
|
// 📖 Mistral Codestral source - https://codestral.mistral.ai
|
|
159
192
|
// 📖 Free coding model — 30 req/min, 2000/day (phone number required for key)
|
|
160
193
|
// 📖 API keys at https://codestral.mistral.ai
|
|
@@ -225,6 +258,26 @@ export const sources = {
|
|
|
225
258
|
url: 'https://openrouter.ai/api/v1/chat/completions',
|
|
226
259
|
models: openrouter,
|
|
227
260
|
},
|
|
261
|
+
huggingface: {
|
|
262
|
+
name: 'Hugging Face',
|
|
263
|
+
url: 'https://router.huggingface.co/v1/chat/completions',
|
|
264
|
+
models: huggingface,
|
|
265
|
+
},
|
|
266
|
+
replicate: {
|
|
267
|
+
name: 'Replicate',
|
|
268
|
+
url: 'https://api.replicate.com/v1/predictions',
|
|
269
|
+
models: replicate,
|
|
270
|
+
},
|
|
271
|
+
deepinfra: {
|
|
272
|
+
name: 'DeepInfra',
|
|
273
|
+
url: 'https://api.deepinfra.com/v1/openai/chat/completions',
|
|
274
|
+
models: deepinfra,
|
|
275
|
+
},
|
|
276
|
+
fireworks: {
|
|
277
|
+
name: 'Fireworks',
|
|
278
|
+
url: 'https://api.fireworks.ai/inference/v1/chat/completions',
|
|
279
|
+
models: fireworks,
|
|
280
|
+
},
|
|
228
281
|
codestral: {
|
|
229
282
|
name: 'Codestral',
|
|
230
283
|
url: 'https://codestral.mistral.ai/v1/chat/completions',
|