free-coding-models 0.1.63 → 0.1.65

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/config.js CHANGED
@@ -17,6 +17,10 @@
17
17
  * "cerebras": "csk_xxx",
18
18
  * "sambanova": "sn-xxx",
19
19
  * "openrouter": "sk-or-xxx",
20
+ * "huggingface": "hf_xxx",
21
+ * "replicate": "r8_xxx",
22
+ * "deepinfra": "di_xxx",
23
+ * "fireworks": "fw_xxx",
20
24
  * "codestral": "csk-xxx",
21
25
  * "hyperbolic": "eyJ...",
22
26
  * "scaleway": "scw-xxx",
@@ -28,11 +32,18 @@
28
32
  * "cerebras": { "enabled": true },
29
33
  * "sambanova": { "enabled": true },
30
34
  * "openrouter": { "enabled": true },
35
+ * "huggingface": { "enabled": true },
36
+ * "replicate": { "enabled": true },
37
+ * "deepinfra": { "enabled": true },
38
+ * "fireworks": { "enabled": true },
31
39
  * "codestral": { "enabled": true },
32
40
  * "hyperbolic": { "enabled": true },
33
41
  * "scaleway": { "enabled": true },
34
42
  * "googleai": { "enabled": true }
35
43
  * },
44
+ * "favorites": [
45
+ * "nvidia/deepseek-ai/deepseek-v3.2"
46
+ * ],
36
47
  * "telemetry": {
37
48
  * "enabled": true,
38
49
  * "consentVersion": 1,
@@ -48,6 +59,7 @@
48
59
  * → loadConfig() — Read ~/.free-coding-models.json; auto-migrate old plain-text config if needed
49
60
  * → saveConfig(config) — Write config to ~/.free-coding-models.json with 0o600 permissions
50
61
  * → getApiKey(config, providerKey) — Get effective API key (env var override > config > null)
62
+ * → isProviderEnabled(config, providerKey) — Check if provider is enabled (defaults true)
51
63
  *
52
64
  * @exports loadConfig, saveConfig, getApiKey
53
65
  * @exports CONFIG_PATH — path to the JSON config file
@@ -74,6 +86,10 @@ const ENV_VARS = {
74
86
  cerebras: 'CEREBRAS_API_KEY',
75
87
  sambanova: 'SAMBANOVA_API_KEY',
76
88
  openrouter: 'OPENROUTER_API_KEY',
89
+ huggingface: ['HUGGINGFACE_API_KEY', 'HF_TOKEN'],
90
+ replicate: 'REPLICATE_API_TOKEN',
91
+ deepinfra: ['DEEPINFRA_API_KEY', 'DEEPINFRA_TOKEN'],
92
+ fireworks: 'FIREWORKS_API_KEY',
77
93
  codestral: 'CODESTRAL_API_KEY',
78
94
  hyperbolic: 'HYPERBOLIC_API_KEY',
79
95
  scaleway: 'SCALEWAY_API_KEY',
@@ -91,7 +107,7 @@ const ENV_VARS = {
91
107
  * 📖 The migration reads the old file as a plain nvidia API key and writes
92
108
  * a proper JSON config. The old file is NOT deleted (safety first).
93
109
  *
94
- * @returns {{ apiKeys: Record<string,string>, providers: Record<string,{enabled:boolean}>, telemetry: { enabled: boolean | null, consentVersion: number, anonymousId: string | null } }}
110
+ * @returns {{ apiKeys: Record<string,string>, providers: Record<string,{enabled:boolean}>, favorites: string[], telemetry: { enabled: boolean | null, consentVersion: number, anonymousId: string | null } }}
95
111
  */
96
112
  export function loadConfig() {
97
113
  // 📖 Try new JSON config first
@@ -102,6 +118,9 @@ export function loadConfig() {
102
118
  // 📖 Ensure the shape is always complete — fill missing sections with defaults
103
119
  if (!parsed.apiKeys) parsed.apiKeys = {}
104
120
  if (!parsed.providers) parsed.providers = {}
121
+ // 📖 Favorites: list of "providerKey/modelId" pinned rows.
122
+ if (!Array.isArray(parsed.favorites)) parsed.favorites = []
123
+ parsed.favorites = parsed.favorites.filter((fav) => typeof fav === 'string' && fav.trim().length > 0)
105
124
  if (!parsed.telemetry || typeof parsed.telemetry !== 'object') parsed.telemetry = { enabled: null, consentVersion: 0, anonymousId: null }
106
125
  if (typeof parsed.telemetry.enabled !== 'boolean') parsed.telemetry.enabled = null
107
126
  if (typeof parsed.telemetry.consentVersion !== 'number') parsed.telemetry.consentVersion = 0
@@ -138,7 +157,7 @@ export function loadConfig() {
138
157
  * 📖 Uses mode 0o600 so the file is only readable by the owning user (API keys!).
139
158
  * 📖 Pretty-prints JSON for human readability.
140
159
  *
141
- * @param {{ apiKeys: Record<string,string>, providers: Record<string,{enabled:boolean}>, telemetry?: { enabled?: boolean | null, consentVersion?: number, anonymousId?: string | null } }} config
160
+ * @param {{ apiKeys: Record<string,string>, providers: Record<string,{enabled:boolean}>, favorites?: string[], telemetry?: { enabled?: boolean | null, consentVersion?: number, anonymousId?: string | null } }} config
142
161
  */
143
162
  export function saveConfig(config) {
144
163
  try {
@@ -163,7 +182,10 @@ export function saveConfig(config) {
163
182
  export function getApiKey(config, providerKey) {
164
183
  // 📖 Env var override — takes precedence over everything
165
184
  const envVar = ENV_VARS[providerKey]
166
- if (envVar && process.env[envVar]) return process.env[envVar]
185
+ const envCandidates = Array.isArray(envVar) ? envVar : [envVar]
186
+ for (const candidate of envCandidates) {
187
+ if (candidate && process.env[candidate]) return process.env[candidate]
188
+ }
167
189
 
168
190
  // 📖 Config file value
169
191
  const key = config?.apiKeys?.[providerKey]
@@ -193,6 +215,8 @@ function _emptyConfig() {
193
215
  return {
194
216
  apiKeys: {},
195
217
  providers: {},
218
+ // 📖 Pinned favorites rendered at top of the table ("providerKey/modelId").
219
+ favorites: [],
196
220
  // 📖 Telemetry consent is explicit. null = not decided yet.
197
221
  telemetry: {
198
222
  enabled: null,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "free-coding-models",
3
- "version": "0.1.63",
3
+ "version": "0.1.65",
4
4
  "description": "Find the fastest coding LLM models in seconds — ping free models from multiple providers, pick the best one for OpenCode, Cursor, or any AI coding assistant.",
5
5
  "keywords": [
6
6
  "nvidia",
package/sources.js CHANGED
@@ -27,8 +27,8 @@
27
27
  * 📖 Secondary: https://swe-rebench.com (independent evals, scores are lower)
28
28
  * 📖 Leaderboard tracker: https://www.marc0.dev/en/leaderboard
29
29
  *
30
- * @exports nvidiaNim, groq, cerebras, sambanova, openrouter, codestral, hyperbolic, scaleway, googleai — model arrays per provider
31
- * @exports sources — map of { nvidia, groq, cerebras, sambanova, openrouter, codestral, hyperbolic, scaleway, googleai } each with { name, url, models }
30
+ * @exports nvidiaNim, groq, cerebras, sambanova, openrouter, huggingface, replicate, deepinfra, fireworks, codestral, hyperbolic, scaleway, googleai — model arrays per provider
31
+ * @exports sources — map of { nvidia, groq, cerebras, sambanova, openrouter, huggingface, replicate, deepinfra, fireworks, codestral, hyperbolic, scaleway, googleai } each with { name, url, models }
32
32
  * @exports MODELS — flat array of [modelId, label, tier, sweScore, ctx, providerKey]
33
33
  *
34
34
  * 📖 MODELS now includes providerKey as 6th element so ping() knows which
@@ -139,13 +139,17 @@ export const sambanova = [
139
139
  ['Meta-Llama-3.3-70B-Instruct', 'Llama 3.3 70B', 'A-', '39.5%', '128k'],
140
140
  // ── B tier ──
141
141
  ['Meta-Llama-3.1-8B-Instruct', 'Llama 3.1 8B', 'B', '28.8%', '128k'],
142
+ // ── A tier — requested Llama3-Groq coding tuned family ──
143
+ ['Llama-3-Groq-70B-Tool-Use', 'Llama3-Groq 70B', 'A', '43.0%', '128k'],
142
144
  ]
143
145
 
144
146
  // 📖 OpenRouter source - https://openrouter.ai
145
147
  // 📖 Free :free models with shared quota — 50 free req/day
146
- // 📖 API keys at https://openrouter.ai/settings/keys
148
+ // 📖 API keys at https://openrouter.ai/keys
147
149
  export const openrouter = [
148
- ['qwen/qwen3-coder:free', 'Qwen3 Coder', 'S+', '70.6%', '256k'],
150
+ ['qwen/qwen3-coder:480b-free', 'Qwen3 Coder 480B', 'S+', '70.6%', '256k'],
151
+ ['mistralai/devstral-2-free', 'Devstral 2', 'S+', '72.2%', '256k'],
152
+ ['mimo-v2-flash-free', 'Mimo V2 Flash', 'A', '45.0%', '128k'],
149
153
  ['stepfun/step-3.5-flash:free', 'Step 3.5 Flash', 'S+', '74.4%', '256k'],
150
154
  ['deepseek/deepseek-r1-0528:free', 'DeepSeek R1 0528', 'S', '61.0%', '128k'],
151
155
  ['qwen/qwen3-next-80b-a3b-instruct:free', 'Qwen3 80B Instruct', 'S', '65.0%', '128k'],
@@ -155,6 +159,35 @@ export const openrouter = [
155
159
  ['meta-llama/llama-3.3-70b-instruct:free', 'Llama 3.3 70B', 'A-', '39.5%', '128k'],
156
160
  ]
157
161
 
162
+ // 📖 Hugging Face Inference source - https://huggingface.co
163
+ // 📖 OpenAI-compatible endpoint via router.huggingface.co/v1
164
+ // 📖 Free monthly credits on developer accounts (~$0.10) — token at https://huggingface.co/settings/tokens
165
+ export const huggingface = [
166
+ ['deepseek-ai/DeepSeek-V3-Coder', 'DeepSeek V3 Coder', 'S', '62.0%', '128k'],
167
+ ['bigcode/starcoder2-15b', 'StarCoder2 15B', 'B', '25.0%', '16k'],
168
+ ]
169
+
170
+ // 📖 Replicate source - https://replicate.com
171
+ // 📖 Uses predictions endpoint (not OpenAI chat-completions) with token auth
172
+ export const replicate = [
173
+ ['codellama/CodeLlama-70b-Instruct-hf', 'CodeLlama 70B', 'A-', '39.0%', '16k'],
174
+ ]
175
+
176
+ // 📖 DeepInfra source - https://deepinfra.com
177
+ // 📖 OpenAI-compatible endpoint: https://api.deepinfra.com/v1/openai/chat/completions
178
+ export const deepinfra = [
179
+ ['mistralai/Mixtral-8x22B-Instruct-v0.1', 'Mixtral Code', 'B+', '32.0%', '64k'],
180
+ ['meta-llama/Meta-Llama-3.1-70B-Instruct', 'Llama 3.1 70B', 'A-', '39.5%', '128k'],
181
+ ]
182
+
183
+ // 📖 Fireworks AI source - https://fireworks.ai
184
+ // 📖 OpenAI-compatible endpoint: https://api.fireworks.ai/inference/v1/chat/completions
185
+ // 📖 Free trial credits: $1 for new developers
186
+ export const fireworks = [
187
+ ['accounts/fireworks/models/deepseek-v3', 'DeepSeek V3', 'S', '62.0%', '128k'],
188
+ ['accounts/fireworks/models/deepseek-r1', 'DeepSeek R1', 'S', '61.0%', '128k'],
189
+ ]
190
+
158
191
  // 📖 Mistral Codestral source - https://codestral.mistral.ai
159
192
  // 📖 Free coding model — 30 req/min, 2000/day (phone number required for key)
160
193
  // 📖 API keys at https://codestral.mistral.ai
@@ -225,6 +258,26 @@ export const sources = {
225
258
  url: 'https://openrouter.ai/api/v1/chat/completions',
226
259
  models: openrouter,
227
260
  },
261
+ huggingface: {
262
+ name: 'Hugging Face',
263
+ url: 'https://router.huggingface.co/v1/chat/completions',
264
+ models: huggingface,
265
+ },
266
+ replicate: {
267
+ name: 'Replicate',
268
+ url: 'https://api.replicate.com/v1/predictions',
269
+ models: replicate,
270
+ },
271
+ deepinfra: {
272
+ name: 'DeepInfra',
273
+ url: 'https://api.deepinfra.com/v1/openai/chat/completions',
274
+ models: deepinfra,
275
+ },
276
+ fireworks: {
277
+ name: 'Fireworks',
278
+ url: 'https://api.fireworks.ai/inference/v1/chat/completions',
279
+ models: fireworks,
280
+ },
228
281
  codestral: {
229
282
  name: 'Codestral',
230
283
  url: 'https://codestral.mistral.ai/v1/chat/completions',