free-coding-models 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json ADDED
@@ -0,0 +1,56 @@
1
+ {
2
+ "name": "free-coding-models",
3
+ "version": "0.1.1",
4
+ "description": "Find the fastest coding LLM models in seconds — ping free models from multiple providers, pick the best one for OpenCode, Cursor, or any AI coding assistant.",
5
+ "keywords": [
6
+ "nvidia",
7
+ "nim",
8
+ "llm",
9
+ "cli",
10
+ "ai",
11
+ "models",
12
+ "benchmark",
13
+ "latency",
14
+ "availability",
15
+ "deepseek",
16
+ "qwen",
17
+ "llama",
18
+ "mistral",
19
+ "glm",
20
+ "kimi",
21
+ "gpt",
22
+ "chatgpt",
23
+ "openai",
24
+ "api"
25
+ ],
26
+ "homepage": "https://github.com/vava-nessa/free-coding-models#readme",
27
+ "bugs": {
28
+ "url": "https://github.com/vava-nessa/free-coding-models/issues"
29
+ },
30
+ "repository": {
31
+ "type": "git",
32
+ "url": "git+https://github.com/vava-nessa/free-coding-models.git"
33
+ },
34
+ "license": "MIT",
35
+ "author": "vava",
36
+ "type": "module",
37
+ "main": "bin/free-coding-models.js",
38
+ "bin": {
39
+ "free-coding-models": "./bin/free-coding-models.js"
40
+ },
41
+ "files": [
42
+ "bin/",
43
+ "sources.js",
44
+ "README.md",
45
+ "LICENSE"
46
+ ],
47
+ "scripts": {
48
+ "start": "node bin/free-coding-models.js"
49
+ },
50
+ "dependencies": {
51
+ "chalk": "^5.4.1"
52
+ },
53
+ "engines": {
54
+ "node": ">=18.0.0"
55
+ }
56
+ }
package/sources.js ADDED
@@ -0,0 +1,104 @@
1
+ /**
2
+ * @file sources.js
3
+ * @description Model sources for AI availability checker.
4
+ *
5
+ * @details
6
+ * This file contains all model definitions organized by provider/source.
7
+ * Each source has its own models array with [model_id, display_label, tier].
8
+ * Add new sources here to support additional providers beyond NVIDIA NIM.
9
+ *
10
+ * 🎯 Tier scale (based on Aider Polyglot benchmark; each band includes its
+ *    lower bound and excludes its upper bound):
12
+ * - S+: ≥75% (elite frontier coders)
13
+ * - S: 62–75% (excellent)
14
+ * - A+: 54–62% (great)
15
+ * - A: 44–54% (good)
16
+ * - A-: 36–44% (decent)
17
+ * - B+: 25–36% (average)
18
+ * - B: 14–25% (below average)
19
+ * - C: <14% (lightweight/edge)
19
+ *
20
+ * 📖 Source: https://aider.chat/docs/leaderboards (Polyglot = 225 exercises, 6 languages)
21
+ *
22
+ * @exports Object containing all sources and their models
23
+ */
24
+
25
+ // 📖 NVIDIA NIM source - https://build.nvidia.com
26
+ export const nvidiaNim = [
27
+ // ── S+ tier — Aider polyglot ≥75% or equivalent frontier coding performance ──
28
+ ['deepseek-ai/deepseek-v3.1', 'DeepSeek V3.1', 'S+'], // ~76.1% Aider polyglot (thinking mode)
29
+ ['deepseek-ai/deepseek-v3.1-terminus', 'DeepSeek V3.1 Term', 'S+'], // same base, terminus variant
30
+ ['deepseek-ai/deepseek-v3.2', 'DeepSeek V3.2', 'S+'], // 74.2% Aider polyglot (reasoner)
31
+ ['moonshotai/kimi-k2.5', 'Kimi K2.5', 'S+'], // newer than K2 (59%), estimated S+
32
+ ['mistralai/devstral-2-123b-instruct-2512', 'Devstral 2 123B', 'S+'], // coding-focused 123B, estimated S+
33
+ ['nvidia/llama-3.1-nemotron-ultra-253b-v1', 'Nemotron Ultra 253B', 'S+'], // 253B NVIDIA flagship, estimated S+
34
+ ['mistralai/mistral-large-3-675b-instruct-2512', 'Mistral Large 675B', 'S+'], // 675B frontier, estimated S+
35
+ // ── S tier — Aider polyglot 62–74% ─────────────────────────────────────────
36
+ ['qwen/qwen2.5-coder-32b-instruct', 'Qwen2.5 Coder 32B', 'S'], // 71.4% Aider edit (best confirmed small coder)
37
+ ['z-ai/glm5', 'GLM 5', 'S'], // GLM flagship, estimated S
38
+ ['qwen/qwen3.5-397b-a17b', 'Qwen3.5 400B VLM', 'S'], // 400B VLM, estimated S
39
+ ['qwen/qwen3-coder-480b-a35b-instruct', 'Qwen3 Coder 480B', 'S'], // 61.8% Aider polyglot
40
+ ['qwen/qwen3-next-80b-a3b-thinking', 'Qwen3 80B Thinking', 'S'], // 80B thinking, estimated S
41
+ ['meta/llama-3.1-405b-instruct', 'Llama 3.1 405B', 'S'], // 66.2% Aider edit benchmark
42
+ ['minimaxai/minimax-m2.1', 'MiniMax M2.1', 'S'], // M2.1 flagship, estimated S
43
+ // ── A+ tier — Aider polyglot 54–62% ────────────────────────────────────────
44
+ ['moonshotai/kimi-k2-thinking', 'Kimi K2 Thinking', 'A+'], // thinking variant of K2 (59.1%)
45
+ ['moonshotai/kimi-k2-instruct', 'Kimi K2 Instruct', 'A+'], // 59.1% Aider polyglot (confirmed)
46
+ ['qwen/qwen3-235b-a22b', 'Qwen3 235B', 'A+'], // 59.6% Aider polyglot (confirmed)
47
+ ['meta/llama-3.3-70b-instruct', 'Llama 3.3 70B', 'A+'], // 59.4% Aider edit benchmark
48
+ ['z-ai/glm4.7', 'GLM 4.7', 'A+'], // GLM 4.7, estimated A+
49
+ ['qwen/qwen3-next-80b-a3b-instruct', 'Qwen3 80B Instruct', 'A+'], // 80B instruct, estimated A+
50
+ // ── A tier — Aider polyglot 44–54% ─────────────────────────────────────────
51
+ ['minimaxai/minimax-m2', 'MiniMax M2', 'A'], // MiniMax M2, estimated A
52
+ ['mistralai/mistral-medium-3-instruct', 'Mistral Medium 3', 'A'], // medium model, estimated A
53
+ ['mistralai/magistral-small-2506', 'Magistral Small', 'A'], // reasoning variant, estimated A
54
+ ['nvidia/nemotron-3-nano-30b-a3b', 'Nemotron Nano 30B', 'A'], // 30B NVIDIA, estimated A
55
+ ['deepseek-ai/deepseek-r1-distill-qwen-32b', 'R1 Distill 32B', 'A'], // 32B R1 distill, estimated A
56
+ // ── A- tier — Aider polyglot 36–44% ────────────────────────────────────────
57
+ ['openai/gpt-oss-120b', 'GPT OSS 120B', 'A-'], // 41.8% Aider polyglot (confirmed)
58
+ ['nvidia/llama-3.3-nemotron-super-49b-v1.5', 'Nemotron Super 49B', 'A-'], // 49B NVIDIA, estimated A-
59
+ ['meta/llama-4-scout-17b-16e-instruct', 'Llama 4 Scout', 'A-'], // Scout 17B, estimated A-
60
+ ['deepseek-ai/deepseek-r1-distill-qwen-14b', 'R1 Distill 14B', 'A-'], // 14B R1 distill, estimated A-
61
+ ['igenius/colosseum_355b_instruct_16k', 'Colosseum 355B', 'A-'], // 355B MoE, estimated A-
62
+ // ── B+ tier — Aider polyglot 25–36% ────────────────────────────────────────
63
+ ['qwen/qwq-32b', 'QwQ 32B', 'B+'], // 20.9% Aider (format penalty — actually stronger)
64
+ ['openai/gpt-oss-20b', 'GPT OSS 20B', 'B+'], // smaller OSS variant, estimated B+
65
+ ['stockmark/stockmark-2-100b-instruct', 'Stockmark 100B', 'B+'], // JP-specialized 100B, estimated B+
66
+ ['bytedance/seed-oss-36b-instruct', 'Seed OSS 36B', 'B+'], // ByteDance 36B, estimated B+
67
+ ['stepfun-ai/step-3.5-flash', 'Step 3.5 Flash', 'B+'], // flash model, estimated B+
68
+ // ── B tier — Aider polyglot 14–25% ─────────────────────────────────────────
69
+ ['meta/llama-4-maverick-17b-128e-instruct', 'Llama 4 Maverick', 'B'], // 15.6% Aider polyglot (confirmed)
70
+ ['mistralai/mixtral-8x22b-instruct-v0.1', 'Mixtral 8x22B', 'B'], // older MoE, estimated B
71
+ ['mistralai/ministral-14b-instruct-2512', 'Ministral 14B', 'B'], // 14B, estimated B
72
+ ['ibm/granite-34b-code-instruct', 'Granite 34B Code', 'B'], // IBM code model, estimated B
73
+ ['deepseek-ai/deepseek-r1-distill-llama-8b', 'R1 Distill 8B', 'B'], // 8B R1 distill, estimated B
74
+ // ── C tier — Aider polyglot <14% or lightweight edge models ─────────────────
75
+ ['deepseek-ai/deepseek-r1-distill-qwen-7b', 'R1 Distill 7B', 'C'], // 7B, too small for complex coding
76
+ ['google/gemma-2-9b-it', 'Gemma 2 9B', 'C'], // 9B, lightweight
77
+ ['microsoft/phi-3.5-mini-instruct', 'Phi 3.5 Mini', 'C'], // mini, edge-focused
78
+ ['microsoft/phi-4-mini-instruct', 'Phi 4 Mini', 'C'], // mini, edge-focused
79
+ ]
80
+
81
+ // 📖 All sources combined - used by the main script
82
+ export const sources = {
83
+ nvidia: {
84
+ name: 'NVIDIA NIM',
85
+ models: nvidiaNim,
86
+ },
87
+ // 📖 Add more sources here in the future, for example:
88
+ // openai: {
89
+ // name: 'OpenAI',
90
+ // models: [...],
91
+ // },
92
+ // anthropic: {
93
+ // name: 'Anthropic',
94
+ // models: [...],
95
+ // },
96
+ }
97
+
98
+ // 📖 Flatten all models from all sources for backward compatibility
99
+ export const MODELS = []
100
+ for (const [sourceKey, sourceData] of Object.entries(sources)) {
101
+ for (const [modelId, label, tier] of sourceData.models) {
102
+ MODELS.push([modelId, label, tier])
103
+ }
104
+ }