free-coding-models 0.1.12 → 0.1.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/lib/utils.js +279 -0
  2. package/package.json +2 -1
package/lib/utils.js ADDED
@@ -0,0 +1,279 @@
1
+ /**
2
+ * @file lib/utils.js
3
+ * @description Pure utility functions extracted from the main CLI for testability.
4
+ *
5
+ * 📖 This file was created to separate the "brain" of the app from the "body" (TUI, I/O, chalk).
6
+ * Every function here is a pure function — no side effects, no process.exit, no console output.
7
+ * This makes them trivial to unit test with `node:test` without mocking anything.
8
+ *
9
+ * 📖 The main CLI (bin/free-coding-models.js) imports everything from here.
10
+ * If you need to add new logic (calculations, data transforms, parsing),
11
+ * add it here so tests can cover it.
12
+ *
13
+ * 📖 Data flow:
14
+ * sources.js → MODELS array → main CLI creates result objects → these utils process them
15
+ *
16
+ * 📖 Result object shape (created by the main CLI, consumed by these functions):
17
+ * {
18
+ * idx: number, // 1-based index for display
19
+ * modelId: string, // e.g. "deepseek-ai/deepseek-v3.2"
20
+ * label: string, // e.g. "DeepSeek V3.2" (human-friendly name)
21
+ * tier: string, // e.g. "S+", "A", "B+" — from sources.js
22
+ * status: string, // "pending" | "up" | "down" | "timeout"
23
+ * pings: Array<{ms: number, code: string}>, // full ping history since start
24
+ * httpCode: string|null // last HTTP status code (for detecting 429 rate limits)
25
+ * }
26
+ *
27
+ * @functions
28
+ * → getAvg(result) — Calculate average latency from successful pings only
29
+ * → getVerdict(result) — Determine model health verdict based on avg latency and status
30
+ * → getUptime(result) — Calculate uptime percentage (successful / total pings)
31
+ * → sortResults(results, sortColumn, sortDirection) — Sort model results by any column
32
+ * → filterByTier(results, tierLetter) — Filter results by tier letter (S/A/B/C)
33
+ * → findBestModel(results) — Pick the best model by status → avg → uptime priority
34
+ * → parseArgs(argv) — Parse CLI arguments into structured flags and values
35
+ *
36
+ * @exports getAvg, getVerdict, getUptime, sortResults, filterByTier, findBestModel, parseArgs
37
+ * @exports TIER_ORDER, VERDICT_ORDER, TIER_LETTER_MAP
38
+ *
39
+ * @see bin/free-coding-models.js — main CLI that imports these utils
40
+ * @see sources.js — model definitions consumed by these functions
41
+ * @see test/test.js — unit tests that validate all these functions
42
+ */
43
+
44
+ // ─── Constants ────────────────────────────────────────────────────────────────
45
+
46
// 📖 Tier sort order — defines the hierarchy from best to worst.
// 📖 Used by sortResults to compare tiers numerically via indexOf.
// 📖 S+ (elite frontier coders) is index 0, C (lightweight edge) is index 7.
// 📖 This must stay in sync with the tiers defined in sources.js.
// 📖 NOTE(review): indexOf returns -1 for a tier string not in this list, which
// 📖 would sort unknown tiers ABOVE S+ — keep sources.js and this array aligned.
export const TIER_ORDER = ['S+', 'S', 'A+', 'A', 'A-', 'B+', 'B', 'C']
51
+
52
// 📖 Verdict strings in order from healthiest to unhealthiest.
// 📖 Used by sortResults when sorting by the "verdict" column.
// 📖 "Perfect" means < 400ms avg, "Pending" means no data yet.
// 📖 The order matters — it determines sort rank in the TUI table.
// 📖 Every string getVerdict can return must appear here, or sorting by
// 📖 verdict would rank the missing string first (indexOf → -1).
export const VERDICT_ORDER = ['Perfect', 'Normal', 'Slow', 'Very Slow', 'Overloaded', 'Unstable', 'Not Active', 'Pending']
57
+
58
// 📖 Maps a CLI tier letter (--tier S/A/B/C) to the full tier strings it includes.
// 📖 Example: --tier A matches A+, A, and A- models (all "A-family" tiers).
// 📖 This avoids users needing to know the exact sub-tier names.
// 📖 Used by filterByTier() and the --tier CLI flag.
// 📖 Keys are uppercase — lookups must uppercase user input first (filterByTier does).
export const TIER_LETTER_MAP = {
  'S': ['S+', 'S'],        // 📖 Frontier coders — top Aider polyglot scores
  'A': ['A+', 'A', 'A-'],  // 📖 Excellent alternatives — strong at most coding tasks
  'B': ['B+', 'B'],        // 📖 Solid performers — good for targeted programming
  'C': ['C'],              // 📖 Lightweight/edge models — code completion on constrained infra
}
68
+
69
+ // ─── Core Logic Functions ────────────────────────────────────────────────────
70
+
71
// 📖 getAvg: Calculate average latency from ONLY successful pings (HTTP 200).
// 📖 Failed pings (timeouts, 429s, 500s) are excluded to avoid skewing the average.
// 📖 Returns Infinity when no successful pings exist — this sorts "unknown" models to the bottom.
// 📖 Rounding to an integer avoids displaying fractional milliseconds in the TUI.
//
// 📖 Example:
//    pings = [{ms: 200, code: '200'}, {ms: 0, code: '429'}, {ms: 400, code: '200'}]
//    → getAvg returns 300 (only the two 200s count: (200+400)/2)
export const getAvg = (r) => {
  let totalMs = 0
  let okCount = 0
  for (const ping of r.pings || []) {
    if (ping.code === '200') {
      totalMs += ping.ms
      okCount += 1
    }
  }
  if (okCount === 0) return Infinity
  return Math.round(totalMs / okCount)
}
84
+
85
// 📖 getVerdict: Determine a human-readable health verdict for a model.
// 📖 This is the "Status" column label shown in the TUI table.
//
// 📖 Decision priority (first match wins):
//    1. HTTP 429 → "Overloaded" (rate limited by NVIDIA, not a latency issue)
//    2. Timeout/down BUT was previously up → "Unstable" (it worked before, now it doesn't)
//    3. Timeout/down and never worked → "Not Active" (model might be offline)
//    4. No successful pings yet → "Pending" (still waiting for first response)
//    5. Avg < 400ms → "Perfect"
//    6. Avg < 1000ms → "Normal"
//    7. Avg < 3000ms → "Slow"
//    8. Avg < 5000ms → "Very Slow"
//    9. Avg >= 5000ms → "Unstable"
//
// 📖 The "wasUpBefore" check is key — it distinguishes between a model that's
// 📖 temporarily flaky vs one that was never reachable in the first place.
export const getVerdict = (r) => {
  const avg = getAvg(r)
  // 📖 .some() on an empty array is false, so no separate length check is needed.
  // 📖 (r.pings || []) keeps this null-safe, consistent with getAvg.
  const wasUpBefore = (r.pings || []).some(p => p.code === '200')

  if (r.httpCode === '429') return 'Overloaded'
  if (r.status === 'timeout' || r.status === 'down') {
    return wasUpBefore ? 'Unstable' : 'Not Active'
  }
  if (avg === Infinity) return 'Pending'
  if (avg < 400) return 'Perfect'
  if (avg < 1000) return 'Normal'
  if (avg < 3000) return 'Slow'
  if (avg < 5000) return 'Very Slow'
  // 📖 avg >= 5000ms. (The previous `avg < 10000` branch was dead code:
  // 📖 both it and this fallthrough returned 'Unstable'.)
  return 'Unstable'
}
116
+
117
// 📖 getUptime: Calculate the percentage of successful pings (code 200) over total pings.
// 📖 Returns 0 when no pings have been made yet (avoids division by zero).
// 📖 Displayed as "Up%" column in the TUI — e.g., "85%" means 85% of pings got HTTP 200.
// 📖 This metric is useful for identifying models that are technically "up" but flaky.
export const getUptime = (r) => {
  const total = r.pings.length
  if (total === 0) return 0
  let okCount = 0
  for (const ping of r.pings) {
    if (ping.code === '200') okCount += 1
  }
  return Math.round((okCount / total) * 100)
}
126
+
127
// 📖 sortResults: Sort the results array by any column the user can click/press in the TUI.
// 📖 Returns a NEW array — never mutates the original (important for React-style re-renders).
//
// 📖 Supported columns (matching the keyboard shortcuts in the TUI):
//    - 'rank' (R key) — original index from sources.js
//    - 'tier' (T key) — tier hierarchy (S+ first, C last)
//    - 'origin' (O key) — provider name (all NVIDIA NIM for now, future-proofed)
//    - 'model' (M key) — alphabetical by display label
//    - 'ping' (P key) — last ping latency (only successful ones count)
//    - 'avg' (A key) — average latency across all successful pings
//    - 'status' (S key) — alphabetical status string
//    - 'verdict' (V key) — verdict order (Perfect → Pending)
//    - 'uptime' (U key) — uptime percentage
//
// 📖 sortDirection 'asc' = ascending (smallest first), 'desc' = descending (largest first)
// 📖 An unknown column leaves cmp at 0, so Array.sort (stable) preserves the input order.
export const sortResults = (results, sortColumn, sortDirection) => {
  return [...results].sort((a, b) => {
    let cmp = 0

    switch (sortColumn) {
      case 'rank':
        cmp = a.idx - b.idx
        break
      case 'tier':
        // 📖 Compare by position in TIER_ORDER — lower index = better tier
        cmp = TIER_ORDER.indexOf(a.tier) - TIER_ORDER.indexOf(b.tier)
        break
      case 'origin':
        // 📖 All models share the same provider today, so origin is a no-op sort.
        // 📖 (Was `'NVIDIA NIM'.localeCompare('NVIDIA NIM')` — a constant compared
        // 📖 to itself always yields 0; replaced with a literal 0.)
        cmp = 0
        break
      case 'model':
        cmp = a.label.localeCompare(b.label)
        break
      case 'ping': {
        // 📖 Sort by LAST ping only — gives a real-time "right now" snapshot
        // 📖 Failed last pings sort to the bottom (Infinity)
        const aLast = a.pings.length > 0 ? a.pings[a.pings.length - 1] : null
        const bLast = b.pings.length > 0 ? b.pings[b.pings.length - 1] : null
        const aPing = aLast?.code === '200' ? aLast.ms : Infinity
        const bPing = bLast?.code === '200' ? bLast.ms : Infinity
        cmp = aPing - bPing
        break
      }
      case 'avg':
        cmp = getAvg(a) - getAvg(b)
        break
      case 'status':
        cmp = a.status.localeCompare(b.status)
        break
      case 'verdict': {
        // 📖 Sort by verdict order — "Perfect" first, "Pending" last
        const aVerdict = getVerdict(a)
        const bVerdict = getVerdict(b)
        cmp = VERDICT_ORDER.indexOf(aVerdict) - VERDICT_ORDER.indexOf(bVerdict)
        break
      }
      case 'uptime':
        cmp = getUptime(a) - getUptime(b)
        break
    }

    // 📖 Flip comparison for descending order
    return sortDirection === 'asc' ? cmp : -cmp
  })
}
193
+
194
// 📖 filterByTier: Filter model results by a single tier letter.
// 📖 Uses TIER_LETTER_MAP to expand the letter into matching tier strings.
// 📖 Returns null if the tier letter is invalid OR not a string — the caller decides
// 📖 how to handle (the main CLI exits with an error message, tests can assert null).
// 📖 The non-string guard prevents a TypeError when --tier is passed without a value.
//
// 📖 Example: filterByTier(results, 'A') → returns only models with tier A+, A, or A-
export function filterByTier(results, tierLetter) {
  if (typeof tierLetter !== 'string') return null
  const allowed = TIER_LETTER_MAP[tierLetter.toUpperCase()]
  if (!allowed) return null
  return results.filter(r => allowed.includes(r.tier))
}
206
+
207
// 📖 findBestModel: Pick the single best model from a results array.
// 📖 Used by --fiable mode to output the most reliable model after 10s of analysis.
//
// 📖 Selection priority (tri-key sort):
//    1. Status: "up" models always beat non-up models
//    2. Average latency: faster average wins (lower is better)
//    3. Uptime %: higher uptime wins as tiebreaker
//
// 📖 Returns null if the array is empty.
export function findBestModel(results) {
  if (results.length === 0) return null

  const byReliability = (a, b) => {
    // 📖 Priority 1: a responding model always outranks a non-responding one
    const aUp = a.status === 'up'
    const bUp = b.status === 'up'
    if (aUp !== bUp) return aUp ? -1 : 1

    // 📖 Priority 2: lower average latency = faster = better
    const avgA = getAvg(a)
    const avgB = getAvg(b)
    if (avgA !== avgB) return avgA - avgB

    // 📖 Priority 3: higher uptime = more reliable = better (tiebreaker)
    return getUptime(b) - getUptime(a)
  }

  return [...results].sort(byReliability)[0]
}
236
+
237
+ // ─── CLI Argument Parsing ────────────────────────────────────────────────────
238
+
239
// 📖 parseArgs: Parse process.argv into a structured object of flags and values.
// 📖 Expects the full argv array (including 'node' and 'script' at indices 0-1).
// 📖 Slices from index 2 to get user-provided arguments only.
//
// 📖 Argument types:
//    - API key: first positional arg that doesn't start with "--" (e.g., "nvapi-xxx")
//    - Boolean flags: --best, --fiable, --opencode, --openclaw (case-insensitive)
//    - Value flag: --tier <letter> (the next non-flag arg is the tier value)
//
// 📖 Returns:
//    { apiKey, bestMode, fiableMode, openCodeMode, openClawMode, tierFilter }
//
// 📖 Note: apiKey may be null here — the main CLI falls back to env vars and saved config.
export function parseArgs(argv) {
  const args = argv.slice(2)
  let apiKey = null
  const flags = []

  for (let i = 0; i < args.length; i++) {
    const arg = args[i]
    if (arg.startsWith('--')) {
      flags.push(arg.toLowerCase())
      // 📖 BUG FIX: --tier consumes the next token as its value, so skip it here.
      // 📖 Previously `free-coding-models --tier S` treated "S" as the positional
      // 📖 API key because the loop saw it as a non-flag argument.
      if (arg.toLowerCase() === '--tier' && args[i + 1] && !args[i + 1].startsWith('--')) {
        i++
      }
    } else if (apiKey === null) {
      apiKey = arg
    }
  }

  const bestMode = flags.includes('--best')
  const fiableMode = flags.includes('--fiable')
  const openCodeMode = flags.includes('--opencode')
  const openClawMode = flags.includes('--openclaw')

  // 📖 --tier requires a value after it (e.g., --tier S)
  // 📖 If the next arg is another flag (--), treat it as missing value → tierFilter stays null
  let tierFilter = null
  const tierIdx = args.findIndex(a => a.toLowerCase() === '--tier')
  if (tierIdx !== -1 && args[tierIdx + 1] && !args[tierIdx + 1].startsWith('--')) {
    tierFilter = args[tierIdx + 1].toUpperCase()
  }

  return { apiKey, bestMode, fiableMode, openCodeMode, openClawMode, tierFilter }
}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "free-coding-models",
3
- "version": "0.1.12",
3
+ "version": "0.1.14",
4
4
  "description": "Find the fastest coding LLM models in seconds — ping free models from multiple providers, pick the best one for OpenCode, Cursor, or any AI coding assistant.",
5
5
  "keywords": [
6
6
  "nvidia",
@@ -40,6 +40,7 @@
40
40
  },
41
41
  "files": [
42
42
  "bin/",
43
+ "lib/",
43
44
  "sources.js",
44
45
  "patch-openclaw.js",
45
46
  "patch-openclaw-models.js",