free-coding-models 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,844 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * @file free-coding-models.js
4
+ * @description Live terminal availability checker for coding LLM models with OpenCode integration.
5
+ *
6
+ * @details
7
+ * This CLI tool discovers and benchmarks language models optimized for coding.
8
+ * It runs in an alternate screen buffer, pings all models in parallel, re-pings successful ones
9
+ * multiple times for reliable latency measurements, and prints a clean final table.
10
+ * During benchmarking, users can navigate with arrow keys and press Enter to launch OpenCode immediately.
11
+ *
12
+ * 🎯 Key features:
13
+ * - Parallel pings across all models with animated real-time updates
14
+ * - Continuous monitoring with a configurable ping interval (default 2s, never stops)
15
+ * - Rolling averages calculated from ALL successful pings since start
16
+ * - Best-per-tier highlighting with medals (🥇🥈🥉)
17
+ * - Interactive navigation with arrow keys directly in the table
18
+ * - Instant OpenCode launch on Enter key press (any model, even timeout/down)
19
+ * - Automatic OpenCode config detection and model setup
20
+ * - Persistent API key storage in ~/.free-coding-models
21
+ * - Multi-source support via sources.js (easily add new providers)
22
+ * - Uptime percentage tracking (successful pings / total pings)
23
+ * - Sortable columns (R/T/O/M/P/A/S/V/U keys)
24
+ *
25
+ * → Functions:
26
+ * - `loadApiKey` / `saveApiKey`: Manage persisted API key in ~/.free-coding-models
27
+ * - `promptApiKey`: Interactive wizard for first-time API key setup
28
+ * - `ping`: Perform HTTP request to NIM endpoint with timeout handling
29
+ * - `renderTable`: Generate ASCII table with colored latency indicators and status emojis
30
+ * - `getAvg`: Calculate average latency from all successful pings
31
+ * - `getVerdict`: Determine verdict string based on average latency (Overloaded for 429)
32
+ * - `getUptime`: Calculate uptime percentage from ping history
33
+ * - `sortResults`: Sort models by various columns
34
+ * - `checkNvidiaNimConfig`: Check if NVIDIA NIM provider is configured in OpenCode
35
+ * - `startOpenCode`: Launch OpenCode with selected model (configures if needed)
36
+ * - `main`: Orchestrates CLI flow, wizard, ping loops, animation, and output
37
+ *
38
+ * 📦 Dependencies:
39
+ * - Node.js 18+ (native fetch)
40
+ * - chalk: Terminal styling and colors
41
+ * - readline: Interactive input handling
42
+ * - sources.js: Model definitions from all providers
43
+ *
44
+ * ⚙️ Configuration:
45
+ * - API key stored in ~/.free-coding-models
46
+ * - Models loaded from sources.js (extensible for new providers)
47
+ * - OpenCode config: ~/.config/opencode/opencode.json
48
+ * - Ping timeout: 15s per attempt before abort
49
+ * - Ping interval: 2 seconds by default, adjustable 1–60s with W/X keys (continuous monitoring mode)
50
+ * - Animation: 12 FPS with braille spinners
51
+ * - Reliability: Green → Yellow → Orange → Red → Black (degrades with instability)
52
+ *
53
+ * @see {@link https://build.nvidia.com} NVIDIA API key generation
54
+ * @see {@link https://github.com/opencode-ai/opencode} OpenCode repository
55
+ */
56
+
57
+ import chalk from 'chalk'
58
+ import { createRequire } from 'module'
59
+ import { readFileSync, writeFileSync, existsSync, copyFileSync, mkdirSync } from 'fs'
60
+ import { homedir } from 'os'
61
+ import { join } from 'path'
62
+ import { MODELS } from '../sources.js'
63
+
64
+ const require = createRequire(import.meta.url)
65
+ const readline = require('readline')
66
+
67
// ─── Config path ──────────────────────────────────────────────────────────────
const CONFIG_PATH = join(homedir(), '.free-coding-models')

/**
 * Read the persisted API key from ~/.free-coding-models.
 * @returns {string|null} Trimmed key, or null when the file is missing or unreadable.
 */
function loadApiKey() {
  try {
    // 📖 Best-effort read: any fs error (permissions, race) simply means "no key yet".
    return existsSync(CONFIG_PATH) ? readFileSync(CONFIG_PATH, 'utf8').trim() : null
  } catch {
    return null
  }
}
78
+
79
/**
 * Persist the API key to ~/.free-coding-models with owner-only permissions.
 * Failures are deliberately swallowed (best-effort persistence).
 * @param {string} key - The raw API key to store.
 */
function saveApiKey(key) {
  try {
    // 📖 mode 0o600: the key is a secret — readable/writable by the owner only.
    writeFileSync(CONFIG_PATH, key, { mode: 0o600 })
  } catch {
    // 📖 Best-effort: an unwritable home dir must not crash the CLI.
  }
}
84
+
85
// ─── First-run wizard ─────────────────────────────────────────────────────────
/**
 * Interactive first-run wizard: asks for an NVIDIA API key on stdin,
 * persists a non-empty answer via saveApiKey, and resolves with the key
 * (or null when the user submitted an empty line).
 * @returns {Promise<string|null>}
 */
async function promptApiKey() {
  console.log()
  console.log(chalk.dim(' 🔑 Setup your NVIDIA API key'))
  console.log(chalk.dim(' 📝 Get a free key at: ') + chalk.cyanBright('https://build.nvidia.com'))
  console.log(chalk.dim(' 💾 Key will be saved to ~/.free-coding-models'))
  console.log()

  const wizard = readline.createInterface({ input: process.stdin, output: process.stdout })

  return new Promise((resolve) => {
    wizard.question(chalk.bold(' Enter your API key: '), (answer) => {
      wizard.close()
      const trimmed = answer.trim()
      if (trimmed) {
        saveApiKey(trimmed)
        console.log()
        console.log(chalk.green(' ✅ API key saved to ~/.free-coding-models'))
        console.log()
      }
      resolve(trimmed || null)
    })
  })
}
112
+
113
// ─── Alternate screen control ─────────────────────────────────────────────────
// 📖 \x1b[?1049h = enter alt screen \x1b[?1049l = leave alt screen
// 📖 \x1b[?25l = hide cursor \x1b[?25h = show cursor
// 📖 \x1b[H = cursor to top \x1b[2J = clear screen
// 📖 ALT_ENTER also hides the cursor; ALT_LEAVE restores it — always emit them as pairs.
const ALT_ENTER = '\x1b[?1049h\x1b[?25l'
const ALT_LEAVE = '\x1b[?1049l\x1b[?25h'
const ALT_CLEAR = '\x1b[H\x1b[2J'
120
+
121
// ─── API Configuration ───────────────────────────────────────────────────────────
// 📖 Models are now loaded from sources.js to support multiple providers
// 📖 This allows easy addition of new model sources beyond NVIDIA NIM

const NIM_URL = 'https://integrate.api.nvidia.com/v1/chat/completions'
const PING_TIMEOUT = 15_000 // 📖 15s per attempt before abort - slow models get more time
const PING_INTERVAL = 2_000 // 📖 Default: ping all models every 2s (runtime-adjustable 1–60s via W/X keys)

const FPS = 12 // 📖 Redraw rate of the alt-screen animation loop
const COL_MODEL = 22 // 📖 NOTE(review): not referenced anywhere in this file — candidate for removal
// 📖 COL_MS = visual width of a ms cell including 2 padding spaces; COL_MS - 2 = CELL_W below
// 📖 Widest cell content is the literal "TIMEOUT" (7 chars); 9 chars of content leaves room
const COL_MS = 11
135
+
136
// ─── Styling ──────────────────────────────────────────────────────────────────
// 📖 Tier colors: green gradient (best) → yellow → orange → red (worst)
// 📖 Uses chalk.rgb() for fine-grained color control across 8 tier levels
// 📖 Keys must match the tier strings supplied by sources.js (see TIER_ORDER below)
const TIER_COLOR = {
  'S+': t => chalk.bold.rgb(0, 255, 80)(t), // 🟢 bright neon green — elite
  'S': t => chalk.bold.rgb(80, 220, 0)(t), // 🟢 green — excellent
  'A+': t => chalk.bold.rgb(170, 210, 0)(t), // 🟡 yellow-green — great
  'A': t => chalk.bold.rgb(240, 190, 0)(t), // 🟡 yellow — good
  'A-': t => chalk.bold.rgb(255, 130, 0)(t), // 🟠 amber — decent
  'B+': t => chalk.bold.rgb(255, 70, 0)(t), // 🟠 orange-red — average
  'B': t => chalk.bold.rgb(210, 20, 0)(t), // 🔴 red — below avg
  'C': t => chalk.bold.rgb(140, 0, 0)(t), // 🔴 dark red — lightweight
}

// 📖 COL_MS - 2 = visual content width (the 2 padding spaces are handled by │ x │ template)
const CELL_W = COL_MS - 2 // 9 chars of content per ms cell
152
+
153
// 📖 Format a single latency cell: right-aligned, colored by magnitude.
// 📖 `ms` is a number, the string 'TIMEOUT', or null (no data yet).
// 📖 NOTE(review): msCell is not referenced anywhere in this file (renderTable
// 📖 builds its ping cells inline) — kept for API parity / future use.
const msCell = (ms) => {
  if (ms === null) return chalk.dim('—'.padStart(CELL_W))
  const text = String(ms).padStart(CELL_W)
  if (ms === 'TIMEOUT') return chalk.red(text)
  if (ms < 500) return chalk.greenBright(text)
  return ms < 1500 ? chalk.yellow(text) : chalk.red(text)
}
161
+
162
const FRAMES = ['⠋','⠙','⠹','⠸','⠼','⠴','⠦','⠧','⠇','⠏']
// 📖 Spinner cell: braille glyph (1 column wide) padded out to CELL_W visual chars.
// 📖 `o` offsets the frame index so adjacent rows can animate out of phase.
function spinCell(f, o = 0) {
  const glyph = FRAMES[(f + o) % FRAMES.length]
  return chalk.dim.yellow(glyph.padEnd(CELL_W))
}
165
+
166
// ─── Table renderer ───────────────────────────────────────────────────────────

// 📖 Tier strings from best to worst — index position is the sort key for the T column.
const TIER_ORDER = ['S+', 'S', 'A+', 'A', 'A-', 'B+', 'B', 'C']

/**
 * Average latency over all successful pings.
 * @param {{ pings?: Array<{ ms: number|string, code: string }> }} r - Model result record.
 * @returns {number} Rounded mean of successful (code '200') ping times, or Infinity when none.
 */
const getAvg = r => {
  // 📖 Calculate average only from successful pings (code 200)
  // 📖 pings are objects: { ms, code }
  const successfulPings = (r.pings || []).filter(p => p.code === '200')
  if (successfulPings.length === 0) return Infinity
  return Math.round(successfulPings.reduce((a, b) => a + b.ms, 0) / successfulPings.length)
}

// 📖 Verdict order for sorting (best → worst); must contain every string getVerdict can return.
// 📖 FIX: 'Unusable' added — getVerdict now returns it for avg ≥ 5000, matching
// 📖 renderTable's '💀 Unusable' label, so verdict-sorting no longer lumps these rows
// 📖 in with 'Unstable'.
const VERDICT_ORDER = ['Perfect', 'Normal', 'Slow', 'Very Slow', 'Unusable', 'Overloaded', 'Unstable', 'Not Active', 'Pending']

/**
 * Classify a model's current health into a human-readable verdict.
 * Mirrors the thresholds used by renderTable's Verdict column.
 * @param {{ pings?: Array, status: string, httpCode: ?string }} r - Model result record.
 * @returns {string} One of VERDICT_ORDER.
 */
const getVerdict = (r) => {
  const avg = getAvg(r)
  // 📖 Guard pings like getAvg does — a freshly-built record may lack the array.
  const wasUpBefore = (r.pings || []).some(p => p.code === '200')

  // 📖 429 = rate limited = Overloaded
  if (r.httpCode === '429') return 'Overloaded'
  // 📖 A model that answered before but fails now is flapping, not dead.
  if ((r.status === 'timeout' || r.status === 'down') && wasUpBefore) return 'Unstable'
  if (r.status === 'timeout' || r.status === 'down') return 'Not Active'
  if (avg === Infinity) return 'Pending'
  if (avg < 400) return 'Perfect'
  if (avg < 1000) return 'Normal'
  if (avg < 3000) return 'Slow'
  if (avg < 5000) return 'Very Slow'
  // 📖 avg ≥ 5000 — consistent with renderTable's '💀 Unusable' display branch.
  return 'Unusable'
}
197
+
198
// 📖 Calculate uptime percentage (successful pings / total pings).
// 📖 Only code '200' responses count as "up"; an empty history reads as 0%.
const getUptime = (r) => {
  const total = r.pings.length
  if (total === 0) return 0
  let successful = 0
  for (const p of r.pings) {
    if (p.code === '200') successful++
  }
  return Math.round((successful / total) * 100)
}
205
+
206
// 📖 Sort results using the same logic as renderTable — used for both display and
// 📖 selection, so the highlighted cursor row always matches the rendered row.
// 📖 Returns a new array; the input is never mutated.
const sortResults = (results, sortColumn, sortDirection) => {
  // 📖 Latest successful latency, or Infinity when the last ping failed / none exist.
  const lastOkPing = (r) => {
    const last = r.pings.length > 0 ? r.pings[r.pings.length - 1] : null
    return last?.code === '200' ? last.ms : Infinity
  }

  // 📖 One ascending comparator per sortable column.
  const comparators = {
    rank: (a, b) => a.idx - b.idx,
    tier: (a, b) => TIER_ORDER.indexOf(a.tier) - TIER_ORDER.indexOf(b.tier),
    origin: () => 'NVIDIA NIM'.localeCompare('NVIDIA NIM'), // All same for now
    model: (a, b) => a.label.localeCompare(b.label),
    ping: (a, b) => lastOkPing(a) - lastOkPing(b),
    avg: (a, b) => getAvg(a) - getAvg(b),
    status: (a, b) => a.status.localeCompare(b.status),
    verdict: (a, b) => VERDICT_ORDER.indexOf(getVerdict(a)) - VERDICT_ORDER.indexOf(getVerdict(b)),
    uptime: (a, b) => getUptime(a) - getUptime(b),
  }

  const compare = comparators[sortColumn]
  return [...results].sort((a, b) => {
    // 📖 Unknown column → 0 keeps the original (stable) order, as before.
    const cmp = compare ? compare(a, b) : 0
    return sortDirection === 'asc' ? cmp : -cmp
  })
}
252
+
253
/**
 * Render one animation frame of the status table as a single string.
 *
 * @param {Array<object>} results - Per-model records ({ idx, label, tier, status, pings, httpCode }).
 * @param {number} pendingPings - Pings currently in flight (shown in the header phase text).
 * @param {number} frame - Animation frame counter; drives the braille "wait" spinner.
 * @param {?number} cursor - Row index (in sorted order) to highlight, or null for none.
 * @param {string} sortColumn - Column key understood by sortResults.
 * @param {string} sortDirection - 'asc' or 'desc'.
 * @param {number} pingInterval - Current ping interval in ms (for the countdown).
 * @param {number} lastPingTime - Epoch ms when the last ping cycle started.
 * @returns {string} Complete table text, written after ALT_CLEAR by the caller.
 */
function renderTable(results, pendingPings, frame, cursor = null, sortColumn = 'avg', sortDirection = 'asc', pingInterval = PING_INTERVAL, lastPingTime = Date.now()) {
  const up = results.filter(r => r.status === 'up').length
  const down = results.filter(r => r.status === 'down').length
  const timeout = results.filter(r => r.status === 'timeout').length
  const pending = results.filter(r => r.status === 'pending').length

  // 📖 Calculate seconds until next ping
  const timeSinceLastPing = Date.now() - lastPingTime
  const timeUntilNextPing = Math.max(0, pingInterval - timeSinceLastPing)
  const secondsUntilNext = Math.ceil(timeUntilNextPing / 1000)

  // 📖 Header phase: discovery (first pass) → in-flight count → countdown to next cycle
  const phase = pending > 0
    ? chalk.dim(`discovering — ${pending} remaining…`)
    : pendingPings > 0
      ? chalk.dim(`pinging — ${pendingPings} in flight…`)
      : chalk.dim(`next ping ${secondsUntilNext}s`)

  // 📖 Column widths (generous spacing with margins)
  const W_RANK = 6
  const W_TIER = 6
  const W_SOURCE = 14
  const W_MODEL = 26
  const W_PING = 14
  const W_AVG = 11
  const W_STATUS = 18
  const W_VERDICT = 14
  const W_UPTIME = 6

  // 📖 Sort models using the shared helper
  const sorted = sortResults(results, sortColumn, sortDirection)

  const lines = [
    '',
    ` ${chalk.bold('⚡ Free Coding Models')} ` +
    chalk.greenBright(`✅ ${up}`) + chalk.dim(' up ') +
    chalk.yellow(`⏱ ${timeout}`) + chalk.dim(' timeout ') +
    chalk.red(`❌ ${down}`) + chalk.dim(' down ') +
    phase,
    '',
  ]

  // 📖 Header row with sorting indicators
  // 📖 NOTE: padEnd on chalk strings counts ANSI codes, breaking alignment
  // 📖 Solution: build plain text first, then colorize
  const dir = sortDirection === 'asc' ? '↑' : '↓'

  const rankH = 'Rank'
  const tierH = 'Tier'
  const originH = 'Origin'
  const modelH = 'Model'
  const pingH = sortColumn === 'ping' ? dir + ' Latest Ping' : 'Latest Ping'
  const avgH = sortColumn === 'avg' ? dir + ' Avg Ping' : 'Avg Ping'
  const statusH = sortColumn === 'status' ? dir + ' Status' : 'Status'
  const verdictH = sortColumn === 'verdict' ? dir + ' Verdict' : 'Verdict'
  const uptimeH = sortColumn === 'uptime' ? dir + ' Up%' : 'Up%'

  // 📖 Now colorize after padding is calculated on plain text
  // 📖 NOTE(review): rank/tier/model headers never get the ↑/↓ prefix or cyan
  // 📖 highlight even when they are the active sort column — confirm intent
  const rankH_c = chalk.dim(rankH.padEnd(W_RANK))
  const tierH_c = chalk.dim(tierH.padEnd(W_TIER))
  const originH_c = sortColumn === 'origin' ? chalk.bold.cyan(originH.padEnd(W_SOURCE)) : chalk.dim(originH.padEnd(W_SOURCE))
  const modelH_c = chalk.dim(modelH.padEnd(W_MODEL))
  const pingH_c = sortColumn === 'ping' ? chalk.bold.cyan(pingH.padEnd(W_PING)) : chalk.dim(pingH.padEnd(W_PING))
  const avgH_c = sortColumn === 'avg' ? chalk.bold.cyan(avgH.padEnd(W_AVG)) : chalk.dim(avgH.padEnd(W_AVG))
  const statusH_c = sortColumn === 'status' ? chalk.bold.cyan(statusH.padEnd(W_STATUS)) : chalk.dim(statusH.padEnd(W_STATUS))
  const verdictH_c = sortColumn === 'verdict' ? chalk.bold.cyan(verdictH.padEnd(W_VERDICT)) : chalk.dim(verdictH.padEnd(W_VERDICT))
  const uptimeH_c = sortColumn === 'uptime' ? chalk.bold.cyan(uptimeH.padStart(W_UPTIME)) : chalk.dim(uptimeH.padStart(W_UPTIME))

  // 📖 Header with proper spacing
  lines.push(' ' + rankH_c + ' ' + tierH_c + ' ' + originH_c + ' ' + modelH_c + ' ' + pingH_c + ' ' + avgH_c + ' ' + statusH_c + ' ' + verdictH_c + ' ' + uptimeH_c)

  // 📖 Separator line
  lines.push(
    ' ' +
    chalk.dim('─'.repeat(W_RANK)) + ' ' +
    chalk.dim('─'.repeat(W_TIER)) + ' ' +
    '─'.repeat(W_SOURCE) + ' ' +
    '─'.repeat(W_MODEL) + ' ' +
    chalk.dim('─'.repeat(W_PING)) + ' ' +
    chalk.dim('─'.repeat(W_AVG)) + ' ' +
    chalk.dim('─'.repeat(W_STATUS)) + ' ' +
    chalk.dim('─'.repeat(W_VERDICT)) + ' ' +
    chalk.dim('─'.repeat(W_UPTIME))
  )

  for (let i = 0; i < sorted.length; i++) {
    const r = sorted[i]
    const tierFn = TIER_COLOR[r.tier] ?? (t => chalk.white(t))

    const isCursor = cursor !== null && i === cursor

    // 📖 Left-aligned columns - pad plain text first, then colorize
    const num = chalk.dim(String(r.idx).padEnd(W_RANK))
    const tier = tierFn(r.tier.padEnd(W_TIER))
    const source = chalk.green('NVIDIA NIM'.padEnd(W_SOURCE))
    const name = r.label.slice(0, W_MODEL).padEnd(W_MODEL)

    // 📖 Latest ping - pings are objects: { ms, code }
    // 📖 Only show response time for successful pings, "—" for errors (error code is in Status column)
    const latestPing = r.pings.length > 0 ? r.pings[r.pings.length - 1] : null
    let pingCell
    if (!latestPing) {
      pingCell = chalk.dim('—'.padEnd(W_PING))
    } else if (latestPing.code === '200') {
      // 📖 Success - show response time
      const str = String(latestPing.ms).padEnd(W_PING)
      pingCell = latestPing.ms < 500 ? chalk.greenBright(str) : latestPing.ms < 1500 ? chalk.yellow(str) : chalk.red(str)
    } else {
      // 📖 Error or timeout - show "—" (error code is already in Status column)
      pingCell = chalk.dim('—'.padEnd(W_PING))
    }

    // 📖 Avg ping (just number, no "ms")
    const avg = getAvg(r)
    let avgCell
    if (avg !== Infinity) {
      const str = String(avg).padEnd(W_AVG)
      avgCell = avg < 500 ? chalk.greenBright(str) : avg < 1500 ? chalk.yellow(str) : chalk.red(str)
    } else {
      avgCell = chalk.dim('—'.padEnd(W_AVG))
    }

    // 📖 Status column - build plain text with emoji, pad, then colorize
    // 📖 Different emojis for different error codes
    // 📖 NOTE(review): multi-codepoint emoji (e.g. ⚠️) can occupy 2 terminal
    // 📖 columns while padEnd counts JS code units — columns may drift by one
    // 📖 cell on some terminals; confirm on target terminals
    let statusText, statusColor
    if (r.status === 'pending') {
      statusText = `${FRAMES[frame % FRAMES.length]} wait`
      statusColor = (s) => chalk.dim.yellow(s)
    } else if (r.status === 'up') {
      statusText = `✅ UP`
      statusColor = (s) => s
    } else if (r.status === 'timeout') {
      statusText = `⏳ TIMEOUT`
      statusColor = (s) => chalk.yellow(s)
    } else if (r.status === 'down') {
      const code = r.httpCode ?? 'ERR'
      // 📖 Different emojis for different error codes
      const errorEmojis = {
        '429': '🔥', // Rate limited / overloaded
        '404': '🚫', // Not found
        '500': '💥', // Internal server error
        '502': '🔌', // Bad gateway
        '503': '🔒', // Service unavailable
        '504': '⏰', // Gateway timeout
      }
      const emoji = errorEmojis[code] || '❌'
      statusText = `${emoji} ${code}`
      statusColor = (s) => chalk.red(s)
    } else {
      statusText = '?'
      statusColor = (s) => chalk.dim(s)
    }
    const status = statusColor(statusText.padEnd(W_STATUS))

    // 📖 Verdict column - build plain text with emoji, pad, then colorize
    // 📖 Thresholds mirror getVerdict; keep the two in sync when editing
    const wasUpBefore = r.pings.length > 0 && r.pings.some(p => p.code === '200')
    let verdictText, verdictColor
    if (r.httpCode === '429') {
      verdictText = '🔥 Overloaded'
      verdictColor = (s) => chalk.yellow.bold(s)
    } else if ((r.status === 'timeout' || r.status === 'down') && wasUpBefore) {
      verdictText = '⚠️ Unstable'
      verdictColor = (s) => chalk.magenta(s)
    } else if (r.status === 'timeout' || r.status === 'down') {
      verdictText = '👻 Not Active'
      verdictColor = (s) => chalk.dim(s)
    } else if (avg === Infinity) {
      verdictText = '⏳ Pending'
      verdictColor = (s) => chalk.dim(s)
    } else if (avg < 400) {
      verdictText = '🚀 Perfect'
      verdictColor = (s) => chalk.greenBright(s)
    } else if (avg < 1000) {
      verdictText = '✅ Normal'
      verdictColor = (s) => chalk.cyan(s)
    } else if (avg < 3000) {
      verdictText = '🐢 Slow'
      verdictColor = (s) => chalk.yellow(s)
    } else if (avg < 5000) {
      verdictText = '🐌 Very Slow'
      verdictColor = (s) => chalk.red(s)
    } else {
      verdictText = '💀 Unusable'
      verdictColor = (s) => chalk.red.bold(s)
    }
    const speedCell = verdictColor(verdictText.padEnd(W_VERDICT))

    // 📖 Uptime column - percentage of successful pings
    const uptimePercent = getUptime(r)
    const uptimeStr = uptimePercent + '%'
    let uptimeCell
    if (uptimePercent >= 90) {
      uptimeCell = chalk.greenBright(uptimeStr.padStart(W_UPTIME))
    } else if (uptimePercent >= 70) {
      uptimeCell = chalk.yellow(uptimeStr.padStart(W_UPTIME))
    } else if (uptimePercent >= 50) {
      uptimeCell = chalk.rgb(255, 165, 0)(uptimeStr.padStart(W_UPTIME)) // orange
    } else {
      uptimeCell = chalk.red(uptimeStr.padStart(W_UPTIME))
    }

    // 📖 Build row with separator spaces between columns
    const row = ' ' + num + ' ' + tier + ' ' + source + ' ' + name + ' ' + pingCell + ' ' + avgCell + ' ' + status + ' ' + speedCell + ' ' + uptimeCell

    // 📖 Cursor row gets a purple background so navigation is visible
    if (isCursor) {
      lines.push(chalk.bgRgb(139, 0, 139)(row))
    } else {
      lines.push(row)
    }
  }

  lines.push('')
  const intervalSec = Math.round(pingInterval / 1000)
  lines.push(chalk.dim(` ↑↓ Navigate • Enter Select • R/T/O/M/P/A/S/V/U Sort • W↓/X↑ Interval (${intervalSec}s) • Ctrl+C Exit`))
  lines.push('')
  return lines.join('\n')
}
469
+
470
// ─── HTTP ping ────────────────────────────────────────────────────────────────

/**
 * Fire one minimal chat completion at the NIM endpoint and time it.
 * Aborts the request after PING_TIMEOUT ms.
 * @param {string} apiKey - Bearer token for the NIM API.
 * @param {string} modelId - Model identifier to ping.
 * @returns {Promise<{ code: string, ms: number|string }>} HTTP status as a string
 *   ('000' on timeout, 'ERR' on other failures) and elapsed ms ('TIMEOUT' on abort).
 */
async function ping(apiKey, modelId) {
  const controller = new AbortController()
  const watchdog = setTimeout(() => controller.abort(), PING_TIMEOUT)
  const started = performance.now()
  const elapsed = () => Math.round(performance.now() - started)
  try {
    const response = await fetch(NIM_URL, {
      method: 'POST',
      signal: controller.signal,
      headers: { 'Authorization': `Bearer ${apiKey}`, 'Content-Type': 'application/json' },
      // 📖 max_tokens: 1 keeps the request as cheap as possible — we only need the status.
      body: JSON.stringify({ model: modelId, messages: [{ role: 'user', content: 'hi' }], max_tokens: 1 }),
    })
    return { code: String(response.status), ms: elapsed() }
  } catch (err) {
    // 📖 AbortError → our watchdog fired; anything else is a transport error.
    if (err.name === 'AbortError') {
      return { code: '000', ms: 'TIMEOUT' }
    }
    return { code: 'ERR', ms: elapsed() }
  } finally {
    clearTimeout(watchdog)
  }
}
493
+
494
// ─── OpenCode integration ──────────────────────────────────────────────────────
const OPENCODE_CONFIG = join(homedir(), '.config/opencode/opencode.json')

/**
 * Parse ~/.config/opencode/opencode.json.
 * @returns {object} Parsed config, or `{ provider: {} }` when the file is
 *   missing or contains invalid JSON.
 */
function loadOpenCodeConfig() {
  const empty = () => ({ provider: {} })
  if (!existsSync(OPENCODE_CONFIG)) return empty()
  try {
    return JSON.parse(readFileSync(OPENCODE_CONFIG, 'utf8'))
  } catch {
    // 📖 Corrupt config is treated the same as no config.
    return empty()
  }
}
505
+
506
/**
 * Write the OpenCode config as pretty-printed JSON, creating the
 * ~/.config/opencode directory on first use.
 * @param {object} config - Config object to serialize.
 */
function saveOpenCodeConfig(config) {
  const configDir = join(homedir(), '.config/opencode')
  if (!existsSync(configDir)) {
    mkdirSync(configDir, { recursive: true })
  }
  writeFileSync(OPENCODE_CONFIG, JSON.stringify(config, null, 2))
}
513
+
514
// ─── Check NVIDIA NIM in OpenCode config ───────────────────────────────────────
/**
 * Whether an NVIDIA NIM provider entry exists in the OpenCode config.
 * OpenCode uses 'provider' (singular), not 'providers'. A provider matches by
 * key ('nvidia' / 'nim') or by a display name containing "nvidia" or "nim"
 * (case-insensitive).
 * @returns {boolean}
 */
function checkNvidiaNimConfig() {
  const { provider } = loadOpenCodeConfig()
  if (!provider) return false
  for (const [key, entry] of Object.entries(provider)) {
    if (key === 'nvidia' || key === 'nim') return true
    const displayName = entry?.name?.toLowerCase()
    if (displayName && (displayName.includes('nvidia') || displayName.includes('nim'))) return true
  }
  return false
}
529
+
530
// ─── Start OpenCode ────────────────────────────────────────────────────────────
// 📖 Launches OpenCode with the selected NVIDIA NIM model
// 📖 If NVIDIA NIM is configured, set it as the default model; otherwise show
// 📖 install instructions — then launch OpenCode either way
// 📖 Model format: { modelId, label, tier }

// 📖 Spawn the `opencode` binary attached to the current terminal and wait for
// 📖 it to exit. Rejects if the binary cannot be started (e.g. not on PATH).
// 📖 FIX: this spawn/wait logic was duplicated verbatim in both branches of
// 📖 startOpenCode — extracted into one helper.
async function launchOpenCodeProcess() {
  const { spawn } = await import('child_process')
  const child = spawn('opencode', [], {
    stdio: 'inherit',
    shell: false
  })

  // 📖 Wait for OpenCode to exit
  await new Promise((resolve, reject) => {
    child.on('exit', resolve)
    child.on('error', reject)
  })
}

/**
 * Launch OpenCode for the selected model.
 * When the NVIDIA NIM provider is already configured, the OpenCode config's
 * default model is updated first (with a timestamped backup of the old config);
 * otherwise installation instructions are printed before launching.
 * @param {{ modelId: string, label: string, tier: string }} model
 */
async function startOpenCode(model) {
  const hasNim = checkNvidiaNimConfig()

  if (hasNim) {
    // 📖 NVIDIA NIM already configured - set the chosen model as default
    console.log(chalk.green(` 🚀 Setting ${chalk.bold(model.label)} as default…`))
    console.log(chalk.dim(` Model: nvidia/${model.modelId}`))
    console.log()

    const config = loadOpenCodeConfig()
    const backupPath = `${OPENCODE_CONFIG}.backup-${Date.now()}`

    // 📖 Backup current config so the user can roll back by hand
    if (existsSync(OPENCODE_CONFIG)) {
      copyFileSync(OPENCODE_CONFIG, backupPath)
      console.log(chalk.dim(` 💾 Backup: ${backupPath}`))
    }

    // 📖 Update default model to nvidia/model_id
    config.model = `nvidia/${model.modelId}`
    saveOpenCodeConfig(config)

    console.log(chalk.green(` ✓ Default model set to: nvidia/${model.modelId}`))
    console.log()
    console.log(chalk.dim(' Starting OpenCode…'))
    console.log()
  } else {
    // 📖 NVIDIA NIM not configured - show install instructions first
    console.log(chalk.yellow(' ⚠ NVIDIA NIM not configured in OpenCode'))
    console.log()
    console.log(chalk.dim(' Starting OpenCode with installation prompt…'))
    console.log()

    const installPrompt = `Please install NVIDIA NIM provider in OpenCode by adding this to ~/.config/opencode/opencode.json:

{
  "provider": {
    "nvidia": {
      "npm": "@ai-sdk/openai-compatible",
      "name": "NVIDIA NIM",
      "options": {
        "baseURL": "https://integrate.api.nvidia.com/v1",
        "apiKey": "{env:NVIDIA_API_KEY}"
      }
    }
  }
}

Then set env var: export NVIDIA_API_KEY=your_key_here

After installation, you can use: opencode --model nvidia/${model.modelId}`

    console.log(chalk.cyan(installPrompt))
    console.log()
    console.log(chalk.dim(' Starting OpenCode…'))
    console.log()
  }

  // 📖 Launch OpenCode and wait for it (common to both paths)
  await launchOpenCodeProcess()
}
617
+
618
// ─── Main ─────────────────────────────────────────────────────────────────────

/**
 * CLI entry point: resolve the API key (CLI arg > env var > saved file > wizard),
 * build per-model state, enter the alternate screen, wire up keyboard
 * navigation/sorting, and run the render loop plus the continuous ping loop
 * until the user selects a model (Enter) or exits (Ctrl+C).
 */
async function main() {
  // 📖 Priority: CLI arg > env var > saved config > wizard
  let apiKey = process.argv[2] || process.env.NVIDIA_API_KEY || loadApiKey()

  // 📖 Check for BEST flag - only show top tiers (A+, S, S+)
  const bestMode = process.argv.includes('--BEST') || process.argv.includes('--best')

  if (!apiKey) {
    apiKey = await promptApiKey()
    if (!apiKey) {
      console.log()
      console.log(chalk.red(' ✖ No API key provided.'))
      console.log(chalk.dim(' Run `free-coding-models` again or set NVIDIA_API_KEY env var.'))
      console.log()
      process.exit(1)
    }
  }

  // 📖 Build one mutable result record per model; filtered below in BEST mode
  let results = MODELS.map(([modelId, label, tier], i) => ({
    idx: i + 1, modelId, label, tier,
    status: 'pending',
    pings: [], // 📖 Ping history; each entry is an object { ms, code }
    httpCode: null,
  }))

  if (bestMode) {
    results = results.filter(r => r.tier === 'S+' || r.tier === 'S' || r.tier === 'A+')
  }

  // 📖 Interactive selection state shared by the key handler and render loop
  // 📖 sortColumn: 'rank'|'tier'|'origin'|'model'|'ping'|'avg'|'status'|'verdict'|'uptime'
  // 📖 sortDirection: 'asc' (default) or 'desc'
  // 📖 pingInterval: current interval in ms (default PING_INTERVAL = 2000, adjustable with W/X keys)
  const state = {
    results,
    pendingPings: 0,
    frame: 0,
    cursor: 0,
    selectedModel: null,
    sortColumn: 'avg',
    sortDirection: 'asc',
    pingInterval: PING_INTERVAL, // 📖 Track current interval for the W/X keys
    lastPingTime: Date.now() // 📖 Track when last ping cycle started
  }

  // 📖 Enter alternate screen — animation runs here, zero scrollback pollution
  process.stdout.write(ALT_ENTER)

  // 📖 Ensure we always leave alt screen cleanly (Ctrl+C, crash, normal exit)
  const exit = (code = 0) => {
    clearInterval(ticker)
    clearTimeout(state.pingIntervalObj)
    process.stdout.write(ALT_LEAVE)
    process.exit(code)
  }
  process.on('SIGINT', () => exit(0))
  process.on('SIGTERM', () => exit(0))

  // 📖 Setup keyboard input for interactive selection during pings
  // 📖 Use readline with keypress event for arrow key handling
  process.stdin.setEncoding('utf8')
  process.stdin.resume()

  let userSelected = null

  const onKeyPress = async (str, key) => {
    if (!key) return

    // 📖 Sorting keys: R=rank, T=tier, O=origin, M=model, P=ping, A=avg, S=status, V=verdict, U=uptime
    const sortKeys = {
      'r': 'rank', 't': 'tier', 'o': 'origin', 'm': 'model',
      'p': 'ping', 'a': 'avg', 's': 'status', 'v': 'verdict', 'u': 'uptime'
    }

    if (sortKeys[key.name]) {
      const col = sortKeys[key.name]
      // 📖 Toggle direction if same column, otherwise reset to asc
      if (state.sortColumn === col) {
        state.sortDirection = state.sortDirection === 'asc' ? 'desc' : 'asc'
      } else {
        state.sortColumn = col
        state.sortDirection = 'asc'
      }
      return
    }

    // 📖 Interval adjustment keys: W=decrease (faster), X=increase (slower)
    // 📖 Minimum 1s, maximum 60s
    if (key.name === 'w') {
      state.pingInterval = Math.max(1000, state.pingInterval - 1000)
      return
    }

    if (key.name === 'x') {
      state.pingInterval = Math.min(60000, state.pingInterval + 1000)
      return
    }

    if (key.name === 'up') {
      if (state.cursor > 0) {
        state.cursor--
      }
      return
    }

    if (key.name === 'down') {
      if (state.cursor < results.length - 1) {
        state.cursor++
      }
      return
    }

    if (key.name === 'c' && key.ctrl) { // Ctrl+C
      exit(0)
      return
    }

    if (key.name === 'return') { // Enter
      // 📖 Use the same sorting as the table display
      const sorted = sortResults(results, state.sortColumn, state.sortDirection)
      const selected = sorted[state.cursor]
      // 📖 Allow selecting ANY model (even timeout/down) - user knows what they're doing
      // 📖 NOTE(review): `if (true)` looks like a leftover from a removed status
      // 📖 guard — the body could be unconditional
      if (true) {
        userSelected = { modelId: selected.modelId, label: selected.label, tier: selected.tier }
        // 📖 Stop everything and launch OpenCode immediately
        clearInterval(ticker)
        clearTimeout(state.pingIntervalObj)
        readline.emitKeypressEvents(process.stdin)
        process.stdin.setRawMode(true)
        process.stdin.pause()
        process.stdin.removeListener('keypress', onKeyPress)
        process.stdout.write(ALT_LEAVE)

        // 📖 Show selection with status
        if (selected.status === 'timeout') {
          console.log(chalk.yellow(` ⚠ Selected: ${selected.label} (currently timing out)`))
        } else if (selected.status === 'down') {
          console.log(chalk.red(` ⚠ Selected: ${selected.label} (currently down)`))
        } else {
          console.log(chalk.cyan(` ✓ Selected: ${selected.label}`))
        }
        console.log()

        // 📖 Wait for OpenCode to finish before exiting
        await startOpenCode(userSelected)
        process.exit(0)
      }
    }
  }

  // 📖 Enable keypress events on stdin
  readline.emitKeypressEvents(process.stdin)
  if (process.stdin.isTTY) {
    process.stdin.setRawMode(true)
  }

  process.stdin.on('keypress', onKeyPress)

  // 📖 Animation loop: clear alt screen + redraw table at FPS with cursor
  const ticker = setInterval(() => {
    state.frame++
    process.stdout.write(ALT_CLEAR + renderTable(state.results, state.pendingPings, state.frame, state.cursor, state.sortColumn, state.sortDirection, state.pingInterval, state.lastPingTime))
  }, Math.round(1000 / FPS))

  process.stdout.write(ALT_CLEAR + renderTable(state.results, state.pendingPings, state.frame, state.cursor, state.sortColumn, state.sortDirection, state.pingInterval, state.lastPingTime))

  // ── Continuous ping loop — ping all models on a rolling interval forever ─────

  // 📖 Single ping function that updates one result record in place
  const pingModel = async (r) => {
    const { code, ms } = await ping(apiKey, r.modelId)

    // 📖 Store ping result as object with ms and code
    // 📖 ms = actual response time (even for errors like 429), or 'TIMEOUT'
    // 📖 code = HTTP status code ('200', '429', '500', '000' for timeout)
    r.pings.push({ ms, code })

    // 📖 Update status based on latest ping
    // 📖 NOTE(review): httpCode is never cleared on success, so a model that
    // 📖 recovers after a 429 keeps satisfying getVerdict's 'Overloaded' check —
    // 📖 confirm whether this stickiness is intended
    if (code === '200') {
      r.status = 'up'
    } else if (code === '000') {
      r.status = 'timeout'
    } else {
      r.status = 'down'
      r.httpCode = code
    }
  }

  // 📖 Initial ping of all models
  const initialPing = Promise.all(results.map(r => pingModel(r)))

  // 📖 Continuous ping loop with dynamic interval (adjustable with W/X keys)
  const schedulePing = () => {
    state.pingIntervalObj = setTimeout(async () => {
      state.lastPingTime = Date.now()

      results.forEach(r => {
        pingModel(r).catch(() => {
          // Individual ping failures don't crash the loop
        })
      })

      // 📖 Schedule next ping with current interval
      schedulePing()
    }, state.pingInterval)
  }

  // 📖 Start the ping loop
  state.pingIntervalObj = null
  schedulePing()

  await initialPing

  // 📖 Keep interface running forever - user can select anytime or Ctrl+C to exit
  // 📖 The pings continue running in background with dynamic interval
  // 📖 User can press W to decrease interval (faster pings) or X to increase (slower)
  // 📖 Current interval shown in header: "next ping Xs"
}
839
+
840
// 📖 Top-level error guard: leave the alternate screen BEFORE printing the error
// 📖 so the stack trace lands in the normal scrollback, then exit non-zero.
main().catch((err) => {
  process.stdout.write(ALT_LEAVE)
  console.error(err)
  process.exit(1)
})