free-coding-models 0.1.39 → 0.1.40

This diff shows the changes between publicly available package versions released to one of the supported registries. The information is provided for informational purposes only and reflects the versions as they appear in their respective public registries.
@@ -421,7 +421,7 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
421
421
  // 📖 Column widths (generous spacing with margins)
422
422
  const W_RANK = 6
423
423
  const W_TIER = 6
424
- const W_CTW = 6
424
+ const W_CTX = 6
425
425
  const W_SOURCE = 14
426
426
  const W_MODEL = 26
427
427
  const W_SWE = 9
@@ -454,7 +454,7 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
454
454
  const originH = 'Origin'
455
455
  const modelH = 'Model'
456
456
  const sweH = sortColumn === 'swe' ? dir + ' SWE%' : 'SWE%'
457
- const ctwH = sortColumn === 'ctw' ? dir + ' CTW' : 'CTW'
457
+ const ctxH = sortColumn === 'ctx' ? dir + ' CTX' : 'CTX'
458
458
  const pingH = sortColumn === 'ping' ? dir + ' Latest Ping' : 'Latest Ping'
459
459
  const avgH = sortColumn === 'avg' ? dir + ' Avg Ping' : 'Avg Ping'
460
460
  const healthH = sortColumn === 'condition' ? dir + ' Health' : 'Health'
@@ -477,15 +477,15 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
477
477
  const originH_c = sortColumn === 'origin' ? chalk.bold.cyan(originH.padEnd(W_SOURCE)) : colorFirst(originH, W_SOURCE)
478
478
  const modelH_c = colorFirst(modelH, W_MODEL)
479
479
  const sweH_c = sortColumn === 'swe' ? chalk.bold.cyan(sweH.padEnd(W_SWE)) : colorFirst(sweH, W_SWE)
480
- const ctwH_c = sortColumn === 'ctw' ? chalk.bold.cyan(ctwH.padEnd(W_CTW)) : colorFirst(ctwH, W_CTW)
480
+ const ctxH_c = sortColumn === 'ctx' ? chalk.bold.cyan(ctxH.padEnd(W_CTX)) : colorFirst(ctxH, W_CTX)
481
481
  const pingH_c = sortColumn === 'ping' ? chalk.bold.cyan(pingH.padEnd(W_PING)) : colorFirst('Latest Ping', W_PING)
482
482
  const avgH_c = sortColumn === 'avg' ? chalk.bold.cyan(avgH.padEnd(W_AVG)) : colorFirst('Avg Ping', W_AVG)
483
483
  const healthH_c = sortColumn === 'condition' ? chalk.bold.cyan(healthH.padEnd(W_STATUS)) : colorFirst('Health', W_STATUS)
484
484
  const verdictH_c = sortColumn === 'verdict' ? chalk.bold.cyan(verdictH.padEnd(W_VERDICT)) : colorFirst(verdictH, W_VERDICT)
485
485
  const uptimeH_c = sortColumn === 'uptime' ? chalk.bold.cyan(uptimeH.padStart(W_UPTIME)) : colorFirst(uptimeH, W_UPTIME, chalk.green)
486
486
 
487
- // 📖 Header with proper spacing (column order: Rank, Tier, SWE%, CTW, Model, Origin, Latest Ping, Avg Ping, Health, Verdict, Up%)
488
- lines.push(' ' + rankH_c + ' ' + tierH_c + ' ' + sweH_c + ' ' + ctwH_c + ' ' + modelH_c + ' ' + originH_c + ' ' + pingH_c + ' ' + avgH_c + ' ' + healthH_c + ' ' + verdictH_c + ' ' + uptimeH_c)
487
+ // 📖 Header with proper spacing (column order: Rank, Tier, SWE%, CTX, Model, Origin, Latest Ping, Avg Ping, Health, Verdict, Up%)
488
+ lines.push(' ' + rankH_c + ' ' + tierH_c + ' ' + sweH_c + ' ' + ctxH_c + ' ' + modelH_c + ' ' + originH_c + ' ' + pingH_c + ' ' + avgH_c + ' ' + healthH_c + ' ' + verdictH_c + ' ' + uptimeH_c)
489
489
 
490
490
  // 📖 Separator line
491
491
  lines.push(
@@ -493,7 +493,7 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
493
493
  chalk.dim('─'.repeat(W_RANK)) + ' ' +
494
494
  chalk.dim('─'.repeat(W_TIER)) + ' ' +
495
495
  chalk.dim('─'.repeat(W_SWE)) + ' ' +
496
- chalk.dim('─'.repeat(W_CTW)) + ' ' +
496
+ chalk.dim('─'.repeat(W_CTX)) + ' ' +
497
497
  '─'.repeat(W_MODEL) + ' ' +
498
498
  '─'.repeat(W_SOURCE) + ' ' +
499
499
  chalk.dim('─'.repeat(W_PING)) + ' ' +
@@ -529,12 +529,12 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
529
529
  : chalk.dim(sweScore.padEnd(W_SWE))
530
530
 
531
531
  // 📖 Context window column - colorized by size (larger = better)
532
- const ctwRaw = r.ctw ?? '—'
533
- const ctwCell = ctwRaw !== '—' && (ctwRaw.includes('128k') || ctwRaw.includes('200k') || ctwRaw.includes('1m'))
534
- ? chalk.greenBright(ctwRaw.padEnd(W_CTW))
535
- : ctwRaw !== '—' && (ctwRaw.includes('32k') || ctwRaw.includes('64k'))
536
- ? chalk.cyan(ctwRaw.padEnd(W_CTW))
537
- : chalk.dim(ctwRaw.padEnd(W_CTW))
532
+ const ctxRaw = r.ctx ?? '—'
533
+ const ctxCell = ctxRaw !== '—' && (ctxRaw.includes('128k') || ctxRaw.includes('200k') || ctxRaw.includes('1m'))
534
+ ? chalk.greenBright(ctxRaw.padEnd(W_CTX))
535
+ : ctxRaw !== '—' && (ctxRaw.includes('32k') || ctxRaw.includes('64k'))
536
+ ? chalk.cyan(ctxRaw.padEnd(W_CTX))
537
+ : chalk.dim(ctxRaw.padEnd(W_CTX))
538
538
 
539
539
  // 📖 Latest ping - pings are objects: { ms, code }
540
540
  // 📖 Only show response time for successful pings, "—" for errors (error code is in Status column)
@@ -640,8 +640,8 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
640
640
  uptimeCell = chalk.red(uptimeStr.padStart(W_UPTIME))
641
641
  }
642
642
 
643
- // 📖 Build row with double space between columns (order: Rank, Tier, SWE%, CTW, Model, Origin, Latest Ping, Avg Ping, Health, Verdict, Up%)
644
- const row = ' ' + num + ' ' + tier + ' ' + sweCell + ' ' + ctwCell + ' ' + name + ' ' + source + ' ' + pingCell + ' ' + avgCell + ' ' + status + ' ' + speedCell + ' ' + uptimeCell
643
+ // 📖 Build row with double space between columns (order: Rank, Tier, SWE%, CTX, Model, Origin, Latest Ping, Avg Ping, Health, Verdict, Up%)
644
+ const row = ' ' + num + ' ' + tier + ' ' + sweCell + ' ' + ctxCell + ' ' + name + ' ' + source + ' ' + pingCell + ' ' + avgCell + ' ' + status + ' ' + speedCell + ' ' + uptimeCell
645
645
 
646
646
  if (isCursor) {
647
647
  lines.push(chalk.bgRgb(139, 0, 139)(row))
@@ -1105,8 +1105,8 @@ async function runFiableMode(apiKey) {
1105
1105
  console.log(chalk.cyan(' ⚡ Analyzing models for reliability (10 seconds)...'))
1106
1106
  console.log()
1107
1107
 
1108
- let results = MODELS.map(([modelId, label, tier, sweScore, ctw], i) => ({
1109
- idx: i + 1, modelId, label, tier, sweScore, ctw,
1108
+ let results = MODELS.map(([modelId, label, tier, sweScore, ctx], i) => ({
1109
+ idx: i + 1, modelId, label, tier, sweScore, ctx,
1110
1110
  status: 'pending',
1111
1111
  pings: [],
1112
1112
  httpCode: null,
@@ -1183,21 +1183,47 @@ async function main() {
1183
1183
  }
1184
1184
  }
1185
1185
 
1186
- // 📖 Skip update check during development to avoid blocking menus
1187
- // 📖 In production, this will work correctly when versions are published
1188
- const latestVersion = null // Skip update check for now
1186
+ // 📖 Check for updates in the background
1187
+ let latestVersion = null
1188
+ try {
1189
+ latestVersion = await checkForUpdate()
1190
+ } catch {
1191
+ // Silently fail - don't block the app if npm registry is unreachable
1192
+ }
1189
1193
 
1190
1194
  // 📖 Default mode: OpenCode CLI
1191
1195
  let mode = 'opencode'
1192
1196
 
1193
- // 📖 AUTO-UPDATE: Disabled during development
1194
- // 📖 Will be re-enabled when versions are properly published
1195
-
1196
- // 📖 This section is now handled by the update notification menu above
1197
+ // 📖 Show update notification menu if a new version is available
1198
+ if (latestVersion) {
1199
+ const action = await promptUpdateNotification(latestVersion)
1200
+ if (action === 'update') {
1201
+ runUpdate(latestVersion)
1202
+ return // runUpdate will restart the process
1203
+ } else if (action === 'changelogs') {
1204
+ console.log()
1205
+ console.log(chalk.cyan(' Opening changelog in browser...'))
1206
+ console.log()
1207
+ const { execSync } = require('child_process')
1208
+ const changelogUrl = 'https://github.com/vava-nessa/free-coding-models/releases'
1209
+ try {
1210
+ if (isMac) {
1211
+ execSync(`open "${changelogUrl}"`, { stdio: 'ignore' })
1212
+ } else if (isWindows) {
1213
+ execSync(`start "" "${changelogUrl}"`, { stdio: 'ignore' })
1214
+ } else {
1215
+ execSync(`xdg-open "${changelogUrl}"`, { stdio: 'ignore' })
1216
+ }
1217
+ } catch {
1218
+ console.log(chalk.dim(` Could not open browser. Visit: ${changelogUrl}`))
1219
+ }
1220
+ }
1221
+ // If action is null (Continue without update) or changelogs, proceed to main app
1222
+ }
1197
1223
 
1198
1224
  // 📖 Create results array with all models initially visible
1199
- let results = MODELS.map(([modelId, label, tier, sweScore, ctw], i) => ({
1200
- idx: i + 1, modelId, label, tier, sweScore, ctw,
1225
+ let results = MODELS.map(([modelId, label, tier, sweScore, ctx], i) => ({
1226
+ idx: i + 1, modelId, label, tier, sweScore, ctx,
1201
1227
  status: 'pending',
1202
1228
  pings: [], // 📖 All ping results (ms or 'TIMEOUT')
1203
1229
  httpCode: null,
@@ -1306,10 +1332,10 @@ async function main() {
1306
1332
  const onKeyPress = async (str, key) => {
1307
1333
  if (!key) return
1308
1334
 
1309
- // 📖 Sorting keys: R=rank, T=tier, O=origin, M=model, L=latest ping, A=avg ping, S=SWE-bench, C=context window, H=health, V=verdict, U=uptime
1335
+ // 📖 Sorting keys: R=rank, T=tier, O=origin, M=model, L=latest ping, A=avg ping, S=SWE-bench, N=context, H=health, V=verdict, U=uptime
1310
1336
  const sortKeys = {
1311
1337
  'r': 'rank', 't': 'tier', 'o': 'origin', 'm': 'model',
1312
- 'l': 'ping', 'a': 'avg', 's': 'swe', 'c': 'ctw', 'h': 'condition', 'v': 'verdict', 'u': 'uptime'
1338
+ 'l': 'ping', 'a': 'avg', 's': 'swe', 'n': 'ctx', 'h': 'condition', 'v': 'verdict', 'u': 'uptime'
1313
1339
  }
1314
1340
 
1315
1341
  if (sortKeys[key.name]) {
package/lib/utils.js CHANGED
@@ -136,7 +136,7 @@ export const getUptime = (r) => {
136
136
  // - 'ping' (L key) — last ping latency (only successful ones count)
137
137
  // - 'avg' (A key) — average latency across all successful pings
138
138
  // - 'swe' (S key) — SWE-bench score (higher is better)
139
- // - 'ctw' (C key) — context window size (larger is better)
139
+ // - 'ctx' (N key) — context window size (larger is better)
140
140
  // - 'condition' (H key) — health status (alphabetical)
141
141
  // - 'verdict' (V key) — verdict order (Perfect → Pending)
142
142
  // - 'uptime' (U key) — uptime percentage
@@ -185,12 +185,12 @@ export const sortResults = (results, sortColumn, sortDirection) => {
185
185
  cmp = parseSwe(a.sweScore) - parseSwe(b.sweScore)
186
186
  break
187
187
  }
188
- case 'ctw': {
188
+ case 'ctx': {
189
189
  // 📖 Sort by context window size — larger is better
190
190
  // 📖 Parse strings like "128k", "32k", "1m" into numeric tokens
191
- const parseCtw = (ctw) => {
192
- if (!ctw || ctw === '—') return 0
193
- const str = ctw.toLowerCase()
191
+ const parseCtx = (ctx) => {
192
+ if (!ctx || ctx === '—') return 0
193
+ const str = ctx.toLowerCase()
194
194
  // 📖 Handle millions (1m = 1000k)
195
195
  if (str.includes('m')) {
196
196
  const num = parseFloat(str.replace('m', ''))
@@ -203,7 +203,7 @@ export const sortResults = (results, sortColumn, sortDirection) => {
203
203
  }
204
204
  return 0
205
205
  }
206
- cmp = parseCtw(a.ctw) - parseCtw(b.ctw)
206
+ cmp = parseCtx(a.ctx) - parseCtx(b.ctx)
207
207
  break
208
208
  }
209
209
  case 'condition':
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "free-coding-models",
3
- "version": "0.1.39",
3
+ "version": "0.1.40",
4
4
  "description": "Find the fastest coding LLM models in seconds — ping free models from multiple providers, pick the best one for OpenCode, Cursor, or any AI coding assistant.",
5
5
  "keywords": [
6
6
  "nvidia",
package/sources.js CHANGED
@@ -4,12 +4,12 @@
4
4
  *
5
5
  * @details
6
6
  * This file contains all model definitions organized by provider/source.
7
- * Each source has its own models array with [model_id, display_label, tier, swe_score, ctw].
7
+ * Each source has its own models array with [model_id, display_label, tier, swe_score, ctx].
8
8
  * - model_id: The model identifier for API calls
9
9
  * - display_label: Human-friendly name for display
10
10
  * - tier: Performance tier (S+, S, A+, A, A-, B+, B, C)
11
11
  * - swe_score: SWE-bench Verified score percentage
12
- * - ctw: Context window size in tokens (e.g., "128k", "32k")
12
+ * - ctx: Context window size in tokens (e.g., "128k", "32k")
13
13
  *
14
14
  * Add new sources here to support additional providers beyond NIM.
15
15
  *
@@ -95,7 +95,7 @@ export const sources = {
95
95
  // 📖 Flatten all models from all sources for backward compatibility
96
96
  export const MODELS = []
97
97
  for (const [sourceKey, sourceData] of Object.entries(sources)) {
98
- for (const [modelId, label, tier, sweScore, ctw] of sourceData.models) {
99
- MODELS.push([modelId, label, tier, sweScore, ctw])
98
+ for (const [modelId, label, tier, sweScore, ctx] of sourceData.models) {
99
+ MODELS.push([modelId, label, tier, sweScore, ctx])
100
100
  }
101
101
  }