free-coding-models 0.1.37 β†’ 0.1.39

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -75,7 +75,7 @@ import chalk from 'chalk'
75
75
  import { createRequire } from 'module'
76
76
  import { readFileSync, writeFileSync, existsSync, copyFileSync, mkdirSync } from 'fs'
77
77
  import { homedir } from 'os'
78
- import { join } from 'path'
78
+ import { join, dirname } from 'path'
79
79
  import { MODELS } from '../sources.js'
80
80
  import { patchOpenClawModelsJson } from '../patch-openclaw-models.js'
81
81
  import { getAvg, getVerdict, getUptime, sortResults, filterByTier, findBestModel, parseArgs, TIER_ORDER, VERDICT_ORDER, TIER_LETTER_MAP } from '../lib/utils.js'
@@ -421,6 +421,7 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
421
421
  // πŸ“– Column widths (generous spacing with margins)
422
422
  const W_RANK = 6
423
423
  const W_TIER = 6
424
+ const W_CTW = 6
424
425
  const W_SOURCE = 14
425
426
  const W_MODEL = 26
426
427
  const W_SWE = 9
@@ -453,6 +454,7 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
453
454
  const originH = 'Origin'
454
455
  const modelH = 'Model'
455
456
  const sweH = sortColumn === 'swe' ? dir + ' SWE%' : 'SWE%'
457
+ const ctwH = sortColumn === 'ctw' ? dir + ' CTW' : 'CTW'
456
458
  const pingH = sortColumn === 'ping' ? dir + ' Latest Ping' : 'Latest Ping'
457
459
  const avgH = sortColumn === 'avg' ? dir + ' Avg Ping' : 'Avg Ping'
458
460
  const healthH = sortColumn === 'condition' ? dir + ' Health' : 'Health'
@@ -475,14 +477,15 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
475
477
  const originH_c = sortColumn === 'origin' ? chalk.bold.cyan(originH.padEnd(W_SOURCE)) : colorFirst(originH, W_SOURCE)
476
478
  const modelH_c = colorFirst(modelH, W_MODEL)
477
479
  const sweH_c = sortColumn === 'swe' ? chalk.bold.cyan(sweH.padEnd(W_SWE)) : colorFirst(sweH, W_SWE)
480
+ const ctwH_c = sortColumn === 'ctw' ? chalk.bold.cyan(ctwH.padEnd(W_CTW)) : colorFirst(ctwH, W_CTW)
478
481
  const pingH_c = sortColumn === 'ping' ? chalk.bold.cyan(pingH.padEnd(W_PING)) : colorFirst('Latest Ping', W_PING)
479
482
  const avgH_c = sortColumn === 'avg' ? chalk.bold.cyan(avgH.padEnd(W_AVG)) : colorFirst('Avg Ping', W_AVG)
480
483
  const healthH_c = sortColumn === 'condition' ? chalk.bold.cyan(healthH.padEnd(W_STATUS)) : colorFirst('Health', W_STATUS)
481
484
  const verdictH_c = sortColumn === 'verdict' ? chalk.bold.cyan(verdictH.padEnd(W_VERDICT)) : colorFirst(verdictH, W_VERDICT)
482
485
  const uptimeH_c = sortColumn === 'uptime' ? chalk.bold.cyan(uptimeH.padStart(W_UPTIME)) : colorFirst(uptimeH, W_UPTIME, chalk.green)
483
486
 
484
- // πŸ“– Header with proper spacing
485
- lines.push(' ' + rankH_c + ' ' + tierH_c + ' ' + sweH_c + ' ' + modelH_c + ' ' + originH_c + ' ' + pingH_c + ' ' + avgH_c + ' ' + healthH_c + ' ' + verdictH_c + ' ' + uptimeH_c)
487
+ // πŸ“– Header with proper spacing (column order: Rank, Tier, SWE%, CTW, Model, Origin, Latest Ping, Avg Ping, Health, Verdict, Up%)
488
+ lines.push(' ' + rankH_c + ' ' + tierH_c + ' ' + sweH_c + ' ' + ctwH_c + ' ' + modelH_c + ' ' + originH_c + ' ' + pingH_c + ' ' + avgH_c + ' ' + healthH_c + ' ' + verdictH_c + ' ' + uptimeH_c)
486
489
 
487
490
  // πŸ“– Separator line
488
491
  lines.push(
@@ -490,6 +493,7 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
490
493
  chalk.dim('─'.repeat(W_RANK)) + ' ' +
491
494
  chalk.dim('─'.repeat(W_TIER)) + ' ' +
492
495
  chalk.dim('─'.repeat(W_SWE)) + ' ' +
496
+ chalk.dim('─'.repeat(W_CTW)) + ' ' +
493
497
  '─'.repeat(W_MODEL) + ' ' +
494
498
  '─'.repeat(W_SOURCE) + ' ' +
495
499
  chalk.dim('─'.repeat(W_PING)) + ' ' +
@@ -523,6 +527,14 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
523
527
  : sweScore !== 'β€”' && parseFloat(sweScore) >= 30
524
528
  ? chalk.yellow(sweScore.padEnd(W_SWE))
525
529
  : chalk.dim(sweScore.padEnd(W_SWE))
530
+
531
+ // πŸ“– Context window column - colorized by size (larger = better)
532
+ const ctwRaw = r.ctw ?? 'β€”'
533
+ const ctwCell = ctwRaw !== 'β€”' && (ctwRaw.includes('128k') || ctwRaw.includes('200k') || ctwRaw.includes('1m'))
534
+ ? chalk.greenBright(ctwRaw.padEnd(W_CTW))
535
+ : ctwRaw !== 'β€”' && (ctwRaw.includes('32k') || ctwRaw.includes('64k'))
536
+ ? chalk.cyan(ctwRaw.padEnd(W_CTW))
537
+ : chalk.dim(ctwRaw.padEnd(W_CTW))
526
538
 
527
539
  // πŸ“– Latest ping - pings are objects: { ms, code }
528
540
  // πŸ“– Only show response time for successful pings, "β€”" for errors (error code is in Status column)
@@ -628,8 +640,8 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
628
640
  uptimeCell = chalk.red(uptimeStr.padStart(W_UPTIME))
629
641
  }
630
642
 
631
- // πŸ“– Build row with double space between columns
632
- const row = ' ' + num + ' ' + tier + ' ' + sweCell + ' ' + name + ' ' + source + ' ' + pingCell + ' ' + avgCell + ' ' + status + ' ' + speedCell + ' ' + uptimeCell
643
+ // πŸ“– Build row with double space between columns (order: Rank, Tier, SWE%, CTW, Model, Origin, Latest Ping, Avg Ping, Health, Verdict, Up%)
644
+ const row = ' ' + num + ' ' + tier + ' ' + sweCell + ' ' + ctwCell + ' ' + name + ' ' + source + ' ' + pingCell + ' ' + avgCell + ' ' + status + ' ' + speedCell + ' ' + uptimeCell
633
645
 
634
646
  if (isCursor) {
635
647
  lines.push(chalk.bgRgb(139, 0, 139)(row))
@@ -651,7 +663,7 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
651
663
  : mode === 'opencode-desktop'
652
664
  ? chalk.rgb(0, 200, 255)('Enterβ†’OpenDesktop')
653
665
  : chalk.rgb(0, 200, 255)('Enterβ†’OpenCode')
654
- lines.push(chalk.dim(` ↑↓ Navigate β€’ `) + actionHint + chalk.dim(` β€’ R/T/O/M/L/A/S/H/V/U Sort β€’ W↓/X↑ Interval (${intervalSec}s) β€’ T Tier β€’ Z Mode β€’ Ctrl+C Exit`))
666
+ lines.push(chalk.dim(` ↑↓ Navigate β€’ `) + actionHint + chalk.dim(` β€’ R/T/O/M/L/A/S/C/H/V/U Sort β€’ W↓/X↑ Interval (${intervalSec}s) β€’ T Tier β€’ Z Mode β€’ Ctrl+C Exit`))
655
667
  lines.push('')
656
668
  lines.push(chalk.dim(' Made with ') + 'πŸ’– & β˜•' + chalk.dim(' by ') + '\x1b]8;;https://github.com/vava-nessa\x1b\\vava-nessa\x1b]8;;\x1b\\' + chalk.dim(' β€’ ') + 'πŸ’¬ ' + chalk.cyanBright('\x1b]8;;https://discord.gg/WKA3TwYVuZ\x1b\\Join Free-Coding-Models Discord!\x1b]8;;\x1b\\') + chalk.dim(' β€’ ') + '⭐ ' + '\x1b]8;;https://github.com/vava-nessa/free-coding-models\x1b\\Read the docs on GitHub\x1b]8;;\x1b\\')
657
669
  lines.push('')
@@ -690,23 +702,44 @@ async function ping(apiKey, modelId) {
690
702
  }
691
703
 
692
704
  // ─── OpenCode integration ──────────────────────────────────────────────────────
693
- const OPENCODE_CONFIG = join(homedir(), '.config/opencode/opencode.json')
705
+ // πŸ“– Platform-specific config path
706
+ const isWindows = process.platform === 'win32'
707
+ const isMac = process.platform === 'darwin'
708
+ const isLinux = process.platform === 'linux'
709
+
710
+ // πŸ“– OpenCode config location varies by platform
711
+ // πŸ“– Windows: %APPDATA%\opencode\opencode.json (or sometimes ~/.config/opencode)
712
+ // πŸ“– macOS/Linux: ~/.config/opencode/opencode.json
713
+ const OPENCODE_CONFIG = isWindows
714
+ ? join(homedir(), 'AppData', 'Roaming', 'opencode', 'opencode.json')
715
+ : join(homedir(), '.config', 'opencode', 'opencode.json')
716
+
717
+ // πŸ“– Fallback to .config on Windows if AppData doesn't exist
718
+ const OPENCODE_CONFIG_FALLBACK = join(homedir(), '.config', 'opencode', 'opencode.json')
719
+
720
+ function getOpenCodeConfigPath() {
721
+ if (existsSync(OPENCODE_CONFIG)) return OPENCODE_CONFIG
722
+ if (isWindows && existsSync(OPENCODE_CONFIG_FALLBACK)) return OPENCODE_CONFIG_FALLBACK
723
+ return OPENCODE_CONFIG
724
+ }
694
725
 
695
726
  function loadOpenCodeConfig() {
696
- if (!existsSync(OPENCODE_CONFIG)) return { provider: {} }
727
+ const configPath = getOpenCodeConfigPath()
728
+ if (!existsSync(configPath)) return { provider: {} }
697
729
  try {
698
- return JSON.parse(readFileSync(OPENCODE_CONFIG, 'utf8'))
730
+ return JSON.parse(readFileSync(configPath, 'utf8'))
699
731
  } catch {
700
732
  return { provider: {} }
701
733
  }
702
734
  }
703
735
 
704
736
  function saveOpenCodeConfig(config) {
705
- const dir = join(homedir(), '.config/opencode')
737
+ const configPath = getOpenCodeConfigPath()
738
+ const dir = dirname(configPath)
706
739
  if (!existsSync(dir)) {
707
740
  mkdirSync(dir, { recursive: true })
708
741
  }
709
- writeFileSync(OPENCODE_CONFIG, JSON.stringify(config, null, 2))
742
+ writeFileSync(configPath, JSON.stringify(config, null, 2))
710
743
  }
711
744
 
712
745
  // ─── Check NVIDIA NIM in OpenCode config ───────────────────────────────────────
@@ -739,11 +772,11 @@ async function startOpenCode(model) {
739
772
  console.log()
740
773
 
741
774
  const config = loadOpenCodeConfig()
742
- const backupPath = `${OPENCODE_CONFIG}.backup-${Date.now()}`
775
+ const backupPath = `${getOpenCodeConfigPath()}.backup-${Date.now()}`
743
776
 
744
777
  // πŸ“– Backup current config
745
- if (existsSync(OPENCODE_CONFIG)) {
746
- copyFileSync(OPENCODE_CONFIG, backupPath)
778
+ if (existsSync(getOpenCodeConfigPath())) {
779
+ copyFileSync(getOpenCodeConfigPath(), backupPath)
747
780
  console.log(chalk.dim(` πŸ’Ύ Backup: ${backupPath}`))
748
781
  }
749
782
 
@@ -762,16 +795,28 @@ async function startOpenCode(model) {
762
795
 
763
796
  saveOpenCodeConfig(config)
764
797
 
765
- console.log(chalk.green(` βœ“ Default model set to: nvidia/${model.modelId}`))
798
+ // πŸ“– Verify config was saved correctly
799
+ const savedConfig = loadOpenCodeConfig()
800
+ console.log(chalk.dim(` πŸ“ Config saved to: ${getOpenCodeConfigPath()}`))
801
+ console.log(chalk.dim(` πŸ“ Default model in config: ${savedConfig.model || 'NOT SET'}`))
802
+ console.log()
803
+
804
+ if (savedConfig.model === config.model) {
805
+ console.log(chalk.green(` βœ“ Default model set to: nvidia/${model.modelId}`))
806
+ } else {
807
+ console.log(chalk.yellow(` ⚠ Config might not have been saved correctly`))
808
+ }
766
809
  console.log()
767
810
  console.log(chalk.dim(' Starting OpenCode…'))
768
811
  console.log()
769
812
 
770
813
  // πŸ“– Launch OpenCode and wait for it
814
+ // πŸ“– Use --model flag to ensure the model is selected
771
815
  const { spawn } = await import('child_process')
772
- const child = spawn('opencode', [], {
816
+ const child = spawn('opencode', ['--model', `nvidia/${model.modelId}`], {
773
817
  stdio: 'inherit',
774
- shell: true
818
+ shell: true,
819
+ detached: false
775
820
  })
776
821
 
777
822
  // πŸ“– Wait for OpenCode to exit
@@ -794,7 +839,8 @@ async function startOpenCode(model) {
794
839
  console.log(chalk.dim(' Starting OpenCode with installation prompt…'))
795
840
  console.log()
796
841
 
797
- const installPrompt = `Please install NVIDIA NIM provider in OpenCode by adding this to ~/.config/opencode/opencode.json:
842
+ const configPath = getOpenCodeConfigPath()
843
+ const installPrompt = `Please install NVIDIA NIM provider in OpenCode by adding this to ${configPath}:
798
844
 
799
845
  {
800
846
  "provider": {
@@ -809,7 +855,7 @@ async function startOpenCode(model) {
809
855
  }
810
856
  }
811
857
 
812
- Then set env var: export NVIDIA_API_KEY=your_key_here
858
+ ${isWindows ? 'set NVIDIA_API_KEY=your_key_here' : 'export NVIDIA_API_KEY=your_key_here'}
813
859
 
814
860
  After installation, you can use: opencode --model nvidia/${model.modelId}`
815
861
 
@@ -821,7 +867,8 @@ After installation, you can use: opencode --model nvidia/${model.modelId}`
821
867
  const { spawn } = await import('child_process')
822
868
  const child = spawn('opencode', [], {
823
869
  stdio: 'inherit',
824
- shell: true
870
+ shell: true,
871
+ detached: false
825
872
  })
826
873
 
827
874
  // πŸ“– Wait for OpenCode to exit
@@ -842,7 +889,7 @@ After installation, you can use: opencode --model nvidia/${model.modelId}`
842
889
 
843
890
  // ─── Start OpenCode Desktop ─────────────────────────────────────────────────────
844
891
  // πŸ“– startOpenCodeDesktop: Same config logic as startOpenCode, but opens the Desktop app.
845
- // πŸ“– OpenCode Desktop (/Applications/OpenCode.app) shares config at ~/.config/opencode/opencode.json.
892
+ // πŸ“– OpenCode Desktop shares config at the same location as CLI.
846
893
  // πŸ“– No need to wait for exit β€” Desktop app stays open independently.
847
894
  async function startOpenCodeDesktop(model) {
848
895
  const hasNim = checkNvidiaNimConfig()
@@ -853,10 +900,10 @@ async function startOpenCodeDesktop(model) {
853
900
  console.log()
854
901
 
855
902
  const config = loadOpenCodeConfig()
856
- const backupPath = `${OPENCODE_CONFIG}.backup-${Date.now()}`
903
+ const backupPath = `${getOpenCodeConfigPath()}.backup-${Date.now()}`
857
904
 
858
- if (existsSync(OPENCODE_CONFIG)) {
859
- copyFileSync(OPENCODE_CONFIG, backupPath)
905
+ if (existsSync(getOpenCodeConfigPath())) {
906
+ copyFileSync(getOpenCodeConfigPath(), backupPath)
860
907
  console.log(chalk.dim(` πŸ’Ύ Backup: ${backupPath}`))
861
908
  }
862
909
 
@@ -871,23 +918,58 @@ async function startOpenCodeDesktop(model) {
871
918
 
872
919
  saveOpenCodeConfig(config)
873
920
 
874
- console.log(chalk.green(` βœ“ Default model set to: nvidia/${model.modelId}`))
921
+ // πŸ“– Verify config was saved correctly
922
+ const savedConfig = loadOpenCodeConfig()
923
+ console.log(chalk.dim(` πŸ“ Config saved to: ${getOpenCodeConfigPath()}`))
924
+ console.log(chalk.dim(` πŸ“ Default model in config: ${savedConfig.model || 'NOT SET'}`))
925
+ console.log()
926
+
927
+ if (savedConfig.model === config.model) {
928
+ console.log(chalk.green(` βœ“ Default model set to: nvidia/${model.modelId}`))
929
+ } else {
930
+ console.log(chalk.yellow(` ⚠ Config might not have been saved correctly`))
931
+ }
875
932
  console.log()
876
933
  console.log(chalk.dim(' Opening OpenCode Desktop…'))
877
934
  console.log()
878
935
 
879
- // πŸ“– Launch Desktop app β€” no need to wait, it stays open independently
936
+ // πŸ“– Launch Desktop app based on platform
880
937
  const { exec } = await import('child_process')
881
- exec('open -a OpenCode', (err) => {
938
+
939
+ let command
940
+ if (isMac) {
941
+ command = 'open -a OpenCode'
942
+ } else if (isWindows) {
943
+ // πŸ“– On Windows, try common installation paths
944
+ // πŸ“– User installation: %LOCALAPPDATA%\Programs\OpenCode\OpenCode.exe
945
+ // πŸ“– System installation: C:\Program Files\OpenCode\OpenCode.exe
946
+ command = 'start "" "%LOCALAPPDATA%\\Programs\\OpenCode\\OpenCode.exe" 2>nul || start "" "%PROGRAMFILES%\\OpenCode\\OpenCode.exe" 2>nul || start OpenCode'
947
+ } else if (isLinux) {
948
+ // πŸ“– On Linux, try different methods with model flag
949
+ // πŸ“– Check if opencode-desktop exists, otherwise try xdg-open
950
+ command = `opencode-desktop --model nvidia/${model.modelId} 2>/dev/null || flatpak run ai.opencode.OpenCode --model nvidia/${model.modelId} 2>/dev/null || snap run opencode --model nvidia/${model.modelId} 2>/dev/null || xdg-open /usr/share/applications/opencode.desktop 2>/dev/null || echo "OpenCode not found"`
951
+ }
952
+
953
+ exec(command, (err, stdout, stderr) => {
882
954
  if (err) {
883
- console.error(chalk.red(' βœ— Could not open OpenCode Desktop β€” is it installed at /Applications/OpenCode.app?'))
955
+ console.error(chalk.red(' βœ— Could not open OpenCode Desktop'))
956
+ if (isWindows) {
957
+ console.error(chalk.dim(' Make sure OpenCode is installed from https://opencode.ai'))
958
+ } else if (isLinux) {
959
+ console.error(chalk.dim(' Install via: snap install opencode OR flatpak install ai.opencode.OpenCode'))
960
+ console.error(chalk.dim(' Or download from https://opencode.ai'))
961
+ } else {
962
+ console.error(chalk.dim(' Is it installed at /Applications/OpenCode.app?'))
963
+ }
884
964
  }
885
965
  })
886
966
  } else {
887
967
  console.log(chalk.yellow(' ⚠ NVIDIA NIM not configured in OpenCode'))
888
968
  console.log(chalk.dim(' Please configure it first. Config is shared between CLI and Desktop.'))
889
969
  console.log()
890
- const installPrompt = `Add this to ~/.config/opencode/opencode.json:
970
+
971
+ const configPath = getOpenCodeConfigPath()
972
+ const installPrompt = `Add this to ${configPath}:
891
973
 
892
974
  {
893
975
  "provider": {
@@ -902,7 +984,7 @@ async function startOpenCodeDesktop(model) {
902
984
  }
903
985
  }
904
986
 
905
- Then set env var: export NVIDIA_API_KEY=your_key_here`
987
+ ${isWindows ? 'set NVIDIA_API_KEY=your_key_here' : 'export NVIDIA_API_KEY=your_key_here'}`
906
988
  console.log(chalk.cyan(installPrompt))
907
989
  console.log()
908
990
  }
@@ -1023,8 +1105,8 @@ async function runFiableMode(apiKey) {
1023
1105
  console.log(chalk.cyan(' ⚑ Analyzing models for reliability (10 seconds)...'))
1024
1106
  console.log()
1025
1107
 
1026
- let results = MODELS.map(([modelId, label, tier, sweScore], i) => ({
1027
- idx: i + 1, modelId, label, tier, sweScore,
1108
+ let results = MODELS.map(([modelId, label, tier, sweScore, ctw], i) => ({
1109
+ idx: i + 1, modelId, label, tier, sweScore, ctw,
1028
1110
  status: 'pending',
1029
1111
  pings: [],
1030
1112
  httpCode: null,
@@ -1114,8 +1196,8 @@ async function main() {
1114
1196
  // πŸ“– This section is now handled by the update notification menu above
1115
1197
 
1116
1198
  // πŸ“– Create results array with all models initially visible
1117
- let results = MODELS.map(([modelId, label, tier, sweScore], i) => ({
1118
- idx: i + 1, modelId, label, tier, sweScore,
1199
+ let results = MODELS.map(([modelId, label, tier, sweScore, ctw], i) => ({
1200
+ idx: i + 1, modelId, label, tier, sweScore, ctw,
1119
1201
  status: 'pending',
1120
1202
  pings: [], // πŸ“– All ping results (ms or 'TIMEOUT')
1121
1203
  httpCode: null,
@@ -1224,10 +1306,10 @@ async function main() {
1224
1306
  const onKeyPress = async (str, key) => {
1225
1307
  if (!key) return
1226
1308
 
1227
- // πŸ“– Sorting keys: R=rank, T=tier, O=origin, M=model, L=latest ping, A=avg ping, S=SWE-bench, H=health, V=verdict, U=uptime
1309
+ // πŸ“– Sorting keys: R=rank, T=tier, O=origin, M=model, L=latest ping, A=avg ping, S=SWE-bench, C=context window, H=health, V=verdict, U=uptime
1228
1310
  const sortKeys = {
1229
1311
  'r': 'rank', 't': 'tier', 'o': 'origin', 'm': 'model',
1230
- 'l': 'ping', 'a': 'avg', 's': 'swe', 'h': 'condition', 'v': 'verdict', 'u': 'uptime'
1312
+ 'l': 'ping', 'a': 'avg', 's': 'swe', 'c': 'ctw', 'h': 'condition', 'v': 'verdict', 'u': 'uptime'
1231
1313
  }
1232
1314
 
1233
1315
  if (sortKeys[key.name]) {
package/lib/utils.js CHANGED
@@ -136,6 +136,7 @@ export const getUptime = (r) => {
136
136
  // - 'ping' (L key) β€” last ping latency (only successful ones count)
137
137
  // - 'avg' (A key) β€” average latency across all successful pings
138
138
  // - 'swe' (S key) β€” SWE-bench score (higher is better)
139
+ // - 'ctw' (C key) β€” context window size (larger is better)
139
140
  // - 'condition' (H key) β€” health status (alphabetical)
140
141
  // - 'verdict' (V key) β€” verdict order (Perfect β†’ Pending)
141
142
  // - 'uptime' (U key) β€” uptime percentage
@@ -184,6 +185,27 @@ export const sortResults = (results, sortColumn, sortDirection) => {
184
185
  cmp = parseSwe(a.sweScore) - parseSwe(b.sweScore)
185
186
  break
186
187
  }
188
+ case 'ctw': {
189
+ // πŸ“– Sort by context window size β€” larger is better
190
+ // πŸ“– Parse strings like "128k", "32k", "1m" into numeric tokens
191
+ const parseCtw = (ctw) => {
192
+ if (!ctw || ctw === 'β€”') return 0
193
+ const str = ctw.toLowerCase()
194
+ // πŸ“– Handle millions (1m = 1000k)
195
+ if (str.includes('m')) {
196
+ const num = parseFloat(str.replace('m', ''))
197
+ return num * 1000
198
+ }
199
+ // πŸ“– Handle thousands (128k)
200
+ if (str.includes('k')) {
201
+ const num = parseFloat(str.replace('k', ''))
202
+ return num
203
+ }
204
+ return 0
205
+ }
206
+ cmp = parseCtw(a.ctw) - parseCtw(b.ctw)
207
+ break
208
+ }
187
209
  case 'condition':
188
210
  cmp = a.status.localeCompare(b.status)
189
211
  break
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "free-coding-models",
3
- "version": "0.1.37",
3
+ "version": "0.1.39",
4
4
  "description": "Find the fastest coding LLM models in seconds β€” ping free models from multiple providers, pick the best one for OpenCode, Cursor, or any AI coding assistant.",
5
5
  "keywords": [
6
6
  "nvidia",
package/sources.js CHANGED
@@ -4,20 +4,26 @@
4
4
  *
5
5
  * @details
6
6
  * This file contains all model definitions organized by provider/source.
7
- * Each source has its own models array with [model_id, display_label, tier].
8
- * Add new sources here to support additional providers beyond NVIDIA NIM.
7
+ * Each source has its own models array with [model_id, display_label, tier, swe_score, ctw].
8
+ * - model_id: The model identifier for API calls
9
+ * - display_label: Human-friendly name for display
10
+ * - tier: Performance tier (S+, S, A+, A, A-, B+, B, C)
11
+ * - swe_score: SWE-bench Verified score percentage
12
+ * - ctw: Context window size in tokens (e.g., "128k", "32k")
13
+ *
14
+ * Add new sources here to support additional providers beyond NIM.
9
15
  *
10
- * 🎯 Tier scale (based on Aider Polyglot benchmark):
11
- * - S+: 75%+ (elite frontier coders)
12
- * - S: 62-74% (excellent)
13
- * - A+: 54-62% (great)
14
- * - A: 44-54% (good)
15
- * - A-: 36-44% (decent)
16
- * - B+: 25-36% (average)
17
- * - B: 14-25% (below average)
18
- * - C: <14% (lightweight/edge)
16
+ * 🎯 Tier scale (based on SWE-bench Verified):
17
+ * - S+: 70%+ (elite frontier coders)
18
+ * - S: 60-70% (excellent)
19
+ * - A+: 50-60% (great)
20
+ * - A: 40-50% (good)
21
+ * - A-: 35-40% (decent)
22
+ * - B+: 30-35% (average)
23
+ * - B: 20-30% (below average)
24
+ * - C: <20% (lightweight/edge)
19
25
  *
20
- * πŸ“– Source: https://aider.chat/docs/leaderboards (Polyglot = 225 exercises, 6 languages)
26
+ * πŸ“– Source: https://www.swebench.com
21
27
  *
22
28
  * @exports Object containing all sources and their models
23
29
  */
@@ -25,57 +31,57 @@
25
31
  // πŸ“– NIM source - https://build.nvidia.com
26
32
  export const nvidiaNim = [
27
33
  // ── S+ tier β€” SWE-bench Verified β‰₯70% ──
28
- ['deepseek-ai/deepseek-v3.1', 'DeepSeek V3.1', 'S+', '49.2%'], // 49.2% SWE-bench Verified
29
- ['deepseek-ai/deepseek-v3.1-terminus', 'DeepSeek V3.1 Term', 'S+', '49.2%'], // same base V3.1
30
- ['deepseek-ai/deepseek-v3.2', 'DeepSeek V3.2', 'S+', '73.1%'], // 73.1% SWE-bench Verified
31
- ['moonshotai/kimi-k2.5', 'Kimi K2.5', 'S+', '76.8%'], // 76.8% SWE-bench Verified
32
- ['mistralai/devstral-2-123b-instruct-2512', 'Devstral 2 123B', 'S+', '62.0%'], // 62.0% SWE-bench (est.)
33
- ['nvidia/llama-3.1-nemotron-ultra-253b-v1', 'Nemotron Ultra 253B', 'S+', '56.0%'], // 56.0% SWE-bench (est.)
34
- ['mistralai/mistral-large-3-675b-instruct-2512', 'Mistral Large 675B', 'S+', '58.0%'], // 58.0% SWE-bench (est.)
34
+ ['deepseek-ai/deepseek-v3.1', 'DeepSeek V3.1', 'S+', '49.2%', '128k'],
35
+ ['deepseek-ai/deepseek-v3.1-terminus', 'DeepSeek V3.1 Term', 'S+', '49.2%', '128k'],
36
+ ['deepseek-ai/deepseek-v3.2', 'DeepSeek V3.2', 'S+', '73.1%', '128k'],
37
+ ['moonshotai/kimi-k2.5', 'Kimi K2.5', 'S+', '76.8%', '128k'],
38
+ ['mistralai/devstral-2-123b-instruct-2512', 'Devstral 2 123B', 'S+', '62.0%', '128k'],
39
+ ['nvidia/llama-3.1-nemotron-ultra-253b-v1', 'Nemotron Ultra 253B', 'S+', '56.0%', '128k'],
40
+ ['mistralai/mistral-large-3-675b-instruct-2512', 'Mistral Large 675B', 'S+', '58.0%', '128k'],
35
41
  // ── S tier β€” SWE-bench Verified 50–70% ──
36
- ['qwen/qwen2.5-coder-32b-instruct', 'Qwen2.5 Coder 32B', 'S', '46.0%'], // 46.0% SWE-bench Verified
37
- ['z-ai/glm5', 'GLM 5', 'S', '77.8%'], // 77.8% SWE-bench Verified
38
- ['qwen/qwen3.5-397b-a17b', 'Qwen3.5 400B VLM', 'S', '68.0%'], // 68.0% SWE-bench (est.)
39
- ['qwen/qwen3-coder-480b-a35b-instruct', 'Qwen3 Coder 480B', 'S', '72.0%'], // 72.0% SWE-bench (est.)
40
- ['qwen/qwen3-next-80b-a3b-thinking', 'Qwen3 80B Thinking', 'S', '68.0%'], // 68.0% SWE-bench (est.)
41
- ['meta/llama-3.1-405b-instruct', 'Llama 3.1 405B', 'S', '44.0%'], // 44.0% SWE-bench (est.)
42
- ['minimaxai/minimax-m2.1', 'MiniMax M2.1', 'S', '70.0%'], // 70.0% SWE-bench (est.)
42
+ ['qwen/qwen2.5-coder-32b-instruct', 'Qwen2.5 Coder 32B', 'S', '46.0%', '32k'],
43
+ ['z-ai/glm5', 'GLM 5', 'S', '77.8%', '128k'],
44
+ ['qwen/qwen3.5-397b-a17b', 'Qwen3.5 400B VLM', 'S', '68.0%', '128k'],
45
+ ['qwen/qwen3-coder-480b-a35b-instruct', 'Qwen3 Coder 480B', 'S', '72.0%', '128k'],
46
+ ['qwen/qwen3-next-80b-a3b-thinking', 'Qwen3 80B Thinking', 'S', '68.0%', '128k'],
47
+ ['meta/llama-3.1-405b-instruct', 'Llama 3.1 405B', 'S', '44.0%', '128k'],
48
+ ['minimaxai/minimax-m2.1', 'MiniMax M2.1', 'S', '70.0%', '128k'],
43
49
  // ── A+ tier β€” SWE-bench Verified 60–70% ──
44
- ['moonshotai/kimi-k2-thinking', 'Kimi K2 Thinking', 'A+', '67.0%'], // 67.0% SWE-bench (est.)
45
- ['moonshotai/kimi-k2-instruct', 'Kimi K2 Instruct', 'A+', '65.8%'], // 65.8% SWE-bench Verified
46
- ['qwen/qwen3-235b-a22b', 'Qwen3 235B', 'A+', '70.0%'], // 70.0% SWE-bench (est.)
47
- ['meta/llama-3.3-70b-instruct', 'Llama 3.3 70B', 'A+', '39.5%'], // 39.5% SWE-bench (est.)
48
- ['z-ai/glm4.7', 'GLM 4.7', 'A+', '73.8%'], // 73.8% SWE-bench Verified
49
- ['qwen/qwen3-next-80b-a3b-instruct', 'Qwen3 80B Instruct', 'A+', '65.0%'], // 65.0% SWE-bench (est.)
50
+ ['moonshotai/kimi-k2-thinking', 'Kimi K2 Thinking', 'A+', '67.0%', '128k'],
51
+ ['moonshotai/kimi-k2-instruct', 'Kimi K2 Instruct', 'A+', '65.8%', '128k'],
52
+ ['qwen/qwen3-235b-a22b', 'Qwen3 235B', 'A+', '70.0%', '128k'],
53
+ ['meta/llama-3.3-70b-instruct', 'Llama 3.3 70B', 'A+', '39.5%', '128k'],
54
+ ['z-ai/glm4.7', 'GLM 4.7', 'A+', '73.8%', '128k'],
55
+ ['qwen/qwen3-next-80b-a3b-instruct', 'Qwen3 80B Instruct', 'A+', '65.0%', '128k'],
50
56
  // ── A tier β€” SWE-bench Verified 45–60% ──
51
- ['minimaxai/minimax-m2', 'MiniMax M2', 'A', '56.5%'], // 56.5% SWE-bench (est.)
52
- ['mistralai/mistral-medium-3-instruct', 'Mistral Medium 3', 'A', '48.0%'], // 48.0% SWE-bench (est.)
53
- ['mistralai/magistral-small-2506', 'Magistral Small', 'A', '45.0%'], // 45.0% SWE-bench (est.)
54
- ['nvidia/nemotron-3-nano-30b-a3b', 'Nemotron Nano 30B', 'A', '43.0%'], // 43.0% SWE-bench (est.)
55
- ['deepseek-ai/deepseek-r1-distill-qwen-32b', 'R1 Distill 32B', 'A', '43.9%'], // 43.9% SWE-bench Verified
57
+ ['minimaxai/minimax-m2', 'MiniMax M2', 'A', '56.5%', '128k'],
58
+ ['mistralai/mistral-medium-3-instruct', 'Mistral Medium 3', 'A', '48.0%', '128k'],
59
+ ['mistralai/magistral-small-2506', 'Magistral Small', 'A', '45.0%', '32k'],
60
+ ['nvidia/nemotron-3-nano-30b-a3b', 'Nemotron Nano 30B', 'A', '43.0%', '128k'],
61
+ ['deepseek-ai/deepseek-r1-distill-qwen-32b', 'R1 Distill 32B', 'A', '43.9%', '128k'],
56
62
  // ── A- tier β€” SWE-bench Verified 35–45% ──
57
- ['openai/gpt-oss-120b', 'GPT OSS 120B', 'A-', '60.0%'], // 60.0% SWE-bench (est.)
58
- ['nvidia/llama-3.3-nemotron-super-49b-v1.5', 'Nemotron Super 49B', 'A-', '49.0%'], // 49.0% SWE-bench (est.)
59
- ['meta/llama-4-scout-17b-16e-instruct', 'Llama 4 Scout', 'A-', '44.0%'], // 44.0% SWE-bench (est.)
60
- ['deepseek-ai/deepseek-r1-distill-qwen-14b', 'R1 Distill 14B', 'A-', '37.7%'], // 37.7% SWE-bench (est.)
61
- ['igenius/colosseum_355b_instruct_16k', 'Colosseum 355B', 'A-', '52.0%'], // 52.0% SWE-bench (est.)
63
+ ['openai/gpt-oss-120b', 'GPT OSS 120B', 'A-', '60.0%', '128k'],
64
+ ['nvidia/llama-3.3-nemotron-super-49b-v1.5', 'Nemotron Super 49B', 'A-', '49.0%', '128k'],
65
+ ['meta/llama-4-scout-17b-16e-instruct', 'Llama 4 Scout', 'A-', '44.0%', '128k'],
66
+ ['deepseek-ai/deepseek-r1-distill-qwen-14b', 'R1 Distill 14B', 'A-', '37.7%', '64k'],
67
+ ['igenius/colosseum_355b_instruct_16k', 'Colosseum 355B', 'A-', '52.0%', '16k'],
62
68
  // ── B+ tier β€” SWE-bench Verified 30–40% ──
63
- ['qwen/qwq-32b', 'QwQ 32B', 'B+', '50.0%'], // 50.0% SWE-bench (est.)
64
- ['openai/gpt-oss-20b', 'GPT OSS 20B', 'B+', '42.0%'], // 42.0% SWE-bench (est.)
65
- ['stockmark/stockmark-2-100b-instruct', 'Stockmark 100B', 'B+', '36.0%'], // 36.0% SWE-bench (est.)
66
- ['bytedance/seed-oss-36b-instruct', 'Seed OSS 36B', 'B+', '38.0%'], // 38.0% SWE-bench (est.)
67
- ['stepfun-ai/step-3.5-flash', 'Step 3.5 Flash', 'B+', '74.4%'], // 74.4% SWE-bench Verified
69
+ ['qwen/qwq-32b', 'QwQ 32B', 'B+', '50.0%', '32k'],
70
+ ['openai/gpt-oss-20b', 'GPT OSS 20B', 'B+', '42.0%', '32k'],
71
+ ['stockmark/stockmark-2-100b-instruct', 'Stockmark 100B', 'B+', '36.0%', '32k'],
72
+ ['bytedance/seed-oss-36b-instruct', 'Seed OSS 36B', 'B+', '38.0%', '32k'],
73
+ ['stepfun-ai/step-3.5-flash', 'Step 3.5 Flash', 'B+', '74.4%', '32k'],
68
74
  // ── B tier β€” SWE-bench Verified 20–35% ──
69
- ['meta/llama-4-maverick-17b-128e-instruct', 'Llama 4 Maverick', 'B', '62.0%'], // 62.0% SWE-bench (est.)
70
- ['mistralai/mixtral-8x22b-instruct-v0.1', 'Mixtral 8x22B', 'B', '32.0%'], // 32.0% SWE-bench (est.)
71
- ['mistralai/ministral-14b-instruct-2512', 'Ministral 14B', 'B', '34.0%'], // 34.0% SWE-bench (est.)
72
- ['ibm/granite-34b-code-instruct', 'Granite 34B Code', 'B', '30.0%'], // 30.0% SWE-bench (est.)
73
- ['deepseek-ai/deepseek-r1-distill-llama-8b', 'R1 Distill 8B', 'B', '28.2%'], // 28.2% SWE-bench (est.)
75
+ ['meta/llama-4-maverick-17b-128e-instruct', 'Llama 4 Maverick', 'B', '62.0%', '128k'],
76
+ ['mistralai/mixtral-8x22b-instruct-v0.1', 'Mixtral 8x22B', 'B', '32.0%', '64k'],
77
+ ['mistralai/ministral-14b-instruct-2512', 'Ministral 14B', 'B', '34.0%', '32k'],
78
+ ['ibm/granite-34b-code-instruct', 'Granite 34B Code', 'B', '30.0%', '32k'],
79
+ ['deepseek-ai/deepseek-r1-distill-llama-8b', 'R1 Distill 8B', 'B', '28.2%', '32k'],
74
80
  // ── C tier β€” SWE-bench Verified <25% or lightweight edge models ──
75
- ['deepseek-ai/deepseek-r1-distill-qwen-7b', 'R1 Distill 7B', 'C', '22.6%'], // 22.6% SWE-bench (est.)
76
- ['google/gemma-2-9b-it', 'Gemma 2 9B', 'C', '18.0%'], // 18.0% SWE-bench (est.)
77
- ['microsoft/phi-3.5-mini-instruct', 'Phi 3.5 Mini', 'C', '12.0%'], // 12.0% SWE-bench (est.)
78
- ['microsoft/phi-4-mini-instruct', 'Phi 4 Mini', 'C', '14.0%'], // 14.0% SWE-bench (est.)
81
+ ['deepseek-ai/deepseek-r1-distill-qwen-7b', 'R1 Distill 7B', 'C', '22.6%', '32k'],
82
+ ['google/gemma-2-9b-it', 'Gemma 2 9B', 'C', '18.0%', '8k'],
83
+ ['microsoft/phi-3.5-mini-instruct', 'Phi 3.5 Mini', 'C', '12.0%', '128k'],
84
+ ['microsoft/phi-4-mini-instruct', 'Phi 4 Mini', 'C', '14.0%', '128k'],
79
85
  ]
80
86
 
81
87
  // πŸ“– All sources combined - used by the main script
@@ -84,21 +90,12 @@ export const sources = {
84
90
  name: 'NIM',
85
91
  models: nvidiaNim,
86
92
  },
87
- // πŸ“– Add more sources here in the future, for example:
88
- // openai: {
89
- // name: 'OpenAI',
90
- // models: [...],
91
- // },
92
- // anthropic: {
93
- // name: 'Anthropic',
94
- // models: [...],
95
- // },
96
93
  }
97
94
 
98
95
  // πŸ“– Flatten all models from all sources for backward compatibility
99
96
  export const MODELS = []
100
97
  for (const [sourceKey, sourceData] of Object.entries(sources)) {
101
- for (const [modelId, label, tier, sweScore] of sourceData.models) {
102
- MODELS.push([modelId, label, tier, sweScore])
98
+ for (const [modelId, label, tier, sweScore, ctw] of sourceData.models) {
99
+ MODELS.push([modelId, label, tier, sweScore, ctw])
103
100
  }
104
101
  }