free-coding-models 0.1.50 → 0.1.52

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -710,6 +710,7 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
710
710
  lines.push(chalk.dim(` ↑↓ Navigate • `) + actionHint + chalk.dim(` • R/Y/O/M/L/A/S/C/H/V/U Sort • W↓/X↑ Interval (${intervalSec}s) • T Filter tier • Z Mode • `) + chalk.yellow('P') + chalk.dim(` Settings • Ctrl+C Exit`))
711
711
  lines.push('')
712
712
  lines.push(chalk.dim(' Made with ') + '💖 & ☕' + chalk.dim(' by ') + '\x1b]8;;https://github.com/vava-nessa\x1b\\vava-nessa\x1b]8;;\x1b\\' + chalk.dim(' • ') + '🫂 ' + chalk.cyanBright('\x1b]8;;https://discord.gg/5MbTnDC3Md\x1b\\Join our Discord!\x1b]8;;\x1b\\') + chalk.dim(' • ') + '⭐ ' + '\x1b]8;;https://github.com/vava-nessa/free-coding-models\x1b\\Read the docs on GitHub\x1b]8;;\x1b\\')
713
+ lines.push(chalk.dim(' 💬 Discord: ') + chalk.cyanBright('https://discord.gg/5MbTnDC3Md'))
713
714
  lines.push('')
714
715
  // 📖 Append \x1b[K (erase to EOL) to each line so leftover chars from previous
715
716
  // 📖 frames are cleared. Then pad with blank cleared lines to fill the terminal,
@@ -758,6 +759,29 @@ const isWindows = process.platform === 'win32'
758
759
  const isMac = process.platform === 'darwin'
759
760
  const isLinux = process.platform === 'linux'
760
761
 
762
+ // ─── OpenCode model ID mapping ─────────────────────────────────────────────────
763
+ // 📖 Source model IDs -> OpenCode built-in model IDs (only where they differ)
764
+ // 📖 Groq's API aliases short names to full names, but OpenCode does exact ID matching
765
+ // 📖 against its built-in model list. Unmapped models pass through as-is.
766
+ const OPENCODE_MODEL_MAP = {
767
+ groq: {
768
+ 'moonshotai/kimi-k2-instruct': 'moonshotai/kimi-k2-instruct-0905',
769
+ 'meta-llama/llama-4-scout-17b-16e-preview': 'meta-llama/llama-4-scout-17b-16e-instruct',
770
+ 'meta-llama/llama-4-maverick-17b-128e-preview': 'meta-llama/llama-4-maverick-17b-128e-instruct',
771
+ }
772
+ }
773
+
774
+ function getOpenCodeModelId(providerKey, modelId) {
775
+ return OPENCODE_MODEL_MAP[providerKey]?.[modelId] || modelId
776
+ }
777
+
778
+ // 📖 Env var names per provider -- used for passing resolved keys to child processes
779
+ const ENV_VAR_NAMES = {
780
+ nvidia: 'NVIDIA_API_KEY',
781
+ groq: 'GROQ_API_KEY',
782
+ cerebras: 'CEREBRAS_API_KEY',
783
+ }
784
+
761
785
  // 📖 OpenCode config location varies by platform
762
786
  // 📖 Windows: %APPDATA%\opencode\opencode.json (or sometimes ~/.config/opencode)
763
787
  // 📖 macOS/Linux: ~/.config/opencode/opencode.json
@@ -809,16 +833,50 @@ function checkNvidiaNimConfig() {
809
833
  )
810
834
  }
811
835
 
836
+ // ─── Shared OpenCode spawn helper ──────────────────────────────────────────────
837
+ // 📖 Resolves the actual API key from config/env and passes it as an env var
838
+ // 📖 to the child process so OpenCode's {env:GROQ_API_KEY} references work
839
+ // 📖 even when the key is only in ~/.free-coding-models.json (not in shell env).
840
+ async function spawnOpenCode(args, providerKey, fcmConfig) {
841
+ const envVarName = ENV_VAR_NAMES[providerKey]
842
+ const resolvedKey = getApiKey(fcmConfig, providerKey)
843
+ const childEnv = { ...process.env }
844
+ if (envVarName && resolvedKey) childEnv[envVarName] = resolvedKey
845
+
846
+ const { spawn } = await import('child_process')
847
+ const child = spawn('opencode', args, {
848
+ stdio: 'inherit',
849
+ shell: true,
850
+ detached: false,
851
+ env: childEnv
852
+ })
853
+
854
+ return new Promise((resolve, reject) => {
855
+ child.on('exit', resolve)
856
+ child.on('error', (err) => {
857
+ if (err.code === 'ENOENT') {
858
+ console.error(chalk.red('\n X Could not find "opencode" -- is it installed and in your PATH?'))
859
+ console.error(chalk.dim(' Install: npm i -g opencode or see https://opencode.ai'))
860
+ resolve(1)
861
+ } else {
862
+ reject(err)
863
+ }
864
+ })
865
+ })
866
+ }
867
+
812
868
  // ─── Start OpenCode ────────────────────────────────────────────────────────────
813
869
  // 📖 Launches OpenCode with the selected model.
814
870
  // 📖 Handles all 3 providers: nvidia (needs custom provider config), groq & cerebras (built-in in OpenCode).
815
871
  // 📖 For nvidia: checks if NIM is configured, sets provider.models entry, spawns with nvidia/model-id.
816
- // 📖 For groq/cerebras: OpenCode has built-in support just sets model in config and spawns.
872
+ // 📖 For groq/cerebras: OpenCode has built-in support -- just sets model in config and spawns.
817
873
  // 📖 Model format: { modelId, label, tier, providerKey }
818
- async function startOpenCode(model) {
874
+ // 📖 fcmConfig: the free-coding-models config (for resolving API keys)
875
+ async function startOpenCode(model, fcmConfig) {
819
876
  const providerKey = model.providerKey ?? 'nvidia'
820
- // 📖 Full model reference string used in OpenCode config and --model flag
821
- const modelRef = `${providerKey}/${model.modelId}`
877
+ // 📖 Map model ID to OpenCode's built-in ID if it differs from our source ID
878
+ const ocModelId = getOpenCodeModelId(providerKey, model.modelId)
879
+ const modelRef = `${providerKey}/${ocModelId}`
822
880
 
823
881
  if (providerKey === 'nvidia') {
824
882
  // 📖 NVIDIA NIM needs a custom provider block in OpenCode config (not built-in)
@@ -840,11 +898,9 @@ async function startOpenCode(model) {
840
898
  config.model = modelRef
841
899
 
842
900
  // 📖 Register the model in the nvidia provider's models section
843
- // 📖 OpenCode requires models to be explicitly listed in provider.models
844
- // 📖 to recognize them — without this, it falls back to the previous default
845
901
  if (config.provider?.nvidia) {
846
902
  if (!config.provider.nvidia.models) config.provider.nvidia.models = {}
847
- config.provider.nvidia.models[model.modelId] = { name: model.label }
903
+ config.provider.nvidia.models[ocModelId] = { name: model.label }
848
904
  }
849
905
 
850
906
  saveOpenCodeConfig(config)
@@ -863,27 +919,9 @@ async function startOpenCode(model) {
863
919
  console.log(chalk.dim(' Starting OpenCode…'))
864
920
  console.log()
865
921
 
866
- const { spawn } = await import('child_process')
867
- const child = spawn('opencode', ['--model', modelRef], {
868
- stdio: 'inherit',
869
- shell: true,
870
- detached: false
871
- })
872
-
873
- await new Promise((resolve, reject) => {
874
- child.on('exit', resolve)
875
- child.on('error', (err) => {
876
- if (err.code === 'ENOENT') {
877
- console.error(chalk.red('\n ✗ Could not find "opencode" — is it installed and in your PATH?'))
878
- console.error(chalk.dim(' Install: npm i -g opencode or see https://opencode.ai'))
879
- resolve(1)
880
- } else {
881
- reject(err)
882
- }
883
- })
884
- })
922
+ await spawnOpenCode(['--model', modelRef], providerKey, fcmConfig)
885
923
  } else {
886
- // 📖 NVIDIA NIM not configured show install prompt
924
+ // 📖 NVIDIA NIM not configured -- show install prompt
887
925
  console.log(chalk.yellow(' ⚠ NVIDIA NIM not configured in OpenCode'))
888
926
  console.log()
889
927
  console.log(chalk.dim(' Starting OpenCode with installation prompt…'))
@@ -914,30 +952,12 @@ After installation, you can use: opencode --model ${modelRef}`
914
952
  console.log(chalk.dim(' Starting OpenCode…'))
915
953
  console.log()
916
954
 
917
- const { spawn } = await import('child_process')
918
- const child = spawn('opencode', [], {
919
- stdio: 'inherit',
920
- shell: true,
921
- detached: false
922
- })
923
-
924
- await new Promise((resolve, reject) => {
925
- child.on('exit', resolve)
926
- child.on('error', (err) => {
927
- if (err.code === 'ENOENT') {
928
- console.error(chalk.red('\n ✗ Could not find "opencode" — is it installed and in your PATH?'))
929
- console.error(chalk.dim(' Install: npm i -g opencode or see https://opencode.ai'))
930
- resolve(1)
931
- } else {
932
- reject(err)
933
- }
934
- })
935
- })
955
+ await spawnOpenCode([], providerKey, fcmConfig)
936
956
  }
937
957
  } else {
938
- // 📖 Groq and Cerebras are built-in OpenCode providers no custom provider config needed.
939
- // 📖 OpenCode discovers them via GROQ_API_KEY / CEREBRAS_API_KEY env vars automatically.
940
- // 📖 Just set the model in config and launch with --model groq/model-id.
958
+ // 📖 Groq: built-in OpenCode provider -- needs provider block with apiKey in opencode.json.
959
+ // 📖 Cerebras: NOT built-in -- needs @ai-sdk/openai-compatible + baseURL, like NVIDIA.
960
+ // 📖 Both need the model registered in provider.<key>.models so OpenCode can find it.
941
961
  console.log(chalk.green(` 🚀 Setting ${chalk.bold(model.label)} as default…`))
942
962
  console.log(chalk.dim(` Model: ${modelRef}`))
943
963
  console.log()
@@ -950,6 +970,37 @@ After installation, you can use: opencode --model ${modelRef}`
950
970
  console.log(chalk.dim(` 💾 Backup: ${backupPath}`))
951
971
  }
952
972
 
973
+ // 📖 Ensure the provider block exists in config — create it if missing
974
+ if (!config.provider) config.provider = {}
975
+ if (!config.provider[providerKey]) {
976
+ if (providerKey === 'groq') {
977
+ // 📖 Groq is a built-in OpenCode provider — just needs apiKey options, no npm package
978
+ config.provider.groq = {
979
+ options: { apiKey: '{env:GROQ_API_KEY}' },
980
+ models: {}
981
+ }
982
+ } else if (providerKey === 'cerebras') {
983
+ // 📖 Cerebras is OpenAI-compatible — needs npm package and baseURL like NVIDIA
984
+ config.provider.cerebras = {
985
+ npm: '@ai-sdk/openai-compatible',
986
+ name: 'Cerebras',
987
+ options: {
988
+ baseURL: 'https://api.cerebras.ai/v1',
989
+ apiKey: '{env:CEREBRAS_API_KEY}'
990
+ },
991
+ models: {}
992
+ }
993
+ }
994
+ }
995
+
996
+ // 📖 Register the model in the provider's models section
997
+ // 📖 Only register custom models -- skip if the model maps to a built-in OpenCode ID
998
+ const isBuiltinMapped = OPENCODE_MODEL_MAP[providerKey]?.[model.modelId]
999
+ if (!isBuiltinMapped) {
1000
+ if (!config.provider[providerKey].models) config.provider[providerKey].models = {}
1001
+ config.provider[providerKey].models[ocModelId] = { name: model.label }
1002
+ }
1003
+
953
1004
  config.model = modelRef
954
1005
  saveOpenCodeConfig(config)
955
1006
 
@@ -967,25 +1018,7 @@ After installation, you can use: opencode --model ${modelRef}`
967
1018
  console.log(chalk.dim(' Starting OpenCode…'))
968
1019
  console.log()
969
1020
 
970
- const { spawn } = await import('child_process')
971
- const child = spawn('opencode', ['--model', modelRef], {
972
- stdio: 'inherit',
973
- shell: true,
974
- detached: false
975
- })
976
-
977
- await new Promise((resolve, reject) => {
978
- child.on('exit', resolve)
979
- child.on('error', (err) => {
980
- if (err.code === 'ENOENT') {
981
- console.error(chalk.red('\n ✗ Could not find "opencode" — is it installed and in your PATH?'))
982
- console.error(chalk.dim(' Install: npm i -g opencode or see https://opencode.ai'))
983
- resolve(1)
984
- } else {
985
- reject(err)
986
- }
987
- })
988
- })
1021
+ await spawnOpenCode(['--model', modelRef], providerKey, fcmConfig)
989
1022
  }
990
1023
  }
991
1024
 
@@ -994,10 +1027,11 @@ After installation, you can use: opencode --model ${modelRef}`
994
1027
  // 📖 OpenCode Desktop shares config at the same location as CLI.
995
1028
  // 📖 Handles all 3 providers: nvidia (needs custom provider config), groq & cerebras (built-in).
996
1029
  // 📖 No need to wait for exit — Desktop app stays open independently.
997
- async function startOpenCodeDesktop(model) {
1030
+ async function startOpenCodeDesktop(model, fcmConfig) {
998
1031
  const providerKey = model.providerKey ?? 'nvidia'
999
- // 📖 Full model reference string used in OpenCode config and --model flag
1000
- const modelRef = `${providerKey}/${model.modelId}`
1032
+ // 📖 Map model ID to OpenCode's built-in ID if it differs from our source ID
1033
+ const ocModelId = getOpenCodeModelId(providerKey, model.modelId)
1034
+ const modelRef = `${providerKey}/${ocModelId}`
1001
1035
 
1002
1036
  // 📖 Helper to open the Desktop app based on platform
1003
1037
  const launchDesktop = async () => {
@@ -1046,7 +1080,7 @@ async function startOpenCodeDesktop(model) {
1046
1080
 
1047
1081
  if (config.provider?.nvidia) {
1048
1082
  if (!config.provider.nvidia.models) config.provider.nvidia.models = {}
1049
- config.provider.nvidia.models[model.modelId] = { name: model.label }
1083
+ config.provider.nvidia.models[ocModelId] = { name: model.label }
1050
1084
  }
1051
1085
 
1052
1086
  saveOpenCodeConfig(config)
@@ -1092,7 +1126,9 @@ ${isWindows ? 'set NVIDIA_API_KEY=your_key_here' : 'export NVIDIA_API_KEY=your_k
1092
1126
  console.log()
1093
1127
  }
1094
1128
  } else {
1095
- // 📖 Groq and Cerebras are built-in OpenCode providersjust set model and open Desktop.
1129
+ // 📖 Groq: built-in OpenCode provider — needs provider block with apiKey in opencode.json.
1130
+ // 📖 Cerebras: NOT built-in — needs @ai-sdk/openai-compatible + baseURL, like NVIDIA.
1131
+ // 📖 Both need the model registered in provider.<key>.models so OpenCode can find it.
1096
1132
  console.log(chalk.green(` 🖥 Setting ${chalk.bold(model.label)} as default for OpenCode Desktop…`))
1097
1133
  console.log(chalk.dim(` Model: ${modelRef}`))
1098
1134
  console.log()
@@ -1105,6 +1141,35 @@ ${isWindows ? 'set NVIDIA_API_KEY=your_key_here' : 'export NVIDIA_API_KEY=your_k
1105
1141
  console.log(chalk.dim(` 💾 Backup: ${backupPath}`))
1106
1142
  }
1107
1143
 
1144
+ // 📖 Ensure the provider block exists in config — create it if missing
1145
+ if (!config.provider) config.provider = {}
1146
+ if (!config.provider[providerKey]) {
1147
+ if (providerKey === 'groq') {
1148
+ config.provider.groq = {
1149
+ options: { apiKey: '{env:GROQ_API_KEY}' },
1150
+ models: {}
1151
+ }
1152
+ } else if (providerKey === 'cerebras') {
1153
+ config.provider.cerebras = {
1154
+ npm: '@ai-sdk/openai-compatible',
1155
+ name: 'Cerebras',
1156
+ options: {
1157
+ baseURL: 'https://api.cerebras.ai/v1',
1158
+ apiKey: '{env:CEREBRAS_API_KEY}'
1159
+ },
1160
+ models: {}
1161
+ }
1162
+ }
1163
+ }
1164
+
1165
+ // 📖 Register the model in the provider's models section
1166
+ // 📖 Only register custom models -- skip if the model maps to a built-in OpenCode ID
1167
+ const isBuiltinMapped = OPENCODE_MODEL_MAP[providerKey]?.[model.modelId]
1168
+ if (!isBuiltinMapped) {
1169
+ if (!config.provider[providerKey].models) config.provider[providerKey].models = {}
1170
+ config.provider[providerKey].models[ocModelId] = { name: model.label }
1171
+ }
1172
+
1108
1173
  config.model = modelRef
1109
1174
  saveOpenCodeConfig(config)
1110
1175
 
@@ -1188,9 +1253,14 @@ async function startOpenClaw(model, apiKey) {
1188
1253
  config.models.providers.nvidia = {
1189
1254
  baseUrl: 'https://integrate.api.nvidia.com/v1',
1190
1255
  api: 'openai-completions',
1256
+ models: [],
1191
1257
  }
1192
1258
  console.log(chalk.dim(' ➕ Added nvidia provider block to OpenClaw config (models.providers.nvidia)'))
1193
1259
  }
1260
+ // 📖 Ensure models array exists even if the provider block was created by an older version
1261
+ if (!Array.isArray(config.models.providers.nvidia.models)) {
1262
+ config.models.providers.nvidia.models = []
1263
+ }
1194
1264
 
1195
1265
  // 📖 Store API key in the root "env" section so OpenClaw can read it as NVIDIA_API_KEY env var.
1196
1266
  // 📖 Only writes if not already set to avoid overwriting an existing key.
@@ -1395,7 +1465,7 @@ async function main() {
1395
1465
  // 📖 Clamp scrollOffset so cursor is always within the visible viewport window.
1396
1466
  // 📖 Called after every cursor move, sort change, and terminal resize.
1397
1467
  const adjustScrollOffset = (st) => {
1398
- const total = st.results.length
1468
+ const total = st.visibleSorted ? st.visibleSorted.length : st.results.filter(r => !r.hidden).length
1399
1469
  let maxSlots = st.terminalRows - 10 // 5 header + 5 footer
1400
1470
  if (maxSlots < 1) maxSlots = 1
1401
1471
  if (total <= maxSlots) { st.scrollOffset = 0; return }
@@ -1442,6 +1512,7 @@ async function main() {
1442
1512
  settingsEditBuffer: '', // 📖 Typed characters for the API key being edited
1443
1513
  settingsTestResults: {}, // 📖 { providerKey: 'pending'|'ok'|'fail'|null }
1444
1514
  config, // 📖 Live reference to the config object (updated on save)
1515
+ visibleSorted: [], // 📖 Cached visible+sorted models — shared between render loop and key handlers
1445
1516
  }
1446
1517
 
1447
1518
  // 📖 Re-clamp viewport on terminal resize
@@ -1698,7 +1769,11 @@ async function main() {
1698
1769
  state.sortColumn = col
1699
1770
  state.sortDirection = 'asc'
1700
1771
  }
1701
- adjustScrollOffset(state)
1772
+ // 📖 Recompute visible sorted list and reset cursor to top to avoid stale index
1773
+ const visible = state.results.filter(r => !r.hidden)
1774
+ state.visibleSorted = sortResults(visible, state.sortColumn, state.sortDirection)
1775
+ state.cursor = 0
1776
+ state.scrollOffset = 0
1702
1777
  return
1703
1778
  }
1704
1779
 
@@ -1714,7 +1789,11 @@ async function main() {
1714
1789
  if (key.name === 't') {
1715
1790
  tierFilterMode = (tierFilterMode + 1) % TIER_CYCLE.length
1716
1791
  applyTierFilter()
1717
- adjustScrollOffset(state)
1792
+ // 📖 Recompute visible sorted list and reset cursor to avoid stale index into new filtered set
1793
+ const visible = state.results.filter(r => !r.hidden)
1794
+ state.visibleSorted = sortResults(visible, state.sortColumn, state.sortDirection)
1795
+ state.cursor = 0
1796
+ state.scrollOffset = 0
1718
1797
  return
1719
1798
  }
1720
1799
 
@@ -1741,7 +1820,7 @@ async function main() {
1741
1820
  }
1742
1821
 
1743
1822
  if (key.name === 'down') {
1744
- if (state.cursor < results.length - 1) {
1823
+ if (state.cursor < state.visibleSorted.length - 1) {
1745
1824
  state.cursor++
1746
1825
  adjustScrollOffset(state)
1747
1826
  }
@@ -1754,9 +1833,9 @@ async function main() {
1754
1833
  }
1755
1834
 
1756
1835
  if (key.name === 'return') { // Enter
1757
- // 📖 Use the same sorting as the table display
1758
- const sorted = sortResults(results, state.sortColumn, state.sortDirection)
1759
- const selected = sorted[state.cursor]
1836
+ // 📖 Use the cached visible+sorted array guaranteed to match what's on screen
1837
+ const selected = state.visibleSorted[state.cursor]
1838
+ if (!selected) return // 📖 Guard: empty visible list (all filtered out)
1760
1839
  // 📖 Allow selecting ANY model (even timeout/down) - user knows what they're doing
1761
1840
  userSelected = { modelId: selected.modelId, label: selected.label, tier: selected.tier, providerKey: selected.providerKey }
1762
1841
 
@@ -1779,13 +1858,24 @@ async function main() {
1779
1858
  }
1780
1859
  console.log()
1781
1860
 
1861
+ // 📖 Warn if no API key is configured for the selected model's provider
1862
+ if (state.mode !== 'openclaw') {
1863
+ const selectedApiKey = getApiKey(state.config, selected.providerKey)
1864
+ if (!selectedApiKey) {
1865
+ console.log(chalk.yellow(` Warning: No API key configured for ${selected.providerKey}.`))
1866
+ console.log(chalk.yellow(` OpenCode may not be able to use ${selected.label}.`))
1867
+ console.log(chalk.dim(` Set ${ENV_VAR_NAMES[selected.providerKey] || selected.providerKey.toUpperCase() + '_API_KEY'} or configure via settings (P key).`))
1868
+ console.log()
1869
+ }
1870
+ }
1871
+
1782
1872
  // 📖 Dispatch to the correct integration based on active mode
1783
1873
  if (state.mode === 'openclaw') {
1784
1874
  await startOpenClaw(userSelected, apiKey)
1785
1875
  } else if (state.mode === 'opencode-desktop') {
1786
- await startOpenCodeDesktop(userSelected)
1876
+ await startOpenCodeDesktop(userSelected, state.config)
1787
1877
  } else {
1788
- await startOpenCode(userSelected)
1878
+ await startOpenCode(userSelected, state.config)
1789
1879
  }
1790
1880
  process.exit(0)
1791
1881
  }
@@ -1802,12 +1892,21 @@ async function main() {
1802
1892
  // 📖 Animation loop: render settings overlay OR main table based on state
1803
1893
  const ticker = setInterval(() => {
1804
1894
  state.frame++
1895
+ // 📖 Cache visible+sorted models each frame so Enter handler always matches the display
1896
+ if (!state.settingsOpen) {
1897
+ const visible = state.results.filter(r => !r.hidden)
1898
+ state.visibleSorted = sortResults(visible, state.sortColumn, state.sortDirection)
1899
+ }
1805
1900
  const content = state.settingsOpen
1806
1901
  ? renderSettings()
1807
1902
  : renderTable(state.results, state.pendingPings, state.frame, state.cursor, state.sortColumn, state.sortDirection, state.pingInterval, state.lastPingTime, state.mode, tierFilterMode, state.scrollOffset, state.terminalRows)
1808
1903
  process.stdout.write(ALT_HOME + content)
1809
1904
  }, Math.round(1000 / FPS))
1810
1905
 
1906
+ // 📖 Populate visibleSorted before the first frame so Enter works immediately
1907
+ const initialVisible = state.results.filter(r => !r.hidden)
1908
+ state.visibleSorted = sortResults(initialVisible, state.sortColumn, state.sortDirection)
1909
+
1811
1910
  process.stdout.write(ALT_HOME + renderTable(state.results, state.pendingPings, state.frame, state.cursor, state.sortColumn, state.sortDirection, state.pingInterval, state.lastPingTime, state.mode, tierFilterMode, state.scrollOffset, state.terminalRows))
1812
1911
 
1813
1912
  // ── Continuous ping loop — ping all models every N seconds forever ──────────
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "free-coding-models",
3
- "version": "0.1.50",
3
+ "version": "0.1.52",
4
4
  "description": "Find the fastest coding LLM models in seconds — ping free models from multiple providers, pick the best one for OpenCode, Cursor, or any AI coding assistant.",
5
5
  "keywords": [
6
6
  "nvidia",