free-coding-models 0.1.1 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -0
- package/bin/free-coding-models.js +109 -7
- package/package.json +1 -1
package/README.md
CHANGED
|
@@ -83,6 +83,12 @@ bunx free-coding-models YOUR_API_KEY
|
|
|
83
83
|
```bash
|
|
84
84
|
# Just run it — will prompt for API key if not set
|
|
85
85
|
free-coding-models
|
|
86
|
+
|
|
87
|
+
# Show only top-tier models (A+, S, S+)
|
|
88
|
+
free-coding-models --best
|
|
89
|
+
|
|
90
|
+
# Analyze for 10 seconds and output the most reliable model
|
|
91
|
+
free-coding-models --fiable
|
|
86
92
|
```
|
|
87
93
|
|
|
88
94
|
**How it works:**
|
|
@@ -288,6 +294,10 @@ OpenCode will automatically detect this file when launched and guide you through
|
|
|
288
294
|
- **Ping interval**: 2 seconds between complete re-pings of all models (adjustable with W/X keys)
|
|
289
295
|
- **Monitor mode**: Interface stays open forever, press Ctrl+C to exit
|
|
290
296
|
|
|
297
|
+
**Flags:**
|
|
298
|
+
- **--best** — Show only top-tier models (A+, S, S+)
|
|
299
|
+
- **--fiable** — Analyze for 10 seconds and output the most reliable model in format `provider/model_id`
|
|
300
|
+
|
|
291
301
|
**Keyboard shortcuts:**
|
|
292
302
|
- **↑↓** — Navigate models
|
|
293
303
|
- **Enter** — Select model and launch OpenCode
|
|
@@ -615,15 +615,111 @@ After installation, you can use: opencode --model nvidia/${model.modelId}`
|
|
|
615
615
|
}
|
|
616
616
|
}
|
|
617
617
|
|
|
618
|
-
// ───
|
|
618
|
+
// ─── Helper function to find best model after analysis ────────────────────────
// 📖 Ranks ping results and returns the single best entry, or null when empty.
// Preference order: reachable ('up') models first, then lowest average ping,
// then highest uptime percentage as the final tie-breaker.
function findBestModel(results) {
  if (results.length === 0) return null

  const compare = (a, b) => {
    // 📖 Reachable models always outrank unreachable ones.
    const aUp = a.status === 'up'
    const bUp = b.status === 'up'
    if (aUp !== bUp) return aUp ? -1 : 1

    // 📖 Faster average ping wins.
    const avgDelta = getAvg(a) - getAvg(b)
    if (avgDelta !== 0) return avgDelta

    // 📖 Tie-break on uptime, highest first.
    return getUptime(b) - getUptime(a)
  }

  return [...results].sort(compare)[0]
}
|
|
640
|
+
|
|
641
|
+
// ─── Function to run in fiable mode (10-second analysis then output best model) ──
// 📖 Pings every model repeatedly for ~10 seconds, then prints the most
// reliable one in the format `provider/model_id` and exits the process
// (exit 0 on success, exit 1 when no model could be ranked).
async function runFiableMode(apiKey) {
  console.log(chalk.cyan(' ⚡ Analyzing models for reliability (10 seconds)...'))
  console.log()

  const results = MODELS.map(([modelId, label, tier], i) => ({
    idx: i + 1, modelId, label, tier,
    status: 'pending',
    pings: [],      // 📖 one { ms, code } sample per completed ping cycle
    httpCode: null, // 📖 last non-OK HTTP code, set for 'down' models
  }))

  const startTime = Date.now()
  const analysisDuration = 10000 // 10 seconds

  // 📖 One full ping cycle: ping all models in parallel, record each outcome.
  const pingAll = () => Promise.allSettled(results.map(r =>
    ping(apiKey, r.modelId).then(({ code, ms }) => {
      r.pings.push({ ms, code })
      if (code === '200') {
        r.status = 'up'
      } else if (code === '000') {
        r.status = 'timeout'
      } else {
        r.status = 'down'
        r.httpCode = code
      }
    })
  ))

  // 📖 BUGFIX: the original version pinged each model exactly once and then
  // slept out the remaining time, so avg/uptime were computed from a single
  // sample. Keep running ping cycles until the analysis window closes so the
  // reliability stats are actually based on the advertised 10 seconds.
  await pingAll()
  while (Date.now() - startTime < analysisDuration) {
    // 📖 Short pause between cycles, capped so we never overshoot the window.
    const remainingTime = analysisDuration - (Date.now() - startTime)
    await new Promise(resolve => setTimeout(resolve, Math.min(1000, remainingTime)))
    if (Date.now() - startTime >= analysisDuration) break
    await pingAll()
  }

  // 📖 Find best model
  const best = findBestModel(results)

  if (!best) {
    console.log(chalk.red(' ✖ No reliable model found'))
    process.exit(1)
  }

  // 📖 Output in format: provider/name
  const provider = 'nvidia' // Always NVIDIA NIM for now
  console.log(chalk.green(` ✓ Most reliable model:`))
  console.log(chalk.bold(` ${provider}/${best.modelId}`))
  console.log()
  console.log(chalk.dim(` 📊 Stats:`))
  console.log(chalk.dim(` Avg ping: ${getAvg(best)}ms`))
  console.log(chalk.dim(` Uptime: ${getUptime(best)}%`))
  console.log(chalk.dim(` Status: ${best.status === 'up' ? '✅ UP' : '❌ DOWN'}`))

  process.exit(0)
}
|
|
619
697
|
|
|
620
698
|
async function main() {
|
|
699
|
+
// 📖 Parse CLI arguments properly
|
|
700
|
+
const args = process.argv.slice(2)
|
|
701
|
+
|
|
702
|
+
// 📖 Extract API key (first non-flag argument) and flags
|
|
703
|
+
let apiKey = null
|
|
704
|
+
const flags = []
|
|
705
|
+
|
|
706
|
+
for (const arg of args) {
|
|
707
|
+
if (arg.startsWith('--')) {
|
|
708
|
+
flags.push(arg.toLowerCase())
|
|
709
|
+
} else if (!apiKey) {
|
|
710
|
+
apiKey = arg
|
|
711
|
+
}
|
|
712
|
+
}
|
|
713
|
+
|
|
621
714
|
// 📖 Priority: CLI arg > env var > saved config > wizard
|
|
622
|
-
|
|
715
|
+
if (!apiKey) {
|
|
716
|
+
apiKey = process.env.NVIDIA_API_KEY || loadApiKey()
|
|
717
|
+
}
|
|
718
|
+
|
|
719
|
+
// 📖 Check for CLI flags
|
|
720
|
+
const bestMode = flags.includes('--best')
|
|
721
|
+
const fiableMode = flags.includes('--fiable') // flags are lowercased above, so a single check suffices
|
|
623
722
|
|
|
624
|
-
// 📖 Check for BEST flag - only show top tiers (A+, S, S+)
|
|
625
|
-
const bestMode = process.argv.includes('--BEST') || process.argv.includes('--best')
|
|
626
|
-
|
|
627
723
|
if (!apiKey) {
|
|
628
724
|
apiKey = await promptApiKey()
|
|
629
725
|
if (!apiKey) {
|
|
@@ -634,6 +730,11 @@ async function main() {
|
|
|
634
730
|
process.exit(1)
|
|
635
731
|
}
|
|
636
732
|
}
|
|
733
|
+
|
|
734
|
+
// 📖 Handle fiable mode first (it exits after analysis)
|
|
735
|
+
if (fiableMode) {
|
|
736
|
+
await runFiableMode(apiKey)
|
|
737
|
+
}
|
|
637
738
|
|
|
638
739
|
// 📖 Filter models to only show top tiers if BEST mode is active
|
|
639
740
|
let results = MODELS.map(([modelId, label, tier], i) => ({
|
|
@@ -660,7 +761,8 @@ async function main() {
|
|
|
660
761
|
sortColumn: 'avg',
|
|
661
762
|
sortDirection: 'asc',
|
|
662
763
|
pingInterval: PING_INTERVAL, // 📖 Track current interval for C/V keys
|
|
663
|
-
lastPingTime: Date.now() // 📖 Track when last ping cycle started
|
|
764
|
+
lastPingTime: Date.now(), // 📖 Track when last ping cycle started
|
|
765
|
+
fiableMode // 📖 Pass fiable mode to state
|
|
664
766
|
}
|
|
665
767
|
|
|
666
768
|
// 📖 Enter alternate screen — animation runs here, zero scrollback pollution
|
|
@@ -841,4 +943,4 @@ main().catch((err) => {
|
|
|
841
943
|
process.stdout.write(ALT_LEAVE)
|
|
842
944
|
console.error(err)
|
|
843
945
|
process.exit(1)
|
|
844
|
-
})
|
|
946
|
+
})
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "free-coding-models",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.2",
|
|
4
4
|
"description": "Find the fastest coding LLM models in seconds — ping free models from multiple providers, pick the best one for OpenCode, Cursor, or any AI coding assistant.",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"nvidia",
|