free-coding-models 0.2.9 → 0.2.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,21 @@
2
2
 
3
3
  ---
4
4
 
5
+ ## 0.2.11
6
+
7
+ ### Added
8
+ - **Pi Coding Agent support**: Enabled Pi (pi.dev) as a launchable mode in the Z key cycle. Select a model and press Enter to configure Pi's config file and spawn the Pi coding agent CLI with the chosen model and API endpoint.
9
+
10
+ ---
11
+
12
+ ## 0.2.10
13
+
14
+ ### Changed
15
+ - **Discord invite link**: Updated to permanent non-expiring link `https://discord.gg/ZTNFHvvCkU` in README and TUI footer
16
+ - **NVIDIA NIM**: Added MiniMax M2.5 (S+ tier) to model list
17
+
18
+ ---
19
+
5
20
  ## 0.2.9
6
21
 
7
22
  ### Fixed
package/README.md CHANGED
@@ -26,7 +26,7 @@
26
26
  </p>
27
27
 
28
28
  <p align="center">
29
- 💬 <a href="https://discord.gg/f2AjwV2AN">Let's talk about the project on Discord</a>
29
+ 💬 <a href="https://discord.gg/ZTNFHvvCkU">Let's talk about the project on Discord</a>
30
30
  </p>
31
31
 
32
32
  By Vanessa Depraute
@@ -1023,7 +1023,7 @@ We welcome contributions! Feel free to open issues, submit pull requests, or get
1023
1023
 
1024
1024
  For questions or issues, open a [GitHub issue](https://github.com/vava-nessa/free-coding-models/issues).
1025
1025
 
1026
- 💬 Let's talk about the project on Discord: https://discord.gg/f2AjwV2AN
1026
+ 💬 Let's talk about the project on Discord: https://discord.gg/ZTNFHvvCkU
1027
1027
 
1028
1028
  ---
1029
1029
 
@@ -74,7 +74,7 @@
74
74
  * - --opencode: OpenCode CLI mode (launch CLI with selected model)
75
75
  * - --opencode-desktop: OpenCode Desktop mode (set model & open Desktop app)
76
76
  * - --openclaw: OpenClaw mode (set selected model as default in OpenClaw)
77
- * - --crush / --goose: launch the currently selected model in the supported external CLI
77
+ * - --crush / --goose / --pi: launch the currently selected model in the supported external CLI
78
78
  * - --best: Show only top-tier models (A+, S, S+)
79
79
  * - --fiable: Analyze 10s and output the most reliable model
80
80
  * - --json: Output results as JSON (for scripting/automation)
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "free-coding-models",
3
- "version": "0.2.9",
3
+ "version": "0.2.11",
4
4
  "description": "Find the fastest coding LLM models in seconds — ping free models from multiple providers, pick the best one for OpenCode, Cursor, or any AI coding assistant.",
5
5
  "keywords": [
6
6
  "nvidia",
package/sources.js CHANGED
@@ -47,6 +47,7 @@ export const nvidiaNim = [
47
47
  ['z-ai/glm4.7', 'GLM 4.7', 'S+', '73.8%', '200k'],
48
48
  ['moonshotai/kimi-k2-thinking', 'Kimi K2 Thinking', 'S+', '71.3%', '256k'],
49
49
  ['minimaxai/minimax-m2.1', 'MiniMax M2.1', 'S+', '74.0%', '200k'],
50
+ ['minimaxai/minimax-m2.5', 'MiniMax M2.5', 'S+', '80.2%', '200k'],
50
51
  ['stepfun-ai/step-3.5-flash', 'Step 3.5 Flash', 'S+', '74.4%', '256k'],
51
52
  ['qwen/qwen3-coder-480b-a35b-instruct', 'Qwen3 Coder 480B', 'S+', '70.6%', '256k'],
52
53
  ['qwen/qwen3-235b-a22b', 'Qwen3 235B', 'S+', '70.0%', '128k'],
@@ -661,9 +661,9 @@ export function renderTable(results, pendingPings, frame, cursor = null, sortCol
661
661
  chalk.rgb(255, 200, 100)('\x1b]8;;https://buymeacoffee.com/vavanessadev\x1b\\Buy me a coffee\x1b]8;;\x1b\\') +
662
662
  chalk.dim(' • ') +
663
663
  '💬 ' +
664
- chalk.rgb(200, 150, 255)('\x1b]8;;https://discord.gg/f2AjwV2AN\x1b\\Discord\x1b]8;;\x1b\\') +
664
+ chalk.rgb(200, 150, 255)('\x1b]8;;https://discord.gg/ZTNFHvvCkU\x1b\\Discord\x1b]8;;\x1b\\') +
665
665
  chalk.dim(' → ') +
666
- chalk.rgb(200, 150, 255)('https://discord.gg/f2AjwV2AN') +
666
+ chalk.rgb(200, 150, 255)('https://discord.gg/ZTNFHvvCkU') +
667
667
  chalk.dim(' • ') +
668
668
  chalk.dim('Ctrl+C Exit')
669
669
  )
@@ -28,6 +28,7 @@ export const TOOL_METADATA = {
28
28
  openclaw: { label: 'OpenClaw', emoji: '🦞', flag: '--openclaw' },
29
29
  crush: { label: 'Crush', emoji: '💘', flag: '--crush' },
30
30
  goose: { label: 'Goose', emoji: '🪿', flag: '--goose' },
31
+ pi: { label: 'Pi', emoji: 'π', flag: '--pi' },
31
32
  // aider: { label: 'Aider', emoji: '🛠', flag: '--aider' },
32
33
  // 'claude-code': { label: 'Claude Code', emoji: '🧠', flag: '--claude-code' },
33
34
  // codex: { label: 'Codex CLI', emoji: '⌘', flag: '--codex' },
@@ -35,7 +36,6 @@ export const TOOL_METADATA = {
35
36
  // qwen: { label: 'Qwen Code', emoji: '🌊', flag: '--qwen' },
36
37
  // openhands: { label: 'OpenHands', emoji: '🤲', flag: '--openhands' },
37
38
  // amp: { label: 'Amp', emoji: '⚡', flag: '--amp' },
38
- // pi: { label: 'Pi', emoji: 'π', flag: '--pi' },
39
39
  }
40
40
 
41
41
  export const TOOL_MODE_ORDER = [
@@ -44,6 +44,7 @@ export const TOOL_MODE_ORDER = [
44
44
  'openclaw',
45
45
  'crush',
46
46
  'goose',
47
+ 'pi',
47
48
  // 'aider',
48
49
  // 'claude-code',
49
50
  // 'codex',
@@ -51,7 +52,6 @@ export const TOOL_MODE_ORDER = [
51
52
  // 'qwen',
52
53
  // 'openhands',
53
54
  // 'amp',
54
- // 'pi',
55
55
  ]
56
56
 
57
57
  export function getToolMeta(mode) {