free-coding-models 0.1.62 β†’ 0.1.64

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -10,7 +10,7 @@
10
10
  * During benchmarking, users can navigate with arrow keys and press Enter to act on the selected model.
11
11
  *
12
12
  * 🎯 Key features:
13
- * - Parallel pings across all models with animated real-time updates (3 providers: NIM, Groq, Cerebras)
13
+ * - Parallel pings across all models with animated real-time updates (multi-provider)
14
14
  * - Continuous monitoring with 2-second ping intervals (never stops)
15
15
  * - Rolling averages calculated from ALL successful pings since start
16
16
  * - Best-per-tier highlighting with medals (πŸ₯‡πŸ₯ˆπŸ₯‰)
@@ -19,7 +19,7 @@
19
19
  * - Startup mode menu (OpenCode CLI vs OpenCode Desktop vs OpenClaw) when no flag is given
20
20
  * - Automatic config detection and model setup for both tools
21
21
  * - JSON config stored in ~/.free-coding-models.json (auto-migrates from old plain-text)
22
- * - Multi-provider support via sources.js (NIM, Groq, Cerebras β€” extensible)
22
+ * - Multi-provider support via sources.js (NIM/Groq/Cerebras/OpenRouter/Hugging Face/Replicate/DeepInfra/... β€” extensible)
23
23
  * - Settings screen (P key) to manage API keys per provider, enable/disable, test keys
24
24
  * - Uptime percentage tracking (successful pings / total pings)
25
25
  * - Sortable columns (R/Y/O/M/L/A/S/N/H/V/U keys)
@@ -27,15 +27,21 @@
27
27
  *
28
28
  * β†’ Functions:
29
29
  * - `loadConfig` / `saveConfig` / `getApiKey`: Multi-provider JSON config via lib/config.js
30
- * - `promptApiKey`: Interactive wizard for first-time NVIDIA API key setup
30
+ * - `promptTelemetryConsent`: First-run consent flow for anonymous analytics
31
+ * - `getTelemetryDistinctId`: Generate/reuse a stable anonymous ID for telemetry
32
+ * - `getTelemetryTerminal`: Infer terminal family (Terminal.app, iTerm2, kitty, etc.)
33
+ * - `isTelemetryDebugEnabled` / `telemetryDebug`: Optional runtime telemetry diagnostics via env
34
+ * - `sendUsageTelemetry`: Fire-and-forget anonymous app-start event
35
+ * - `promptApiKey`: Interactive wizard for first-time multi-provider API key setup
31
36
  * - `promptModeSelection`: Startup menu to choose OpenCode vs OpenClaw
32
- * - `ping`: Perform HTTP request to NIM endpoint with timeout handling
37
+ * - `buildPingRequest` / `ping`: Build provider-specific probe requests and measure latency
33
38
  * - `renderTable`: Generate ASCII table with colored latency indicators and status emojis
34
39
  * - `getAvg`: Calculate average latency from all successful pings
35
40
  * - `getVerdict`: Determine verdict string based on average latency (Overloaded for 429)
36
41
  * - `getUptime`: Calculate uptime percentage from ping history
37
42
  * - `sortResults`: Sort models by various columns
38
43
  * - `checkNvidiaNimConfig`: Check if NVIDIA NIM provider is configured in OpenCode
44
+ * - `isTcpPortAvailable` / `resolveOpenCodeTmuxPort`: Pick a safe OpenCode port when running in tmux
39
45
  * - `startOpenCode`: Launch OpenCode CLI with selected model (configures if needed)
40
46
  * - `startOpenCodeDesktop`: Set model in shared config & open OpenCode Desktop app
41
47
  * - `loadOpenClawConfig` / `saveOpenClawConfig`: Manage ~/.openclaw/openclaw.json
@@ -52,8 +58,8 @@
52
58
  * βš™οΈ Configuration:
53
59
  * - API keys stored per-provider in ~/.free-coding-models.json (0600 perms)
54
60
  * - Old ~/.free-coding-models plain-text auto-migrated as nvidia key on first run
55
- * - Env vars override config: NVIDIA_API_KEY, GROQ_API_KEY, CEREBRAS_API_KEY
56
- * - Models loaded from sources.js β€” 53 models across NIM, Groq, Cerebras
61
+ * - Env vars override config: NVIDIA_API_KEY, GROQ_API_KEY, CEREBRAS_API_KEY, OPENROUTER_API_KEY, HUGGINGFACE_API_KEY/HF_TOKEN, REPLICATE_API_TOKEN, DEEPINFRA_API_KEY/DEEPINFRA_TOKEN, FIREWORKS_API_KEY, etc.
62
+ * - Models loaded from sources.js β€” all provider/model definitions are centralized there
57
63
  * - OpenCode config: ~/.config/opencode/opencode.json
58
64
  * - OpenClaw config: ~/.openclaw/openclaw.json
59
65
  * - Ping timeout: 15s per attempt
@@ -67,6 +73,7 @@
67
73
  * - --openclaw: OpenClaw mode (set selected model as default in OpenClaw)
68
74
  * - --best: Show only top-tier models (A+, S, S+)
69
75
  * - --fiable: Analyze 10s and output the most reliable model
76
+ * - --no-telemetry: Disable anonymous usage analytics for this run
70
77
  * - --tier S/A/B/C: Filter models by tier letter (S=S+/S, A=A+/A/A-, B=B+/B, C=C)
71
78
  *
72
79
  * @see {@link https://build.nvidia.com} NVIDIA API key generation
@@ -77,8 +84,10 @@
77
84
  import chalk from 'chalk'
78
85
  import { createRequire } from 'module'
79
86
  import { readFileSync, writeFileSync, existsSync, copyFileSync, mkdirSync } from 'fs'
87
+ import { randomUUID } from 'crypto'
80
88
  import { homedir } from 'os'
81
89
  import { join, dirname } from 'path'
90
+ import { createServer } from 'net'
82
91
  import { MODELS, sources } from '../sources.js'
83
92
  import { patchOpenClawModelsJson } from '../patch-openclaw-models.js'
84
93
  import { getAvg, getVerdict, getUptime, sortResults, filterByTier, findBestModel, parseArgs, TIER_ORDER, VERDICT_ORDER, TIER_LETTER_MAP } from '../lib/utils.js'
@@ -90,6 +99,322 @@ const readline = require('readline')
90
99
  // ─── Version check ────────────────────────────────────────────────────────────
91
100
  const pkg = require('../package.json')
92
101
  const LOCAL_VERSION = pkg.version
102
+ const TELEMETRY_CONSENT_VERSION = 1
103
+ const TELEMETRY_TIMEOUT = 1_200
104
+ const POSTHOG_CAPTURE_PATH = '/i/v0/e/'
105
+ const POSTHOG_DEFAULT_HOST = 'https://eu.i.posthog.com'
106
+ // πŸ“– Consent ASCII banner shown before telemetry choice to make first-run intent explicit.
107
+ const TELEMETRY_CONSENT_ASCII = [
108
+ 'β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ',
109
+ 'β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ',
110
+ 'β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ',
111
+ 'β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ',
112
+ 'β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ',
113
+ '',
114
+ '',
115
+ ]
116
+ // πŸ“– Maintainer defaults for global npm telemetry (safe to publish: project key is a public ingest token).
117
+ const POSTHOG_PROJECT_KEY_DEFAULT = 'phc_5P1n8HaLof6nHM0tKJYt4bV5pj2XPb272fLVigwf1YQ'
118
+ const POSTHOG_HOST_DEFAULT = 'https://eu.i.posthog.com'
119
+
120
+ // πŸ“– parseTelemetryEnv: Convert env var strings into booleans.
121
+ // πŸ“– Returns true/false when value is recognized, otherwise null.
122
+ function parseTelemetryEnv(value) {
123
+ if (typeof value !== 'string') return null
124
+ const normalized = value.trim().toLowerCase()
125
+ if (['1', 'true', 'yes', 'on'].includes(normalized)) return true
126
+ if (['0', 'false', 'no', 'off'].includes(normalized)) return false
127
+ return null
128
+ }
129
+
130
+ // πŸ“– Optional debug switch for telemetry troubleshooting (disabled by default).
131
+ function isTelemetryDebugEnabled() {
132
+ return parseTelemetryEnv(process.env.FREE_CODING_MODELS_TELEMETRY_DEBUG) === true
133
+ }
134
+
135
+ // πŸ“– Writes telemetry debug traces to stderr only when explicitly enabled.
136
+ function telemetryDebug(message, meta = null) {
137
+ if (!isTelemetryDebugEnabled()) return
138
+ const prefix = '[telemetry-debug]'
139
+ if (meta === null) {
140
+ process.stderr.write(`${prefix} ${message}\n`)
141
+ return
142
+ }
143
+ try {
144
+ process.stderr.write(`${prefix} ${message} ${JSON.stringify(meta)}\n`)
145
+ } catch {
146
+ process.stderr.write(`${prefix} ${message}\n`)
147
+ }
148
+ }
149
+
150
+ // πŸ“– Ensure telemetry config shape exists even on old config files.
151
+ function ensureTelemetryConfig(config) {
152
+ if (!config.telemetry || typeof config.telemetry !== 'object') {
153
+ config.telemetry = { enabled: null, consentVersion: 0, anonymousId: null }
154
+ }
155
+ if (typeof config.telemetry.enabled !== 'boolean') config.telemetry.enabled = null
156
+ if (typeof config.telemetry.consentVersion !== 'number') config.telemetry.consentVersion = 0
157
+ if (typeof config.telemetry.anonymousId !== 'string' || !config.telemetry.anonymousId.trim()) {
158
+ config.telemetry.anonymousId = null
159
+ }
160
+ }
161
+
162
+ // πŸ“– Create or reuse a persistent anonymous distinct_id for PostHog.
163
+ // πŸ“– Stored locally in the config so a single user remains stable over time without exposing personal data.
164
+ function getTelemetryDistinctId(config) {
165
+ ensureTelemetryConfig(config)
166
+ if (config.telemetry.anonymousId) return config.telemetry.anonymousId
167
+
168
+ config.telemetry.anonymousId = `anon_${randomUUID()}`
169
+ saveConfig(config)
170
+ return config.telemetry.anonymousId
171
+ }
172
+
173
+ // πŸ“– Convert Node platform to human-readable system name for analytics segmentation.
174
+ function getTelemetrySystem() {
175
+ if (process.platform === 'darwin') return 'macOS'
176
+ if (process.platform === 'win32') return 'Windows'
177
+ if (process.platform === 'linux') return 'Linux'
178
+ return process.platform
179
+ }
180
+
181
+ // πŸ“– Infer terminal family from environment hints for coarse usage segmentation.
182
+ // πŸ“– Never sends full env dumps; only a normalized terminal label is emitted.
183
+ function getTelemetryTerminal() {
184
+ const termProgramRaw = (process.env.TERM_PROGRAM || '').trim()
185
+ const termProgram = termProgramRaw.toLowerCase()
186
+ const term = (process.env.TERM || '').toLowerCase()
187
+
188
+ if (termProgram === 'apple_terminal') return 'Terminal.app'
189
+ if (termProgram === 'iterm.app') return 'iTerm2'
190
+ if (termProgram === 'warpterminal' || process.env.WARP_IS_LOCAL_SHELL_SESSION) return 'Warp'
191
+ if (process.env.WT_SESSION) return 'Windows Terminal'
192
+ if (process.env.KITTY_WINDOW_ID || term.includes('kitty')) return 'kitty'
193
+ if (process.env.GHOSTTY_RESOURCES_DIR || term.includes('ghostty')) return 'Ghostty'
194
+ if (process.env.WEZTERM_PANE || termProgram === 'wezterm') return 'WezTerm'
195
+ if (process.env.KONSOLE_VERSION || termProgram === 'konsole') return 'Konsole'
196
+ if (process.env.GNOME_TERMINAL_SCREEN || termProgram === 'gnome-terminal') return 'GNOME Terminal'
197
+ if (process.env.TERMINAL_EMULATOR === 'JetBrains-JediTerm') return 'JetBrains Terminal'
198
+ if (process.env.TABBY_CONFIG_DIRECTORY || termProgram === 'tabby') return 'Tabby'
199
+ if (termProgram === 'vscode' || process.env.VSCODE_GIT_IPC_HANDLE) return 'VS Code Terminal'
200
+ if (process.env.ALACRITTY_SOCKET || term.includes('alacritty') || termProgram === 'alacritty') return 'Alacritty'
201
+ if (term.includes('foot') || termProgram === 'foot') return 'foot'
202
+ if (termProgram === 'hyper' || process.env.HYPER) return 'Hyper'
203
+ if (process.env.TMUX) return 'tmux'
204
+ if (process.env.STY) return 'screen'
205
+ // πŸ“– Generic fallback for many terminals that expose TERM_PROGRAM (e.g., Rio, Contour).
206
+ if (termProgramRaw) return termProgramRaw
207
+ if (term) return term
208
+
209
+ return 'unknown'
210
+ }
211
+
212
+ // πŸ“– Prompt consent on first run (or when consent schema version changes).
213
+ // πŸ“– Skipped when --no-telemetry is passed, or when the env var explicitly controls telemetry.
214
+ async function promptTelemetryConsent(config, cliArgs) {
215
+ if (cliArgs.noTelemetry) return
216
+
217
+ const envTelemetry = parseTelemetryEnv(process.env.FREE_CODING_MODELS_TELEMETRY)
218
+ if (envTelemetry !== null) return
219
+
220
+ ensureTelemetryConfig(config)
221
+ const hasStoredChoice = typeof config.telemetry.enabled === 'boolean'
222
+ const isConsentCurrent = config.telemetry.consentVersion >= TELEMETRY_CONSENT_VERSION
223
+ if (hasStoredChoice && isConsentCurrent) return
224
+
225
+ // πŸ“– Non-interactive runs should never hang waiting for input.
226
+ if (!process.stdin.isTTY || !process.stdout.isTTY) {
227
+ // πŸ“– Do not mutate persisted consent in headless runs.
228
+ // πŸ“– We simply skip the prompt; runtime telemetry remains governed by env/config precedence.
229
+ return
230
+ }
231
+
232
+ const options = [
233
+ { label: 'Accept & Continue', value: true, emoji: 'πŸ’–πŸ₯°πŸ’–' },
234
+ { label: 'Reject and Continue', value: false, emoji: '😒' },
235
+ ]
236
+ let selected = 0 // πŸ“– Default selection is Accept & Continue.
237
+
238
+ const accepted = await new Promise((resolve) => {
239
+ const render = () => {
240
+ const EL = '\x1b[K'
241
+ const lines = []
242
+ for (const asciiLine of TELEMETRY_CONSENT_ASCII) {
243
+ lines.push(chalk.greenBright(asciiLine))
244
+ }
245
+ lines.push(chalk.greenBright(`free-coding-models (v${LOCAL_VERSION})`))
246
+ lines.push(chalk.greenBright('Welcome ! Would you like to help improve the app and fix bugs by activating PostHog telemetry (anonymous & secure)'))
247
+ lines.push(chalk.greenBright("anonymous telemetry analytics (we don't collect anything from you)"))
248
+ lines.push('')
249
+
250
+ for (let i = 0; i < options.length; i++) {
251
+ const isSelected = i === selected
252
+ const option = options[i]
253
+ const buttonText = `${option.emoji} ${option.label}`
254
+
255
+ let button
256
+ if (isSelected && option.value) button = chalk.black.bgGreenBright(` ${buttonText} `)
257
+ else if (isSelected && !option.value) button = chalk.black.bgRedBright(` ${buttonText} `)
258
+ else if (option.value) button = chalk.greenBright(` ${buttonText} `)
259
+ else button = chalk.redBright(` ${buttonText} `)
260
+
261
+ const prefix = isSelected ? chalk.cyan(' ❯ ') : chalk.dim(' ')
262
+ lines.push(prefix + button)
263
+ }
264
+
265
+ lines.push('')
266
+ lines.push(chalk.dim(' ↑↓ Navigate β€’ Enter Select'))
267
+ lines.push(chalk.dim(' You can change this later in Settings (P).'))
268
+ lines.push('')
269
+
270
+ // πŸ“– Avoid full-screen clear escape here to prevent title/header offset issues in some terminals.
271
+ const cleared = lines.map(l => l + EL)
272
+ const terminalRows = process.stdout.rows || 24
273
+ const remaining = Math.max(0, terminalRows - cleared.length)
274
+ for (let i = 0; i < remaining; i++) cleared.push(EL)
275
+ process.stdout.write('\x1b[H' + cleared.join('\n'))
276
+ }
277
+
278
+ const cleanup = () => {
279
+ if (process.stdin.isTTY) process.stdin.setRawMode(false)
280
+ process.stdin.removeListener('keypress', onKeyPress)
281
+ process.stdin.pause()
282
+ }
283
+
284
+ const onKeyPress = (_str, key) => {
285
+ if (!key) return
286
+
287
+ if (key.ctrl && key.name === 'c') {
288
+ cleanup()
289
+ resolve(false)
290
+ return
291
+ }
292
+
293
+ if ((key.name === 'up' || key.name === 'left') && selected > 0) {
294
+ selected--
295
+ render()
296
+ return
297
+ }
298
+
299
+ if ((key.name === 'down' || key.name === 'right') && selected < options.length - 1) {
300
+ selected++
301
+ render()
302
+ return
303
+ }
304
+
305
+ if (key.name === 'return') {
306
+ cleanup()
307
+ resolve(options[selected].value)
308
+ }
309
+ }
310
+
311
+ readline.emitKeypressEvents(process.stdin)
312
+ process.stdin.setEncoding('utf8')
313
+ process.stdin.resume()
314
+ if (process.stdin.isTTY) process.stdin.setRawMode(true)
315
+ process.stdin.on('keypress', onKeyPress)
316
+ render()
317
+ })
318
+
319
+ config.telemetry.enabled = accepted
320
+ config.telemetry.consentVersion = TELEMETRY_CONSENT_VERSION
321
+ saveConfig(config)
322
+
323
+ console.log()
324
+ if (accepted) {
325
+ console.log(chalk.green(' βœ… Analytics enabled. You can disable it later in Settings (P) or with --no-telemetry.'))
326
+ } else {
327
+ console.log(chalk.yellow(' Analytics disabled. You can enable it later in Settings (P).'))
328
+ }
329
+ console.log()
330
+ }
331
+
332
+ // πŸ“– Resolve telemetry effective state with clear precedence:
333
+ // πŸ“– CLI flag > env var > config file > disabled by default.
334
+ function isTelemetryEnabled(config, cliArgs) {
335
+ if (cliArgs.noTelemetry) return false
336
+ const envTelemetry = parseTelemetryEnv(process.env.FREE_CODING_MODELS_TELEMETRY)
337
+ if (envTelemetry !== null) return envTelemetry
338
+ ensureTelemetryConfig(config)
339
+ return config.telemetry.enabled === true
340
+ }
341
+
342
+ // πŸ“– Fire-and-forget analytics ping: never blocks UX, never throws.
343
+ async function sendUsageTelemetry(config, cliArgs, payload) {
344
+ if (!isTelemetryEnabled(config, cliArgs)) {
345
+ telemetryDebug('skip: telemetry disabled', {
346
+ cliNoTelemetry: cliArgs.noTelemetry === true,
347
+ envTelemetry: process.env.FREE_CODING_MODELS_TELEMETRY || null,
348
+ configEnabled: config?.telemetry?.enabled ?? null,
349
+ })
350
+ return
351
+ }
352
+
353
+ const apiKey = (
354
+ process.env.FREE_CODING_MODELS_POSTHOG_KEY ||
355
+ process.env.POSTHOG_PROJECT_API_KEY ||
356
+ POSTHOG_PROJECT_KEY_DEFAULT ||
357
+ ''
358
+ ).trim()
359
+ if (!apiKey) {
360
+ telemetryDebug('skip: missing api key')
361
+ return
362
+ }
363
+
364
+ const host = (
365
+ process.env.FREE_CODING_MODELS_POSTHOG_HOST ||
366
+ process.env.POSTHOG_HOST ||
367
+ POSTHOG_HOST_DEFAULT ||
368
+ POSTHOG_DEFAULT_HOST
369
+ ).trim().replace(/\/+$/, '')
370
+ if (!host) {
371
+ telemetryDebug('skip: missing host')
372
+ return
373
+ }
374
+
375
+ try {
376
+ const endpoint = `${host}${POSTHOG_CAPTURE_PATH}`
377
+ const distinctId = getTelemetryDistinctId(config)
378
+ const timestamp = typeof payload?.ts === 'string' ? payload.ts : new Date().toISOString()
379
+ const signal = (typeof AbortSignal !== 'undefined' && typeof AbortSignal.timeout === 'function')
380
+ ? AbortSignal.timeout(TELEMETRY_TIMEOUT)
381
+ : undefined
382
+
383
+ const posthogBody = {
384
+ api_key: apiKey,
385
+ event: payload?.event || 'app_start',
386
+ distinct_id: distinctId,
387
+ timestamp,
388
+ properties: {
389
+ $process_person_profile: false,
390
+ source: 'cli',
391
+ app: 'free-coding-models',
392
+ version: payload?.version || LOCAL_VERSION,
393
+ app_version: payload?.version || LOCAL_VERSION,
394
+ mode: payload?.mode || 'opencode',
395
+ system: getTelemetrySystem(),
396
+ terminal: getTelemetryTerminal(),
397
+ },
398
+ }
399
+
400
+ await fetch(endpoint, {
401
+ method: 'POST',
402
+ headers: { 'content-type': 'application/json' },
403
+ body: JSON.stringify(posthogBody),
404
+ signal,
405
+ })
406
+ telemetryDebug('sent', {
407
+ event: posthogBody.event,
408
+ endpoint,
409
+ mode: posthogBody.properties.mode,
410
+ system: posthogBody.properties.system,
411
+ terminal: posthogBody.properties.terminal,
412
+ })
413
+ } catch {
414
+ // πŸ“– Ignore failures silently: analytics must never break the CLI.
415
+ telemetryDebug('error: send failed')
416
+ }
417
+ }
93
418
 
94
419
  async function checkForUpdate() {
95
420
  try {
@@ -163,7 +488,7 @@ function runUpdate(latestVersion) {
163
488
 
164
489
  // ─── First-run wizard ─────────────────────────────────────────────────────────
165
490
  // πŸ“– Shown when NO provider has a key configured yet.
166
- // πŸ“– Steps through all 3 providers sequentially β€” each is optional (Enter to skip).
491
+ // πŸ“– Steps through all configured providers sequentially β€” each is optional (Enter to skip).
167
492
  // πŸ“– At least one key must be entered to proceed. Keys saved to ~/.free-coding-models.json.
168
493
  // πŸ“– Returns the nvidia key (or null) for backward-compat with the rest of main().
169
494
  async function promptApiKey(config) {
@@ -172,81 +497,17 @@ async function promptApiKey(config) {
172
497
  console.log(chalk.dim(' Enter keys for any provider you want to use. Press Enter to skip one.'))
173
498
  console.log()
174
499
 
175
- // πŸ“– Provider definitions: label, key field, url for getting the key
176
- const providers = [
177
- {
178
- key: 'nvidia',
179
- label: 'NVIDIA NIM',
180
- color: chalk.rgb(118, 185, 0),
181
- url: 'https://build.nvidia.com',
182
- hint: 'Profile β†’ API Keys β†’ Generate',
183
- prefix: 'nvapi-',
184
- },
185
- {
186
- key: 'groq',
187
- label: 'Groq',
188
- color: chalk.rgb(249, 103, 20),
189
- url: 'https://console.groq.com/keys',
190
- hint: 'API Keys β†’ Create API Key',
191
- prefix: 'gsk_',
192
- },
193
- {
194
- key: 'cerebras',
195
- label: 'Cerebras',
196
- color: chalk.rgb(0, 180, 255),
197
- url: 'https://cloud.cerebras.ai',
198
- hint: 'API Keys β†’ Create',
199
- prefix: 'csk_ / cauth_',
200
- },
201
- {
202
- key: 'sambanova',
203
- label: 'SambaNova',
204
- color: chalk.rgb(255, 165, 0),
205
- url: 'https://cloud.sambanova.ai/apis',
206
- hint: 'API Keys β†’ Create ($5 free trial, 3 months)',
207
- prefix: 'sn-',
208
- },
209
- {
210
- key: 'openrouter',
211
- label: 'OpenRouter',
212
- color: chalk.rgb(120, 80, 255),
213
- url: 'https://openrouter.ai/settings/keys',
214
- hint: 'API Keys β†’ Create key (50 free req/day, shared quota)',
215
- prefix: 'sk-or-',
216
- },
217
- {
218
- key: 'codestral',
219
- label: 'Mistral Codestral',
220
- color: chalk.rgb(255, 100, 100),
221
- url: 'https://codestral.mistral.ai',
222
- hint: 'API Keys β†’ Create key (30 req/min, 2000/day β€” phone required)',
223
- prefix: 'csk-',
224
- },
225
- {
226
- key: 'hyperbolic',
227
- label: 'Hyperbolic',
228
- color: chalk.rgb(0, 200, 150),
229
- url: 'https://app.hyperbolic.ai/settings',
230
- hint: 'Settings β†’ API Keys ($1 free trial)',
231
- prefix: 'eyJ',
232
- },
233
- {
234
- key: 'scaleway',
235
- label: 'Scaleway',
236
- color: chalk.rgb(130, 0, 250),
237
- url: 'https://console.scaleway.com/iam/api-keys',
238
- hint: 'IAM β†’ API Keys (1M free tokens)',
239
- prefix: 'scw-',
240
- },
241
- {
242
- key: 'googleai',
243
- label: 'Google AI Studio',
244
- color: chalk.rgb(66, 133, 244),
245
- url: 'https://aistudio.google.com/apikey',
246
- hint: 'Get API key (free Gemma models, 14.4K req/day)',
247
- prefix: 'AIza',
248
- },
249
- ]
500
+ // πŸ“– Build providers from sources to keep setup in sync with actual supported providers.
501
+ const providers = Object.keys(sources).map((key) => {
502
+ const meta = PROVIDER_METADATA[key] || {}
503
+ return {
504
+ key,
505
+ label: meta.label || sources[key]?.name || key,
506
+ color: meta.color || chalk.white,
507
+ url: meta.signupUrl || 'https://example.com',
508
+ hint: meta.signupHint || 'Create API key',
509
+ }
510
+ })
250
511
 
251
512
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout })
252
513
 
@@ -440,14 +701,17 @@ const spinCell = (f, o = 0) => chalk.dim.yellow(FRAMES[(f + o) % FRAMES.length].
440
701
  // πŸ“– are imported from lib/utils.js for testability
441
702
 
442
703
  // ─── Viewport calculation ────────────────────────────────────────────────────
704
+ // πŸ“– Keep these constants in sync with renderTable() fixed shell lines.
705
+ // πŸ“– If this drifts, model rows overflow and can push the title row out of view.
706
+ const TABLE_HEADER_LINES = 4 // πŸ“– title, spacer, column headers, separator
707
+ const TABLE_FOOTER_LINES = 6 // πŸ“– spacer, hints, spacer, credit+contributors, discord, spacer
708
+ const TABLE_FIXED_LINES = TABLE_HEADER_LINES + TABLE_FOOTER_LINES
709
+
443
710
  // πŸ“– Computes the visible slice of model rows that fits in the terminal.
444
- // πŸ“– Fixed lines: 5 header + 5 footer = 10 lines always consumed.
445
- // πŸ“– Header: empty, title, empty, column headers, separator (5)
446
- // πŸ“– Footer: empty, hints, empty, credit, empty (5)
447
711
  // πŸ“– When scroll indicators are needed, they each consume 1 line from the model budget.
448
712
  function calculateViewport(terminalRows, scrollOffset, totalModels) {
449
713
  if (terminalRows <= 0) return { startIdx: 0, endIdx: totalModels, hasAbove: false, hasBelow: false }
450
- let maxSlots = terminalRows - 10 // 5 header + 5 footer
714
+ let maxSlots = terminalRows - TABLE_FIXED_LINES
451
715
  if (maxSlots < 1) maxSlots = 1
452
716
  if (totalModels <= maxSlots) return { startIdx: 0, endIdx: totalModels, hasAbove: false, hasBelow: false }
453
717
 
@@ -486,7 +750,7 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
486
750
  if (mode === 'openclaw') {
487
751
  modeBadge = chalk.bold.rgb(255, 100, 50)(' [🦞 OpenClaw]')
488
752
  } else if (mode === 'opencode-desktop') {
489
- modeBadge = chalk.bold.rgb(0, 200, 255)(' [πŸ–₯ Desktop]')
753
+ modeBadge = chalk.bold.rgb(0, 200, 255)(' [πŸ–₯ Desktop]')
490
754
  } else {
491
755
  modeBadge = chalk.bold.rgb(0, 200, 255)(' [πŸ’» CLI]')
492
756
  }
@@ -529,7 +793,6 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
529
793
  const sorted = sortResults(visibleResults, sortColumn, sortDirection)
530
794
 
531
795
  const lines = [
532
- '',
533
796
  ` ${chalk.bold('⚑ Free Coding Models')} ${chalk.dim('v' + LOCAL_VERSION)}${modeBadge}${modeHint}${tierBadge}${originBadge} ` +
534
797
  chalk.greenBright(`βœ… ${up}`) + chalk.dim(' up ') +
535
798
  chalk.yellow(`⏳ ${timeout}`) + chalk.dim(' timeout ') +
@@ -771,7 +1034,15 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
771
1034
  : chalk.rgb(0, 200, 255)('Enterβ†’OpenCode')
772
1035
  lines.push(chalk.dim(` ↑↓ Navigate β€’ `) + actionHint + chalk.dim(` β€’ R/Y/O/M/L/A/S/C/H/V/U Sort β€’ T Tier β€’ N Origin β€’ W↓/X↑ (${intervalSec}s) β€’ Z Mode β€’ `) + chalk.yellow('P') + chalk.dim(` Settings β€’ `) + chalk.bgGreenBright.black.bold(' K Help ') + chalk.dim(` β€’ Ctrl+C Exit`))
773
1036
  lines.push('')
774
- lines.push(chalk.rgb(255, 150, 200)(' Made with πŸ’– & β˜• by \x1b]8;;https://github.com/vava-nessa\x1b\\vava-nessa\x1b]8;;\x1b\\') + chalk.dim(' β€’ ') + '⭐ ' + '\x1b]8;;https://github.com/vava-nessa/free-coding-models\x1b\\Star on GitHub\x1b]8;;\x1b\\')
1037
+ lines.push(
1038
+ chalk.rgb(255, 150, 200)(' Made with πŸ’– & β˜• by \x1b]8;;https://github.com/vava-nessa\x1b\\vava-nessa\x1b]8;;\x1b\\') +
1039
+ chalk.dim(' β€’ ') +
1040
+ '⭐ ' +
1041
+ '\x1b]8;;https://github.com/vava-nessa/free-coding-models\x1b\\Star on GitHub\x1b]8;;\x1b\\' +
1042
+ chalk.dim(' β€’ ') +
1043
+ '🀝 ' +
1044
+ '\x1b]8;;https://github.com/vava-nessa/free-coding-models/graphs/contributors\x1b\\Contributors\x1b]8;;\x1b\\'
1045
+ )
775
1046
  // πŸ“– Discord invite + BETA warning β€” always visible at the bottom of the TUI
776
1047
  lines.push(' πŸ’¬ ' + chalk.cyanBright('\x1b]8;;https://discord.gg/5MbTnDC3Md\x1b\\Join our Discord\x1b]8;;\x1b\\') + chalk.dim(' β†’ ') + chalk.cyanBright('https://discord.gg/5MbTnDC3Md') + chalk.dim(' β€’ ') + chalk.yellow('⚠ BETA TUI') + chalk.dim(' β€” might crash or have problems'))
777
1048
  lines.push('')
@@ -788,23 +1059,50 @@ function renderTable(results, pendingPings, frame, cursor = null, sortColumn = '
788
1059
  // ─── HTTP ping ────────────────────────────────────────────────────────────────
789
1060
 
790
1061
  // πŸ“– ping: Send a single chat completion request to measure model availability and latency.
791
- // πŸ“– url param is the provider's endpoint URL β€” differs per provider (NIM, Groq, Cerebras).
1062
+ // πŸ“– providerKey and url determine provider-specific request format.
792
1063
  // πŸ“– apiKey can be null β€” in that case no Authorization header is sent.
793
1064
  // πŸ“– A 401 response still tells us the server is UP and gives us real latency.
794
- async function ping(apiKey, modelId, url) {
1065
+ function buildPingRequest(apiKey, modelId, providerKey, url) {
1066
+ if (providerKey === 'replicate') {
1067
+ // πŸ“– Replicate uses /v1/predictions with a different payload than OpenAI chat-completions.
1068
+ const replicateHeaders = { 'Content-Type': 'application/json', Prefer: 'wait=4' }
1069
+ if (apiKey) replicateHeaders.Authorization = `Token ${apiKey}`
1070
+ return {
1071
+ url,
1072
+ headers: replicateHeaders,
1073
+ body: { version: modelId, input: { prompt: 'hi' } },
1074
+ }
1075
+ }
1076
+
1077
+ const headers = { 'Content-Type': 'application/json' }
1078
+ if (apiKey) headers.Authorization = `Bearer ${apiKey}`
1079
+ if (providerKey === 'openrouter') {
1080
+ // πŸ“– OpenRouter recommends optional app identification headers.
1081
+ headers['HTTP-Referer'] = 'https://github.com/vava-nessa/free-coding-models'
1082
+ headers['X-Title'] = 'free-coding-models'
1083
+ }
1084
+
1085
+ return {
1086
+ url,
1087
+ headers,
1088
+ body: { model: modelId, messages: [{ role: 'user', content: 'hi' }], max_tokens: 1 },
1089
+ }
1090
+ }
1091
+
1092
+ async function ping(apiKey, modelId, providerKey, url) {
795
1093
  const ctrl = new AbortController()
796
1094
  const timer = setTimeout(() => ctrl.abort(), PING_TIMEOUT)
797
1095
  const t0 = performance.now()
798
1096
  try {
799
- // πŸ“– Only attach Authorization header when a key is available
800
- const headers = { 'Content-Type': 'application/json' }
801
- if (apiKey) headers['Authorization'] = `Bearer ${apiKey}`
802
- const resp = await fetch(url, {
1097
+ const req = buildPingRequest(apiKey, modelId, providerKey, url)
1098
+ const resp = await fetch(req.url, {
803
1099
  method: 'POST', signal: ctrl.signal,
804
- headers,
805
- body: JSON.stringify({ model: modelId, messages: [{ role: 'user', content: 'hi' }], max_tokens: 1 }),
1100
+ headers: req.headers,
1101
+ body: JSON.stringify(req.body),
806
1102
  })
807
- return { code: String(resp.status), ms: Math.round(performance.now() - t0) }
1103
+ // πŸ“– Normalize all HTTP 2xx statuses to "200" so existing verdict/avg logic still works.
1104
+ const code = resp.status >= 200 && resp.status < 300 ? '200' : String(resp.status)
1105
+ return { code, ms: Math.round(performance.now() - t0) }
808
1106
  } catch (err) {
809
1107
  const isTimeout = err.name === 'AbortError'
810
1108
  return {
@@ -845,12 +1143,112 @@ const ENV_VAR_NAMES = {
845
1143
  cerebras: 'CEREBRAS_API_KEY',
846
1144
  sambanova: 'SAMBANOVA_API_KEY',
847
1145
  openrouter: 'OPENROUTER_API_KEY',
1146
+ huggingface:'HUGGINGFACE_API_KEY',
1147
+ replicate: 'REPLICATE_API_TOKEN',
1148
+ deepinfra: 'DEEPINFRA_API_KEY',
1149
+ fireworks: 'FIREWORKS_API_KEY',
848
1150
  codestral: 'CODESTRAL_API_KEY',
849
1151
  hyperbolic: 'HYPERBOLIC_API_KEY',
850
1152
  scaleway: 'SCALEWAY_API_KEY',
851
1153
  googleai: 'GOOGLE_API_KEY',
852
1154
  }
853
1155
 
1156
+ // πŸ“– Provider metadata used by the setup wizard and Settings details panel.
1157
+ // πŸ“– Keeps signup links + rate limits centralized so UI stays consistent.
1158
+ const PROVIDER_METADATA = {
1159
+ nvidia: {
1160
+ label: 'NVIDIA NIM',
1161
+ color: chalk.rgb(118, 185, 0),
1162
+ signupUrl: 'https://build.nvidia.com',
1163
+ signupHint: 'Profile β†’ API Keys β†’ Generate',
1164
+ rateLimits: 'Free tier (provider quota by model)',
1165
+ },
1166
+ groq: {
1167
+ label: 'Groq',
1168
+ color: chalk.rgb(249, 103, 20),
1169
+ signupUrl: 'https://console.groq.com/keys',
1170
+ signupHint: 'API Keys β†’ Create API Key',
1171
+ rateLimits: 'Free dev tier (provider quota)',
1172
+ },
1173
+ cerebras: {
1174
+ label: 'Cerebras',
1175
+ color: chalk.rgb(0, 180, 255),
1176
+ signupUrl: 'https://cloud.cerebras.ai',
1177
+ signupHint: 'API Keys β†’ Create',
1178
+ rateLimits: 'Free dev tier (provider quota)',
1179
+ },
1180
+ sambanova: {
1181
+ label: 'SambaNova',
1182
+ color: chalk.rgb(255, 165, 0),
1183
+ signupUrl: 'https://sambanova.ai/developers',
1184
+ signupHint: 'Developers portal β†’ Create API key',
1185
+ rateLimits: 'Dev tier generous quota',
1186
+ },
1187
+ openrouter: {
1188
+ label: 'OpenRouter',
1189
+ color: chalk.rgb(120, 80, 255),
1190
+ signupUrl: 'https://openrouter.ai/keys',
1191
+ signupHint: 'API Keys β†’ Create',
1192
+ rateLimits: '50 req/day, 20/min (:free shared quota)',
1193
+ },
1194
+ huggingface: {
1195
+ label: 'Hugging Face Inference',
1196
+ color: chalk.rgb(255, 182, 0),
1197
+ signupUrl: 'https://huggingface.co/settings/tokens',
1198
+ signupHint: 'Settings β†’ Access Tokens',
1199
+ rateLimits: 'Free monthly credits (~$0.10)',
1200
+ },
1201
+ replicate: {
1202
+ label: 'Replicate',
1203
+ color: chalk.rgb(120, 160, 255),
1204
+ signupUrl: 'https://replicate.com/account/api-tokens',
1205
+ signupHint: 'Account β†’ API Tokens',
1206
+ rateLimits: 'Developer free quota',
1207
+ },
1208
+ deepinfra: {
1209
+ label: 'DeepInfra',
1210
+ color: chalk.rgb(0, 180, 140),
1211
+ signupUrl: 'https://deepinfra.com/login',
1212
+ signupHint: 'Login β†’ API keys',
1213
+ rateLimits: 'Free dev tier (low-latency quota)',
1214
+ },
1215
+ fireworks: {
1216
+ label: 'Fireworks AI',
1217
+ color: chalk.rgb(255, 80, 50),
1218
+ signupUrl: 'https://fireworks.ai',
1219
+ signupHint: 'Create account β†’ Generate API key',
1220
+ rateLimits: '$1 free credits (new dev accounts)',
1221
+ },
1222
+ codestral: {
1223
+ label: 'Mistral Codestral',
1224
+ color: chalk.rgb(255, 100, 100),
1225
+ signupUrl: 'https://codestral.mistral.ai',
1226
+ signupHint: 'API Keys β†’ Create',
1227
+ rateLimits: '30 req/min, 2000/day',
1228
+ },
1229
+ hyperbolic: {
1230
+ label: 'Hyperbolic',
1231
+ color: chalk.rgb(0, 200, 150),
1232
+ signupUrl: 'https://app.hyperbolic.ai/settings',
1233
+ signupHint: 'Settings β†’ API Keys',
1234
+ rateLimits: '$1 free trial credits',
1235
+ },
1236
+ scaleway: {
1237
+ label: 'Scaleway',
1238
+ color: chalk.rgb(130, 0, 250),
1239
+ signupUrl: 'https://console.scaleway.com/iam/api-keys',
1240
+ signupHint: 'IAM β†’ API Keys',
1241
+ rateLimits: '1M free tokens',
1242
+ },
1243
+ googleai: {
1244
+ label: 'Google AI Studio',
1245
+ color: chalk.rgb(66, 133, 244),
1246
+ signupUrl: 'https://aistudio.google.com/apikey',
1247
+ signupHint: 'Get API key',
1248
+ rateLimits: '14.4K req/day, 30/min',
1249
+ },
1250
+ }
1251
+
854
1252
  // πŸ“– OpenCode config location varies by platform
855
1253
  // πŸ“– Windows: %APPDATA%\opencode\opencode.json (or sometimes ~/.config/opencode)
856
1254
  // πŸ“– macOS/Linux: ~/.config/opencode/opencode.json
@@ -860,6 +1258,45 @@ const OPENCODE_CONFIG = isWindows
860
1258
 
861
1259
  // πŸ“– Fallback to .config on Windows if AppData doesn't exist
862
1260
  const OPENCODE_CONFIG_FALLBACK = join(homedir(), '.config', 'opencode', 'opencode.json')
1261
+ const OPENCODE_PORT_RANGE_START = 4096
1262
+ const OPENCODE_PORT_RANGE_END = 5096
1263
+
1264
+ // πŸ“– isTcpPortAvailable: checks if a local TCP port is free for OpenCode.
1265
+ // πŸ“– Used to avoid tmux sub-agent port conflicts when multiple projects run in parallel.
1266
+ function isTcpPortAvailable(port) {
1267
+ return new Promise((resolve) => {
1268
+ const server = createServer()
1269
+ server.once('error', () => resolve(false))
1270
+ server.once('listening', () => {
1271
+ server.close(() => resolve(true))
1272
+ })
1273
+ server.listen(port)
1274
+ })
1275
+ }
1276
+
1277
+ // πŸ“– resolveOpenCodeTmuxPort: selects a safe port for OpenCode when inside tmux.
1278
+ // πŸ“– Priority:
1279
+ // πŸ“– 1) OPENCODE_PORT from env (if valid and available)
1280
+ // πŸ“– 2) First available port in 4096-5095
1281
+ async function resolveOpenCodeTmuxPort() {
1282
+ const envPortRaw = process.env.OPENCODE_PORT
1283
+ const envPort = Number.parseInt(envPortRaw || '', 10)
1284
+
1285
+ if (Number.isInteger(envPort) && envPort > 0 && envPort <= 65535) {
1286
+ if (await isTcpPortAvailable(envPort)) {
1287
+ return { port: envPort, source: 'env' }
1288
+ }
1289
+ console.log(chalk.yellow(` ⚠ OPENCODE_PORT=${envPort} is already in use; selecting another port for this run.`))
1290
+ }
1291
+
1292
+ for (let port = OPENCODE_PORT_RANGE_START; port < OPENCODE_PORT_RANGE_END; port++) {
1293
+ if (await isTcpPortAvailable(port)) {
1294
+ return { port, source: 'auto' }
1295
+ }
1296
+ }
1297
+
1298
+ return null
1299
+ }
863
1300
 
864
1301
  function getOpenCodeConfigPath() {
865
1302
  if (existsSync(OPENCODE_CONFIG)) return OPENCODE_CONFIG
@@ -910,10 +1347,30 @@ async function spawnOpenCode(args, providerKey, fcmConfig) {
910
1347
  const envVarName = ENV_VAR_NAMES[providerKey]
911
1348
  const resolvedKey = getApiKey(fcmConfig, providerKey)
912
1349
  const childEnv = { ...process.env }
1350
+ const finalArgs = [...args]
1351
+ const hasExplicitPortArg = finalArgs.includes('--port')
913
1352
  if (envVarName && resolvedKey) childEnv[envVarName] = resolvedKey
914
1353
 
1354
+ // πŸ“– In tmux, OpenCode sub-agents need a listening port to open extra panes.
1355
+ // πŸ“– We auto-pick one if the user did not provide --port explicitly.
1356
+ if (process.env.TMUX && !hasExplicitPortArg) {
1357
+ const tmuxPort = await resolveOpenCodeTmuxPort()
1358
+ if (tmuxPort) {
1359
+ const portValue = String(tmuxPort.port)
1360
+ childEnv.OPENCODE_PORT = portValue
1361
+ finalArgs.push('--port', portValue)
1362
+ if (tmuxPort.source === 'env') {
1363
+ console.log(chalk.dim(` πŸ“Ί tmux detected β€” using OPENCODE_PORT=${portValue}.`))
1364
+ } else {
1365
+ console.log(chalk.dim(` πŸ“Ί tmux detected β€” using OpenCode port ${portValue} for sub-agent panes.`))
1366
+ }
1367
+ } else {
1368
+ console.log(chalk.yellow(` ⚠ tmux detected but no free OpenCode port found in ${OPENCODE_PORT_RANGE_START}-${OPENCODE_PORT_RANGE_END - 1}; launching without --port.`))
1369
+ }
1370
+ }
1371
+
915
1372
  const { spawn } = await import('child_process')
916
- const child = spawn('opencode', args, {
1373
+ const child = spawn('opencode', finalArgs, {
917
1374
  stdio: 'inherit',
918
1375
  shell: true,
919
1376
  detached: false,
@@ -936,7 +1393,7 @@ async function spawnOpenCode(args, providerKey, fcmConfig) {
936
1393
 
937
1394
  // ─── Start OpenCode ────────────────────────────────────────────────────────────
938
1395
  // πŸ“– Launches OpenCode with the selected model.
939
- // πŸ“– Handles all 3 providers: nvidia (needs custom provider config), groq & cerebras (built-in in OpenCode).
1396
+ // πŸ“– Handles nvidia + all OpenAI-compatible providers defined in sources.js.
940
1397
  // πŸ“– For nvidia: checks if NIM is configured, sets provider.models entry, spawns with nvidia/model-id.
941
1398
  // πŸ“– For groq/cerebras: OpenCode has built-in support -- just sets model in config and spawns.
942
1399
  // πŸ“– Model format: { modelId, label, tier, providerKey }
@@ -1024,6 +1481,14 @@ After installation, you can use: opencode --model ${modelRef}`
1024
1481
  await spawnOpenCode([], providerKey, fcmConfig)
1025
1482
  }
1026
1483
  } else {
1484
+ if (providerKey === 'replicate') {
1485
+ console.log(chalk.yellow(' ⚠ Replicate models are monitor-only for now in OpenCode mode.'))
1486
+ console.log(chalk.dim(' Reason: Replicate uses /v1/predictions instead of OpenAI chat-completions.'))
1487
+ console.log(chalk.dim(' You can still benchmark this model in the TUI and use other providers for OpenCode launch.'))
1488
+ console.log()
1489
+ return
1490
+ }
1491
+
1027
1492
  // πŸ“– Groq: built-in OpenCode provider -- needs provider block with apiKey in opencode.json.
1028
1493
  // πŸ“– Cerebras: NOT built-in -- needs @ai-sdk/openai-compatible + baseURL, like NVIDIA.
1029
1494
  // πŸ“– Both need the model registered in provider.<key>.models so OpenCode can find it.
@@ -1080,6 +1545,36 @@ After installation, you can use: opencode --model ${modelRef}`
1080
1545
  },
1081
1546
  models: {}
1082
1547
  }
1548
+ } else if (providerKey === 'huggingface') {
1549
+ config.provider.huggingface = {
1550
+ npm: '@ai-sdk/openai-compatible',
1551
+ name: 'Hugging Face Inference',
1552
+ options: {
1553
+ baseURL: 'https://router.huggingface.co/v1',
1554
+ apiKey: '{env:HUGGINGFACE_API_KEY}'
1555
+ },
1556
+ models: {}
1557
+ }
1558
+ } else if (providerKey === 'deepinfra') {
1559
+ config.provider.deepinfra = {
1560
+ npm: '@ai-sdk/openai-compatible',
1561
+ name: 'DeepInfra',
1562
+ options: {
1563
+ baseURL: 'https://api.deepinfra.com/v1/openai',
1564
+ apiKey: '{env:DEEPINFRA_API_KEY}'
1565
+ },
1566
+ models: {}
1567
+ }
1568
+ } else if (providerKey === 'fireworks') {
1569
+ config.provider.fireworks = {
1570
+ npm: '@ai-sdk/openai-compatible',
1571
+ name: 'Fireworks AI',
1572
+ options: {
1573
+ baseURL: 'https://api.fireworks.ai/inference/v1',
1574
+ apiKey: '{env:FIREWORKS_API_KEY}'
1575
+ },
1576
+ models: {}
1577
+ }
1083
1578
  } else if (providerKey === 'codestral') {
1084
1579
  config.provider.codestral = {
1085
1580
  npm: '@ai-sdk/openai-compatible',
@@ -1155,7 +1650,7 @@ After installation, you can use: opencode --model ${modelRef}`
1155
1650
  // ─── Start OpenCode Desktop ─────────────────────────────────────────────────────
1156
1651
  // πŸ“– startOpenCodeDesktop: Same config logic as startOpenCode, but opens the Desktop app.
1157
1652
  // πŸ“– OpenCode Desktop shares config at the same location as CLI.
1158
- // πŸ“– Handles all 3 providers: nvidia (needs custom provider config), groq & cerebras (built-in).
1653
+ // πŸ“– Handles nvidia + all OpenAI-compatible providers defined in sources.js.
1159
1654
  // πŸ“– No need to wait for exit β€” Desktop app stays open independently.
1160
1655
  async function startOpenCodeDesktop(model, fcmConfig) {
1161
1656
  const providerKey = model.providerKey ?? 'nvidia'
@@ -1256,6 +1751,14 @@ ${isWindows ? 'set NVIDIA_API_KEY=your_key_here' : 'export NVIDIA_API_KEY=your_k
1256
1751
  console.log()
1257
1752
  }
1258
1753
  } else {
1754
+ if (providerKey === 'replicate') {
1755
+ console.log(chalk.yellow(' ⚠ Replicate models are monitor-only for now in OpenCode Desktop mode.'))
1756
+ console.log(chalk.dim(' Reason: Replicate uses /v1/predictions instead of OpenAI chat-completions.'))
1757
+ console.log(chalk.dim(' You can still benchmark this model in the TUI and use other providers for Desktop launch.'))
1758
+ console.log()
1759
+ return
1760
+ }
1761
+
1259
1762
  // πŸ“– Groq: built-in OpenCode provider β€” needs provider block with apiKey in opencode.json.
1260
1763
  // πŸ“– Cerebras: NOT built-in β€” needs @ai-sdk/openai-compatible + baseURL, like NVIDIA.
1261
1764
  // πŸ“– Both need the model registered in provider.<key>.models so OpenCode can find it.
@@ -1310,6 +1813,36 @@ ${isWindows ? 'set NVIDIA_API_KEY=your_key_here' : 'export NVIDIA_API_KEY=your_k
1310
1813
  },
1311
1814
  models: {}
1312
1815
  }
1816
+ } else if (providerKey === 'huggingface') {
1817
+ config.provider.huggingface = {
1818
+ npm: '@ai-sdk/openai-compatible',
1819
+ name: 'Hugging Face Inference',
1820
+ options: {
1821
+ baseURL: 'https://router.huggingface.co/v1',
1822
+ apiKey: '{env:HUGGINGFACE_API_KEY}'
1823
+ },
1824
+ models: {}
1825
+ }
1826
+ } else if (providerKey === 'deepinfra') {
1827
+ config.provider.deepinfra = {
1828
+ npm: '@ai-sdk/openai-compatible',
1829
+ name: 'DeepInfra',
1830
+ options: {
1831
+ baseURL: 'https://api.deepinfra.com/v1/openai',
1832
+ apiKey: '{env:DEEPINFRA_API_KEY}'
1833
+ },
1834
+ models: {}
1835
+ }
1836
+ } else if (providerKey === 'fireworks') {
1837
+ config.provider.fireworks = {
1838
+ npm: '@ai-sdk/openai-compatible',
1839
+ name: 'Fireworks AI',
1840
+ options: {
1841
+ baseURL: 'https://api.fireworks.ai/inference/v1',
1842
+ apiKey: '{env:FIREWORKS_API_KEY}'
1843
+ },
1844
+ models: {}
1845
+ }
1313
1846
  } else if (providerKey === 'codestral') {
1314
1847
  config.provider.codestral = {
1315
1848
  npm: '@ai-sdk/openai-compatible',
@@ -1521,7 +2054,7 @@ async function runFiableMode(config) {
1521
2054
  const pingPromises = results.map(r => {
1522
2055
  const rApiKey = getApiKey(config, r.providerKey)
1523
2056
  const url = sources[r.providerKey]?.url
1524
- return ping(rApiKey, r.modelId, url).then(({ code, ms }) => {
2057
+ return ping(rApiKey, r.modelId, r.providerKey, url).then(({ code, ms }) => {
1525
2058
  r.pings.push({ ms, code })
1526
2059
  if (code === '200') {
1527
2060
  r.status = 'up'
@@ -1585,6 +2118,7 @@ async function main() {
1585
2118
 
1586
2119
  // πŸ“– Load JSON config (auto-migrates old plain-text ~/.free-coding-models if needed)
1587
2120
  const config = loadConfig()
2121
+ ensureTelemetryConfig(config)
1588
2122
 
1589
2123
  // πŸ“– Check if any provider has a key β€” if not, run the first-time setup wizard
1590
2124
  const hasAnyKey = Object.keys(sources).some(pk => !!getApiKey(config, pk))
@@ -1600,9 +2134,27 @@ async function main() {
1600
2134
  }
1601
2135
  }
1602
2136
 
2137
+ // πŸ“– Ask analytics consent only when not explicitly controlled by env or CLI flag.
2138
+ await promptTelemetryConsent(config, cliArgs)
2139
+
1603
2140
  // πŸ“– Backward-compat: keep apiKey var for startOpenClaw() which still needs it
1604
2141
  let apiKey = getApiKey(config, 'nvidia')
1605
2142
 
2143
+ // πŸ“– Default mode: OpenCode CLI
2144
+ let mode = 'opencode'
2145
+ if (cliArgs.openClawMode) mode = 'openclaw'
2146
+ else if (cliArgs.openCodeDesktopMode) mode = 'opencode-desktop'
2147
+ else if (cliArgs.openCodeMode) mode = 'opencode'
2148
+
2149
+ // πŸ“– Track app opening early so fast exits are still counted.
2150
+ // πŸ“– Must run before update checks because npm registry lookups can add startup delay.
2151
+ void sendUsageTelemetry(config, cliArgs, {
2152
+ event: 'app_start',
2153
+ version: LOCAL_VERSION,
2154
+ mode,
2155
+ ts: new Date().toISOString(),
2156
+ })
2157
+
1606
2158
  // πŸ“– Check for updates in the background
1607
2159
  let latestVersion = null
1608
2160
  try {
@@ -1611,9 +2163,6 @@ async function main() {
1611
2163
  // Silently fail - don't block the app if npm registry is unreachable
1612
2164
  }
1613
2165
 
1614
- // πŸ“– Default mode: OpenCode CLI
1615
- let mode = 'opencode'
1616
-
1617
2166
  // πŸ“– Show update notification menu if a new version is available
1618
2167
  if (latestVersion) {
1619
2168
  const action = await promptUpdateNotification(latestVersion)
@@ -1643,6 +2192,7 @@ async function main() {
1643
2192
 
1644
2193
  // πŸ“– Build results from MODELS β€” only include enabled providers
1645
2194
  // πŸ“– Each result gets providerKey so ping() knows which URL + API key to use
2195
+
1646
2196
  let results = MODELS
1647
2197
  .filter(([,,,,,providerKey]) => isProviderEnabled(config, providerKey))
1648
2198
  .map(([modelId, label, tier, sweScore, ctx, providerKey], i) => ({
@@ -1657,7 +2207,7 @@ async function main() {
1657
2207
  // πŸ“– Called after every cursor move, sort change, and terminal resize.
1658
2208
  const adjustScrollOffset = (st) => {
1659
2209
  const total = st.visibleSorted ? st.visibleSorted.length : st.results.filter(r => !r.hidden).length
1660
- let maxSlots = st.terminalRows - 10 // 5 header + 5 footer
2210
+ let maxSlots = st.terminalRows - TABLE_FIXED_LINES
1661
2211
  if (maxSlots < 1) maxSlots = 1
1662
2212
  if (total <= maxSlots) { st.scrollOffset = 0; return }
1663
2213
  // Ensure cursor is not above the visible window
@@ -1754,18 +2304,21 @@ async function main() {
1754
2304
  // πŸ“– Key "T" in settings = test API key for selected provider.
1755
2305
  function renderSettings() {
1756
2306
  const providerKeys = Object.keys(sources)
2307
+ const telemetryRowIdx = providerKeys.length
1757
2308
  const EL = '\x1b[K'
1758
2309
  const lines = []
1759
2310
 
1760
2311
  lines.push('')
1761
2312
  lines.push(` ${chalk.bold('βš™ Settings')} ${chalk.dim('β€” free-coding-models v' + LOCAL_VERSION)}`)
1762
2313
  lines.push('')
1763
- lines.push(` ${chalk.bold('Providers')}`)
2314
+ lines.push(` ${chalk.bold('🧩 Providers')}`)
2315
+ lines.push(` ${chalk.dim(' ' + '─'.repeat(112))}`)
1764
2316
  lines.push('')
1765
2317
 
1766
2318
  for (let i = 0; i < providerKeys.length; i++) {
1767
2319
  const pk = providerKeys[i]
1768
2320
  const src = sources[pk]
2321
+ const meta = PROVIDER_METADATA[pk] || {}
1769
2322
  const isCursor = i === state.settingsCursor
1770
2323
  const enabled = isProviderEnabled(state.config, pk)
1771
2324
  const keyVal = state.config.apiKeys?.[pk] ?? ''
@@ -1789,20 +2342,50 @@ async function main() {
1789
2342
  if (testResult === 'pending') testBadge = chalk.yellow('[Testing…]')
1790
2343
  else if (testResult === 'ok') testBadge = chalk.greenBright('[Test βœ…]')
1791
2344
  else if (testResult === 'fail') testBadge = chalk.red('[Test ❌]')
2345
+ const rateSummary = chalk.dim((meta.rateLimits || 'No limit info').slice(0, 36))
1792
2346
 
1793
- const enabledBadge = enabled ? chalk.greenBright('βœ…') : chalk.dim('⬜')
1794
- const providerName = chalk.bold(src.name.padEnd(10))
2347
+ const enabledBadge = enabled ? chalk.greenBright('βœ…') : chalk.redBright('❌')
2348
+ const providerName = chalk.bold((meta.label || src.name || pk).slice(0, 22).padEnd(22))
1795
2349
  const bullet = isCursor ? chalk.bold.cyan(' ❯ ') : chalk.dim(' ')
1796
2350
 
1797
- const row = `${bullet}[ ${enabledBadge} ] ${providerName} ${keyDisplay.padEnd(30)} ${testBadge}`
2351
+ const row = `${bullet}[ ${enabledBadge} ] ${providerName} ${keyDisplay.padEnd(30)} ${testBadge} ${rateSummary}`
1798
2352
  lines.push(isCursor ? chalk.bgRgb(30, 30, 60)(row) : row)
1799
2353
  }
1800
2354
 
2355
+ lines.push('')
2356
+ const selectedProviderKey = providerKeys[Math.min(state.settingsCursor, providerKeys.length - 1)]
2357
+ const selectedSource = sources[selectedProviderKey]
2358
+ const selectedMeta = PROVIDER_METADATA[selectedProviderKey] || {}
2359
+ if (selectedSource && state.settingsCursor < telemetryRowIdx) {
2360
+ const selectedKey = getApiKey(state.config, selectedProviderKey)
2361
+ const setupStatus = selectedKey ? chalk.green('API key detected βœ…') : chalk.yellow('API key missing ⚠')
2362
+ lines.push(` ${chalk.bold('Setup Instructions')} β€” ${selectedMeta.label || selectedSource.name || selectedProviderKey}`)
2363
+ lines.push(chalk.dim(` 1) Create a ${selectedMeta.label || selectedSource.name} account: ${selectedMeta.signupUrl || 'signup link missing'}`))
2364
+ lines.push(chalk.dim(` 2) ${selectedMeta.signupHint || 'Generate an API key and paste it with Enter on this row'}`))
2365
+ lines.push(chalk.dim(` 3) Press ${chalk.yellow('T')} to test your key. Status: ${setupStatus}`))
2366
+ lines.push('')
2367
+ }
2368
+
2369
+ lines.push(` ${chalk.bold('πŸ“Š Analytics')}`)
2370
+ lines.push(` ${chalk.dim(' ' + '─'.repeat(112))}`)
2371
+ lines.push('')
2372
+
2373
+ const telemetryCursor = state.settingsCursor === telemetryRowIdx
2374
+ const telemetryEnabled = state.config.telemetry?.enabled === true
2375
+ const telemetryStatus = telemetryEnabled ? chalk.greenBright('βœ… Enabled') : chalk.redBright('❌ Disabled')
2376
+ const telemetryRowBullet = telemetryCursor ? chalk.bold.cyan(' ❯ ') : chalk.dim(' ')
2377
+ const telemetryEnv = parseTelemetryEnv(process.env.FREE_CODING_MODELS_TELEMETRY)
2378
+ const telemetrySource = telemetryEnv === null
2379
+ ? chalk.dim('[Config]')
2380
+ : chalk.yellow('[Env override]')
2381
+ const telemetryRow = `${telemetryRowBullet}${chalk.bold('Anonymous usage analytics').padEnd(44)} ${telemetryStatus} ${telemetrySource}`
2382
+ lines.push(telemetryCursor ? chalk.bgRgb(30, 30, 60)(telemetryRow) : telemetryRow)
2383
+
1801
2384
  lines.push('')
1802
2385
  if (state.settingsEditMode) {
1803
2386
  lines.push(chalk.dim(' Type API key β€’ Enter Save β€’ Esc Cancel'))
1804
2387
  } else {
1805
- lines.push(chalk.dim(' ↑↓ Navigate β€’ Enter Edit key β€’ Space Toggle enabled β€’ T Test key β€’ Esc Close'))
2388
+ lines.push(chalk.dim(' ↑↓ Navigate β€’ Enter Edit key / Toggle analytics β€’ Space Toggle enabled β€’ T Test key β€’ Esc Close'))
1806
2389
  }
1807
2390
  lines.push('')
1808
2391
 
@@ -1838,7 +2421,7 @@ async function main() {
1838
2421
  lines.push(` ${chalk.yellow('W')} Decrease ping interval (faster)`)
1839
2422
  lines.push(` ${chalk.yellow('X')} Increase ping interval (slower)`)
1840
2423
  lines.push(` ${chalk.yellow('Z')} Cycle launch mode ${chalk.dim('(OpenCode CLI β†’ OpenCode Desktop β†’ OpenClaw)')}`)
1841
- lines.push(` ${chalk.yellow('P')} Open settings ${chalk.dim('(manage API keys per provider, enable/disable, test)')}`)
2424
+ lines.push(` ${chalk.yellow('P')} Open settings ${chalk.dim('(manage API keys, provider toggles, analytics toggle)')}`)
1842
2425
  lines.push(` ${chalk.yellow('K')} / ${chalk.yellow('Esc')} Show/hide this help`)
1843
2426
  lines.push(` ${chalk.yellow('Ctrl+C')} Exit`)
1844
2427
  lines.push('')
@@ -1861,7 +2444,7 @@ async function main() {
1861
2444
  if (!testModel) { state.settingsTestResults[providerKey] = 'fail'; return }
1862
2445
 
1863
2446
  state.settingsTestResults[providerKey] = 'pending'
1864
- const { code } = await ping(testKey, testModel, src.url)
2447
+ const { code } = await ping(testKey, testModel, providerKey, src.url)
1865
2448
  state.settingsTestResults[providerKey] = code === '200' ? 'ok' : 'fail'
1866
2449
  }
1867
2450
 
@@ -1892,6 +2475,7 @@ async function main() {
1892
2475
  // ─── Settings overlay keyboard handling ───────────────────────────────────
1893
2476
  if (state.settingsOpen) {
1894
2477
  const providerKeys = Object.keys(sources)
2478
+ const telemetryRowIdx = providerKeys.length
1895
2479
 
1896
2480
  // πŸ“– Edit mode: capture typed characters for the API key
1897
2481
  if (state.settingsEditMode) {
@@ -1953,12 +2537,20 @@ async function main() {
1953
2537
  return
1954
2538
  }
1955
2539
 
1956
- if (key.name === 'down' && state.settingsCursor < providerKeys.length - 1) {
2540
+ if (key.name === 'down' && state.settingsCursor < telemetryRowIdx) {
1957
2541
  state.settingsCursor++
1958
2542
  return
1959
2543
  }
1960
2544
 
1961
2545
  if (key.name === 'return') {
2546
+ if (state.settingsCursor === telemetryRowIdx) {
2547
+ ensureTelemetryConfig(state.config)
2548
+ state.config.telemetry.enabled = state.config.telemetry.enabled !== true
2549
+ state.config.telemetry.consentVersion = TELEMETRY_CONSENT_VERSION
2550
+ saveConfig(state.config)
2551
+ return
2552
+ }
2553
+
1962
2554
  // πŸ“– Enter edit mode for the selected provider's key
1963
2555
  const pk = providerKeys[state.settingsCursor]
1964
2556
  state.settingsEditBuffer = state.config.apiKeys?.[pk] ?? ''
@@ -1967,6 +2559,14 @@ async function main() {
1967
2559
  }
1968
2560
 
1969
2561
  if (key.name === 'space') {
2562
+ if (state.settingsCursor === telemetryRowIdx) {
2563
+ ensureTelemetryConfig(state.config)
2564
+ state.config.telemetry.enabled = state.config.telemetry.enabled !== true
2565
+ state.config.telemetry.consentVersion = TELEMETRY_CONSENT_VERSION
2566
+ saveConfig(state.config)
2567
+ return
2568
+ }
2569
+
1970
2570
  // πŸ“– Toggle enabled/disabled for selected provider
1971
2571
  const pk = providerKeys[state.settingsCursor]
1972
2572
  if (!state.config.providers) state.config.providers = {}
@@ -1977,6 +2577,8 @@ async function main() {
1977
2577
  }
1978
2578
 
1979
2579
  if (key.name === 't') {
2580
+ if (state.settingsCursor === telemetryRowIdx) return
2581
+
1980
2582
  // πŸ“– Test the selected provider's key (fires a real ping)
1981
2583
  const pk = providerKeys[state.settingsCursor]
1982
2584
  testProviderKey(pk)
@@ -2181,7 +2783,7 @@ async function main() {
2181
2783
  const pingModel = async (r) => {
2182
2784
  const providerApiKey = getApiKey(state.config, r.providerKey) ?? null
2183
2785
  const providerUrl = sources[r.providerKey]?.url ?? sources.nvidia.url
2184
- const { code, ms } = await ping(providerApiKey, r.modelId, providerUrl)
2786
+ const { code, ms } = await ping(providerApiKey, r.modelId, r.providerKey, providerUrl)
2185
2787
 
2186
2788
  // πŸ“– Store ping result as object with ms and code
2187
2789
  // πŸ“– ms = actual response time (even for errors like 429)