free-coding-models 0.3.0 → 0.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,7 @@
1
1
  /**
2
2
  * @file lib/proxy-server.js
3
3
  * @description Multi-account rotation proxy server with SSE streaming,
4
- * token stats tracking, and persistent request logging.
4
+ * token stats tracking, Anthropic/OpenAI translation, and persistent request logging.
5
5
  *
6
6
  * Design:
7
7
  * - Binds to 127.0.0.1 only (never 0.0.0.0)
@@ -10,6 +10,8 @@
10
10
  * - x-ratelimit-* headers are stripped from all responses forwarded to clients
11
11
  * - Retry loop: first attempt uses sticky session fingerprint; subsequent
12
12
  * retries use fresh P2C to avoid hitting the same failed account
13
+ * - Claude-family aliases are resolved inside the proxy so Claude Code can
14
+ * keep emitting `claude-*` / `sonnet` / `haiku` style model ids safely
13
15
  *
14
16
  * @exports ProxyServer
15
17
  */
@@ -25,7 +27,13 @@ import {
25
27
  translateAnthropicToOpenAI,
26
28
  translateOpenAIToAnthropic,
27
29
  createAnthropicSSETransformer,
30
+ estimateAnthropicTokens,
28
31
  } from './anthropic-translator.js'
32
+ import {
33
+ translateResponsesToOpenAI,
34
+ translateOpenAIToResponses,
35
+ createResponsesSSETransformer,
36
+ } from './responses-translator.js'
29
37
 
30
38
  // ─── Helpers ─────────────────────────────────────────────────────────────────
31
39
 
@@ -100,6 +108,49 @@ function sendJson(res, statusCode, body) {
100
108
  res.end(json)
101
109
  }
102
110
 
111
// 📖 Strip the launcher's `fcm-proxy/` prefix and surrounding whitespace from a
// 📖 requested model id. Non-strings and blank ids resolve to null so callers
// 📖 can guard with a single truthiness check.
function normalizeRequestedModel(modelId) {
  if (typeof modelId !== 'string') return null
  const cleaned = modelId.trim()
  return cleaned === '' ? null : cleaned.replace(/^fcm-proxy\//, '')
}

// 📖 Classify a requested model id into a Claude family bucket
// 📖 ('default' | 'opus' | 'sonnet' | 'haiku'), or null when it is not a
// 📖 Claude-style alias. Mirrors free-claude-code's family routing: short
// 📖 aliases (`sonnet`, `opusplan`, `sonnet[1m]`) and full versioned ids
// 📖 (`claude-3-5-sonnet-*`) both resolve to their family.
function classifyClaudeVirtualModel(modelId) {
  const normalized = normalizeRequestedModel(modelId)
  if (normalized === null) return null

  const lower = normalized.toLowerCase()
  if (lower === 'default') return 'default'

  // 📖 Exact short-alias forms first (mutually exclusive patterns).
  const aliasPatterns = [
    ['opus', /^opus(?:plan)?(?:\[1m\])?$/],
    ['sonnet', /^sonnet(?:\[1m\])?$/],
    ['haiku', /^haiku$/],
  ]
  for (const [family, pattern] of aliasPatterns) {
    if (pattern.test(lower)) return family
  }

  // 📖 Full `claude-*` ids match by family substring; order matters —
  // 📖 opus wins over haiku, haiku over sonnet.
  if (!lower.startsWith('claude-')) return null
  for (const family of ['opus', 'haiku', 'sonnet']) {
    if (lower.includes(family)) return family
  }
  return null
}
137
+
138
/**
 * 📖 Validate a Bearer Authorization header against the proxy's API token.
 * 📖 Accepted shapes: `Bearer <token>` and `Bearer <token>:<model-slug>`,
 * 📖 where the slug is a launcher-encoded model hint. When no token is
 * 📖 configured, every request is authorized.
 * NOTE(review): token equality uses `===`, not a timing-safe compare — likely
 * acceptable since the server binds 127.0.0.1 only, but confirm.
 * @returns {{ authorized: boolean, modelHint: string|null }}
 */
function parseProxyAuthorizationHeader(authorization, expectedToken) {
  const deny = () => ({ authorized: false, modelHint: null })
  const allow = modelHint => ({ authorized: true, modelHint })

  if (!expectedToken) return allow(null)
  if (typeof authorization !== 'string') return deny()
  if (!authorization.startsWith('Bearer ')) return deny()

  const rawToken = authorization.slice('Bearer '.length).trim()
  if (rawToken === expectedToken) return allow(null)

  // 📖 `<token>:<slug>` form: only authorized when the slug normalizes cleanly.
  const hintPrefix = `${expectedToken}:`
  if (!rawToken.startsWith(hintPrefix)) return deny()

  const modelHint = normalizeRequestedModel(rawToken.slice(hintPrefix.length))
  return modelHint ? allow(modelHint) : deny()
}
153
+
103
154
  // ─── ProxyServer ─────────────────────────────────────────────────────────────
104
155
 
105
156
  export class ProxyServer {
@@ -188,11 +239,32 @@ export class ProxyServer {
188
239
  }
189
240
  }
190
241
 
242
+ _getAuthContext(req) {
243
+ return parseProxyAuthorizationHeader(req.headers.authorization, this._proxyApiKey)
244
+ }
245
+
191
246
  _isAuthorized(req) {
192
- if (!this._proxyApiKey) return true
193
- const authorization = req.headers.authorization
194
- if (typeof authorization !== 'string') return false
195
- return authorization === `Bearer ${this._proxyApiKey}`
247
+ return this._getAuthContext(req).authorized
248
+ }
249
+
250
+ _resolveAnthropicRequestedModel(modelId, authModelHint = null) {
251
+ const requestedModel = normalizeRequestedModel(modelId)
252
+ if (requestedModel && this._accountManager.hasAccountsForModel(requestedModel)) {
253
+ return requestedModel
254
+ }
255
+
256
+ // 📖 Claude Code still emits internal aliases / tier model ids for some
257
+ // 📖 background and helper paths. When the launcher encoded the selected
258
+ // 📖 proxy slug into the auth token, remap those virtual Claude ids here.
259
+ // 📖 This intentionally matches Claude families by substring so ids like
260
+ // 📖 `claude-3-5-sonnet-20241022` behave the same as `sonnet`.
261
+ if (authModelHint && this._accountManager.hasAccountsForModel(authModelHint)) {
262
+ if (!requestedModel || classifyClaudeVirtualModel(requestedModel)) {
263
+ return authModelHint
264
+ }
265
+ }
266
+
267
+ return requestedModel
196
268
  }
197
269
 
198
270
  // ── Request routing ────────────────────────────────────────────────────────
@@ -203,7 +275,8 @@ export class ProxyServer {
203
275
  return this._handleHealth(res)
204
276
  }
205
277
 
206
- if (!this._isAuthorized(req)) {
278
+ const authContext = this._getAuthContext(req)
279
+ if (!authContext.authorized) {
207
280
  return sendJson(res, 401, { error: 'Unauthorized' })
208
281
  }
209
282
 
@@ -221,13 +294,27 @@ export class ProxyServer {
221
294
  })
222
295
  } else if (req.method === 'POST' && req.url === '/v1/messages') {
223
296
  // 📖 Anthropic Messages API translation — enables Claude Code compatibility
224
- this._handleAnthropicMessages(req, res).catch(err => {
297
+ this._handleAnthropicMessages(req, res, authContext).catch(err => {
298
+ console.error('[proxy] Internal error:', err)
299
+ const status = err.statusCode === 413 ? 413 : 500
300
+ const msg = err.statusCode === 413 ? 'Request body too large' : 'Internal server error'
301
+ sendJson(res, status, { error: msg })
302
+ })
303
+ } else if (req.method === 'POST' && req.url === '/v1/messages/count_tokens') {
304
+ this._handleAnthropicCountTokens(req, res).catch(err => {
305
+ console.error('[proxy] Internal error:', err)
306
+ const status = err.statusCode === 413 ? 413 : 500
307
+ const msg = err.statusCode === 413 ? 'Request body too large' : 'Internal server error'
308
+ sendJson(res, status, { error: msg })
309
+ })
310
+ } else if (req.method === 'POST' && req.url === '/v1/responses') {
311
+ this._handleResponses(req, res).catch(err => {
225
312
  console.error('[proxy] Internal error:', err)
226
313
  const status = err.statusCode === 413 ? 413 : 500
227
314
  const msg = err.statusCode === 413 ? 'Request body too large' : 'Internal server error'
228
315
  sendJson(res, status, { error: msg })
229
316
  })
230
- } else if (req.method === 'POST' && (req.url === '/v1/completions' || req.url === '/v1/responses')) {
317
+ } else if (req.method === 'POST' && req.url === '/v1/completions') {
231
318
  // These legacy/alternative OpenAI endpoints are not supported by the proxy.
232
319
  // Return 501 (not 404) so callers get a clear signal instead of silently failing.
233
320
  sendJson(res, 501, {
@@ -244,19 +331,24 @@ export class ProxyServer {
244
331
  _handleModels(res) {
245
332
  const seen = new Set()
246
333
  const data = []
334
+ const models = []
247
335
  for (const acct of this._accounts) {
248
336
  const publicModelId = acct.proxyModelId || acct.modelId
249
337
  if (!seen.has(publicModelId)) {
250
338
  seen.add(publicModelId)
251
- data.push({
339
+ const modelEntry = {
252
340
  id: publicModelId,
341
+ slug: publicModelId,
342
+ name: publicModelId,
253
343
  object: 'model',
254
344
  created: Math.floor(Date.now() / 1000),
255
345
  owned_by: 'proxy',
256
- })
346
+ }
347
+ data.push(modelEntry)
348
+ models.push(modelEntry)
257
349
  }
258
350
  }
259
- sendJson(res, 200, { object: 'list', data })
351
+ sendJson(res, 200, { object: 'list', data, models })
260
352
  }
261
353
 
262
354
  // ── POST /v1/chat/completions ──────────────────────────────────────────────
@@ -708,7 +800,7 @@ export class ProxyServer {
708
800
  *
709
801
  * 📖 This makes Claude Code work natively through the FCM proxy.
710
802
  */
711
- async _handleAnthropicMessages(clientReq, clientRes) {
803
+ async _handleAnthropicMessages(clientReq, clientRes, authContext = { modelHint: null }) {
712
804
  const rawBody = await readBody(clientReq)
713
805
  let anthropicBody
714
806
  try {
@@ -719,6 +811,8 @@ export class ProxyServer {
719
811
 
720
812
  // 📖 Translate Anthropic → OpenAI
721
813
  const openaiBody = translateAnthropicToOpenAI(anthropicBody)
814
+ const resolvedModel = this._resolveAnthropicRequestedModel(openaiBody.model, authContext.modelHint)
815
+ if (resolvedModel) openaiBody.model = resolvedModel
722
816
  const isStreaming = openaiBody.stream === true
723
817
 
724
818
  if (isStreaming) {
@@ -730,6 +824,146 @@ export class ProxyServer {
730
824
  }
731
825
  }
732
826
 
827
+ /**
828
+ * 📖 Count tokens for Anthropic Messages requests without calling upstream.
829
+ * 📖 Claude Code uses this endpoint for budgeting / UI hints, so a fast local
830
+ * 📖 estimate is enough to keep the flow working through the proxy.
831
+ */
832
+ async _handleAnthropicCountTokens(clientReq, clientRes) {
833
+ const rawBody = await readBody(clientReq)
834
+ let anthropicBody
835
+ try {
836
+ anthropicBody = JSON.parse(rawBody)
837
+ } catch {
838
+ return sendJson(clientRes, 400, { error: { type: 'invalid_request_error', message: 'Invalid JSON body' } })
839
+ }
840
+
841
+ sendJson(clientRes, 200, {
842
+ input_tokens: estimateAnthropicTokens(anthropicBody),
843
+ })
844
+ }
845
+
846
+ /**
847
+ * 📖 Handle OpenAI Responses API requests by translating them to chat
848
+ * 📖 completions, forwarding through the existing proxy path, then converting
849
+ * 📖 the result back to the Responses wire format.
850
+ */
851
+ async _handleResponses(clientReq, clientRes) {
852
+ const rawBody = await readBody(clientReq)
853
+ let responsesBody
854
+ try {
855
+ responsesBody = JSON.parse(rawBody)
856
+ } catch {
857
+ return sendJson(clientRes, 400, { error: 'Invalid JSON body' })
858
+ }
859
+
860
+ const isStreaming = responsesBody.stream === true || String(clientReq.headers.accept || '').includes('text/event-stream')
861
+ const openaiBody = translateResponsesToOpenAI({ ...responsesBody, stream: isStreaming })
862
+
863
+ if (isStreaming) {
864
+ await this._handleResponsesStreaming(openaiBody, responsesBody.model, clientRes)
865
+ } else {
866
+ await this._handleResponsesJson(openaiBody, responsesBody.model, clientRes)
867
+ }
868
+ }
869
+
870
+ async _handleResponsesJson(openaiBody, requestModel, clientRes) {
871
+ const capturedChunks = []
872
+ let capturedStatusCode = 200
873
+ let capturedHeaders = {}
874
+
875
+ const fakeRes = {
876
+ headersSent: false,
877
+ destroyed: false,
878
+ socket: null,
879
+ writeHead(statusCode, headers) {
880
+ capturedStatusCode = statusCode
881
+ capturedHeaders = headers || {}
882
+ this.headersSent = true
883
+ },
884
+ write(chunk) { capturedChunks.push(chunk) },
885
+ end(data) {
886
+ if (data) capturedChunks.push(data)
887
+ },
888
+ on() { return this },
889
+ once() { return this },
890
+ emit() { return false },
891
+ destroy() { this.destroyed = true },
892
+ removeListener() { return this },
893
+ }
894
+
895
+ await this._handleChatCompletionsInternal(openaiBody, fakeRes)
896
+
897
+ const responseBody = capturedChunks.join('')
898
+ if (capturedStatusCode >= 200 && capturedStatusCode < 300) {
899
+ try {
900
+ const openaiResponse = JSON.parse(responseBody)
901
+ const responsesResponse = translateOpenAIToResponses(openaiResponse, requestModel)
902
+ sendJson(clientRes, 200, responsesResponse)
903
+ } catch {
904
+ sendJson(clientRes, capturedStatusCode, responseBody)
905
+ }
906
+ return
907
+ }
908
+
909
+ // 📖 Forward upstream-style JSON errors unchanged for OpenAI-compatible clients.
910
+ sendJson(clientRes, capturedStatusCode, responseBody)
911
+ }
912
+
913
+ async _handleResponsesStreaming(openaiBody, requestModel, clientRes) {
914
+ const { transform } = createResponsesSSETransformer(requestModel)
915
+ await this._handleResponsesStreamDirect(openaiBody, clientRes, transform)
916
+ }
917
+
918
  /**
   * 📖 Drive a streaming /v1/responses request through the account-rotation
   * 📖 retry loop, writing translated SSE events to the client via sseTransform.
   *
   * @param {object} openaiBody - chat-completions payload (already translated)
   * @param {http.ServerResponse} clientRes - client response the SSE flows into
   * @param {stream.Transform} sseTransform - OpenAI→Responses SSE transformer
   */
  async _handleResponsesStreamDirect(openaiBody, clientRes, sseTransform) {
    // 📖 Sticky-session fingerprint: hash of the last message only, so the same
    // 📖 conversation tail lands on the same account on the first attempt.
    const fingerprint = createHash('sha256')
      .update(JSON.stringify(openaiBody.messages?.slice(-1) ?? []))
      .digest('hex')
      .slice(0, 16)

    // 📖 Strip the launcher's public prefix before account lookup.
    const requestedModel = typeof openaiBody.model === 'string'
      ? openaiBody.model.replace(/^fcm-proxy\//, '')
      : undefined

    if (requestedModel && !this._accountManager.hasAccountsForModel(requestedModel)) {
      return sendJson(clientRes, 404, {
        error: 'Model not found',
        message: `Model '${requestedModel}' is not available.`,
      })
    }

    // 📖 Pipe is wired before the first upstream attempt; response headers are
    // 📖 written lazily by _forwardRequestForResponsesStream once a 2xx arrives.
    sseTransform.pipe(clientRes)

    for (let attempt = 0; attempt < this._retries; attempt++) {
      // 📖 Backoff with jitter; the delay table is clamped at its last entry.
      const delay = this._retryDelays[Math.min(attempt, this._retryDelays.length - 1)]
      if (delay > 0) await new Promise(r => setTimeout(r, delay + Math.random() * 100))

      // 📖 First attempt honours the sticky fingerprint; retries select fresh so
      // 📖 a just-failed account is not immediately re-picked.
      const selectOpts = attempt === 0
        ? { sessionFingerprint: fingerprint, requestedModel }
        : { requestedModel }
      const account = this._accountManager.selectAccount(selectOpts)
      if (!account) break

      const result = await this._forwardRequestForResponsesStream(account, openaiBody, sseTransform, clientRes)
      if (result.done) return

      const { statusCode, responseBody, responseHeaders, networkError } = result
      const classified = classifyError(
        networkError ? 0 : statusCode,
        responseBody || '',
        responseHeaders || {}
      )
      this._accountManager.recordFailure(account.id, classified, { providerKey: account.providerKey })
      if (!classified.shouldRetry) {
        // NOTE(review): sseTransform is piped into clientRes, so end() may close
        // clientRes before sendJson writes — confirm sendJson tolerates that.
        // NOTE(review): responseBody is already a serialized string here; verify
        // sendJson does not JSON-encode it a second time.
        sseTransform.end()
        return sendJson(clientRes, statusCode || 500, responseBody || JSON.stringify({ error: 'Upstream error' }))
      }
    }

    // 📖 No account produced a stream: close the transformer and report 503.
    sseTransform.end()
    sendJson(clientRes, 503, { error: 'All accounts exhausted or unavailable' })
  }
966
+
733
967
  /**
734
968
  * 📖 Handle non-streaming Anthropic Messages by internally dispatching to
735
969
  * chat completions logic and translating the JSON response back.
@@ -1002,6 +1236,95 @@ export class ProxyServer {
1002
1236
  })
1003
1237
  }
1004
1238
 
1239
  /**
   * 📖 Forward a streaming chat-completions request and translate the upstream
   * 📖 SSE stream into Responses API events on the fly.
   *
   * @param {object} account - selected upstream account (url, apiKey, modelId, id, providerKey)
   * @param {object} body - chat-completions payload; model is overridden per account
   * @param {stream.Transform} sseTransform - transformer already piped into clientRes
   * @param {http.ServerResponse} clientRes - client response (headers written here on 2xx)
   * @returns {Promise<{done: boolean, statusCode?: number, responseBody?: string,
   *   responseHeaders?: object, networkError?: boolean}>} done=true once streaming began
   */
  _forwardRequestForResponsesStream(account, body, sseTransform, clientRes) {
    // 📖 new Promise adapts Node's callback-style http client; resolve() is the
    // 📖 single exit for success, upstream error status, and network failure.
    return new Promise(resolve => {
      // 📖 Force streaming and pin the account's real model id upstream.
      const newBody = { ...body, model: account.modelId, stream: true }
      const bodyStr = JSON.stringify(newBody)
      const baseUrl = account.url.replace(/\/$/, '')
      let upstreamUrl
      try {
        upstreamUrl = new URL(baseUrl + '/chat/completions')
      } catch {
        return resolve({ done: false, statusCode: 0, responseBody: 'Invalid upstream URL', networkError: true })
      }

      const client = selectClient(account.url)
      const startTime = Date.now()
      const requestOptions = {
        hostname: upstreamUrl.hostname,
        port: upstreamUrl.port || (upstreamUrl.protocol === 'https:' ? 443 : 80),
        path: upstreamUrl.pathname + (upstreamUrl.search || ''),
        method: 'POST',
        headers: {
          'authorization': `Bearer ${account.apiKey}`,
          'content-type': 'application/json',
          'content-length': Buffer.byteLength(bodyStr),
        },
      }

      const upstreamReq = client.request(requestOptions, upstreamRes => {
        const { statusCode } = upstreamRes

        if (statusCode >= 200 && statusCode < 300) {
          // 📖 First 2xx wins: write SSE headers once, then stream through the transformer.
          if (!clientRes.headersSent) {
            clientRes.writeHead(200, {
              'content-type': 'text/event-stream',
              'cache-control': 'no-cache',
            })
          }

          // 📖 Propagate failures in either direction so neither leg leaks.
          upstreamRes.on('error', err => { if (!clientRes.destroyed) clientRes.destroy(err) })
          clientRes.on('error', () => { if (!upstreamRes.destroyed) upstreamRes.destroy() })

          // 📖 end:true lets the transformer (and thus clientRes) close when upstream ends.
          upstreamRes.pipe(sseTransform, { end: true })
          upstreamRes.on('end', () => {
            this._accountManager.recordSuccess(account.id, Date.now() - startTime)
          })

          // 📖 Client disconnect tears down the upstream leg.
          clientRes.on('close', () => {
            if (!upstreamRes.destroyed) upstreamRes.destroy()
            if (!upstreamReq.destroyed) upstreamReq.destroy()
          })

          resolve({ done: true })
        } else {
          // 📖 Non-2xx: buffer the body so the caller can classify and maybe retry
          // 📖 on another account.
          const chunks = []
          upstreamRes.on('data', chunk => chunks.push(chunk))
          upstreamRes.on('end', () => {
            resolve({
              done: false,
              statusCode,
              responseBody: Buffer.concat(chunks).toString(),
              responseHeaders: upstreamRes.headers,
              networkError: false,
            })
          })
        }
      })

      upstreamReq.on('error', err => {
        resolve({
          done: false,
          statusCode: 0,
          responseBody: err.message,
          responseHeaders: {},
          networkError: true,
        })
      })

      // 📖 destroy() fires the 'error' handler above, resolving as a network error.
      upstreamReq.setTimeout(this._upstreamTimeoutMs, () => {
        upstreamReq.destroy(new Error(`Upstream request timed out after ${this._upstreamTimeoutMs}ms`))
      })

      upstreamReq.write(bodyStr)
      upstreamReq.end()
    })
  }
1327
+
1005
1328
  /**
1006
1329
  * 📖 Internal version of chat completions handler that takes a pre-parsed body.
1007
1330
  * 📖 Used by the Anthropic JSON translation path to avoid re-parsing.
package/src/proxy-sync.js CHANGED
@@ -14,9 +14,10 @@
14
14
  * @functions
15
15
  * → syncProxyToTool(toolMode, proxyInfo, mergedModels) — write proxy endpoint to tool config
16
16
  * → cleanupToolConfig(toolMode) — remove all FCM entries from tool config
17
+ * → resolveProxySyncToolMode(toolMode) — normalize a live tool mode to a proxy-syncable target
17
18
  * → getProxySyncableTools() — list of tools that support proxy sync
18
19
  *
19
- * @exports syncProxyToTool, cleanupToolConfig, getProxySyncableTools, PROXY_SYNCABLE_TOOLS
20
+ * @exports syncProxyToTool, cleanupToolConfig, resolveProxySyncToolMode, getProxySyncableTools, PROXY_SYNCABLE_TOOLS
20
21
  *
21
22
  * @see src/endpoint-installer.js — per-provider direct install (Y key flow)
22
23
  * @see src/opencode-sync.js — OpenCode-specific sync (used internally by this module)
@@ -38,6 +39,8 @@ export const PROXY_SYNCABLE_TOOLS = [
38
39
  'aider', 'amp', 'qwen', 'claude-code', 'codex', 'openhands',
39
40
  ]
40
41
 
42
// 📖 Canonical proxy-syncable tool names; 'opencode-desktop' folds into
// 📖 'opencode' so both launcher modes share one config target.
const PROXY_SYNCABLE_CANONICAL = new Set(PROXY_SYNCABLE_TOOLS.map(tool => tool === 'opencode-desktop' ? 'opencode' : tool))
43
+
41
44
  // ─── Shared helpers ──────────────────────────────────────────────────────────
42
45
 
43
46
  function getDefaultPaths() {
@@ -120,6 +123,12 @@ function getDefaultMaxTokens(contextWindow) {
120
123
  return Math.max(4096, Math.min(contextWindow, 32768))
121
124
  }
122
125
 
126
/**
 * 📖 Normalize a live tool mode to its proxy-syncable canonical target.
 * 📖 'opencode-desktop' folds into 'opencode'; unknown or non-syncable
 * 📖 modes resolve to null.
 * @param {string} toolMode
 * @returns {string|null}
 */
export function resolveProxySyncToolMode(toolMode) {
  if (typeof toolMode !== 'string' || toolMode.length === 0) return null
  const canonical = toolMode === 'opencode-desktop' ? 'opencode' : toolMode
  if (PROXY_SYNCABLE_CANONICAL.has(canonical)) return canonical
  return null
}
131
+
123
132
  // ─── Per-tool sync functions ─────────────────────────────────────────────────
124
133
  // 📖 Each writes a single `fcm-proxy` provider entry with ALL models
125
134
 
@@ -358,8 +367,8 @@ function syncEnvTool(proxyInfo, mergedModels, toolMode) {
358
367
  * @returns {{ success: boolean, path?: string, modelCount?: number, error?: string }}
359
368
  */
360
369
  export function syncProxyToTool(toolMode, proxyInfo, mergedModels) {
361
- const canonical = toolMode === 'opencode-desktop' ? 'opencode' : toolMode
362
- if (!PROXY_SYNCABLE_TOOLS.includes(toolMode) && !PROXY_SYNCABLE_TOOLS.includes(canonical)) {
370
+ const canonical = resolveProxySyncToolMode(toolMode)
371
+ if (!canonical) {
363
372
  return { success: false, error: `Tool '${toolMode}' does not support proxy sync` }
364
373
  }
365
374
 
@@ -415,7 +424,10 @@ export function syncProxyToTool(toolMode, proxyInfo, mergedModels) {
415
424
  * @returns {{ success: boolean, error?: string }}
416
425
  */
417
426
  export function cleanupToolConfig(toolMode) {
418
- const canonical = toolMode === 'opencode-desktop' ? 'opencode' : toolMode
427
+ const canonical = resolveProxySyncToolMode(toolMode)
428
+ if (!canonical) {
429
+ return { success: false, error: `Tool '${toolMode}' does not support proxy cleanup` }
430
+ }
419
431
 
420
432
  try {
421
433
  const paths = getDefaultPaths()
@@ -150,10 +150,12 @@ export function sliceOverlayLines(lines, offset, terminalRows) {
150
150
 
151
151
  // 📖 calculateViewport: Computes the visible slice of model rows that fits in the terminal.
152
152
  // 📖 When scroll indicators are needed, they each consume 1 line from the model budget.
153
+ // 📖 `extraFixedLines` lets callers reserve temporary footer rows without shrinking the
154
+ // 📖 viewport permanently for the normal case.
153
155
  // 📖 Returns { startIdx, endIdx, hasAbove, hasBelow } for rendering.
154
- export function calculateViewport(terminalRows, scrollOffset, totalModels) {
156
+ export function calculateViewport(terminalRows, scrollOffset, totalModels, extraFixedLines = 0) {
155
157
  if (terminalRows <= 0) return { startIdx: 0, endIdx: totalModels, hasAbove: false, hasBelow: false }
156
- let maxSlots = terminalRows - TABLE_FIXED_LINES
158
+ let maxSlots = terminalRows - TABLE_FIXED_LINES - extraFixedLines
157
159
  if (maxSlots < 1) maxSlots = 1
158
160
  if (totalModels <= maxSlots) return { startIdx: 0, endIdx: totalModels, hasAbove: false, hasBelow: false }
159
161
 
@@ -15,6 +15,7 @@
15
15
  * - Smart badges (mode, tier filter, origin filter, profile)
16
16
  * - Footer J badge: green "Proxy On" / red "Proxy Off" indicator with direct overlay access
17
17
  * - Install-endpoints shortcut surfaced directly in the footer hints
18
+ * - Full-width red outdated-version banner when a newer npm release is known
18
19
  * - Distinct auth-failure vs missing-key health labels so configured providers stay honest
19
20
  *
20
21
  * → Functions:
@@ -40,7 +41,7 @@ import { TIER_COLOR } from './tier-colors.js'
40
41
  import { getAvg, getVerdict, getUptime, getStabilityScore, getVersionStatusInfo } from './utils.js'
41
42
  import { usagePlaceholderForProvider } from './ping.js'
42
43
  import { formatTokenTotalCompact } from './token-usage-reader.js'
43
- import { calculateViewport, sortResultsWithPinnedFavorites, padEndDisplay } from './render-helpers.js'
44
+ import { calculateViewport, sortResultsWithPinnedFavorites, padEndDisplay, displayWidth } from './render-helpers.js'
44
45
  import { getToolMeta } from './tool-metadata.js'
45
46
 
46
47
  const ACTIVE_FILTER_BG_BY_TIER = {
@@ -92,7 +93,7 @@ export function setActiveProxy(proxyInstance) {
92
93
  }
93
94
 
94
95
  // ─── renderTable: mode param controls footer hint text (opencode vs openclaw) ─────────
95
- export function renderTable(results, pendingPings, frame, cursor = null, sortColumn = 'avg', sortDirection = 'asc', pingInterval = PING_INTERVAL, lastPingTime = Date.now(), mode = 'opencode', tierFilterMode = 0, scrollOffset = 0, terminalRows = 0, terminalCols = 0, originFilterMode = 0, activeProfile = null, profileSaveMode = false, profileSaveBuffer = '', proxyStartupStatus = null, pingMode = 'normal', pingModeSource = 'auto', hideUnconfiguredModels = false, widthWarningStartedAt = null, widthWarningDismissed = false, widthWarningShowCount = 0, settingsUpdateState = 'idle', settingsUpdateLatestVersion = null, proxyEnabled = false, isOutdated = false, latestVersion = null) {
96
+ export function renderTable(results, pendingPings, frame, cursor = null, sortColumn = 'avg', sortDirection = 'asc', pingInterval = PING_INTERVAL, lastPingTime = Date.now(), mode = 'opencode', tierFilterMode = 0, scrollOffset = 0, terminalRows = 0, terminalCols = 0, originFilterMode = 0, activeProfile = null, profileSaveMode = false, profileSaveBuffer = '', proxyStartupStatus = null, pingMode = 'normal', pingModeSource = 'auto', hideUnconfiguredModels = false, widthWarningStartedAt = null, widthWarningDismissed = false, widthWarningShowCount = 0, settingsUpdateState = 'idle', settingsUpdateLatestVersion = null, proxyEnabled = false, startupLatestVersion = null, versionAlertsEnabled = true) {
96
97
  // 📖 Filter out hidden models for display
97
98
  const visibleResults = results.filter(r => !r.hidden)
98
99
 
@@ -140,7 +141,7 @@ export function renderTable(results, pendingPings, frame, cursor = null, sortCol
140
141
  : chalk.bold.rgb(0, 200, 255)
141
142
  const modeBadge = toolBadgeColor(' [ ') + chalk.yellow.bold('Z') + toolBadgeColor(` Tool : ${toolMeta.label} ]`)
142
143
  const activeHeaderBadge = (text, bg = [57, 255, 20], fg = [0, 0, 0]) => chalk.bgRgb(...bg).rgb(...fg).bold(` ${text} `)
143
- const versionStatus = getVersionStatusInfo(settingsUpdateState, settingsUpdateLatestVersion)
144
+ const versionStatus = getVersionStatusInfo(settingsUpdateState, settingsUpdateLatestVersion, startupLatestVersion, versionAlertsEnabled)
144
145
 
145
146
  // 📖 Tier filter badge shown when filtering is active (shows exact tier name)
146
147
  const TIER_CYCLE_NAMES = [null, 'S+', 'S', 'A+', 'A', 'A-', 'B+', 'B', 'C']
@@ -336,7 +337,8 @@ export function renderTable(results, pendingPings, frame, cursor = null, sortCol
336
337
  }
337
338
 
338
339
  // 📖 Viewport clipping: only render models that fit on screen
339
- const vp = calculateViewport(terminalRows, scrollOffset, sorted.length)
340
+ const extraFooterLines = versionStatus.isOutdated ? 1 : 0
341
+ const vp = calculateViewport(terminalRows, scrollOffset, sorted.length, extraFooterLines)
340
342
 
341
343
  if (vp.hasAbove) {
342
344
  lines.push(chalk.dim(` ... ${vp.startIdx} more above ...`))
@@ -652,40 +654,36 @@ export function renderTable(results, pendingPings, frame, cursor = null, sortCol
652
654
  hotkey('I', ' Feedback, bugs & requests')
653
655
  )
654
656
  // 📖 Proxy status is now shown via the J badge in line 2 above — no need for a dedicated line
655
- if (versionStatus.isOutdated) {
656
- const outdatedBadge = chalk.bgRed.bold.yellow(' This version is outdated . ')
657
- const latestLabel = chalk.redBright(` local v${LOCAL_VERSION} · latest v${versionStatus.latestVersion}`)
658
- lines.push(` ${outdatedBadge}${latestLabel}`)
659
- }
657
+ const footerLine =
658
+ chalk.rgb(255, 150, 200)(' Made with 💖 & by \x1b]8;;https://github.com/vava-nessa\x1b\\vava-nessa\x1b]8;;\x1b\\') +
659
+ chalk.dim(' • ') +
660
+ '⭐ ' +
661
+ chalk.yellow('\x1b]8;;https://github.com/vava-nessa/free-coding-models\x1b\\Star on GitHub\x1b]8;;\x1b\\') +
662
+ chalk.dim(' • ') +
663
+ '🤝 ' +
664
+ chalk.rgb(255, 165, 0)('\x1b]8;;https://github.com/vava-nessa/free-coding-models/graphs/contributors\x1b\\Contributors\x1b]8;;\x1b\\') +
665
+ chalk.dim(' • ') +
666
+ '☕ ' +
667
+ chalk.rgb(255, 200, 100)('\x1b]8;;https://buymeacoffee.com/vavanessadev\x1b\\Buy me a coffee\x1b]8;;\x1b\\') +
668
+ chalk.dim(' • ') +
669
+ '💬 ' +
670
+ chalk.rgb(200, 150, 255)('\x1b]8;;https://discord.gg/ZTNFHvvCkU\x1b\\Discord\x1b]8;;\x1b\\') +
671
+ chalk.dim(' → ') +
672
+ chalk.rgb(200, 150, 255)('https://discord.gg/ZTNFHvvCkU') +
673
+ chalk.dim(' • ') +
674
+ chalk.yellow('N') + chalk.dim(' Changelog') +
675
+ chalk.dim(' • ') +
676
+ chalk.dim('Ctrl+C Exit')
677
+ lines.push(footerLine)
660
678
 
661
- // 📖 Build footer line, with OUTDATED warning if isOutdated is true
662
- let footerLine = ''
663
- if (isOutdated) {
664
- // 📖 Show OUTDATED in red background, high contrast warning
665
- footerLine = chalk.bgRed.bold.white(' ⚠ OUTDATED version, please update with "npm i -g free-coding-models@latest" ')
666
- } else {
667
- footerLine =
668
- chalk.rgb(255, 150, 200)(' Made with 💖 & ☕ by \x1b]8;;https://github.com/vava-nessa\x1b\\vava-nessa\x1b]8;;\x1b\\') +
669
- chalk.dim(' • ') +
670
- '⭐ ' +
671
- chalk.yellow('\x1b]8;;https://github.com/vava-nessa/free-coding-models\x1b\\Star on GitHub\x1b]8;;\x1b\\') +
672
- chalk.dim(' • ') +
673
- '🤝 ' +
674
- chalk.rgb(255, 165, 0)('\x1b]8;;https://github.com/vava-nessa/free-coding-models/graphs/contributors\x1b\\Contributors\x1b]8;;\x1b\\') +
675
- chalk.dim(' • ') +
676
- '☕ ' +
677
- chalk.rgb(255, 200, 100)('\x1b]8;;https://buymeacoffee.com/vavanessadev\x1b\\Buy me a coffee\x1b]8;;\x1b\\') +
678
- chalk.dim(' • ') +
679
- '💬 ' +
680
- chalk.rgb(200, 150, 255)('\x1b]8;;https://discord.gg/ZTNFHvvCkU\x1b\\Discord\x1b]8;;\x1b\\') +
681
- chalk.dim(' → ') +
682
- chalk.rgb(200, 150, 255)('https://discord.gg/ZTNFHvvCkU') +
683
- chalk.dim(' • ') +
684
- chalk.yellow('N') + chalk.dim(' Changelog') +
685
- chalk.dim(' • ') +
686
- chalk.dim('Ctrl+C Exit')
679
+ if (versionStatus.isOutdated) {
680
+ const outdatedMessage = ` ⚠ Update available: v${LOCAL_VERSION} -> v${versionStatus.latestVersion}. If auto-update did not complete, run: npm install -g free-coding-models@latest`
681
+ const paddedBanner = terminalCols > 0
682
+ ? outdatedMessage + ' '.repeat(Math.max(0, terminalCols - displayWidth(outdatedMessage)))
683
+ : outdatedMessage
684
+ // 📖 Reserve a dedicated full-width red row so the warning cannot blend into the footer links.
685
+ lines.push(chalk.bgRed.white.bold(paddedBanner))
687
686
  }
688
- lines.push(footerLine)
689
687
 
690
688
  // 📖 Append \x1b[K (erase to EOL) to each line so leftover chars from previous
691
689
  // 📖 frames are cleared. Then pad with blank cleared lines to fill the terminal,