free-coding-models 0.3.4 → 0.3.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,12 @@
2
2
 
3
3
  ---
4
4
 
5
+ ## 0.3.5
6
+
7
+ ### Fixed
8
+ - **Claude Code beta-route compatibility**: FCM Proxy V2 now matches routes on the URL pathname, so Anthropic requests like `/v1/messages?beta=true` and `/v1/messages/count_tokens?beta=true` resolve correctly instead of failing with a fake “selected model may not exist” error.
9
+ - **Claude proxy parity with `free-claude-code`**: The Claude integration was revalidated against the real `claude` binary, and the proxy-side Claude alias mapping now reaches the upstream provider again in the exact `free-claude-code` style flow.
10
+
5
11
  ## 0.3.4
6
12
 
7
13
  ### Added
package/README.md CHANGED
@@ -182,12 +182,11 @@ bunx free-coding-models YOUR_API_KEY
182
182
 
183
183
  ### 🆕 What's New
184
184
 
185
- **Version 0.3.4 cleans up the public proxy/docs surface and ships a small stability pass:**
185
+ **Version 0.3.5 fixes the main Claude Code proxy compatibility bug found in real-world use:**
186
186
 
187
- - **Browser hits on the proxy root are now friendly** — `GET /` returns a small status JSON instead of `{"error":"Unauthorized"}` when you sanity-check the proxy in a browser.
188
- - **`daemon stop` is now a real public CLI command** — the help text, the README, and the command parser all agree on the same daemon control surface.
189
- - **The README now matches the current UI exactly** — model count is `160`, the `Used` column is documented correctly, and the removed `Usage` column is no longer described.
190
- - **Malformed config sections are normalized safely on load** — corrupted `apiKeys`, `providers`, or `settings` values no longer leak through as broken runtime objects.
187
+ - **Claude Code beta-route requests now work** — the proxy accepts Anthropic URLs like `/v1/messages?beta=true` and `/v1/messages/count_tokens?beta=true`, which is how recent Claude Code builds really call the API.
188
+ - **Claude proxy flow now behaves like `free-claude-code` on the routing layer** — fake Claude model ids still map proxy-side to the selected free backend model, but the route matcher no longer breaks before that mapping can run.
189
+ - **The fix was validated against the real `claude` binary** — not just unit tests. The exact failure `selected model (claude-sonnet-4-6) may not exist` is now gone in local end-to-end repro.
191
190
 
192
191
  ---
193
192
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "free-coding-models",
3
- "version": "0.3.4",
3
+ "version": "0.3.5",
4
4
  "description": "Find the fastest coding LLM models in seconds — ping free models from multiple providers, pick the best one for OpenCode, Cursor, or any AI coding assistant.",
5
5
  "keywords": [
6
6
  "nvidia",
@@ -108,6 +108,21 @@ function sendJson(res, statusCode, body) {
108
108
  res.end(json)
109
109
  }
110
110
 
111
/**
 * 📖 Match routes on the URL pathname only so Claude Code's `?beta=true`
 * 📖 Anthropic requests resolve exactly like FastAPI routes do in free-claude-code.
 *
 * 📖 Fix: the previous catch-branch returned the raw `req.url` — query string
 * 📖 and all — so a request target the WHATWG parser rejects would still carry
 * 📖 `?beta=true` into route matching and miss every route. The fallback now
 * 📖 strips the query and fragment by hand so the "pathname only" contract
 * 📖 holds on both paths.
 *
 * @param {http.IncomingMessage} req
 * @returns {string} The request pathname, defaulting to '/'.
 */
function getRequestPathname(req) {
  const raw = req.url || '/'
  try {
    // 📖 The base host is throwaway — only `pathname` is read from the result.
    return new URL(raw, 'http://127.0.0.1').pathname || '/'
  } catch {
    // 📖 Unparseable target (e.g. malformed absolute-form URL): drop anything
    // 📖 from the first '?' or '#' onward so routing still sees a bare path.
    const queryAt = raw.indexOf('?')
    const fragAt = raw.indexOf('#')
    const cuts = [queryAt, fragAt].filter(i => i !== -1)
    const end = cuts.length > 0 ? Math.min(...cuts) : raw.length
    return raw.slice(0, end) || '/'
  }
}
125
+
111
126
  function normalizeRequestedModel(modelId) {
112
127
  if (typeof modelId !== 'string') return null
113
128
  const trimmed = modelId.trim()
@@ -303,14 +318,16 @@ export class ProxyServer {
303
318
  // ── Request routing ────────────────────────────────────────────────────────
304
319
 
305
320
  _handleRequest(req, res) {
321
+ const pathname = getRequestPathname(req)
322
+
306
323
  // 📖 Root endpoint is unauthenticated so a browser hit on http://127.0.0.1:{port}/
307
324
  // 📖 gives a useful status payload instead of a misleading Unauthorized error.
308
- if (req.method === 'GET' && req.url === '/') {
325
+ if (req.method === 'GET' && pathname === '/') {
309
326
  return this._handleRoot(res)
310
327
  }
311
328
 
312
329
  // 📖 Health endpoint is unauthenticated so external monitors can probe it
313
- if (req.method === 'GET' && req.url === '/v1/health') {
330
+ if (req.method === 'GET' && pathname === '/v1/health') {
314
331
  return this._handleHealth(res)
315
332
  }
316
333
 
@@ -319,11 +336,11 @@ export class ProxyServer {
319
336
  return sendJson(res, 401, { error: 'Unauthorized' })
320
337
  }
321
338
 
322
- if (req.method === 'GET' && req.url === '/v1/models') {
339
+ if (req.method === 'GET' && pathname === '/v1/models') {
323
340
  this._handleModels(res)
324
- } else if (req.method === 'GET' && req.url === '/v1/stats') {
341
+ } else if (req.method === 'GET' && pathname === '/v1/stats') {
325
342
  this._handleStats(res)
326
- } else if (req.method === 'POST' && req.url === '/v1/chat/completions') {
343
+ } else if (req.method === 'POST' && pathname === '/v1/chat/completions') {
327
344
  this._handleChatCompletions(req, res).catch(err => {
328
345
  console.error('[proxy] Internal error:', err)
329
346
  // 📖 Return 413 for body-too-large, generic 500 for everything else — never leak stack traces
@@ -331,7 +348,7 @@ export class ProxyServer {
331
348
  const msg = err.statusCode === 413 ? 'Request body too large' : 'Internal server error'
332
349
  sendJson(res, status, { error: msg })
333
350
  })
334
- } else if (req.method === 'POST' && req.url === '/v1/messages') {
351
+ } else if (req.method === 'POST' && pathname === '/v1/messages') {
335
352
  // 📖 Anthropic Messages API translation — enables Claude Code compatibility
336
353
  this._handleAnthropicMessages(req, res, authContext).catch(err => {
337
354
  console.error('[proxy] Internal error:', err)
@@ -339,26 +356,26 @@ export class ProxyServer {
339
356
  const msg = err.statusCode === 413 ? 'Request body too large' : 'Internal server error'
340
357
  sendJson(res, status, { error: msg })
341
358
  })
342
- } else if (req.method === 'POST' && req.url === '/v1/messages/count_tokens') {
359
+ } else if (req.method === 'POST' && pathname === '/v1/messages/count_tokens') {
343
360
  this._handleAnthropicCountTokens(req, res).catch(err => {
344
361
  console.error('[proxy] Internal error:', err)
345
362
  const status = err.statusCode === 413 ? 413 : 500
346
363
  const msg = err.statusCode === 413 ? 'Request body too large' : 'Internal server error'
347
364
  sendJson(res, status, { error: msg })
348
365
  })
349
- } else if (req.method === 'POST' && req.url === '/v1/responses') {
366
+ } else if (req.method === 'POST' && pathname === '/v1/responses') {
350
367
  this._handleResponses(req, res).catch(err => {
351
368
  console.error('[proxy] Internal error:', err)
352
369
  const status = err.statusCode === 413 ? 413 : 500
353
370
  const msg = err.statusCode === 413 ? 'Request body too large' : 'Internal server error'
354
371
  sendJson(res, status, { error: msg })
355
372
  })
356
- } else if (req.method === 'POST' && req.url === '/v1/completions') {
373
+ } else if (req.method === 'POST' && pathname === '/v1/completions') {
357
374
  // These legacy/alternative OpenAI endpoints are not supported by the proxy.
358
375
  // Return 501 (not 404) so callers get a clear signal instead of silently failing.
359
376
  sendJson(res, 501, {
360
377
  error: 'Not Implemented',
361
- message: `${req.url} is not supported by this proxy. Use POST /v1/chat/completions instead.`,
378
+ message: `${pathname} is not supported by this proxy. Use POST /v1/chat/completions instead.`,
362
379
  })
363
380
  } else {
364
381
  sendJson(res, 404, { error: 'Not found' })