free-coding-models 0.3.9 → 0.3.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,1477 +0,0 @@
1
- /**
2
- * @file lib/proxy-server.js
3
- * @description Multi-account rotation proxy server with SSE streaming,
4
- * token stats tracking, Anthropic/OpenAI translation, and persistent request logging.
5
- *
6
- * Design:
7
- * - Binds to 127.0.0.1 only (never 0.0.0.0)
8
- * - SSE is piped through without buffering (upstreamRes.pipe(clientRes))
9
- * - HTTP/HTTPS module is chosen BEFORE the request is created (single code-path)
10
- * - x-ratelimit-* headers are stripped from all responses forwarded to clients
11
- * - Retry loop: first attempt uses sticky session fingerprint; subsequent
12
- * retries use fresh P2C to avoid hitting the same failed account
13
- * - Claude-family aliases are resolved inside the proxy so Claude Code can
14
- * keep emitting `claude-*` / `sonnet` / `haiku` style model ids safely
15
- *
16
- * @exports ProxyServer
17
- */
18
-
19
- import http from 'node:http'
20
- import https from 'node:https'
21
- import { AccountManager } from './account-manager.js'
22
- import { classifyError } from './error-classifier.js'
23
- import { applyThinkingBudget, compressContext } from './request-transformer.js'
24
- import { TokenStats } from './token-stats.js'
25
- import { createHash } from 'node:crypto'
26
- import {
27
- translateAnthropicToOpenAI,
28
- translateOpenAIToAnthropic,
29
- createAnthropicSSETransformer,
30
- estimateAnthropicTokens,
31
- } from './anthropic-translator.js'
32
- import {
33
- translateResponsesToOpenAI,
34
- translateOpenAIToResponses,
35
- createResponsesSSETransformer,
36
- } from './responses-translator.js'
37
-
38
- // ─── Helpers ─────────────────────────────────────────────────────────────────
39
-
40
/**
 * Pick the transport module that matches the URL scheme.
 * Must be resolved BEFORE the request object is built so the same
 * code-path serves both plain and TLS upstreams.
 *
 * @param {string} url
 * @returns {typeof import('http') | typeof import('https')}
 */
function selectClient(url) {
  if (url.startsWith('https')) return https
  return http
}
50
-
51
/**
 * Copy a headers object, dropping every `x-ratelimit-*` entry so upstream
 * providers' rate-limit details are never forwarded to clients.
 *
 * @param {Record<string, string | string[]>} headers
 * @returns {Record<string, string | string[]>} a new object; input is untouched
 */
function stripRateLimitHeaders(headers) {
  const kept = Object.entries(headers).filter(
    ([name]) => !name.toLowerCase().startsWith('x-ratelimit')
  )
  return Object.fromEntries(kept)
}
66
-
67
// 📖 Max body size limit to prevent memory exhaustion attacks (10 MB)
const MAX_BODY_SIZE = 10 * 1024 * 1024

/**
 * Collect an incoming request body into one UTF-8 string.
 * The moment the cumulative size passes MAX_BODY_SIZE the socket is
 * destroyed and the promise rejects, so an oversized payload can never
 * be buffered in full.
 *
 * @param {http.IncomingMessage} req
 * @returns {Promise<string>}
 * @throws {Error} with statusCode 413 when body exceeds MAX_BODY_SIZE
 */
function readBody(req) {
  return new Promise((resolve, reject) => {
    const pieces = []
    let received = 0
    req.on('data', piece => {
      received += piece.length
      if (received > MAX_BODY_SIZE) {
        req.destroy()
        const tooLarge = new Error('Request body too large')
        tooLarge.statusCode = 413
        reject(tooLarge)
        return
      }
      pieces.push(piece)
    })
    req.on('end', () => resolve(Buffer.concat(pieces).toString()))
    req.on('error', reject)
  })
}
96
-
97
/**
 * Serialise (when needed) and write a JSON response, unless headers have
 * already been committed to the client — in that case this is a no-op.
 *
 * @param {http.ServerResponse} res
 * @param {number} statusCode
 * @param {object | string} body - object to stringify, or a pre-serialised string
 */
function sendJson(res, statusCode, body) {
  if (res.headersSent) return
  let payload
  if (typeof body === 'string') {
    payload = body
  } else {
    payload = JSON.stringify(body)
  }
  res.writeHead(statusCode, { 'content-type': 'application/json' })
  res.end(payload)
}
110
-
111
/**
 * 📖 Match routes on the URL pathname only so Claude Code's `?beta=true`
 * 📖 Anthropic requests resolve exactly like FastAPI routes do in free-claude-code.
 *
 * @param {http.IncomingMessage} req
 * @returns {string} pathname with any query string removed; raw url on parse failure
 */
function getRequestPathname(req) {
  const rawUrl = req.url || '/'
  try {
    const parsed = new URL(rawUrl, 'http://127.0.0.1')
    return parsed.pathname || '/'
  } catch {
    return rawUrl
  }
}
125
-
126
/**
 * Normalise a client-supplied model id: trim whitespace and drop the
 * optional leading `fcm-proxy/` namespace. Anything that is not a
 * non-empty string normalises to null.
 *
 * @param {unknown} modelId
 * @returns {string | null}
 */
function normalizeRequestedModel(modelId) {
  if (typeof modelId !== 'string') return null
  const cleaned = modelId.trim()
  return cleaned ? cleaned.replace(/^fcm-proxy\//, '') : null
}
132
-
133
/**
 * Normalise the Anthropic routing table (default model + per-family
 * overrides). Each entry is run through normalizeRequestedModel so blank
 * or invalid ids collapse to null; a missing table yields an all-null map.
 *
 * @param {{ model?, modelOpus?, modelSonnet?, modelHaiku? } | null} [anthropicRouting]
 * @returns {{ model, modelOpus, modelSonnet, modelHaiku }}
 */
function normalizeAnthropicRouting(anthropicRouting = null) {
  const source = anthropicRouting ?? {}
  return {
    model: normalizeRequestedModel(source.model),
    modelOpus: normalizeRequestedModel(source.modelOpus),
    modelSonnet: normalizeRequestedModel(source.modelSonnet),
    modelHaiku: normalizeRequestedModel(source.modelHaiku),
  }
}
141
-
142
/**
 * 📖 Mirror free-claude-code's family routing approach: classify by Claude
 * 📖 family keywords, not only exact ids. Claude Code regularly emits both
 * 📖 short aliases (`sonnet`) and full versioned ids (`claude-3-5-sonnet-*`).
 *
 * @param {unknown} modelId
 * @returns {'default' | 'opus' | 'sonnet' | 'haiku' | null}
 */
function classifyClaudeVirtualModel(modelId) {
  const normalized = normalizeRequestedModel(modelId)
  if (!normalized) return null

  const lower = normalized.toLowerCase()
  if (lower === 'default') return 'default'

  // Short aliases emitted by Claude Code (opus/opusplan/[1m] variants).
  const aliasRules = [
    [/^opus(?:plan)?(?:\[1m\])?$/, 'opus'],
    [/^sonnet(?:\[1m\])?$/, 'sonnet'],
  ]
  for (const [pattern, family] of aliasRules) {
    if (pattern.test(lower)) return family
  }
  if (lower === 'haiku') return 'haiku'

  // Full versioned ids: keyword match, checked in priority order
  // opus → haiku → sonnet (matches the original branch order).
  if (!lower.startsWith('claude-')) return null
  for (const family of ['opus', 'haiku', 'sonnet']) {
    if (lower.includes(family)) return family
  }
  return null
}
161
-
162
/**
 * Map a Claude-family model id onto the user-configured upstream model.
 * Family-specific overrides win; the generic `model` entry is the fallback.
 * Returns null when no routing is configured at all.
 *
 * @param {unknown} modelId
 * @param {object | null} anthropicRouting
 * @returns {string | null}
 */
function resolveAnthropicMappedModel(modelId, anthropicRouting) {
  const routing = normalizeAnthropicRouting(anthropicRouting)
  const { model: fallbackModel, modelOpus, modelSonnet, modelHaiku } = routing
  if (!fallbackModel && !modelOpus && !modelSonnet && !modelHaiku) {
    return null
  }

  switch (classifyClaudeVirtualModel(modelId)) {
    case 'opus':
      return modelOpus || fallbackModel
    case 'sonnet':
      return modelSonnet || fallbackModel
    case 'haiku':
      return modelHaiku || fallbackModel
    default:
      // 📖 free-claude-code falls back to MODEL for unknown Claude ids too.
      return fallbackModel
  }
}
177
-
178
/**
 * Validate the proxy `Authorization` header and extract the optional
 * per-session model hint carried as `Bearer <token>:<model>`.
 * When no token is configured the proxy is open: always authorized, no hint.
 *
 * @param {string | undefined} authorization
 * @param {string | null} expectedToken
 * @returns {{ authorized: boolean, modelHint: string | null }}
 */
function parseProxyAuthorizationHeader(authorization, expectedToken) {
  const denied = { authorized: false, modelHint: null }
  if (!expectedToken) return { authorized: true, modelHint: null }
  if (typeof authorization !== 'string' || !authorization.startsWith('Bearer ')) {
    return denied
  }

  const rawToken = authorization.slice('Bearer '.length).trim()
  if (rawToken === expectedToken) return { authorized: true, modelHint: null }
  if (!rawToken.startsWith(`${expectedToken}:`)) return denied

  // 📖 `token:model` form — everything after the colon is a model hint.
  // An empty/blank hint means the credential is malformed, so deny.
  const modelHint = normalizeRequestedModel(rawToken.slice(expectedToken.length + 1))
  if (modelHint) return { authorized: true, modelHint }
  return denied
}
193
-
194
- // ─── ProxyServer ─────────────────────────────────────────────────────────────
195
-
196
- export class ProxyServer {
197
  /**
   * Build a proxy instance. No socket is opened here — {@link start} binds
   * the listener; this only wires up configuration, the account manager,
   * token stats, and the request handler.
   *
   * @param {{
   *   port?: number,
   *   accounts?: Array<{ id: string, providerKey: string, apiKey: string, modelId: string, url: string }>,
   *   retries?: number,
   *   proxyApiKey?: string,
   *   anthropicRouting?: { model?: string|null, modelOpus?: string|null, modelSonnet?: string|null, modelHaiku?: string|null },
   *   accountManagerOpts?: object,
   *   tokenStatsOpts?: object,
   *   thinkingConfig?: { mode: string, budget_tokens?: number },
   *   compressionOpts?: { level?: number, toolResultMaxChars?: number, thinkingMaxChars?: number, maxTotalChars?: number },
   *   upstreamTimeoutMs?: number
   * }} opts
   */
  constructor({
    port = 0,                  // 0 → OS assigns an ephemeral port (reported by start())
    accounts = [],
    retries = 3,
    proxyApiKey = null,        // null → auth disabled (see parseProxyAuthorizationHeader)
    anthropicRouting = null,
    accountManagerOpts = {},
    tokenStatsOpts = {},
    thinkingConfig,
    compressionOpts,
    upstreamTimeoutMs = 45_000,
  } = {}) {
    this._port = port
    this._retries = retries
    this._thinkingConfig = thinkingConfig
    this._compressionOpts = compressionOpts
    this._proxyApiKey = proxyApiKey
    // Normalised once at construction so request-time code never re-parses routing config.
    this._anthropicRouting = normalizeAnthropicRouting(anthropicRouting)
    this._accounts = accounts
    this._upstreamTimeoutMs = upstreamTimeoutMs
    // 📖 Progressive backoff delays (ms) for retries — first attempt is immediate,
    // subsequent ones add increasing delay + random jitter (0-100ms) to avoid
    // re-hitting the same rate-limit window on 429s from providers
    this._retryDelays = [0, 300, 800]
    this._accountManager = new AccountManager(accounts, accountManagerOpts)
    this._tokenStats = new TokenStats(tokenStatsOpts)
    this._startTime = Date.now()
    this._running = false
    this._listeningPort = null
    // Handler is bound now; the socket itself only opens in start().
    this._server = http.createServer((req, res) => this._handleRequest(req, res))
  }
242
-
243
- /**
244
- * Start listening on 127.0.0.1.
245
- *
246
- * @returns {Promise<{ port: number }>}
247
- */
248
- start() {
249
- return new Promise((resolve, reject) => {
250
- this._server.once('error', reject)
251
- this._server.listen(this._port, '127.0.0.1', () => {
252
- this._server.removeListener('error', reject)
253
- this._running = true
254
- this._listeningPort = this._server.address().port
255
- resolve({ port: this._listeningPort })
256
- })
257
- })
258
- }
259
-
260
- /**
261
- * Save stats and close the server.
262
- *
263
- * @returns {Promise<void>}
264
- */
265
- stop() {
266
- this._tokenStats.save()
267
- return new Promise(resolve => {
268
- this._server.close(() => {
269
- this._running = false
270
- this._listeningPort = null
271
- resolve()
272
- })
273
- })
274
- }
275
-
276
- getStatus() {
277
- return {
278
- running: this._running,
279
- port: this._listeningPort,
280
- accountCount: this._accounts.length,
281
- healthByAccount: this._accountManager.getAllHealth(),
282
- anthropicRouting: this._anthropicRouting,
283
- }
284
- }
285
-
286
- _getAuthContext(req) {
287
- return parseProxyAuthorizationHeader(req.headers.authorization, this._proxyApiKey)
288
- }
289
-
290
- _isAuthorized(req) {
291
- return this._getAuthContext(req).authorized
292
- }
293
-
294
  /**
   * Resolve which upstream model a Claude-style request should use.
   * Priority order (first match wins):
   *   1. the requested model itself, if an account actually serves it
   *   2. the proxy-side Anthropic family mapping (free-claude-code style)
   *   3. the legacy auth-token model hint, for already-launched sessions
   *   4. the normalised requested model, unchanged (may be null)
   *
   * @param {unknown} modelId - model id from the request body
   * @param {string | null} [authModelHint] - hint parsed from `Bearer token:model`
   * @returns {string | null}
   */
  _resolveAnthropicRequestedModel(modelId, authModelHint = null) {
    const requestedModel = normalizeRequestedModel(modelId)
    if (requestedModel && this._accountManager.hasAccountsForModel(requestedModel)) {
      return requestedModel
    }

    const mappedModel = resolveAnthropicMappedModel(requestedModel, this._anthropicRouting)
    if (mappedModel && this._accountManager.hasAccountsForModel(mappedModel)) {
      return mappedModel
    }

    // 📖 Claude Code still emits internal aliases / tier model ids for some
    // 📖 background and helper paths. Keep the old auth-token hint as a final
    // 📖 compatibility fallback for already-launched sessions, but the primary
    // 📖 routing path is now the free-claude-code style proxy-side mapping above.
    if (authModelHint && this._accountManager.hasAccountsForModel(authModelHint)) {
      // Only apply the hint when the request is absent or Claude-flavoured,
      // so explicit non-Claude model ids are never silently overridden.
      if (!requestedModel || classifyClaudeVirtualModel(requestedModel) || requestedModel.toLowerCase().startsWith('claude-')) {
        return authModelHint
      }
    }

    return requestedModel
  }
317
-
318
- // ── Request routing ────────────────────────────────────────────────────────
319
-
320
- _handleRequest(req, res) {
321
- const pathname = getRequestPathname(req)
322
-
323
- // 📖 Root endpoint is unauthenticated so a browser hit on http://127.0.0.1:{port}/
324
- // 📖 gives a useful status payload instead of a misleading Unauthorized error.
325
- if (req.method === 'GET' && pathname === '/') {
326
- return this._handleRoot(res)
327
- }
328
-
329
- // 📖 Health endpoint is unauthenticated so external monitors can probe it
330
- if (req.method === 'GET' && pathname === '/v1/health') {
331
- return this._handleHealth(res)
332
- }
333
-
334
- const authContext = this._getAuthContext(req)
335
- if (!authContext.authorized) {
336
- return sendJson(res, 401, { error: 'Unauthorized' })
337
- }
338
-
339
- if (req.method === 'GET' && pathname === '/v1/models') {
340
- this._handleModels(res)
341
- } else if (req.method === 'GET' && pathname === '/v1/stats') {
342
- this._handleStats(res)
343
- } else if (req.method === 'POST' && pathname === '/v1/chat/completions') {
344
- this._handleChatCompletions(req, res).catch(err => {
345
- console.error('[proxy] Internal error:', err)
346
- // 📖 Return 413 for body-too-large, generic 500 for everything else — never leak stack traces
347
- const status = err.statusCode === 413 ? 413 : 500
348
- const msg = err.statusCode === 413 ? 'Request body too large' : 'Internal server error'
349
- sendJson(res, status, { error: msg })
350
- })
351
- } else if (req.method === 'POST' && pathname === '/v1/messages') {
352
- // 📖 Anthropic Messages API translation — enables Claude Code compatibility
353
- this._handleAnthropicMessages(req, res, authContext).catch(err => {
354
- console.error('[proxy] Internal error:', err)
355
- const status = err.statusCode === 413 ? 413 : 500
356
- const msg = err.statusCode === 413 ? 'Request body too large' : 'Internal server error'
357
- sendJson(res, status, { error: msg })
358
- })
359
- } else if (req.method === 'POST' && pathname === '/v1/messages/count_tokens') {
360
- this._handleAnthropicCountTokens(req, res).catch(err => {
361
- console.error('[proxy] Internal error:', err)
362
- const status = err.statusCode === 413 ? 413 : 500
363
- const msg = err.statusCode === 413 ? 'Request body too large' : 'Internal server error'
364
- sendJson(res, status, { error: msg })
365
- })
366
- } else if (req.method === 'POST' && pathname === '/v1/responses') {
367
- this._handleResponses(req, res).catch(err => {
368
- console.error('[proxy] Internal error:', err)
369
- const status = err.statusCode === 413 ? 413 : 500
370
- const msg = err.statusCode === 413 ? 'Request body too large' : 'Internal server error'
371
- sendJson(res, status, { error: msg })
372
- })
373
- } else if (req.method === 'POST' && pathname === '/v1/completions') {
374
- // These legacy/alternative OpenAI endpoints are not supported by the proxy.
375
- // Return 501 (not 404) so callers get a clear signal instead of silently failing.
376
- sendJson(res, 501, {
377
- error: 'Not Implemented',
378
- message: `${pathname} is not supported by this proxy. Use POST /v1/chat/completions instead.`,
379
- })
380
- } else {
381
- sendJson(res, 404, { error: 'Not found' })
382
- }
383
- }
384
-
385
- // ── GET /v1/models ─────────────────────────────────────────────────────────
386
-
387
- _handleModels(res) {
388
- const seen = new Set()
389
- const data = []
390
- const models = []
391
- for (const acct of this._accounts) {
392
- const publicModelId = acct.proxyModelId || acct.modelId
393
- if (!seen.has(publicModelId)) {
394
- seen.add(publicModelId)
395
- const modelEntry = {
396
- id: publicModelId,
397
- slug: publicModelId,
398
- name: publicModelId,
399
- object: 'model',
400
- created: Math.floor(Date.now() / 1000),
401
- owned_by: 'proxy',
402
- }
403
- data.push(modelEntry)
404
- models.push(modelEntry)
405
- }
406
- }
407
- sendJson(res, 200, { object: 'list', data, models })
408
- }
409
-
410
- // ── POST /v1/chat/completions ──────────────────────────────────────────────
411
-
412
  /**
   * Handle `POST /v1/chat/completions`: parse the body, apply the optional
   * context-compression and thinking-budget transforms, then try up to
   * `this._retries` accounts with progressive backoff until one attempt
   * commits a response to the client. Falls through to 503 when every
   * attempt fails or no account is available.
   *
   * @param {http.IncomingMessage} clientReq
   * @param {http.ServerResponse} clientRes
   * @returns {Promise<void>} resolves once a response has been committed
   */
  async _handleChatCompletions(clientReq, clientRes) {
    // 1. Read and parse request body
    const rawBody = await readBody(clientReq)
    let body
    try {
      body = JSON.parse(rawBody)
    } catch {
      return sendJson(clientRes, 400, { error: 'Invalid JSON body' })
    }

    // 2. Optional transformations (both functions return new objects, no mutation)
    if (this._compressionOpts && Array.isArray(body.messages)) {
      body = { ...body, messages: compressContext(body.messages, this._compressionOpts) }
    }
    if (this._thinkingConfig) {
      body = applyThinkingBudget(body, this._thinkingConfig)
    }

    // 3. Session fingerprint for first-attempt sticky routing
    // (hash of the LAST message only, so a continuing conversation keeps
    // mapping to the same account even as earlier messages accumulate)
    const fingerprint = createHash('sha256')
      .update(JSON.stringify(body.messages?.slice(-1) ?? []))
      .digest('hex')
      .slice(0, 16)

    const requestedModel = typeof body.model === 'string'
      ? body.model.replace(/^fcm-proxy\//, '')
      : undefined

    // 4. Early check: if a specific model is requested but has no registered accounts,
    // return 404 immediately with a clear message rather than silently failing.
    if (requestedModel && !this._accountManager.hasAccountsForModel(requestedModel)) {
      return sendJson(clientRes, 404, {
        error: 'Model not found',
        message: `Model '${requestedModel}' is not available through this proxy. Use GET /v1/models to list available models.`,
      })
    }

    // Map a classified upstream failure onto the short reason code that is
    // recorded in the request log when the next attempt switches accounts.
    const formatSwitchReason = (classified) => {
      switch (classified?.type) {
        case 'QUOTA_EXHAUSTED':
          return 'quota'
        case 'RATE_LIMITED':
          return '429'
        case 'MODEL_NOT_FOUND':
          return '404'
        case 'MODEL_CAPACITY':
          return 'capacity'
        case 'SERVER_ERROR':
          return '5xx'
        case 'NETWORK_ERROR':
          return 'network'
        default:
          return 'retry'
      }
    }

    // 5. Retry loop with progressive backoff
    let pendingSwitchReason = null
    let previousAccount = null
    for (let attempt = 0; attempt < this._retries; attempt++) {
      // 📖 Apply backoff delay before retries (first attempt is immediate)
      const delay = this._retryDelays[Math.min(attempt, this._retryDelays.length - 1)]
      if (delay > 0) await new Promise(r => setTimeout(r, delay + Math.random() * 100))

      // First attempt: respect sticky session.
      // Subsequent retries: fresh P2C (don't hammer the same failed account).
      const selectOpts = attempt === 0
        ? { sessionFingerprint: fingerprint, requestedModel }
        : { requestedModel }
      const account = this._accountManager.selectAccount(selectOpts)
      if (!account) break // No available accounts → fall through to 503

      const result = await this._forwardRequest(account, body, clientRes, {
        requestedModel,
        switched: attempt > 0,
        switchReason: pendingSwitchReason,
        switchedFromProviderKey: previousAccount?.providerKey,
        switchedFromModelId: previousAccount?.modelId,
      })

      // Response fully sent (success JSON or SSE pipe established)
      if (result.done) return

      // Error path: classify → record → retry or forward error
      const { statusCode, responseBody, responseHeaders, networkError } = result
      const classified = classifyError(
        networkError ? 0 : statusCode,
        responseBody || '',
        responseHeaders || {}
      )

      this._accountManager.recordFailure(account.id, classified, { providerKey: account.providerKey })
      if (responseHeaders) {
        // Failed responses can still carry quota headers — keep them fresh.
        const quotaUpdated = this._accountManager.updateQuota(account.id, responseHeaders)
        this._persistQuotaSnapshot(account, quotaUpdated)
      }

      if (!classified.shouldRetry) {
        // Non-retryable (auth error, unknown) → return upstream response directly
        return sendJson(
          clientRes,
          statusCode || 500,
          responseBody || JSON.stringify({ error: 'Upstream error' })
        )
      }
      // shouldRetry === true → next attempt
      pendingSwitchReason = formatSwitchReason(classified)
      previousAccount = account
    }

    // All retries consumed, or no accounts available from the start
    sendJson(clientRes, 503, { error: 'All accounts exhausted or unavailable' })
  }
525
-
526
- // ── Upstream forwarding ────────────────────────────────────────────────────
527
-
528
  /**
   * Forward one attempt to the upstream API.
   *
   * Resolves with:
   *   { done: true }
   *     — The response has been committed to clientRes (success JSON sent, or
   *       SSE pipe established). The retry loop must return immediately.
   *
   *   { done: false, statusCode, responseBody, responseHeaders, networkError }
   *     — An error occurred; the retry loop decides whether to retry or give up.
   *
   * This promise always resolves, never rejects: upstream failures are
   * reported through the `done: false` shape so the retry loop stays simple.
   *
   * @param {{ id: string, apiKey: string, modelId: string, url: string }} account
   * @param {object} body
   * @param {http.ServerResponse} clientRes
   * @param {{ requestedModel?: string, switched?: boolean, switchReason?: string|null, switchedFromProviderKey?: string, switchedFromModelId?: string }} [logContext]
   * @returns {Promise<{ done: boolean }>}
   */
  _forwardRequest(account, body, clientRes, logContext = {}) {
    return new Promise(resolve => {
      // Replace client-supplied model name with the account's model ID
      const newBody = { ...body, model: account.modelId }
      const bodyStr = JSON.stringify(newBody)

      // Build the full upstream URL from the account's base URL
      const baseUrl = account.url.replace(/\/$/, '')
      let upstreamUrl
      try {
        upstreamUrl = new URL(baseUrl + '/chat/completions')
      } catch {
        // 📖 Malformed upstream URL — resolve as network error so retry loop can continue
        return resolve({ done: false, statusCode: 0, responseBody: 'Invalid upstream URL', networkError: true })
      }

      // Choose http or https module BEFORE creating the request
      const client = selectClient(account.url)
      const startTime = Date.now()

      const requestOptions = {
        hostname: upstreamUrl.hostname,
        port: upstreamUrl.port || (upstreamUrl.protocol === 'https:' ? 443 : 80),
        path: upstreamUrl.pathname + (upstreamUrl.search || ''),
        method: 'POST',
        headers: {
          'authorization': `Bearer ${account.apiKey}`,
          'content-type': 'application/json',
          'content-length': Buffer.byteLength(bodyStr),
        },
      }

      const upstreamReq = client.request(requestOptions, upstreamRes => {
        const { statusCode } = upstreamRes
        const headers = upstreamRes.headers
        const contentType = headers['content-type'] || ''
        const isSSE = contentType.includes('text/event-stream')

        if (statusCode >= 200 && statusCode < 300) {
          if (isSSE) {
            // ── SSE passthrough: MUST NOT buffer ──────────────────────────
            const strippedHeaders = stripRateLimitHeaders(headers)
            clientRes.writeHead(statusCode, {
              ...strippedHeaders,
              'content-type': 'text/event-stream',
              'cache-control': 'no-cache',
            })

            // Tap the data stream to capture usage from the last data line.
            // Register BEFORE pipe() so both listeners share the same event queue.
            // 📖 sseLineBuffer persists between chunks to handle lines split across boundaries
            let lastChunkData = ''
            let sseLineBuffer = ''
            upstreamRes.on('data', chunk => {
              sseLineBuffer += chunk.toString()
              const lines = sseLineBuffer.split('\n')
              // 📖 Last element may be an incomplete line — keep it for next chunk
              sseLineBuffer = lines.pop() || ''
              for (const line of lines) {
                if (line.startsWith('data: ') && !line.includes('[DONE]')) {
                  lastChunkData = line.slice(6).trim()
                }
              }
            })

            upstreamRes.on('end', () => {
              // The final non-[DONE] data line is expected to carry the usage
              // object (OpenAI streaming convention); missing usage → zeros.
              let promptTokens = 0
              let completionTokens = 0
              try {
                const parsed = JSON.parse(lastChunkData)
                if (parsed.usage) {
                  promptTokens = parsed.usage.prompt_tokens || 0
                  completionTokens = parsed.usage.completion_tokens || 0
                }
              } catch { /* no usage in stream — ignore */ }
              // Always record every upstream attempt so the log page shows real requests
              this._tokenStats.record({
                accountId: account.id,
                modelId: account.modelId,
                providerKey: account.providerKey,
                statusCode,
                requestType: 'chat.completions',
                promptTokens,
                completionTokens,
                latencyMs: Date.now() - startTime,
                success: true,
                requestedModelId: logContext.requestedModel,
                switched: logContext.switched === true,
                switchReason: logContext.switchReason,
                switchedFromProviderKey: logContext.switchedFromProviderKey,
                switchedFromModelId: logContext.switchedFromModelId,
              })
              this._accountManager.recordSuccess(account.id, Date.now() - startTime)
              const quotaUpdated = this._accountManager.updateQuota(account.id, headers)
              this._persistQuotaSnapshot(account, quotaUpdated)
            })

            // 📖 Error handlers on both sides of the pipe to prevent uncaught errors
            upstreamRes.on('error', err => { if (!clientRes.destroyed) clientRes.destroy(err) })
            clientRes.on('error', () => { if (!upstreamRes.destroyed) upstreamRes.destroy() })

            // Pipe after listeners are registered; upstream → client, no buffering
            upstreamRes.pipe(clientRes)

            // ── Downstream disconnect cleanup ─────────────────────────────
            // If the client closes its connection mid-stream, destroy the
            // upstream request and response promptly so we don't hold the
            // upstream connection open indefinitely.
            clientRes.on('close', () => {
              if (!upstreamRes.destroyed) upstreamRes.destroy()
              if (!upstreamReq.destroyed) upstreamReq.destroy()
            })

            // The pipe handles the rest asynchronously; signal done to retry loop
            resolve({ done: true })
          } else {
            // ── JSON response ─────────────────────────────────────────────
            const chunks = []
            upstreamRes.on('data', chunk => chunks.push(chunk))
            upstreamRes.on('end', () => {
              const responseBody = Buffer.concat(chunks).toString()
              const latencyMs = Date.now() - startTime

              const quotaUpdated = this._accountManager.updateQuota(account.id, headers)
              this._accountManager.recordSuccess(account.id, latencyMs)
              this._persistQuotaSnapshot(account, quotaUpdated)

              // Always record every upstream attempt so the log page shows real requests.
              // Extract tokens if upstream provides them; default to 0 when not present.
              let promptTokens = 0
              let completionTokens = 0
              try {
                const parsed = JSON.parse(responseBody)
                if (parsed.usage) {
                  promptTokens = parsed.usage.prompt_tokens || 0
                  completionTokens = parsed.usage.completion_tokens || 0
                }
              } catch { /* non-JSON body — tokens stay 0 */ }
              this._tokenStats.record({
                accountId: account.id,
                modelId: account.modelId,
                providerKey: account.providerKey,
                statusCode,
                requestType: 'chat.completions',
                promptTokens,
                completionTokens,
                latencyMs,
                success: true,
                requestedModelId: logContext.requestedModel,
                switched: logContext.switched === true,
                switchReason: logContext.switchReason,
                switchedFromProviderKey: logContext.switchedFromProviderKey,
                switchedFromModelId: logContext.switchedFromModelId,
              })

              // Forward stripped response to client
              const strippedHeaders = stripRateLimitHeaders(headers)
              clientRes.writeHead(statusCode, {
                ...strippedHeaders,
                'content-type': 'application/json',
              })
              clientRes.end(responseBody)
              resolve({ done: true })
            })
          }
        } else {
          // ── Error response: buffer for classification in retry loop ─────
          const chunks = []
          upstreamRes.on('data', chunk => chunks.push(chunk))
          upstreamRes.on('end', () => {
            const latencyMs = Date.now() - startTime
            // Log every failed upstream attempt so the log page shows real requests
            this._tokenStats.record({
              accountId: account.id,
              modelId: account.modelId,
              providerKey: account.providerKey,
              statusCode,
              requestType: 'chat.completions',
              promptTokens: 0,
              completionTokens: 0,
              latencyMs,
              success: false,
              requestedModelId: logContext.requestedModel,
              switched: logContext.switched === true,
              switchReason: logContext.switchReason,
              switchedFromProviderKey: logContext.switchedFromProviderKey,
              switchedFromModelId: logContext.switchedFromModelId,
            })
            resolve({
              done: false,
              statusCode,
              responseBody: Buffer.concat(chunks).toString(),
              responseHeaders: headers,
              networkError: false,
            })
          })
        }
      })

      upstreamReq.on('error', err => {
        // TCP / DNS / timeout errors — log as network failure
        // (the setTimeout → destroy(err) below also lands here)
        const latencyMs = Date.now() - startTime
        this._tokenStats.record({
          accountId: account.id,
          modelId: account.modelId,
          providerKey: account.providerKey,
          statusCode: 0,
          requestType: 'chat.completions',
          promptTokens: 0,
          completionTokens: 0,
          latencyMs,
          success: false,
          requestedModelId: logContext.requestedModel,
          switched: logContext.switched === true,
          switchReason: logContext.switchReason,
          switchedFromProviderKey: logContext.switchedFromProviderKey,
          switchedFromModelId: logContext.switchedFromModelId,
        })
        // TCP / DNS / timeout errors
        resolve({
          done: false,
          statusCode: 0,
          responseBody: err.message,
          responseHeaders: {},
          networkError: true,
        })
      })

      // Abort the upstream request if it exceeds the configured timeout.
      // This prevents indefinite hangs (e.g. nvidia returning 504 after 302 s).
      // The 'timeout' event fires but does NOT automatically abort; we must call destroy().
      upstreamReq.setTimeout(this._upstreamTimeoutMs, () => {
        upstreamReq.destroy(new Error(`Upstream request timed out after ${this._upstreamTimeoutMs}ms`))
      })

      upstreamReq.write(bodyStr)
      upstreamReq.end()
    })
  }
784
-
785
- /**
786
- * Persist a quota snapshot for the given account into TokenStats.
787
- * Called after every `AccountManager.updateQuota()` so TUI can read fresh data.
788
- * Never exposes apiKey.
789
- *
790
- * @param {{ id: string, providerKey?: string, modelId?: string }} account
791
- * @param {boolean} quotaUpdated
792
- */
793
- _persistQuotaSnapshot(account, quotaUpdated = true) {
794
- if (!quotaUpdated) return
795
- const health = this._accountManager.getHealth(account.id)
796
- if (!health) return
797
- this._tokenStats.updateQuotaSnapshot(account.id, {
798
- quotaPercent: health.quotaPercent,
799
- ...(account.providerKey !== undefined && { providerKey: account.providerKey }),
800
- ...(account.modelId !== undefined && { modelId: account.modelId }),
801
- })
802
- }
803
-
804
- // ── GET /v1/health ──────────────────────────────────────────────────────────
805
-
806
- /**
807
- * 📖 Friendly unauthenticated landing endpoint for browsers and quick local checks.
808
- */
809
- _handleRoot(res) {
810
- const status = this.getStatus()
811
- const uniqueModels = new Set(this._accounts.map(acct => acct.proxyModelId || acct.modelId)).size
812
- sendJson(res, 200, {
813
- status: 'ok',
814
- service: 'fcm-proxy-v2',
815
- running: status.running,
816
- accountCount: status.accountCount,
817
- modelCount: uniqueModels,
818
- endpoints: {
819
- health: '/v1/health',
820
- models: '/v1/models',
821
- stats: '/v1/stats',
822
- },
823
- })
824
- }
825
-
826
- /**
827
- * 📖 Health endpoint for daemon liveness checks. Unauthenticated so external
828
- * monitors (TUI, launchctl, systemd) can probe without needing the token.
829
- */
830
- _handleHealth(res) {
831
- const status = this.getStatus()
832
- sendJson(res, 200, {
833
- status: 'ok',
834
- uptime: process.uptime(),
835
- port: status.port,
836
- accountCount: status.accountCount,
837
- running: status.running,
838
- })
839
- }
840
-
841
- // ── GET /v1/stats ──────────────────────────────────────────────────────────
842
-
843
- /**
844
- * 📖 Authenticated stats endpoint — returns per-account health, token stats summary,
845
- * and proxy uptime. Useful for monitoring and debugging.
846
- */
847
- _handleStats(res) {
848
- const healthByAccount = this._accountManager.getAllHealth()
849
- const summary = this._tokenStats.getSummary()
850
-
851
- // 📖 Compute totals from the summary data
852
- const dailyEntries = Object.values(summary.daily || {})
853
- const totalRequests = dailyEntries.reduce((sum, d) => sum + (d.requests || 0), 0)
854
- const totalTokens = dailyEntries.reduce((sum, d) => sum + (d.tokens || 0), 0)
855
-
856
- sendJson(res, 200, {
857
- accounts: healthByAccount,
858
- tokenStats: {
859
- byModel: summary.byModel || {},
860
- recentRequests: summary.recentRequests || [],
861
- },
862
- anthropicRouting: this._anthropicRouting,
863
- totals: {
864
- requests: totalRequests,
865
- tokens: totalTokens,
866
- },
867
- uptime: Math.floor((Date.now() - this._startTime) / 1000),
868
- })
869
- }
870
-
871
- // ── POST /v1/messages (Anthropic translation) ──────────────────────────────
872
-
873
- /**
874
- * 📖 Handle Anthropic Messages API requests by translating to OpenAI format,
875
- * forwarding through the existing chat completions handler, then translating
876
- * the response back to Anthropic format.
877
- *
878
- * 📖 This makes Claude Code work natively through the FCM proxy.
879
- */
880
- async _handleAnthropicMessages(clientReq, clientRes, authContext = { modelHint: null }) {
881
- const rawBody = await readBody(clientReq)
882
- let anthropicBody
883
- try {
884
- anthropicBody = JSON.parse(rawBody)
885
- } catch {
886
- return sendJson(clientRes, 400, { error: { type: 'invalid_request_error', message: 'Invalid JSON body' } })
887
- }
888
-
889
- // 📖 Translate Anthropic → OpenAI
890
- const openaiBody = translateAnthropicToOpenAI(anthropicBody)
891
- const resolvedModel = this._resolveAnthropicRequestedModel(openaiBody.model, authContext.modelHint)
892
- if (resolvedModel) openaiBody.model = resolvedModel
893
- const isStreaming = openaiBody.stream === true
894
-
895
- if (isStreaming) {
896
- // 📖 Streaming mode: pipe through SSE transformer
897
- await this._handleAnthropicMessagesStreaming(openaiBody, anthropicBody.model, clientRes)
898
- } else {
899
- // 📖 JSON mode: forward, translate response, return
900
- await this._handleAnthropicMessagesJson(openaiBody, anthropicBody.model, clientRes)
901
- }
902
- }
903
-
904
- /**
905
- * 📖 Count tokens for Anthropic Messages requests without calling upstream.
906
- * 📖 Claude Code uses this endpoint for budgeting / UI hints, so a fast local
907
- * 📖 estimate is enough to keep the flow working through the proxy.
908
- */
909
- async _handleAnthropicCountTokens(clientReq, clientRes) {
910
- const rawBody = await readBody(clientReq)
911
- let anthropicBody
912
- try {
913
- anthropicBody = JSON.parse(rawBody)
914
- } catch {
915
- return sendJson(clientRes, 400, { error: { type: 'invalid_request_error', message: 'Invalid JSON body' } })
916
- }
917
-
918
- sendJson(clientRes, 200, {
919
- input_tokens: estimateAnthropicTokens(anthropicBody),
920
- })
921
- }
922
-
923
- /**
924
- * 📖 Handle OpenAI Responses API requests by translating them to chat
925
- * 📖 completions, forwarding through the existing proxy path, then converting
926
- * 📖 the result back to the Responses wire format.
927
- */
928
- async _handleResponses(clientReq, clientRes) {
929
- const rawBody = await readBody(clientReq)
930
- let responsesBody
931
- try {
932
- responsesBody = JSON.parse(rawBody)
933
- } catch {
934
- return sendJson(clientRes, 400, { error: 'Invalid JSON body' })
935
- }
936
-
937
- const isStreaming = responsesBody.stream === true || String(clientReq.headers.accept || '').includes('text/event-stream')
938
- const openaiBody = translateResponsesToOpenAI({ ...responsesBody, stream: isStreaming })
939
-
940
- if (isStreaming) {
941
- await this._handleResponsesStreaming(openaiBody, responsesBody.model, clientRes)
942
- } else {
943
- await this._handleResponsesJson(openaiBody, responsesBody.model, clientRes)
944
- }
945
- }
946
-
947
- async _handleResponsesJson(openaiBody, requestModel, clientRes) {
948
- const capturedChunks = []
949
- let capturedStatusCode = 200
950
- let capturedHeaders = {}
951
-
952
- const fakeRes = {
953
- headersSent: false,
954
- destroyed: false,
955
- socket: null,
956
- writeHead(statusCode, headers) {
957
- capturedStatusCode = statusCode
958
- capturedHeaders = headers || {}
959
- this.headersSent = true
960
- },
961
- write(chunk) { capturedChunks.push(chunk) },
962
- end(data) {
963
- if (data) capturedChunks.push(data)
964
- },
965
- on() { return this },
966
- once() { return this },
967
- emit() { return false },
968
- destroy() { this.destroyed = true },
969
- removeListener() { return this },
970
- }
971
-
972
- await this._handleChatCompletionsInternal(openaiBody, fakeRes)
973
-
974
- const responseBody = capturedChunks.join('')
975
- if (capturedStatusCode >= 200 && capturedStatusCode < 300) {
976
- try {
977
- const openaiResponse = JSON.parse(responseBody)
978
- const responsesResponse = translateOpenAIToResponses(openaiResponse, requestModel)
979
- sendJson(clientRes, 200, responsesResponse)
980
- } catch {
981
- sendJson(clientRes, capturedStatusCode, responseBody)
982
- }
983
- return
984
- }
985
-
986
- // 📖 Forward upstream-style JSON errors unchanged for OpenAI-compatible clients.
987
- sendJson(clientRes, capturedStatusCode, responseBody)
988
- }
989
-
990
- async _handleResponsesStreaming(openaiBody, requestModel, clientRes) {
991
- const { transform } = createResponsesSSETransformer(requestModel)
992
- await this._handleResponsesStreamDirect(openaiBody, clientRes, transform)
993
- }
994
-
995
  /**
   * Run the account retry loop for a streaming Responses request, piping
   * upstream SSE through the supplied transformer to the client.
   *
   * Attempt 0 uses a sticky session fingerprint (hash of the last message)
   * so a continuing conversation lands on the same account; retries select
   * fresh accounts. Non-retryable failures end the transform and reply with
   * the upstream error.
   *
   * @param {object} openaiBody - chat-completions request body
   * @param {import('node:http').ServerResponse} clientRes
   * @param {import('node:stream').Transform} sseTransform - chat-completions→Responses SSE translator
   */
  async _handleResponsesStreamDirect(openaiBody, clientRes, sseTransform) {
    // Sticky-session fingerprint derived from the last message only.
    const fingerprint = createHash('sha256')
      .update(JSON.stringify(openaiBody.messages?.slice(-1) ?? []))
      .digest('hex')
      .slice(0, 16)

    // Strip the proxy namespace prefix to get the client-facing model id.
    const requestedModel = typeof openaiBody.model === 'string'
      ? openaiBody.model.replace(/^fcm-proxy\//, '')
      : undefined

    if (requestedModel && !this._accountManager.hasAccountsForModel(requestedModel)) {
      return sendJson(clientRes, 404, {
        error: 'Model not found',
        message: `Model '${requestedModel}' is not available.`,
      })
    }

    // Connect the transformer to the client before the first upstream attempt.
    sseTransform.pipe(clientRes)

    for (let attempt = 0; attempt < this._retries; attempt++) {
      // Progressive backoff with up to 100 ms of jitter.
      const delay = this._retryDelays[Math.min(attempt, this._retryDelays.length - 1)]
      if (delay > 0) await new Promise(r => setTimeout(r, delay + Math.random() * 100))

      // First attempt is sticky; retries avoid the account that just failed.
      const selectOpts = attempt === 0
        ? { sessionFingerprint: fingerprint, requestedModel }
        : { requestedModel }
      const account = this._accountManager.selectAccount(selectOpts)
      if (!account) break

      const result = await this._forwardRequestForResponsesStream(account, openaiBody, sseTransform, clientRes)
      // done === true means streaming started successfully; nothing left to do.
      if (result.done) return

      const { statusCode, responseBody, responseHeaders, networkError } = result
      const classified = classifyError(
        networkError ? 0 : statusCode,
        responseBody || '',
        responseHeaders || {}
      )
      this._accountManager.recordFailure(account.id, classified, { providerKey: account.providerKey })
      if (!classified.shouldRetry) {
        sseTransform.end()
        return sendJson(clientRes, statusCode || 500, responseBody || JSON.stringify({ error: 'Upstream error' }))
      }
    }

    // Retries exhausted (or no account available at all).
    sseTransform.end()
    sendJson(clientRes, 503, { error: 'All accounts exhausted or unavailable' })
  }
1043
-
1044
- /**
1045
- * 📖 Handle non-streaming Anthropic Messages by internally dispatching to
1046
- * chat completions logic and translating the JSON response back.
1047
- */
1048
- async _handleAnthropicMessagesJson(openaiBody, requestModel, clientRes) {
1049
- // 📖 Create a fake request/response pair to capture the OpenAI response
1050
- const capturedChunks = []
1051
- let capturedStatusCode = 200
1052
- let capturedHeaders = {}
1053
-
1054
- const fakeRes = {
1055
- headersSent: false,
1056
- destroyed: false,
1057
- socket: null,
1058
- writeHead(statusCode, headers) {
1059
- capturedStatusCode = statusCode
1060
- capturedHeaders = headers || {}
1061
- this.headersSent = true
1062
- },
1063
- write(chunk) { capturedChunks.push(chunk) },
1064
- end(data) {
1065
- if (data) capturedChunks.push(data)
1066
- },
1067
- on() { return this },
1068
- once() { return this },
1069
- emit() { return false },
1070
- destroy() { this.destroyed = true },
1071
- removeListener() { return this },
1072
- }
1073
-
1074
- // 📖 Build a fake IncomingMessage-like with pre-parsed body
1075
- const fakeReq = {
1076
- method: 'POST',
1077
- url: '/v1/chat/completions',
1078
- headers: { 'content-type': 'application/json' },
1079
- on(event, cb) {
1080
- if (event === 'data') cb(Buffer.from(JSON.stringify(openaiBody)))
1081
- if (event === 'end') cb()
1082
- return this
1083
- },
1084
- removeListener() { return this },
1085
- }
1086
-
1087
- // 📖 Use internal handler directly instead of fake request
1088
- await this._handleChatCompletionsInternal(openaiBody, fakeRes)
1089
-
1090
- const responseBody = capturedChunks.join('')
1091
-
1092
- if (capturedStatusCode >= 200 && capturedStatusCode < 300) {
1093
- try {
1094
- const openaiResponse = JSON.parse(responseBody)
1095
- const anthropicResponse = translateOpenAIToAnthropic(openaiResponse, requestModel)
1096
- sendJson(clientRes, 200, anthropicResponse)
1097
- } catch {
1098
- // 📖 Couldn't parse — forward raw
1099
- sendJson(clientRes, capturedStatusCode, responseBody)
1100
- }
1101
- } else {
1102
- // 📖 Error — wrap in Anthropic error format
1103
- sendJson(clientRes, capturedStatusCode, {
1104
- type: 'error',
1105
- error: { type: 'api_error', message: responseBody },
1106
- })
1107
- }
1108
- }
1109
-
1110
- /**
1111
- * 📖 Handle streaming Anthropic Messages by forwarding as streaming OpenAI
1112
- * chat completions and piping through the SSE translator.
1113
- */
1114
- async _handleAnthropicMessagesStreaming(openaiBody, requestModel, clientRes) {
1115
- // 📖 We need to intercept the SSE response and translate it
1116
- const { transform, getUsage } = createAnthropicSSETransformer(requestModel)
1117
-
1118
- let resolveForward
1119
- const forwardPromise = new Promise(r => { resolveForward = r })
1120
-
1121
- const fakeRes = {
1122
- headersSent: false,
1123
- destroyed: false,
1124
- socket: null,
1125
- writeHead(statusCode, headers) {
1126
- this.headersSent = true
1127
- if (statusCode >= 200 && statusCode < 300) {
1128
- // 📖 Write Anthropic SSE headers
1129
- clientRes.writeHead(200, {
1130
- 'content-type': 'text/event-stream',
1131
- 'cache-control': 'no-cache',
1132
- 'connection': 'keep-alive',
1133
- })
1134
- } else {
1135
- clientRes.writeHead(statusCode, headers)
1136
- }
1137
- },
1138
- write(chunk) { /* SSE data handled via pipe */ },
1139
- end(data) {
1140
- if (data && !this.headersSent) {
1141
- // 📖 Non-streaming error response
1142
- clientRes.end(data)
1143
- }
1144
- resolveForward()
1145
- },
1146
- on() { return this },
1147
- once() { return this },
1148
- emit() { return false },
1149
- destroy() { this.destroyed = true },
1150
- removeListener() { return this },
1151
- }
1152
-
1153
- // 📖 Actually we need to pipe the upstream SSE through our transformer.
1154
- // 📖 The simplest approach: use _handleChatCompletionsInternal with stream=true
1155
- // 📖 and capture the piped response through our transformer.
1156
-
1157
- // 📖 For streaming, we go lower level — use the retry loop directly
1158
- await this._handleAnthropicStreamDirect(openaiBody, requestModel, clientRes, transform)
1159
- }
1160
-
1161
- /**
1162
- * 📖 Direct streaming handler for Anthropic messages.
1163
- * 📖 Runs the retry loop, pipes upstream SSE through the Anthropic transformer.
1164
- */
1165
- async _handleAnthropicStreamDirect(openaiBody, requestModel, clientRes, sseTransform) {
1166
- const { createHash: _createHash } = await import('node:crypto')
1167
- const fingerprint = _createHash('sha256')
1168
- .update(JSON.stringify(openaiBody.messages?.slice(-1) ?? []))
1169
- .digest('hex')
1170
- .slice(0, 16)
1171
-
1172
- const requestedModel = typeof openaiBody.model === 'string'
1173
- ? openaiBody.model.replace(/^fcm-proxy\//, '')
1174
- : undefined
1175
-
1176
- if (requestedModel && !this._accountManager.hasAccountsForModel(requestedModel)) {
1177
- return sendJson(clientRes, 404, {
1178
- type: 'error',
1179
- error: { type: 'not_found_error', message: `Model '${requestedModel}' is not available.` },
1180
- })
1181
- }
1182
-
1183
- // 📖 Pipe the transform to client
1184
- sseTransform.pipe(clientRes)
1185
-
1186
- for (let attempt = 0; attempt < this._retries; attempt++) {
1187
- // 📖 Progressive backoff for retries (same as chat completions)
1188
- const delay = this._retryDelays[Math.min(attempt, this._retryDelays.length - 1)]
1189
- if (delay > 0) await new Promise(r => setTimeout(r, delay + Math.random() * 100))
1190
-
1191
- const selectOpts = attempt === 0
1192
- ? { sessionFingerprint: fingerprint, requestedModel }
1193
- : { requestedModel }
1194
- const account = this._accountManager.selectAccount(selectOpts)
1195
- if (!account) break
1196
-
1197
- const result = await this._forwardRequestForAnthropicStream(account, openaiBody, sseTransform, clientRes)
1198
-
1199
- if (result.done) return
1200
-
1201
- const { statusCode, responseBody, responseHeaders, networkError } = result
1202
- const classified = classifyError(
1203
- networkError ? 0 : statusCode,
1204
- responseBody || '',
1205
- responseHeaders || {}
1206
- )
1207
- this._accountManager.recordFailure(account.id, classified, { providerKey: account.providerKey })
1208
- if (!classified.shouldRetry) {
1209
- sseTransform.end()
1210
- return sendJson(clientRes, statusCode || 500, {
1211
- type: 'error',
1212
- error: { type: 'api_error', message: responseBody || 'Upstream error' },
1213
- })
1214
- }
1215
- }
1216
-
1217
- sseTransform.end()
1218
- sendJson(clientRes, 503, {
1219
- type: 'error',
1220
- error: { type: 'overloaded_error', message: 'All accounts exhausted or unavailable' },
1221
- })
1222
- }
1223
-
1224
  /**
   * Forward a streaming chat-completions request to one upstream account and
   * pipe the SSE response through the Anthropic transformer.
   *
   * Resolves { done: true } once streaming has started (success path), or
   * { done: false, statusCode, responseBody, responseHeaders, networkError }
   * so the caller's retry loop can classify the failure. The returned promise
   * never rejects.
   *
   * @param {object} account - selected upstream account (url, apiKey, modelId, id, providerKey)
   * @param {object} body - chat-completions request body (model/stream are overridden)
   * @param {import('node:stream').Transform} sseTransform - OpenAI→Anthropic SSE translator, already piped to clientRes
   * @param {import('node:http').ServerResponse} clientRes
   * @returns {Promise<object>}
   */
  _forwardRequestForAnthropicStream(account, body, sseTransform, clientRes) {
    return new Promise(resolve => {
      // Force streaming and substitute the account's concrete model id.
      const newBody = { ...body, model: account.modelId, stream: true }
      const bodyStr = JSON.stringify(newBody)
      const baseUrl = account.url.replace(/\/$/, '')
      let upstreamUrl
      try {
        upstreamUrl = new URL(baseUrl + '/chat/completions')
      } catch {
        // Malformed account URL — report as a (retryable) network-class error.
        return resolve({ done: false, statusCode: 0, responseBody: 'Invalid upstream URL', networkError: true })
      }
      // http or https module is chosen up-front from the account URL.
      const client = selectClient(account.url)
      const startTime = Date.now()

      const requestOptions = {
        hostname: upstreamUrl.hostname,
        port: upstreamUrl.port || (upstreamUrl.protocol === 'https:' ? 443 : 80),
        path: upstreamUrl.pathname + (upstreamUrl.search || ''),
        method: 'POST',
        headers: {
          'authorization': `Bearer ${account.apiKey}`,
          'content-type': 'application/json',
          'content-length': Buffer.byteLength(bodyStr),
        },
      }

      const upstreamReq = client.request(requestOptions, upstreamRes => {
        const { statusCode } = upstreamRes

        if (statusCode >= 200 && statusCode < 300) {
          // Write Anthropic SSE headers if not already sent
          if (!clientRes.headersSent) {
            clientRes.writeHead(200, {
              'content-type': 'text/event-stream',
              'cache-control': 'no-cache',
            })
          }

          // Error handlers on both sides of the pipe to prevent uncaught errors
          upstreamRes.on('error', err => { if (!clientRes.destroyed) clientRes.destroy(err) })
          clientRes.on('error', () => { if (!upstreamRes.destroyed) upstreamRes.destroy() })

          // Pipe upstream SSE through Anthropic translator
          upstreamRes.pipe(sseTransform, { end: true })

          // Record latency-based success only once the stream fully completed.
          upstreamRes.on('end', () => {
            this._accountManager.recordSuccess(account.id, Date.now() - startTime)
          })

          // Client hang-up: tear down both upstream response and request.
          clientRes.on('close', () => {
            if (!upstreamRes.destroyed) upstreamRes.destroy()
            if (!upstreamReq.destroyed) upstreamReq.destroy()
          })

          resolve({ done: true })
        } else {
          // Non-2xx: buffer the full error body for the caller's classifier.
          const chunks = []
          upstreamRes.on('data', chunk => chunks.push(chunk))
          upstreamRes.on('end', () => {
            resolve({
              done: false,
              statusCode,
              responseBody: Buffer.concat(chunks).toString(),
              responseHeaders: upstreamRes.headers,
              networkError: false,
            })
          })
        }
      })

      // TCP / DNS / timeout errors surface here; report as network errors.
      upstreamReq.on('error', err => {
        resolve({
          done: false,
          statusCode: 0,
          responseBody: err.message,
          responseHeaders: {},
          networkError: true,
        })
      })

      // 'timeout' fires but does NOT abort on its own — destroy() routes the
      // abort through the 'error' handler above.
      upstreamReq.setTimeout(this._upstreamTimeoutMs, () => {
        upstreamReq.destroy(new Error(`Upstream request timed out after ${this._upstreamTimeoutMs}ms`))
      })

      upstreamReq.write(bodyStr)
      upstreamReq.end()
    })
  }
1315
-
1316
  /**
   * Forward a streaming chat-completions request to one upstream account and
   * translate the upstream SSE stream into Responses API events on the fly.
   *
   * Resolves { done: true } once streaming has started (success path), or
   * { done: false, statusCode, responseBody, responseHeaders, networkError }
   * so the caller's retry loop can classify the failure. The returned promise
   * never rejects.
   *
   * @param {object} account - selected upstream account (url, apiKey, modelId, id, providerKey)
   * @param {object} body - chat-completions request body (model/stream are overridden)
   * @param {import('node:stream').Transform} sseTransform - chat-completions→Responses SSE translator, already piped to clientRes
   * @param {import('node:http').ServerResponse} clientRes
   * @returns {Promise<object>}
   */
  _forwardRequestForResponsesStream(account, body, sseTransform, clientRes) {
    return new Promise(resolve => {
      // Force streaming and substitute the account's concrete model id.
      const newBody = { ...body, model: account.modelId, stream: true }
      const bodyStr = JSON.stringify(newBody)
      const baseUrl = account.url.replace(/\/$/, '')
      let upstreamUrl
      try {
        upstreamUrl = new URL(baseUrl + '/chat/completions')
      } catch {
        // Malformed account URL — report as a (retryable) network-class error.
        return resolve({ done: false, statusCode: 0, responseBody: 'Invalid upstream URL', networkError: true })
      }

      // http or https module is chosen up-front from the account URL.
      const client = selectClient(account.url)
      const startTime = Date.now()
      const requestOptions = {
        hostname: upstreamUrl.hostname,
        port: upstreamUrl.port || (upstreamUrl.protocol === 'https:' ? 443 : 80),
        path: upstreamUrl.pathname + (upstreamUrl.search || ''),
        method: 'POST',
        headers: {
          'authorization': `Bearer ${account.apiKey}`,
          'content-type': 'application/json',
          'content-length': Buffer.byteLength(bodyStr),
        },
      }

      const upstreamReq = client.request(requestOptions, upstreamRes => {
        const { statusCode } = upstreamRes

        if (statusCode >= 200 && statusCode < 300) {
          // SSE headers for the client, unless an earlier attempt sent them.
          if (!clientRes.headersSent) {
            clientRes.writeHead(200, {
              'content-type': 'text/event-stream',
              'cache-control': 'no-cache',
            })
          }

          // Error handlers on both sides of the pipe to prevent uncaught errors.
          upstreamRes.on('error', err => { if (!clientRes.destroyed) clientRes.destroy(err) })
          clientRes.on('error', () => { if (!upstreamRes.destroyed) upstreamRes.destroy() })

          // Pipe upstream SSE through the Responses translator.
          upstreamRes.pipe(sseTransform, { end: true })
          // Record latency-based success only once the stream fully completed.
          upstreamRes.on('end', () => {
            this._accountManager.recordSuccess(account.id, Date.now() - startTime)
          })

          // Client hang-up: tear down both upstream response and request.
          clientRes.on('close', () => {
            if (!upstreamRes.destroyed) upstreamRes.destroy()
            if (!upstreamReq.destroyed) upstreamReq.destroy()
          })

          resolve({ done: true })
        } else {
          // Non-2xx: buffer the full error body for the caller's classifier.
          const chunks = []
          upstreamRes.on('data', chunk => chunks.push(chunk))
          upstreamRes.on('end', () => {
            resolve({
              done: false,
              statusCode,
              responseBody: Buffer.concat(chunks).toString(),
              responseHeaders: upstreamRes.headers,
              networkError: false,
            })
          })
        }
      })

      // TCP / DNS / timeout errors surface here; report as network errors.
      upstreamReq.on('error', err => {
        resolve({
          done: false,
          statusCode: 0,
          responseBody: err.message,
          responseHeaders: {},
          networkError: true,
        })
      })

      // 'timeout' fires but does NOT abort on its own — destroy() routes the
      // abort through the 'error' handler above.
      upstreamReq.setTimeout(this._upstreamTimeoutMs, () => {
        upstreamReq.destroy(new Error(`Upstream request timed out after ${this._upstreamTimeoutMs}ms`))
      })

      upstreamReq.write(bodyStr)
      upstreamReq.end()
    })
  }
1404
-
1405
- /**
1406
- * 📖 Internal version of chat completions handler that takes a pre-parsed body.
1407
- * 📖 Used by the Anthropic JSON translation path to avoid re-parsing.
1408
- */
1409
- async _handleChatCompletionsInternal(body, clientRes) {
1410
- // 📖 Reuse the exact same logic as _handleChatCompletions but with pre-parsed body
1411
- if (this._compressionOpts && Array.isArray(body.messages)) {
1412
- body = { ...body, messages: compressContext(body.messages, this._compressionOpts) }
1413
- }
1414
- if (this._thinkingConfig) {
1415
- body = applyThinkingBudget(body, this._thinkingConfig)
1416
- }
1417
-
1418
- const fingerprint = createHash('sha256')
1419
- .update(JSON.stringify(body.messages?.slice(-1) ?? []))
1420
- .digest('hex')
1421
- .slice(0, 16)
1422
-
1423
- const requestedModel = typeof body.model === 'string'
1424
- ? body.model.replace(/^fcm-proxy\//, '')
1425
- : undefined
1426
-
1427
- if (requestedModel && !this._accountManager.hasAccountsForModel(requestedModel)) {
1428
- return sendJson(clientRes, 404, {
1429
- error: 'Model not found',
1430
- message: `Model '${requestedModel}' is not available.`,
1431
- })
1432
- }
1433
-
1434
- for (let attempt = 0; attempt < this._retries; attempt++) {
1435
- const delay = this._retryDelays[Math.min(attempt, this._retryDelays.length - 1)]
1436
- if (delay > 0) await new Promise(r => setTimeout(r, delay + Math.random() * 100))
1437
-
1438
- const selectOpts = attempt === 0
1439
- ? { sessionFingerprint: fingerprint, requestedModel }
1440
- : { requestedModel }
1441
- const account = this._accountManager.selectAccount(selectOpts)
1442
- if (!account) break
1443
-
1444
- const result = await this._forwardRequest(account, body, clientRes, { requestedModel })
1445
- if (result.done) return
1446
-
1447
- const { statusCode, responseBody, responseHeaders, networkError } = result
1448
- const classified = classifyError(
1449
- networkError ? 0 : statusCode,
1450
- responseBody || '',
1451
- responseHeaders || {}
1452
- )
1453
- this._accountManager.recordFailure(account.id, classified, { providerKey: account.providerKey })
1454
- if (!classified.shouldRetry) {
1455
- return sendJson(clientRes, statusCode || 500, responseBody || JSON.stringify({ error: 'Upstream error' }))
1456
- }
1457
- }
1458
-
1459
- sendJson(clientRes, 503, { error: 'All accounts exhausted or unavailable' })
1460
- }
1461
-
1462
- // ── Hot-reload accounts ─────────────────────────────────────────────────────
1463
-
1464
- /**
1465
- * 📖 Atomically swap the account list and rebuild the AccountManager.
1466
- * 📖 Used by the daemon when config changes (new API keys, providers toggled).
1467
- * 📖 In-flight requests on old accounts will finish naturally.
1468
- *
1469
- * @param {Array} accounts — new account list
1470
- * @param {{ model?: string|null, modelOpus?: string|null, modelSonnet?: string|null, modelHaiku?: string|null }} anthropicRouting
1471
- */
1472
- updateAccounts(accounts, anthropicRouting = this._anthropicRouting) {
1473
- this._accounts = accounts
1474
- this._anthropicRouting = normalizeAnthropicRouting(anthropicRouting)
1475
- this._accountManager = new AccountManager(accounts, {})
1476
- }
1477
- }