free-coding-models 0.1.82 → 0.1.84
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +53 -51
- package/bin/free-coding-models.js +429 -4276
- package/package.json +2 -2
- package/sources.js +3 -2
- package/src/account-manager.js +600 -0
- package/src/analysis.js +197 -0
- package/{lib → src}/config.js +122 -0
- package/src/constants.js +116 -0
- package/src/error-classifier.js +154 -0
- package/src/favorites.js +98 -0
- package/src/key-handler.js +1005 -0
- package/src/log-reader.js +174 -0
- package/src/model-merger.js +78 -0
- package/src/openclaw.js +131 -0
- package/src/opencode-sync.js +159 -0
- package/src/opencode.js +952 -0
- package/src/overlays.js +840 -0
- package/src/ping.js +186 -0
- package/src/provider-metadata.js +218 -0
- package/src/provider-quota-fetchers.js +319 -0
- package/src/proxy-server.js +543 -0
- package/src/quota-capabilities.js +112 -0
- package/src/render-helpers.js +239 -0
- package/src/render-table.js +567 -0
- package/src/request-transformer.js +180 -0
- package/src/setup.js +105 -0
- package/src/telemetry.js +382 -0
- package/src/tier-colors.js +37 -0
- package/src/token-stats.js +310 -0
- package/src/token-usage-reader.js +63 -0
- package/src/updater.js +237 -0
- package/src/usage-reader.js +245 -0
- package/{lib → src}/utils.js +55 -0
|
@@ -0,0 +1,543 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @file src/proxy-server.js
|
|
3
|
+
* @description Multi-account rotation proxy server with SSE streaming,
|
|
4
|
+
* token stats tracking, and persistent request logging.
|
|
5
|
+
*
|
|
6
|
+
* Design:
|
|
7
|
+
* - Binds to 127.0.0.1 only (never 0.0.0.0)
|
|
8
|
+
* - SSE is piped through without buffering (upstreamRes.pipe(clientRes))
|
|
9
|
+
* - HTTP/HTTPS module is chosen BEFORE the request is created (single code-path)
|
|
10
|
+
* - x-ratelimit-* headers are stripped from all responses forwarded to clients
|
|
11
|
+
* - Retry loop: first attempt uses sticky session fingerprint; subsequent
|
|
12
|
+
* retries use fresh P2C to avoid hitting the same failed account
|
|
13
|
+
*
|
|
14
|
+
* @exports ProxyServer
|
|
15
|
+
*/
|
|
16
|
+
|
|
17
|
+
import http from 'node:http'
|
|
18
|
+
import https from 'node:https'
|
|
19
|
+
import { AccountManager } from './account-manager.js'
|
|
20
|
+
import { classifyError } from './error-classifier.js'
|
|
21
|
+
import { applyThinkingBudget, compressContext } from './request-transformer.js'
|
|
22
|
+
import { TokenStats } from './token-stats.js'
|
|
23
|
+
import { createHash } from 'node:crypto'
|
|
24
|
+
|
|
25
|
+
// ─── Helpers ─────────────────────────────────────────────────────────────────
|
|
26
|
+
|
|
27
|
+
/**
 * Pick the transport module (http or https) matching the URL scheme.
 * MUST be called before creating the request (single code-path).
 *
 * @param {string} url
 * @returns {typeof import('http') | typeof import('https')}
 */
function selectClient(url) {
  if (url.startsWith('https')) {
    return https
  }
  return http
}
|
|
37
|
+
|
|
38
|
+
/**
 * Produce a shallow copy of the headers with every x-ratelimit-* entry
 * removed (matched case-insensitively). The input object is not mutated.
 *
 * @param {Record<string, string | string[]>} headers
 * @returns {Record<string, string | string[]>}
 */
function stripRateLimitHeaders(headers) {
  const kept = Object.entries(headers).filter(
    ([name]) => !name.toLowerCase().startsWith('x-ratelimit')
  )
  return Object.fromEntries(kept)
}
|
|
53
|
+
|
|
54
|
+
/**
 * Collect the full body of an http.IncomingMessage and resolve it as a
 * UTF-8 string. Rejects if the underlying stream emits an error.
 *
 * @param {http.IncomingMessage} req
 * @returns {Promise<string>}
 */
function readBody(req) {
  return new Promise((resolve, reject) => {
    const parts = []
    req.on('error', reject)
    req.on('data', part => {
      parts.push(part)
    })
    req.on('end', () => {
      resolve(Buffer.concat(parts).toString())
    })
  })
}
|
|
68
|
+
|
|
69
|
+
/**
 * Write a JSON response to the client. Objects are serialised; strings are
 * assumed to already be valid JSON and sent as-is. No-op if headers were
 * already committed (e.g. an SSE pipe is in progress).
 *
 * @param {http.ServerResponse} res
 * @param {number} statusCode
 * @param {object | string} body
 */
function sendJson(res, statusCode, body) {
  if (res.headersSent) {
    return
  }
  let payload
  if (typeof body === 'string') {
    payload = body
  } else {
    payload = JSON.stringify(body)
  }
  res.writeHead(statusCode, { 'content-type': 'application/json' })
  res.end(payload)
}
|
|
82
|
+
|
|
83
|
+
// ─── ProxyServer ─────────────────────────────────────────────────────────────
|
|
84
|
+
|
|
85
|
+
export class ProxyServer {
  /**
   * Construct the proxy. No network activity happens until start().
   *
   * @param {{
   *   port?: number,
   *   accounts?: Array<{ id: string, providerKey: string, apiKey: string, modelId: string, url: string }>,
   *   retries?: number,
   *   proxyApiKey?: string,
   *   accountManagerOpts?: object,
   *   tokenStatsOpts?: object,
   *   thinkingConfig?: { mode: string, budget_tokens?: number },
   *   compressionOpts?: { level?: number, toolResultMaxChars?: number, thinkingMaxChars?: number, maxTotalChars?: number },
   *   upstreamTimeoutMs?: number
   * }} opts - port 0 means "pick any free port"; proxyApiKey null disables auth.
   */
  constructor({
    port = 0,
    accounts = [],
    retries = 3,
    proxyApiKey = null,
    accountManagerOpts = {},
    tokenStatsOpts = {},
    thinkingConfig,
    compressionOpts,
    upstreamTimeoutMs = 45_000,
  } = {}) {
    this._port = port
    this._retries = retries
    this._thinkingConfig = thinkingConfig
    this._compressionOpts = compressionOpts
    this._proxyApiKey = proxyApiKey
    this._accounts = accounts
    this._upstreamTimeoutMs = upstreamTimeoutMs
    this._accountManager = new AccountManager(accounts, accountManagerOpts)
    this._tokenStats = new TokenStats(tokenStatsOpts)
    this._running = false
    this._listeningPort = null
    // Server is created eagerly but does not listen until start() is called.
    this._server = http.createServer((req, res) => this._handleRequest(req, res))
  }

  /**
   * Start listening on 127.0.0.1 (loopback only, never 0.0.0.0).
   *
   * @returns {Promise<{ port: number }>} the actual bound port (useful when port=0)
   */
  start() {
    return new Promise((resolve, reject) => {
      // 'error' listener is removed once listening succeeds so later runtime
      // errors on the server do not call a stale reject.
      this._server.once('error', reject)
      this._server.listen(this._port, '127.0.0.1', () => {
        this._server.removeListener('error', reject)
        this._running = true
        this._listeningPort = this._server.address().port
        resolve({ port: this._listeningPort })
      })
    })
  }

  /**
   * Save stats and close the server.
   *
   * @returns {Promise<void>}
   */
  stop() {
    // Persist token stats before tearing down so nothing is lost on shutdown.
    this._tokenStats.save()
    return new Promise(resolve => {
      this._server.close(() => {
        this._running = false
        this._listeningPort = null
        resolve()
      })
    })
  }

  /**
   * Snapshot of server state for the TUI / status consumers.
   *
   * @returns {{ running: boolean, port: number | null, accountCount: number, healthByAccount: object }}
   */
  getStatus() {
    return {
      running: this._running,
      port: this._listeningPort,
      accountCount: this._accounts.length,
      healthByAccount: this._accountManager.getAllHealth(),
    }
  }

  /**
   * Check the client's Authorization header against the configured proxy key.
   * When no proxyApiKey is configured, every request is authorized.
   *
   * @param {http.IncomingMessage} req
   * @returns {boolean}
   */
  _isAuthorized(req) {
    if (!this._proxyApiKey) return true
    const authorization = req.headers.authorization
    if (typeof authorization !== 'string') return false
    // NOTE(review): plain === comparison of the bearer token; a
    // timing-safe compare would be preferable — confirm threat model.
    return authorization === `Bearer ${this._proxyApiKey}`
  }

  // ── Request routing ────────────────────────────────────────────────────────

  /**
   * Top-level router for all incoming requests. Auth check first, then exact
   * method+path dispatch (URLs with query strings will fall to 404).
   *
   * @param {http.IncomingMessage} req
   * @param {http.ServerResponse} res
   */
  _handleRequest(req, res) {
    if (!this._isAuthorized(req)) {
      return sendJson(res, 401, { error: 'Unauthorized' })
    }

    if (req.method === 'GET' && req.url === '/v1/models') {
      this._handleModels(res)
    } else if (req.method === 'POST' && req.url === '/v1/chat/completions') {
      // Catch async failures so an exception never leaves the response hanging.
      this._handleChatCompletions(req, res).catch(err => {
        sendJson(res, 500, { error: 'Internal server error', message: err.message })
      })
    } else if (req.method === 'POST' && (req.url === '/v1/completions' || req.url === '/v1/responses')) {
      // These legacy/alternative OpenAI endpoints are not supported by the proxy.
      // Return 501 (not 404) so callers get a clear signal instead of silently failing.
      sendJson(res, 501, {
        error: 'Not Implemented',
        message: `${req.url} is not supported by this proxy. Use POST /v1/chat/completions instead.`,
      })
    } else {
      sendJson(res, 404, { error: 'Not found' })
    }
  }

  // ── GET /v1/models ─────────────────────────────────────────────────────────

  /**
   * List the distinct public model IDs served by this proxy (deduplicated
   * across accounts) in OpenAI list format.
   *
   * @param {http.ServerResponse} res
   */
  _handleModels(res) {
    const seen = new Set()
    const data = []
    for (const acct of this._accounts) {
      // An account may expose a public alias (proxyModelId) distinct from the
      // upstream model ID it actually calls.
      const publicModelId = acct.proxyModelId || acct.modelId
      if (!seen.has(publicModelId)) {
        seen.add(publicModelId)
        data.push({
          id: publicModelId,
          object: 'model',
          created: Math.floor(Date.now() / 1000),
          owned_by: 'proxy',
        })
      }
    }
    sendJson(res, 200, { object: 'list', data })
  }

  // ── POST /v1/chat/completions ──────────────────────────────────────────────

  /**
   * Handle a chat-completions request: parse, transform, then retry across
   * accounts until one succeeds or the retry budget is exhausted.
   *
   * @param {http.IncomingMessage} clientReq
   * @param {http.ServerResponse} clientRes
   * @returns {Promise<void>}
   */
  async _handleChatCompletions(clientReq, clientRes) {
    // 1. Read and parse request body
    const rawBody = await readBody(clientReq)
    let body
    try {
      body = JSON.parse(rawBody)
    } catch {
      return sendJson(clientRes, 400, { error: 'Invalid JSON body' })
    }

    // 2. Optional transformations (both functions return new objects, no mutation)
    if (this._compressionOpts && Array.isArray(body.messages)) {
      body = { ...body, messages: compressContext(body.messages, this._compressionOpts) }
    }
    if (this._thinkingConfig) {
      body = applyThinkingBudget(body, this._thinkingConfig)
    }

    // 3. Session fingerprint for first-attempt sticky routing.
    // Derived from the LAST message only, so consecutive turns of the same
    // conversation land on different fingerprints by design.
    const fingerprint = createHash('sha256')
      .update(JSON.stringify(body.messages?.slice(-1) ?? []))
      .digest('hex')
      .slice(0, 16)

    // Clients may address models with an "fcm-proxy/" prefix; strip it before lookup.
    const requestedModel = typeof body.model === 'string'
      ? body.model.replace(/^fcm-proxy\//, '')
      : undefined

    // 4. Early check: if a specific model is requested but has no registered accounts,
    // return 404 immediately with a clear message rather than silently failing.
    if (requestedModel && !this._accountManager.hasAccountsForModel(requestedModel)) {
      return sendJson(clientRes, 404, {
        error: 'Model not found',
        message: `Model '${requestedModel}' is not available through this proxy. Use GET /v1/models to list available models.`,
      })
    }

    // 5. Retry loop
    for (let attempt = 0; attempt < this._retries; attempt++) {
      // First attempt: respect sticky session.
      // Subsequent retries: fresh P2C (don't hammer the same failed account).
      const selectOpts = attempt === 0
        ? { sessionFingerprint: fingerprint, requestedModel }
        : { requestedModel }
      const account = this._accountManager.selectAccount(selectOpts)
      if (!account) break // No available accounts → fall through to 503

      const result = await this._forwardRequest(account, body, clientRes)

      // Response fully sent (success JSON or SSE pipe established)
      if (result.done) return

      // Error path: classify → record → retry or forward error
      const { statusCode, responseBody, responseHeaders, networkError } = result
      const classified = classifyError(
        networkError ? 0 : statusCode,
        responseBody || '',
        responseHeaders || {}
      )

      this._accountManager.recordFailure(account.id, classified, { providerKey: account.providerKey })
      // Even failed responses may carry rate-limit headers worth persisting.
      if (responseHeaders) {
        const quotaUpdated = this._accountManager.updateQuota(account.id, responseHeaders)
        this._persistQuotaSnapshot(account, quotaUpdated)
      }

      if (!classified.shouldRetry) {
        // Non-retryable (auth error, unknown) → return upstream response directly
        return sendJson(
          clientRes,
          statusCode || 500,
          responseBody || JSON.stringify({ error: 'Upstream error' })
        )
      }
      // shouldRetry === true → next attempt
    }

    // All retries consumed, or no accounts available from the start
    sendJson(clientRes, 503, { error: 'All accounts exhausted or unavailable' })
  }

  // ── Upstream forwarding ────────────────────────────────────────────────────

  /**
   * Forward one attempt to the upstream API.
   *
   * Resolves with:
   *   { done: true }
   *     — The response has been committed to clientRes (success JSON sent, or
   *       SSE pipe established). The retry loop must return immediately.
   *
   *   { done: false, statusCode, responseBody, responseHeaders, networkError }
   *     — An error occurred; the retry loop decides whether to retry or give up.
   *
   * @param {{ id: string, apiKey: string, modelId: string, url: string }} account
   * @param {object} body
   * @param {http.ServerResponse} clientRes
   * @returns {Promise<{ done: boolean }>}
   */
  _forwardRequest(account, body, clientRes) {
    return new Promise(resolve => {
      // Replace client-supplied model name with the account's model ID
      const newBody = { ...body, model: account.modelId }
      const bodyStr = JSON.stringify(newBody)

      // Build the full upstream URL from the account's base URL
      const baseUrl = account.url.replace(/\/$/, '')
      const upstreamUrl = new URL(baseUrl + '/chat/completions')

      // Choose http or https module BEFORE creating the request
      const client = selectClient(account.url)
      const startTime = Date.now()

      const requestOptions = {
        hostname: upstreamUrl.hostname,
        port: upstreamUrl.port || (upstreamUrl.protocol === 'https:' ? 443 : 80),
        path: upstreamUrl.pathname + (upstreamUrl.search || ''),
        method: 'POST',
        headers: {
          'authorization': `Bearer ${account.apiKey}`,
          'content-type': 'application/json',
          'content-length': Buffer.byteLength(bodyStr),
        },
      }

      const upstreamReq = client.request(requestOptions, upstreamRes => {
        const { statusCode } = upstreamRes
        const headers = upstreamRes.headers
        const contentType = headers['content-type'] || ''
        const isSSE = contentType.includes('text/event-stream')

        if (statusCode >= 200 && statusCode < 300) {
          if (isSSE) {
            // ── SSE passthrough: MUST NOT buffer ──────────────────────────
            const strippedHeaders = stripRateLimitHeaders(headers)
            clientRes.writeHead(statusCode, {
              ...strippedHeaders,
              'content-type': 'text/event-stream',
              'cache-control': 'no-cache',
            })

            // Tap the data stream to capture usage from the last data line.
            // Register BEFORE pipe() so both listeners share the same event queue.
            // NOTE(review): assumes a `data: ` line is never split across two
            // chunks; usage would be silently missed in that case — confirm.
            let lastChunkData = ''
            upstreamRes.on('data', chunk => {
              const text = chunk.toString()
              const lines = text.split('\n')
              for (const line of lines) {
                if (line.startsWith('data: ') && !line.includes('[DONE]')) {
                  lastChunkData = line.slice(6).trim()
                }
              }
            })

            upstreamRes.on('end', () => {
              let promptTokens = 0
              let completionTokens = 0
              try {
                const parsed = JSON.parse(lastChunkData)
                if (parsed.usage) {
                  promptTokens = parsed.usage.prompt_tokens || 0
                  completionTokens = parsed.usage.completion_tokens || 0
                }
              } catch { /* no usage in stream — ignore */ }
              // Always record every upstream attempt so the log page shows real requests
              this._tokenStats.record({
                accountId: account.id,
                modelId: account.modelId,
                providerKey: account.providerKey,
                statusCode,
                requestType: 'chat.completions',
                promptTokens,
                completionTokens,
                latencyMs: Date.now() - startTime,
                success: true,
              })
              this._accountManager.recordSuccess(account.id, Date.now() - startTime)
              const quotaUpdated = this._accountManager.updateQuota(account.id, headers)
              this._persistQuotaSnapshot(account, quotaUpdated)
            })

            // Pipe after listeners are registered; upstream → client, no buffering
            upstreamRes.pipe(clientRes)

            // ── Downstream disconnect cleanup ─────────────────────────────
            // If the client closes its connection mid-stream, destroy the
            // upstream request and response promptly so we don't hold the
            // upstream connection open indefinitely.
            clientRes.on('close', () => {
              if (!upstreamRes.destroyed) upstreamRes.destroy()
              if (!upstreamReq.destroyed) upstreamReq.destroy()
            })

            // The pipe handles the rest asynchronously; signal done to retry loop
            resolve({ done: true })
          } else {
            // ── JSON response ─────────────────────────────────────────────
            const chunks = []
            upstreamRes.on('data', chunk => chunks.push(chunk))
            upstreamRes.on('end', () => {
              const responseBody = Buffer.concat(chunks).toString()
              const latencyMs = Date.now() - startTime

              const quotaUpdated = this._accountManager.updateQuota(account.id, headers)
              this._accountManager.recordSuccess(account.id, latencyMs)
              this._persistQuotaSnapshot(account, quotaUpdated)

              // Always record every upstream attempt so the log page shows real requests.
              // Extract tokens if upstream provides them; default to 0 when not present.
              let promptTokens = 0
              let completionTokens = 0
              try {
                const parsed = JSON.parse(responseBody)
                if (parsed.usage) {
                  promptTokens = parsed.usage.prompt_tokens || 0
                  completionTokens = parsed.usage.completion_tokens || 0
                }
              } catch { /* non-JSON body — tokens stay 0 */ }
              this._tokenStats.record({
                accountId: account.id,
                modelId: account.modelId,
                providerKey: account.providerKey,
                statusCode,
                requestType: 'chat.completions',
                promptTokens,
                completionTokens,
                latencyMs,
                success: true,
              })

              // Forward stripped response to client
              const strippedHeaders = stripRateLimitHeaders(headers)
              clientRes.writeHead(statusCode, {
                ...strippedHeaders,
                'content-type': 'application/json',
              })
              clientRes.end(responseBody)
              resolve({ done: true })
            })
          }
        } else {
          // ── Error response: buffer for classification in retry loop ─────
          const chunks = []
          upstreamRes.on('data', chunk => chunks.push(chunk))
          upstreamRes.on('end', () => {
            const latencyMs = Date.now() - startTime
            // Log every failed upstream attempt so the log page shows real requests
            this._tokenStats.record({
              accountId: account.id,
              modelId: account.modelId,
              providerKey: account.providerKey,
              statusCode,
              requestType: 'chat.completions',
              promptTokens: 0,
              completionTokens: 0,
              latencyMs,
              success: false,
            })
            resolve({
              done: false,
              statusCode,
              responseBody: Buffer.concat(chunks).toString(),
              responseHeaders: headers,
              networkError: false,
            })
          })
        }
      })

      upstreamReq.on('error', err => {
        // TCP / DNS / timeout errors — log as network failure
        const latencyMs = Date.now() - startTime
        this._tokenStats.record({
          accountId: account.id,
          modelId: account.modelId,
          providerKey: account.providerKey,
          statusCode: 0,
          requestType: 'chat.completions',
          promptTokens: 0,
          completionTokens: 0,
          latencyMs,
          success: false,
        })
        // TCP / DNS / timeout errors
        resolve({
          done: false,
          statusCode: 0,
          responseBody: err.message,
          responseHeaders: {},
          networkError: true,
        })
      })

      // Abort the upstream request if it exceeds the configured timeout.
      // This prevents indefinite hangs (e.g. nvidia returning 504 after 302 s).
      // The 'timeout' event fires but does NOT automatically abort; we must call destroy().
      upstreamReq.setTimeout(this._upstreamTimeoutMs, () => {
        upstreamReq.destroy(new Error(`Upstream request timed out after ${this._upstreamTimeoutMs}ms`))
      })

      upstreamReq.write(bodyStr)
      upstreamReq.end()
    })
  }

  /**
   * Persist a quota snapshot for the given account into TokenStats.
   * Called after every `AccountManager.updateQuota()` so TUI can read fresh data.
   * Never exposes apiKey.
   *
   * @param {{ id: string, providerKey?: string, modelId?: string }} account
   * @param {boolean} quotaUpdated - skip persisting when updateQuota made no change
   */
  _persistQuotaSnapshot(account, quotaUpdated = true) {
    if (!quotaUpdated) return
    const health = this._accountManager.getHealth(account.id)
    if (!health) return
    this._tokenStats.updateQuotaSnapshot(account.id, {
      quotaPercent: health.quotaPercent,
      ...(account.providerKey !== undefined && { providerKey: account.providerKey }),
      ...(account.modelId !== undefined && { modelId: account.modelId }),
    })
  }
}
|
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @file src/quota-capabilities.js
|
|
3
|
+
* @description Provider quota telemetry and Usage-column behavior map.
|
|
4
|
+
*
|
|
5
|
+
* Describes how we can observe quota state for each provider:
|
|
6
|
+
* - header: Provider sends x-ratelimit-remaining / x-ratelimit-limit headers
|
|
7
|
+
* - endpoint: Provider has a dedicated usage/quota REST endpoint we can poll
|
|
8
|
+
* - unknown: No reliable quota signal available
|
|
9
|
+
*
|
|
10
|
+
* The TUI needs an extra distinction beyond telemetry transport:
|
|
11
|
+
* - `usageDisplay: 'percent'` means we can show a trustworthy remaining %.
|
|
12
|
+
* - `usageDisplay: 'ok'` means Usage is not meaningfully measurable as a live %,
|
|
13
|
+
* so the table shows a green status dot instead of a misleading number.
|
|
14
|
+
*
|
|
15
|
+
* `resetCadence` tells the reader when a stored snapshot should be invalidated
|
|
16
|
+
* even if it is still within the generic freshness TTL.
|
|
17
|
+
*
|
|
18
|
+
* supportsEndpoint (optional, for openrouter/siliconflow):
|
|
19
|
+
* true — provider has a known usage endpoint
|
|
20
|
+
* false — no endpoint, header-only or unknown
|
|
21
|
+
*
|
|
22
|
+
* @exports PROVIDER_CAPABILITIES — full map keyed by providerKey (matches sources.js)
|
|
23
|
+
* @exports getQuotaTelemetry(providerKey) — returns capability object (defaults to unknown)
|
|
24
|
+
* @exports isKnownQuotaTelemetry(providerKey) — true when telemetryType !== 'unknown'
|
|
25
|
+
*/
|
|
26
|
+
|
|
27
|
+
/**
 * @typedef {Object} ProviderCapability
 * @property {'header'|'endpoint'|'unknown'} telemetryType
 * @property {boolean} [supportsEndpoint]
 * @property {'percent'|'ok'} usageDisplay
 * @property {'rolling'|'daily'|'unknown'|'none'} resetCadence
 */

/**
 * Build a single capability record (keeps the table below compact).
 *
 * @param {'header'|'endpoint'|'unknown'} telemetryType
 * @param {boolean} supportsEndpoint
 * @param {'percent'|'ok'} usageDisplay
 * @param {'rolling'|'daily'|'unknown'|'none'} resetCadence
 * @returns {ProviderCapability}
 */
const cap = (telemetryType, supportsEndpoint, usageDisplay, resetCadence) => ({
  telemetryType,
  supportsEndpoint,
  usageDisplay,
  resetCadence,
})

/** @type {Record<string, ProviderCapability>} */
export const PROVIDER_CAPABILITIES = {
  // Providers that return x-ratelimit-remaining / x-ratelimit-limit headers
  nvidia: cap('header', false, 'ok', 'none'),
  groq: cap('header', false, 'percent', 'daily'),
  cerebras: cap('header', false, 'percent', 'unknown'),
  sambanova: cap('header', false, 'percent', 'unknown'),
  deepinfra: cap('header', false, 'percent', 'unknown'),
  fireworks: cap('header', false, 'percent', 'unknown'),
  together: cap('header', false, 'percent', 'unknown'),
  hyperbolic: cap('header', false, 'percent', 'unknown'),
  scaleway: cap('header', false, 'percent', 'unknown'),
  googleai: cap('header', false, 'percent', 'daily'),
  codestral: cap('header', false, 'percent', 'daily'),
  perplexity: cap('header', false, 'percent', 'unknown'),
  qwen: cap('header', false, 'percent', 'unknown'),

  // Providers that have a dedicated usage/credits endpoint
  openrouter: cap('endpoint', true, 'percent', 'unknown'),
  siliconflow: cap('endpoint', true, 'ok', 'unknown'),

  // Providers with no reliable quota signal
  huggingface: cap('unknown', false, 'ok', 'none'),
  replicate: cap('unknown', false, 'ok', 'none'),
  cloudflare: cap('unknown', false, 'ok', 'daily'),
  zai: cap('unknown', false, 'ok', 'none'),
  iflow: cap('unknown', false, 'ok', 'none'),
}
|
|
63
|
+
|
|
64
|
+
/** Fallback capability for providers that are not in the map. */
const UNKNOWN_CAPABILITY = {
  telemetryType: 'unknown',
  supportsEndpoint: false,
  usageDisplay: 'ok',
  resetCadence: 'unknown',
}

/**
 * Look up the quota telemetry capability for a provider.
 * Unrecognized providers fall back to the 'unknown' capability
 * (`{ telemetryType: 'unknown', supportsEndpoint: false, ... }`).
 *
 * @param {string} providerKey - Provider key matching sources.js (e.g. 'groq', 'openrouter')
 * @returns {ProviderCapability}
 */
export function getQuotaTelemetry(providerKey) {
  const capability = PROVIDER_CAPABILITIES[providerKey]
  return capability ?? UNKNOWN_CAPABILITY
}
|
|
77
|
+
|
|
78
|
+
/**
 * Whether we have a reliable quota telemetry signal for this provider,
 * via either response headers or a dedicated endpoint.
 *
 * Providers classified 'unknown' (quota must be inferred) return false.
 *
 * @param {string} providerKey
 * @returns {boolean}
 */
export function isKnownQuotaTelemetry(providerKey) {
  const { telemetryType } = getQuotaTelemetry(providerKey)
  return telemetryType !== 'unknown'
}
|
|
90
|
+
|
|
91
|
+
/**
 * Whether the Usage column can show a real remaining percentage for the
 * given provider (as opposed to a plain status indicator).
 *
 * @param {string} providerKey
 * @returns {boolean}
 */
export function supportsUsagePercent(providerKey) {
  const { usageDisplay } = getQuotaTelemetry(providerKey)
  return usageDisplay === 'percent'
}
|
|
101
|
+
|
|
102
|
+
/**
 * Whether the provider's quota commonly resets on a daily cadence.
 * Lets the usage reader invalidate yesterday's snapshots right after
 * midnight instead of waiting for the generic TTL window to lapse.
 *
 * @param {string} providerKey
 * @returns {boolean}
 */
export function usageResetsDaily(providerKey) {
  const { resetCadence } = getQuotaTelemetry(providerKey)
  return resetCadence === 'daily'
}
|