free-coding-models 0.2.17 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +71 -0
- package/README.md +118 -44
- package/bin/fcm-proxy-daemon.js +239 -0
- package/bin/free-coding-models.js +146 -37
- package/package.json +3 -2
- package/src/account-manager.js +34 -0
- package/src/anthropic-translator.js +440 -0
- package/src/cli-help.js +108 -0
- package/src/config.js +25 -1
- package/src/daemon-manager.js +527 -0
- package/src/endpoint-installer.js +45 -19
- package/src/key-handler.js +324 -148
- package/src/opencode.js +47 -44
- package/src/overlays.js +282 -207
- package/src/proxy-server.js +746 -10
- package/src/proxy-sync.js +564 -0
- package/src/proxy-topology.js +80 -0
- package/src/render-helpers.js +4 -2
- package/src/render-table.js +56 -49
- package/src/responses-translator.js +423 -0
- package/src/tool-launchers.js +343 -26
- package/src/utils.js +31 -8
|
@@ -0,0 +1,440 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @file src/anthropic-translator.js
|
|
3
|
+
* @description Bidirectional wire format translation between Anthropic Messages API
|
|
4
|
+
* and OpenAI Chat Completions API.
|
|
5
|
+
*
|
|
6
|
+
* 📖 This is the key module that enables Claude Code to work natively through the
|
|
7
|
+
* FCM proxy without needing the external "free-claude-code" Python proxy.
|
|
8
|
+
* Claude Code sends requests in Anthropic format (POST /v1/messages) and this
|
|
9
|
+
* module translates them to OpenAI format for the upstream providers, then
|
|
10
|
+
* translates the responses back.
|
|
11
|
+
*
|
|
12
|
+
* 📖 Supports both JSON and SSE streaming modes.
|
|
13
|
+
*
|
|
14
|
+
* @functions
|
|
15
|
+
* → translateAnthropicToOpenAI(body) — Convert Anthropic Messages request → OpenAI chat completions
|
|
16
|
+
* → translateOpenAIToAnthropic(openaiResponse, requestModel) — Convert OpenAI JSON response → Anthropic
|
|
17
|
+
* → createAnthropicSSETransformer(requestModel) — Create a Transform stream for SSE translation
|
|
18
|
+
* → estimateAnthropicTokens(body) — Fast local token estimate for `/v1/messages/count_tokens`
|
|
19
|
+
*
|
|
20
|
+
* @exports translateAnthropicToOpenAI, translateOpenAIToAnthropic, createAnthropicSSETransformer, estimateAnthropicTokens
|
|
21
|
+
* @see src/proxy-server.js — routes /v1/messages through this translator
|
|
22
|
+
*/
|
|
23
|
+
|
|
24
|
+
import { Transform } from 'node:stream'
|
|
25
|
+
import { randomUUID } from 'node:crypto'
|
|
26
|
+
|
|
27
|
+
// 📖 Return the first non-empty string among the known thinking-text fields
// 📖 (`text`, `thinking`, `summary`), or '' when none is usable.
function normalizeThinkingText(block) {
  if (block === null || typeof block !== 'object') return ''
  for (const field of ['text', 'thinking', 'summary']) {
    const value = block[field]
    if (typeof value === 'string' && value) return value
  }
  return ''
}
|
|
34
|
+
|
|
35
|
+
// 📖 Flatten Anthropic content blocks into an array of text fragments.
// 📖 Thinking blocks are wrapped in <thinking> tags only when includeThinking is set;
// 📖 empty fragments are dropped.
function contentBlocksToText(blocks, { includeThinking = false } = {}) {
  const parts = []
  for (const block of blocks) {
    const type = block?.type
    if (type === 'thinking' && includeThinking) {
      const thinkingText = normalizeThinkingText(block)
      if (thinkingText) parts.push(`<thinking>${thinkingText}</thinking>`)
    } else if (type === 'redacted_thinking' && includeThinking) {
      parts.push('<thinking>[redacted]</thinking>')
    } else if (type === 'text' && block.text) {
      parts.push(block.text)
    }
  }
  return parts
}
|
|
49
|
+
|
|
50
|
+
// 📖 Pull provider reasoning output from an OpenAI message and normalize it into
// 📖 Anthropic thinking blocks. An array `reasoning` field wins; otherwise a
// 📖 non-blank `reasoning_content` string is used; otherwise no blocks.
function extractReasoningBlocks(message = {}) {
  const { reasoning, reasoning_content: reasoningContent } = message
  if (Array.isArray(reasoning)) {
    const blocks = []
    for (const entry of reasoning) {
      const text = normalizeThinkingText(entry)
      if (text) blocks.push({ type: 'thinking', thinking: text })
    }
    return blocks
  }
  if (typeof reasoningContent === 'string' && reasoningContent.trim()) {
    return [{ type: 'thinking', thinking: reasoningContent.trim() }]
  }
  return []
}
|
|
62
|
+
|
|
63
|
+
/**
 * 📖 Translate an Anthropic Messages API request body to OpenAI Chat Completions format.
 *
 * Anthropic format:
 *   { model, messages: [{role, content}], system, max_tokens, stream, temperature, top_p, stop_sequences }
 *
 * OpenAI format:
 *   { model, messages: [{role, content}], max_tokens, stream, temperature, top_p, stop }
 *
 * @param {object} body — Anthropic request body
 * @returns {object} — OpenAI-compatible request body
 */
export function translateAnthropicToOpenAI(body) {
  // 📖 Guard against null/undefined/non-object input
  if (!body || typeof body !== 'object') return { model: '', messages: [], stream: false }

  const openaiMessages = []

  // 📖 Anthropic "system" field → OpenAI system message
  if (body.system) {
    if (typeof body.system === 'string') {
      openaiMessages.push({ role: 'system', content: body.system })
    } else if (Array.isArray(body.system)) {
      // 📖 Anthropic supports system as array of content blocks
      const text = contentBlocksToText(body.system, { includeThinking: true }).join('\n\n')
      if (text) openaiMessages.push({ role: 'system', content: text })
    }
  }

  // 📖 Convert Anthropic messages to OpenAI format
  const sourceMessages = Array.isArray(body.messages) ? body.messages : []
  for (const msg of sourceMessages) {
    // 📖 BUGFIX: skip null/non-object entries instead of throwing on msg.role
    if (!msg || typeof msg !== 'object') continue
    const role = msg.role === 'assistant' ? 'assistant' : 'user'

    if (typeof msg.content === 'string') {
      openaiMessages.push({ role, content: msg.content })
    } else if (Array.isArray(msg.content)) {
      // 📖 Anthropic content blocks: [{type: "text", text: "..."}, {type: "tool_result", ...}]
      const textParts = contentBlocksToText(msg.content, { includeThinking: true })
      // 📖 BUGFIX: optional-chain b?.type so null blocks in the array don't throw
      const toolResults = msg.content.filter(b => b?.type === 'tool_result')
      const toolUses = msg.content.filter(b => b?.type === 'tool_use')

      // 📖 Tool use blocks (assistant) → OpenAI tool_calls
      if (toolUses.length > 0 && role === 'assistant') {
        const toolCalls = toolUses.map(tu => ({
          id: tu.id || randomUUID(),
          type: 'function',
          function: {
            name: tu.name,
            arguments: typeof tu.input === 'string' ? tu.input : JSON.stringify(tu.input || {}),
          }
        }))
        openaiMessages.push({
          role: 'assistant',
          content: textParts.join('\n') || null,
          tool_calls: toolCalls,
        })
      }
      // 📖 Tool result blocks (user) → OpenAI tool messages
      else if (toolResults.length > 0) {
        // 📖 First push any text parts as user message
        if (textParts.length > 0) {
          openaiMessages.push({ role: 'user', content: textParts.join('\n') })
        }
        for (const tr of toolResults) {
          const content = typeof tr.content === 'string'
            ? tr.content
            : Array.isArray(tr.content)
              ? tr.content.filter(b => b?.type === 'text').map(b => b.text).join('\n')
              : JSON.stringify(tr.content || '')
          openaiMessages.push({
            role: 'tool',
            tool_call_id: tr.tool_use_id || tr.id || '',
            content,
          })
        }
      }
      // 📖 Plain text content blocks → join into single message
      else if (textParts.length > 0) {
        openaiMessages.push({ role, content: textParts.join('\n') })
      }
    }
  }

  const result = {
    model: body.model,
    messages: openaiMessages,
    stream: body.stream === true,
  }

  // 📖 Map Anthropic parameters to OpenAI equivalents (`!= null` keeps legitimate 0 values)
  if (body.max_tokens != null) result.max_tokens = body.max_tokens
  if (body.temperature != null) result.temperature = body.temperature
  if (body.top_p != null) result.top_p = body.top_p
  if (Array.isArray(body.stop_sequences) && body.stop_sequences.length > 0) {
    result.stop = body.stop_sequences
  }

  // 📖 Map Anthropic tools to OpenAI function tools
  if (Array.isArray(body.tools) && body.tools.length > 0) {
    result.tools = body.tools.map(tool => ({
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description || '',
        parameters: tool.input_schema || {},
      }
    }))
  }

  return result
}
|
|
177
|
+
|
|
178
|
+
/**
 * 📖 Translate an OpenAI Chat Completions JSON response to Anthropic Messages format.
 *
 * @param {object} openaiResponse — parsed OpenAI response
 * @param {string} requestModel — model name from the original request
 * @returns {object} — Anthropic Messages response
 */
export function translateOpenAIToAnthropic(openaiResponse, requestModel) {
  // 📖 BUGFIX: tolerate null/undefined/non-object upstream responses instead of throwing
  const response = (openaiResponse && typeof openaiResponse === 'object') ? openaiResponse : {}
  const choice = response.choices?.[0]
  const message = choice?.message || {}
  const content = []

  // 📖 Provider reasoning output → Anthropic thinking blocks (placed first)
  for (const reasoningBlock of extractReasoningBlocks(message)) {
    content.push(reasoningBlock)
  }

  // 📖 Text content → Anthropic text block
  if (message.content) {
    content.push({ type: 'text', text: message.content })
  }

  // 📖 Tool calls → Anthropic tool_use blocks
  if (Array.isArray(message.tool_calls)) {
    for (const tc of message.tool_calls) {
      // 📖 BUGFIX: skip null/non-object entries instead of throwing on tc.id
      if (!tc || typeof tc !== 'object') continue
      let input = {}
      // 📖 Malformed JSON arguments degrade to an empty input object
      try { input = JSON.parse(tc.function?.arguments || '{}') } catch { /* ignore */ }
      content.push({
        type: 'tool_use',
        id: tc.id || randomUUID(),
        name: tc.function?.name || '',
        input,
      })
    }
  }

  // 📖 Fallback: Anthropic requires at least one content block — provide empty text if none
  if (content.length === 0) {
    content.push({ type: 'text', text: '' })
  }

  // 📖 Map OpenAI finish_reason → Anthropic stop_reason ('stop' and unknown → end_turn)
  let stopReason = 'end_turn'
  if (choice?.finish_reason === 'length') stopReason = 'max_tokens'
  else if (choice?.finish_reason === 'tool_calls') stopReason = 'tool_use'

  return {
    id: response.id || `msg_${randomUUID().replace(/-/g, '')}`,
    type: 'message',
    role: 'assistant',
    content,
    model: requestModel || response.model || '',
    stop_reason: stopReason,
    stop_sequence: null,
    usage: {
      input_tokens: response.usage?.prompt_tokens || 0,
      output_tokens: response.usage?.completion_tokens || 0,
    },
  }
}
|
|
238
|
+
|
|
239
|
+
/**
 * 📖 Create a Transform stream that converts OpenAI SSE chunks to Anthropic SSE format.
 *
 * OpenAI SSE:
 *   data: {"choices":[{"delta":{"content":"Hello"}}]}
 *
 * Anthropic SSE:
 *   event: message_start
 *   data: {"type":"message_start","message":{...}}
 *
 *   event: content_block_start
 *   data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""}}
 *
 *   event: content_block_delta
 *   data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hello"}}
 *
 *   event: message_stop
 *   data: {"type":"message_stop"}
 *
 * @param {string} requestModel — model from original request
 * @returns {{ transform: Transform, getUsage: () => object }}
 */
// 📖 Max SSE buffer size to prevent memory exhaustion from malformed streams (1 MB)
const MAX_SSE_BUFFER = 1 * 1024 * 1024

export function createAnthropicSSETransformer(requestModel) {
  let headerSent = false
  // 📖 Block bookkeeping: index and type ('text' | 'tool_use') of the open block.
  // nextBlockIndex increments for each new content block.
  let nextBlockIndex = 0
  let currentBlockIndex = -1
  let currentBlockType = null
  // 📖 BUGFIX: remember the upstream finish_reason so message_delta can report the
  // correct Anthropic stop_reason (Claude Code needs 'tool_use' to run tools).
  let finishReason = null
  let inputTokens = 0
  let outputTokens = 0
  let buffer = ''

  // 📖 Serialize one Anthropic SSE event onto the stream
  const emit = (stream, event, payload) => {
    stream.push(`event: ${event}\ndata: ${JSON.stringify(payload)}\n\n`)
  }

  // 📖 Close the currently open content block, if any
  const closeBlock = (stream) => {
    if (currentBlockIndex >= 0) {
      emit(stream, 'content_block_stop', { type: 'content_block_stop', index: currentBlockIndex })
      currentBlockIndex = -1
      currentBlockType = null
    }
  }

  // 📖 Map OpenAI finish_reason → Anthropic stop_reason
  const mapStopReason = () => {
    if (finishReason === 'length') return 'max_tokens'
    if (finishReason === 'tool_calls') return 'tool_use'
    return 'end_turn'
  }

  const transform = new Transform({
    transform(chunk, encoding, callback) {
      buffer += chunk.toString()
      // 📖 Guard against unbounded buffer growth from malformed SSE streams
      if (buffer.length > MAX_SSE_BUFFER) {
        buffer = ''
        return callback(new Error('SSE buffer overflow'))
      }
      const lines = buffer.split('\n')
      // 📖 Keep the last incomplete line in the buffer
      buffer = lines.pop() || ''

      for (const line of lines) {
        if (!line.startsWith('data: ')) continue
        const payload = line.slice(6).trim()
        if (payload === '[DONE]') {
          // 📖 End of stream — close any open block, then send message_delta + message_stop
          closeBlock(this)
          emit(this, 'message_delta', {
            type: 'message_delta',
            delta: { stop_reason: mapStopReason(), stop_sequence: null },
            usage: { output_tokens: outputTokens },
          })
          emit(this, 'message_stop', { type: 'message_stop' })
          continue
        }

        let parsed
        try { parsed = JSON.parse(payload) } catch { continue }

        // 📖 Send message_start header on first chunk
        if (!headerSent) {
          headerSent = true
          emit(this, 'message_start', {
            type: 'message_start',
            message: {
              id: parsed.id || `msg_${randomUUID().replace(/-/g, '')}`,
              type: 'message',
              role: 'assistant',
              content: [],
              model: requestModel || parsed.model || '',
              stop_reason: null,
              stop_sequence: null,
              usage: { input_tokens: inputTokens, output_tokens: 0 },
            },
          })
        }

        const choice = parsed.choices?.[0]
        if (!choice) continue
        const delta = choice.delta || {}

        // 📖 Track usage if present
        if (parsed.usage) {
          inputTokens = parsed.usage.prompt_tokens || inputTokens
          outputTokens = parsed.usage.completion_tokens || outputTokens
        }

        // 📖 Text delta
        if (delta.content) {
          // 📖 BUGFIX: open a fresh text block when none is open OR the open block is a
          // tool_use block — previously text arriving after a tool block was appended
          // into the tool block.
          if (currentBlockType !== 'text') {
            closeBlock(this)
            currentBlockIndex = nextBlockIndex++
            currentBlockType = 'text'
            emit(this, 'content_block_start', {
              type: 'content_block_start',
              index: currentBlockIndex,
              content_block: { type: 'text', text: '' },
            })
          }
          emit(this, 'content_block_delta', {
            type: 'content_block_delta',
            index: currentBlockIndex,
            delta: { type: 'text_delta', text: delta.content },
          })
        }

        // 📖 Tool call deltas (if model supports tool use)
        if (Array.isArray(delta.tool_calls)) {
          for (const tc of delta.tool_calls) {
            if (tc?.function?.name) {
              // 📖 New tool call — close previous block if open, then start new one
              closeBlock(this)
              currentBlockIndex = nextBlockIndex++
              currentBlockType = 'tool_use'
              emit(this, 'content_block_start', {
                type: 'content_block_start',
                index: currentBlockIndex,
                content_block: {
                  type: 'tool_use',
                  id: tc.id || randomUUID(),
                  name: tc.function.name,
                  input: {},
                },
              })
            }
            if (tc?.function?.arguments) {
              emit(this, 'content_block_delta', {
                type: 'content_block_delta',
                index: currentBlockIndex >= 0 ? currentBlockIndex : 0,
                delta: { type: 'input_json_delta', partial_json: tc.function.arguments },
              })
            }
          }
        }

        // 📖 Record finish_reason; close blocks eagerly when tools are done
        if (choice.finish_reason) {
          finishReason = choice.finish_reason
          if (choice.finish_reason === 'tool_calls') closeBlock(this)
        }
      }
      callback()
    },

    flush(callback) {
      // 📖 Any remaining incomplete data in the buffer is intentionally ignored
      callback()
    },
  })

  return {
    transform,
    getUsage: () => ({ input_tokens: inputTokens, output_tokens: outputTokens }),
  }
}
|
|
407
|
+
|
|
408
|
+
// 📖 Rough local token estimate: ~4 characters per token after trimming, rounded up.
function estimateTokenCountFromText(text) {
  const trimmed = String(text || '').trim()
  return trimmed.length === 0 ? 0 : Math.ceil(trimmed.length / 4)
}
|
|
413
|
+
|
|
414
|
+
// 📖 Fast local token estimate for /v1/messages/count_tokens: translate the
// 📖 Anthropic body to OpenAI shape, then apply a per-message overhead (4 tokens
// 📖 each + 2 for the priming reply) plus heuristic counts for content, tool
// 📖 calls, tool_call_ids, and the serialized tool definitions.
export function estimateAnthropicTokens(body) {
  const openaiBody = translateAnthropicToOpenAI(body)

  let messageTokens = 0
  if (Array.isArray(openaiBody.messages)) {
    messageTokens = 2
    for (const message of openaiBody.messages) {
      messageTokens += 4
      if (typeof message.content === 'string') {
        messageTokens += estimateTokenCountFromText(message.content)
      }
      if (Array.isArray(message.tool_calls)) {
        for (const toolCall of message.tool_calls) {
          messageTokens += estimateTokenCountFromText(toolCall.function?.name || '')
          messageTokens += estimateTokenCountFromText(toolCall.function?.arguments || '')
        }
      }
      if (typeof message.tool_call_id === 'string') {
        messageTokens += estimateTokenCountFromText(message.tool_call_id)
      }
    }
  }

  let toolTokens = 0
  if (Array.isArray(body?.tools)) {
    for (const tool of body.tools) {
      toolTokens += estimateTokenCountFromText(JSON.stringify(tool || {}))
    }
  }

  return messageTokens + toolTokens
}
|
package/src/cli-help.js
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @file src/cli-help.js
|
|
3
|
+
* @description Shared CLI help builder for the startup `--help` flag and the in-app help overlay.
|
|
4
|
+
*
|
|
5
|
+
* @details
|
|
6
|
+
* 📖 Keeping CLI help text in one module avoids the classic drift where the TUI overlay
|
|
7
|
+
* 📖 documents one set of flags while `--help` prints another. New flags should be added
|
|
8
|
+
* 📖 here once, then both entry points stay aligned.
|
|
9
|
+
*
|
|
10
|
+
* 📖 The builder accepts an optional `chalk` instance. When omitted, it returns plain text,
|
|
11
|
+
* 📖 which keeps unit tests simple and makes the function safe for non-TTY contexts.
|
|
12
|
+
*
|
|
13
|
+
* @functions
|
|
14
|
+
* → `buildCliHelpLines` — build formatted help lines with optional colors and indentation
|
|
15
|
+
* → `buildCliHelpText` — join the help lines into one printable string
|
|
16
|
+
*
|
|
17
|
+
* @exports buildCliHelpLines, buildCliHelpText
|
|
18
|
+
* @see ./tool-metadata.js — source of truth for launcher modes and their CLI flags
|
|
19
|
+
*/
|
|
20
|
+
|
|
21
|
+
import { getToolModeOrder, getToolMeta } from './tool-metadata.js'
|
|
22
|
+
|
|
23
|
+
// 📖 Flags that filter or reshape the model analysis output (not launcher modes).
const ANALYSIS_FLAGS = [
  { flag: '--best', description: 'Show only top tiers (A+, S, S+)' },
  { flag: '--fiable', description: 'Run the 10s reliability analysis mode' },
  { flag: '--json', description: 'Output results as JSON for scripts/automation' },
  { flag: '--tier <S|A|B|C>', description: 'Filter models by tier family' },
  { flag: '--recommend', description: 'Open Smart Recommend immediately on startup' },
]

// 📖 Configuration and maintenance flags: profiles, telemetry opt-out, proxy cleanup, help.
const CONFIG_FLAGS = [
  { flag: '--profile <name>', description: 'Load a saved config profile before startup' },
  { flag: '--no-telemetry', description: 'Disable anonymous telemetry for this run' },
  { flag: '--clean-proxy, --proxy-clean', description: 'Remove persisted fcm-proxy config from OpenCode' },
  { flag: '--help, -h', description: 'Print this help and exit' },
]

// 📖 `daemon` subcommands shown in the Commands section of the help output.
const COMMANDS = [
  { command: 'daemon status', description: 'Show background FCM Proxy V2 service status' },
  { command: 'daemon install', description: 'Install and start the background service' },
  { command: 'daemon uninstall', description: 'Remove the background service' },
  { command: 'daemon restart', description: 'Restart the background service' },
  { command: 'daemon logs', description: 'Print the latest daemon log lines' },
]

// 📖 Example invocations printed at the bottom of the help text.
const EXAMPLES = [
  'free-coding-models --help',
  'free-coding-models --openclaw --tier S',
  "free-coding-models --json | jq '.[0]'",
  'free-coding-models daemon status',
]
|
|
52
|
+
|
|
53
|
+
// 📖 Apply a chalk formatter only when both a chalk instance and a formatter are
// 📖 available; otherwise return the text unstyled (plain / non-TTY mode).
function paint(chalk, formatter, text) {
  return (chalk && formatter) ? formatter(text) : text
}
|
|
57
|
+
|
|
58
|
+
// 📖 Render one "label  description" help line: the label is padded to a fixed
// 📖 column width and (optionally) colored cyan, the description dimmed.
function formatEntry(label, description, { chalk = null, indent = '', labelWidth = 40 } = {}) {
  const paddedLabel = label.padEnd(labelWidth)
  const labelText = paint(chalk, chalk?.cyan, paddedLabel)
  const descriptionText = paint(chalk, chalk?.dim, description)
  return `${indent}${labelText} ${descriptionText}`
}
|
|
63
|
+
|
|
64
|
+
/**
 * 📖 Build the full CLI help output as an array of pre-formatted lines.
 *
 * @param {object} [options]
 * @param {object|null} [options.chalk] — chalk instance for colors; null → plain text
 * @param {string} [options.indent] — prefix prepended to every line (used by the TUI overlay)
 * @param {string} [options.title] — heading for the first line
 * @returns {string[]} — formatted help lines, ready to join with '\n'
 */
export function buildCliHelpLines({ chalk = null, indent = '', title = 'CLI Help' } = {}) {
  const lines = []
  // 📖 Launcher flags are derived from tool metadata so --help and the in-app
  // 📖 overlay can never drift apart; modes without a CLI flag are skipped.
  const launchFlags = getToolModeOrder()
    .map((mode) => getToolMeta(mode))
    .filter((meta) => meta.flag)
    .map((meta) => ({ flag: meta.flag, description: `${meta.label} mode` }))

  // 📖 Header: title plus the two usage forms (flag mode and daemon subcommands)
  lines.push(`${indent}${paint(chalk, chalk?.bold, title)}`)
  lines.push(`${indent}${paint(chalk, chalk?.dim, 'Usage: free-coding-models [apiKey] [options]')}`)
  lines.push(`${indent}${paint(chalk, chalk?.dim, '       free-coding-models daemon [status|install|uninstall|restart|logs]')}`)
  lines.push('')
  // 📖 Section: tool launcher flags (from tool metadata)
  lines.push(`${indent}${paint(chalk, chalk?.bold, 'Tool Flags')}`)
  for (const entry of launchFlags) {
    lines.push(formatEntry(entry.flag, entry.description, { chalk, indent }))
  }
  lines.push('')
  // 📖 Section: analysis/filtering flags
  lines.push(`${indent}${paint(chalk, chalk?.bold, 'Analysis Flags')}`)
  for (const entry of ANALYSIS_FLAGS) {
    lines.push(formatEntry(entry.flag, entry.description, { chalk, indent }))
  }
  lines.push('')
  // 📖 Section: config & maintenance flags
  lines.push(`${indent}${paint(chalk, chalk?.bold, 'Config & Maintenance')}`)
  for (const entry of CONFIG_FLAGS) {
    lines.push(formatEntry(entry.flag, entry.description, { chalk, indent }))
  }
  lines.push('')
  // 📖 Section: daemon subcommands
  lines.push(`${indent}${paint(chalk, chalk?.bold, 'Commands')}`)
  for (const entry of COMMANDS) {
    lines.push(formatEntry(entry.command, entry.description, { chalk, indent }))
  }
  lines.push('')
  // 📖 Footer notes and example invocations
  lines.push(`${indent}${paint(chalk, chalk?.dim, 'Default launcher with no tool flag: OpenCode CLI')}`)
  lines.push(`${indent}${paint(chalk, chalk?.dim, 'Flags can be combined: --openclaw --tier S --json')}`)
  lines.push('')
  lines.push(`${indent}${paint(chalk, chalk?.bold, 'Examples')}`)
  for (const example of EXAMPLES) {
    lines.push(`${indent}${paint(chalk, chalk?.cyan, example)}`)
  }

  return lines
}
|
|
105
|
+
|
|
106
|
+
// 📖 Convenience wrapper: build the help lines and join them into one printable string.
export function buildCliHelpText(options = {}) {
  const lines = buildCliHelpLines(options)
  return lines.join('\n')
}
|
package/src/config.js
CHANGED
|
@@ -113,12 +113,16 @@
|
|
|
113
113
|
*/
|
|
114
114
|
|
|
115
115
|
import { readFileSync, writeFileSync, existsSync, mkdirSync, readdirSync, statSync, unlinkSync } from 'node:fs'
|
|
116
|
+
import { randomBytes } from 'node:crypto'
|
|
116
117
|
import { homedir } from 'node:os'
|
|
117
118
|
import { join } from 'node:path'
|
|
118
119
|
|
|
119
120
|
// 📖 New JSON config path — stores all providers' API keys + enabled state
|
|
120
121
|
export const CONFIG_PATH = join(homedir(), '.free-coding-models.json')
|
|
121
122
|
|
|
123
|
+
// 📖 Daemon data directory — PID file, logs, etc.
|
|
124
|
+
export const DAEMON_DATA_DIR = join(homedir(), '.free-coding-models')
|
|
125
|
+
|
|
122
126
|
// 📖 Old plain-text config path — used only for migration
|
|
123
127
|
const LEGACY_CONFIG_PATH = join(homedir(), '.free-coding-models')
|
|
124
128
|
|
|
@@ -648,18 +652,38 @@ export function _emptyProfileSettings() {
|
|
|
648
652
|
* 📖 normalizeProxySettings: keep proxy-related preferences stable across old configs,
|
|
649
653
|
* 📖 new installs, and profile switches. Proxy is opt-in by default.
|
|
650
654
|
*
|
|
655
|
+
* 📖 stableToken — persisted bearer token shared between TUI and daemon. Generated once
|
|
656
|
+
* on first access so env files and tool configs remain valid across restarts.
|
|
657
|
+
* 📖 daemonEnabled — opt-in for the always-on background proxy daemon (launchd / systemd).
|
|
658
|
+
* 📖 daemonConsent — ISO timestamp of when user consented to daemon install, or null.
|
|
659
|
+
*
|
|
651
660
|
* @param {object|undefined|null} proxy
|
|
652
|
-
* @returns {{ enabled: boolean, syncToOpenCode: boolean, preferredPort: number }}
|
|
661
|
+
* @returns {{ enabled: boolean, syncToOpenCode: boolean, preferredPort: number, stableToken: string, daemonEnabled: boolean, daemonConsent: string|null }}
|
|
653
662
|
*/
|
|
654
663
|
/**
 * 📖 Normalize proxy preferences into a stable shape across old configs, new
 * 📖 installs, and profile switches. Proxy and daemon remain opt-in by default.
 *
 * @param {object|undefined|null} proxy
 * @returns {{ enabled: boolean, syncToOpenCode: boolean, preferredPort: number, stableToken: string, daemonEnabled: boolean, daemonConsent: string|null, activeTool: string|null }}
 */
export function normalizeProxySettings(proxy = null) {
  // 📖 Keep a value only when it is a non-empty string; otherwise use the fallback
  const nonEmptyString = (value, fallback) =>
    (typeof value === 'string' && value.length > 0) ? value : fallback

  // 📖 Accept only an integer within the valid TCP port range; 0 means "auto-pick"
  let preferredPort = 0
  const portCandidate = proxy?.preferredPort
  if (Number.isInteger(portCandidate) && portCandidate >= 0 && portCandidate <= 65535) {
    preferredPort = portCandidate
  }

  return {
    enabled: proxy?.enabled === true,
    syncToOpenCode: proxy?.syncToOpenCode === true,
    preferredPort,
    // 📖 Generate a stable proxy token once and persist it forever
    stableToken: nonEmptyString(proxy?.stableToken, `fcm_${randomBytes(24).toString('hex')}`),
    daemonEnabled: proxy?.daemonEnabled === true,
    daemonConsent: nonEmptyString(proxy?.daemonConsent, null),
    // 📖 activeTool — legacy field kept only for backward compatibility.
    // 📖 Runtime sync now follows the current Z-selected tool automatically.
    activeTool: nonEmptyString(proxy?.activeTool, null),
  }
}
|
|
665
689
|
|