free-coding-models 0.3.11 → 0.3.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,440 +0,0 @@
1
- /**
2
- * @file src/anthropic-translator.js
3
- * @description Bidirectional wire format translation between Anthropic Messages API
4
- * and OpenAI Chat Completions API.
5
- *
6
- * 📖 This is the key module that enables Claude Code to work natively through the
7
- * FCM proxy without needing the external Claude proxy integration.
8
- * Claude Code sends requests in Anthropic format (POST /v1/messages) and this
9
- * module translates them to OpenAI format for the upstream providers, then
10
- * translates the responses back.
11
- *
12
- * 📖 Supports both JSON and SSE streaming modes.
13
- *
14
- * @functions
15
- * → translateAnthropicToOpenAI(body) — Convert Anthropic Messages request → OpenAI chat completions
16
- * → translateOpenAIToAnthropic(openaiResponse, requestModel) — Convert OpenAI JSON response → Anthropic
17
- * → createAnthropicSSETransformer(requestModel) — Create a Transform stream for SSE translation
18
- * → estimateAnthropicTokens(body) — Fast local token estimate for `/v1/messages/count_tokens`
19
- *
20
- * @exports translateAnthropicToOpenAI, translateOpenAIToAnthropic, createAnthropicSSETransformer, estimateAnthropicTokens
21
- * @see src/proxy-server.js — routes /v1/messages through this translator
22
- */
23
-
24
- import { Transform } from 'node:stream'
25
- import { randomUUID } from 'node:crypto'
26
-
27
/**
 * 📖 Extract the human-readable text from a thinking-style content block.
 * Probes `text`, `thinking`, then `summary` and returns the first non-empty string.
 *
 * @param {object} block — candidate thinking block (may be null/undefined)
 * @returns {string} — the thinking text, or '' when none is present
 */
function normalizeThinkingText(block) {
  if (!block || typeof block !== 'object') return ''
  for (const key of ['text', 'thinking', 'summary']) {
    const value = block[key]
    if (typeof value === 'string' && value) return value
  }
  return ''
}
34
-
35
/**
 * 📖 Flatten Anthropic content blocks into an array of non-empty strings.
 * Text blocks contribute their text; thinking/redacted_thinking blocks are
 * included (wrapped in <thinking> tags) only when `includeThinking` is set.
 * Null/unknown blocks and empty fragments are dropped.
 *
 * @param {Array<object>} blocks — Anthropic content blocks
 * @param {{includeThinking?: boolean}} [options]
 * @returns {string[]} — non-empty text fragments in block order
 */
function contentBlocksToText(blocks, { includeThinking = false } = {}) {
  const parts = []
  for (const block of blocks) {
    let text = ''
    if (includeThinking && block?.type === 'thinking') {
      const thinkingText = normalizeThinkingText(block)
      if (thinkingText) text = `<thinking>${thinkingText}</thinking>`
    } else if (includeThinking && block?.type === 'redacted_thinking') {
      text = '<thinking>[redacted]</thinking>'
    } else if (block?.type === 'text') {
      text = block.text
    }
    if (text) parts.push(text)
  }
  return parts
}
49
-
50
/**
 * 📖 Pull model reasoning ("thinking") out of an OpenAI-style message.
 * Supports both the `reasoning` array form and the flat `reasoning_content`
 * string form; returns Anthropic-style thinking blocks (possibly empty).
 *
 * @param {object} [message] — OpenAI chat message
 * @returns {Array<{type: 'thinking', thinking: string}>}
 */
function extractReasoningBlocks(message = {}) {
  const { reasoning, reasoning_content: reasoningContent } = message
  if (Array.isArray(reasoning)) {
    const blocks = []
    for (const entry of reasoning) {
      const text = normalizeThinkingText(entry)
      if (text) blocks.push({ type: 'thinking', thinking: text })
    }
    return blocks
  }
  if (typeof reasoningContent === 'string' && reasoningContent.trim()) {
    return [{ type: 'thinking', thinking: reasoningContent.trim() }]
  }
  return []
}
62
-
63
/**
 * 📖 Translate an Anthropic Messages API request body to OpenAI Chat Completions format.
 *
 * Anthropic format:
 *   { model, messages: [{role, content}], system, max_tokens, stream, temperature, top_p, stop_sequences }
 *
 * OpenAI format:
 *   { model, messages: [{role, content}], max_tokens, stream, temperature, top_p, stop }
 *
 * @param {object} body — Anthropic request body
 * @returns {object} — OpenAI-compatible request body
 */
export function translateAnthropicToOpenAI(body) {
  // 📖 Guard against null/undefined/non-object input
  if (!body || typeof body !== 'object') return { model: '', messages: [], stream: false }
  if (!Array.isArray(body.messages)) body = { ...body, messages: [] }

  const openaiMessages = []

  // 📖 Anthropic "system" field → OpenAI system message
  if (body.system) {
    if (typeof body.system === 'string') {
      openaiMessages.push({ role: 'system', content: body.system })
    } else if (Array.isArray(body.system)) {
      // 📖 Anthropic supports system as array of content blocks
      const text = contentBlocksToText(body.system, { includeThinking: true }).join('\n\n')
      if (text) openaiMessages.push({ role: 'system', content: text })
    }
  }

  // 📖 Convert Anthropic messages to OpenAI format.
  // (body.messages is guaranteed to be an array after the normalization above.)
  for (const msg of body.messages) {
    // 🔧 Fix: skip null/non-object entries instead of throwing on `msg.role`
    if (!msg || typeof msg !== 'object') continue
    const role = msg.role === 'assistant' ? 'assistant' : 'user'

    if (typeof msg.content === 'string') {
      openaiMessages.push({ role, content: msg.content })
    } else if (Array.isArray(msg.content)) {
      // 📖 Anthropic content blocks: [{type: "text", text: "..."}, {type: "tool_result", ...}]
      const textParts = contentBlocksToText(msg.content, { includeThinking: true })
      // 🔧 Fix: use optional chaining so a null block in the array cannot throw
      // (matches the defensive `block?.type` style used by contentBlocksToText)
      const toolResults = msg.content.filter(b => b?.type === 'tool_result')
      const toolUses = msg.content.filter(b => b?.type === 'tool_use')

      // 📖 Tool use blocks (assistant) → OpenAI tool_calls
      if (toolUses.length > 0 && role === 'assistant') {
        const toolCalls = toolUses.map(tu => ({
          id: tu.id || randomUUID(),
          type: 'function',
          function: {
            name: tu.name,
            arguments: typeof tu.input === 'string' ? tu.input : JSON.stringify(tu.input || {}),
          }
        }))
        openaiMessages.push({
          role: 'assistant',
          content: textParts.join('\n') || null,
          tool_calls: toolCalls,
        })
      }
      // 📖 Tool result blocks (user) → OpenAI tool messages
      else if (toolResults.length > 0) {
        // 📖 First push any text parts as user message
        if (textParts.length > 0) {
          openaiMessages.push({ role: 'user', content: textParts.join('\n') })
        }
        for (const tr of toolResults) {
          const content = typeof tr.content === 'string'
            ? tr.content
            : Array.isArray(tr.content)
              ? tr.content.filter(b => b?.type === 'text').map(b => b.text).join('\n')
              : JSON.stringify(tr.content || '')
          openaiMessages.push({
            role: 'tool',
            tool_call_id: tr.tool_use_id || tr.id || '',
            content,
          })
        }
      }
      // 📖 Plain text content blocks → join into single message
      else if (textParts.length > 0) {
        openaiMessages.push({ role, content: textParts.join('\n') })
      }
    }
  }

  const result = {
    model: body.model,
    messages: openaiMessages,
    stream: body.stream === true,
  }

  // 📖 Map Anthropic parameters to OpenAI equivalents
  // (`!= null` keeps legitimate zero values like temperature: 0)
  if (body.max_tokens != null) result.max_tokens = body.max_tokens
  if (body.temperature != null) result.temperature = body.temperature
  if (body.top_p != null) result.top_p = body.top_p
  if (Array.isArray(body.stop_sequences) && body.stop_sequences.length > 0) {
    result.stop = body.stop_sequences
  }

  // 📖 Map Anthropic tools to OpenAI function tools
  if (Array.isArray(body.tools) && body.tools.length > 0) {
    result.tools = body.tools.map(tool => ({
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description || '',
        parameters: tool.input_schema || {},
      }
    }))
  }

  return result
}
177
-
178
/**
 * 📖 Translate an OpenAI Chat Completions JSON response into an Anthropic
 * Messages API response object.
 *
 * @param {object} openaiResponse — parsed OpenAI response body
 * @param {string} requestModel — model name from the original request (echoed back)
 * @returns {object} — Anthropic Messages response
 */
export function translateOpenAIToAnthropic(openaiResponse, requestModel) {
  const choice = openaiResponse.choices?.[0]
  const message = choice?.message || {}

  // 📖 Assemble content blocks: reasoning first, then text, then tool calls.
  const content = [...extractReasoningBlocks(message)]

  // 📖 Text content → Anthropic text block
  if (message.content) {
    content.push({ type: 'text', text: message.content })
  }

  // 📖 Tool calls → Anthropic tool_use blocks
  const toolCalls = Array.isArray(message.tool_calls) ? message.tool_calls : []
  for (const call of toolCalls) {
    let input = {}
    try {
      input = JSON.parse(call.function?.arguments || '{}')
    } catch {
      // 📖 Malformed arguments JSON — fall back to an empty input object
    }
    content.push({
      type: 'tool_use',
      id: call.id || randomUUID(),
      name: call.function?.name || '',
      input,
    })
  }

  // 📖 Fallback: Anthropic requires at least one content block
  if (content.length === 0) {
    content.push({ type: 'text', text: '' })
  }

  // 📖 Map OpenAI finish_reason → Anthropic stop_reason (unknown → end_turn)
  let stopReason = 'end_turn'
  switch (choice?.finish_reason) {
    case 'length':
      stopReason = 'max_tokens'
      break
    case 'tool_calls':
      stopReason = 'tool_use'
      break
    default:
      break
  }

  return {
    id: openaiResponse.id || `msg_${randomUUID().replace(/-/g, '')}`,
    type: 'message',
    role: 'assistant',
    content,
    model: requestModel || openaiResponse.model || '',
    stop_reason: stopReason,
    stop_sequence: null,
    usage: {
      input_tokens: openaiResponse.usage?.prompt_tokens || 0,
      output_tokens: openaiResponse.usage?.completion_tokens || 0,
    },
  }
}
238
-
239
/**
 * 📖 Create a Transform stream that converts OpenAI SSE chunks to Anthropic SSE format.
 *
 * OpenAI SSE:
 *   data: {"choices":[{"delta":{"content":"Hello"}}]}
 *
 * Anthropic SSE:
 *   event: message_start
 *   data: {"type":"message_start","message":{...}}
 *
 *   event: content_block_start
 *   data: {"type":"content_block_start","index":0,"content_block":{"type":"text","text":""}}
 *
 *   event: content_block_delta
 *   data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hello"}}
 *
 *   event: message_stop
 *   data: {"type":"message_stop"}
 *
 * @param {string} requestModel — model from original request
 * @returns {{ transform: Transform, getUsage: () => object }}
 */
// 📖 Max SSE buffer size to prevent memory exhaustion from malformed streams (1 MB)
const MAX_SSE_BUFFER = 1 * 1024 * 1024

export function createAnthropicSSETransformer(requestModel) {
  // 📖 True once message_start has been emitted (on the first parseable chunk).
  let headerSent = false
  // 📖 Track block indices for proper content_block_start/stop/delta indexing.
  // nextBlockIndex increments for each new content block (text or tool_use).
  // currentBlockIndex tracks the index of the most recently opened block (-1 = none open).
  let nextBlockIndex = 0
  let currentBlockIndex = -1
  // 📖 Last-seen usage counters; exposed via getUsage() after the stream ends.
  let inputTokens = 0
  let outputTokens = 0
  // 📖 Holds the trailing partial SSE line between chunks.
  let buffer = ''

  const transform = new Transform({
    transform(chunk, encoding, callback) {
      buffer += chunk.toString()
      // 📖 Guard against unbounded buffer growth from malformed SSE streams
      if (buffer.length > MAX_SSE_BUFFER) {
        buffer = ''
        return callback(new Error('SSE buffer overflow'))
      }
      const lines = buffer.split('\n')
      // 📖 Keep the last incomplete line in the buffer
      buffer = lines.pop() || ''

      for (const line of lines) {
        // 📖 Only `data:` lines carry payloads; event/comment lines are ignored.
        if (!line.startsWith('data: ')) continue
        const payload = line.slice(6).trim()
        if (payload === '[DONE]') {
          // 📖 End of stream — close any open block, then send message_delta + message_stop.
          // NOTE(review): stop_reason is always reported as 'end_turn' here even when the
          // upstream finished with tool_calls/length — confirm whether callers rely on this.
          if (currentBlockIndex >= 0) {
            this.push(`event: content_block_stop\ndata: ${JSON.stringify({ type: 'content_block_stop', index: currentBlockIndex })}\n\n`)
            currentBlockIndex = -1
          }
          this.push(`event: message_delta\ndata: ${JSON.stringify({
            type: 'message_delta',
            delta: { stop_reason: 'end_turn', stop_sequence: null },
            usage: { output_tokens: outputTokens },
          })}\n\n`)
          this.push(`event: message_stop\ndata: ${JSON.stringify({ type: 'message_stop' })}\n\n`)
          continue
        }

        // 📖 Non-JSON payloads are silently skipped (best-effort stream translation).
        let parsed
        try { parsed = JSON.parse(payload) } catch { continue }

        // 📖 Send message_start header on first chunk
        if (!headerSent) {
          headerSent = true
          this.push(`event: message_start\ndata: ${JSON.stringify({
            type: 'message_start',
            message: {
              id: parsed.id || `msg_${randomUUID().replace(/-/g, '')}`,
              type: 'message',
              role: 'assistant',
              content: [],
              model: requestModel || parsed.model || '',
              stop_reason: null,
              stop_sequence: null,
              usage: { input_tokens: inputTokens, output_tokens: 0 },
            },
          })}\n\n`)
        }

        const choice = parsed.choices?.[0]
        if (!choice) continue
        const delta = choice.delta || {}

        // 📖 Track usage if present (some providers only send it on the final chunk)
        if (parsed.usage) {
          inputTokens = parsed.usage.prompt_tokens || inputTokens
          outputTokens = parsed.usage.completion_tokens || outputTokens
        }

        // 📖 Text delta
        if (delta.content) {
          // NOTE(review): if a tool_use block is currently open (currentBlockIndex >= 0),
          // this text_delta is emitted against that tool_use block's index without
          // opening a new text block — verify upstream never interleaves text after
          // tool_call deltas, or this produces a mismatched block type.
          if (currentBlockIndex < 0 || nextBlockIndex === 0) {
            // 📖 Open first text block
            currentBlockIndex = nextBlockIndex++
            this.push(`event: content_block_start\ndata: ${JSON.stringify({
              type: 'content_block_start',
              index: currentBlockIndex,
              content_block: { type: 'text', text: '' },
            })}\n\n`)
          }
          this.push(`event: content_block_delta\ndata: ${JSON.stringify({
            type: 'content_block_delta',
            index: currentBlockIndex,
            delta: { type: 'text_delta', text: delta.content },
          })}\n\n`)
        }

        // 📖 Tool call deltas (if model supports tool use)
        if (Array.isArray(delta.tool_calls)) {
          for (const tc of delta.tool_calls) {
            if (tc.function?.name) {
              // 📖 New tool call — close previous block if open, then start new one
              if (currentBlockIndex >= 0) {
                this.push(`event: content_block_stop\ndata: ${JSON.stringify({ type: 'content_block_stop', index: currentBlockIndex })}\n\n`)
              }
              currentBlockIndex = nextBlockIndex++
              this.push(`event: content_block_start\ndata: ${JSON.stringify({
                type: 'content_block_start',
                index: currentBlockIndex,
                content_block: {
                  type: 'tool_use',
                  id: tc.id || randomUUID(),
                  name: tc.function.name,
                  input: {},
                },
              })}\n\n`)
            }
            if (tc.function?.arguments) {
              // 📖 Argument fragments stream as input_json_delta; index falls back to 0
              // if arguments arrive before any block was opened (defensive).
              this.push(`event: content_block_delta\ndata: ${JSON.stringify({
                type: 'content_block_delta',
                index: currentBlockIndex >= 0 ? currentBlockIndex : 0,
                delta: { type: 'input_json_delta', partial_json: tc.function.arguments },
              })}\n\n`)
            }
          }
        }

        // 📖 Handle finish_reason for tool_calls — close ALL open blocks
        if (choice.finish_reason === 'tool_calls' && currentBlockIndex >= 0) {
          this.push(`event: content_block_stop\ndata: ${JSON.stringify({ type: 'content_block_stop', index: currentBlockIndex })}\n\n`)
          currentBlockIndex = -1
        }
      }
      callback()
    },

    flush(callback) {
      // 📖 Process any remaining buffer
      if (buffer.trim()) {
        // ignore incomplete data
      }
      callback()
    },
  })

  return {
    transform,
    // 📖 Snapshot of the last usage numbers seen on the wire (0s if never reported).
    getUsage: () => ({ input_tokens: inputTokens, output_tokens: outputTokens }),
  }
}
407
-
408
/**
 * 📖 Rough token estimate for a piece of text (~4 characters per token).
 * Falsy/whitespace-only input counts as zero tokens.
 *
 * @param {*} text — value coerced to string before measuring
 * @returns {number} — estimated token count
 */
function estimateTokenCountFromText(text) {
  const trimmed = String(text || '').trim()
  return trimmed === '' ? 0 : Math.ceil(trimmed.length / 4)
}
413
-
414
/**
 * 📖 Fast local token estimate for `/v1/messages/count_tokens`.
 * Translates the Anthropic body to OpenAI shape, then charges ~4 chars/token
 * plus fixed overheads: 4 tokens per message and 2 per conversation.
 *
 * @param {object} body — Anthropic Messages request body
 * @returns {number} — estimated input token count
 */
export function estimateAnthropicTokens(body) {
  const openaiBody = translateAnthropicToOpenAI(body)

  let messageTokens = 0
  if (Array.isArray(openaiBody.messages)) {
    // 📖 Base conversation overhead
    messageTokens = 2
    for (const message of openaiBody.messages) {
      // 📖 Per-message wrapper overhead
      messageTokens += 4
      if (typeof message.content === 'string') {
        messageTokens += estimateTokenCountFromText(message.content)
      }
      if (Array.isArray(message.tool_calls)) {
        for (const toolCall of message.tool_calls) {
          messageTokens += estimateTokenCountFromText(toolCall.function?.name || '')
          messageTokens += estimateTokenCountFromText(toolCall.function?.arguments || '')
        }
      }
      if (typeof message.tool_call_id === 'string') {
        messageTokens += estimateTokenCountFromText(message.tool_call_id)
      }
    }
  }

  let toolTokens = 0
  if (Array.isArray(body?.tools)) {
    for (const tool of body.tools) {
      toolTokens += estimateTokenCountFromText(JSON.stringify(tool || {}))
    }
  }

  return messageTokens + toolTokens
}