free-coding-models 0.3.0 → 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +24 -0
- package/README.md +29 -20
- package/bin/free-coding-models.js +50 -19
- package/package.json +1 -1
- package/src/anthropic-translator.js +78 -8
- package/src/cli-help.js +108 -0
- package/src/config.js +2 -1
- package/src/endpoint-installer.js +5 -4
- package/src/key-handler.js +31 -34
- package/src/opencode.js +17 -12
- package/src/overlays.js +40 -53
- package/src/proxy-server.js +335 -12
- package/src/proxy-sync.js +16 -4
- package/src/render-helpers.js +4 -2
- package/src/render-table.js +34 -36
- package/src/responses-translator.js +423 -0
- package/src/tool-launchers.js +246 -19
- package/src/utils.js +31 -8
|
/**
 * @file src/responses-translator.js
 * @description Bidirectional translation between the OpenAI Responses API wire format
 * and the older OpenAI Chat Completions wire used by the upstream free providers.
 *
 * @details
 * 📖 Codex CLI can speak either `responses` or `chat` depending on provider config.
 * 📖 Our upstream accounts still expose `/chat/completions`, so this module converts:
 * - Responses requests → Chat Completions requests
 * - Chat Completions JSON/SSE responses → Responses JSON/SSE responses
 *
 * 📖 The implementation focuses on the items Codex actually uses:
 * - `instructions` / `input` message history
 * - function tools + function-call outputs
 * - assistant text deltas
 * - function call argument deltas
 * - final `response.completed` payload with usage
 *
 * @functions
 * → `translateResponsesToOpenAI` — convert a Responses request body to chat completions
 * → `translateOpenAIToResponses` — convert a chat completions JSON response to Responses JSON
 * → `createResponsesSSETransformer` — convert chat-completions SSE chunks to Responses SSE
 *
 * @exports translateResponsesToOpenAI, translateOpenAIToResponses, createResponsesSSETransformer
 * @see src/proxy-server.js
 */
import { randomUUID } from 'node:crypto'
import { Transform } from 'node:stream'

// Upper bound (1 MiB) on buffered, not-yet-newline-terminated SSE data.
// Exceeding it errors the stream instead of letting a misbehaving upstream
// grow memory without limit.
const MAX_SSE_BUFFER = 1 * 1024 * 1024
/**
 * Coerce an arbitrary value into a string for a JSON-ish wire field.
 * Strings pass through untouched; everything else is JSON-encoded, with a
 * String() fallback when serialization throws (e.g. circular references).
 * Nullish values are normalized to the empty string before encoding.
 */
function serializeJsonish(value) {
  if (typeof value === 'string') {
    return value
  }
  const normalized = value ?? ''
  try {
    return JSON.stringify(normalized)
  } catch {
    return String(normalized)
  }
}
/**
 * Normalize a Responses message `content` field into an array of parts.
 * A bare string becomes a single `input_text` part; anything that is not
 * already an array yields an empty list.
 */
function normalizeResponseContent(content) {
  if (Array.isArray(content)) return content
  if (typeof content !== 'string') return []
  return [{ type: 'input_text', text: content }]
}
/**
 * Extract plain text from a single Responses content part.
 * Prefers the part's `text` field; for reasoning parts falls back to the
 * `summary`; anything unrecognized yields ''.
 */
function contentPartToText(part) {
  if (part === null || typeof part !== 'object') {
    return ''
  }
  const { text, type, summary } = part
  if (typeof text === 'string') {
    return text
  }
  const isReasoningSummary = type === 'reasoning' && typeof summary === 'string'
  return isReasoningSummary ? summary : ''
}
/**
 * Append a plain-text chat message built from collected text parts.
 * Whitespace-only text is dropped for user/system roles but kept (as '')
 * for assistants, since an empty assistant turn is still meaningful in
 * the transcript.
 */
function pushTextMessage(messages, role, textParts) {
  const joined = textParts.join('\n').trim()
  if (joined === '' && role !== 'assistant') {
    return
  }
  messages.push({ role, content: joined })
}
/**
 * Build a Chat Completions `tool_calls` entry from a Responses
 * function-call item. Uses the item's `call_id` (or `id`), generating a
 * fresh id only when neither is present, and always produces string
 * arguments: raw when already a string, JSON-encoded otherwise.
 */
function makeFunctionToolCall(entry = {}) {
  const id = entry.call_id || entry.id || `call_${randomUUID().replace(/-/g, '')}`
  const name = entry.name || entry.function?.name || ''

  let args
  if (typeof entry.arguments === 'string') {
    args = entry.arguments
  } else {
    args = serializeJsonish(entry.arguments || entry.function?.arguments || {})
  }

  return {
    id,
    type: 'function',
    function: { name, arguments: args },
  }
}
/**
 * Convert an OpenAI Responses API request body into the equivalent
 * Chat Completions request.
 *
 * Handles the shapes Codex actually sends: `instructions` (mapped to a
 * leading system message), string or item-list `input`, function calls
 * and their outputs, the common sampling knobs, and function tools.
 * Returns a minimal safe request when the body is missing or not an
 * object.
 */
export function translateResponsesToOpenAI(body) {
  if (!body || typeof body !== 'object') {
    return { model: '', messages: [], stream: false }
  }

  const messages = []

  if (typeof body.instructions === 'string' && body.instructions.trim()) {
    messages.push({ role: 'system', content: body.instructions.trim() })
  }

  let items
  if (Array.isArray(body.input)) {
    items = body.input
  } else if (body.input != null) {
    items = [body.input]
  } else {
    items = []
  }

  for (const item of items) {
    // A bare string is shorthand for a user message.
    if (typeof item === 'string') {
      messages.push({ role: 'user', content: item })
      continue
    }
    if (!item || typeof item !== 'object') continue

    if (item.type === 'function_call') {
      messages.push({ role: 'assistant', content: null, tool_calls: [makeFunctionToolCall(item)] })
      continue
    }

    if (item.type === 'function_call_output') {
      messages.push({
        role: 'tool',
        tool_call_id: item.call_id || item.id || '',
        content: serializeJsonish(item.output),
      })
      continue
    }

    if (item.type === 'input_text') {
      if (typeof item.text === 'string') {
        messages.push({ role: 'user', content: item.text })
      }
      continue
    }

    if (item.type !== 'message') continue

    // Map Responses roles onto the three chat roles; `developer` counts
    // as system, anything unrecognized as user.
    let role = 'user'
    if (item.role === 'assistant') {
      role = 'assistant'
    } else if (item.role === 'developer' || item.role === 'system') {
      role = 'system'
    }

    const textParts = []
    const toolCalls = []
    for (const part of normalizeResponseContent(item.content)) {
      if (part.type === 'function_call') {
        toolCalls.push(makeFunctionToolCall(part))
        continue
      }
      if (part.type === 'function_call_output') {
        // Tool outputs become standalone `tool` messages, emitted in order.
        messages.push({
          role: 'tool',
          tool_call_id: part.call_id || part.id || '',
          content: serializeJsonish(part.output),
        })
        continue
      }
      const text = contentPartToText(part)
      if (text) textParts.push(text)
    }

    if (toolCalls.length === 0) {
      pushTextMessage(messages, role, textParts)
    } else {
      messages.push({
        role: 'assistant',
        content: textParts.length > 0 ? textParts.join('\n') : null,
        tool_calls: toolCalls,
      })
    }
  }

  const request = {
    model: body.model,
    messages,
    stream: body.stream === true,
  }

  if (body.max_output_tokens != null) request.max_tokens = body.max_output_tokens
  if (body.temperature != null) request.temperature = body.temperature
  if (body.top_p != null) request.top_p = body.top_p

  if (Array.isArray(body.tools) && body.tools.length > 0) {
    const usable = body.tools.filter(
      (tool) => tool && typeof tool === 'object' && (tool.type === 'function' || typeof tool.name === 'string'),
    )
    request.tools = usable.map((tool) => ({
      type: 'function',
      function: {
        name: tool.name || tool.function?.name || '',
        description: tool.description || tool.function?.description || '',
        parameters: tool.parameters || tool.input_schema || tool.function?.parameters || {},
      },
    }))
  }

  return request
}
/**
 * Build the Responses `output` array for a completed turn from a Chat
 * Completions assistant message. Emits an assistant message item unless
 * the turn consists solely of tool calls with no text, followed by one
 * completed `function_call` item per tool call.
 */
function buildResponsesOutput(message = {}) {
  const text = typeof message.content === 'string' ? message.content : ''
  const toolCalls = Array.isArray(message.tool_calls) ? message.tool_calls : []
  const output = []

  // Skip the message item only when the turn is purely tool calls.
  const onlyToolCalls = text === '' && toolCalls.length > 0
  if (!onlyToolCalls) {
    output.push({
      id: `msg_${randomUUID().replace(/-/g, '')}`,
      type: 'message',
      status: 'completed',
      role: 'assistant',
      content: [{ type: 'output_text', text, annotations: [] }],
    })
  }

  for (const toolCall of toolCalls) {
    const callId = toolCall?.id || `call_${randomUUID().replace(/-/g, '')}`
    output.push({
      id: callId,
      type: 'function_call',
      status: 'completed',
      call_id: callId,
      name: toolCall?.function?.name || '',
      arguments: toolCall?.function?.arguments || '{}',
    })
  }

  return output
}
/**
 * Convert a non-streaming Chat Completions JSON response into an OpenAI
 * Responses API response object. Only the first choice is translated;
 * usage counters default to zero when absent, and the caller's request
 * model takes precedence over the model reported upstream.
 */
export function translateOpenAIToResponses(openaiResponse, requestModel) {
  const firstChoice = openaiResponse?.choices?.[0] || {}
  const assistantMessage = firstChoice?.message || {}
  const usage = openaiResponse?.usage
  const inputTokens = usage?.prompt_tokens || 0
  const outputTokens = usage?.completion_tokens || 0

  return {
    id: openaiResponse?.id || `resp_${randomUUID().replace(/-/g, '')}`,
    object: 'response',
    created_at: Math.floor(Date.now() / 1000),
    status: 'completed',
    model: requestModel || openaiResponse?.model || '',
    output: buildResponsesOutput(assistantMessage),
    usage: {
      input_tokens: inputTokens,
      output_tokens: outputTokens,
      total_tokens: inputTokens + outputTokens,
    },
  }
}
/**
 * Serialize one Responses SSE frame: an `event:` line naming the event
 * type, and a `data:` line whose JSON payload repeats the type alongside
 * the supplied fields.
 */
function createResponseSseEvent(type, payload) {
  const data = JSON.stringify({ type, ...payload })
  return `event: ${type}\ndata: ${data}\n\n`
}
/**
 * Create a Transform stream that rewrites upstream Chat Completions SSE
 * chunks into OpenAI Responses API SSE events.
 *
 * Emits `response.created` on first upstream data, an in-progress message
 * item before the first text delta, `response.output_text.delta` /
 * `response.function_call_arguments.delta` as chunks arrive, and finally
 * `response.output_item.done` per item plus `response.completed` with
 * usage totals.
 *
 * @param {string} requestModel - Model name echoed into the Responses
 *   payloads; falls back to the model reported by upstream chunks.
 * @returns {{ transform: Transform }} Stream to pipe upstream SSE through.
 */
export function createResponsesSSETransformer(requestModel) {
  let buffer = ''
  let responseId = `resp_${randomUUID().replace(/-/g, '')}`
  const messageItemId = `msg_${randomUUID().replace(/-/g, '')}`
  const createdAt = Math.floor(Date.now() / 1000)
  let createdSent = false
  let messageAdded = false
  let completedSent = false
  let messageText = ''
  let promptTokens = 0
  let completionTokens = 0
  // Tool-call accumulators keyed by the upstream delta `index`.
  const functionCalls = new Map()

  const ensureStarted = (stream) => {
    if (createdSent) return
    createdSent = true
    stream.push(createResponseSseEvent('response.created', {
      response: {
        id: responseId,
        object: 'response',
        created_at: createdAt,
        status: 'in_progress',
        model: requestModel || '',
        output: [],
      },
    }))
  }

  const ensureMessageItem = (stream) => {
    if (messageAdded) return
    messageAdded = true
    stream.push(createResponseSseEvent('response.output_item.added', {
      output_index: 0,
      item: {
        id: messageItemId,
        type: 'message',
        status: 'in_progress',
        role: 'assistant',
        content: [{ type: 'output_text', text: '', annotations: [] }],
      },
    }))
  }

  // Emit the closing `output_item.done` events and `response.completed`.
  // Guarded by `completedSent` so a duplicate upstream `[DONE]` (or a
  // `[DONE]` followed by stream end) cannot emit a second completion.
  const finalize = (stream) => {
    if (completedSent) return
    completedSent = true
    ensureStarted(stream)
    ensureMessageItem(stream)

    const output = [{
      id: messageItemId,
      type: 'message',
      status: 'completed',
      role: 'assistant',
      content: [{ type: 'output_text', text: messageText, annotations: [] }],
    }]
    stream.push(createResponseSseEvent('response.output_item.done', {
      output_index: 0,
      item: output[0],
    }))

    const sortedCalls = [...functionCalls.entries()].sort((a, b) => a[0] - b[0])
    for (const [index, call] of sortedCalls) {
      const item = {
        id: call.id,
        type: 'function_call',
        status: 'completed',
        call_id: call.id,
        name: call.name,
        arguments: call.arguments,
      }
      output.push(item)
      stream.push(createResponseSseEvent('response.output_item.done', {
        // The assistant message item always occupies output index 0.
        output_index: index + 1,
        item,
      }))
    }

    stream.push(createResponseSseEvent('response.completed', {
      response: {
        id: responseId,
        object: 'response',
        created_at: createdAt,
        status: 'completed',
        model: requestModel || '',
        output,
        usage: {
          input_tokens: promptTokens,
          output_tokens: completionTokens,
          total_tokens: promptTokens + completionTokens,
        },
      },
    }))
  }

  const transform = new Transform({
    transform(chunk, _encoding, callback) {
      buffer += chunk.toString()
      if (buffer.length > MAX_SSE_BUFFER) {
        buffer = ''
        return callback(new Error('Responses SSE buffer overflow'))
      }

      const lines = buffer.split('\n')
      buffer = lines.pop() || '' // keep the trailing partial line buffered

      for (const line of lines) {
        if (!line.startsWith('data: ')) continue
        const payload = line.slice(6).trim()

        if (payload === '[DONE]') {
          finalize(this)
          continue
        }

        let parsed
        try {
          parsed = JSON.parse(payload)
        } catch {
          continue // tolerate malformed upstream chunks
        }

        if (typeof parsed.id === 'string' && parsed.id.length > 0) responseId = parsed.id
        if (typeof parsed.model === 'string' && parsed.model.length > 0 && !requestModel) {
          requestModel = parsed.model
        }
        if (parsed.usage) {
          promptTokens = parsed.usage.prompt_tokens || promptTokens
          completionTokens = parsed.usage.completion_tokens || completionTokens
        }

        ensureStarted(this)
        const choice = parsed.choices?.[0]
        if (!choice) continue
        const delta = choice.delta || {}

        if (typeof delta.content === 'string' && delta.content.length > 0) {
          ensureMessageItem(this)
          messageText += delta.content
          this.push(createResponseSseEvent('response.output_text.delta', {
            output_index: 0,
            item_id: messageItemId,
            content_index: 0,
            delta: delta.content,
          }))
        }

        if (Array.isArray(delta.tool_calls)) {
          for (const toolCallDelta of delta.tool_calls) {
            const callIndex = Number.isInteger(toolCallDelta.index) ? toolCallDelta.index : functionCalls.size
            const existing = functionCalls.get(callIndex) || {
              id: toolCallDelta.id || `call_${randomUUID().replace(/-/g, '')}`,
              name: '',
              arguments: '',
              added: false,
            }
            if (typeof toolCallDelta.id === 'string' && toolCallDelta.id.length > 0) {
              existing.id = toolCallDelta.id
            }
            if (typeof toolCallDelta.function?.name === 'string' && toolCallDelta.function.name.length > 0) {
              existing.name = toolCallDelta.function.name
            }
            if (!existing.added) {
              existing.added = true
              this.push(createResponseSseEvent('response.output_item.added', {
                output_index: callIndex + 1,
                item: {
                  id: existing.id,
                  type: 'function_call',
                  status: 'in_progress',
                  call_id: existing.id,
                  name: existing.name,
                  arguments: existing.arguments,
                },
              }))
            }
            if (typeof toolCallDelta.function?.arguments === 'string' && toolCallDelta.function.arguments.length > 0) {
              existing.arguments += toolCallDelta.function.arguments
              this.push(createResponseSseEvent('response.function_call_arguments.delta', {
                output_index: callIndex + 1,
                item_id: existing.id,
                delta: toolCallDelta.function.arguments,
              }))
            }
            functionCalls.set(callIndex, existing)
          }
        }
      }

      callback()
    },

    // BUGFIX: upstream streams that close without a terminal `data: [DONE]`
    // previously ended without ever emitting `response.completed`, leaving
    // the Responses client hanging. Finalize on flush as well (a no-op
    // after a normal `[DONE]` thanks to the `completedSent` guard).
    flush(callback) {
      finalize(this)
      callback()
    },
  })

  return { transform }
}