banana-code 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +246 -0
  3. package/banana.js +5464 -0
  4. package/lib/agenticRunner.js +1884 -0
  5. package/lib/borderRenderer.js +41 -0
  6. package/lib/commandRunner.js +205 -0
  7. package/lib/completer.js +286 -0
  8. package/lib/config.js +301 -0
  9. package/lib/contextBuilder.js +324 -0
  10. package/lib/diffViewer.js +295 -0
  11. package/lib/fileManager.js +224 -0
  12. package/lib/historyManager.js +124 -0
  13. package/lib/hookManager.js +1143 -0
  14. package/lib/imageHandler.js +268 -0
  15. package/lib/inlineComplete.js +192 -0
  16. package/lib/interactivePicker.js +254 -0
  17. package/lib/lmStudio.js +226 -0
  18. package/lib/markdownRenderer.js +423 -0
  19. package/lib/mcpClient.js +288 -0
  20. package/lib/modelRegistry.js +350 -0
  21. package/lib/monkeyModels.js +97 -0
  22. package/lib/oauthOpenAI.js +167 -0
  23. package/lib/parser.js +134 -0
  24. package/lib/promptManager.js +96 -0
  25. package/lib/providerClients.js +1014 -0
  26. package/lib/providerManager.js +130 -0
  27. package/lib/providerStore.js +413 -0
  28. package/lib/statusBar.js +283 -0
  29. package/lib/streamHandler.js +306 -0
  30. package/lib/subAgentManager.js +406 -0
  31. package/lib/tokenCounter.js +132 -0
  32. package/lib/visionAnalyzer.js +163 -0
  33. package/lib/watcher.js +138 -0
  34. package/models.json +57 -0
  35. package/package.json +42 -0
  36. package/prompts/base.md +23 -0
  37. package/prompts/code-agent-glm.md +16 -0
  38. package/prompts/code-agent-gptoss.md +25 -0
  39. package/prompts/code-agent-nemotron.md +17 -0
  40. package/prompts/code-agent-qwen.md +20 -0
  41. package/prompts/code-agent.md +70 -0
  42. package/prompts/plan.md +44 -0
@@ -0,0 +1,1014 @@
1
/**
 * Flatten an OpenAI-style message `content` value into a single string.
 * Strings pass through unchanged; arrays are scanned for string items and
 * for objects carrying string `.text` / `.content` fields. Anything else
 * yields the empty string. Pieces are newline-joined and trimmed.
 */
function extractText(content) {
  if (typeof content === 'string') return content;
  if (!Array.isArray(content)) return '';

  const pieces = content.flatMap((item) => {
    if (!item) return [];
    if (typeof item === 'string') return [item];
    const found = [];
    if (typeof item.text === 'string') found.push(item.text);
    if (typeof item.content === 'string') found.push(item.content);
    return found;
  });

  return pieces.join('\n').trim();
}
17
+
18
/**
 * Best-effort JSON parsing.
 * Objects are returned as-is, strings are JSON.parse'd, and anything
 * nullish, non-string, or unparseable yields `fallback`.
 */
function parseJsonMaybe(value, fallback = {}) {
  if (value == null) return fallback;
  if (typeof value === 'object') return value;
  if (typeof value === 'string') {
    try {
      return JSON.parse(value);
    } catch {
      return fallback;
    }
  }
  return fallback;
}
28
+
29
/**
 * Wrap plain text in a minimal OpenAI-style SSE response: a single delta
 * chunk carrying the full text, followed by the `[DONE]` sentinel. Used to
 * fake streaming for providers that only return complete responses.
 */
function buildSseResponseFromText(text) {
  const encoder = new TextEncoder();
  const chunk = JSON.stringify({ choices: [{ delta: { content: text || '' } }] });
  const frames = [`data: ${chunk}\n\n`, 'data: [DONE]\n\n'];

  const body = new ReadableStream({
    start(controller) {
      frames.forEach((frame) => controller.enqueue(encoder.encode(frame)));
      controller.close();
    }
  });

  return new Response(body, {
    status: 200,
    headers: { 'Content-Type': 'text/event-stream' }
  });
}
48
+
49
/**
 * Minimal client for OpenAI-compatible chat-completions servers.
 * Accepts and returns OpenAI wire-format structures.
 *
 * Fix: chat() and chatStream() previously duplicated the entire request-body
 * construction and error-message formatting; both are now built by shared
 * helpers so the two paths cannot drift apart.
 */
class OpenAICompatibleClient {
  /**
   * @param {Object} options - baseUrl, apiKey, bearerToken, extraHeaders,
   *   label (used in error messages).
   */
  constructor(options = {}) {
    // Strip trailing slashes so path joins stay predictable.
    this.baseUrl = (options.baseUrl || '').replace(/\/+$/, '');
    this.apiKey = options.apiKey || null;
    this.bearerToken = options.bearerToken || null;
    this.extraHeaders = { ...(options.extraHeaders || {}) };
    this.label = options.label || 'Provider';
  }

  _headers() {
    const headers = {
      'Content-Type': 'application/json',
      ...this.extraHeaders
    };
    // A bearer token (e.g. OAuth) takes precedence over a plain API key.
    const authToken = this.bearerToken || this.apiKey;
    if (authToken) headers.Authorization = `Bearer ${authToken}`;
    return headers;
  }

  // Shared request-body builder for chat() and chatStream().
  _chatBody(messages, options, stream) {
    const body = {
      model: options.model,
      messages,
      temperature: options.temperature ?? 0.7,
      max_tokens: options.maxTokens ?? 10000,
      stream
    };
    if (options.topP !== undefined) body.top_p = options.topP;
    if (options.repeatPenalty !== undefined) body.repeat_penalty = options.repeatPenalty;
    if (options.tools) {
      body.tools = options.tools;
      body.tool_choice = options.toolChoice ?? 'auto';
    }
    return body;
  }

  // Formats "<label> error (<status>): <detail>" from a failed response.
  async _errorMessage(response) {
    const text = await response.text().catch(() => '');
    return `${this.label} error (${response.status}): ${text || response.statusText}`;
  }

  /**
   * POST `body` to `path` and return the parsed JSON response.
   * @throws {Error} on any non-2xx status.
   */
  async _request(path, body, signal) {
    const response = await fetch(`${this.baseUrl}${path}`, {
      method: 'POST',
      headers: this._headers(),
      body: JSON.stringify(body),
      signal
    });
    if (!response.ok) {
      throw new Error(await this._errorMessage(response));
    }
    return await response.json();
  }

  /**
   * Non-streaming chat completion.
   * @returns {Object} OpenAI-format chat.completion response.
   */
  async chat(messages, options = {}) {
    return await this._request(
      '/v1/chat/completions',
      this._chatBody(messages, options, false),
      options.signal
    );
  }

  /**
   * Streaming chat completion.
   * @returns {Response} raw fetch Response whose body is the SSE stream.
   */
  async chatStream(messages, options = {}) {
    const response = await fetch(`${this.baseUrl}/v1/chat/completions`, {
      method: 'POST',
      headers: this._headers(),
      body: JSON.stringify(this._chatBody(messages, options, true)),
      signal: options.signal
    });
    if (!response.ok) {
      throw new Error(await this._errorMessage(response));
    }
    return response;
  }

  /** Lists available models; returns [] on any failure (best-effort). */
  async listModels() {
    try {
      const response = await fetch(`${this.baseUrl}/v1/models`, {
        headers: this._headers()
      });
      if (!response.ok) return [];
      const data = await response.json();
      return data.data || [];
    } catch {
      return [];
    }
  }

  /**
   * Probes `/v1/models` to check connectivity.
   * @returns {boolean} true iff the endpoint answered 2xx.
   * @throws {Error} only when options.throwOnError === true.
   */
  async isConnected(options = {}) {
    const throwOnError = options.throwOnError === true;
    try {
      const response = await fetch(`${this.baseUrl}/v1/models`, {
        headers: this._headers()
      });
      if (response.ok) return true;
      if (!throwOnError) return false;
      const text = await response.text().catch(() => '');
      const reason = (text || response.statusText || 'Request failed').slice(0, 300);
      throw new Error(`${this.label} connection check failed (${response.status}): ${reason}`);
    } catch (err) {
      if (throwOnError) throw err;
      return false;
    }
  }
}
156
+
157
/**
 * Convert OpenAI function-tool definitions into Anthropic tool specs.
 * Entries without a function name are dropped; missing descriptions and
 * parameter schemas get empty defaults.
 */
function openAiToolsToAnthropic(tools = []) {
  return tools
    .filter((tool) => tool?.function?.name)
    .map((tool) => {
      const fn = tool.function;
      return {
        name: fn.name,
        description: fn.description || '',
        input_schema: fn.parameters || { type: 'object', properties: {} }
      };
    });
}
170
+
171
/**
 * Parse a base64 `data:` URL into { mediaType, data }.
 * Returns null for non-strings and for anything that is not a
 * base64-encoded data URL (e.g. plain http(s) URLs).
 */
function parseDataUrlImage(imageUrl) {
  if (typeof imageUrl !== 'string') return null;
  const match = /^data:([^;]+);base64,(.+)$/i.exec(imageUrl.trim());
  if (!match) return null;
  const [, mediaType, data] = match;
  return { mediaType, data };
}
181
+
182
/**
 * Convert an OpenAI user-message `content` value into Anthropic content:
 * either a plain string or an array of text/image blocks. Data-URL images
 * become base64 sources, http(s) URLs become url sources, and content
 * that produces no blocks falls back to flattened text.
 */
function openAiUserContentToAnthropic(content) {
  if (typeof content === 'string') return content;
  if (!Array.isArray(content)) return extractText(content);

  const blocks = [];
  const pushTextBlock = (raw) => {
    const text = typeof raw === 'string' ? raw.trim() : '';
    if (text) blocks.push({ type: 'text', text });
  };

  for (const chunk of content) {
    if (!chunk) continue;

    if (typeof chunk === 'string') {
      pushTextBlock(chunk);
      continue;
    }

    if (chunk.type === 'text' && typeof chunk.text === 'string') {
      pushTextBlock(chunk.text);
      continue;
    }

    // Untyped chunks that still carry a text payload (but never images).
    if (typeof chunk.text === 'string' && chunk.type !== 'image_url') {
      pushTextBlock(chunk.text);
      continue;
    }

    if (chunk.type !== 'image_url') continue;
    const imageUrl = typeof chunk.image_url === 'string'
      ? chunk.image_url
      : chunk.image_url?.url;
    if (!imageUrl) continue;

    const parsed = parseDataUrlImage(imageUrl);
    if (parsed) {
      blocks.push({
        type: 'image',
        source: {
          type: 'base64',
          media_type: parsed.mediaType,
          data: parsed.data
        }
      });
    } else if (/^https?:\/\//i.test(imageUrl)) {
      blocks.push({
        type: 'image',
        source: { type: 'url', url: imageUrl }
      });
    }
  }

  // Nothing usable found: fall back to a best-effort text flatten.
  if (blocks.length === 0) {
    return extractText(content) || '';
  }
  return blocks;
}
245
+
246
/**
 * Translate an OpenAI chat transcript into Anthropic /v1/messages form.
 * System messages are collected into a single `system` string, assistant
 * tool calls become `tool_use` blocks, and tool results become user-role
 * `tool_result` blocks.
 */
function openAiMessagesToAnthropic(messages) {
  const systemParts = [];
  const out = [];

  for (const message of messages || []) {
    if (!message || !message.role) continue;

    switch (message.role) {
      case 'system': {
        const sys = extractText(message.content);
        if (sys) systemParts.push(sys);
        break;
      }

      case 'user':
        out.push({
          role: 'user',
          content: openAiUserContentToAnthropic(message.content)
        });
        break;

      case 'assistant': {
        const content = [];
        const text = extractText(message.content);
        if (text) content.push({ type: 'text', text });

        for (const call of message.tool_calls || []) {
          content.push({
            type: 'tool_use',
            id: call.id || `tool_${Date.now()}`,
            name: call.function?.name || 'unknown_tool',
            input: parseJsonMaybe(call.function?.arguments, {})
          });
        }

        // Skip assistant turns that ended up with no content at all.
        if (content.length > 0) out.push({ role: 'assistant', content });
        break;
      }

      case 'tool':
        out.push({
          role: 'user',
          content: [
            {
              type: 'tool_result',
              tool_use_id: message.tool_call_id || `tool_${Date.now()}`,
              content: typeof message.content === 'string'
                ? message.content
                : JSON.stringify(message.content || {})
            }
          ]
        });
        break;

      default:
        break;
    }
  }

  return {
    system: systemParts.join('\n\n').trim(),
    messages: out
  };
}
305
+
306
/**
 * Map an Anthropic /v1/messages response onto the OpenAI chat.completion
 * shape, including tool calls and cache-aware token usage.
 */
function anthropicToOpenAi(data) {
  const textParts = [];
  const toolCalls = [];

  for (const block of data.content || []) {
    if (block.type === 'text') {
      textParts.push(block.text || '');
    } else if (block.type === 'tool_use') {
      toolCalls.push({
        type: 'function',
        id: block.id || `tool_${toolCalls.length + 1}`,
        function: {
          name: block.name || 'unknown_tool',
          arguments: JSON.stringify(block.input || {})
        }
      });
    }
  }

  const text = textParts.join('');
  const message = { role: 'assistant', content: text || null };
  if (toolCalls.length > 0) message.tool_calls = toolCalls;

  const usage = data.usage || {};
  const cacheReadTokens = Number(usage.cache_read_input_tokens || 0);
  const cacheCreationTokens = Number(
    usage.cache_creation_input_tokens
    || usage.cache_creation?.ephemeral_5m_input_tokens
    || usage.cache_creation?.ephemeral_1h_input_tokens
    || 0
  );
  // Cached tokens are reported separately from input_tokens; fold them
  // back in so prompt_tokens reflects the full prompt size.
  const promptTokens = Number(usage.input_tokens || 0) + cacheReadTokens + cacheCreationTokens;
  const completionTokens = Number(usage.output_tokens || 0);

  return {
    id: data.id,
    object: 'chat.completion',
    choices: [
      {
        index: 0,
        finish_reason: toolCalls.length > 0 ? 'tool_calls' : (data.stop_reason || 'stop'),
        message
      }
    ],
    usage: {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      total_tokens: promptTokens + completionTokens,
      cache_read_input_tokens: cacheReadTokens,
      cache_creation_input_tokens: cacheCreationTokens
    }
  };
}
362
+
363
/**
 * Normalize a cache-control setting (option or env string) into the
 * Anthropic wire shape. Returns:
 *  - undefined when the value was not specified at all,
 *  - null when caching is explicitly disabled,
 *  - { type: 'ephemeral', ttl? } otherwise.
 */
function normalizeAnthropicCacheControl(value) {
  if (value === undefined) return undefined;
  if (value === null || value === false) return null;
  if (value === true) return { type: 'ephemeral' };

  if (typeof value === 'string') {
    const raw = value.trim();
    switch (raw.toLowerCase()) {
      case '':
      case 'off':
      case 'none':
      case 'false':
      case '0':
        return null;
      case 'ephemeral':
      case 'on':
      case 'true':
      case '5m':
        return { type: 'ephemeral' };
      case '1h':
        return { type: 'ephemeral', ttl: '1h' };
      default:
        // Any other string is treated as a TTL, preserving original case.
        return { type: 'ephemeral', ttl: raw };
    }
  }

  if (typeof value === 'object') {
    const out = { ...value };
    if (!out.type) out.type = 'ephemeral';
    return out;
  }

  return null;
}
391
+
392
/**
 * Client for the Anthropic Messages API that speaks OpenAI wire format on
 * both sides: it accepts OpenAI-style messages/tools and returns an
 * OpenAI-style chat.completion object (via the translation helpers above).
 */
class AnthropicClient {
  /**
   * @param {Object} options - baseUrl, apiKey, version (anthropic-version
   *   header), defaultCacheControl (overrides ANTHROPIC_CACHE_CONTROL env).
   */
  constructor(options = {}) {
    this.baseUrl = (options.baseUrl || 'https://api.anthropic.com').replace(/\/+$/, '');
    this.apiKey = options.apiKey;
    this.version = options.version || '2023-06-01';

    // Automatic prompt caching (Anthropic supports top-level cache_control).
    // Defaults to ephemeral 5m unless explicitly disabled.
    // Precedence: options.defaultCacheControl > ANTHROPIC_CACHE_CONTROL > enabled.
    const envCacheControl = process.env.ANTHROPIC_CACHE_CONTROL;
    const optionCacheControl = options.defaultCacheControl;
    const normalized = normalizeAnthropicCacheControl(
      optionCacheControl !== undefined
        ? optionCacheControl
        : (envCacheControl !== undefined ? envCacheControl : true)
    );
    // ANTHROPIC_CACHE_TTL only fills in a TTL when none was set above.
    const envTtl = process.env.ANTHROPIC_CACHE_TTL;
    if (normalized && envTtl && !normalized.ttl) {
      normalized.ttl = envTtl;
    }
    this.defaultCacheControl = normalized;
  }

  // Anthropic authenticates with x-api-key + anthropic-version, not Bearer.
  _headers() {
    return {
      'Content-Type': 'application/json',
      'x-api-key': this.apiKey,
      'anthropic-version': this.version
    };
  }

  /**
   * Non-streaming chat request.
   * Translates messages/tools to Anthropic form, sends /v1/messages, and
   * maps the result back to an OpenAI chat.completion. If the server
   * rejects cache_control with a 400, the request is retried once without it.
   * @param {Array} messages - OpenAI-format messages.
   * @param {Object} options - model, maxTokens, temperature, topP, tools,
   *   cacheControl (overrides the instance default), signal.
   * @returns {Object} OpenAI-format chat.completion.
   * @throws {Error} "Anthropic error (<status>): ..." on HTTP failure.
   */
  async chat(messages, options = {}) {
    const translated = openAiMessagesToAnthropic(messages);
    const body = {
      model: options.model,
      messages: translated.messages,
      max_tokens: options.maxTokens ?? 8192
    };
    if (translated.system) body.system = translated.system;
    const hasTemperature = options.temperature !== undefined && options.temperature !== null;
    const hasTopP = options.topP !== undefined && options.topP !== null;
    // Anthropic Sonnet/Opus 4.6 rejects requests that specify both sampling params.
    // Prefer temperature for compatibility with existing Banana model defaults.
    if (hasTemperature) {
      body.temperature = options.temperature;
    } else if (hasTopP) {
      body.top_p = options.topP;
    }
    if (options.tools) body.tools = openAiToolsToAnthropic(options.tools);

    // Per-call cacheControl takes precedence over the instance default.
    const requestedCacheControl = normalizeAnthropicCacheControl(
      options.cacheControl !== undefined
        ? options.cacheControl
        : this.defaultCacheControl
    );
    if (requestedCacheControl) {
      body.cache_control = { ...requestedCacheControl };
    }
    const sendRequest = async (payload) => fetch(`${this.baseUrl}/v1/messages`, {
      method: 'POST',
      headers: this._headers(),
      body: JSON.stringify(payload),
      signal: options.signal
    });

    let response = await sendRequest(body);
    // A 400 mentioning cache_control/ttl means the account/endpoint does not
    // accept top-level cache_control: retry once without it. Any other
    // failure with cache_control set is fatal here.
    if (!response.ok && body.cache_control) {
      const text = await response.text().catch(() => '');
      const cacheControlRejected = response.status === 400
        && /cache_control|ttl|ephemeral|unknown field|extra inputs/i.test(text);
      if (cacheControlRejected) {
        const retryBody = { ...body };
        delete retryBody.cache_control;
        response = await sendRequest(retryBody);
      } else {
        throw new Error(`Anthropic error (${response.status}): ${text || response.statusText}`);
      }
    }
    if (!response.ok) {
      const text = await response.text().catch(() => '');
      throw new Error(`Anthropic error (${response.status}): ${text || response.statusText}`);
    }

    const data = await response.json();
    return anthropicToOpenAi(data);
  }

  /**
   * Pseudo-streaming: performs a full chat() call, then replays the final
   * text as a single-chunk OpenAI-style SSE Response.
   */
  async chatStream(messages, options = {}) {
    const data = await this.chat(messages, options);
    const text = data.choices?.[0]?.message?.content || '';
    return buildSseResponseFromText(text);
  }

  /** Lists available models; returns [] on any failure (best-effort). */
  async listModels() {
    try {
      const response = await fetch(`${this.baseUrl}/v1/models`, {
        headers: this._headers()
      });
      if (!response.ok) return [];
      const data = await response.json();
      return data.data || [];
    } catch {
      return [];
    }
  }

  /**
   * Probes /v1/models to check connectivity.
   * @returns {boolean} true iff the endpoint answered 2xx.
   * @throws {Error} only when options.throwOnError === true.
   */
  async isConnected(options = {}) {
    const throwOnError = options.throwOnError === true;
    try {
      const response = await fetch(`${this.baseUrl}/v1/models`, {
        headers: this._headers()
      });
      if (response.ok) return true;
      if (!throwOnError) return false;
      const text = await response.text().catch(() => '');
      const reason = (text || response.statusText || 'Request failed').slice(0, 300);
      throw new Error(`Anthropic connection check failed (${response.status}): ${reason}`);
    } catch (err) {
      if (throwOnError) throw err;
      return false;
    }
  }
}
514
+
515
/**
 * Translate an OpenAI chat transcript into Responses-API form:
 * { instructions, input }. System messages are joined into `instructions`;
 * user/assistant turns become `message` input items; assistant tool calls
 * become `function_call` items and tool results `function_call_output`.
 */
function openAiMessagesToResponses(messages) {
  const instructions = [];
  const input = [];

  // Convert one message's content into Responses content parts.
  // Assistant text uses 'output_text', user text 'input_text'.
  const toResponsesMessageContent = (role, content) => {
    const textType = role === 'assistant' ? 'output_text' : 'input_text';
    const parts = [];

    // Push a trimmed text part; empty/non-string text is dropped.
    const pushText = (text) => {
      if (typeof text !== 'string') return;
      const trimmed = text.trim();
      if (!trimmed) return;
      parts.push({ type: textType, text: trimmed });
    };

    if (typeof content === 'string') {
      pushText(content);
      return parts;
    }

    if (!Array.isArray(content)) return parts;

    for (const chunk of content) {
      if (!chunk) continue;

      if (typeof chunk === 'string') {
        pushText(chunk);
        continue;
      }

      // A chunk may carry text AND an image; both are emitted.
      if (typeof chunk.text === 'string') {
        pushText(chunk.text);
      }

      // Images are only valid as input (non-assistant) content.
      if (role !== 'assistant') {
        if (chunk.type === 'image_url') {
          const imageUrl = typeof chunk.image_url === 'string'
            ? chunk.image_url
            : chunk.image_url?.url;
          if (typeof imageUrl === 'string' && imageUrl.trim()) {
            parts.push({ type: 'input_image', image_url: imageUrl });
          }
        } else if (chunk.type === 'input_image') {
          // Already in Responses form; normalize the url the same way.
          const imageUrl = typeof chunk.image_url === 'string'
            ? chunk.image_url
            : chunk.image_url?.url;
          if (typeof imageUrl === 'string' && imageUrl.trim()) {
            parts.push({ type: 'input_image', image_url: imageUrl });
          }
        }
      }
    }

    return parts;
  };

  for (const message of messages || []) {
    if (!message || !message.role) continue;
    if (message.role === 'system') {
      const sys = extractText(message.content);
      if (sys) instructions.push(sys);
      continue;
    }

    if (message.role === 'user' || message.role === 'assistant') {
      const parts = toResponsesMessageContent(message.role, message.content);
      if (parts.length > 0) {
        input.push({
          type: 'message',
          role: message.role,
          content: parts
        });
      }
      // Tool calls are emitted even when the assistant turn had no text.
      if (message.role === 'assistant') {
        for (const toolCall of message.tool_calls || []) {
          input.push({
            type: 'function_call',
            call_id: toolCall.id || `call_${Date.now()}`,
            name: toolCall.function?.name || 'unknown_tool',
            arguments: typeof toolCall.function?.arguments === 'string'
              ? toolCall.function.arguments
              : JSON.stringify(toolCall.function?.arguments || {})
          });
        }
      }
      continue;
    }

    if (message.role === 'tool') {
      input.push({
        type: 'function_call_output',
        call_id: message.tool_call_id || `call_${Date.now()}`,
        output: typeof message.content === 'string'
          ? message.content
          : JSON.stringify(message.content || {})
      });
    }
  }

  return {
    instructions: instructions.join('\n\n').trim(),
    input
  };
}
619
+
620
/**
 * Convert OpenAI function-tool definitions into Responses-API tool
 * entries. Tools without a function name are dropped.
 */
function openAiToolsToResponses(tools = []) {
  const out = [];
  for (const tool of tools) {
    const fn = tool?.function;
    if (!fn?.name) continue;
    out.push({
      type: 'function',
      name: fn.name,
      description: fn.description || '',
      parameters: fn.parameters || { type: 'object', properties: {} }
    });
  }
  return out;
}
634
+
635
/**
 * Map a Responses-API result onto the OpenAI chat.completion shape.
 * Falls back to the convenience `output_text` field when no assistant
 * message text was found in the output items.
 */
function responsesToOpenAi(data) {
  const textParts = [];
  const toolCalls = [];

  for (const item of data.output || []) {
    if (item.type === 'message' && item.role === 'assistant') {
      for (const block of item.content || []) {
        if (block.type === 'output_text' || block.type === 'text') {
          textParts.push(block.text || '');
        }
      }
    } else if (item.type === 'function_call') {
      toolCalls.push({
        type: 'function',
        id: item.call_id || item.id || `call_${toolCalls.length + 1}`,
        function: {
          name: item.name || 'unknown_tool',
          arguments: typeof item.arguments === 'string'
            ? item.arguments
            : JSON.stringify(item.arguments || {})
        }
      });
    }
  }

  let text = textParts.join('');
  if (!text && typeof data.output_text === 'string') {
    text = data.output_text;
  }

  const message = { role: 'assistant', content: text || null };
  if (toolCalls.length > 0) message.tool_calls = toolCalls;

  const promptTokens = data.usage?.input_tokens || 0;
  const completionTokens = data.usage?.output_tokens || 0;

  return {
    id: data.id,
    object: 'chat.completion',
    choices: [
      {
        index: 0,
        finish_reason: toolCalls.length > 0 ? 'tool_calls' : 'stop',
        message
      }
    ],
    usage: {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      total_tokens: promptTokens + completionTokens
    }
  };
}
688
+
689
/**
 * Resolve Banana-style Codex model aliases (e.g. "codex-5.3-high") to the
 * backend model id plus a default reasoning effort, stripping any leading
 * "openai/" or "openai:" prefix. Returns a shallow copy; the input options
 * object is never mutated.
 */
function normalizeOpenAICodexOptions(options = {}) {
  const normalized = { ...options };
  const rawModel = String(normalized.model || '').trim();
  const model = rawModel.replace(/^(openai[/:])+/i, '');

  const aliasMap = {
    'codex-5.3-medium': { model: 'gpt-5.3-codex', reasoningEffort: 'medium' },
    'codex-5.3-high': { model: 'gpt-5.3-codex', reasoningEffort: 'high' }
  };
  const alias = aliasMap[model.toLowerCase()];

  if (alias) {
    normalized.model = alias.model;
    // An explicitly supplied reasoningEffort always wins over the alias.
    if (!normalized.reasoningEffort) normalized.reasoningEffort = alias.reasoningEffort;
  } else if (rawModel && rawModel !== model) {
    normalized.model = model;
  }
  return normalized;
}
707
+
708
/**
 * Coerce an arbitrary version string into a semver triplet — the Codex
 * backend expects exactly "major.minor.patch". Extracts the first triplet
 * found anywhere in the string; falls back to "4.0.0".
 */
function normalizeOpenAICodexClientVersion(value) {
  const FALLBACK = '4.0.0';
  const raw = String(value || '').trim();
  if (!raw) return FALLBACK;

  // An exact triplet and an embedded one reduce to the same match.
  const triplet = raw.match(/(\d+)\.(\d+)\.(\d+)/);
  if (!triplet) return FALLBACK;
  return `${triplet[1]}.${triplet[2]}.${triplet[3]}`;
}
722
+
723
/**
 * Concatenate the text of all 'output_text'/'text' content blocks in a
 * Responses-API output item. Non-objects and items without a content
 * array yield ''.
 */
function extractOutputTextFromResponseItem(item) {
  if (!item || typeof item !== 'object') return '';
  const blocks = Array.isArray(item.content) ? item.content : [];
  return blocks
    .filter((block) => block
      && typeof block === 'object'
      && (block.type === 'output_text' || block.type === 'text')
      && typeof block.text === 'string')
    .map((block) => block.text)
    .join('');
}
735
+
736
/**
 * Build an Error describing a failed Codex SSE stream. Prefers the
 * structured error message/detail; otherwise falls back to the raw
 * payload (stringified when it is not already a string).
 */
function buildOpenAICodexStreamError(payload) {
  const error = payload?.response?.error || payload?.error || {};
  let detail = error.message || error.detail;
  if (!detail) {
    detail = typeof payload === 'string' ? payload : JSON.stringify(payload);
  }
  return new Error(`OpenAI Codex stream failed: ${detail}`);
}
743
+
744
/**
 * Consume a Codex Responses-API SSE stream and aggregate it into a single
 * result: { responseId, text, usage, toolCalls }.
 *
 * Recognized events:
 *  - response.output_text.delta → incremental text (also forwarded to
 *    handlers.onTextDelta when provided)
 *  - response.output_item.added/.done → function_call items and, when no
 *    text deltas were ever seen, full message text
 *  - response.completed → response id and usage
 *  - response.failed → thrown as an Error
 *
 * @param {Response} response - fetch Response whose body is the SSE stream.
 * @param {Object} handlers - optional { onTextDelta(delta) } callback.
 * @throws {Error} when the body is missing or the stream reports failure.
 */
async function consumeOpenAICodexSse(response, handlers = {}) {
  if (!response?.body) {
    throw new Error('OpenAI Codex stream returned an empty response body');
  }

  const onTextDelta = typeof handlers.onTextDelta === 'function' ? handlers.onTextDelta : null;
  const decoder = new TextDecoder();
  const reader = response.body.getReader();
  let buffer = ''; // unconsumed partial-line data from the stream
  let eventName = ''; // pending event's `event:` field
  let dataLines = []; // pending event's accumulated `data:` lines

  // Once a text delta is seen, full message items are ignored for text so
  // the same content is not counted twice.
  let sawOutputTextDelta = false;
  let text = '';
  let responseId = null;
  let usage = null;
  const toolCallsById = new Map(); // call_id -> OpenAI-style tool_call (last write wins)
  const toolCallOrder = []; // first-seen ordering of call ids

  // Apply one parsed SSE JSON payload. fallbackEventName covers payloads
  // whose type only appears in the SSE `event:` field, not the JSON body.
  const applyPayload = (payload, fallbackEventName = '') => {
    if (!payload || typeof payload !== 'object') return;
    const eventType = typeof payload.type === 'string' ? payload.type : fallbackEventName;
    if (!eventType) return;

    if (eventType === 'response.output_text.delta') {
      const delta = typeof payload.delta === 'string' ? payload.delta : '';
      if (!delta) return;
      sawOutputTextDelta = true;
      text += delta;
      if (onTextDelta) onTextDelta(delta);
      return;
    }

    if (eventType === 'response.output_item.added' || eventType === 'response.output_item.done') {
      const item = payload.item;
      if (!item || typeof item !== 'object') return;

      if (item.type === 'function_call') {
        // `.done` re-delivers the call; the Map overwrite keeps the latest
        // arguments while toolCallOrder preserves first-seen ordering.
        const callId = String(item.call_id || item.id || `call_${toolCallsById.size + 1}`);
        const existing = toolCallsById.get(callId);
        let args = item.arguments;
        if (args === undefined || args === null) args = '{}';
        if (typeof args !== 'string') args = JSON.stringify(args);
        const toolCall = {
          type: 'function',
          id: callId,
          function: {
            name: item.name || existing?.function?.name || 'unknown_tool',
            arguments: args
          }
        };
        if (!existing) toolCallOrder.push(callId);
        toolCallsById.set(callId, toolCall);
        return;
      }

      // Fallback text path for servers that never emit text deltas.
      if (item.type === 'message' && !sawOutputTextDelta) {
        const itemText = extractOutputTextFromResponseItem(item);
        if (itemText) {
          text += itemText;
        }
      }
      return;
    }

    if (eventType === 'response.completed') {
      const responseData = payload.response || {};
      if (responseData.id) responseId = responseData.id;
      if (responseData.usage && typeof responseData.usage === 'object') {
        usage = responseData.usage;
      }
      return;
    }

    if (eventType === 'response.failed') {
      throw buildOpenAICodexStreamError(payload);
    }
  };

  // Dispatch the buffered event (a blank SSE line marks the boundary).
  const flushEvent = () => {
    if (dataLines.length === 0) {
      eventName = '';
      return;
    }
    const dataText = dataLines.join('\n').trim();
    dataLines = [];
    const fallbackEventName = eventName;
    eventName = '';
    if (!dataText || dataText === '[DONE]') return;
    let payload;
    try {
      payload = JSON.parse(dataText);
    } catch {
      // Malformed JSON frames are skipped rather than aborting the stream.
      return;
    }
    applyPayload(payload, fallbackEventName);
  };

  // Read the stream chunk by chunk, splitting on '\n' and tolerating CRLF.
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });

    let lineBreakIdx = buffer.indexOf('\n');
    while (lineBreakIdx >= 0) {
      let line = buffer.slice(0, lineBreakIdx);
      buffer = buffer.slice(lineBreakIdx + 1);
      if (line.endsWith('\r')) line = line.slice(0, -1);

      if (line === '') {
        flushEvent();
      } else if (line.startsWith('event:')) {
        eventName = line.slice(6).trim();
      } else if (line.startsWith('data:')) {
        dataLines.push(line.slice(5).trimStart());
      }

      lineBreakIdx = buffer.indexOf('\n');
    }
  }

  // A final data line without a trailing newline still counts.
  if (buffer.trim().length > 0) {
    if (buffer.startsWith('data:')) {
      dataLines.push(buffer.slice(5).trimStart());
    }
  }
  flushEvent();

  const toolCalls = toolCallOrder
    .map((id) => toolCallsById.get(id))
    .filter(Boolean);

  return {
    responseId,
    text,
    usage,
    toolCalls
  };
}
883
+
884
/**
 * Client for the ChatGPT Codex backend (Responses API over SSE), exposed
 * through the same OpenAI chat.completion interface as the other clients.
 * Authenticates with a ChatGPT access token rather than an API key.
 */
class OpenAICodexClient {
  /**
   * @param {Object} options - baseUrl, accessToken, clientVersion
   *   (normalized to a semver triplet; also reads OPENAI_CODEX_CLIENT_VERSION).
   */
  constructor(options = {}) {
    this.baseUrl = (options.baseUrl || 'https://chatgpt.com/backend-api/codex').replace(/\/+$/, '');
    this.accessToken = options.accessToken;
    this.clientVersion = normalizeOpenAICodexClientVersion(
      options.clientVersion || process.env.OPENAI_CODEX_CLIENT_VERSION || '4.0.0'
    );
  }

  _headers() {
    return {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${this.accessToken}`
    };
  }

  // Appends the client_version query parameter the backend expects.
  _url(pathname) {
    const base = `${this.baseUrl}${pathname}`;
    const sep = base.includes('?') ? '&' : '?';
    return `${base}${sep}client_version=${encodeURIComponent(this.clientVersion)}`;
  }

  /**
   * Chat completion. The backend only speaks streaming Responses-API, so
   * this always requests an SSE stream and aggregates it into a single
   * OpenAI-format chat.completion result.
   * @throws {Error} on HTTP failure or a failed stream.
   */
  async chat(messages, options = {}) {
    const resolvedOptions = normalizeOpenAICodexOptions(options);
    const translated = openAiMessagesToResponses(messages);
    const body = {
      model: resolvedOptions.model,
      input: translated.input,
      stream: true,
      store: false,
      parallel_tool_calls: true
    };
    if (translated.instructions) body.instructions = translated.instructions;
    if (resolvedOptions.tools) body.tools = openAiToolsToResponses(resolvedOptions.tools);
    if (resolvedOptions.reasoningEffort) {
      body.reasoning = { effort: resolvedOptions.reasoningEffort };
    }

    const response = await fetch(this._url('/responses'), {
      method: 'POST',
      headers: { ...this._headers(), Accept: 'text/event-stream' },
      body: JSON.stringify(body),
      signal: resolvedOptions.signal
    });

    if (!response.ok) {
      const text = await response.text().catch(() => '');
      throw new Error(`OpenAI Codex error (${response.status}): ${text || response.statusText}`);
    }

    const parsed = await consumeOpenAICodexSse(response);
    const message = {
      role: 'assistant',
      content: parsed.text || null
    };
    if (parsed.toolCalls.length > 0) message.tool_calls = parsed.toolCalls;

    // Usage field names vary between Responses and chat-completions styles.
    const promptTokens = parsed.usage?.input_tokens ?? parsed.usage?.prompt_tokens ?? 0;
    const completionTokens = parsed.usage?.output_tokens ?? parsed.usage?.completion_tokens ?? 0;
    const totalTokens = parsed.usage?.total_tokens ?? (promptTokens + completionTokens);

    return {
      id: parsed.responseId || null,
      object: 'chat.completion',
      choices: [
        {
          index: 0,
          finish_reason: parsed.toolCalls.length > 0 ? 'tool_calls' : 'stop',
          message
        }
      ],
      usage: {
        prompt_tokens: promptTokens,
        completion_tokens: completionTokens,
        total_tokens: totalTokens
      }
    };
  }

  /**
   * Pseudo-streaming: performs a full chat() call, then replays the final
   * text as a single-chunk OpenAI-style SSE Response.
   */
  async chatStream(messages, options = {}) {
    const data = await this.chat(messages, options);
    const text = data.choices?.[0]?.message?.content || '';
    return buildSseResponseFromText(text);
  }

  /** Lists available models; returns [] on any failure (best-effort). */
  async listModels() {
    try {
      const response = await fetch(this._url('/models'), {
        headers: this._headers()
      });
      if (!response.ok) return [];
      const data = await response.json();
      return data.data || [];
    } catch {
      return [];
    }
  }

  /**
   * Probes /models to check connectivity. 404 counts as connected and,
   * when throwOnError is false, so do other non-auth failures.
   * NOTE(review): returning true for e.g. 5xx responses is presumably
   * intentional because this backend may not expose /models — confirm.
   * @throws {Error} only when options.throwOnError === true.
   */
  async isConnected(options = {}) {
    const throwOnError = options.throwOnError === true;
    try {
      const response = await fetch(this._url('/models'), {
        headers: this._headers()
      });
      if (response.ok) return true;
      if (response.status === 404) return true;
      if (response.status === 401 || response.status === 403) {
        if (!throwOnError) return false;
        const text = await response.text().catch(() => '');
        const reason = (text || response.statusText || 'Request failed').slice(0, 300);
        throw new Error(`OpenAI Codex connection check failed (${response.status}): ${reason}`);
      }
      if (throwOnError) {
        const text = await response.text().catch(() => '');
        const reason = (text || response.statusText || 'Request failed').slice(0, 300);
        throw new Error(`OpenAI Codex connection check returned ${response.status}: ${reason}`);
      }
      return true;
    } catch (err) {
      if (throwOnError) throw err;
      return false;
    }
  }
}
1008
+
1009
// Public surface: the three provider clients plus the extractText helper
// (reused by callers that need to flatten message content).
module.exports = {
  OpenAICompatibleClient,
  AnthropicClient,
  OpenAICodexClient,
  extractText
};