@jonnyhoo/ccs 1.1.0 → 1.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,856 @@
1
+ "use strict";
2
+ /**
3
+ * Anthropic-to-OpenAI Protocol Translation Proxy
4
+ *
5
+ * Translates Claude CLI's Anthropic Messages API requests into OpenAI Chat Completions API format.
6
+ * Used in direct API key mode to bypass CLIProxy entirely.
7
+ *
8
+ * Request flow:
9
+ * Claude CLI (Anthropic format) → this proxy → OpenAI-compatible endpoint
10
+ *
11
+ * Response flow:
12
+ * OpenAI-compatible endpoint → this proxy (translates SSE) → Claude CLI (Anthropic format)
13
+ *
14
+ * Handles:
15
+ * - Path rewriting: /v1/messages → /v1/chat/completions
16
+ * - Auth translation: x-api-key → Authorization: Bearer
17
+ * - Request body: Anthropic messages/tools/system → OpenAI format
18
+ * - Streaming response: OpenAI SSE chunks → Anthropic SSE events
19
+ * - Non-streaming response: OpenAI completion → Anthropic message
20
+ * - Tool calls and tool results
21
+ */
22
// ── TypeScript-emitted CommonJS interop boilerplate (generated; do not edit) ──
// Copies a single property binding from module `m` onto namespace object `o`,
// preserving live-getter semantics when the host supports it.
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
// Attaches the module itself as the `default` export on the namespace object.
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
// Emulates `import * as ns from "mod"` over a CommonJS require() result.
var __importStar = (this && this.__importStar) || function (mod) {
    if (mod && mod.__esModule) return mod;
    var result = {};
    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
    __setModuleDefault(result, mod);
    return result;
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.AnthropicToOpenAIProxy = void 0;
// Node standard library: HTTP client/server, TLS client, and URL parsing.
const http = __importStar(require("http"));
const https = __importStar(require("https"));
const url_1 = require("url");
50
+ // ─── Request Translation ─────────────────────────────────────────────────────
51
/**
 * Flatten an Anthropic `system` field into a single plain string.
 *
 * Accepts undefined/null (→ ''), a raw string (returned as-is), or an array
 * of content blocks from which only non-empty `text` blocks are kept,
 * joined with newlines.
 */
function translateSystemPrompt(system) {
    if (!system) {
        return '';
    }
    if (typeof system === 'string') {
        return system;
    }
    // Array of content blocks: keep only the textual ones.
    const parts = [];
    for (const block of system) {
        if (block.type === 'text' && block.text) {
            parts.push(block.text);
        }
    }
    return parts.join('\n');
}
62
/**
 * Convert Anthropic conversation messages into OpenAI chat messages.
 *
 * - String content passes through unchanged; other non-array content is
 *   coerced to a string ('' for null/undefined).
 * - Assistant content blocks collapse into one assistant message: text parts
 *   concatenated, tool_use blocks converted to OpenAI `tool_calls`.
 * - User tool_result blocks are emitted as separate `role: "tool"` messages
 *   (before any remaining user text); image/document blocks are dropped.
 * - Any other role keeps only its concatenated text.
 */
function translateMessages(messages) {
    const out = [];
    // Flatten a tool_result payload (string or text-block array) to plain text.
    const flattenToolResult = (c) => {
        if (typeof c === 'string') {
            return c;
        }
        if (Array.isArray(c)) {
            return c
                .filter((b) => b.type === 'text' && b.text)
                .map((b) => b.text)
                .join('\n');
        }
        return '';
    };
    for (const msg of messages) {
        if (typeof msg.content === 'string') {
            out.push({ role: msg.role, content: msg.content });
            continue;
        }
        if (!Array.isArray(msg.content)) {
            out.push({ role: msg.role, content: String(msg.content ?? '') });
            continue;
        }
        if (msg.role === 'assistant') {
            // Text + tool_use blocks → single assistant message.
            const text = msg.content
                .filter((b) => b.type === 'text' && b.text)
                .map((b) => b.text)
                .join('');
            const toolCalls = msg.content
                .filter((b) => b.type === 'tool_use')
                .map((b) => ({
                    id: b.id || `call_${Math.random().toString(36).slice(2, 10)}`,
                    type: 'function',
                    function: {
                        name: b.name || '',
                        arguments: typeof b.input === 'string' ? b.input : JSON.stringify(b.input ?? {}),
                    },
                }));
            const entry = {
                role: 'assistant',
                content: text.length > 0 ? text : null,
            };
            if (toolCalls.length > 0) {
                entry.tool_calls = toolCalls;
            }
            out.push(entry);
        }
        else if (msg.role === 'user') {
            // tool_result blocks become standalone "tool" messages; remaining
            // text is emitted afterwards as one user message.
            const textParts = [];
            for (const block of msg.content) {
                if (block.type === 'text' && block.text) {
                    textParts.push(block.text);
                }
                else if (block.type === 'tool_result') {
                    out.push({
                        role: 'tool',
                        content: flattenToolResult(block.content),
                        tool_call_id: block.tool_use_id || '',
                    });
                }
                // image/document blocks are intentionally skipped (unsupported
                // by most Chat Completions endpoints).
            }
            if (textParts.length > 0) {
                out.push({ role: 'user', content: textParts.join('') });
            }
        }
        else {
            // Unknown roles: keep only their text.
            const text = msg.content
                .filter((b) => b.type === 'text' && b.text)
                .map((b) => b.text)
                .join('');
            out.push({ role: msg.role, content: text || '' });
        }
    }
    return out;
}
145
/**
 * Convert Anthropic tool definitions into OpenAI function-tool definitions.
 * The Anthropic `input_schema` maps directly onto OpenAI `parameters`.
 */
function translateTools(tools) {
    const converted = [];
    for (const tool of tools) {
        converted.push({
            type: 'function',
            function: {
                name: tool.name,
                description: tool.description,
                parameters: tool.input_schema,
            },
        });
    }
    return converted;
}
155
/**
 * Map an Anthropic tool_choice onto the OpenAI equivalent.
 * 'auto' → 'auto', 'any' → 'required', 'none' → 'none',
 * 'tool' → a named function selector; anything else falls back to 'auto'.
 */
function translateToolChoice(choice) {
    if (choice.type === 'tool') {
        return { type: 'function', function: { name: choice.name || '' } };
    }
    const direct = { auto: 'auto', any: 'required', none: 'none' };
    return Object.hasOwn(direct, choice.type) ? direct[choice.type] : 'auto';
}
169
/**
 * Build an OpenAI Chat Completions request from an Anthropic Messages request.
 *
 * The system prompt becomes a leading system message; optional sampling and
 * tool parameters are copied only when the caller set them, so the upstream
 * payload stays minimal. When streaming, usage reporting is requested via
 * `stream_options` so token counts can be relayed back to the client.
 */
function translateRequestChat(anthropicReq) {
    const messages = [];
    const systemText = translateSystemPrompt(anthropicReq.system);
    if (systemText) {
        messages.push({ role: 'system', content: systemText });
    }
    if (anthropicReq.messages) {
        messages.push(...translateMessages(anthropicReq.messages));
    }
    const openaiReq = {
        model: anthropicReq.model,
        messages,
        stream: anthropicReq.stream,
    };
    // Optional sampling parameters: copy only when explicitly provided
    // (0 is a valid value, so check against undefined, not truthiness).
    for (const key of ['max_tokens', 'temperature', 'top_p']) {
        if (anthropicReq[key] !== undefined) {
            openaiReq[key] = anthropicReq[key];
        }
    }
    if (anthropicReq.stop_sequences) {
        openaiReq.stop = anthropicReq.stop_sequences;
    }
    if (anthropicReq.tools && anthropicReq.tools.length > 0) {
        openaiReq.tools = translateTools(anthropicReq.tools);
    }
    if (anthropicReq.tool_choice) {
        openaiReq.tool_choice = translateToolChoice(anthropicReq.tool_choice);
    }
    // Reasoning effort passes through untouched (already OpenAI-shaped,
    // per the upstream CodexReasoningProxy).
    if (anthropicReq.reasoning) {
        openaiReq.reasoning = anthropicReq.reasoning;
    }
    // Ask the upstream to include usage in the final stream chunk.
    if (openaiReq.stream) {
        openaiReq.stream_options = { include_usage: true };
    }
    return openaiReq;
}
213
/**
 * Re-shape flat OpenAI chat messages into Responses API input items.
 * Every message — including role "tool" — is wrapped as a single text part;
 * null/undefined content becomes an empty string.
 *
 * NOTE(review): tool-result messages keep role "tool" here; confirm the
 * target Responses endpoint accepts that role in `input`.
 */
function toResponsesMessages(messages) {
    const items = [];
    for (const m of messages) {
        items.push({ role: m.role, content: [{ type: 'text', text: m.content ?? '' }] });
    }
    return items;
}
225
/**
 * Convert an OpenAI Chat Completions payload into a Responses API payload.
 * Scalar fields are carried over directly (undefined values are dropped by
 * JSON serialization later); tools/tool_choice are attached only if present.
 */
function translateChatToResponses(chat) {
    const { model, messages, stream, temperature, top_p, stop, reasoning, tools, tool_choice } = chat;
    const req = {
        model,
        input: toResponsesMessages(messages),
        stream,
        temperature,
        top_p,
        stop,
        reasoning,
    };
    if (tools) {
        req.tools = tools;
    }
    if (tool_choice) {
        req.tool_choice = tool_choice;
    }
    return req;
}
242
+ // ─── Streaming Response Translation ──────────────────────────────────────────
243
+ /** State tracker for translating OpenAI streaming chunks into Anthropic SSE events */
244
/** State tracker for translating OpenAI streaming chunks into Anthropic SSE events. */
class StreamingResponseTranslator {
    /**
     * @param {string} model - Model name echoed back in the message_start event.
     */
    constructor(model) {
        // Next Anthropic content-block index to hand out.
        this.contentBlockIndex = 0;
        // Whether a text content block is currently open.
        this.inTextBlock = false;
        // OpenAI tool_call index → { id, name, started, blockIndex }.
        this.inToolCallBlocks = new Map();
        this.inputTokens = 0;
        this.outputTokens = 0;
        // True once message_start has been emitted.
        this.headersSent = false;
        this.messageId = `msg_${Date.now().toString(36)}${Math.random().toString(36).slice(2, 8)}`;
        this.model = model;
    }
    /** Format one SSE frame: `event:` line plus JSON `data:` line. */
    sse(event, data) {
        return `event: ${event}\ndata: ${JSON.stringify(data)}\n\n`;
    }
    /** Produce the initial message_start event. */
    emitMessageStart() {
        return this.sse('message_start', {
            type: 'message_start',
            message: {
                id: this.messageId,
                type: 'message',
                role: 'assistant',
                content: [],
                model: this.model,
                stop_reason: null,
                stop_sequence: null,
                usage: { input_tokens: this.inputTokens, output_tokens: 0 },
            },
        });
    }
    /**
     * Map an OpenAI finish_reason onto an Anthropic stop_reason.
     * FIX: 'length' previously collapsed into 'end_turn', hiding truncation
     * from the client; Anthropic expresses it as 'max_tokens'.
     */
    mapStopReason(finishReason) {
        if (finishReason === 'tool_calls')
            return 'tool_use';
        if (finishReason === 'length')
            return 'max_tokens';
        return 'end_turn';
    }
    /** Translate a single OpenAI chunk to zero or more Anthropic SSE events. */
    translateChunk(chunk) {
        let events = '';
        // Usage may arrive on any chunk (typically the last, via stream_options).
        if (chunk.usage) {
            if (chunk.usage.prompt_tokens)
                this.inputTokens = chunk.usage.prompt_tokens;
            if (chunk.usage.completion_tokens)
                this.outputTokens = chunk.usage.completion_tokens;
        }
        if (!chunk.choices || chunk.choices.length === 0)
            return events;
        const choice = chunk.choices[0];
        if (!choice)
            return events;
        const delta = choice.delta;
        // Emit message_start on first content.
        if (!this.headersSent && delta) {
            this.headersSent = true;
            events += this.emitMessageStart();
        }
        // Text content: open a block on first sight, then stream deltas.
        if (delta?.content !== undefined && delta.content !== null) {
            if (!this.inTextBlock) {
                events += this.sse('content_block_start', {
                    type: 'content_block_start',
                    index: this.contentBlockIndex,
                    content_block: { type: 'text', text: '' },
                });
                this.inTextBlock = true;
            }
            if (delta.content) {
                events += this.sse('content_block_delta', {
                    type: 'content_block_delta',
                    index: this.contentBlockIndex,
                    delta: { type: 'text_delta', text: delta.content },
                });
            }
        }
        // Tool calls.
        if (delta?.tool_calls) {
            // A tool call ends any open text block.
            if (this.inTextBlock) {
                events += this.sse('content_block_stop', {
                    type: 'content_block_stop',
                    index: this.contentBlockIndex,
                });
                this.contentBlockIndex++;
                this.inTextBlock = false;
            }
            for (const tc of delta.tool_calls) {
                const tcIndex = tc.index ?? 0;
                let tracked = this.inToolCallBlocks.get(tcIndex);
                // New tool call announcement (carries id + name).
                if (tc.id && tc.function?.name) {
                    tracked = { id: tc.id, name: tc.function.name, started: false, blockIndex: -1 };
                    this.inToolCallBlocks.set(tcIndex, tracked);
                }
                if (!tracked)
                    continue;
                if (!tracked.started) {
                    // FIX: each tool call gets its own content-block index. The
                    // previous code reused this.contentBlockIndex for every
                    // parallel tool call, so concurrent calls emitted colliding
                    // start/delta indices. (Single-call streams are unaffected.)
                    tracked.blockIndex = this.contentBlockIndex++;
                    events += this.sse('content_block_start', {
                        type: 'content_block_start',
                        index: tracked.blockIndex,
                        content_block: { type: 'tool_use', id: tracked.id, name: tracked.name, input: {} },
                    });
                    tracked.started = true;
                }
                // Stream the argument JSON incrementally.
                if (tc.function?.arguments) {
                    events += this.sse('content_block_delta', {
                        type: 'content_block_delta',
                        index: tracked.blockIndex,
                        delta: { type: 'input_json_delta', partial_json: tc.function.arguments },
                    });
                }
            }
        }
        // Finish: close all open blocks and emit the message trailer.
        if (choice.finish_reason) {
            if (this.inTextBlock) {
                events += this.sse('content_block_stop', {
                    type: 'content_block_stop',
                    index: this.contentBlockIndex,
                });
                this.contentBlockIndex++;
                this.inTextBlock = false;
            }
            for (const [, tracked] of this.inToolCallBlocks) {
                if (tracked.started) {
                    events += this.sse('content_block_stop', {
                        type: 'content_block_stop',
                        index: tracked.blockIndex,
                    });
                }
            }
            this.inToolCallBlocks.clear();
            events += this.sse('message_delta', {
                type: 'message_delta',
                delta: { stop_reason: this.mapStopReason(choice.finish_reason), stop_sequence: null },
                usage: { output_tokens: this.outputTokens },
            });
            events += this.sse('message_stop', { type: 'message_stop' });
        }
        return events;
    }
    /** Close out the stream when it ended without an explicit finish_reason. */
    emitFinalIfNeeded() {
        let events = '';
        if (!this.headersSent) {
            events += this.emitMessageStart();
        }
        if (this.inTextBlock) {
            events += this.sse('content_block_stop', {
                type: 'content_block_stop',
                index: this.contentBlockIndex,
            });
        }
        for (const [, tracked] of this.inToolCallBlocks) {
            if (tracked.started) {
                events += this.sse('content_block_stop', {
                    type: 'content_block_stop',
                    index: tracked.blockIndex,
                });
            }
        }
        events += this.sse('message_delta', {
            type: 'message_delta',
            delta: { stop_reason: 'end_turn', stop_sequence: null },
            usage: { output_tokens: this.outputTokens },
        });
        events += this.sse('message_stop', { type: 'message_stop' });
        return events;
    }
}
417
/**
 * Translate a non-streaming OpenAI chat completion into an Anthropic message.
 *
 * @param {object} openaiResp - Parsed OpenAI Chat Completions response body.
 * @param {string} model - Model name reported back to the client.
 * @returns {object} Anthropic Messages API response object.
 */
function translateNonStreamingResponse(openaiResp, model) {
    const choice = openaiResp.choices?.[0];
    const message = choice?.message;
    const content = [];
    // Text content.
    if (message?.content) {
        content.push({ type: 'text', text: message.content });
    }
    // Tool calls.
    if (message?.tool_calls) {
        for (const tc of message.tool_calls) {
            let input = {};
            try {
                input = JSON.parse(tc.function.arguments);
            }
            catch {
                // Malformed JSON arguments from upstream: pass the raw string
                // through rather than dropping the call entirely.
                input = tc.function.arguments;
            }
            content.push({
                type: 'tool_use',
                id: tc.id,
                name: tc.function.name,
                input,
            });
        }
    }
    // Map finish_reason → stop_reason. FIX: 'length' now maps to Anthropic's
    // 'max_tokens' (it previously collapsed into 'end_turn', hiding truncation).
    let stopReason;
    if (choice?.finish_reason === 'tool_calls') {
        stopReason = 'tool_use';
    }
    else if (choice?.finish_reason === 'length') {
        stopReason = 'max_tokens';
    }
    else {
        stopReason = 'end_turn';
    }
    return {
        id: `msg_${Date.now().toString(36)}`,
        type: 'message',
        role: 'assistant',
        content,
        model,
        stop_reason: stopReason,
        stop_sequence: null,
        usage: {
            input_tokens: openaiResp.usage?.prompt_tokens ?? 0,
            output_tokens: openaiResp.usage?.completion_tokens ?? 0,
        },
    };
}
458
+ // ─── Proxy Server ────────────────────────────────────────────────────────────
459
class AnthropicToOpenAIProxy {
    /**
     * Local HTTP proxy: accepts Anthropic Messages API requests on 127.0.0.1
     * and forwards them to an OpenAI-compatible endpoint, translating both
     * directions (streaming and non-streaming). Falls back from
     * /v1/chat/completions to /v1/responses on upstream failure.
     *
     * @param {object} config
     * @param {string} config.targetBaseUrl - Base URL of the OpenAI-compatible API.
     * @param {string} config.apiKey - Bearer token sent upstream.
     * @param {boolean} [config.verbose=false] - Log to stderr.
     * @param {number} [config.timeoutMs=120000] - Upstream socket timeout.
     */
    constructor(config) {
        this.server = null;
        this.port = null;
        this.config = {
            // Strip trailing slashes so path concatenation is predictable.
            targetBaseUrl: config.targetBaseUrl.replace(/\/+$/, ''),
            apiKey: config.apiKey,
            verbose: config.verbose ?? false,
            timeoutMs: config.timeoutMs ?? 120000,
        };
    }
    /** Log to stderr when verbose (stdout may carry protocol traffic). */
    log(msg) {
        if (this.config.verbose) {
            console.error(`[anthropic-to-openai] ${msg}`);
        }
    }
    /**
     * Build the upstream URL for the given API flavor.
     * FIX: the previous `new URL('/v1/...', base)` used an absolute path and
     * so discarded any path prefix in targetBaseUrl (e.g. https://host/openai);
     * string concatenation preserves it.
     */
    buildTargetUrl(mode) {
        const suffix = mode === 'chat' ? '/v1/chat/completions' : '/v1/responses';
        return new url_1.URL(`${this.config.targetBaseUrl}${suffix}`);
    }
    /**
     * Start listening on an ephemeral 127.0.0.1 port.
     * Idempotent: returns the existing port if already started.
     * @returns {Promise<number>} the bound port
     */
    async start() {
        if (this.server)
            return this.port ?? 0;
        return new Promise((resolve, reject) => {
            this.server = http.createServer((req, res) => {
                // Fire-and-forget: handleRequest reports errors on `res` itself.
                void this.handleRequest(req, res);
            });
            this.server.listen(0, '127.0.0.1', () => {
                const addr = this.server?.address();
                this.port = typeof addr === 'object' && addr ? addr.port : 0;
                resolve(this.port);
            });
            this.server.on('error', reject);
        });
    }
    /** Stop the server (no-op if not started). */
    stop() {
        if (!this.server)
            return;
        this.server.close();
        this.server = null;
        this.port = null;
    }
    /** Read the full request body as UTF-8, rejecting bodies over 10 MiB. */
    readBody(req) {
        return new Promise((resolve, reject) => {
            const chunks = [];
            const maxSize = 10 * 1024 * 1024;
            let total = 0;
            req.on('data', (chunk) => {
                total += chunk.length;
                if (total > maxSize) {
                    req.destroy();
                    reject(new Error('Request body too large'));
                    return;
                }
                chunks.push(chunk);
            });
            req.on('end', () => resolve(Buffer.concat(chunks).toString('utf8')));
            req.on('error', reject);
        });
    }
    /**
     * Dispatch a single incoming request. Only `POST /v1/messages` (optionally
     * behind an /api/provider/{name} prefix) is translated; `GET /` answers a
     * health check; everything else gets 404.
     */
    async handleRequest(req, res) {
        const method = req.method || 'GET';
        let requestPath = req.url || '/';
        // Claude CLI may address us as /api/provider/{provider}/v1/messages.
        const providerPrefixMatch = requestPath.match(/^\/api\/provider\/[^/]+(.*)$/);
        if (providerPrefixMatch) {
            requestPath = providerPrefixMatch[1] || '/';
        }
        this.log(`${method} ${req.url} → path=${requestPath}`);
        if (method !== 'POST' || !requestPath.startsWith('/v1/messages')) {
            if (method === 'GET' && requestPath === '/') {
                res.writeHead(200, { 'Content-Type': 'application/json' });
                res.end(JSON.stringify({ status: 'ok', proxy: 'anthropic-to-openai' }));
                return;
            }
            res.writeHead(404, { 'Content-Type': 'application/json' });
            res.end(JSON.stringify({ error: `Unsupported: ${method} ${requestPath}` }));
            return;
        }
        try {
            const rawBody = await this.readBody(req);
            let anthropicReq;
            try {
                anthropicReq = rawBody.length ? JSON.parse(rawBody) : {};
            }
            catch {
                res.writeHead(400, { 'Content-Type': 'application/json' });
                res.end(JSON.stringify({ error: 'Invalid JSON' }));
                return;
            }
            const openaiReq = translateRequestChat(anthropicReq);
            const model = anthropicReq.model || 'unknown';
            this.log(`Translated request: model=${model}, stream=${openaiReq.stream}, messages=${openaiReq.messages.length}`);
            if (openaiReq.stream) {
                await this.handleStreaming(req, res, openaiReq, model);
            }
            else {
                await this.handleNonStreaming(req, res, openaiReq, model);
            }
        }
        catch (error) {
            const err = error;
            this.log(`Error: ${err.message}`);
            if (!res.headersSent) {
                res.writeHead(502, { 'Content-Type': 'application/json' });
            }
            // Report errors to the client in Anthropic error format.
            res.end(JSON.stringify({
                type: 'error',
                error: { type: 'api_error', message: err.message },
            }));
        }
    }
    /**
     * Forward a streaming request upstream and translate its SSE reply into
     * Anthropic SSE events. Tries /v1/chat/completions first; on HTTP error
     * or network failure falls back to /v1/responses.
     */
    async handleStreaming(_req, clientRes, openaiReqChat, model) {
        const attempt = (mode) => {
            return new Promise((resolveAttempt, rejectAttempt) => {
                const targetUrl = this.buildTargetUrl(mode);
                // Responses mode deep-copies the payload so the retry cannot
                // mutate the original request object.
                const body = mode === 'chat'
                    ? openaiReqChat
                    : translateChatToResponses(JSON.parse(JSON.stringify({ ...openaiReqChat, stream: true })));
                const bodyString = JSON.stringify(body);
                const requestFn = targetUrl.protocol === 'https:' ? https.request : http.request;
                const upstreamReq = requestFn({
                    protocol: targetUrl.protocol,
                    hostname: targetUrl.hostname,
                    port: targetUrl.port,
                    path: targetUrl.pathname + targetUrl.search,
                    method: 'POST',
                    timeout: this.config.timeoutMs,
                    headers: {
                        'Content-Type': 'application/json',
                        'Content-Length': Buffer.byteLength(bodyString),
                        'Authorization': `Bearer ${this.config.apiKey}`,
                        'Accept': 'text/event-stream',
                    },
                }, (upstreamRes) => {
                    const statusCode = upstreamRes.statusCode || 200;
                    if (statusCode >= 400) {
                        // Collect the error body, then either fall back or report it.
                        const chunks = [];
                        upstreamRes.on('data', (c) => chunks.push(c));
                        upstreamRes.on('end', () => {
                            const bodyErr = Buffer.concat(chunks).toString('utf8');
                            this.log(`Upstream ${mode} error ${statusCode}: ${bodyErr.slice(0, 200)}`);
                            if (mode === 'chat') {
                                attempt('responses').then(resolveAttempt).catch(rejectAttempt);
                                return;
                            }
                            if (!clientRes.headersSent) {
                                clientRes.writeHead(statusCode, { 'Content-Type': 'application/json' });
                            }
                            try {
                                const parsed = JSON.parse(bodyErr);
                                clientRes.end(JSON.stringify({ type: 'error', error: { type: 'api_error', message: parsed?.error?.message || bodyErr.slice(0, 500) } }));
                            }
                            catch {
                                clientRes.end(JSON.stringify({ type: 'error', error: { type: 'api_error', message: bodyErr.slice(0, 500) } }));
                            }
                            resolveAttempt();
                        });
                        return;
                    }
                    // Set up the Anthropic SSE response.
                    clientRes.writeHead(200, { 'Content-Type': 'text/event-stream', 'Cache-Control': 'no-cache', 'Connection': 'keep-alive' });
                    const translator = new StreamingResponseTranslator(model);
                    let buffer = '';
                    let hasFinished = false;
                    // Translate one `data:` SSE payload (either API flavor) and
                    // write the resulting Anthropic events to the client.
                    // FIX: this consolidates logic that was duplicated between the
                    // 'data' and 'end' handlers — the old flush path set
                    // hasFinished on response.completed without emitting the
                    // final events, dropping message_stop.
                    const handleDataLine = (jsonStr) => {
                        try {
                            const parsed = JSON.parse(jsonStr);
                            if (parsed && parsed.type) {
                                // Responses API event stream.
                                if (parsed.type.endsWith('.output_text.delta') && typeof parsed.delta === 'string') {
                                    const evts = translator.translateChunk({ choices: [{ delta: { content: parsed.delta } }] });
                                    if (evts)
                                        clientRes.write(evts);
                                }
                                else if (parsed.type === 'response.completed') {
                                    hasFinished = true;
                                    const final = translator.emitFinalIfNeeded();
                                    if (final)
                                        clientRes.write(final);
                                }
                                // Other Responses events carry nothing translatable yet.
                            }
                            else {
                                // Chat Completions chunk.
                                const events = translator.translateChunk(parsed);
                                if (events)
                                    clientRes.write(events);
                                if (parsed.choices?.[0]?.finish_reason)
                                    hasFinished = true;
                            }
                        }
                        catch (e) {
                            this.log(`Failed to parse SSE chunk: ${jsonStr.slice(0, 100)} - ${e.message}`);
                        }
                    };
                    upstreamRes.on('data', (chunk) => {
                        buffer += chunk.toString('utf8');
                        const lines = buffer.split('\n');
                        buffer = lines.pop() || '';
                        for (const line of lines) {
                            const trimmed = line.trim();
                            if (!trimmed || trimmed.startsWith(':'))
                                continue;
                            if (trimmed === 'data: [DONE]') {
                                if (!hasFinished) {
                                    const final = translator.emitFinalIfNeeded();
                                    if (final)
                                        clientRes.write(final);
                                    hasFinished = true;
                                }
                                continue;
                            }
                            if (trimmed.startsWith('data: ')) {
                                handleDataLine(trimmed.slice(6));
                            }
                        }
                    });
                    upstreamRes.on('end', () => {
                        // Flush a possible final partial line left in the buffer.
                        const trimmed = buffer.trim();
                        if (trimmed.startsWith('data: ') && trimmed !== 'data: [DONE]') {
                            handleDataLine(trimmed.slice(6));
                        }
                        if (!hasFinished) {
                            const final = translator.emitFinalIfNeeded();
                            if (final)
                                clientRes.write(final);
                        }
                        clientRes.end();
                        resolveAttempt();
                    });
                    upstreamRes.on('error', rejectAttempt);
                });
                upstreamReq.on('timeout', () => {
                    upstreamReq.destroy(new Error('Upstream request timeout'));
                });
                upstreamReq.on('error', (err) => {
                    // On a network error for the chat attempt, fall back to responses.
                    if (mode === 'chat') {
                        attempt('responses').then(resolveAttempt).catch(rejectAttempt);
                        return;
                    }
                    this.log(`Request error (${mode}): ${err.message}`);
                    if (!clientRes.headersSent) {
                        clientRes.writeHead(502, { 'Content-Type': 'application/json' });
                        clientRes.end(JSON.stringify({ type: 'error', error: { type: 'api_error', message: err.message } }));
                    }
                    rejectAttempt(err);
                });
                upstreamReq.write(bodyString);
                upstreamReq.end();
            });
        };
        // FIX: the previous version wrapped this in `new Promise((_resolve, _reject) => ...)`
        // and never called resolve, so the caller's `await` hung forever and
        // attempt() rejections became unhandled. Return the chain directly so
        // errors propagate into handleRequest's catch.
        return attempt('chat');
    }
    /** Pull concatenated text out of a Responses API result object. */
    extractResponsesText(parsed) {
        if (typeof parsed.output_text === 'string') {
            return parsed.output_text;
        }
        let text = '';
        if (Array.isArray(parsed.output)) {
            for (const item of parsed.output) {
                if (item.type === 'output_text' && typeof item.text === 'string') {
                    text += item.text;
                }
                else if (item.type === 'message' && Array.isArray(item.content)) {
                    for (const c of item.content) {
                        if ((c.type === 'output_text' || c.type === 'text') && typeof c.text === 'string') {
                            text += c.text;
                        }
                    }
                }
            }
        }
        return text;
    }
    /**
     * Forward a non-streaming request and translate the JSON reply into an
     * Anthropic message. Tries Chat Completions first, falling back to the
     * Responses API on HTTP or network errors.
     */
    async handleNonStreaming(_req, clientRes, openaiReqChat, model) {
        const doRequest = (mode) => {
            return new Promise((resolveAttempt, rejectAttempt) => {
                const targetUrl = this.buildTargetUrl(mode);
                const body = mode === 'chat' ? openaiReqChat : translateChatToResponses(openaiReqChat);
                const bodyString = JSON.stringify(body);
                const requestFn = targetUrl.protocol === 'https:' ? https.request : http.request;
                const upstreamReq = requestFn({
                    protocol: targetUrl.protocol,
                    hostname: targetUrl.hostname,
                    port: targetUrl.port,
                    path: targetUrl.pathname + targetUrl.search,
                    method: 'POST',
                    timeout: this.config.timeoutMs,
                    headers: {
                        'Content-Type': 'application/json',
                        'Content-Length': Buffer.byteLength(bodyString),
                        'Authorization': `Bearer ${this.config.apiKey}`,
                    },
                }, (upstreamRes) => {
                    const chunks = [];
                    upstreamRes.on('data', (c) => chunks.push(c));
                    upstreamRes.on('end', () => {
                        const bodyTxt = Buffer.concat(chunks).toString('utf8');
                        const statusCode = upstreamRes.statusCode || 200;
                        if (statusCode >= 400) {
                            this.log(`Upstream ${mode} error ${statusCode}: ${bodyTxt.slice(0, 200)}`);
                            if (mode === 'chat') {
                                doRequest('responses').then(resolveAttempt).catch(rejectAttempt);
                                return;
                            }
                            clientRes.writeHead(statusCode, { 'Content-Type': 'application/json' });
                            clientRes.end(JSON.stringify({ type: 'error', error: { type: 'api_error', message: bodyTxt.slice(0, 500) } }));
                            resolveAttempt();
                            return;
                        }
                        try {
                            const parsed = JSON.parse(bodyTxt);
                            if (parsed && parsed.choices) {
                                // Chat Completions shape.
                                const anthropicResp = translateNonStreamingResponse(parsed, model);
                                clientRes.writeHead(200, { 'Content-Type': 'application/json' });
                                clientRes.end(JSON.stringify(anthropicResp));
                            }
                            else if (parsed && (parsed.output_text || parsed.output)) {
                                // Responses API shape: flatten all text output.
                                const text = this.extractResponsesText(parsed);
                                const anthropicResp = {
                                    id: `msg_${Date.now().toString(36)}`,
                                    type: 'message',
                                    role: 'assistant',
                                    content: text ? [{ type: 'text', text }] : [],
                                    model,
                                    stop_reason: 'end_turn',
                                    stop_sequence: null,
                                    usage: { input_tokens: parsed?.usage?.prompt_tokens ?? 0, output_tokens: parsed?.usage?.completion_tokens ?? 0 },
                                };
                                clientRes.writeHead(200, { 'Content-Type': 'application/json' });
                                clientRes.end(JSON.stringify(anthropicResp));
                            }
                            else {
                                clientRes.writeHead(502, { 'Content-Type': 'application/json' });
                                clientRes.end(JSON.stringify({ type: 'error', error: { type: 'api_error', message: 'Unrecognized upstream response format' } }));
                            }
                        }
                        catch {
                            clientRes.writeHead(502, { 'Content-Type': 'application/json' });
                            clientRes.end(JSON.stringify({ type: 'error', error: { type: 'api_error', message: 'Failed to parse upstream response' } }));
                        }
                        resolveAttempt();
                    });
                    upstreamRes.on('error', rejectAttempt);
                });
                upstreamReq.on('timeout', () => upstreamReq.destroy(new Error('Upstream request timeout')));
                upstreamReq.on('error', (err) => {
                    if (mode === 'chat') {
                        doRequest('responses').then(resolveAttempt).catch(rejectAttempt);
                        return;
                    }
                    if (!clientRes.headersSent) {
                        clientRes.writeHead(502, { 'Content-Type': 'application/json' });
                        clientRes.end(JSON.stringify({ type: 'error', error: { type: 'api_error', message: err.message } }));
                    }
                    rejectAttempt(err);
                });
                upstreamReq.write(bodyString);
                upstreamReq.end();
            });
        };
        // FIX: same never-resolving Promise wrapper removed as in handleStreaming.
        return doRequest('chat');
    }
}
855
+ exports.AnthropicToOpenAIProxy = AnthropicToOpenAIProxy;
856
+ //# sourceMappingURL=anthropic-to-openai-proxy.js.map