codex-claude-proxy 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,329 @@
+ /**
+  * Response Streamer
+  * Streams SSE events from OpenAI Responses API and converts to Anthropic format
+  */
+
+ import { generateMessageId } from './format-converter.js';
+
+ /**
+  * Stream OpenAI Responses API SSE events and yield Anthropic-format events
+  *
+  * OpenAI Responses API event types:
+  * - response.created
+  * - response.in_progress
+  * - response.output_item.added (type: message, function_call, reasoning)
+  * - response.output_text.delta
+  * - response.function_call_arguments.delta
+  * - response.function_call_arguments.done
+  * - response.output_item.done
+  * - response.completed
+  *
+  * Anthropic event types:
+  * - message_start
+  * - content_block_start
+  * - content_block_delta
+  * - content_block_stop
+  * - message_delta
+  * - message_stop
+  *
+  * @param {Response} response - The HTTP response with SSE body
+  * @param {string} model - The model name
+  * @yields {Object} Anthropic-format SSE events
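+  *
+  * @example
+  * // Illustrative sketch only; the event shapes below follow the mapping
+  * // implemented in this function, they are not copied from a live API exchange.
+  * // An incoming OpenAI event such as
+  * //   { type: 'response.output_text.delta', delta: 'Hi' }
+  * // is yielded (for the first text block) as
+  * //   { event: 'content_block_delta',
+  * //     data: { type: 'content_block_delta', index: 0,
+  * //             delta: { type: 'text_delta', text: 'Hi' } } }
+  * // A text-only response therefore produces the sequence:
+  * //   message_start, content_block_start, content_block_delta*,
+  * //   content_block_stop, message_delta, message_stop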
+  */
+ export async function* streamResponsesAPI(response, model) {
+   const messageId = generateMessageId();
+   let hasEmittedStart = false;
+   let blockIndex = 0;
+   let currentBlockType = null;
+   let currentBlockId = null;
+   let currentToolName = null;
+   let inputTokens = 0;
+   let outputTokens = 0;
+   let stopReason = 'end_turn';
+   let pendingArguments = '';
+   let usage = { input_tokens: 0, output_tokens: 0 };
+
+   const reader = response.body.getReader();
+   const decoder = new TextDecoder();
+   let buffer = '';
+
+   while (true) {
+     const { done, value } = await reader.read();
+     if (done) break;
+
+     buffer += decoder.decode(value, { stream: true });
+     const lines = buffer.split('\n');
+     buffer = lines.pop() || '';
+
+     for (const line of lines) {
+       if (!line.startsWith('data:')) continue;
+
+       const jsonText = line.slice(5).trim();
+       if (!jsonText) continue;
+
+       try {
+         const event = JSON.parse(jsonText);
+         const eventType = event.type;
+
+         // Extract usage from the completed response
+         if (eventType === 'response.completed' && event.response?.usage) {
+           inputTokens = event.response.usage.input_tokens || 0;
+           outputTokens = event.response.usage.output_tokens || 0;
+           usage = {
+             input_tokens: inputTokens,
+             output_tokens: outputTokens,
+             // The Responses API reports cached prompt tokens under
+             // input_tokens_details.cached_tokens; keep the original flat
+             // field as a fallback in case the upstream shape differs
+             cache_read_input_tokens: event.response.usage.input_tokens_details?.cached_tokens
+               ?? event.response.usage.cache_read_input_tokens
+               ?? 0
+           };
+         }
+
+         // Handle output item added
+         if (eventType === 'response.output_item.added') {
+           const item = event.item;
+
+           if (!hasEmittedStart) {
+             hasEmittedStart = true;
+             yield {
+               event: 'message_start',
+               data: {
+                 type: 'message_start',
+                 message: {
+                   id: messageId,
+                   type: 'message',
+                   role: 'assistant',
+                   model: model,
+                   content: [],
+                   stop_reason: null,
+                   stop_sequence: null,
+                   usage: { input_tokens: 0, output_tokens: 0 }
+                 }
+               }
+             };
+           }
+
+           // Close the previous block, if any, before starting a new one
+           if (currentBlockType !== null) {
+             yield {
+               event: 'content_block_stop',
+               data: { type: 'content_block_stop', index: blockIndex }
+             };
+             blockIndex++;
+             // Reset so an unrecognized item type cannot leave a stale open block
+             currentBlockType = null;
+             currentBlockId = null;
+           }
+
+           // Start new block based on item type
+           if (item.type === 'message') {
+             currentBlockType = 'text';
+             currentBlockId = item.id;
+             yield {
+               event: 'content_block_start',
+               data: {
+                 type: 'content_block_start',
+                 index: blockIndex,
+                 content_block: { type: 'text', text: '' }
+               }
+             };
+           } else if (item.type === 'function_call') {
+             currentBlockType = 'tool_use';
+             currentBlockId = item.call_id || item.id;
+             currentToolName = item.name;
+             stopReason = 'tool_use';
+
+             yield {
+               event: 'content_block_start',
+               data: {
+                 type: 'content_block_start',
+                 index: blockIndex,
+                 content_block: {
+                   type: 'tool_use',
+                   id: currentBlockId,
+                   name: item.name,
+                   input: {}
+                 }
+               }
+             };
+           } else if (item.type === 'reasoning') {
+             currentBlockType = 'thinking';
+             currentBlockId = item.id;
+             yield {
+               event: 'content_block_start',
+               data: {
+                 type: 'content_block_start',
+                 index: blockIndex,
+                 content_block: { type: 'thinking', thinking: '' }
+               }
+             };
+           }
+         }
+
+         // Handle text delta
+         if (eventType === 'response.output_text.delta') {
+           const delta = event.delta;
+           if (delta && currentBlockType === 'text') {
+             yield {
+               event: 'content_block_delta',
+               data: {
+                 type: 'content_block_delta',
+                 index: blockIndex,
+                 delta: { type: 'text_delta', text: delta }
+               }
+             };
+           }
+         }
+
+         // Handle function call arguments delta
+         if (eventType === 'response.function_call_arguments.delta') {
+           const delta = event.delta;
+           if (delta && currentBlockType === 'tool_use') {
+             pendingArguments += delta;
+             yield {
+               event: 'content_block_delta',
+               data: {
+                 type: 'content_block_delta',
+                 index: blockIndex,
+                 delta: { type: 'input_json_delta', partial_json: delta }
+               }
+             };
+           }
+         }
+
+         // Handle function call arguments done
+         if (eventType === 'response.function_call_arguments.done') {
+           // Arguments complete - nothing special needed, already streamed
+         }
+
+         // Handle output item done
+         if (eventType === 'response.output_item.done') {
+           // Block will be closed when next item starts or at the end
+         }
+
+       } catch (parseError) {
+         // Ignore parse errors for individual lines
+       }
+     }
+   }
+
+   // Handle no content received
+   if (!hasEmittedStart) {
+     hasEmittedStart = true;
+     yield {
+       event: 'message_start',
+       data: {
+         type: 'message_start',
+         message: {
+           id: messageId,
+           type: 'message',
+           role: 'assistant',
+           model: model,
+           content: [],
+           stop_reason: null,
+           stop_sequence: null,
+           usage: { input_tokens: 0, output_tokens: 0 }
+         }
+       }
+     };
+
+     // Emit empty text block
+     yield {
+       event: 'content_block_start',
+       data: {
+         type: 'content_block_start',
+         index: 0,
+         content_block: { type: 'text', text: '' }
+       }
+     };
+
+     yield {
+       event: 'content_block_delta',
+       data: {
+         type: 'content_block_delta',
+         index: 0,
+         delta: { type: 'text_delta', text: '' }
+       }
+     };
+
+     yield {
+       event: 'content_block_stop',
+       data: { type: 'content_block_stop', index: 0 }
+     };
+
+     blockIndex = 1;
+     currentBlockType = null;
+   } else if (currentBlockType !== null) {
+     // Close any open block
+     yield {
+       event: 'content_block_stop',
+       data: { type: 'content_block_stop', index: blockIndex }
+     };
+   }
+
+   // Emit message_delta with final usage
+   yield {
+     event: 'message_delta',
+     data: {
+       type: 'message_delta',
+       delta: { stop_reason: stopReason, stop_sequence: null },
+       usage: usage
+     }
+   };
+
+   // Emit message_stop
+   yield {
+     event: 'message_stop',
+     data: { type: 'message_stop' }
+   };
+ }
+
+ /**
+  * Parse SSE events from OpenAI Responses API (non-streaming)
+  * Returns the final response object
+  *
+  * @param {Response} response - The HTTP response with SSE body
+  * @returns {Object} The parsed response object
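+  *
+  * @example
+  * // Illustrative only (upstreamResponse is a hypothetical fetch() Response):
+  * //   const finalResponse = await parseResponsesAPIResponse(upstreamResponse);
+  * //   const outputItems = finalResponse?.output ?? [];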
+  */
+ export async function parseResponsesAPIResponse(response) {
+   const reader = response.body.getReader();
+   const decoder = new TextDecoder();
+   let buffer = '';
+   let finalResponse = null;
+
+   while (true) {
+     const { done, value } = await reader.read();
+     if (done) break;
+
+     buffer += decoder.decode(value, { stream: true });
+     const lines = buffer.split('\n');
+     buffer = lines.pop() || '';
+
+     for (const line of lines) {
+       if (!line.startsWith('data:')) continue;
+
+       const jsonText = line.slice(5).trim();
+       if (!jsonText) continue;
+
+       try {
+         const event = JSON.parse(jsonText);
+
+         // Capture the completed response
+         if (event.type === 'response.completed') {
+           finalResponse = event.response;
+         }
+       } catch (parseError) {
+         // Ignore parse errors
+       }
+     }
+   }
+
+   return finalResponse;
+ }
+
+ /**
+  * Format Anthropic SSE event for HTTP response
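+  *
+  * @example
+  * // formatSSEEvent({ event: 'message_stop', data: { type: 'message_stop' } })
+  * // returns: "event: message_stop\ndata: {\"type\":\"message_stop\"}\n\n"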
+  */
+ export function formatSSEEvent(event) {
+   return `event: ${event.event}\ndata: ${JSON.stringify(event.data)}\n\n`;
+ }
+
+ export default {
+   streamResponsesAPI,
+   parseResponsesAPIResponse,
+   formatSSEEvent
+ };
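+
+ /*
+  * Usage sketch (illustrative only; `upstreamResponse`, `req`, and `res` are
+  * hypothetical names for a fetch() Response and an Express-style request/response
+  * pair, not part of this module):
+  *
+  *   res.setHeader('Content-Type', 'text/event-stream');
+  *   for await (const evt of streamResponsesAPI(upstreamResponse, req.body.model)) {
+  *     res.write(formatSSEEvent(evt));
+  *   }
+  *   res.end();
+  */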