@agentgazer/shared 0.3.5 → 0.3.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,806 @@
"use strict";
/**
 * Format converter for cross-provider model override.
 * Converts request/response formats between OpenAI and Anthropic APIs.
 */
Object.defineProperty(exports, "__esModule", { value: true });
// Non-streaming request/response converters.
exports.openaiToAnthropic = openaiToAnthropic;
exports.anthropicToOpenaiRequest = anthropicToOpenaiRequest;
exports.openaiToAnthropicResponse = openaiToAnthropicResponse;
exports.anthropicToOpenai = anthropicToOpenai;
// Anthropic SSE -> OpenAI chunk streaming helpers.
exports.createStreamingConverterState = createStreamingConverterState;
exports.anthropicSseToOpenaiChunks = anthropicSseToOpenaiChunks;
exports.parseAnthropicSSELine = parseAnthropicSSELine;
exports.formatOpenAISSELine = formatOpenAISSELine;
exports.formatOpenAISSEDone = formatOpenAISSEDone;
// OpenAI chunk -> Anthropic SSE streaming helpers.
exports.createOpenAIToAnthropicStreamState = createOpenAIToAnthropicStreamState;
exports.openaiChunkToAnthropicSse = openaiChunkToAnthropicSse;
exports.formatAnthropicSSELine = formatAnthropicSSELine;
exports.finalizeOpenAIToAnthropicStream = finalizeOpenAIToAnthropicStream;
exports.isOpenAIToAnthropicStreamFinalized = isOpenAIToAnthropicStreamFinalized;
// ---------------------------------------------------------------------------
// Conversion Functions
// ---------------------------------------------------------------------------
// Anthropic's messages API requires max_tokens; used when an OpenAI request
// omits it.
const DEFAULT_MAX_TOKENS = 4096;
/**
 * Convert an OpenAI chat completion request to the Anthropic messages format.
 *
 * - `system` messages are hoisted into the top-level `system` field.
 * - Assistant `tool_calls` become `tool_use` content blocks.
 * - `tool` role messages become user messages containing `tool_result` blocks.
 *
 * @param {object} request - OpenAI-style chat completion request.
 * @returns {object} Anthropic messages API request body.
 */
function openaiToAnthropic(request) {
    // Tool-call arguments arrive as a JSON string; some providers emit
    // malformed or truncated JSON, so parse defensively instead of throwing.
    const parseArgs = (raw) => {
        try {
            return JSON.parse(raw || "{}");
        }
        catch {
            return {};
        }
    };
    const systemMessages = [];
    const messages = [];
    for (const msg of request.messages) {
        if (msg.role === "system") {
            // Extract system messages to top-level system field
            const content = typeof msg.content === "string" ? msg.content : "";
            if (content) {
                systemMessages.push(content);
            }
        }
        else if (msg.role === "user" || msg.role === "assistant") {
            // Convert content format
            let content;
            if (typeof msg.content === "string") {
                content = msg.content;
            }
            else if (Array.isArray(msg.content)) {
                content = msg.content.map(convertOpenAIContentPartToAnthropic);
            }
            else {
                content = "";
            }
            // Handle tool calls in assistant messages
            if (msg.role === "assistant" && msg.tool_calls) {
                const parts = [];
                if (typeof content === "string" && content) {
                    parts.push({ type: "text", text: content });
                }
                else if (Array.isArray(content)) {
                    parts.push(...content);
                }
                for (const tc of msg.tool_calls) {
                    parts.push({
                        type: "tool_use",
                        id: tc.id,
                        name: tc.function.name,
                        input: parseArgs(tc.function.arguments),
                    });
                }
                messages.push({ role: "assistant", content: parts });
            }
            else {
                messages.push({ role: msg.role, content });
            }
        }
        else if (msg.role === "tool") {
            // Convert tool response to Anthropic format: a user message that
            // carries a single tool_result block.
            messages.push({
                role: "user",
                content: [{
                        type: "tool_result",
                        tool_use_id: msg.tool_call_id,
                        content: typeof msg.content === "string" ? msg.content : "",
                    }],
            });
        }
    }
    const result = {
        model: request.model,
        messages,
        // Anthropic requires max_tokens; OpenAI does not.
        max_tokens: request.max_tokens ?? DEFAULT_MAX_TOKENS,
    };
    if (systemMessages.length > 0) {
        result.system = systemMessages.join("\n\n");
    }
    if (request.temperature !== undefined) {
        result.temperature = request.temperature;
    }
    if (request.top_p !== undefined) {
        result.top_p = request.top_p;
    }
    if (request.stop) {
        result.stop_sequences = Array.isArray(request.stop) ? request.stop : [request.stop];
    }
    if (request.stream !== undefined) {
        result.stream = request.stream;
    }
    if (request.tools) {
        result.tools = request.tools.map(convertOpenAIToolToAnthropic);
    }
    if (request.tool_choice) {
        if (request.tool_choice === "none") {
            // Anthropic has no "none" equivalent; leave tool_choice unset.
        }
        else if (request.tool_choice === "auto") {
            result.tool_choice = { type: "auto" };
        }
        else if (request.tool_choice === "required") {
            result.tool_choice = { type: "any" };
        }
        else if (typeof request.tool_choice === "object") {
            result.tool_choice = { type: "tool", name: request.tool_choice.function.name };
        }
    }
    return result;
}
/**
 * Convert one OpenAI content part into an Anthropic content block.
 * Text parts map directly; base64 data-URL images become Anthropic image
 * blocks; HTTP image URLs cannot be converted (Anthropic wants base64) and
 * fall back to a descriptive text block. Anything else becomes empty text.
 *
 * @param {object} part - OpenAI content part.
 * @returns {object} Anthropic content block.
 */
function convertOpenAIContentPartToAnthropic(part) {
    if (part.type === "text") {
        return { type: "text", text: part.text ?? "" };
    }
    if (part.type === "image_url" && part.image_url) {
        const url = part.image_url.url;
        const dataUrlMatch = url.startsWith("data:")
            ? url.match(/^data:([^;]+);base64,(.+)$/)
            : null;
        if (dataUrlMatch) {
            const [, mediaType, data] = dataUrlMatch;
            return {
                type: "image",
                source: { type: "base64", media_type: mediaType, data },
            };
        }
        // Non-data URLs can't be inlined as base64; degrade to text.
        return { type: "text", text: `[Image: ${url}]` };
    }
    return { type: "text", text: "" };
}
/**
 * Convert an OpenAI tool definition to an Anthropic tool definition.
 * Anthropic calls the JSON schema `input_schema`; when OpenAI omits
 * `parameters`, an empty object schema is supplied.
 *
 * @param {object} tool - OpenAI tool ({ type: "function", function: {...} }).
 * @returns {object} Anthropic tool definition.
 */
function convertOpenAIToolToAnthropic(tool) {
    const { name, description, parameters } = tool.function;
    return {
        name,
        description,
        input_schema: parameters ?? { type: "object", properties: {} },
    };
}
/**
 * Convert an Anthropic messages request to the OpenAI chat completion format.
 * This is the reverse of openaiToAnthropic.
 *
 * - The top-level `system` field becomes a leading system message. Per the
 *   Anthropic API it may be a plain string OR an array of text content
 *   blocks; both are normalized to the string OpenAI expects.
 * - `tool_result` blocks in user messages become `tool` role messages.
 * - `tool_use` blocks in assistant messages become `tool_calls`.
 *
 * @param {object} request - Anthropic messages API request body.
 * @returns {object} OpenAI-style chat completion request.
 */
function anthropicToOpenaiRequest(request) {
    const messages = [];
    // Convert system field to system message. Anthropic accepts a string or
    // an array of text blocks; OpenAI only takes a string, so flatten arrays.
    if (request.system) {
        const systemText = typeof request.system === "string"
            ? request.system
            : Array.isArray(request.system)
                ? request.system
                    .filter((block) => block.type === "text")
                    .map((block) => block.text ?? "")
                    .join("\n\n")
                : "";
        if (systemText) {
            messages.push({ role: "system", content: systemText });
        }
    }
    // Convert messages
    for (const msg of request.messages) {
        if (msg.role === "user") {
            if (typeof msg.content === "string") {
                messages.push({ role: "user", content: msg.content });
            }
            else if (Array.isArray(msg.content)) {
                // Check if it contains tool_result (should be converted to tool role)
                const toolResults = msg.content.filter(p => p.type === "tool_result");
                const otherParts = msg.content.filter(p => p.type !== "tool_result");
                // Add tool results as separate tool messages
                for (const tr of toolResults) {
                    // Handle content that can be string or array of content blocks
                    let toolContent;
                    if (typeof tr.content === "string") {
                        toolContent = tr.content;
                    }
                    else if (Array.isArray(tr.content)) {
                        // Extract text from content blocks
                        toolContent = tr.content
                            .filter((block) => block.type === "text")
                            .map(block => block.text)
                            .join("\n");
                    }
                    else {
                        toolContent = "";
                    }
                    messages.push({
                        role: "tool",
                        content: toolContent,
                        tool_call_id: tr.tool_use_id,
                    });
                }
                // Add other content as user message
                if (otherParts.length > 0) {
                    const converted = otherParts.map(convertAnthropicContentPartToOpenAI);
                    // Collapse a lone text part to a plain string for brevity.
                    if (converted.length === 1 && converted[0].type === "text") {
                        messages.push({ role: "user", content: converted[0].text ?? "" });
                    }
                    else {
                        messages.push({ role: "user", content: converted });
                    }
                }
            }
        }
        else if (msg.role === "assistant") {
            if (typeof msg.content === "string") {
                messages.push({ role: "assistant", content: msg.content });
            }
            else if (Array.isArray(msg.content)) {
                const textParts = [];
                const toolCalls = [];
                for (const part of msg.content) {
                    if (part.type === "text") {
                        textParts.push(part.text ?? "");
                    }
                    else if (part.type === "tool_use") {
                        toolCalls.push({
                            id: part.id,
                            type: "function",
                            function: {
                                name: part.name,
                                arguments: JSON.stringify(part.input ?? {}),
                            },
                        });
                    }
                }
                const assistantMsg = {
                    role: "assistant",
                    // OpenAI uses null (not "") for a message with no text.
                    content: textParts.join("") || null,
                };
                if (toolCalls.length > 0) {
                    assistantMsg.tool_calls = toolCalls;
                }
                messages.push(assistantMsg);
            }
        }
    }
    const result = {
        model: request.model,
        messages,
    };
    if (request.max_tokens !== undefined) {
        result.max_tokens = request.max_tokens;
    }
    if (request.temperature !== undefined) {
        result.temperature = request.temperature;
    }
    if (request.top_p !== undefined) {
        result.top_p = request.top_p;
    }
    if (request.stop_sequences) {
        result.stop = request.stop_sequences;
    }
    if (request.stream !== undefined) {
        result.stream = request.stream;
    }
    if (request.tools) {
        result.tools = request.tools.map(convertAnthropicToolToOpenAI);
    }
    if (request.tool_choice) {
        if (request.tool_choice.type === "auto") {
            result.tool_choice = "auto";
        }
        else if (request.tool_choice.type === "any") {
            result.tool_choice = "required";
        }
        else if (request.tool_choice.type === "tool" && request.tool_choice.name) {
            result.tool_choice = { type: "function", function: { name: request.tool_choice.name } };
        }
    }
    return result;
}
/**
 * Convert one Anthropic content block into an OpenAI content part.
 * Text blocks map directly; base64 image blocks become data-URL image parts.
 * Any other block type degrades to an empty text part.
 *
 * @param {object} part - Anthropic content block.
 * @returns {object} OpenAI content part.
 */
function convertAnthropicContentPartToOpenAI(part) {
    switch (part.type) {
        case "text":
            return { type: "text", text: part.text ?? "" };
        case "image":
            if (part.source) {
                const { media_type: mediaType, data } = part.source;
                // Re-inline the base64 payload as a data URL.
                return { type: "image_url", image_url: { url: `data:${mediaType};base64,${data}` } };
            }
            return { type: "text", text: "" };
        default:
            return { type: "text", text: "" };
    }
}
/**
 * Convert an Anthropic tool definition to an OpenAI tool definition.
 * `input_schema` is renamed to `parameters` under a function wrapper.
 *
 * @param {object} tool - Anthropic tool definition.
 * @returns {object} OpenAI tool ({ type: "function", function: {...} }).
 */
function convertAnthropicToolToOpenAI(tool) {
    const { name, description, input_schema: parameters } = tool;
    return {
        type: "function",
        function: { name, description, parameters },
    };
}
/**
 * Convert an OpenAI chat completion response to the Anthropic messages
 * format. This is the reverse of anthropicToOpenai. Only the first choice is
 * converted.
 *
 * @param {object} response - OpenAI chat completion response.
 * @param {string} [requestModel] - Model name to report instead of response.model.
 * @returns {object} Anthropic messages API response body.
 */
function openaiToAnthropicResponse(response, requestModel) {
    // Tool-call arguments arrive as a JSON string; parse defensively so a
    // malformed payload degrades to an empty input instead of throwing.
    const parseArgs = (raw) => {
        try {
            return JSON.parse(raw || "{}");
        }
        catch {
            return {};
        }
    };
    const content = [];
    if (response.choices && response.choices.length > 0) {
        const choice = response.choices[0];
        const msg = choice.message;
        // Convert text content
        if (msg.content) {
            content.push({ type: "text", text: msg.content });
        }
        // Convert tool calls
        if (msg.tool_calls) {
            for (const tc of msg.tool_calls) {
                content.push({
                    type: "tool_use",
                    id: tc.id,
                    name: tc.function.name,
                    input: parseArgs(tc.function.arguments),
                });
            }
        }
    }
    // Anthropic requires at least one content block.
    if (content.length === 0) {
        content.push({ type: "text", text: "" });
    }
    const stopReason = mapOpenAIFinishReason(response.choices?.[0]?.finish_reason);
    return {
        id: response.id || `msg_${Date.now()}`,
        type: "message",
        role: "assistant",
        content,
        model: requestModel ?? response.model,
        stop_reason: stopReason,
        usage: {
            input_tokens: response.usage?.prompt_tokens ?? 0,
            output_tokens: response.usage?.completion_tokens ?? 0,
        },
    };
}
/**
 * Map an OpenAI finish_reason to the equivalent Anthropic stop_reason.
 * Unknown or missing values map to null.
 *
 * @param {string|null|undefined} finishReason - OpenAI finish_reason.
 * @returns {string|null} Anthropic stop_reason.
 */
function mapOpenAIFinishReason(finishReason) {
    const table = new Map([
        ["stop", "end_turn"],
        ["length", "max_tokens"],
        ["tool_calls", "tool_use"],
    ]);
    return table.get(finishReason) ?? null;
}
/**
 * Convert an Anthropic messages response to the OpenAI chat completion
 * format. Text blocks are concatenated into the message content; tool_use
 * blocks become tool_calls.
 *
 * @param {object} response - Anthropic messages API response body.
 * @param {string} [requestModel] - Model name to report instead of response.model.
 * @returns {object} OpenAI chat completion response.
 */
function anthropicToOpenai(response, requestModel) {
    const textContent = response.content
        .filter((block) => block.type === "text")
        .map((block) => block.text ?? "")
        .join("");
    const toolCalls = response.content
        .filter((block) => block.type === "tool_use")
        .map((block) => ({
            id: block.id,
            type: "function",
            function: {
                name: block.name,
                arguments: JSON.stringify(block.input ?? {}),
            },
        }));
    // OpenAI uses null (not "") when the assistant produced no text.
    const message = { role: "assistant", content: textContent || null };
    if (toolCalls.length > 0) {
        message.tool_calls = toolCalls;
    }
    const { input_tokens: inputTokens, output_tokens: outputTokens } = response.usage;
    return {
        id: response.id,
        object: "chat.completion",
        created: Math.floor(Date.now() / 1000),
        model: requestModel ?? response.model,
        choices: [{
            index: 0,
            message,
            finish_reason: mapAnthropicStopReason(response.stop_reason),
        }],
        usage: {
            prompt_tokens: inputTokens,
            completion_tokens: outputTokens,
            total_tokens: inputTokens + outputTokens,
        },
    };
}
/**
 * Map an Anthropic stop_reason to the equivalent OpenAI finish_reason.
 * Both end_turn and stop_sequence collapse to "stop"; unknown or missing
 * values map to null.
 *
 * @param {string|null|undefined} stopReason - Anthropic stop_reason.
 * @returns {string|null} OpenAI finish_reason.
 */
function mapAnthropicStopReason(stopReason) {
    const table = new Map([
        ["end_turn", "stop"],
        ["stop_sequence", "stop"],
        ["max_tokens", "length"],
        ["tool_use", "tool_calls"],
    ]);
    return table.get(stopReason) ?? null;
}
/**
 * Create fresh mutable state for converting an Anthropic SSE stream into
 * OpenAI chat.completion.chunk objects (see anthropicSseToOpenaiChunks).
 *
 * @returns {object} New stream-conversion state.
 */
function createStreamingConverterState() {
    const nowSeconds = Math.floor(Date.now() / 1000);
    const state = {
        messageId: "",
        model: "",
        created: nowSeconds,
        inputTokens: 0,
        outputTokens: 0,
        currentContentIndex: 0,
        // Keyed by Anthropic content block index.
        toolCallsInProgress: new Map(),
    };
    return state;
}
/**
 * Convert an Anthropic SSE event to OpenAI stream chunk(s).
 * Returns an array of chunks (may be empty, one, or multiple).
 *
 * Mutates `state` (from createStreamingConverterState) to carry the message
 * id, model, token counts and in-flight tool calls across events.
 *
 * @param {object} event - Parsed Anthropic SSE event.
 * @param {object} state - Mutable stream-conversion state.
 * @param {string} [requestModel] - Model name override for emitted chunks.
 * @returns {object[]} OpenAI chat.completion.chunk objects.
 */
function anthropicSseToOpenaiChunks(event, state, requestModel) {
    const chunks = [];
    // Wrap a single choice delta in the OpenAI chunk envelope.
    const makeChunk = (delta, finishReason = null) => ({
        id: state.messageId,
        object: "chat.completion.chunk",
        created: state.created,
        model: state.model,
        choices: [{
            index: 0,
            delta,
            finish_reason: finishReason,
        }],
    });
    switch (event.type) {
        case "message_start":
            state.messageId = event.message.id;
            state.model = requestModel ?? event.message.model;
            state.inputTokens = event.message.usage.input_tokens;
            // Emit initial chunk with role
            chunks.push(makeChunk({ role: "assistant" }));
            break;
        case "content_block_start":
            state.currentContentIndex = event.index;
            if (event.content_block.type === "tool_use") {
                const tc = event.content_block;
                state.toolCallsInProgress.set(event.index, {
                    id: tc.id,
                    name: tc.name,
                    arguments: "",
                });
                // Emit tool call start
                chunks.push(makeChunk({
                    tool_calls: [{
                        index: event.index,
                        id: tc.id,
                        type: "function",
                        function: { name: tc.name, arguments: "" },
                    }],
                }));
            }
            break;
        case "content_block_delta":
            if (event.delta.type === "text_delta") {
                chunks.push(makeChunk({ content: event.delta.text }));
            }
            else if (event.delta.type === "input_json_delta") {
                const tc = state.toolCallsInProgress.get(event.index);
                if (tc) {
                    // Accumulate the full argument string for bookkeeping while
                    // streaming only the new fragment downstream.
                    tc.arguments += event.delta.partial_json;
                    chunks.push(makeChunk({
                        tool_calls: [{
                            index: event.index,
                            function: { arguments: event.delta.partial_json },
                        }],
                    }));
                }
            }
            break;
        case "content_block_stop":
            // No action needed
            break;
        case "message_delta": {
            state.outputTokens = event.usage.output_tokens;
            const finishReason = mapAnthropicStopReason(event.delta.stop_reason);
            const chunk = makeChunk({}, finishReason);
            chunk.usage = {
                prompt_tokens: state.inputTokens,
                completion_tokens: state.outputTokens,
                total_tokens: state.inputTokens + state.outputTokens,
            };
            chunks.push(chunk);
            break;
        }
        case "message_stop":
            // Stream complete, no chunk needed (already sent finish_reason in message_delta)
            break;
    }
    return chunks;
}
/**
 * Parse the data payload of an Anthropic SSE line and return the event object.
 *
 * The `eventType` parameter is kept for signature compatibility, but the
 * payload's own `type` field is authoritative, so the parsed object is
 * returned as-is regardless. (The original compared `parsed.type` against
 * `eventType` and returned `parsed` in both branches — dead code.)
 *
 * @param {string} eventType - SSE `event:` name (informational only).
 * @param {string} data - SSE `data:` payload (JSON).
 * @returns {object|null} Parsed event, or null when the payload is not JSON.
 */
function parseAnthropicSSELine(eventType, data) {
    try {
        return JSON.parse(data);
    }
    catch {
        return null;
    }
}
/**
 * Serialize an OpenAI stream chunk as a single SSE line.
 *
 * @param {object} chunk - OpenAI chat.completion.chunk object.
 * @returns {string} `data: {...}\n\n` line ready to write to the stream.
 */
function formatOpenAISSELine(chunk) {
    const payload = JSON.stringify(chunk);
    return `data: ${payload}\n\n`;
}
/**
 * Produce the OpenAI stream terminator line.
 * OpenAI ends SSE streams with a literal `[DONE]` sentinel payload.
 *
 * @returns {string} `data: [DONE]\n\n`
 */
function formatOpenAISSEDone() {
    const DONE_SENTINEL = "data: [DONE]\n\n";
    return DONE_SENTINEL;
}
/**
 * Create fresh mutable state for converting an OpenAI chunk stream into
 * Anthropic SSE events (see openaiChunkToAnthropicSse).
 *
 * @returns {object} New stream-conversion state.
 */
function createOpenAIToAnthropicStreamState() {
    // Synthetic id up front; openaiChunkToAnthropicSse replaces it with the
    // first chunk's id when one is present.
    const syntheticId = `msg_${Date.now()}`;
    const state = {
        messageId: syntheticId,
        model: "",
        inputTokens: 0,
        outputTokens: 0,
        sentMessageStart: false,
        sentMessageStop: false,
        contentBlockIndex: 0,
        contentBlockStarted: false,
        // Keyed by OpenAI tool_call index.
        toolCallsInProgress: new Map(),
    };
    return state;
}
/**
 * Convert OpenAI stream chunk to Anthropic SSE event(s).
 * Returns array of SSE lines (formatted strings ready to send).
 *
 * Mutates `state` (created by createOpenAIToAnthropicStreamState) to track
 * what has already been emitted across chunks:
 * - message_start is emitted exactly once, on the first chunk seen.
 * - Text deltas lazily open a text content block and stream text_delta events.
 * - Each new tool call closes any open block and opens a tool_use block.
 * - A finish_reason triggers content_block_stop(s), message_delta, message_stop.
 *
 * Only the first choice of each chunk is inspected.
 *
 * @param {object} chunk - OpenAI chat.completion.chunk object.
 * @param {object} state - Mutable stream-conversion state.
 * @param {string} [requestModel] - Model name override for emitted events.
 * @returns {string[]} Formatted SSE lines (possibly empty).
 */
function openaiChunkToAnthropicSse(chunk, state, requestModel) {
    const lines = [];
    // Track model
    if (chunk.model && !state.model) {
        state.model = requestModel ?? chunk.model;
    }
    // Send message_start on first chunk
    if (!state.sentMessageStart) {
        state.messageId = chunk.id || state.messageId;
        const messageStart = {
            type: "message_start",
            message: {
                id: state.messageId,
                type: "message",
                role: "assistant",
                content: [],
                model: state.model || "unknown",
                stop_reason: null,
                stop_sequence: null,
                // Token counts are not known at stream start; reported as 0.
                usage: { input_tokens: 0, output_tokens: 0 },
            },
        };
        lines.push(formatAnthropicSSELine("message_start", messageStart));
        state.sentMessageStart = true;
    }
    if (chunk.choices && chunk.choices.length > 0) {
        const choice = chunk.choices[0];
        const delta = choice.delta;
        // Handle text content
        if (delta.content) {
            // Start content block if not started
            if (!state.contentBlockStarted) {
                const blockStart = {
                    type: "content_block_start",
                    index: state.contentBlockIndex,
                    content_block: { type: "text", text: "" },
                };
                lines.push(formatAnthropicSSELine("content_block_start", blockStart));
                state.contentBlockStarted = true;
            }
            // Send text delta
            const textDelta = {
                type: "content_block_delta",
                index: state.contentBlockIndex,
                delta: { type: "text_delta", text: delta.content },
            };
            lines.push(formatAnthropicSSELine("content_block_delta", textDelta));
        }
        // Handle tool calls
        if (delta.tool_calls) {
            for (const tc of delta.tool_calls) {
                const tcIndex = tc.index ?? 0;
                const existing = state.toolCallsInProgress.get(tcIndex);
                // A tool-call fragment with both id and name marks a new call;
                // fragments with only arguments continue the existing one.
                if (tc.id && tc.function?.name) {
                    // New tool call starting
                    // Close previous content block if open (text or previous tool call)
                    if (state.contentBlockStarted) {
                        const blockStop = {
                            type: "content_block_stop",
                            index: state.contentBlockIndex,
                        };
                        lines.push(formatAnthropicSSELine("content_block_stop", blockStop));
                        state.contentBlockIndex++;
                        state.contentBlockStarted = false;
                    }
                    // Store the tool call with its Anthropic content block index
                    const anthropicBlockIndex = state.contentBlockIndex;
                    state.toolCallsInProgress.set(tcIndex, {
                        anthropicBlockIndex,
                        id: tc.id,
                        name: tc.function.name,
                        arguments: "",
                    });
                    // Start the tool_use content block
                    const blockStart = {
                        type: "content_block_start",
                        index: anthropicBlockIndex,
                        content_block: { type: "tool_use", id: tc.id, name: tc.function.name, input: {} },
                    };
                    lines.push(formatAnthropicSSELine("content_block_start", blockStart));
                    // Increment for next content block
                    state.contentBlockIndex++;
                    // If arguments are also present in the same chunk, emit them
                    if (tc.function.arguments) {
                        const toolCall = state.toolCallsInProgress.get(tcIndex);
                        toolCall.arguments += tc.function.arguments;
                        const inputDelta = {
                            type: "content_block_delta",
                            index: anthropicBlockIndex,
                            delta: { type: "input_json_delta", partial_json: tc.function.arguments },
                        };
                        lines.push(formatAnthropicSSELine("content_block_delta", inputDelta));
                    }
                }
                else if (tc.function?.arguments && existing) {
                    // Tool call argument delta (continuing an existing tool call)
                    existing.arguments += tc.function.arguments;
                    const inputDelta = {
                        type: "content_block_delta",
                        index: existing.anthropicBlockIndex,
                        delta: { type: "input_json_delta", partial_json: tc.function.arguments },
                    };
                    lines.push(formatAnthropicSSELine("content_block_delta", inputDelta));
                }
            }
        }
        // Handle finish
        if (choice.finish_reason) {
            // Close any open text content block
            // Note: for text blocks, contentBlockIndex points to the current block (we don't increment until a new block starts)
            if (state.contentBlockStarted) {
                const blockStop = {
                    type: "content_block_stop",
                    index: state.contentBlockIndex,
                };
                lines.push(formatAnthropicSSELine("content_block_stop", blockStop));
                state.contentBlockStarted = false;
            }
            // Close all open tool call blocks
            for (const [, toolCall] of state.toolCallsInProgress) {
                const blockStop = {
                    type: "content_block_stop",
                    index: toolCall.anthropicBlockIndex,
                };
                lines.push(formatAnthropicSSELine("content_block_stop", blockStop));
            }
            state.toolCallsInProgress.clear();
            // Track usage if available
            if (chunk.usage) {
                state.inputTokens = chunk.usage.prompt_tokens;
                state.outputTokens = chunk.usage.completion_tokens;
            }
            const stopReason = mapOpenAIFinishReasonToAnthropic(choice.finish_reason);
            const messageDelta = {
                type: "message_delta",
                delta: { stop_reason: stopReason },
                usage: { output_tokens: state.outputTokens },
            };
            lines.push(formatAnthropicSSELine("message_delta", messageDelta));
            const messageStop = { type: "message_stop" };
            lines.push(formatAnthropicSSELine("message_stop", messageStop));
            state.sentMessageStop = true;
        }
    }
    return lines;
}
/**
 * Map an OpenAI finish_reason to the equivalent Anthropic stop_reason.
 * Unlike mapOpenAIFinishReason, unknown values default to "end_turn" because
 * an Anthropic stream must always report a concrete stop_reason.
 *
 * @param {string|null|undefined} finishReason - OpenAI finish_reason.
 * @returns {string} Anthropic stop_reason (never null).
 */
function mapOpenAIFinishReasonToAnthropic(finishReason) {
    const table = new Map([
        ["stop", "end_turn"],
        ["length", "max_tokens"],
        ["tool_calls", "tool_use"],
    ]);
    return table.get(finishReason) ?? "end_turn";
}
/**
 * Serialize an Anthropic SSE event as an `event:`/`data:` line pair.
 *
 * @param {string} eventType - SSE event name (e.g. "message_start").
 * @param {object} data - Event payload to JSON-encode.
 * @returns {string} Formatted SSE lines terminated by a blank line.
 */
function formatAnthropicSSELine(eventType, data) {
    const payload = JSON.stringify(data);
    return `event: ${eventType}\ndata: ${payload}\n\n`;
}
/**
 * Finalize an OpenAI to Anthropic stream conversion.
 * Ensures message_stop is sent even if the OpenAI stream ended unexpectedly:
 * closes any open content blocks, emits a message_delta with stop_reason
 * "end_turn", then message_stop. Idempotent — returns an empty array when
 * the stream never started or was already finalized.
 *
 * @param {object} state - Mutable stream-conversion state.
 * @returns {string[]} SSE lines needed to close the stream properly.
 */
function finalizeOpenAIToAnthropicStream(state) {
    // Nothing to finalize if the stream never started or already closed.
    if (!state.sentMessageStart || state.sentMessageStop) {
        return [];
    }
    const lines = [];
    const emit = (eventType, data) => {
        lines.push(formatAnthropicSSELine(eventType, data));
    };
    // Close any open text content block.
    if (state.contentBlockStarted) {
        emit("content_block_stop", {
            type: "content_block_stop",
            index: state.contentBlockIndex,
        });
        state.contentBlockStarted = false;
    }
    // Close every tool-call block still in flight.
    for (const toolCall of state.toolCallsInProgress.values()) {
        emit("content_block_stop", {
            type: "content_block_stop",
            index: toolCall.anthropicBlockIndex,
        });
    }
    state.toolCallsInProgress.clear();
    // Report a normal end of turn followed by the stream terminator.
    emit("message_delta", {
        type: "message_delta",
        delta: { stop_reason: "end_turn" },
        usage: { output_tokens: state.outputTokens },
    });
    emit("message_stop", { type: "message_stop" });
    state.sentMessageStop = true;
    return lines;
}
/**
 * Check if the OpenAI to Anthropic stream has been properly finalized.
 * A stream that never started needs no finalization; otherwise it is
 * finalized exactly when message_stop has been emitted.
 *
 * @param {object} state - Stream-conversion state.
 * @returns {boolean} True when no further finalization is required.
 */
function isOpenAIToAnthropicStreamFinalized(state) {
    return !state.sentMessageStart || state.sentMessageStop;
}
+ //# sourceMappingURL=format-converter.js.map