@oh-my-pi/pi-ai 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,569 @@
1
+ import OpenAI from "openai";
2
+ import type {
3
+ Tool as OpenAITool,
4
+ ResponseCreateParamsStreaming,
5
+ ResponseFunctionToolCall,
6
+ ResponseInput,
7
+ ResponseInputContent,
8
+ ResponseInputImage,
9
+ ResponseInputText,
10
+ ResponseOutputMessage,
11
+ ResponseReasoningItem,
12
+ } from "openai/resources/responses/responses.js";
13
+ import { calculateCost } from "../models";
14
+ import { getEnvApiKey } from "../stream";
15
+ import type {
16
+ Api,
17
+ AssistantMessage,
18
+ Context,
19
+ Model,
20
+ StopReason,
21
+ StreamFunction,
22
+ StreamOptions,
23
+ TextContent,
24
+ ThinkingContent,
25
+ Tool,
26
+ ToolCall,
27
+ } from "../types";
28
+ import { AssistantMessageEventStream } from "../utils/event-stream";
29
+ import { parseStreamingJson } from "../utils/json-parse";
30
+ import { sanitizeSurrogates } from "../utils/sanitize-unicode";
31
+ import { transformMessages } from "./transorm-messages";
32
+
33
+ /** Fast deterministic hash to shorten long strings */
34
+ function shortHash(str: string): string {
35
+ let h1 = 0xdeadbeef;
36
+ let h2 = 0x41c6ce57;
37
+ for (let i = 0; i < str.length; i++) {
38
+ const ch = str.charCodeAt(i);
39
+ h1 = Math.imul(h1 ^ ch, 2654435761);
40
+ h2 = Math.imul(h2 ^ ch, 1597334677);
41
+ }
42
+ h1 = Math.imul(h1 ^ (h1 >>> 16), 2246822507) ^ Math.imul(h2 ^ (h2 >>> 13), 3266489909);
43
+ h2 = Math.imul(h2 ^ (h2 >>> 16), 2246822507) ^ Math.imul(h1 ^ (h1 >>> 13), 3266489909);
44
+ return (h2 >>> 0).toString(36) + (h1 >>> 0).toString(36);
45
+ }
46
+
47
// OpenAI Responses-specific options, layered on top of the generic StreamOptions.
export interface OpenAIResponsesOptions extends StreamOptions {
	/** Forwarded as `reasoning.effort`; when unset, buildParams falls back to "medium". */
	reasoningEffort?: "minimal" | "low" | "medium" | "high" | "xhigh";
	/** Forwarded as `reasoning.summary`; falsy values fall back to "auto" when reasoning is enabled. */
	reasoningSummary?: "auto" | "detailed" | "concise" | null;
}
52
+
53
+ /**
54
+ * Generate function for OpenAI Responses API
55
+ */
56
+ export const streamOpenAIResponses: StreamFunction<"openai-responses"> = (
57
+ model: Model<"openai-responses">,
58
+ context: Context,
59
+ options?: OpenAIResponsesOptions,
60
+ ): AssistantMessageEventStream => {
61
+ const stream = new AssistantMessageEventStream();
62
+
63
+ // Start async processing
64
+ (async () => {
65
+ const output: AssistantMessage = {
66
+ role: "assistant",
67
+ content: [],
68
+ api: "openai-responses" as Api,
69
+ provider: model.provider,
70
+ model: model.id,
71
+ usage: {
72
+ input: 0,
73
+ output: 0,
74
+ cacheRead: 0,
75
+ cacheWrite: 0,
76
+ totalTokens: 0,
77
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
78
+ },
79
+ stopReason: "stop",
80
+ timestamp: Date.now(),
81
+ };
82
+
83
+ try {
84
+ // Create OpenAI client
85
+ const apiKey = options?.apiKey || getEnvApiKey(model.provider) || "";
86
+ const client = createClient(model, context, apiKey);
87
+ const params = buildParams(model, context, options);
88
+ const openaiStream = await client.responses.create(params, { signal: options?.signal });
89
+ stream.push({ type: "start", partial: output });
90
+
91
+ let currentItem: ResponseReasoningItem | ResponseOutputMessage | ResponseFunctionToolCall | null = null;
92
+ let currentBlock: ThinkingContent | TextContent | (ToolCall & { partialJson: string }) | null = null;
93
+ const blocks = output.content;
94
+ const blockIndex = () => blocks.length - 1;
95
+
96
+ for await (const event of openaiStream) {
97
+ // Handle output item start
98
+ if (event.type === "response.output_item.added") {
99
+ const item = event.item;
100
+ if (item.type === "reasoning") {
101
+ currentItem = item;
102
+ currentBlock = { type: "thinking", thinking: "" };
103
+ output.content.push(currentBlock);
104
+ stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
105
+ } else if (item.type === "message") {
106
+ currentItem = item;
107
+ currentBlock = { type: "text", text: "" };
108
+ output.content.push(currentBlock);
109
+ stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
110
+ } else if (item.type === "function_call") {
111
+ currentItem = item;
112
+ currentBlock = {
113
+ type: "toolCall",
114
+ id: `${item.call_id}|${item.id}`,
115
+ name: item.name,
116
+ arguments: {},
117
+ partialJson: item.arguments || "",
118
+ };
119
+ output.content.push(currentBlock);
120
+ stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
121
+ }
122
+ }
123
+ // Handle reasoning summary deltas
124
+ else if (event.type === "response.reasoning_summary_part.added") {
125
+ if (currentItem && currentItem.type === "reasoning") {
126
+ currentItem.summary = currentItem.summary || [];
127
+ currentItem.summary.push(event.part);
128
+ }
129
+ } else if (event.type === "response.reasoning_summary_text.delta") {
130
+ if (
131
+ currentItem &&
132
+ currentItem.type === "reasoning" &&
133
+ currentBlock &&
134
+ currentBlock.type === "thinking"
135
+ ) {
136
+ currentItem.summary = currentItem.summary || [];
137
+ const lastPart = currentItem.summary[currentItem.summary.length - 1];
138
+ if (lastPart) {
139
+ currentBlock.thinking += event.delta;
140
+ lastPart.text += event.delta;
141
+ stream.push({
142
+ type: "thinking_delta",
143
+ contentIndex: blockIndex(),
144
+ delta: event.delta,
145
+ partial: output,
146
+ });
147
+ }
148
+ }
149
+ }
150
+ // Add a new line between summary parts (hack...)
151
+ else if (event.type === "response.reasoning_summary_part.done") {
152
+ if (
153
+ currentItem &&
154
+ currentItem.type === "reasoning" &&
155
+ currentBlock &&
156
+ currentBlock.type === "thinking"
157
+ ) {
158
+ currentItem.summary = currentItem.summary || [];
159
+ const lastPart = currentItem.summary[currentItem.summary.length - 1];
160
+ if (lastPart) {
161
+ currentBlock.thinking += "\n\n";
162
+ lastPart.text += "\n\n";
163
+ stream.push({
164
+ type: "thinking_delta",
165
+ contentIndex: blockIndex(),
166
+ delta: "\n\n",
167
+ partial: output,
168
+ });
169
+ }
170
+ }
171
+ }
172
+ // Handle text output deltas
173
+ else if (event.type === "response.content_part.added") {
174
+ if (currentItem && currentItem.type === "message") {
175
+ currentItem.content = currentItem.content || [];
176
+ // Filter out ReasoningText, only accept output_text and refusal
177
+ if (event.part.type === "output_text" || event.part.type === "refusal") {
178
+ currentItem.content.push(event.part);
179
+ }
180
+ }
181
+ } else if (event.type === "response.output_text.delta") {
182
+ if (currentItem && currentItem.type === "message" && currentBlock && currentBlock.type === "text") {
183
+ const lastPart = currentItem.content[currentItem.content.length - 1];
184
+ if (lastPart && lastPart.type === "output_text") {
185
+ currentBlock.text += event.delta;
186
+ lastPart.text += event.delta;
187
+ stream.push({
188
+ type: "text_delta",
189
+ contentIndex: blockIndex(),
190
+ delta: event.delta,
191
+ partial: output,
192
+ });
193
+ }
194
+ }
195
+ } else if (event.type === "response.refusal.delta") {
196
+ if (currentItem && currentItem.type === "message" && currentBlock && currentBlock.type === "text") {
197
+ const lastPart = currentItem.content[currentItem.content.length - 1];
198
+ if (lastPart && lastPart.type === "refusal") {
199
+ currentBlock.text += event.delta;
200
+ lastPart.refusal += event.delta;
201
+ stream.push({
202
+ type: "text_delta",
203
+ contentIndex: blockIndex(),
204
+ delta: event.delta,
205
+ partial: output,
206
+ });
207
+ }
208
+ }
209
+ }
210
+ // Handle function call argument deltas
211
+ else if (event.type === "response.function_call_arguments.delta") {
212
+ if (
213
+ currentItem &&
214
+ currentItem.type === "function_call" &&
215
+ currentBlock &&
216
+ currentBlock.type === "toolCall"
217
+ ) {
218
+ currentBlock.partialJson += event.delta;
219
+ currentBlock.arguments = parseStreamingJson(currentBlock.partialJson);
220
+ stream.push({
221
+ type: "toolcall_delta",
222
+ contentIndex: blockIndex(),
223
+ delta: event.delta,
224
+ partial: output,
225
+ });
226
+ }
227
+ }
228
+ // Handle output item completion
229
+ else if (event.type === "response.output_item.done") {
230
+ const item = event.item;
231
+
232
+ if (item.type === "reasoning" && currentBlock && currentBlock.type === "thinking") {
233
+ currentBlock.thinking = item.summary?.map((s) => s.text).join("\n\n") || "";
234
+ currentBlock.thinkingSignature = JSON.stringify(item);
235
+ stream.push({
236
+ type: "thinking_end",
237
+ contentIndex: blockIndex(),
238
+ content: currentBlock.thinking,
239
+ partial: output,
240
+ });
241
+ currentBlock = null;
242
+ } else if (item.type === "message" && currentBlock && currentBlock.type === "text") {
243
+ currentBlock.text = item.content.map((c) => (c.type === "output_text" ? c.text : c.refusal)).join("");
244
+ currentBlock.textSignature = item.id;
245
+ stream.push({
246
+ type: "text_end",
247
+ contentIndex: blockIndex(),
248
+ content: currentBlock.text,
249
+ partial: output,
250
+ });
251
+ currentBlock = null;
252
+ } else if (item.type === "function_call") {
253
+ const toolCall: ToolCall = {
254
+ type: "toolCall",
255
+ id: `${item.call_id}|${item.id}`,
256
+ name: item.name,
257
+ arguments: JSON.parse(item.arguments),
258
+ };
259
+
260
+ stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
261
+ }
262
+ }
263
+ // Handle completion
264
+ else if (event.type === "response.completed") {
265
+ const response = event.response;
266
+ if (response?.usage) {
267
+ const cachedTokens = response.usage.input_tokens_details?.cached_tokens || 0;
268
+ output.usage = {
269
+ // OpenAI includes cached tokens in input_tokens, so subtract to get non-cached input
270
+ input: (response.usage.input_tokens || 0) - cachedTokens,
271
+ output: response.usage.output_tokens || 0,
272
+ cacheRead: cachedTokens,
273
+ cacheWrite: 0,
274
+ totalTokens: response.usage.total_tokens || 0,
275
+ cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
276
+ };
277
+ }
278
+ calculateCost(model, output.usage);
279
+ // Map status to stop reason
280
+ output.stopReason = mapStopReason(response?.status);
281
+ if (output.content.some((b) => b.type === "toolCall") && output.stopReason === "stop") {
282
+ output.stopReason = "toolUse";
283
+ }
284
+ }
285
+ // Handle errors
286
+ else if (event.type === "error") {
287
+ throw new Error(`Error Code ${event.code}: ${event.message}` || "Unknown error");
288
+ } else if (event.type === "response.failed") {
289
+ throw new Error("Unknown error");
290
+ }
291
+ }
292
+
293
+ if (options?.signal?.aborted) {
294
+ throw new Error("Request was aborted");
295
+ }
296
+
297
+ if (output.stopReason === "aborted" || output.stopReason === "error") {
298
+ throw new Error("An unkown error ocurred");
299
+ }
300
+
301
+ stream.push({ type: "done", reason: output.stopReason, message: output });
302
+ stream.end();
303
+ } catch (error) {
304
+ for (const block of output.content) delete (block as any).index;
305
+ output.stopReason = options?.signal?.aborted ? "aborted" : "error";
306
+ output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);
307
+ stream.push({ type: "error", reason: output.stopReason, error: output });
308
+ stream.end();
309
+ }
310
+ })();
311
+
312
+ return stream;
313
+ };
314
+
315
+ function createClient(model: Model<"openai-responses">, context: Context, apiKey?: string) {
316
+ if (!apiKey) {
317
+ if (!process.env.OPENAI_API_KEY) {
318
+ throw new Error(
319
+ "OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.",
320
+ );
321
+ }
322
+ apiKey = process.env.OPENAI_API_KEY;
323
+ }
324
+
325
+ const headers = { ...model.headers };
326
+ if (model.provider === "github-copilot") {
327
+ // Copilot expects X-Initiator to indicate whether the request is user-initiated
328
+ // or agent-initiated (e.g. follow-up after assistant/tool messages). If there is
329
+ // no prior message, default to user-initiated.
330
+ const messages = context.messages || [];
331
+ const lastMessage = messages[messages.length - 1];
332
+ const isAgentCall = lastMessage ? lastMessage.role !== "user" : false;
333
+ headers["X-Initiator"] = isAgentCall ? "agent" : "user";
334
+ headers["Openai-Intent"] = "conversation-edits";
335
+
336
+ // Copilot requires this header when sending images
337
+ const hasImages = messages.some((msg) => {
338
+ if (msg.role === "user" && Array.isArray(msg.content)) {
339
+ return msg.content.some((c) => c.type === "image");
340
+ }
341
+ if (msg.role === "toolResult" && Array.isArray(msg.content)) {
342
+ return msg.content.some((c) => c.type === "image");
343
+ }
344
+ return false;
345
+ });
346
+ if (hasImages) {
347
+ headers["Copilot-Vision-Request"] = "true";
348
+ }
349
+ }
350
+
351
+ return new OpenAI({
352
+ apiKey,
353
+ baseURL: model.baseUrl,
354
+ dangerouslyAllowBrowser: true,
355
+ defaultHeaders: headers,
356
+ });
357
+ }
358
+
359
+ function buildParams(model: Model<"openai-responses">, context: Context, options?: OpenAIResponsesOptions) {
360
+ const messages = convertMessages(model, context);
361
+
362
+ const params: ResponseCreateParamsStreaming = {
363
+ model: model.id,
364
+ input: messages,
365
+ stream: true,
366
+ };
367
+
368
+ if (options?.maxTokens) {
369
+ params.max_output_tokens = options?.maxTokens;
370
+ }
371
+
372
+ if (options?.temperature !== undefined) {
373
+ params.temperature = options?.temperature;
374
+ }
375
+
376
+ if (context.tools) {
377
+ params.tools = convertTools(context.tools);
378
+ }
379
+
380
+ if (model.reasoning) {
381
+ if (options?.reasoningEffort || options?.reasoningSummary) {
382
+ params.reasoning = {
383
+ effort: options?.reasoningEffort || "medium",
384
+ summary: options?.reasoningSummary || "auto",
385
+ };
386
+ params.include = ["reasoning.encrypted_content"];
387
+ } else {
388
+ if (model.name.startsWith("gpt-5")) {
389
+ // Jesus Christ, see https://community.openai.com/t/need-reasoning-false-option-for-gpt-5/1351588/7
390
+ messages.push({
391
+ role: "developer",
392
+ content: [
393
+ {
394
+ type: "input_text",
395
+ text: "# Juice: 0 !important",
396
+ },
397
+ ],
398
+ });
399
+ }
400
+ }
401
+ }
402
+
403
+ return params;
404
+ }
405
+
406
/**
 * Convert the internal conversation (system prompt + messages) into the
 * Responses API `input` list. Assistant content is round-tripped through the
 * signatures captured while streaming (thinkingSignature / textSignature /
 * composite tool-call IDs) so the API can correlate prior turns.
 */
function convertMessages(model: Model<"openai-responses">, context: Context): ResponseInput {
	const messages: ResponseInput = [];

	// Normalize cross-provider artifacts (thinking blocks, Copilot tool-call IDs) first.
	const transformedMessages = transformMessages(context.messages, model);

	if (context.systemPrompt) {
		// Reasoning models take instructions via the "developer" role instead of "system".
		const role = model.reasoning ? "developer" : "system";
		messages.push({
			role,
			content: sanitizeSurrogates(context.systemPrompt),
		});
	}

	// msgIndex feeds the fallback text-message ID; note the `continue`s below skip
	// the increment at the bottom of the loop for messages that emit nothing.
	let msgIndex = 0;
	for (const msg of transformedMessages) {
		if (msg.role === "user") {
			if (typeof msg.content === "string") {
				messages.push({
					role: "user",
					content: [{ type: "input_text", text: sanitizeSurrogates(msg.content) }],
				});
			} else {
				// Structured user content: map text/image parts to Responses input parts.
				const content: ResponseInputContent[] = msg.content.map((item): ResponseInputContent => {
					if (item.type === "text") {
						return {
							type: "input_text",
							text: sanitizeSurrogates(item.text),
						} satisfies ResponseInputText;
					} else {
						return {
							type: "input_image",
							detail: "auto",
							image_url: `data:${item.mimeType};base64,${item.data}`,
						} satisfies ResponseInputImage;
					}
				});
				// Drop images for text-only models; skip the message entirely if nothing remains.
				const filteredContent = !model.input.includes("image")
					? content.filter((c) => c.type !== "input_image")
					: content;
				if (filteredContent.length === 0) continue;
				messages.push({
					role: "user",
					content: filteredContent,
				});
			}
		} else if (msg.role === "assistant") {
			const output: ResponseInput = [];

			for (const block of msg.content) {
				// Do not submit thinking blocks if the completion had an error (i.e. abort)
				if (block.type === "thinking" && msg.stopReason !== "error") {
					if (block.thinkingSignature) {
						// thinkingSignature holds the original reasoning item, serialized verbatim.
						const reasoningItem = JSON.parse(block.thinkingSignature);
						output.push(reasoningItem);
					}
				} else if (block.type === "text") {
					const textBlock = block as TextContent;
					// OpenAI requires id to be max 64 characters
					let msgId = textBlock.textSignature;
					if (!msgId) {
						msgId = `msg_${msgIndex}`;
					} else if (msgId.length > 64) {
						msgId = `msg_${shortHash(msgId)}`;
					}
					output.push({
						type: "message",
						role: "assistant",
						content: [{ type: "output_text", text: sanitizeSurrogates(textBlock.text), annotations: [] }],
						status: "completed",
						id: msgId,
					} satisfies ResponseOutputMessage);
					// Do not submit toolcall blocks if the completion had an error (i.e. abort)
				} else if (block.type === "toolCall" && msg.stopReason !== "error") {
					const toolCall = block as ToolCall;
					// IDs are stored as "call_id|item_id"; split them back apart here.
					output.push({
						type: "function_call",
						id: toolCall.id.split("|")[1],
						call_id: toolCall.id.split("|")[0],
						name: toolCall.name,
						arguments: JSON.stringify(toolCall.arguments),
					});
				}
			}
			if (output.length === 0) continue;
			messages.push(...output);
		} else if (msg.role === "toolResult") {
			// Extract text and image content
			const textResult = msg.content
				.filter((c) => c.type === "text")
				.map((c) => (c as any).text)
				.join("\n");
			const hasImages = msg.content.some((c) => c.type === "image");

			// Always send function_call_output with text (or placeholder if only images)
			const hasText = textResult.length > 0;
			messages.push({
				type: "function_call_output",
				call_id: msg.toolCallId.split("|")[0],
				output: sanitizeSurrogates(hasText ? textResult : "(see attached image)"),
			});

			// If there are images and model supports them, send a follow-up user message with images
			if (hasImages && model.input.includes("image")) {
				const contentParts: ResponseInputContent[] = [];

				// Add text prefix
				contentParts.push({
					type: "input_text",
					text: "Attached image(s) from tool result:",
				} satisfies ResponseInputText);

				// Add images
				for (const block of msg.content) {
					if (block.type === "image") {
						contentParts.push({
							type: "input_image",
							detail: "auto",
							image_url: `data:${(block as any).mimeType};base64,${(block as any).data}`,
						} satisfies ResponseInputImage);
					}
				}

				messages.push({
					role: "user",
					content: contentParts,
				});
			}
		}
		msgIndex++;
	}

	return messages;
}
539
+
540
+ function convertTools(tools: Tool[]): OpenAITool[] {
541
+ return tools.map((tool) => ({
542
+ type: "function",
543
+ name: tool.name,
544
+ description: tool.description,
545
+ parameters: tool.parameters as any, // TypeBox already generates JSON Schema
546
+ strict: null,
547
+ }));
548
+ }
549
+
550
+ function mapStopReason(status: OpenAI.Responses.ResponseStatus | undefined): StopReason {
551
+ if (!status) return "stop";
552
+ switch (status) {
553
+ case "completed":
554
+ return "stop";
555
+ case "incomplete":
556
+ return "length";
557
+ case "failed":
558
+ case "cancelled":
559
+ return "error";
560
+ // These two are wonky ...
561
+ case "in_progress":
562
+ case "queued":
563
+ return "stop";
564
+ default: {
565
+ const _exhaustive: never = status;
566
+ throw new Error(`Unhandled stop reason: ${_exhaustive}`);
567
+ }
568
+ }
569
+ }
@@ -0,0 +1,143 @@
1
+ import type { Api, AssistantMessage, Message, Model, ToolCall, ToolResultMessage } from "../types";
2
+
3
+ /**
4
+ * Normalize tool call ID for GitHub Copilot cross-API compatibility.
5
+ * OpenAI Responses API generates IDs that are 450+ chars with special characters like `|`.
6
+ * Other APIs (Claude, etc.) require max 40 chars and only alphanumeric + underscore + hyphen.
7
+ */
8
+ function normalizeCopilotToolCallId(id: string): string {
9
+ return id.replace(/[^a-zA-Z0-9_-]/g, "").slice(0, 40);
10
+ }
11
+
12
/**
 * Prepare a message history for submission to a (possibly different) model.
 *
 * Pass 1 rewrites assistant messages that came from another provider/API:
 * thinking blocks become <thinking>-tagged text, and GitHub Copilot tool-call
 * IDs are normalized on cross-API switches (tool results pick up the same
 * mapping, relying on results appearing after their assistant message).
 * Pass 2 inserts synthetic error tool-results for tool calls that never got a
 * result, so the downstream API's call/result pairing requirement is satisfied.
 */
export function transformMessages<TApi extends Api>(messages: Message[], model: Model<TApi>): Message[] {
	// Build a map of original tool call IDs to normalized IDs for github-copilot cross-API switches
	const toolCallIdMap = new Map<string, string>();

	// First pass: transform messages (thinking blocks, tool call ID normalization).
	// Note: the map callback mutates toolCallIdMap, so in-order traversal matters —
	// assistant messages populate it before their later toolResult messages read it.
	const transformed = messages.map((msg) => {
		// User messages pass through unchanged
		if (msg.role === "user") {
			return msg;
		}

		// Handle toolResult messages - normalize toolCallId if we have a mapping
		if (msg.role === "toolResult") {
			const normalizedId = toolCallIdMap.get(msg.toolCallId);
			if (normalizedId && normalizedId !== msg.toolCallId) {
				return { ...msg, toolCallId: normalizedId };
			}
			return msg;
		}

		// Assistant messages need transformation check
		if (msg.role === "assistant") {
			const assistantMsg = msg as AssistantMessage;

			// If message is from the same provider and API, keep as is
			if (assistantMsg.provider === model.provider && assistantMsg.api === model.api) {
				return msg;
			}

			// Check if we need to normalize tool call IDs (github-copilot cross-API)
			const needsToolCallIdNormalization =
				assistantMsg.provider === "github-copilot" &&
				model.provider === "github-copilot" &&
				assistantMsg.api !== model.api;

			// Transform message from different provider/model
			const transformedContent = assistantMsg.content.map((block) => {
				if (block.type === "thinking") {
					// Convert thinking block to text block with <thinking> tags
					// (foreign thinking signatures cannot be replayed on this API).
					return {
						type: "text" as const,
						text: `<thinking>\n${block.thinking}\n</thinking>`,
					};
				}
				// Normalize tool call IDs for github-copilot cross-API switches
				if (block.type === "toolCall" && needsToolCallIdNormalization) {
					const toolCall = block as ToolCall;
					const normalizedId = normalizeCopilotToolCallId(toolCall.id);
					if (normalizedId !== toolCall.id) {
						// Remember the rename so the matching toolResult is rewritten too.
						toolCallIdMap.set(toolCall.id, normalizedId);
						return { ...toolCall, id: normalizedId };
					}
				}
				// All other blocks pass through unchanged
				return block;
			});

			// Return transformed assistant message
			return {
				...assistantMsg,
				content: transformedContent,
			};
		}
		return msg;
	});

	// Second pass: insert synthetic empty tool results for orphaned tool calls
	// This preserves thinking signatures and satisfies API requirements.
	// NOTE(review): orphaned calls on the *final* assistant message are left
	// unpatched when the history ends there — confirm callers rely on that.
	const result: Message[] = [];
	let pendingToolCalls: ToolCall[] = [];
	let existingToolResultIds = new Set<string>();

	for (let i = 0; i < transformed.length; i++) {
		const msg = transformed[i];

		if (msg.role === "assistant") {
			// If we have pending orphaned tool calls from a previous assistant, insert synthetic results now
			if (pendingToolCalls.length > 0) {
				for (const tc of pendingToolCalls) {
					if (!existingToolResultIds.has(tc.id)) {
						result.push({
							role: "toolResult",
							toolCallId: tc.id,
							toolName: tc.name,
							content: [{ type: "text", text: "No result provided" }],
							isError: true,
							timestamp: Date.now(),
						} as ToolResultMessage);
					}
				}
				pendingToolCalls = [];
				existingToolResultIds = new Set();
			}

			// Track tool calls from this assistant message
			const assistantMsg = msg as AssistantMessage;
			const toolCalls = assistantMsg.content.filter((b) => b.type === "toolCall") as ToolCall[];
			if (toolCalls.length > 0) {
				pendingToolCalls = toolCalls;
				existingToolResultIds = new Set();
			}

			result.push(msg);
		} else if (msg.role === "toolResult") {
			// Record real results so they are not duplicated by synthetic ones later.
			existingToolResultIds.add(msg.toolCallId);
			result.push(msg);
		} else if (msg.role === "user") {
			// User message interrupts tool flow - insert synthetic results for orphaned calls
			if (pendingToolCalls.length > 0) {
				for (const tc of pendingToolCalls) {
					if (!existingToolResultIds.has(tc.id)) {
						result.push({
							role: "toolResult",
							toolCallId: tc.id,
							toolName: tc.name,
							content: [{ type: "text", text: "No result provided" }],
							isError: true,
							timestamp: Date.now(),
						} as ToolResultMessage);
					}
				}
				pendingToolCalls = [];
				existingToolResultIds = new Set();
			}
			result.push(msg);
		} else {
			result.push(msg);
		}
	}

	return result;
}