@oh-my-pi/pi-ai 8.0.20 → 8.2.0

This diff shows the contents of publicly released package versions as published to their public registries; it is provided for informational purposes only.
Files changed (39)
  1. package/README.md +11 -12
  2. package/package.json +49 -26
  3. package/src/cli.ts +7 -7
  4. package/src/index.ts +2 -1
  5. package/src/models.generated.ts +100 -101
  6. package/src/providers/amazon-bedrock.ts +12 -13
  7. package/src/providers/anthropic.ts +67 -37
  8. package/src/providers/cursor.ts +57 -57
  9. package/src/providers/google-gemini-cli-usage.ts +2 -2
  10. package/src/providers/google-gemini-cli.ts +8 -10
  11. package/src/providers/google-shared.ts +12 -13
  12. package/src/providers/google-vertex.ts +7 -7
  13. package/src/providers/google.ts +8 -8
  14. package/src/providers/openai-codex/request-transformer.ts +6 -6
  15. package/src/providers/openai-codex-responses.ts +28 -28
  16. package/src/providers/openai-completions.ts +39 -39
  17. package/src/providers/openai-responses.ts +31 -31
  18. package/src/providers/transform-messages.ts +3 -3
  19. package/src/storage.ts +29 -19
  20. package/src/stream.ts +6 -6
  21. package/src/types.ts +1 -2
  22. package/src/usage/claude.ts +4 -4
  23. package/src/usage/github-copilot.ts +3 -4
  24. package/src/usage/google-antigravity.ts +3 -3
  25. package/src/usage/openai-codex.ts +4 -4
  26. package/src/usage/zai.ts +3 -3
  27. package/src/usage.ts +0 -1
  28. package/src/utils/event-stream.ts +4 -4
  29. package/src/utils/oauth/anthropic.ts +0 -1
  30. package/src/utils/oauth/callback-server.ts +2 -3
  31. package/src/utils/oauth/github-copilot.ts +2 -3
  32. package/src/utils/oauth/google-antigravity.ts +0 -1
  33. package/src/utils/oauth/google-gemini-cli.ts +2 -3
  34. package/src/utils/oauth/index.ts +11 -12
  35. package/src/utils/oauth/openai-codex.ts +0 -1
  36. package/src/utils/overflow.ts +2 -2
  37. package/src/utils/retry.ts +78 -0
  38. package/src/utils/validation.ts +4 -5
  39. package/tsconfig.json +0 -42
package/src/providers/google-gemini-cli.ts
@@ -3,10 +3,10 @@
  * Shared implementation for both google-gemini-cli and google-antigravity providers.
  * Uses the Cloud Code Assist API endpoint to access Gemini and Claude models.
  */
-
 import { createHash } from "node:crypto";
 import type { Content, ThinkingConfig } from "@google/genai";
-import { calculateCost } from "@oh-my-pi/pi-ai/models";
+import { abortableSleep } from "@oh-my-pi/pi-utils";
+import { calculateCost } from "../models";
 import type {
 	Api,
 	AssistantMessage,
@@ -17,10 +17,9 @@ import type {
 	TextContent,
 	ThinkingContent,
 	ToolCall,
-} from "@oh-my-pi/pi-ai/types";
-import { AssistantMessageEventStream } from "@oh-my-pi/pi-ai/utils/event-stream";
-import { sanitizeSurrogates } from "@oh-my-pi/pi-ai/utils/sanitize-unicode";
-import { abortableSleep } from "@oh-my-pi/pi-utils";
+} from "../types";
+import { AssistantMessageEventStream } from "../utils/event-stream";
+import { sanitizeSurrogates } from "../utils/sanitize-unicode";
 import {
 	convertMessages,
 	convertTools,
@@ -660,8 +659,7 @@ export const streamGoogleGeminiCli: StreamFunction<"google-gemini-cli"> = (
 
 			const providedId = part.functionCall.id;
 			const needsNewId =
-				!providedId ||
-				output.content.some((b) => b.type === "toolCall" && b.id === providedId);
+				!providedId || output.content.some(b => b.type === "toolCall" && b.id === providedId);
 			const toolCallId = needsNewId
 				? `${part.functionCall.name}_${Date.now()}_${++toolCallCounter}`
 				: providedId;
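All three Google providers in this release dedupe tool-call IDs the same way: if the API omits `functionCall.id`, or repeats an ID already emitted in this message, a fresh ID is minted from the function name, a timestamp, and a counter. A minimal standalone sketch of that pattern (types simplified, `resolveToolCallId` is a hypothetical name):

```ts
type ToolCallBlock = { type: "toolCall"; id: string };

let toolCallCounter = 0;

function resolveToolCallId(
	providedId: string | undefined,
	name: string,
	existing: ToolCallBlock[],
): string {
	const isDuplicate = providedId !== undefined && existing.some(b => b.id === providedId);
	if (!providedId || isDuplicate) {
		// Date.now() plus a monotonic counter keeps IDs unique within one message.
		return `${name}_${Date.now()}_${++toolCallCounter}`;
	}
	return providedId;
}
```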
@@ -695,7 +693,7 @@ export const streamGoogleGeminiCli: StreamFunction<"google-gemini-cli"> = (
 
 		if (candidate?.finishReason) {
 			output.stopReason = mapStopReasonString(candidate.finishReason);
-			if (output.content.some((b) => b.type === "toolCall")) {
+			if (output.content.some(b => b.type === "toolCall")) {
 				output.stopReason = "toolUse";
 			}
 		}
@@ -840,7 +838,7 @@ function deriveSessionId(context: Context): string | undefined {
 	} else if (Array.isArray(message.content)) {
 		text = message.content
 			.filter((item): item is TextContent => item.type === "text")
-			.map((item) => item.text)
+			.map(item => item.text)
 			.join("\n");
 	}
 
package/src/providers/google-shared.ts
@@ -1,10 +1,9 @@
 /**
  * Shared utilities for Google Generative AI and Google Cloud Code Assist providers.
  */
-
 import { type Content, FinishReason, FunctionCallingConfigMode, type Part, type Schema } from "@google/genai";
-import type { Context, ImageContent, Model, StopReason, TextContent, Tool } from "@oh-my-pi/pi-ai/types";
-import { sanitizeSurrogates } from "@oh-my-pi/pi-ai/utils/sanitize-unicode";
+import type { Context, ImageContent, Model, StopReason, TextContent, Tool } from "../types";
+import { sanitizeSurrogates } from "../utils/sanitize-unicode";
 import { transformMessages } from "./transform-messages";
 
 type GoogleApiType = "google-generative-ai" | "google-gemini-cli" | "google-vertex";
@@ -86,7 +85,7 @@ export function convertMessages<T extends GoogleApiType>(model: Model<T>, contex
 				parts: [{ text: sanitizeSurrogates(msg.content) }],
 			});
 		} else {
-			const parts: Part[] = msg.content.map((item) => {
+			const parts: Part[] = msg.content.map(item => {
 				if (item.type === "text") {
 					return { text: sanitizeSurrogates(item.text) };
 				} else {
@@ -99,8 +98,8 @@ export function convertMessages<T extends GoogleApiType>(model: Model<T>, contex
 				}
 			});
 			// Filter out images if model doesn't support them, and empty text blocks
-			let filteredParts = !model.input.includes("image") ? parts.filter((p) => p.text !== undefined) : parts;
-			filteredParts = filteredParts.filter((p) => {
+			let filteredParts = !model.input.includes("image") ? parts.filter(p => p.text !== undefined) : parts;
+			filteredParts = filteredParts.filter(p => {
 				if (p.text !== undefined) {
 					return p.text.trim().length > 0;
 				}
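The filtering rule above recurs across the providers in this diff: image parts are dropped for text-only models, then text parts that are empty after trimming are dropped, while non-text parts pass through. A sketch under a simplified `Part` shape (an assumption; the real type comes from `@google/genai`):

```ts
type Part = { text?: string; inlineData?: { mimeType: string; data: string } };

function filterParts(parts: Part[], supportsImages: boolean): Part[] {
	// Text-only models: keep only text parts.
	const base = supportsImages ? parts : parts.filter(p => p.text !== undefined);
	// Drop text parts that are empty after trimming; non-text parts pass through.
	return base.filter(p => (p.text !== undefined ? p.text.trim().length > 0 : true));
}
```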
@@ -180,7 +179,7 @@ export function convertMessages<T extends GoogleApiType>(model: Model<T>, contex
 	} else if (msg.role === "toolResult") {
 		// Extract text and image content
 		const textContent = msg.content.filter((c): c is TextContent => c.type === "text");
-		const textResult = textContent.map((c) => c.text).join("\n");
+		const textResult = textContent.map(c => c.text).join("\n");
 		const imageContent = model.input.includes("image")
 			? msg.content.filter((c): c is ImageContent => c.type === "image")
 			: [];
@@ -196,7 +195,7 @@ export function convertMessages<T extends GoogleApiType>(model: Model<T>, contex
 		// Use "output" key for success, "error" key for errors as per SDK documentation
 		const responseValue = hasText ? sanitizeSurrogates(textResult) : hasImages ? "(see attached image)" : "";
 
-		const imageParts: Part[] = imageContent.map((imageBlock) => ({
+		const imageParts: Part[] = imageContent.map(imageBlock => ({
 			inlineData: {
 				mimeType: imageBlock.mimeType,
 				data: imageBlock.data,
@@ -221,7 +220,7 @@ export function convertMessages<T extends GoogleApiType>(model: Model<T>, contex
 		// Cloud Code Assist API requires all function responses to be in a single user turn.
 		// Check if the last content is already a user turn with function responses and merge.
 		const lastContent = contents[contents.length - 1];
-		if (lastContent?.role === "user" && lastContent.parts?.some((p) => p.functionResponse)) {
+		if (lastContent?.role === "user" && lastContent.parts?.some(p => p.functionResponse)) {
 			lastContent.parts.push(functionResponsePart);
 		} else {
 			contents.push({
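Per the comment in the hunk, the Cloud Code Assist API wants all function responses in a single user turn, so consecutive tool results are merged into the previous user turn when it already carries function responses. A sketch with simplified shapes (`pushFunctionResponse` is a hypothetical helper):

```ts
type Part = { functionResponse?: { name: string; response: unknown } };
type Content = { role: "user" | "model"; parts: Part[] };

function pushFunctionResponse(contents: Content[], part: Part): void {
	const last = contents[contents.length - 1];
	if (last?.role === "user" && last.parts.some(p => p.functionResponse)) {
		// Merge into the existing user turn instead of opening a new one.
		last.parts.push(part);
	} else {
		contents.push({ role: "user", parts: [part] });
	}
}
```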
@@ -270,7 +269,7 @@ const UNSUPPORTED_SCHEMA_FIELDS = new Set([
 
 function sanitizeSchemaImpl(value: unknown, isInsideProperties: boolean): unknown {
 	if (Array.isArray(value)) {
-		return value.map((entry) => sanitizeSchemaImpl(entry, isInsideProperties));
+		return value.map(entry => sanitizeSchemaImpl(entry, isInsideProperties));
 	}
 
 	if (!value || typeof value !== "object") {
@@ -286,11 +285,11 @@ function sanitizeSchemaImpl(value: unknown, isInsideProperties: boolean): unknow
 		const variants = obj[combiner] as Record<string, unknown>[];
 
 		// Check if ALL variants have a const field
-		const allHaveConst = variants.every((v) => v && typeof v === "object" && "const" in v);
+		const allHaveConst = variants.every(v => v && typeof v === "object" && "const" in v);
 
 		if (allHaveConst && variants.length > 0) {
 			// Extract all const values into enum
-			result.enum = variants.map((v) => v.const);
+			result.enum = variants.map(v => v.const);
 
 			// Inherit type from first variant if present
 			const firstType = variants[0]?.type;
@@ -327,7 +326,7 @@ function sanitizeSchemaImpl(value: unknown, isInsideProperties: boolean): unknow
 	if (constValue !== undefined) {
 		// Convert const to enum, merging with existing enum if present
 		const existingEnum = Array.isArray(result.enum) ? result.enum : [];
-		if (!existingEnum.some((item) => Object.is(item, constValue))) {
+		if (!existingEnum.some(item => Object.is(item, constValue))) {
 			existingEnum.push(constValue);
 		}
 		result.enum = existingEnum;
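These hunks sit inside a sanitizer that rewrites JSON Schema constructs Gemini's `Schema` type rejects: a combiner (`anyOf`/`oneOf`) whose variants are all `const` values collapses into a single `enum` that inherits the first variant's `type`, and a bare `const` is merged into `enum`. A hypothetical standalone version of just that rewrite, assuming the `anyOf` case:

```ts
function collapseConsts(schema: Record<string, unknown>): Record<string, unknown> {
	const out = { ...schema };
	const variants = out.anyOf as Array<Record<string, unknown>> | undefined;
	if (variants && variants.length > 0 && variants.every(v => v && typeof v === "object" && "const" in v)) {
		// { anyOf: [{ const: "a" }, { const: "b" }] }  →  { enum: ["a", "b"] }
		out.enum = variants.map(v => v.const);
		if (variants[0].type !== undefined) out.type = variants[0].type;
		delete out.anyOf;
	}
	if (out.const !== undefined) {
		// { const: "x" }  →  { enum: ["x"] }, merging with any existing enum
		const existing = Array.isArray(out.enum) ? out.enum : [];
		if (!existing.some(item => Object.is(item, out.const))) existing.push(out.const);
		out.enum = existing;
		delete out.const;
	}
	return out;
}
```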
package/src/providers/google-vertex.ts
@@ -5,7 +5,7 @@ import {
 	type ThinkingConfig,
 	ThinkingLevel,
 } from "@google/genai";
-import { calculateCost } from "@oh-my-pi/pi-ai/models";
+import { calculateCost } from "../models";
 import type {
 	Api,
 	AssistantMessage,
@@ -16,10 +16,10 @@ import type {
 	TextContent,
 	ThinkingContent,
 	ToolCall,
-} from "@oh-my-pi/pi-ai/types";
-import { AssistantMessageEventStream } from "@oh-my-pi/pi-ai/utils/event-stream";
-import { formatErrorMessageWithRetryAfter } from "@oh-my-pi/pi-ai/utils/retry-after";
-import { sanitizeSurrogates } from "@oh-my-pi/pi-ai/utils/sanitize-unicode";
+} from "../types";
+import { AssistantMessageEventStream } from "../utils/event-stream";
+import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
+import { sanitizeSurrogates } from "../utils/sanitize-unicode";
 import type { GoogleThinkingLevel } from "./google-gemini-cli";
 import {
 	convertMessages,
@@ -183,7 +183,7 @@ export const streamGoogleVertex: StreamFunction<"google-vertex"> = (
 
 			const providedId = part.functionCall.id;
 			const needsNewId =
-				!providedId || output.content.some((b) => b.type === "toolCall" && b.id === providedId);
+				!providedId || output.content.some(b => b.type === "toolCall" && b.id === providedId);
 			const toolCallId = needsNewId
 				? `${part.functionCall.name}_${Date.now()}_${++toolCallCounter}`
 				: providedId;
@@ -211,7 +211,7 @@ export const streamGoogleVertex: StreamFunction<"google-vertex"> = (
 
 		if (candidate?.finishReason) {
 			output.stopReason = mapStopReason(candidate.finishReason);
-			if (output.content.some((b) => b.type === "toolCall")) {
+			if (output.content.some(b => b.type === "toolCall")) {
 				output.stopReason = "toolUse";
 			}
 		}
package/src/providers/google.ts
@@ -4,8 +4,8 @@ import {
 	GoogleGenAI,
 	type ThinkingConfig,
 } from "@google/genai";
-import { calculateCost } from "@oh-my-pi/pi-ai/models";
-import { getEnvApiKey } from "@oh-my-pi/pi-ai/stream";
+import { calculateCost } from "../models";
+import { getEnvApiKey } from "../stream";
 import type {
 	Api,
 	AssistantMessage,
@@ -16,10 +16,10 @@ import type {
 	TextContent,
 	ThinkingContent,
 	ToolCall,
-} from "@oh-my-pi/pi-ai/types";
-import { AssistantMessageEventStream } from "@oh-my-pi/pi-ai/utils/event-stream";
-import { formatErrorMessageWithRetryAfter } from "@oh-my-pi/pi-ai/utils/retry-after";
-import { sanitizeSurrogates } from "@oh-my-pi/pi-ai/utils/sanitize-unicode";
+} from "../types";
+import { AssistantMessageEventStream } from "../utils/event-stream";
+import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
+import { sanitizeSurrogates } from "../utils/sanitize-unicode";
 import type { GoogleThinkingLevel } from "./google-gemini-cli";
 import {
 	convertMessages,
@@ -170,7 +170,7 @@ export const streamGoogle: StreamFunction<"google-generative-ai"> = (
 		// Generate unique ID if not provided or if it's a duplicate
 		const providedId = part.functionCall.id;
 		const needsNewId =
-			!providedId || output.content.some((b) => b.type === "toolCall" && b.id === providedId);
+			!providedId || output.content.some(b => b.type === "toolCall" && b.id === providedId);
 		const toolCallId = needsNewId
 			? `${part.functionCall.name}_${Date.now()}_${++toolCallCounter}`
 			: providedId;
@@ -198,7 +198,7 @@ export const streamGoogle: StreamFunction<"google-generative-ai"> = (
 
 		if (candidate?.finishReason) {
 			output.stopReason = mapStopReason(candidate.finishReason);
-			if (output.content.some((b) => b.type === "toolCall")) {
+			if (output.content.some(b => b.type === "toolCall")) {
 				output.stopReason = "toolUse";
 			}
 		}
package/src/providers/openai-codex/request-transformer.ts
@@ -73,8 +73,8 @@ function filterInput(input: InputItem[] | undefined): InputItem[] | undefined {
 	if (!Array.isArray(input)) return input;
 
 	return input
-		.filter((item) => item.type !== "item_reference")
-		.map((item) => {
+		.filter(item => item.type !== "item_reference")
+		.map(item => {
 			if (item.id != null) {
 				const { id: _id, ...rest } = item;
 				return rest as InputItem;
@@ -97,11 +97,11 @@ export async function transformRequestBody(
 	if (body.input) {
 		const functionCallIds = new Set(
 			body.input
-				.filter((item) => item.type === "function_call" && typeof item.call_id === "string")
-				.map((item) => item.call_id as string),
+				.filter(item => item.type === "function_call" && typeof item.call_id === "string")
+				.map(item => item.call_id as string),
 		);
 
-		body.input = body.input.map((item) => {
+		body.input = body.input.map(item => {
 			if (item.type === "function_call_output" && typeof item.call_id === "string") {
 				const callId = item.call_id as string;
 				if (!functionCallIds.has(callId)) {
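This hunk builds the set of `call_id`s seen on `function_call` items, then maps over the input to special-case any `function_call_output` whose `call_id` has no matching call; what the transformer does with such orphans falls outside the hunk. A sketch of just the detection step (`findOrphanedOutputs` is a hypothetical helper, not part of the package):

```ts
type InputItem = { type: string; call_id?: string };

function findOrphanedOutputs(input: InputItem[]): InputItem[] {
	// Collect the call_ids of every function_call present in the request.
	const functionCallIds = new Set(
		input
			.filter(i => i.type === "function_call" && typeof i.call_id === "string")
			.map(i => i.call_id as string),
	);
	// A function_call_output is orphaned when no function_call shares its call_id.
	return input.filter(
		i => i.type === "function_call_output" && typeof i.call_id === "string" && !functionCallIds.has(i.call_id),
	);
}
```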
@@ -131,7 +131,7 @@ export async function transformRequestBody(
 
 	if (prompt?.developerMessages && prompt.developerMessages.length > 0 && Array.isArray(body.input)) {
 		const developerMessages = prompt.developerMessages.map(
-			(text) =>
+			text =>
 				({
 					type: "message",
 					role: "developer",
package/src/providers/openai-codex-responses.ts
@@ -1,6 +1,17 @@
-import os from "node:os";
-import { calculateCost } from "@oh-my-pi/pi-ai/models";
-import { getEnvApiKey } from "@oh-my-pi/pi-ai/stream";
+import * as os from "node:os";
+import { abortableSleep } from "@oh-my-pi/pi-utils";
+import type {
+	ResponseFunctionToolCall,
+	ResponseInput,
+	ResponseInputContent,
+	ResponseInputImage,
+	ResponseInputText,
+	ResponseOutputMessage,
+	ResponseReasoningItem,
+} from "openai/resources/responses/responses";
+import packageJson from "../../package.json" with { type: "json" };
+import { calculateCost } from "../models";
+import { getEnvApiKey } from "../stream";
 import type {
 	Api,
 	AssistantMessage,
@@ -13,22 +24,11 @@ import type {
 	ThinkingContent,
 	Tool,
 	ToolCall,
-} from "@oh-my-pi/pi-ai/types";
-import { AssistantMessageEventStream } from "@oh-my-pi/pi-ai/utils/event-stream";
-import { parseStreamingJson } from "@oh-my-pi/pi-ai/utils/json-parse";
-import { formatErrorMessageWithRetryAfter } from "@oh-my-pi/pi-ai/utils/retry-after";
-import { sanitizeSurrogates } from "@oh-my-pi/pi-ai/utils/sanitize-unicode";
-import { abortableSleep } from "@oh-my-pi/pi-utils";
-import type {
-	ResponseFunctionToolCall,
-	ResponseInput,
-	ResponseInputContent,
-	ResponseInputImage,
-	ResponseInputText,
-	ResponseOutputMessage,
-	ResponseReasoningItem,
-} from "openai/resources/responses/responses";
-import packageJson from "../../package.json" with { type: "json" };
+} from "../types";
+import { AssistantMessageEventStream } from "../utils/event-stream";
+import { parseStreamingJson } from "../utils/json-parse";
+import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
+import { sanitizeSurrogates } from "../utils/sanitize-unicode";
 import {
 	CODEX_BASE_URL,
 	JWT_CLAIM_PATH,
@@ -341,7 +341,7 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"
 		} else if (eventType === "response.output_item.done") {
 			const item = rawEvent.item as ResponseReasoningItem | ResponseOutputMessage | ResponseFunctionToolCall;
 			if (item.type === "reasoning" && currentBlock?.type === "thinking") {
-				currentBlock.thinking = item.summary?.map((s) => s.text).join("\n\n") || "";
+				currentBlock.thinking = item.summary?.map(s => s.text).join("\n\n") || "";
 				currentBlock.thinkingSignature = JSON.stringify(item);
 				stream.push({
 					type: "thinking_end",
@@ -351,7 +351,7 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"
 				});
 				currentBlock = null;
 			} else if (item.type === "message" && currentBlock?.type === "text") {
-				currentBlock.text = item.content.map((c) => (c.type === "output_text" ? c.text : c.refusal)).join("");
+				currentBlock.text = item.content.map(c => (c.type === "output_text" ? c.text : c.refusal)).join("");
 				currentBlock.textSignature = item.id;
 				stream.push({
 					type: "text_end",
@@ -396,7 +396,7 @@ export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses"
 			}
 			calculateCost(model, output.usage);
 			output.stopReason = mapStopReason(response?.status);
-			if (output.content.some((b) => b.type === "toolCall") && output.stopReason === "stop") {
+			if (output.content.some(b => b.type === "toolCall") && output.stopReason === "stop") {
 				output.stopReason = "toolUse";
 			}
 		} else if (eventType === "error") {
@@ -593,9 +593,9 @@ function convertMessages(model: Model<"openai-codex-responses">, context: Contex
 			});
 			// Filter out images if model doesn't support them, and empty text blocks
 			let filteredContent = !model.input.includes("image")
-				? content.filter((c) => c.type !== "input_image")
+				? content.filter(c => c.type !== "input_image")
 				: content;
-			filteredContent = filteredContent.filter((c) => {
+			filteredContent = filteredContent.filter(c => {
 				if (c.type === "input_text") {
 					return c.text.trim().length > 0;
 				}
@@ -647,10 +647,10 @@ function convertMessages(model: Model<"openai-codex-responses">, context: Contex
 			messages.push(...output);
 		} else if (msg.role === "toolResult") {
 			const textResult = msg.content
-				.filter((c) => c.type === "text")
-				.map((c) => (c as { text: string }).text)
+				.filter(c => c.type === "text")
+				.map(c => (c as { text: string }).text)
 				.join("\n");
-			const hasImages = msg.content.some((c) => c.type === "image");
+			const hasImages = msg.content.some(c => c.type === "image");
 			const normalized = normalizeResponsesToolCallId(msg.toolCallId);
 
 			const hasText = textResult.length > 0;
@@ -692,7 +692,7 @@ function convertMessages(model: Model<"openai-codex-responses">, context: Contex
 function convertTools(
 	tools: Tool[],
 ): Array<{ type: "function"; name: string; description: string; parameters: Record<string, unknown>; strict: null }> {
-	return tools.map((tool) => ({
+	return tools.map(tool => ({
 		type: "function",
 		name: tool.name,
 		description: tool.description,
package/src/providers/openai-completions.ts
@@ -1,5 +1,15 @@
-import { calculateCost } from "@oh-my-pi/pi-ai/models";
-import { getEnvApiKey } from "@oh-my-pi/pi-ai/stream";
+import OpenAI from "openai";
+import type {
+	ChatCompletionAssistantMessageParam,
+	ChatCompletionChunk,
+	ChatCompletionContentPart,
+	ChatCompletionContentPartImage,
+	ChatCompletionContentPartText,
+	ChatCompletionMessageParam,
+	ChatCompletionToolMessageParam,
+} from "openai/resources/chat/completions";
+import { calculateCost } from "../models";
+import { getEnvApiKey } from "../stream";
 import type {
 	AssistantMessage,
 	Context,
@@ -14,21 +24,11 @@ import type {
 	Tool,
 	ToolCall,
 	ToolResultMessage,
-} from "@oh-my-pi/pi-ai/types";
-import { AssistantMessageEventStream } from "@oh-my-pi/pi-ai/utils/event-stream";
-import { parseStreamingJson } from "@oh-my-pi/pi-ai/utils/json-parse";
-import { formatErrorMessageWithRetryAfter } from "@oh-my-pi/pi-ai/utils/retry-after";
-import { sanitizeSurrogates } from "@oh-my-pi/pi-ai/utils/sanitize-unicode";
-import OpenAI from "openai";
-import type {
-	ChatCompletionAssistantMessageParam,
-	ChatCompletionChunk,
-	ChatCompletionContentPart,
-	ChatCompletionContentPartImage,
-	ChatCompletionContentPartText,
-	ChatCompletionMessageParam,
-	ChatCompletionToolMessageParam,
-} from "openai/resources/chat/completions";
+} from "../types";
+import { AssistantMessageEventStream } from "../utils/event-stream";
+import { parseStreamingJson } from "../utils/json-parse";
+import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
+import { sanitizeSurrogates } from "../utils/sanitize-unicode";
 import { transformMessages } from "./transform-messages";
 
 /**
@@ -61,7 +61,7 @@ function hasToolHistory(messages: Message[]): boolean {
 			return true;
 		}
 		if (msg.role === "assistant") {
-			if (msg.content.some((block) => block.type === "toolCall")) {
+			if (msg.content.some(block => block.type === "toolCall")) {
 				return true;
 			}
 		}
@@ -287,7 +287,7 @@ export const streamOpenAICompletions: StreamFunction<"openai-completions"> = (
 					for (const detail of reasoningDetails) {
 						if (detail.type === "reasoning.encrypted" && detail.id && detail.data) {
 							const matchingToolCall = output.content.find(
-								(b) => b.type === "toolCall" && b.id === detail.id,
+								b => b.type === "toolCall" && b.id === detail.id,
 							) as ToolCall | undefined;
 							if (matchingToolCall) {
 								matchingToolCall.thoughtSignature = JSON.stringify(detail);
@@ -356,12 +356,12 @@ function createClient(
 		headers["Openai-Intent"] = "conversation-edits";
 
 		// Copilot requires this header when sending images
-		const hasImages = messages.some((msg) => {
+		const hasImages = messages.some(msg => {
 			if (msg.role === "user" && Array.isArray(msg.content)) {
-				return msg.content.some((c) => c.type === "image");
+				return msg.content.some(c => c.type === "image");
 			}
 			if (msg.role === "toolResult" && Array.isArray(msg.content)) {
-				return msg.content.some((c) => c.type === "image");
+				return msg.content.some(c => c.type === "image");
 			}
 			return false;
 		});
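Per the comment, GitHub Copilot needs an extra request header whenever the conversation carries images, so the client scans user and tool-result messages for image blocks before building headers. A sketch of that scan; the header name is not visible in this hunk, so `Copilot-Vision-Request` is an assumption:

```ts
type Block = { type: "text" | "image" | "toolCall" };
type Msg = { role: "user" | "assistant" | "toolResult"; content: string | Block[] };

function visionHeaders(messages: Msg[]): Record<string, string> {
	// Only user and toolResult messages can carry image blocks here.
	const hasImages = messages.some(
		msg =>
			(msg.role === "user" || msg.role === "toolResult") &&
			Array.isArray(msg.content) &&
			msg.content.some(c => c.type === "image"),
	);
	return hasImages ? { "Copilot-Vision-Request": "true" } : {};
}
```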
@@ -516,7 +516,7 @@ export function convertMessages(
 			}
 		});
 		const filteredContent = !model.input.includes("image")
-			? content.filter((c) => c.type !== "image_url")
+			? content.filter(c => c.type !== "image_url")
 			: content;
 		if (filteredContent.length === 0) continue;
 		params.push({
@@ -531,29 +531,29 @@ export function convertMessages(
 			content: compat.requiresAssistantAfterToolResult ? "" : null,
 		};
 
-		const textBlocks = msg.content.filter((b) => b.type === "text") as TextContent[];
+		const textBlocks = msg.content.filter(b => b.type === "text") as TextContent[];
 		// Filter out empty text blocks to avoid API validation errors
-		const nonEmptyTextBlocks = textBlocks.filter((b) => b.text && b.text.trim().length > 0);
+		const nonEmptyTextBlocks = textBlocks.filter(b => b.text && b.text.trim().length > 0);
 		if (nonEmptyTextBlocks.length > 0) {
 			// GitHub Copilot requires assistant content as a string, not an array.
 			// Sending as array causes Claude models to re-answer all previous prompts.
 			if (model.provider === "github-copilot") {
-				assistantMsg.content = nonEmptyTextBlocks.map((b) => sanitizeSurrogates(b.text)).join("");
+				assistantMsg.content = nonEmptyTextBlocks.map(b => sanitizeSurrogates(b.text)).join("");
 			} else {
-				assistantMsg.content = nonEmptyTextBlocks.map((b) => {
+				assistantMsg.content = nonEmptyTextBlocks.map(b => {
 					return { type: "text", text: sanitizeSurrogates(b.text) };
 				});
 			}
 		}
 
 		// Handle thinking blocks
-		const thinkingBlocks = msg.content.filter((b) => b.type === "thinking") as ThinkingContent[];
+		const thinkingBlocks = msg.content.filter(b => b.type === "thinking") as ThinkingContent[];
 		// Filter out empty thinking blocks to avoid API validation errors
-		const nonEmptyThinkingBlocks = thinkingBlocks.filter((b) => b.thinking && b.thinking.trim().length > 0);
+		const nonEmptyThinkingBlocks = thinkingBlocks.filter(b => b.thinking && b.thinking.trim().length > 0);
 		if (nonEmptyThinkingBlocks.length > 0) {
 			if (compat.requiresThinkingAsText) {
 				// Convert thinking blocks to plain text (no tags to avoid model mimicking them)
-				const thinkingText = nonEmptyThinkingBlocks.map((b) => b.thinking).join("\n\n");
+				const thinkingText = nonEmptyThinkingBlocks.map(b => b.thinking).join("\n\n");
 				const textContent = assistantMsg.content as Array<{ type: "text"; text: string }> | null;
 				if (textContent) {
 					textContent.unshift({ type: "text", text: thinkingText });
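The comment in this hunk records a Copilot quirk: assistant content must be a single string, because sending an array of text parts makes Claude models re-answer all previous prompts. A reduced sketch of that branch (`assistantContent` is a hypothetical helper; sanitization omitted):

```ts
type TextBlock = { type: "text"; text: string };

function assistantContent(blocks: TextBlock[], provider: string): string | TextBlock[] {
	const nonEmpty = blocks.filter(b => b.text.trim().length > 0);
	if (provider === "github-copilot") {
		// Joined into one string: Copilot rejects the array form for assistant turns.
		return nonEmpty.map(b => b.text).join("");
	}
	// Other OpenAI-compatible providers accept an array of text parts.
	return nonEmpty.map(b => ({ type: "text" as const, text: b.text }));
}
```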
@@ -564,14 +564,14 @@ export function convertMessages(
 			// Use the signature from the first thinking block if available (for llama.cpp server + gpt-oss)
 			const signature = nonEmptyThinkingBlocks[0].thinkingSignature;
 			if (signature && signature.length > 0) {
-				(assistantMsg as any)[signature] = nonEmptyThinkingBlocks.map((b) => b.thinking).join("\n");
+				(assistantMsg as any)[signature] = nonEmptyThinkingBlocks.map(b => b.thinking).join("\n");
 			}
 		}
 	}
 
-		const toolCalls = msg.content.filter((b) => b.type === "toolCall") as ToolCall[];
+		const toolCalls = msg.content.filter(b => b.type === "toolCall") as ToolCall[];
 		if (toolCalls.length > 0) {
-			assistantMsg.tool_calls = toolCalls.map((tc) => ({
+			assistantMsg.tool_calls = toolCalls.map(tc => ({
 				id: normalizeMistralToolId(tc.id, compat.requiresMistralToolIds),
 				type: "function" as const,
 				function: {
@@ -580,8 +580,8 @@ export function convertMessages(
 				},
 			}));
 			const reasoningDetails = toolCalls
-				.filter((tc) => tc.thoughtSignature)
-				.map((tc) => {
+				.filter(tc => tc.thoughtSignature)
+				.map(tc => {
 					try {
 						return JSON.parse(tc.thoughtSignature!);
 					} catch {
@@ -616,10 +616,10 @@ export function convertMessages(
 
 		// Extract text and image content
 		const textResult = toolMsg.content
-			.filter((c) => c.type === "text")
-			.map((c) => (c as any).text)
+			.filter(c => c.type === "text")
+			.map(c => (c as any).text)
 			.join("\n");
-		const hasImages = toolMsg.content.some((c) => c.type === "image");
+		const hasImages = toolMsg.content.some(c => c.type === "image");
 
 		// Always send tool result with text (or placeholder if only images)
 		const hasText = textResult.length > 0;
@@ -683,7 +683,7 @@ export function convertMessages(
 }
 
 function convertTools(tools: Tool[]): OpenAI.Chat.Completions.ChatCompletionTool[] {
-	return tools.map((tool) => ({
+	return tools.map(tool => ({
 		type: "function",
 		function: {
 			name: tool.name,