@oh-my-pi/pi-ai 6.9.69 → 8.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@oh-my-pi/pi-ai",
-  "version": "6.9.69",
+  "version": "8.0.0",
   "description": "Unified LLM API with automatic model discovery and provider configuration",
   "type": "module",
   "main": "./src/index.ts",
@@ -17,7 +17,7 @@
     "test": "bun test"
   },
   "dependencies": {
-    "@oh-my-pi/pi-utils": "6.9.69",
+    "@oh-my-pi/pi-utils": "8.0.0",
     "@anthropic-ai/sdk": "0.71.2",
     "@aws-sdk/client-bedrock-runtime": "^3.968.0",
     "@bufbuild/protobuf": "^2.10.2",
package/src/index.ts CHANGED
@@ -5,11 +5,18 @@ export * from "./providers/anthropic";
 export * from "./providers/cursor";
 export * from "./providers/google";
 export * from "./providers/google-gemini-cli";
+export * from "./providers/google-gemini-cli-usage";
 export * from "./providers/google-vertex";
 export * from "./providers/openai-completions";
 export * from "./providers/openai-responses";
 export * from "./stream";
 export * from "./types";
+export * from "./usage";
+export * from "./usage/claude";
+export * from "./usage/github-copilot";
+export * from "./usage/google-antigravity";
+export * from "./usage/openai-codex";
+export * from "./usage/zai";
 export * from "./utils/event-stream";
 export * from "./utils/oauth/index";
 export * from "./utils/overflow";
@@ -18,7 +18,7 @@ import {
   ToolResultStatus,
 } from "@aws-sdk/client-bedrock-runtime";
 
-import { calculateCost } from "../models";
+import { calculateCost } from "$ai/models";
 import type {
   Api,
   AssistantMessage,
@@ -34,10 +34,10 @@ import type {
   Tool,
   ToolCall,
   ToolResultMessage,
-} from "../types";
-import { AssistantMessageEventStream } from "../utils/event-stream";
-import { parseStreamingJson } from "../utils/json-parse";
-import { sanitizeSurrogates } from "../utils/sanitize-unicode";
+} from "$ai/types";
+import { AssistantMessageEventStream } from "$ai/utils/event-stream";
+import { parseStreamingJson } from "$ai/utils/json-parse";
+import { sanitizeSurrogates } from "$ai/utils/sanitize-unicode";
 import { transformMessages } from "./transform-messages";
 
 export interface BedrockOptions extends StreamOptions {
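
The recurring "../…" → "$ai/…" changes in these hunks switch the package's internal relative imports to a $ai path alias. The alias definition itself is not part of this diff; a typical tsconfig mapping that would support it looks like this (hypothetical):

    // Hypothetical tsconfig.json excerpt; the real alias config is not in this diff.
    {
      "compilerOptions": {
        "baseUrl": ".",
        "paths": {
          "$ai/*": ["./src/*"]
        }
      }
    }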
@@ -342,22 +342,39 @@ function convertMessages(context: Context, model: Model<"bedrock-converse-stream
 
     switch (m.role) {
       case "user":
-        result.push({
-          role: ConversationRole.USER,
-          content:
-            typeof m.content === "string"
-              ? [{ text: sanitizeSurrogates(m.content) }]
-              : m.content.map((c) => {
-                  switch (c.type) {
-                    case "text":
-                      return { text: sanitizeSurrogates(c.text) };
-                    case "image":
-                      return { image: createImageBlock(c.mimeType, c.data) };
-                    default:
-                      throw new Error("Unknown user content type");
-                  }
-                }),
-        });
+        if (typeof m.content === "string") {
+          // Skip empty user messages
+          if (!m.content || m.content.trim() === "") continue;
+          result.push({
+            role: ConversationRole.USER,
+            content: [{ text: sanitizeSurrogates(m.content) }],
+          });
+        } else {
+          const contentBlocks = m.content
+            .map((c) => {
+              switch (c.type) {
+                case "text":
+                  return { text: sanitizeSurrogates(c.text) };
+                case "image":
+                  return { image: createImageBlock(c.mimeType, c.data) };
+                default:
+                  throw new Error("Unknown user content type");
+              }
+            })
+            .filter((block) => {
+              // Filter out empty text blocks
+              if ("text" in block && block.text) {
+                return block.text.trim().length > 0;
+              }
+              return true; // Keep non-text blocks (images)
+            });
+          // Skip message if all blocks filtered out
+          if (contentBlocks.length === 0) continue;
+          result.push({
+            role: ConversationRole.USER,
+            content: contentBlocks,
+          });
+        }
         break;
       case "assistant": {
         // Skip assistant messages with empty content (e.g., from aborted requests)
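
The rewritten Bedrock branch drops whitespace-only user strings and prunes empty text blocks before pushing a message, skipping the message entirely when nothing survives. The same predicate restated in isolation (names are illustrative, not part of the diff):

    // Illustrative restatement of the filtering rule added above.
    type Block = { text?: string; image?: unknown };

    function keepBlock(block: Block): boolean {
      if ("text" in block && block.text) {
        return block.text.trim().length > 0; // drop empty/whitespace-only text
      }
      return true; // keep image blocks
    }

    keepBlock({ text: "  \n " }); // false -> message may be skipped entirely
    keepBlock({ image: {} });     // true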
@@ -4,8 +4,8 @@ import type {
   MessageCreateParamsStreaming,
   MessageParam,
 } from "@anthropic-ai/sdk/resources/messages";
-import { calculateCost } from "../models";
-import { getEnvApiKey, OUTPUT_FALLBACK_BUFFER } from "../stream";
+import { calculateCost } from "$ai/models";
+import { getEnvApiKey, OUTPUT_FALLBACK_BUFFER } from "$ai/stream";
 import type {
   Api,
   AssistantMessage,
@@ -21,11 +21,11 @@ import type {
   Tool,
   ToolCall,
   ToolResultMessage,
-} from "../types";
-import { AssistantMessageEventStream } from "../utils/event-stream";
-import { parseStreamingJson } from "../utils/json-parse";
-import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
-import { sanitizeSurrogates } from "../utils/sanitize-unicode";
+} from "$ai/types";
+import { AssistantMessageEventStream } from "$ai/utils/event-stream";
+import { parseStreamingJson } from "$ai/utils/json-parse";
+import { formatErrorMessageWithRetryAfter } from "$ai/utils/retry-after";
+import { sanitizeSurrogates } from "$ai/utils/sanitize-unicode";
 
 import { transformMessages } from "./transform-messages";
 
@@ -4,7 +4,7 @@ import http2 from "node:http2";
 import { create, fromBinary, fromJson, type JsonValue, toBinary, toJson } from "@bufbuild/protobuf";
 import { ValueSchema } from "@bufbuild/protobuf/wkt";
 import JSON5 from "json5";
-import { calculateCost } from "../models";
+import { calculateCost } from "$ai/models";
 import type {
   Api,
   AssistantMessage,
@@ -23,10 +23,10 @@ import type {
   Tool,
   ToolCall,
   ToolResultMessage,
-} from "../types";
-import { AssistantMessageEventStream } from "../utils/event-stream";
-import { parseStreamingJson } from "../utils/json-parse";
-import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
+} from "$ai/types";
+import { AssistantMessageEventStream } from "$ai/utils/event-stream";
+import { parseStreamingJson } from "$ai/utils/json-parse";
+import { formatErrorMessageWithRetryAfter } from "$ai/utils/retry-after";
 import type { McpToolDefinition } from "./cursor/gen/agent_pb";
 import {
   AgentClientMessageSchema,
@@ -1838,11 +1838,12 @@ function buildMcpToolDefinitions(tools: Tool[] | undefined): McpToolDefinition[]
 function extractUserMessageText(msg: Message): string {
   if (msg.role !== "user") return "";
   const content = msg.content;
-  if (typeof content === "string") return content;
-  return content
+  if (typeof content === "string") return content.trim();
+  const text = content
     .filter((c): c is TextContent => c.type === "text")
     .map((c) => c.text)
     .join("\n");
+  return text.trim();
 }
 
 /**
@@ -1891,7 +1892,7 @@ function buildConversationTurns(messages: Message[]): Uint8Array[] {
 
     // Create and serialize user message
     const userText = extractUserMessageText(msg);
-    if (!userText) {
+    if (!userText || userText.length === 0) {
      i++;
      continue;
    }
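
With the added trimming, whitespace-only user content now extracts to the empty string, so the guard in buildConversationTurns treats it the same as a missing message. Illustrative behaviour after the change:

    // extractUserMessageText({ role: "user", content: "   " })                          -> ""
    // extractUserMessageText({ role: "user", content: [{ type: "text", text: " \n " }] }) -> ""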
@@ -1982,10 +1983,15 @@ function buildGrpcRequest(
   const userText =
     lastMessage?.role === "user"
       ? typeof lastMessage.content === "string"
-        ? lastMessage.content
+        ? lastMessage.content.trim()
         : extractText(lastMessage.content)
       : "";
 
+  // Validate that we have non-empty user text for the action
+  if (!userText || userText.trim().length === 0) {
+    throw new Error("Cannot send empty user message to Cursor API");
+  }
+
   const userMessage = create(UserMessageSchema, {
     text: userText,
     messageId: crypto.randomUUID(),
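
Note the asymmetry: the message converters elsewhere in this release silently skip empty user input, while buildGrpcRequest fails loudly. Callers that accept untrimmed input should check before building a request (sketch; userInput is illustrative):

    const text = userInput.trim();
    if (text.length === 0) {
      // surface a validation error upstream instead of letting
      // buildGrpcRequest throw "Cannot send empty user message to Cursor API"
    }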
@@ -0,0 +1,271 @@
+import type {
+  UsageAmount,
+  UsageFetchContext,
+  UsageFetchParams,
+  UsageLimit,
+  UsageProvider,
+  UsageReport,
+  UsageWindow,
+} from "$ai/usage";
+import { refreshGoogleCloudToken } from "$ai/utils/oauth/google-gemini-cli";
+
+const DEFAULT_ENDPOINT = "https://cloudcode-pa.googleapis.com";
+const CACHE_TTL_MS = 60_000;
+
+const GEMINI_CLI_HEADERS = {
+  "User-Agent": "google-cloud-sdk vscode_cloudshelleditor/0.1",
+  "X-Goog-Api-Client": "gl-node/22.17.0",
+  "Client-Metadata": JSON.stringify({
+    ideType: "IDE_UNSPECIFIED",
+    platform: "PLATFORM_UNSPECIFIED",
+    pluginType: "GEMINI",
+  }),
+};
+
+const GEMINI_TIER_MAP: Array<{ tier: string; models: string[] }> = [
+  {
+    tier: "3-Flash",
+    models: ["gemini-3-flash-preview", "gemini-3-flash"],
+  },
+  {
+    tier: "Flash",
+    models: ["gemini-2.5-flash", "gemini-2.5-flash-lite", "gemini-2.0-flash", "gemini-1.5-flash"],
+  },
+  {
+    tier: "Pro",
+    models: ["gemini-2.5-pro", "gemini-3-pro-preview", "gemini-3-pro", "gemini-1.5-pro"],
+  },
+];
+
+interface LoadCodeAssistResponse {
+  cloudaicompanionProject?: string | { id?: string };
+  currentTier?: { id?: string; name?: string };
+}
+
+interface RetrieveUserQuotaResponse {
+  buckets?: Array<{
+    modelId?: string;
+    remainingFraction?: number;
+    resetTime?: string;
+  }>;
+}
+
+function getProjectId(payload: LoadCodeAssistResponse | undefined): string | undefined {
+  if (!payload) return undefined;
+  if (typeof payload.cloudaicompanionProject === "string") {
+    return payload.cloudaicompanionProject;
+  }
+  if (payload.cloudaicompanionProject && typeof payload.cloudaicompanionProject === "object") {
+    return payload.cloudaicompanionProject.id;
+  }
+  return undefined;
+}
+
+function getModelTier(modelId: string): string | undefined {
+  for (const entry of GEMINI_TIER_MAP) {
+    if (entry.models.includes(modelId)) {
+      return entry.tier;
+    }
+  }
+  const normalized = modelId.toLowerCase();
+  if (normalized.includes("flash")) return "Flash";
+  if (normalized.includes("pro")) return "Pro";
+  return undefined;
+}
+
+function parseWindow(resetTime: string | undefined, now: number): UsageWindow {
+  if (!resetTime) {
+    return {
+      id: "quota",
+      label: "Quota window",
+    };
+  }
+  const resetsAt = Date.parse(resetTime);
+  if (Number.isNaN(resetsAt)) {
+    return {
+      id: "quota",
+      label: "Quota window",
+    };
+  }
+  return {
+    id: `reset-${resetsAt}`,
+    label: "Quota window",
+    resetsAt,
+    resetInMs: Math.max(0, resetsAt - now),
+  };
+}
+
+function buildAmount(remainingFraction: number | undefined): UsageAmount {
+  if (remainingFraction === undefined || !Number.isFinite(remainingFraction)) {
+    return { unit: "percent" };
+  }
+  const remaining = Math.min(Math.max(remainingFraction, 0), 1);
+  const used = Math.min(Math.max(1 - remaining, 0), 1);
+  return {
+    unit: "percent",
+    used: Math.round(used * 1000) / 10,
+    remaining: Math.round(remaining * 1000) / 10,
+    limit: 100,
+    usedFraction: used,
+    remainingFraction: remaining,
+  };
+}
+
+async function resolveAccessToken(params: UsageFetchParams, ctx: UsageFetchContext): Promise<string | undefined> {
+  const { credential } = params;
+  if (credential.type !== "oauth") return undefined;
+  const now = ctx.now();
+  if (credential.accessToken && (!credential.expiresAt || credential.expiresAt > now + 60_000)) {
+    return credential.accessToken;
+  }
+  if (!credential.refreshToken || !credential.projectId) return credential.accessToken;
+  try {
+    const refreshed = await refreshGoogleCloudToken(credential.refreshToken, credential.projectId);
+    return refreshed.access;
+  } catch (error) {
+    ctx.logger?.warn("Gemini CLI token refresh failed", { error: String(error) });
+    return credential.accessToken;
+  }
+}
+
+async function loadCodeAssist(
+  params: UsageFetchParams,
+  ctx: UsageFetchContext,
+  accessToken: string,
+  baseUrl: string,
+  projectId?: string,
+): Promise<LoadCodeAssistResponse | undefined> {
+  const response = await ctx.fetch(`${baseUrl}/v1internal:loadCodeAssist`, {
+    method: "POST",
+    headers: {
+      Authorization: `Bearer ${accessToken}`,
+      "Content-Type": "application/json",
+      ...GEMINI_CLI_HEADERS,
+    },
+    body: JSON.stringify({
+      ...(projectId ? { cloudaicompanionProject: projectId } : {}),
+      metadata: {
+        ideType: "IDE_UNSPECIFIED",
+        platform: "PLATFORM_UNSPECIFIED",
+        pluginType: "GEMINI",
+      },
+    }),
+    signal: params.signal,
+  });
+
+  if (!response.ok) {
+    const errorText = await response.text();
+    ctx.logger?.warn("Gemini CLI loadCodeAssist failed", {
+      status: response.status,
+      error: errorText,
+    });
+    return undefined;
+  }
+
+  return (await response.json()) as LoadCodeAssistResponse;
+}
+
+async function fetchQuota(
+  params: UsageFetchParams,
+  ctx: UsageFetchContext,
+  accessToken: string,
+  baseUrl: string,
+  projectId?: string,
+): Promise<RetrieveUserQuotaResponse | undefined> {
+  const response = await ctx.fetch(`${baseUrl}/v1internal:retrieveUserQuota`, {
+    method: "POST",
+    headers: {
+      Authorization: `Bearer ${accessToken}`,
+      "Content-Type": "application/json",
+      ...GEMINI_CLI_HEADERS,
+    },
+    body: JSON.stringify(projectId ? { project: projectId } : {}),
+    signal: params.signal,
+  });
+
+  if (!response.ok) {
+    const errorText = await response.text();
+    ctx.logger?.warn("Gemini CLI retrieveUserQuota failed", {
+      status: response.status,
+      error: errorText,
+    });
+    return undefined;
+  }
+
+  return (await response.json()) as RetrieveUserQuotaResponse;
+}
+
+export const googleGeminiCliUsageProvider: UsageProvider = {
+  id: "google-gemini-cli",
+  supports: ({ credential }) => credential.type === "oauth" && !!credential.accessToken,
+  async fetchUsage(params, ctx) {
+    const { credential } = params;
+    if (credential.type !== "oauth") {
+      return null;
+    }
+    const accessToken = await resolveAccessToken(params, ctx);
+    if (!accessToken) {
+      return null;
+    }
+
+    const now = ctx.now();
+    const baseUrl = (params.baseUrl?.trim() || DEFAULT_ENDPOINT).replace(/\/$/, "");
+    const cacheKey = `usage:${params.provider}:${credential.accountId ?? credential.email ?? "default"}:${baseUrl}:${
+      credential.projectId ?? "default"
+    }`;
+    const cached = await ctx.cache.get(cacheKey);
+    if (cached && cached.expiresAt > now) {
+      return cached.value;
+    }
+
+    const loadResponse = await loadCodeAssist(params, ctx, accessToken, baseUrl, credential.projectId);
+    const projectId = credential.projectId ?? getProjectId(loadResponse);
+    const quotaResponse = await fetchQuota(params, ctx, accessToken, baseUrl, projectId);
+    if (!quotaResponse) {
+      const entry = { value: null, expiresAt: now + CACHE_TTL_MS };
+      await ctx.cache.set(cacheKey, entry);
+      return null;
+    }
+
+    const limits: UsageLimit[] = [];
+    const buckets = quotaResponse.buckets ?? [];
+
+    buckets.forEach((bucket, index) => {
+      const modelId = bucket.modelId;
+      const window = parseWindow(bucket.resetTime, now);
+      const amount = buildAmount(bucket.remainingFraction);
+      const tier = modelId ? getModelTier(modelId) : undefined;
+      const label = modelId ? `Gemini ${modelId}` : "Gemini quota";
+      const id = `${modelId ?? "unknown"}:${window?.id ?? index}`;
+
+      limits.push({
+        id,
+        label,
+        scope: {
+          provider: params.provider,
+          accountId: credential.accountId,
+          projectId,
+          modelId,
+          tier,
+          windowId: window?.id,
+        },
+        window,
+        amount,
+      });
+    });
+
+    const report: UsageReport = {
+      provider: params.provider,
+      fetchedAt: now,
+      limits,
+      metadata: {
+        currentTierId: loadResponse?.currentTier?.id,
+        currentTierName: loadResponse?.currentTier?.name,
+      },
+      raw: quotaResponse,
+    };
+
+    await ctx.cache.set(cacheKey, { value: report, expiresAt: now + CACHE_TTL_MS });
+    return report;
+  },
+};
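
A sketch of driving the new provider directly. The UsageFetchParams and UsageFetchContext shapes are inferred from how this file uses them (oauth credential, injectable fetch/clock/cache, optional logger); the exact exported types live in the usage module, which this diff does not show:

    import { googleGeminiCliUsageProvider } from "@oh-my-pi/pi-ai";

    // In-memory stand-in for ctx.cache (field names inferred from this file).
    const cache = new Map<string, { value: unknown; expiresAt: number }>();

    const report = await googleGeminiCliUsageProvider.fetchUsage(
      {
        provider: "google-gemini-cli",
        credential: { type: "oauth", accessToken: "<access token>" },
      },
      {
        now: () => Date.now(),
        fetch: (url, init) => fetch(url, init),
        cache: {
          get: async (key) => cache.get(key),
          set: async (key, entry) => void cache.set(key, entry),
        },
        logger: console,
      },
    );
    // report?.limits holds one UsageLimit per quota bucket returned by
    // v1internal:retrieveUserQuota, with used/remaining expressed in percent.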
@@ -7,7 +7,7 @@
 import { createHash } from "node:crypto";
 import type { Content, ThinkingConfig } from "@google/genai";
 import { abortableSleep } from "@oh-my-pi/pi-utils";
-import { calculateCost } from "../models";
+import { calculateCost } from "$ai/models";
 import type {
   Api,
   AssistantMessage,
@@ -18,9 +18,9 @@ import type {
   TextContent,
   ThinkingContent,
   ToolCall,
-} from "../types";
-import { AssistantMessageEventStream } from "../utils/event-stream";
-import { sanitizeSurrogates } from "../utils/sanitize-unicode";
+} from "$ai/types";
+import { AssistantMessageEventStream } from "$ai/utils/event-stream";
+import { sanitizeSurrogates } from "$ai/utils/sanitize-unicode";
 import {
   convertMessages,
   convertTools,
@@ -3,8 +3,8 @@
  */
 
 import { type Content, FinishReason, FunctionCallingConfigMode, type Part, type Schema } from "@google/genai";
-import type { Context, ImageContent, Model, StopReason, TextContent, Tool } from "../types";
-import { sanitizeSurrogates } from "../utils/sanitize-unicode";
+import type { Context, ImageContent, Model, StopReason, TextContent, Tool } from "$ai/types";
+import { sanitizeSurrogates } from "$ai/utils/sanitize-unicode";
 import { transformMessages } from "./transform-messages";
 
 type GoogleApiType = "google-generative-ai" | "google-gemini-cli" | "google-vertex";
@@ -79,6 +79,8 @@ export function convertMessages<T extends GoogleApiType>(model: Model<T>, contex
   for (const msg of transformedMessages) {
     if (msg.role === "user") {
       if (typeof msg.content === "string") {
+        // Skip empty user messages
+        if (!msg.content || msg.content.trim() === "") continue;
         contents.push({
           role: "user",
           parts: [{ text: sanitizeSurrogates(msg.content) }],
@@ -96,7 +98,14 @@
           };
         }
       });
-      const filteredParts = !model.input.includes("image") ? parts.filter((p) => p.text !== undefined) : parts;
+      // Filter out images if model doesn't support them, and empty text blocks
+      let filteredParts = !model.input.includes("image") ? parts.filter((p) => p.text !== undefined) : parts;
+      filteredParts = filteredParts.filter((p) => {
+        if (p.text !== undefined) {
+          return p.text.trim().length > 0;
+        }
+        return true; // Keep non-text parts (images)
+      });
       if (filteredParts.length === 0) continue;
       contents.push({
         role: "user",
@@ -5,7 +5,7 @@ import {
   type ThinkingConfig,
   ThinkingLevel,
 } from "@google/genai";
-import { calculateCost } from "../models";
+import { calculateCost } from "$ai/models";
 import type {
   Api,
   AssistantMessage,
@@ -16,10 +16,10 @@ import type {
   TextContent,
   ThinkingContent,
   ToolCall,
-} from "../types";
-import { AssistantMessageEventStream } from "../utils/event-stream";
-import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
-import { sanitizeSurrogates } from "../utils/sanitize-unicode";
+} from "$ai/types";
+import { AssistantMessageEventStream } from "$ai/utils/event-stream";
+import { formatErrorMessageWithRetryAfter } from "$ai/utils/retry-after";
+import { sanitizeSurrogates } from "$ai/utils/sanitize-unicode";
 import type { GoogleThinkingLevel } from "./google-gemini-cli";
 import {
   convertMessages,
@@ -4,8 +4,8 @@ import {
   GoogleGenAI,
   type ThinkingConfig,
 } from "@google/genai";
-import { calculateCost } from "../models";
-import { getEnvApiKey } from "../stream";
+import { calculateCost } from "$ai/models";
+import { getEnvApiKey } from "$ai/stream";
 import type {
   Api,
   AssistantMessage,
@@ -16,10 +16,10 @@ import type {
   TextContent,
   ThinkingContent,
   ToolCall,
-} from "../types";
-import { AssistantMessageEventStream } from "../utils/event-stream";
-import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
-import { sanitizeSurrogates } from "../utils/sanitize-unicode";
+} from "$ai/types";
+import { AssistantMessageEventStream } from "$ai/utils/event-stream";
+import { formatErrorMessageWithRetryAfter } from "$ai/utils/retry-after";
+import { sanitizeSurrogates } from "$ai/utils/sanitize-unicode";
 import type { GoogleThinkingLevel } from "./google-gemini-cli";
 import {
   convertMessages,
@@ -50,6 +50,10 @@ function clampReasoningEffort(model: string, effort: ReasoningConfig["effort"]):
     return "high";
   }
 
+  if ((modelId === "gpt-5.2" || modelId === "gpt-5.2-codex") && effort === "minimal") {
+    return "low";
+  }
+
   // gpt-5.1-codex-mini only supports medium/high.
   if (modelId === "gpt-5.1-codex-mini") {
     return effort === "high" || effort === "xhigh" ? "high" : "medium";
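
Taken together with the pre-existing rules, the clamp now maps as follows (illustrative, assuming modelId is derived from the model argument):

    // clampReasoningEffort("gpt-5.2", "minimal")        -> "low"
    // clampReasoningEffort("gpt-5.2-codex", "minimal")  -> "low"
    // clampReasoningEffort("gpt-5.1-codex-mini", "low") -> "medium" (pre-existing rule)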
@@ -9,9 +9,8 @@ import type {
   ResponseOutputMessage,
   ResponseReasoningItem,
 } from "openai/resources/responses/responses";
-import packageJson from "../../package.json" with { type: "json" };
-import { calculateCost } from "../models";
-import { getEnvApiKey } from "../stream";
+import { calculateCost } from "$ai/models";
+import { getEnvApiKey } from "$ai/stream";
 import type {
   Api,
   AssistantMessage,
@@ -24,11 +23,12 @@ import type {
   ThinkingContent,
   Tool,
   ToolCall,
-} from "../types";
-import { AssistantMessageEventStream } from "../utils/event-stream";
-import { parseStreamingJson } from "../utils/json-parse";
-import { formatErrorMessageWithRetryAfter } from "../utils/retry-after";
-import { sanitizeSurrogates } from "../utils/sanitize-unicode";
+} from "$ai/types";
+import { AssistantMessageEventStream } from "$ai/utils/event-stream";
+import { parseStreamingJson } from "$ai/utils/json-parse";
+import { formatErrorMessageWithRetryAfter } from "$ai/utils/retry-after";
+import { sanitizeSurrogates } from "$ai/utils/sanitize-unicode";
+import packageJson from "../../package.json" with { type: "json" };
 import {
   CODEX_BASE_URL,
   JWT_CLAIM_PATH,
@@ -571,6 +571,8 @@ function convertMessages(model: Model<"openai-codex-responses">, context: Contex
   for (const msg of transformedMessages) {
     if (msg.role === "user") {
       if (typeof msg.content === "string") {
+        // Skip empty user messages
+        if (!msg.content || msg.content.trim() === "") continue;
         messages.push({
           role: "user",
           content: [{ type: "input_text", text: sanitizeSurrogates(msg.content) }],
@@ -589,9 +591,16 @@
           image_url: `data:${item.mimeType};base64,${item.data}`,
         } satisfies ResponseInputImage;
       });
-      const filteredContent = !model.input.includes("image")
+      // Filter out images if model doesn't support them, and empty text blocks
+      let filteredContent = !model.input.includes("image")
         ? content.filter((c) => c.type !== "input_image")
         : content;
+      filteredContent = filteredContent.filter((c) => {
+        if (c.type === "input_text") {
+          return c.text.trim().length > 0;
+        }
+        return true; // Keep non-text content (images)
+      });
       if (filteredContent.length === 0) continue;
       messages.push({
         role: "user",