@ljoukov/llm 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +195 -0
- package/dist/index.cjs +3470 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +252 -0
- package/dist/index.d.ts +252 -0
- package/dist/index.js +3412 -0
- package/dist/index.js.map +1 -0
- package/package.json +56 -0
package/dist/index.js
ADDED
|
@@ -0,0 +1,3412 @@
|
|
|
1
|
+
// src/llm.ts
|
|
2
|
+
import { Buffer as Buffer2 } from "buffer";
|
|
3
|
+
import { AsyncLocalStorage } from "async_hooks";
|
|
4
|
+
import { randomBytes } from "crypto";
|
|
5
|
+
import {
|
|
6
|
+
FinishReason,
|
|
7
|
+
FunctionCallingConfigMode
|
|
8
|
+
} from "@google/genai";
|
|
9
|
+
import { zodToJsonSchema } from "@alcyone-labs/zod-to-json-schema";
|
|
10
|
+
import { z as z3 } from "zod";
|
|
11
|
+
|
|
12
|
+
// src/utils/asyncQueue.ts
|
|
13
|
+
function createAsyncQueue() {
  // Values produced before any consumer asked for them.
  const buffered = [];
  // The single consumer currently awaiting a value, if any.
  let waiter = null;
  let isClosed = false;
  let failure = null;

  // Detaches and returns the waiting consumer (or null).
  const takeWaiter = () => {
    const current = waiter;
    waiter = null;
    return current;
  };

  // Enqueues a value; no-op once the queue is closed or failed.
  const push = (value) => {
    if (isClosed || failure) {
      return;
    }
    const consumer = takeWaiter();
    if (consumer) {
      consumer.resolve({ value, done: false });
    } else {
      buffered.push(value);
    }
  };

  // Marks the end of the stream; a waiting consumer completes immediately.
  const close = () => {
    if (isClosed || failure) {
      return;
    }
    isClosed = true;
    takeWaiter()?.resolve({ value: void 0, done: true });
  };

  // Poisons the queue with an error; a waiting consumer rejects immediately.
  // Note: values still buffered are discarded (fail-fast semantics).
  const fail = (err) => {
    if (isClosed || failure) {
      return;
    }
    failure = err;
    takeWaiter()?.reject(err);
  };

  // Single-consumer async iterator: drains the buffer, then suspends
  // until push/close/fail settles the pending promise.
  async function* iterator() {
    for (;;) {
      if (failure) {
        throw failure;
      }
      if (buffered.length > 0) {
        yield buffered.shift();
        continue;
      }
      if (isClosed) {
        return;
      }
      const next = await new Promise((resolve, reject) => {
        waiter = { resolve, reject };
      });
      if (next.done) {
        return;
      }
      yield next.value;
    }
  }

  return { push, close, fail, iterable: iterator() };
}
|
|
75
|
+
|
|
76
|
+
// src/google/pricing.ts
|
|
77
|
+
// Tiered Gemini Pro pricing (USD per token): prompts larger than
// `threshold` tokens bill every component at the high-tier rate.
const GEMINI_3_PRO_PREVIEW_PRICING = {
  threshold: 2e5,
  inputRateLow: 2 / 1e6,
  inputRateHigh: 4 / 1e6,
  cachedRateLow: 0.2 / 1e6,
  cachedRateHigh: 0.4 / 1e6,
  outputRateLow: 12 / 1e6,
  outputRateHigh: 18 / 1e6
};
const GEMINI_2_5_PRO_PRICING = {
  threshold: 2e5,
  inputRateLow: 1.25 / 1e6,
  inputRateHigh: 2.5 / 1e6,
  cachedRateLow: 0.125 / 1e6,
  cachedRateHigh: 0.25 / 1e6,
  outputRateLow: 10 / 1e6,
  outputRateHigh: 15 / 1e6
};
// Image-preview pricing: flat token rates plus fixed USD prices per
// generated image, keyed by output resolution.
const GEMINI_IMAGE_PREVIEW_PRICING = {
  inputRate: 2 / 1e6,
  cachedRate: 0.2 / 1e6,
  outputTextRate: 12 / 1e6,
  outputImageRate: 120 / 1e6,
  imagePrices: {
    "1K": 0.134,
    "2K": 0.134,
    "4K": 0.24
  }
};

/**
 * Returns the tiered pricing table for a Gemini Pro model id,
 * or undefined when the model has no known table.
 */
function getGeminiProPricing(modelId) {
  if (modelId.includes("gemini-2.5-pro")) {
    return GEMINI_2_5_PRO_PRICING;
  }
  return modelId.includes("gemini-3-pro") ? GEMINI_3_PRO_PREVIEW_PRICING : void 0;
}

/**
 * Returns image-preview pricing for image-capable model ids,
 * or undefined for everything else.
 */
function getGeminiImagePricing(modelId) {
  return modelId.includes("image-preview") ? GEMINI_IMAGE_PREVIEW_PRICING : void 0;
}
|
|
121
|
+
|
|
122
|
+
// src/openai/pricing.ts
|
|
123
|
+
// Flat OpenAI pricing tables (USD per token).
const OPENAI_GPT_52_PRICING = {
  inputRate: 1.75 / 1e6,
  cachedRate: 0.175 / 1e6,
  outputRate: 14 / 1e6
};
const OPENAI_GPT_51_CODEX_MINI_PRICING = {
  inputRate: 0.25 / 1e6,
  cachedRate: 0.025 / 1e6,
  outputRate: 2 / 1e6
};

/**
 * Returns the flat pricing table for a known OpenAI model id,
 * or undefined when the model is not priced here.
 */
function getOpenAiPricing(modelId) {
  if (modelId.includes("gpt-5.2")) {
    return OPENAI_GPT_52_PRICING;
  }
  return modelId.includes("gpt-5.1-codex-mini") ? OPENAI_GPT_51_CODEX_MINI_PRICING : void 0;
}
|
|
142
|
+
|
|
143
|
+
// src/utils/cost.ts
|
|
144
|
+
/**
 * Normalizes a usage counter: non-negative finite numbers pass through
 * (clamped at zero); anything else — strings, NaN, Infinity, undefined —
 * counts as 0 so cost math never sees garbage.
 */
function resolveUsageNumber(value) {
  const usable = typeof value === "number" && Number.isFinite(value);
  return usable ? Math.max(0, value) : 0;
}
|
|
150
|
+
/**
 * Estimates the USD cost of one model call from its token usage.
 * Pricing tables are looked up by model id (image-preview first, then
 * Gemini Pro tiers, then OpenAI); unknown models cost 0.
 *
 * `responseImages`/`imageSize` only matter for image-preview models:
 * when usage does not break out image tokens, the image count and the
 * fixed per-image price are used to estimate them.
 */
function estimateCallCostUsd({
  modelId,
  tokens,
  responseImages,
  imageSize
}) {
  if (!tokens) {
    return 0;
  }
  const prompt = resolveUsageNumber(tokens.promptTokens);
  const cached = resolveUsageNumber(tokens.cachedTokens);
  const output = resolveUsageNumber(tokens.responseTokens);
  const outputImage = resolveUsageNumber(tokens.responseImageTokens);
  const thinking = resolveUsageNumber(tokens.thinkingTokens);
  const toolUsePrompt = resolveUsageNumber(tokens.toolUsePromptTokens);
  const totalPrompt = prompt + toolUsePrompt;
  const uncachedPrompt = Math.max(0, totalPrompt - cached);

  const imagePricing = getGeminiImagePricing(modelId);
  if (imagePricing) {
    // Fall back to 2K when the requested size has no listed price.
    const sizeKey = imageSize && imagePricing.imagePrices[imageSize] ? imageSize : "2K";
    const perImageUsd = imagePricing.imagePrices[sizeKey] ?? 0;
    const tokensPerImage = imagePricing.outputImageRate > 0 ? perImageUsd / imagePricing.outputImageRate : 0;
    let textTokens = Math.max(0, output - outputImage);
    let imageTokens = outputImage;
    if (imageTokens <= 0 && responseImages > 0 && tokensPerImage > 0) {
      // Usage did not report image tokens: derive them from the image
      // count and carve them out of the text tokens when possible.
      const estimated = responseImages * tokensPerImage;
      imageTokens = estimated;
      if (textTokens >= estimated) {
        textTokens -= estimated;
      }
    }
    const inputCost = uncachedPrompt * imagePricing.inputRate;
    const cachedCost = cached * imagePricing.cachedRate;
    const textCost = (textTokens + thinking) * imagePricing.outputTextRate;
    const imageCost = imageTokens * imagePricing.outputImageRate;
    return inputCost + cachedCost + textCost + imageCost;
  }

  const proPricing = getGeminiProPricing(modelId);
  if (proPricing) {
    // The whole call bills at one tier, chosen by total prompt size.
    const high = totalPrompt > proPricing.threshold;
    const inputRate = high ? proPricing.inputRateHigh : proPricing.inputRateLow;
    const cachedRate = high ? proPricing.cachedRateHigh : proPricing.cachedRateLow;
    const outputRate = high ? proPricing.outputRateHigh : proPricing.outputRateLow;
    return uncachedPrompt * inputRate + cached * cachedRate + (output + thinking) * outputRate;
  }

  const openAiPricing = getOpenAiPricing(modelId);
  if (openAiPricing) {
    return uncachedPrompt * openAiPricing.inputRate + cached * openAiPricing.cachedRate + (output + thinking) * openAiPricing.outputRate;
  }
  return 0;
}
|
|
209
|
+
|
|
210
|
+
// src/openai/chatgpt-codex.ts
|
|
211
|
+
import os from "os";
|
|
212
|
+
import { TextDecoder } from "util";
|
|
213
|
+
|
|
214
|
+
// src/openai/chatgpt-auth.ts
|
|
215
|
+
import { Buffer } from "buffer";
|
|
216
|
+
import { z } from "zod";
|
|
217
|
+
|
|
218
|
+
// src/utils/env.ts
|
|
219
|
+
import fs from "fs";
|
|
220
|
+
import path from "path";
|
|
221
|
+
// Guards loadLocalEnv so .env.local is read at most once per process.
let envLoaded = false;

/** Loads `.env.local` from the working directory once (never overriding). */
function loadLocalEnv() {
  if (envLoaded) {
    return;
  }
  loadEnvFromFile(path.join(process.cwd(), ".env.local"), { override: false });
  envLoaded = true;
}

/**
 * Parses a dotenv-style file and copies its entries into process.env.
 * A missing file is silently ignored; other read errors propagate.
 * Existing variables are kept unless `override` is true.
 */
function loadEnvFromFile(filePath, { override = false } = {}) {
  let content;
  try {
    content = fs.readFileSync(filePath, "utf8");
  } catch (error) {
    if (error?.code === "ENOENT") {
      return;
    }
    throw error;
  }
  for (const line of content.split(/\r?\n/u)) {
    const entry = parseEnvLine(line);
    if (!entry) {
      continue;
    }
    const [key, value] = entry;
    const alreadySet = process.env[key] !== void 0;
    if (override || !alreadySet) {
      process.env[key] = value;
    }
  }
}

/**
 * Parses one `KEY=value` line (optionally `export`-prefixed).
 * Returns [key, value], or null for blanks, comments, and malformed
 * lines. Quoted values are unquoted verbatim; unquoted values drop a
 * trailing ` #` comment and surrounding whitespace.
 */
function parseEnvLine(line) {
  const trimmed = line.trim();
  if (!trimmed || trimmed.startsWith("#")) {
    return null;
  }
  const match = trimmed.match(/^(?:export\s+)?([A-Za-z_][A-Za-z0-9_\-.]*)\s*=\s*(.*)$/u);
  if (!match) {
    return null;
  }
  const key = match[1];
  if (!key) {
    return null;
  }
  let value = match[2] ?? "";
  const doubleQuoted = value.startsWith('"') && value.endsWith('"') && value.length >= 2;
  const singleQuoted = !doubleQuoted && value.startsWith("'") && value.endsWith("'") && value.length >= 2;
  if (doubleQuoted || singleQuoted) {
    value = value.slice(1, -1);
  } else {
    const commentIndex = value.indexOf(" #");
    if (commentIndex >= 0) {
      value = value.slice(0, commentIndex);
    }
    value = value.trim();
  }
  return [key, value];
}
|
|
278
|
+
|
|
279
|
+
// src/openai/chatgpt-auth.ts
|
|
280
|
+
// src/openai/chatgpt-auth.ts
// Environment variable names accepted for ChatGPT OAuth credentials.
// Several aliases are supported for each field (short / long spellings).
const CHATGPT_AUTH_JSON_ENV = "CHATGPT_AUTH_JSON";
const CHATGPT_AUTH_JSON_B64_ENV = "CHATGPT_AUTH_JSON_B64";
const CHATGPT_ACCESS_ENV = "CHATGPT_ACCESS";
const CHATGPT_REFRESH_ENV = "CHATGPT_REFRESH";
const CHATGPT_EXPIRES_ENV = "CHATGPT_EXPIRES";
const CHATGPT_ACCOUNT_ID_ENV = "CHATGPT_ACCOUNT_ID";
const CHATGPT_ID_TOKEN_ENV = "CHATGPT_ID_TOKEN";
const CHATGPT_ACCESS_TOKEN_ENV = "CHATGPT_ACCESS_TOKEN";
const CHATGPT_REFRESH_TOKEN_ENV = "CHATGPT_REFRESH_TOKEN";
const CHATGPT_EXPIRES_AT_ENV = "CHATGPT_EXPIRES_AT";
// OAuth client constants for the ChatGPT desktop/CLI flow.
const CHATGPT_OAUTH_CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann";
const CHATGPT_OAUTH_TOKEN_URL = "https://auth.openai.com/oauth/token";
const CHATGPT_OAUTH_REDIRECT_URI = "http://localhost:1455/auth/callback";
// Tokens are treated as expired this many ms before their real expiry.
const TOKEN_EXPIRY_BUFFER_MS = 3e4;
// Loose credential input: every field optional, snake_case / camelCase /
// short aliases all accepted; unknown keys are passed through.
const AuthInputSchema = z.object({
  access: z.string().min(1).optional(),
  access_token: z.string().min(1).optional(),
  accessToken: z.string().min(1).optional(),
  refresh: z.string().min(1).optional(),
  refresh_token: z.string().min(1).optional(),
  refreshToken: z.string().min(1).optional(),
  expires: z.union([z.number(), z.string()]).optional(),
  expires_at: z.union([z.number(), z.string()]).optional(),
  expiresAt: z.union([z.number(), z.string()]).optional(),
  accountId: z.string().min(1).optional(),
  account_id: z.string().min(1).optional(),
  id_token: z.string().optional(),
  idToken: z.string().optional()
}).loose();
// Shape of the OAuth refresh_token grant response.
const RefreshResponseSchema = z.object({
  access_token: z.string().min(1),
  refresh_token: z.string().min(1),
  expires_in: z.union([z.number(), z.string()])
});
// Shape of the authorization_code grant response (may carry an id_token).
const ExchangeResponseSchema = z.object({
  access_token: z.string().min(1),
  refresh_token: z.string().min(1),
  expires_in: z.union([z.number(), z.string()]),
  id_token: z.string().optional()
});
// Module-level credential cache plus in-flight refresh de-duplication.
let cachedProfile = null;
let refreshPromise = null;
|
|
322
|
+
/**
 * Serializes an auth profile into the canonical JSON shape used by the
 * CHATGPT_AUTH_JSON environment variable. `id_token` is appended only
 * when the profile carries one.
 */
function encodeChatGptAuthJson(profile) {
  const payload = {
    access: profile.access,
    refresh: profile.refresh,
    expires: profile.expires,
    accountId: profile.accountId
  };
  if (profile.idToken) {
    payload.id_token = profile.idToken;
  }
  return JSON.stringify(payload);
}

/** Base64url-encoded variant, for CHATGPT_AUTH_JSON_B64. */
function encodeChatGptAuthJsonB64(profile) {
  const json = encodeChatGptAuthJson(profile);
  return Buffer.from(json).toString("base64url");
}
|
|
335
|
+
/**
 * Exchanges a PKCE authorization code for ChatGPT OAuth tokens.
 * Throws with status and response body on any non-2xx reply.
 */
async function exchangeChatGptOauthCode({
  code,
  verifier,
  redirectUri = CHATGPT_OAUTH_REDIRECT_URI
}) {
  const form = new URLSearchParams({
    grant_type: "authorization_code",
    client_id: CHATGPT_OAUTH_CLIENT_ID,
    code,
    code_verifier: verifier,
    redirect_uri: redirectUri
  });
  const response = await fetch(CHATGPT_OAUTH_TOKEN_URL, {
    method: "POST",
    headers: {
      "Content-Type": "application/x-www-form-urlencoded"
    },
    body: form.toString()
  });
  if (!response.ok) {
    const body = await response.text();
    throw new Error(`ChatGPT OAuth token exchange failed (${response.status}): ${body}`);
  }
  const payload = ExchangeResponseSchema.parse(await response.json());
  return profileFromTokenResponse(payload);
}
|
|
360
|
+
/**
 * Redeems a refresh token for a fresh ChatGPT access token.
 * Throws with status and response body on any non-2xx reply.
 */
async function refreshChatGptOauthToken(refreshToken) {
  const form = new URLSearchParams({
    grant_type: "refresh_token",
    client_id: CHATGPT_OAUTH_CLIENT_ID,
    refresh_token: refreshToken
  });
  const response = await fetch(CHATGPT_OAUTH_TOKEN_URL, {
    method: "POST",
    headers: {
      "Content-Type": "application/x-www-form-urlencoded"
    },
    body: form.toString()
  });
  if (!response.ok) {
    const body = await response.text();
    throw new Error(`ChatGPT OAuth refresh failed (${response.status}): ${body}`);
  }
  const payload = RefreshResponseSchema.parse(await response.json());
  return profileFromTokenResponse(payload);
}
|
|
379
|
+
/**
 * Returns a usable auth profile, loading credentials from the
 * environment and refreshing via OAuth when the cached token is
 * missing or near expiry. Concurrent callers share one in-flight
 * refresh instead of each hitting the token endpoint.
 */
async function getChatGptAuthProfile() {
  if (cachedProfile && !isExpired(cachedProfile)) {
    return cachedProfile;
  }
  if (!refreshPromise) {
    refreshPromise = (async () => {
      try {
        const base = cachedProfile ?? loadAuthProfileFromEnv();
        const fresh = isExpired(base) ? await refreshChatGptOauthToken(base.refresh) : base;
        cachedProfile = fresh;
        return fresh;
      } finally {
        // Clear the latch so a later expiry triggers a new refresh.
        refreshPromise = null;
      }
    })();
  }
  return refreshPromise;
}
|
|
398
|
+
/**
 * Builds an auth profile from an OAuth token response.
 * The account id is read from the id_token claims when present,
 * otherwise from the access token; missing ids are fatal.
 */
function profileFromTokenResponse(payload) {
  const lifetimeMs = normalizeNumber(payload.expires_in) * 1e3;
  const expires = Date.now() + lifetimeMs;
  const accountId = extractChatGptAccountId(payload.id_token ?? "") ?? extractChatGptAccountId(payload.access_token);
  if (!accountId) {
    throw new Error("Failed to extract chatgpt_account_id from access token.");
  }
  return {
    access: payload.access_token,
    refresh: payload.refresh_token,
    expires,
    accountId,
    idToken: payload.id_token
  };
}
|
|
412
|
+
/**
 * Collapses the many accepted credential spellings (snake_case,
 * camelCase, short keys) into one profile shape.
 * Throws when access/refresh tokens or the account id are missing.
 */
function normalizeAuthProfile(data) {
  const access = data.access ?? data.access_token ?? data.accessToken ?? void 0;
  const refresh = data.refresh ?? data.refresh_token ?? data.refreshToken ?? void 0;
  if (!access || !refresh) {
    throw new Error("ChatGPT credentials must include access and refresh.");
  }
  const idToken = data.idToken ?? data.id_token ?? void 0;
  const expiresRaw = data.expires ?? data.expires_at ?? data.expiresAt;
  // Expiry: explicit value, then the JWT exp claim, then a conservative
  // 5-minute window so a refresh happens soon.
  const expires = normalizeEpochMillis(expiresRaw) ?? extractJwtExpiry(idToken ?? access) ?? Date.now() + 5 * 6e4;
  const accountId = data.accountId ?? data.account_id ?? extractChatGptAccountId(idToken ?? "") ?? extractChatGptAccountId(access);
  if (!accountId) {
    throw new Error("ChatGPT credentials missing chatgpt_account_id.");
  }
  return {
    access,
    refresh,
    expires,
    accountId,
    idToken: idToken ?? void 0
  };
}
|
|
433
|
+
/**
 * Interprets a numeric timestamp as epoch milliseconds.
 * Values below 1e12 are assumed to be in seconds and scaled up;
 * non-numeric input yields undefined.
 */
function normalizeEpochMillis(value) {
  const numeric = normalizeNumber(value);
  if (!Number.isFinite(numeric)) {
    return void 0;
  }
  const looksLikeSeconds = numeric < 1e12;
  return looksLikeSeconds ? numeric * 1e3 : numeric;
}

/**
 * Coerces finite numbers and numeric strings to a number;
 * everything else becomes NaN.
 */
function normalizeNumber(value) {
  if (typeof value === "number" && Number.isFinite(value)) {
    return value;
  }
  if (typeof value === "string") {
    const parsed = Number.parseFloat(value);
    if (Number.isFinite(parsed)) {
      return parsed;
    }
  }
  return Number.NaN;
}
|
|
452
|
+
/**
 * True when the profile's expiry is unknown, or falls within the
 * TOKEN_EXPIRY_BUFFER_MS safety window of the current time.
 */
function isExpired(profile) {
  const { expires } = profile;
  if (!Number.isFinite(expires)) {
    return true;
  }
  return Date.now() + TOKEN_EXPIRY_BUFFER_MS >= expires;
}
|
|
459
|
+
/**
 * Reads ChatGPT credentials from the environment (after loading
 * .env.local). Precedence: full JSON blob, then base64url JSON blob,
 * then individual variables (with their *_TOKEN / *_AT aliases).
 */
function loadAuthProfileFromEnv() {
  loadLocalEnv();
  const rawJson = process.env[CHATGPT_AUTH_JSON_ENV];
  if (rawJson && rawJson.trim().length > 0) {
    return normalizeAuthProfile(AuthInputSchema.parse(JSON.parse(rawJson)));
  }
  const rawB64 = process.env[CHATGPT_AUTH_JSON_B64_ENV];
  if (rawB64 && rawB64.trim().length > 0) {
    const decoded = Buffer.from(rawB64.trim(), "base64url").toString("utf8");
    return normalizeAuthProfile(AuthInputSchema.parse(JSON.parse(decoded)));
  }
  const fromEnv = (name) => process.env[name] ?? void 0;
  const parsed = AuthInputSchema.parse({
    access: fromEnv(CHATGPT_ACCESS_ENV) ?? fromEnv(CHATGPT_ACCESS_TOKEN_ENV),
    refresh: fromEnv(CHATGPT_REFRESH_ENV) ?? fromEnv(CHATGPT_REFRESH_TOKEN_ENV),
    expires: fromEnv(CHATGPT_EXPIRES_ENV) ?? fromEnv(CHATGPT_EXPIRES_AT_ENV),
    accountId: fromEnv(CHATGPT_ACCOUNT_ID_ENV),
    idToken: fromEnv(CHATGPT_ID_TOKEN_ENV)
  });
  return normalizeAuthProfile(parsed);
}
|
|
484
|
+
/**
 * Decodes a JWT's payload segment (no signature verification).
 * Returns the parsed claims object, or null for malformed tokens.
 */
function decodeJwtPayload(token) {
  const segments = token.split(".");
  if (segments.length < 2) {
    return null;
  }
  try {
    const json = Buffer.from(segments[1] ?? "", "base64url").toString("utf8");
    return JSON.parse(json);
  } catch {
    return null;
  }
}

/**
 * Extracts the `exp` claim as epoch milliseconds.
 * Claims below 1e12 are treated as seconds and scaled; invalid or
 * non-positive values yield undefined.
 */
function extractJwtExpiry(token) {
  const payload = decodeJwtPayload(token);
  if (!payload || typeof payload !== "object") {
    return void 0;
  }
  const parsed = normalizeNumber(payload.exp);
  if (!Number.isFinite(parsed) || parsed <= 0) {
    return void 0;
  }
  return parsed < 1e12 ? parsed * 1e3 : parsed;
}

/**
 * Reads the `chatgpt_account_id` claim from a JWT, or undefined
 * when the token is malformed or the claim is absent/empty.
 */
function extractChatGptAccountId(token) {
  const payload = decodeJwtPayload(token);
  if (!payload || typeof payload !== "object") {
    return void 0;
  }
  const accountId = payload.chatgpt_account_id;
  const valid = typeof accountId === "string" && accountId.length > 0;
  return valid ? accountId : void 0;
}
|
|
517
|
+
|
|
518
|
+
// src/openai/chatgpt-codex.ts
|
|
519
|
+
const CHATGPT_CODEX_ENDPOINT = "https://chatgpt.com/backend-api/codex/responses";

/**
 * POSTs a request to the ChatGPT Codex responses endpoint and returns
 * an async iterator over its parsed SSE events.
 * Throws on a non-2xx status (including the response body) or when the
 * reply carries no body to stream.
 */
async function streamChatGptCodexResponse(options) {
  const { access, accountId } = await getChatGptAuthProfile();
  const headers = {
    Authorization: `Bearer ${access}`,
    "chatgpt-account-id": accountId,
    "OpenAI-Beta": "responses=experimental",
    originator: "llm",
    "User-Agent": buildUserAgent(),
    Accept: "text/event-stream",
    "Content-Type": "application/json"
  };
  if (options.sessionId) {
    headers.session_id = options.sessionId;
  }
  const response = await fetch(CHATGPT_CODEX_ENDPOINT, {
    method: "POST",
    headers,
    body: JSON.stringify(options.request),
    signal: options.signal
  });
  if (!response.ok) {
    const errorBody = await response.text();
    throw new Error(`ChatGPT Codex request failed (${response.status}): ${errorBody}`);
  }
  const body = response.body;
  if (!body) {
    throw new Error("ChatGPT Codex response body was empty.");
  }
  return parseEventStream(body);
}
|
|
550
|
+
/**
 * Drains a Codex SSE stream into one aggregated result: final text,
 * reasoning summary, tool / web-search calls in arrival order, usage,
 * model, status, and a refusal flag. When provided, options.onDelta
 * receives incremental { textDelta } / { thoughtDelta } chunks.
 */
async function collectChatGptCodexResponse(options) {
  const stream = await streamChatGptCodexResponse(options);
  const toolCalls = /* @__PURE__ */ new Map();
  const toolCallOrder = [];
  const webSearchCalls = /* @__PURE__ */ new Map();
  const webSearchCallOrder = [];
  let text = "";
  const reasoningText = "";
  let reasoningSummaryText = "";
  let usage;
  let model;
  let status;
  let blocked = false;
  // Lifecycle events all carry a response snapshot; keep the latest.
  const captureResponse = (response) => {
    if (!response) {
      return;
    }
    usage = response.usage;
    model = typeof response.model === "string" ? response.model : void 0;
    status = typeof response.status === "string" ? response.status : void 0;
  };
  for await (const event of stream) {
    const type = typeof event.type === "string" ? event.type : void 0;
    switch (type) {
      case "response.output_text.delta": {
        const delta = typeof event.delta === "string" ? event.delta : "";
        if (delta.length > 0) {
          text += delta;
          options.onDelta?.({ textDelta: delta });
        }
        break;
      }
      case "response.reasoning_summary_text.delta": {
        const delta = typeof event.delta === "string" ? event.delta : "";
        if (delta.length > 0) {
          reasoningSummaryText += delta;
          options.onDelta?.({ thoughtDelta: delta });
        }
        break;
      }
      case "response.reasoning_text.delta":
        // Raw reasoning deltas are intentionally not accumulated.
        break;
      case "response.refusal.delta":
        blocked = true;
        break;
      case "response.output_item.added":
      case "response.output_item.done": {
        const item = event.item;
        if (!item) {
          break;
        }
        if (item.type === "function_call") {
          const id = typeof item.id === "string" ? item.id : "";
          const callId = typeof item.call_id === "string" ? item.call_id : id;
          const name = typeof item.name === "string" ? item.name : "";
          const args = typeof item.arguments === "string" ? item.arguments : "";
          if (callId) {
            if (!toolCalls.has(callId)) {
              toolCallOrder.push(callId);
            }
            // "done" overwrites the partial entry recorded at "added".
            toolCalls.set(callId, { id, callId, name, arguments: args });
          }
        } else if (item.type === "web_search_call") {
          const id = typeof item.id === "string" ? item.id : "";
          if (id) {
            if (!webSearchCalls.has(id)) {
              webSearchCallOrder.push(id);
            }
            webSearchCalls.set(id, {
              id,
              status: typeof item.status === "string" ? item.status : void 0,
              action: item.action && typeof item.action === "object" ? item.action : void 0
            });
          }
        }
        break;
      }
      case "response.completed":
      case "response.failed":
      case "response.in_progress":
        captureResponse(event.response);
        break;
      default:
        break;
    }
  }
  if (!reasoningSummaryText && reasoningText) {
    reasoningSummaryText = reasoningText;
  }
  const orderedToolCalls = toolCallOrder.map((id) => toolCalls.get(id)).filter((call) => call !== void 0);
  const orderedWebSearchCalls = webSearchCallOrder.map((id) => webSearchCalls.get(id)).filter((call) => call !== void 0);
  return {
    text,
    reasoningText,
    reasoningSummaryText,
    toolCalls: orderedToolCalls,
    webSearchCalls: orderedWebSearchCalls,
    usage,
    model,
    status,
    blocked
  };
}
|
|
662
|
+
/** Identifies this client to the API: package name plus Node/OS info. */
function buildUserAgent() {
  const node = process.version;
  const platform = os.platform();
  const release = os.release();
  return `@ljoukov/llm (node ${node}; ${platform} ${release})`;
}

/**
 * Parses a Server-Sent-Events byte stream into JSON event objects.
 *
 * Fixes over the naive implementation:
 * - event blocks are split on a blank line in BOTH LF ("\n\n") and CRLF
 *   ("\r\n\r\n") framing, as the SSE spec permits either; previously a
 *   CRLF-framed stream was never split and events were lost;
 * - the TextDecoder is flushed after the stream ends, so a multi-byte
 *   UTF-8 character split across the final chunk boundary is not dropped;
 * - the reader's lock is released even when the consumer abandons the
 *   iterator early or an error is thrown.
 */
async function* parseEventStream(stream) {
  const reader = stream.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  // Blank line between events; tolerates LF, CRLF, and mixed framing.
  const separator = /\r?\n\r?\n/;
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) {
        break;
      }
      buffer += decoder.decode(value, { stream: true });
      let match = separator.exec(buffer);
      while (match) {
        const raw = buffer.slice(0, match.index);
        buffer = buffer.slice(match.index + match[0].length);
        const event = parseEventBlock(raw);
        if (event) {
          yield event;
        }
        match = separator.exec(buffer);
      }
    }
    // Flush bytes the decoder may still hold (split UTF-8 sequence).
    buffer += decoder.decode();
    if (buffer.trim().length > 0) {
      const event = parseEventBlock(buffer);
      if (event) {
        yield event;
      }
    }
  } finally {
    reader.releaseLock();
  }
}

/**
 * Parses one SSE event block: concatenates its `data:` lines and
 * JSON-parses the payload. Returns null for blocks without data lines,
 * the "[DONE]" sentinel, or invalid JSON.
 */
function parseEventBlock(raw) {
  const lines = raw.split(/\r?\n/u).map((line) => line.trimEnd()).filter(Boolean);
  const dataLines = lines.filter((line) => line.startsWith("data:")).map((line) => line.slice("data:".length).trimStart());
  if (dataLines.length === 0) {
    return null;
  }
  const payload = dataLines.join("\n");
  if (payload === "[DONE]") {
    return null;
  }
  try {
    return JSON.parse(payload);
  } catch {
    return null;
  }
}
|
|
712
|
+
|
|
713
|
+
// src/utils/scheduler.ts
|
|
714
|
+
/** Promise-based delay. */
function sleep(ms) {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}

/** Coerces an arbitrary thrown value into an Error instance. */
function toError(value) {
  if (value instanceof Error) {
    return value;
  }
  return typeof value === "string" ? new Error(value) : new Error("Unknown error");
}

/**
 * Creates a scheduler that caps parallel calls, spaces out call starts,
 * optionally jitters them, and retries failures per the supplied policy.
 *
 * options:
 *   maxParallelRequests       - concurrency cap (default 3, minimum 1)
 *   minIntervalBetweenStartMs - minimum gap between consecutive starts
 *   startJitterMs             - random extra delay before each start
 *   retry                     - { maxAttempts, getDelayMs(attempt, error) };
 *                               getDelayMs returning null stops retrying
 */
function createCallScheduler(options = {}) {
  const maxParallelRequests = Math.max(1, Math.floor(options.maxParallelRequests ?? 3));
  const minIntervalBetweenStartMs = Math.max(0, Math.floor(options.minIntervalBetweenStartMs ?? 0));
  const startJitterMs = Math.max(0, Math.floor(options.startJitterMs ?? 0));
  const retryPolicy = options.retry;
  let activeCount = 0;
  let lastStartTime = 0;
  let startSpacingChain = Promise.resolve();
  const queue = [];

  // Serializes call starts so spacing and jitter apply one at a time.
  async function applyStartSpacing() {
    const previous = startSpacingChain;
    let release;
    startSpacingChain = new Promise((resolve) => {
      release = resolve;
    });
    await previous;
    try {
      if (lastStartTime > 0 && minIntervalBetweenStartMs > 0) {
        const earliestNext = lastStartTime + minIntervalBetweenStartMs;
        const wait = Math.max(0, earliestNext - Date.now());
        if (wait > 0) {
          await sleep(wait);
        }
      }
      if (startJitterMs > 0) {
        await sleep(Math.floor(Math.random() * (startJitterMs + 1)));
      }
      lastStartTime = Date.now();
    } finally {
      release?.();
    }
  }

  // Runs fn once; on failure consults the retry policy and recurses.
  async function attemptWithRetries(fn, attempt) {
    try {
      await applyStartSpacing();
      return await fn();
    } catch (error) {
      const err = toError(error);
      const canRetry = retryPolicy && attempt < retryPolicy.maxAttempts;
      if (!canRetry) {
        throw err;
      }
      let delay = retryPolicy.getDelayMs(attempt, error);
      if (delay === null) {
        // Policy declared this failure non-retryable.
        throw err;
      }
      if (!Number.isFinite(delay)) {
        delay = 0;
      }
      const normalizedDelay = Math.max(0, delay);
      if (normalizedDelay > 0) {
        await sleep(normalizedDelay);
      }
      return attemptWithRetries(fn, attempt + 1);
    }
  }

  // Launches queued jobs while capacity remains.
  function drainQueue() {
    while (activeCount < maxParallelRequests && queue.length > 0) {
      const task = queue.shift();
      if (!task) {
        continue;
      }
      activeCount += 1;
      void task();
    }
  }

  // Queues fn and resolves/rejects with its final (post-retry) outcome.
  function run(fn) {
    return new Promise((resolve, reject) => {
      const job = async () => {
        try {
          resolve(await attemptWithRetries(fn, 1));
        } catch (error) {
          reject(toError(error));
        } finally {
          activeCount -= 1;
          queueMicrotask(drainQueue);
        }
      };
      queue.push(job);
      drainQueue();
    });
  }

  return { run };
}
|
|
812
|
+
|
|
813
|
+
// src/google/client.ts
|
|
814
|
+
import { GoogleGenAI } from "@google/genai";
|
|
815
|
+
|
|
816
|
+
// src/google/auth.ts
|
|
817
|
+
import { GoogleAuth } from "google-auth-library";
|
|
818
|
+
import { z as z2 } from "zod";
|
|
819
|
+
// Zod schema for the Google service-account key file. Validates the raw
// snake_case JSON and maps it to a camelCase record; `\n` escape sequences in
// the private key (common when the JSON lives in an env var) are expanded
// into real newlines.
var ServiceAccountSchema = z2.object({
  project_id: z2.string().min(1),
  client_email: z2.email(),
  private_key: z2.string().min(1),
  token_uri: z2.string().optional()
}).transform((raw) => {
  return {
    projectId: raw.project_id,
    clientEmail: raw.client_email,
    privateKey: raw.private_key.replace(/\\n/g, "\n"),
    tokenUri: raw.token_uri
  };
});
// Memoised parsed service account so the env var is parsed at most once.
var cachedServiceAccount = null;
|
|
831
|
+
// Parses a Google service-account JSON string and validates/normalises it via
// ServiceAccountSchema. Throws a descriptive Error when the payload is not
// valid JSON; schema violations propagate as ZodError.
function parseGoogleServiceAccount(input) {
  let parsed;
  try {
    parsed = JSON.parse(input);
  } catch (error) {
    // The catch binding is untyped: guard the `.message` access and preserve
    // the original failure as `cause` instead of discarding it.
    const reason = error instanceof Error ? error.message : String(error);
    throw new Error(`Invalid Google service account JSON: ${reason}`, { cause: error });
  }
  return ServiceAccountSchema.parse(parsed);
}
|
|
840
|
+
// Returns the parsed service account, loading and validating the
// GOOGLE_SERVICE_ACCOUNT_JSON env var on first use. Throws when it is unset
// or blank.
function getGoogleServiceAccount() {
  if (!cachedServiceAccount) {
    loadLocalEnv();
    const raw = process.env.GOOGLE_SERVICE_ACCOUNT_JSON;
    if (!raw || raw.trim().length === 0) {
      throw new Error("GOOGLE_SERVICE_ACCOUNT_JSON must be provided for Google APIs access.");
    }
    cachedServiceAccount = parseGoogleServiceAccount(raw);
  }
  return cachedServiceAccount;
}
|
|
852
|
+
// Normalises an OAuth scope argument: a single string becomes a one-element
// array; arrays are deduplicated and sorted; falsy values and empty arrays
// yield undefined.
function normaliseScopes(scopes) {
  if (!scopes) {
    return void 0;
  }
  if (typeof scopes === "string") {
    return [scopes];
  }
  return scopes.length > 0 ? [...new Set(scopes)].sort() : void 0;
}
|
|
864
|
+
// Builds google-auth-library options from the cached service account plus the
// (normalised) requested scopes.
function getGoogleAuthOptions(scopes) {
  const account = getGoogleServiceAccount();
  const normalisedScopes = normaliseScopes(scopes);
  return {
    credentials: {
      client_email: account.clientEmail,
      private_key: account.privateKey
    },
    projectId: account.projectId,
    scopes: normalisedScopes
  };
}
|
|
877
|
+
|
|
878
|
+
// src/google/client.ts
|
|
879
|
+
// Model identifiers this package recognises as Gemini (Vertex AI) models.
var GEMINI_MODEL_IDS = [
  "gemini-3-pro-preview",
  "gemini-2.5-pro",
  "gemini-flash-latest",
  "gemini-flash-lite-latest"
];
// Guard: true when `value` is one of the known Gemini model ids.
function isGeminiModelId(value) {
  return GEMINI_MODEL_IDS.some((id) => id === value);
}
|
|
888
|
+
// OAuth scope required for Vertex AI access.
var CLOUD_PLATFORM_SCOPE = "https://www.googleapis.com/auth/cloud-platform";
// Default Vertex AI location used when none is configured.
var DEFAULT_VERTEX_LOCATION = "global";
// Overrides set through configureGemini(); empty object means "use defaults".
var geminiConfiguration = {};
// Lazily-created GoogleGenAI client promise; cleared on reconfiguration.
var clientPromise;
|
|
892
|
+
// Trims a configuration string; returns undefined for null/undefined input or
// a value that is empty after trimming.
function normaliseConfigValue(value) {
  if (value == null) {
    return void 0;
  }
  const trimmed = value.trim();
  return trimmed.length === 0 ? void 0 : trimmed;
}
|
|
899
|
+
// Updates Gemini project/location overrides. Only provided, non-blank values
// replace the current configuration; the cached client is discarded so the
// next call rebuilds it with the new settings.
function configureGemini(options = {}) {
  const projectOverride = normaliseConfigValue(options.projectId);
  const locationOverride = normaliseConfigValue(options.location);
  geminiConfiguration = {
    projectId: projectOverride === void 0 ? geminiConfiguration.projectId : projectOverride,
    location: locationOverride === void 0 ? geminiConfiguration.location : locationOverride
  };
  clientPromise = void 0;
}
|
|
908
|
+
// Resolves the GCP project id: configured override first, otherwise the
// service account's own project.
function resolveProjectId() {
  const configured = geminiConfiguration.projectId;
  return configured ? configured : getGoogleServiceAccount().projectId;
}
|
|
916
|
+
// Resolves the Vertex AI location: configured override first, otherwise the
// package default ("global").
function resolveLocation() {
  const configured = geminiConfiguration.location;
  return configured ? configured : DEFAULT_VERTEX_LOCATION;
}
|
|
923
|
+
// Lazily constructs (and memoises) the Vertex AI GoogleGenAI client using the
// resolved project, location and service-account credentials.
async function getGeminiClient() {
  if (!clientPromise) {
    clientPromise = Promise.resolve().then(() => {
      const project = resolveProjectId();
      const location = resolveLocation();
      const googleAuthOptions = getGoogleAuthOptions(CLOUD_PLATFORM_SCOPE);
      return new GoogleGenAI({
        vertexai: true,
        project,
        location,
        googleAuthOptions
      });
    });
  }
  return clientPromise;
}
|
|
939
|
+
|
|
940
|
+
// src/google/calls.ts
|
|
941
|
+
// HTTP statuses treated as transient (timeouts, throttling, 5xx).
var RETRYABLE_STATUSES = /* @__PURE__ */ new Set([408, 425, 429, 500, 502, 503, 504]);
// Node-level network error codes that typically succeed on retry.
var RETRYABLE_ERROR_CODES = /* @__PURE__ */ new Set(["ECONNRESET", "ETIMEDOUT", "EAI_AGAIN"]);
// Error reasons indicating rate limiting / quota throttling.
var RATE_LIMIT_REASONS = /* @__PURE__ */ new Set(["RATE_LIMIT_EXCEEDED", "RESOURCE_EXHAUSTED", "QUOTA_EXCEEDED"]);
|
|
944
|
+
// Extracts a numeric HTTP status from an error-like value, checking `status`,
// `statusCode` and `response.status` (string values are coerced), and finally
// a numeric `code`. Returns undefined when nothing matches.
function getStatus(error) {
  const source = error;
  const candidates = [source?.status, source?.statusCode, source?.response?.status];
  for (const candidate of candidates) {
    if (typeof candidate === "number") {
      return candidate;
    }
    if (typeof candidate === "string") {
      const numeric = Number(candidate);
      if (!Number.isNaN(numeric)) {
        return numeric;
      }
    }
  }
  return typeof source?.code === "number" ? source.code : void 0;
}
|
|
963
|
+
// Returns a string error code from the error itself or its `cause`, or
// undefined when none is present.
function getErrorCode(error) {
  if (!error || typeof error !== "object") {
    return void 0;
  }
  const candidate = error;
  if (typeof candidate.code === "string") {
    return candidate.code;
  }
  const cause = candidate.cause;
  if (cause && typeof cause === "object" && typeof cause.code === "string") {
    return cause.code;
  }
  return void 0;
}
|
|
979
|
+
// Returns the `reason` string from the first `errorDetails` entry of the
// error, falling back to the same field on its `cause`. Undefined otherwise.
function getErrorReason(error) {
  if (!error || typeof error !== "object") {
    return void 0;
  }
  const firstReason = (details) => {
    if (Array.isArray(details) && details.length > 0) {
      const reason = details[0].reason;
      if (typeof reason === "string") {
        return reason;
      }
    }
    return void 0;
  };
  const direct = firstReason(error.errorDetails);
  if (direct !== void 0) {
    return direct;
  }
  const cause = error.cause;
  if (cause && typeof cause === "object") {
    return firstReason(cause.errorDetails);
  }
  return void 0;
}
|
|
1002
|
+
// Best-effort message extraction: Error instances yield their message, bare
// strings are returned as-is, anything else yields the empty string.
function getErrorMessage(error) {
  if (error instanceof Error) {
    return error.message;
  }
  return typeof error === "string" ? error : "";
}
|
|
1011
|
+
// Recursively searches an error-details payload for a google.rpc RetryInfo
// style `retryDelay` ({seconds, nanos}, numbers or numeric strings) and
// returns the delay in milliseconds, or undefined when absent/non-positive.
function parseRetryInfo(details) {
  if (Array.isArray(details)) {
    for (const entry of details) {
      const found = parseRetryInfo(entry);
      if (found !== void 0) {
        return found;
      }
    }
    return void 0;
  }
  if (!details || typeof details !== "object") {
    return void 0;
  }
  const retryDelay = details.retryDelay;
  if (retryDelay) {
    const toNumber = (raw, parse) => {
      if (typeof raw === "number") {
        return raw;
      }
      return typeof raw === "string" ? parse(raw) : 0;
    };
    const seconds = toNumber(retryDelay.seconds, (s) => Number.parseFloat(s));
    const nanos = toNumber(retryDelay.nanos, (s) => Number.parseInt(s, 10));
    if (Number.isFinite(seconds) || Number.isFinite(nanos)) {
      const totalMs = seconds * 1e3 + nanos / 1e6;
      if (totalMs > 0) {
        return totalMs;
      }
    }
  }
  // Some payloads nest the retry info one level deeper under `details`.
  const nested = details.details;
  if (nested) {
    const nestedMs = parseRetryInfo(nested);
    if (nestedMs !== void 0) {
      return nestedMs;
    }
  }
  return void 0;
}
|
|
1046
|
+
// Parses hints like "retry in 5s" / "retry in 2.5 seconds" out of an error
// message, returning the delay in milliseconds, or undefined when absent.
function parseRetryAfterFromMessage(message) {
  const trimmed = message.trim();
  if (!trimmed) {
    return void 0;
  }
  // FIX: the previous pattern used double backslashes (`\\s`, `\\.`) inside a
  // regex LITERAL, which matches a literal backslash character, so the hint
  // was never detected. Single escapes are the intended form (matching the
  // regex style used elsewhere in this file).
  const regex = /retry in\s+([0-9]+(?:\.[0-9]+)?)\s*(s|sec|secs|seconds?)/iu;
  const match = regex.exec(trimmed);
  if (match?.[1]) {
    const value = Number.parseFloat(match[1]);
    if (Number.isFinite(value) && value > 0) {
      return value * 1e3;
    }
  }
  return void 0;
}
|
|
1061
|
+
// Determines a server-suggested retry delay (ms) for an error: structured
// RetryInfo in `errorDetails` first, then the error's `cause` (recursively),
// then a textual "retry in Ns" hint in the message. Undefined when no hint.
function getRetryAfterMs(error) {
  if (!error || typeof error !== "object") {
    return void 0;
  }
  const fromDetails = parseRetryInfo(error.errorDetails);
  if (fromDetails !== void 0) {
    return fromDetails;
  }
  const cause = error.cause;
  if (cause && typeof cause === "object") {
    const fromCause = getRetryAfterMs(cause);
    if (fromCause !== void 0) {
      return fromCause;
    }
  }
  const message = getErrorMessage(error);
  if (message) {
    const fromMessage = parseRetryAfterFromMessage(message.toLowerCase());
    if (fromMessage !== void 0) {
      return fromMessage;
    }
  }
  return void 0;
}
|
|
1085
|
+
// Heuristic: decides whether a failed Gemini call is worth retrying.
// NOTE: the checks are order-sensitive. Status/reason/code checks run first,
// so a throttling signal wins even when the message also mentions "quota";
// the "quota"/"insufficient" message check below deliberately rejects only
// errors that matched none of the stronger retryable signals.
function shouldRetry(error) {
  const status = getStatus(error);
  if (status && RETRYABLE_STATUSES.has(status)) {
    return true;
  }
  const reason = getErrorReason(error);
  if (reason && RATE_LIMIT_REASONS.has(reason)) {
    return true;
  }
  const code = getErrorCode(error);
  if (code && RETRYABLE_ERROR_CODES.has(code)) {
    return true;
  }
  const message = getErrorMessage(error).toLowerCase();
  if (message.includes("rate limit") || message.includes("temporarily unavailable")) {
    return true;
  }
  if (message.includes("fetch failed") || message.includes("socket hang up")) {
    return true;
  }
  // Hard quota / entitlement failures will not recover on retry.
  if (message.includes("quota") || message.includes("insufficient")) {
    return false;
  }
  if (message.includes("timeout") || message.includes("network")) {
    return true;
  }
  return false;
}
|
|
1113
|
+
// Exponential backoff for retry `attempt` (1-based): 500ms base doubling per
// attempt, capped at 4s, plus up to 200ms of random jitter.
function retryDelayMs(attempt) {
  const BASE_MS = 500;
  const CAP_MS = 4e3;
  const backoff = Math.min(CAP_MS, BASE_MS * 2 ** (attempt - 1));
  return backoff + Math.floor(Math.random() * 200);
}
|
|
1120
|
+
// Shared scheduler for all Gemini calls: at most 3 in flight, new calls start
// at least 200ms apart with up to 200ms extra jitter; transient failures are
// retried up to 3 attempts, preferring the server's retry-after hint over the
// local exponential backoff.
var scheduler = createCallScheduler({
  maxParallelRequests: 3,
  minIntervalBetweenStartMs: 200,
  startJitterMs: 200,
  retry: {
    maxAttempts: 3,
    getDelayMs: (attempt, error) => {
      // Returning null vetoes the retry entirely.
      if (!shouldRetry(error)) {
        return null;
      }
      const hintedDelay = getRetryAfterMs(error);
      return hintedDelay ?? retryDelayMs(attempt);
    }
  }
});
|
|
1135
|
+
// Runs `fn` against the shared Gemini client through the throttled/retrying
// scheduler.
async function runGeminiCall(fn) {
  return scheduler.run(async () => {
    const client = await getGeminiClient();
    return fn(client);
  });
}
|
|
1138
|
+
|
|
1139
|
+
// src/openai/client.ts
|
|
1140
|
+
import OpenAI from "openai";
|
|
1141
|
+
import { Agent, fetch as undiciFetch } from "undici";
|
|
1142
|
+
// Memoised OpenAI credentials, client, fetch implementation and timeout so
// env parsing and dispatcher setup happen once per process.
var cachedApiKey = null;
var cachedClient = null;
var cachedFetch = null;
var cachedTimeoutMs = null;
// Default request/stream timeout: 15 minutes (15 * 60_000 ms).
var DEFAULT_OPENAI_TIMEOUT_MS = 15 * 6e4;
|
|
1147
|
+
// Resolves the OpenAI request timeout (ms) from OPENAI_STREAM_TIMEOUT_MS or
// OPENAI_TIMEOUT_MS; falls back to the 15-minute default for missing or
// non-positive values. Result is memoised.
function resolveOpenAiTimeoutMs() {
  if (cachedTimeoutMs === null) {
    const raw = process.env.OPENAI_STREAM_TIMEOUT_MS ?? process.env.OPENAI_TIMEOUT_MS;
    const parsed = raw ? Number(raw) : Number.NaN;
    cachedTimeoutMs = Number.isFinite(parsed) && parsed > 0 ? parsed : DEFAULT_OPENAI_TIMEOUT_MS;
  }
  return cachedTimeoutMs;
}
|
|
1156
|
+
// Returns a memoised fetch implementation backed by an undici Agent whose
// header and body timeouts match the resolved OpenAI timeout, so stalled
// streaming responses are aborted rather than hanging indefinitely.
function getOpenAiFetch() {
  if (!cachedFetch) {
    const timeoutMs = resolveOpenAiTimeoutMs();
    const dispatcher = new Agent({
      bodyTimeout: timeoutMs,
      headersTimeout: timeoutMs
    });
    cachedFetch = (input, init) => undiciFetch(input, { ...init ?? {}, dispatcher });
  }
  return cachedFetch;
}
|
|
1173
|
+
// Returns the trimmed OPENAI_API_KEY, loading local env on first use; throws
// when the key is unset or blank. Memoised.
function getOpenAiApiKey() {
  if (cachedApiKey === null) {
    loadLocalEnv();
    const value = process.env.OPENAI_API_KEY?.trim();
    if (!value) {
      throw new Error("OPENAI_API_KEY must be provided to access OpenAI APIs.");
    }
    cachedApiKey = value;
  }
  return cachedApiKey;
}
|
|
1186
|
+
// Lazily constructs (and memoises) the OpenAI client with the custom undici
// fetch and the resolved timeout.
function getOpenAiClient() {
  if (!cachedClient) {
    const apiKey = getOpenAiApiKey();
    cachedClient = new OpenAI({
      apiKey,
      fetch: getOpenAiFetch(),
      timeout: resolveOpenAiTimeoutMs()
    });
  }
  return cachedClient;
}
|
|
1199
|
+
|
|
1200
|
+
// src/openai/calls.ts
|
|
1201
|
+
// Reasoning effort used when the caller does not specify one.
var DEFAULT_OPENAI_REASONING_EFFORT = "medium";
// Shared scheduler for OpenAI calls: at most 3 in flight, starts spaced by
// 200ms plus up to 200ms jitter. No retry policy is configured here.
var scheduler2 = createCallScheduler({
  maxParallelRequests: 3,
  minIntervalBetweenStartMs: 200,
  startJitterMs: 200
});
|
|
1207
|
+
// Runs `fn` against the shared OpenAI client through the throttled scheduler.
async function runOpenAiCall(fn) {
  return scheduler2.run(async () => {
    return fn(getOpenAiClient());
  });
}
|
|
1210
|
+
|
|
1211
|
+
// src/llm.ts
|
|
1212
|
+
// Async-local storage carrying the per-call tool context across await points.
var toolCallContextStorage = new AsyncLocalStorage();
// Returns the active tool-call context, or null when no tool run is active.
function getCurrentToolCallContext() {
  const store = toolCallContextStorage.getStore();
  return store === void 0 ? null : store;
}
|
|
1216
|
+
// Error raised when repeated attempts to obtain valid JSON from the model
// fail; `attempts` records how many calls were made before giving up.
var LlmJsonCallError = class extends Error {
  constructor(message, attempts) {
    super(message);
    this.name = "LlmJsonCallError";
    this.attempts = attempts;
  }
};
|
|
1223
|
+
// Identity helper: exists purely so tool declarations get type inference at
// the call site; returns its argument unchanged.
function tool(options) {
  return options;
}
|
|
1226
|
+
// True for non-null, non-array objects (i.e. plain record-like values).
function isPlainRecord(value) {
  if (value === null || Array.isArray(value)) {
    return false;
  }
  return typeof value === "object";
}
|
|
1229
|
+
// Mutates `schema` in place so that it also accepts `null`: appends a
// {type:"null"} branch to an existing anyOf, widens a string `type` into an
// array containing "null", normalises an array `type`, and defaults a missing
// type to ["null"].
function applyNullableJsonSchema(schema) {
  const { anyOf } = schema;
  if (Array.isArray(anyOf)) {
    const hasNullBranch = anyOf.some(
      (entry) => typeof entry === "object" && entry !== null && !Array.isArray(entry) && entry.type === "null"
    );
    if (!hasNullBranch) {
      anyOf.push({ type: "null" });
    }
    return;
  }
  const currentType = schema.type;
  if (typeof currentType === "string") {
    schema.type = currentType === "null" ? "null" : [currentType, "null"];
    return;
  }
  if (Array.isArray(currentType)) {
    const strings = currentType.filter((entry) => typeof entry === "string");
    schema.type = strings.includes("null") ? strings : [...strings, "null"];
    return;
  }
  schema.type = ["null"];
}
|
|
1253
|
+
// Returns property keys honouring an optional explicit ordering: listed keys
// that exist come first (in ordering order), the remaining keys follow in
// their natural Object.keys order.
function orderedJsonSchemaKeys(properties, ordering) {
  const allKeys = Object.keys(properties);
  if (!ordering || ordering.length === 0) {
    return allKeys;
  }
  const placed = new Set();
  const result = [];
  for (const key of ordering) {
    if (Object.hasOwn(properties, key)) {
      result.push(key);
      placed.add(key);
    }
  }
  for (const key of allKeys) {
    if (!placed.has(key)) {
      result.push(key);
    }
  }
  return result;
}
|
|
1273
|
+
// Recursively rewrites a JSON schema for Gemini: copies every node, adds a
// `propertyOrdering` array alongside `properties` (Gemini uses it to keep
// output key order stable), and expands `nullable` flags into null-accepting
// type declarations. `$ref` nodes are reduced to a bare reference.
function addGeminiPropertyOrdering(schema) {
  if (!isPlainRecord(schema)) {
    return schema;
  }
  if (typeof schema.$ref === "string") {
    // Strip any sibling keys next to $ref; only the reference survives.
    return { $ref: schema.$ref };
  }
  const output = {};
  for (const [key, value] of Object.entries(schema)) {
    if (key === "properties") {
      // Handled after the loop so propertyOrdering can be derived.
      continue;
    }
    if (key === "items") {
      output.items = isPlainRecord(value) ? addGeminiPropertyOrdering(value) : value;
      continue;
    }
    if (key === "anyOf" || key === "oneOf") {
      output[key] = Array.isArray(value) ? value.map((entry) => addGeminiPropertyOrdering(entry)) : value;
      continue;
    }
    if (key === "$defs" && isPlainRecord(value)) {
      const defs = {};
      for (const [defKey, defValue] of Object.entries(value)) {
        if (isPlainRecord(defValue)) {
          defs[defKey] = addGeminiPropertyOrdering(defValue);
        }
      }
      output.$defs = defs;
      continue;
    }
    // Any other keyword is copied through untouched.
    output[key] = value;
  }
  const propertiesRaw = schema.properties;
  if (isPlainRecord(propertiesRaw)) {
    const properties = {};
    for (const [key, value] of Object.entries(propertiesRaw)) {
      properties[key] = isPlainRecord(value) ? addGeminiPropertyOrdering(value) : value;
    }
    output.properties = properties;
    // Declaration order of the properties object becomes the ordering hint.
    output.propertyOrdering = Object.keys(properties);
  }
  if (schema.nullable) {
    // OpenAPI-style `nullable: true` → JSON-schema null-accepting type.
    applyNullableJsonSchema(output);
  }
  return output;
}
|
|
1319
|
+
// Recursively rewrites a JSON schema into the shape OpenAI structured outputs
// require: every object schema gets `additionalProperties: false` and lists
// ALL of its properties in `required` (ordered by any `propertyOrdering`
// hint); `oneOf` is folded into `anyOf`; exclusiveMinimum/Maximum are
// normalised from the draft-4 boolean form to the numeric form.
function normalizeOpenAiSchema(schema) {
  if (!isPlainRecord(schema)) {
    return schema;
  }
  if (typeof schema.$ref === "string") {
    // Strip any sibling keys next to $ref; only the reference survives.
    return { $ref: schema.$ref };
  }
  const output = {};
  for (const [key, value] of Object.entries(schema)) {
    if (key === "properties") {
      // Rebuilt below with ordering applied.
      continue;
    }
    if (key === "required") {
      // Recomputed below: OpenAI requires every property to be listed.
      continue;
    }
    if (key === "additionalProperties") {
      // Forced to false below for object schemas.
      continue;
    }
    if (key === "propertyOrdering") {
      // Gemini-only hint; consumed for ordering, not emitted.
      continue;
    }
    if (key === "items") {
      if (isPlainRecord(value)) {
        output.items = normalizeOpenAiSchema(value);
      }
      continue;
    }
    if (key === "anyOf" || key === "oneOf") {
      // Both variants are emitted as anyOf.
      if (Array.isArray(value)) {
        output.anyOf = value.map((entry) => normalizeOpenAiSchema(entry));
      }
      continue;
    }
    if (key === "$defs" && isPlainRecord(value)) {
      const defs = {};
      for (const [defKey, defValue] of Object.entries(value)) {
        if (isPlainRecord(defValue)) {
          defs[defKey] = normalizeOpenAiSchema(defValue);
        }
      }
      output.$defs = defs;
      continue;
    }
    output[key] = value;
  }
  const propertiesRaw = schema.properties;
  if (isPlainRecord(propertiesRaw)) {
    const ordering = Array.isArray(schema.propertyOrdering) ? schema.propertyOrdering : void 0;
    const orderedKeys = orderedJsonSchemaKeys(propertiesRaw, ordering);
    const properties = {};
    for (const key of orderedKeys) {
      const value = propertiesRaw[key];
      if (!isPlainRecord(value)) {
        properties[key] = value;
        continue;
      }
      properties[key] = normalizeOpenAiSchema(value);
    }
    output.properties = properties;
    // OpenAI structured outputs: all properties required, no extras allowed.
    output.required = orderedKeys;
    output.additionalProperties = false;
  }
  const schemaType = schema.type;
  if (output.additionalProperties === void 0 && (schemaType === "object" || Array.isArray(schemaType) && schemaType.includes("object"))) {
    // Object schema without a properties map still needs the strict markers.
    output.additionalProperties = false;
    if (!Array.isArray(output.required)) {
      output.required = [];
    }
  }
  // Draft-4 exclusive bounds are booleans modifying minimum/maximum; newer
  // drafts (and OpenAI) expect exclusiveMinimum/Maximum to be numbers.
  const normalizeExclusiveBound = (options) => {
    const exclusiveValue = output[options.exclusiveKey];
    if (exclusiveValue === false) {
      // `exclusive: false` is the default — drop the flag.
      delete output[options.exclusiveKey];
      return;
    }
    const inclusiveValue = output[options.inclusiveKey];
    if (exclusiveValue === true) {
      if (typeof inclusiveValue === "number" && Number.isFinite(inclusiveValue)) {
        // Move the numeric bound onto the exclusive keyword.
        output[options.exclusiveKey] = inclusiveValue;
        delete output[options.inclusiveKey];
      } else {
        // Boolean flag without a bound is meaningless — drop it.
        delete output[options.exclusiveKey];
      }
      return;
    }
    if (typeof exclusiveValue === "number" && Number.isFinite(exclusiveValue)) {
      // Numeric exclusive bound supersedes the inclusive one.
      delete output[options.inclusiveKey];
    }
  };
  normalizeExclusiveBound({
    exclusiveKey: "exclusiveMinimum",
    inclusiveKey: "minimum"
  });
  normalizeExclusiveBound({
    exclusiveKey: "exclusiveMaximum",
    inclusiveKey: "maximum"
  });
  return output;
}
|
|
1418
|
+
// When a schema's root is just a $ref into its own #/definitions or #/$defs,
// returns a shallow copy of the referenced definition; otherwise returns the
// schema unchanged.
function resolveOpenAiSchemaRoot(schema) {
  const isRecord = (value) => typeof value === "object" && value !== null && !Array.isArray(value);
  if (!isRecord(schema) || typeof schema.$ref !== "string") {
    return schema;
  }
  const refMatch = /^#\/(definitions|[$]defs)\/(.+)$/u.exec(schema.$ref);
  const section = refMatch?.[1];
  const key = refMatch?.[2];
  if (!section || !key) {
    return schema;
  }
  const defsSource = section === "definitions" ? schema.definitions : schema.$defs;
  if (!isRecord(defsSource)) {
    return schema;
  }
  const target = defsSource[key];
  return isRecord(target) ? { ...target } : schema;
}
|
|
1444
|
+
// Converts a Zod schema into a Gemini-ready JSON schema: emit draft-7,
// resolve a root-level $ref, then add propertyOrdering / nullable handling.
function toGeminiJsonSchema(schema, options) {
  const jsonSchema = zodToJsonSchema(schema, {
    name: options?.name,
    target: "jsonSchema7"
  });
  const rootSchema = resolveOpenAiSchemaRoot(jsonSchema);
  return addGeminiPropertyOrdering(rootSchema);
}
|
|
1451
|
+
// True when the schema describes an object: explicit type "object" (directly
// or within a type array), or the presence of a properties map.
function isJsonSchemaObject(schema) {
  const isRecord = (value) => typeof value === "object" && value !== null && !Array.isArray(value);
  if (!schema || !isRecord(schema)) {
    return false;
  }
  const { type } = schema;
  if (type === "object") {
    return true;
  }
  if (Array.isArray(type) && type.includes("object")) {
    return true;
  }
  return isRecord(schema.properties);
}
|
|
1467
|
+
// Produces a log-safe view of a content part: text is truncated to a
// 200-character preview; inline data is replaced by its decoded byte count
// (falling back to the UTF-8 length when the payload is not valid base64).
function sanitisePartForLogging(part) {
  if (part.type === "text") {
    return {
      type: "text",
      thought: part.thought === true ? true : void 0,
      preview: part.text.slice(0, 200)
    };
  }
  if (part.type === "inlineData") {
    let omittedBytes;
    try {
      omittedBytes = Buffer2.from(part.data, "base64").byteLength;
    } catch {
      omittedBytes = Buffer2.byteLength(part.data, "utf8");
    }
    return {
      type: "inlineData",
      mimeType: part.mimeType,
      data: `[omitted:${omittedBytes}b]`
    };
  }
  return "[unknown part]";
}
|
|
1492
|
+
// Converts Google GenAI response parts into this package's part shape. Text
// parts keep their thought flag; inline data keeps data/mimeType; fileData
// parts are rejected; anything else is silently dropped.
function convertGooglePartsToLlmParts(parts) {
  const converted = [];
  for (const part of parts) {
    if (part.text !== void 0) {
      converted.push({
        type: "text",
        text: part.text,
        thought: part.thought ? true : void 0
      });
    } else if (part.inlineData?.data) {
      converted.push({
        type: "inlineData",
        data: part.inlineData.data,
        mimeType: part.inlineData.mimeType
      });
    } else if (part.fileData?.fileUri) {
      throw new Error("fileData parts are not supported");
    }
  }
  return converted;
}
|
|
1518
|
+
// Validates that `value` is one of the supported conversation roles; throws
// for anything else.
function assertLlmRole(value) {
  if (value === "user" || value === "model" || value === "system" || value === "tool") {
    return value;
  }
  throw new Error(`Unsupported LLM role: ${String(value)}`);
}
|
|
1529
|
+
// Maps a Gemini Content object to this package's content shape, validating
// the role and converting all parts (missing parts become an empty list).
function convertGeminiContentToLlmContent(content) {
  const role = assertLlmRole(content.role);
  const parts = convertGooglePartsToLlmParts(content.parts ?? []);
  return { role, parts };
}
|
|
1535
|
+
// Converts one of this package's content parts into the Gemini Part shape;
// throws for unknown part types.
function toGeminiPart(part) {
  if (part.type === "text") {
    return {
      text: part.text,
      thought: part.thought === true ? true : void 0
    };
  }
  if (part.type === "inlineData") {
    return {
      inlineData: {
        data: part.data,
        mimeType: part.mimeType
      }
    };
  }
  throw new Error("Unsupported LLM content part");
}
|
|
1553
|
+
// Maps this package's content shape to a Gemini Content object by converting
// each part; the role passes through unchanged.
function convertLlmContentToGeminiContent(content) {
  const parts = content.parts.map((part) => toGeminiPart(part));
  return { role: content.role, parts };
}
|
|
1559
|
+
// Routes a model id to its provider: "chatgpt-" prefixed ids go to the
// ChatGPT backend with the prefix stripped, "gemini-" ids to Gemini, and
// everything else to the OpenAI API.
function resolveProvider(model) {
  const CHATGPT_PREFIX = "chatgpt-";
  if (model.startsWith(CHATGPT_PREFIX)) {
    return { provider: "chatgpt", model: model.slice(CHATGPT_PREFIX.length) };
  }
  return model.startsWith("gemini-")
    ? { provider: "gemini", model }
    : { provider: "openai", model };
}
|
|
1568
|
+
// True when the model id names a Codex variant (substring match).
function isOpenAiCodexModel(modelId) {
  return modelId.includes("codex") === true;
}
|
|
1571
|
+
// Resolves the reasoning effort for an OpenAI call: explicit override wins;
// Codex models and the package default both currently resolve to "medium".
function resolveOpenAiReasoningEffort(modelId, override) {
  if (override) {
    return override;
  }
  return isOpenAiCodexModel(modelId) ? "medium" : DEFAULT_OPENAI_REASONING_EFFORT;
}
|
|
1580
|
+
// Maps this package's effort levels onto OpenAI's; "xhigh" is clamped to
// "high" (OpenAI's maximum). Unknown values yield undefined.
function toOpenAiReasoningEffort(effort) {
  if (effort === "low") {
    return "low";
  }
  if (effort === "medium") {
    return "medium";
  }
  if (effort === "high" || effort === "xhigh") {
    return "high";
  }
}
|
|
1592
|
+
// Output verbosity setting: Codex models use "medium", all others "high".
function resolveOpenAiVerbosity(modelId) {
  if (isOpenAiCodexModel(modelId)) {
    return "medium";
  }
  return "high";
}
|
|
1595
|
+
// True when the mime type is present and denotes an image.
function isInlineImageMime(mimeType) {
  return mimeType ? mimeType.startsWith("image/") : false;
}
|
|
1601
|
+
// Collapses consecutive text parts that share the same thought-ness into a
// single part (concatenating text); non-text parts are copied through and act
// as merge boundaries. Returns a fresh array; inputs are not mutated.
function mergeConsecutiveTextParts(parts) {
  const merged = [];
  for (const part of parts) {
    if (part.type !== "text") {
      merged.push({ type: "inlineData", data: part.data, mimeType: part.mimeType });
      continue;
    }
    const partIsThought = part.thought === true;
    const previous = merged.length > 0 ? merged[merged.length - 1] : void 0;
    const canMerge = previous !== void 0 && previous.type === "text" && (previous.thought === true) === partIsThought;
    if (canMerge) {
      previous.text += part.text;
      previous.thought = partIsThought ? true : void 0;
    } else {
      merged.push({
        type: "text",
        text: part.text,
        thought: partIsThought ? true : void 0
      });
    }
  }
  return merged;
}
|
|
1626
|
+
// Splits a content object's text parts into visible output vs. thought
// (reasoning) channels; each channel is concatenated and trimmed. Missing
// content yields two empty strings.
function extractTextByChannel(content) {
  if (!content) {
    return { text: "", thoughts: "" };
  }
  const visible = [];
  const hidden = [];
  for (const part of content.parts) {
    if (part.type !== "text") {
      continue;
    }
    (part.thought === true ? hidden : visible).push(part.text);
  }
  return { text: visible.join("").trim(), thoughts: hidden.join("").trim() };
}
|
|
1644
|
+
// Cleans raw model output down to a parseable JSON string: strips markdown
// code fences, and when the text doesn't start with "{" or "[", extracts the
// outermost {...} span from surrounding prose.
function normalizeJsonText(rawText) {
  let text = rawText.trim();
  if (text.startsWith("```")) {
    text = text
      .replace(/^```[a-zA-Z0-9_-]*\s*\n?/, "")
      .replace(/```(?:\s*)?$/, "")
      .trim();
  }
  const fenced = /^```(?:json)?\s*([\s\S]*?)\s*```$/i.exec(text);
  if (fenced?.[1]) {
    return fenced[1].trim();
  }
  if (!text.startsWith("{") && !text.startsWith("[")) {
    const open = text.indexOf("{");
    const close = text.lastIndexOf("}");
    if (open !== -1 && close > open) {
      text = text.slice(open, close + 1).trim();
    }
  }
  return text;
}
|
|
1665
|
+
// Walk JSON-ish text and escape raw control characters that appear inside
// string literals (LLMs often emit real newlines in quoted values, which
// strict JSON.parse rejects). Text outside string literals is untouched, and
// already-escaped sequences are passed through.
//
// Fix: raw tab characters inside strings are now escaped too — RFC 8259
// forbids any unescaped control character inside a string, so an unescaped
// tab previously still made JSON.parse throw.
function escapeNewlinesInStrings(jsonText) {
  const CONTROL_ESCAPES = { "\n": "\\n", "\r": "\\r", "\t": "\\t" };
  let output = "";
  let inString = false;
  let escaped = false;
  for (let i = 0; i < jsonText.length; i += 1) {
    const char = jsonText[i] ?? "";
    if (!inString) {
      if (char === '"') {
        inString = true;
      }
      output += char;
      continue;
    }
    if (escaped) {
      // Previous char was a backslash: copy this char verbatim.
      output += char;
      escaped = false;
      continue;
    }
    if (char === "\\") {
      output += char;
      escaped = true;
      continue;
    }
    if (char === '"') {
      output += char;
      inString = false;
      continue;
    }
    const replacement = CONTROL_ESCAPES[char];
    output += replacement ?? char;
  }
  return output;
}
|
|
1707
|
+
// Best-effort JSON parse of raw LLM output: strip fences/prose, escape stray
// control characters inside strings, then hand the result to JSON.parse.
// Throws (from JSON.parse) when the text is still not valid JSON.
function parseJsonFromLlmText(rawText) {
  return JSON.parse(escapeNewlinesInStrings(normalizeJsonText(rawText)));
}
|
|
1712
|
+
// Normalize a text-call input into a contents array: either pass through an
// explicit `contents` list, or build one from systemPrompt/prompt fields
// (system first when present, then the user prompt).
function resolveTextContents(input) {
  if ("contents" in input) {
    return input.contents;
  }
  const asTextContent = (role, text) => ({
    role,
    parts: [{ type: "text", text }],
  });
  const contents = input.systemPrompt ? [asTextContent("system", input.systemPrompt)] : [];
  contents.push(asTextContent("user", input.prompt));
  return contents;
}
|
|
1729
|
+
// Convert LLM contents into the OpenAI Responses API "input" shape. Text
// parts become input_text, binary parts become data-URL input_image entries,
// and a message consisting of a single text part collapses to a plain string.
function toOpenAiInput(contents) {
  const ROLE_MAP = {
    user: "user",
    model: "assistant",
    system: "system",
    tool: "assistant",
  };
  return contents.map((content) => {
    const role = ROLE_MAP[content.role];
    const parts = content.parts.map((part) => {
      if (part.type === "text") {
        return { type: "input_text", text: part.text };
      }
      const mimeType = part.mimeType ?? "application/octet-stream";
      return {
        type: "input_image",
        image_url: `data:${mimeType};base64,${part.data}`,
        detail: "auto",
      };
    });
    const sole = parts.length === 1 ? parts[0] : void 0;
    if (sole?.type === "input_text" && typeof sole.text === "string") {
      return { role, content: sole.text };
    }
    return { role, content: parts };
  });
}
|
|
1759
|
+
// Convert LLM contents into the ChatGPT Codex responses payload: system text
// is collected into a single `instructions` string; other messages become
// user/assistant input items. Assistant inline images cannot be round-tripped,
// so they are replaced with an "[image:<mime>]" text placeholder.
//
// Fix: system instruction fragments are now joined with real blank lines
// ("\n\n"). The original joined with the four-character literal "\\n\\n",
// leaking backslash-n text into the prompt (the compiled source shows doubled
// backslashes here while regex literals nearby use single ones).
function toChatGptInput(contents) {
  const instructionsParts = [];
  const input = [];
  for (const content of contents) {
    if (content.role === "system") {
      for (const part of content.parts) {
        if (part.type === "text") {
          instructionsParts.push(part.text);
        }
      }
      continue;
    }
    const isAssistant = content.role === "model";
    const parts = [];
    for (const part of content.parts) {
      if (part.type === "text") {
        parts.push({ type: isAssistant ? "output_text" : "input_text", text: part.text });
        continue;
      }
      const mimeType = part.mimeType ?? "application/octet-stream";
      if (isAssistant) {
        // Assistant turns cannot carry image payloads; describe them instead.
        parts.push({ type: "output_text", text: `[image:${mimeType}]` });
      } else {
        parts.push({
          type: "input_image",
          image_url: `data:${mimeType};base64,${part.data}`,
          detail: "auto",
        });
      }
    }
    if (parts.length === 0) {
      // The API rejects empty content arrays.
      parts.push({ type: isAssistant ? "output_text" : "input_text", text: "(empty content)" });
    }
    if (isAssistant) {
      input.push({ type: "message", role: "assistant", status: "completed", content: parts });
    } else {
      input.push({ role: "user", content: parts });
    }
  }
  const instructions = instructionsParts
    .map((part) => part.trim())
    .filter((part) => part.length > 0)
    .join("\n\n");
  return {
    instructions: instructions.length > 0 ? instructions : void 0,
    input,
  };
}
|
|
1822
|
+
// Map generic tool configs to Gemini tool descriptors; undefined when the
// list is absent or empty. Unknown tool types are a programming error.
function toGeminiTools(tools) {
  if (!tools?.length) {
    return void 0;
  }
  return tools.map((toolConfig) => {
    if (toolConfig.type === "web-search") {
      return { googleSearch: {} };
    }
    if (toolConfig.type === "code-execution") {
      return { codeExecution: {} };
    }
    throw new Error("Unsupported tool configuration");
  });
}
|
|
1837
|
+
// Map generic tool configs to OpenAI Responses tool descriptors; undefined
// when the list is absent or empty. Web search disables live access when the
// config asks for cached mode.
function toOpenAiTools(tools) {
  if (!tools?.length) {
    return void 0;
  }
  return tools.map((toolConfig) => {
    if (toolConfig.type === "web-search") {
      return { type: "web_search", external_web_access: toolConfig.mode !== "cached" };
    }
    if (toolConfig.type === "code-execution") {
      return { type: "code_interpreter", container: { type: "auto" } };
    }
    throw new Error("Unsupported tool configuration");
  });
}
|
|
1855
|
+
// Merge streaming token-usage updates: the newer update wins per field, with
// the previous value kept wherever the new one is null/undefined.
function mergeTokenUpdates(current, next) {
  if (!next) {
    return current;
  }
  if (!current) {
    return next;
  }
  const pick = (key) => next[key] ?? current[key];
  return {
    promptTokens: pick("promptTokens"),
    cachedTokens: pick("cachedTokens"),
    responseTokens: pick("responseTokens"),
    responseImageTokens: pick("responseImageTokens"),
    thinkingTokens: pick("thinkingTokens"),
    totalTokens: pick("totalTokens"),
    toolUsePromptTokens: pick("toolUsePromptTokens"),
  };
}
|
|
1872
|
+
// Coerce a numeric-looking value to a finite number, else undefined.
// Strings go through parseFloat, so a leading-number string like "12px"
// still yields 12 (intentional leniency preserved from the original).
function toMaybeNumber(value) {
  if (typeof value === "number") {
    return Number.isFinite(value) ? value : void 0;
  }
  if (typeof value !== "string") {
    return void 0;
  }
  const parsed = Number.parseFloat(value);
  return Number.isFinite(parsed) ? parsed : void 0;
}
|
|
1884
|
+
// Sum the positive tokenCount entries whose modality matches (case
// insensitive). Returns 0 for missing/non-array details.
function sumModalityTokenCounts(details, modality) {
  if (!Array.isArray(details)) {
    return 0;
  }
  const wanted = modality.toUpperCase();
  let total = 0;
  for (const entry of details) {
    const entryModality = entry.modality;
    if (typeof entryModality !== "string" || entryModality.toUpperCase() !== wanted) {
      continue;
    }
    const tokenCount = toMaybeNumber(entry.tokenCount);
    if (tokenCount !== void 0 && tokenCount > 0) {
      total += tokenCount;
    }
  }
  return total;
}
|
|
1904
|
+
// Translate a Gemini usageMetadata payload into the normalized token-usage
// shape. Returns undefined when the payload carries no recognizable counters.
// Image output tokens are summed from the per-modality detail entries.
function extractGeminiUsageTokens(usage) {
  if (!usage || typeof usage !== "object") {
    return void 0;
  }
  const promptTokens = toMaybeNumber(usage.promptTokenCount);
  const cachedTokens = toMaybeNumber(usage.cachedContentTokenCount);
  const responseTokens = toMaybeNumber(usage.candidatesTokenCount ?? usage.responseTokenCount);
  const thinkingTokens = toMaybeNumber(usage.thoughtsTokenCount);
  const totalTokens = toMaybeNumber(usage.totalTokenCount);
  const toolUsePromptTokens = toMaybeNumber(usage.toolUsePromptTokenCount);
  const responseImageTokens = sumModalityTokenCounts(
    usage.candidatesTokensDetails ?? usage.responseTokensDetails,
    "IMAGE"
  );
  const nothingReported =
    promptTokens === void 0 &&
    cachedTokens === void 0 &&
    responseTokens === void 0 &&
    responseImageTokens === 0 &&
    thinkingTokens === void 0 &&
    totalTokens === void 0 &&
    toolUsePromptTokens === void 0;
  if (nothingReported) {
    return void 0;
  }
  return {
    promptTokens,
    cachedTokens,
    responseTokens,
    responseImageTokens: responseImageTokens > 0 ? responseImageTokens : void 0,
    thinkingTokens,
    totalTokens,
    toolUsePromptTokens,
  };
}
|
|
1937
|
+
// Translate an OpenAI Responses usage object into the normalized token-usage
// shape. Reasoning tokens are reported inside output_tokens, so they are
// subtracted (clamped at zero) to obtain pure response tokens. Returns
// undefined when no counters are present.
function extractOpenAiUsageTokens(usage) {
  if (!usage || typeof usage !== "object") {
    return void 0;
  }
  const promptTokens = toMaybeNumber(usage.input_tokens);
  const cachedTokens = toMaybeNumber(usage.input_tokens_details?.cached_tokens);
  const rawOutputTokens = toMaybeNumber(usage.output_tokens);
  const reasoningTokens = toMaybeNumber(usage.output_tokens_details?.reasoning_tokens);
  const totalTokens = toMaybeNumber(usage.total_tokens);
  const responseTokens =
    rawOutputTokens === void 0 ? void 0 : Math.max(0, rawOutputTokens - (reasoningTokens ?? 0));
  const nothingReported =
    promptTokens === void 0 &&
    cachedTokens === void 0 &&
    responseTokens === void 0 &&
    reasoningTokens === void 0 &&
    totalTokens === void 0;
  if (nothingReported) {
    return void 0;
  }
  return {
    promptTokens,
    cachedTokens,
    responseTokens,
    thinkingTokens: reasoningTokens,
    totalTokens,
  };
}
|
|
1966
|
+
// ChatGPT Codex responses report usage in exactly the same shape as the
// OpenAI Responses API (input_tokens, output_tokens, *_details). The original
// body was a byte-for-byte duplicate of extractOpenAiUsageTokens; delegate to
// it instead so the two cannot drift apart.
function extractChatGptUsageTokens(usage) {
  return extractOpenAiUsageTokens(usage);
}
|
|
1995
|
+
// Gemini finish reasons that indicate the response was halted by content
// moderation (safety filters, blocklists, prohibited content, SPII) rather
// than by normal completion. Used by isModerationFinish below.
var MODERATION_FINISH_REASONS = /* @__PURE__ */ new Set([
  FinishReason.SAFETY,
  FinishReason.BLOCKLIST,
  FinishReason.PROHIBITED_CONTENT,
  FinishReason.SPII
]);
|
|
2001
|
+
// True when a Gemini finish reason means the content was blocked by
// moderation; falsy/absent reasons count as not moderated.
function isModerationFinish(reason) {
  return reason ? MODERATION_FINISH_REASONS.has(reason) : false;
}
|
|
2007
|
+
// Render a tool's output as a string: strings pass through, everything else
// is JSON-encoded, and serialization failures (e.g. circular structures)
// degrade to a JSON error descriptor instead of throwing.
function mergeToolOutput(value) {
  if (typeof value === "string") {
    return value;
  }
  try {
    return JSON.stringify(value);
  } catch (error) {
    const detail = error instanceof Error ? error.message : String(error);
    return JSON.stringify({ error: "Failed to serialize tool output", detail });
  }
}
|
|
2018
|
+
// Parse a tool-call arguments string. Blank input means "no arguments" ({});
// invalid JSON returns the original raw string together with the parse-error
// message so the caller can report it back to the model.
function parseOpenAiToolArguments(raw) {
  const trimmed = raw.trim();
  if (trimmed.length === 0) {
    return { value: {} };
  }
  try {
    return { value: JSON.parse(trimmed) };
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error);
    return { value: raw, error: message };
  }
}
|
|
2030
|
+
// Render Zod validation issues as "path: message" pairs joined by "; ".
// An empty path is reported as "input".
function formatZodIssues(issues) {
  return issues
    .map((issue) => {
      const location = issue.path.length > 0 ? issue.path.map(String).join(".") : "input";
      return `${location}: ${issue.message}`;
    })
    .join("; ");
}
|
|
2038
|
+
// Build the JSON payload returned to the model when a tool call fails,
// optionally attaching structured validation issues (path, message, code).
function buildToolErrorOutput(message, issues) {
  const output = { error: message };
  if (issues?.length) {
    output.issues = issues.map((issue) => ({
      path: issue.path.map(String),
      message: issue.message,
      code: issue.code,
    }));
  }
  return output;
}
|
|
2049
|
+
// Execute one model-requested tool call, returning both a log-friendly
// `result` record and the `outputPayload` that is sent back to the model.
// This function never throws: unknown tools, malformed JSON arguments,
// schema-validation failures, and tool execution errors are all converted
// into error payloads instead.
async function executeToolCall(params) {
  const { toolName, tool: tool2, rawInput, parseError } = params;
  if (!tool2) {
    // No tool is registered under this name.
    const message = `Unknown tool: ${toolName}`;
    return {
      result: { toolName, input: rawInput, output: { error: message }, error: message },
      outputPayload: buildToolErrorOutput(message)
    };
  }
  if (parseError) {
    // The arguments string failed to parse as JSON upstream.
    const message = `Invalid JSON for tool ${toolName}: ${parseError}`;
    return {
      result: { toolName, input: rawInput, output: { error: message }, error: message },
      outputPayload: buildToolErrorOutput(message)
    };
  }
  // Validate the raw arguments against the tool's Zod input schema.
  const parsed = tool2.inputSchema.safeParse(rawInput);
  if (!parsed.success) {
    const message = `Invalid tool arguments for ${toolName}: ${formatZodIssues(parsed.error.issues)}`;
    const outputPayload = buildToolErrorOutput(message, parsed.error.issues);
    return {
      result: { toolName, input: rawInput, output: outputPayload, error: message },
      outputPayload
    };
  }
  try {
    // Run the tool with the validated (possibly transformed) arguments.
    const output = await tool2.execute(parsed.data);
    return {
      result: { toolName, input: parsed.data, output },
      outputPayload: output
    };
  } catch (error) {
    // The tool ran but threw: surface the message without aborting the turn.
    const message = error instanceof Error ? error.message : String(error);
    const outputPayload = buildToolErrorOutput(`Tool ${toolName} failed: ${message}`);
    return {
      result: { toolName, input: parsed.data, output: outputPayload, error: message },
      outputPayload
    };
  }
}
|
|
2089
|
+
// Stable log identifier for the toolIndex-th tool call within a turn,
// e.g. "turn2/tool0".
function buildToolLogId(turn, toolIndex) {
  const turnLabel = `turn${turn.toString()}`;
  const toolLabel = `tool${toolIndex.toString()}`;
  return `${turnLabel}/${toolLabel}`;
}
|
|
2092
|
+
// Strip characters outside [A-Za-z0-9_-] from a tool-call id and cap it at
// 64 characters; when nothing survives, substitute a random hex id.
function sanitizeChatGptToolId(value) {
  const cleaned = value.replace(/[^A-Za-z0-9_-]/gu, "");
  return cleaned.length > 0 ? cleaned.slice(0, 64) : randomBytes(8).toString("hex");
}
|
|
2099
|
+
// Recover a (callId, itemId) pair for a ChatGPT tool call. Either field may
// arrive packed as "callId|itemId"; split that apart, sanitize both halves,
// and guarantee the item id carries the "fc" prefix the API expects.
function normalizeChatGptToolIds(params) {
  let rawCallId = params.callId ?? "";
  let rawItemId = params.itemId ?? "";
  if (rawCallId.includes("|")) {
    const [splitCall, splitItem] = rawCallId.split("|");
    rawCallId = splitCall ?? rawCallId;
    if (splitItem) {
      rawItemId = splitItem;
    }
  } else if (rawItemId.includes("|")) {
    const [splitCall, splitItem] = rawItemId.split("|");
    rawCallId = splitCall ?? rawCallId;
    rawItemId = splitItem ?? rawItemId;
  }
  const callId = sanitizeChatGptToolId(rawCallId || rawItemId || randomBytes(8).toString("hex"));
  let itemId = sanitizeChatGptToolId(rawItemId || `fc-${callId}`);
  if (!itemId.startsWith("fc")) {
    itemId = `fc-${itemId}`;
  }
  return { callId, itemId };
}
|
|
2120
|
+
// Flatten a final OpenAI Responses payload into LLM parts: assistant message
// text, reasoning summaries (marked as thoughts), and serialized tool calls.
// Reports whether any refusal entry was seen, and falls back to the aggregate
// `output_text` field when no structured parts were found.
//
// Fix: tool-call serializations are now wrapped with real newline characters;
// the original embedded the two-character literal "\n" (backslash + n), which
// leaked escaped text into the transcript.
function extractOpenAiResponseParts(response) {
  const parts = [];
  let blocked = false;
  const output = response.output;
  if (Array.isArray(output)) {
    for (const item of output) {
      if (!item || typeof item !== "object") {
        continue;
      }
      const itemType = item.type;
      if (itemType === "message") {
        const content = item.content;
        if (Array.isArray(content)) {
          for (const entry of content) {
            if (!entry || typeof entry !== "object") {
              continue;
            }
            if (entry.type === "output_text") {
              if (typeof entry.text === "string" && entry.text.length > 0) {
                parts.push({ type: "text", text: entry.text });
              }
            } else if (entry.type === "refusal") {
              blocked = true;
            }
          }
        }
      } else if (itemType === "reasoning") {
        const content = item.content;
        if (Array.isArray(content)) {
          for (const entry of content) {
            if (!entry || typeof entry !== "object") {
              continue;
            }
            if (entry.type === "reasoning_summary_text" || entry.type === "reasoning_summary") {
              // Newer payloads use `text`; older ones use `summary`.
              const entryText =
                typeof entry.text === "string"
                  ? entry.text
                  : typeof entry.summary === "string"
                    ? entry.summary
                    : void 0;
              if (typeof entryText === "string" && entryText.length > 0) {
                parts.push({ type: "text", text: entryText, thought: true });
              }
            }
          }
        }
      } else if (itemType === "function_call" || itemType === "tool_call" || itemType === "custom_tool_call") {
        const serialized = JSON.stringify(item, null, 2);
        if (serialized.length > 0) {
          parts.push({ type: "text", text: `[tool-call]\n${serialized}\n` });
        }
      }
    }
  }
  if (parts.length === 0) {
    // No structured output: fall back to the flattened convenience field.
    const outputText = response.output_text;
    if (typeof outputText === "string" && outputText.length > 0) {
      parts.push({ type: "text", text: outputText });
    }
  }
  return { parts, blocked };
}
|
|
2180
|
+
// Pull function_call items (name, raw argument string, ids) out of a
// Responses output array. Entries missing a name or call_id are skipped;
// non-array input yields an empty list.
function extractOpenAiFunctionCalls(output) {
  if (!Array.isArray(output)) {
    return [];
  }
  const calls = [];
  for (const item of output) {
    if (!item || typeof item !== "object" || item.type !== "function_call") {
      continue;
    }
    const name = typeof item.name === "string" ? item.name : "";
    const call_id = typeof item.call_id === "string" ? item.call_id : "";
    if (!name || !call_id) {
      continue;
    }
    calls.push({
      name,
      arguments: typeof item.arguments === "string" ? item.arguments : "",
      call_id,
      id: typeof item.id === "string" ? item.id : void 0,
    });
  }
  return calls;
}
|
|
2201
|
+
// Per-model Gemini thinking configuration: thoughts are always surfaced, and
// models with a known budget get an explicit thinkingBudget cap; everything
// else (including gemini-3-pro-preview) gets only includeThoughts.
function resolveGeminiThinkingConfig(modelId) {
  const THINKING_BUDGETS = new Map([
    ["gemini-2.5-pro", 32768],
    ["gemini-flash-latest", 24576],
    ["gemini-flash-lite-latest", 24576],
  ]);
  const thinkingBudget = THINKING_BUDGETS.get(modelId);
  if (thinkingBudget === void 0) {
    return { includeThoughts: true };
  }
  return { includeThoughts: true, thinkingBudget };
}
|
|
2214
|
+
// Decode a base64 payload to a Buffer, falling back to base64url decoding if
// standard base64 decoding throws.
// NOTE(review): Node's Buffer.from(str, "base64") silently skips non-alphabet
// characters rather than throwing, so the catch branch looks unreachable in
// practice — confirm before relying on the base64url fallback.
function decodeInlineDataBuffer(base64) {
  try {
    return Buffer2.from(base64, "base64");
  } catch {
    return Buffer2.from(base64, "base64url");
  }
}
|
|
2221
|
+
// Collect the decoded inline-data parts of a content as
// { mimeType, data: Buffer } records; missing content yields an empty list.
function extractImages(content) {
  if (!content) {
    return [];
  }
  return content.parts
    .filter((part) => part.type === "inlineData")
    .map((part) => ({ mimeType: part.mimeType, data: decodeInlineDataBuffer(part.data) }));
}
|
|
2235
|
+
async function runTextCall(params) {
|
|
2236
|
+
const { request, queue, abortController } = params;
|
|
2237
|
+
const providerInfo = resolveProvider(request.model);
|
|
2238
|
+
const provider = providerInfo.provider;
|
|
2239
|
+
const modelForProvider = providerInfo.model;
|
|
2240
|
+
const contents = resolveTextContents(request);
|
|
2241
|
+
if (contents.length === 0) {
|
|
2242
|
+
throw new Error("LLM call received an empty prompt.");
|
|
2243
|
+
}
|
|
2244
|
+
let modelVersion = request.model;
|
|
2245
|
+
let blocked = false;
|
|
2246
|
+
let grounding;
|
|
2247
|
+
const responseParts = [];
|
|
2248
|
+
let responseRole;
|
|
2249
|
+
let latestUsage;
|
|
2250
|
+
let responseImages = 0;
|
|
2251
|
+
const pushDelta = (channel, text2) => {
|
|
2252
|
+
if (!text2) {
|
|
2253
|
+
return;
|
|
2254
|
+
}
|
|
2255
|
+
responseParts.push({ type: "text", text: text2, ...channel === "thought" ? { thought: true } : {} });
|
|
2256
|
+
queue.push({ type: "delta", channel, text: text2 });
|
|
2257
|
+
};
|
|
2258
|
+
const pushInline = (data, mimeType) => {
|
|
2259
|
+
if (!data) {
|
|
2260
|
+
return;
|
|
2261
|
+
}
|
|
2262
|
+
responseParts.push({ type: "inlineData", data, mimeType });
|
|
2263
|
+
if (isInlineImageMime(mimeType)) {
|
|
2264
|
+
responseImages += 1;
|
|
2265
|
+
}
|
|
2266
|
+
};
|
|
2267
|
+
const resolveAbortSignal = () => {
|
|
2268
|
+
if (!request.signal) {
|
|
2269
|
+
return abortController.signal;
|
|
2270
|
+
}
|
|
2271
|
+
if (request.signal.aborted) {
|
|
2272
|
+
abortController.abort(request.signal.reason);
|
|
2273
|
+
} else {
|
|
2274
|
+
request.signal.addEventListener(
|
|
2275
|
+
"abort",
|
|
2276
|
+
() => abortController.abort(request.signal?.reason),
|
|
2277
|
+
{ once: true }
|
|
2278
|
+
);
|
|
2279
|
+
}
|
|
2280
|
+
return abortController.signal;
|
|
2281
|
+
};
|
|
2282
|
+
const signal = resolveAbortSignal();
|
|
2283
|
+
if (provider === "openai") {
|
|
2284
|
+
const openAiInput = toOpenAiInput(contents);
|
|
2285
|
+
const openAiTools = toOpenAiTools(request.tools);
|
|
2286
|
+
const reasoningEffort = resolveOpenAiReasoningEffort(
|
|
2287
|
+
modelForProvider,
|
|
2288
|
+
request.openAiReasoningEffort
|
|
2289
|
+
);
|
|
2290
|
+
const openAiTextConfig = {
|
|
2291
|
+
format: request.openAiTextFormat ?? { type: "text" },
|
|
2292
|
+
verbosity: resolveOpenAiVerbosity(modelForProvider)
|
|
2293
|
+
};
|
|
2294
|
+
const reasoning = {
|
|
2295
|
+
effort: toOpenAiReasoningEffort(reasoningEffort),
|
|
2296
|
+
summary: "detailed"
|
|
2297
|
+
};
|
|
2298
|
+
await runOpenAiCall(async (client) => {
|
|
2299
|
+
const stream = client.responses.stream(
|
|
2300
|
+
{
|
|
2301
|
+
model: modelForProvider,
|
|
2302
|
+
input: openAiInput,
|
|
2303
|
+
reasoning,
|
|
2304
|
+
text: openAiTextConfig,
|
|
2305
|
+
...openAiTools ? { tools: openAiTools } : {},
|
|
2306
|
+
include: ["code_interpreter_call.outputs", "reasoning.encrypted_content"]
|
|
2307
|
+
},
|
|
2308
|
+
{ signal }
|
|
2309
|
+
);
|
|
2310
|
+
for await (const event of stream) {
|
|
2311
|
+
switch (event.type) {
|
|
2312
|
+
case "response.output_text.delta": {
|
|
2313
|
+
const delta = event.delta ?? "";
|
|
2314
|
+
pushDelta("response", typeof delta === "string" ? delta : "");
|
|
2315
|
+
break;
|
|
2316
|
+
}
|
|
2317
|
+
case "response.reasoning_summary_text.delta": {
|
|
2318
|
+
const delta = event.delta ?? "";
|
|
2319
|
+
pushDelta("thought", typeof delta === "string" ? delta : "");
|
|
2320
|
+
break;
|
|
2321
|
+
}
|
|
2322
|
+
case "response.refusal.delta": {
|
|
2323
|
+
blocked = true;
|
|
2324
|
+
queue.push({ type: "blocked" });
|
|
2325
|
+
break;
|
|
2326
|
+
}
|
|
2327
|
+
default:
|
|
2328
|
+
break;
|
|
2329
|
+
}
|
|
2330
|
+
}
|
|
2331
|
+
const finalResponse = await stream.finalResponse();
|
|
2332
|
+
modelVersion = typeof finalResponse.model === "string" ? finalResponse.model : request.model;
|
|
2333
|
+
queue.push({ type: "model", modelVersion });
|
|
2334
|
+
if (finalResponse.error) {
|
|
2335
|
+
const message = typeof finalResponse.error.message === "string" ? finalResponse.error.message : "OpenAI response failed";
|
|
2336
|
+
throw new Error(message);
|
|
2337
|
+
}
|
|
2338
|
+
if (finalResponse.status && finalResponse.status !== "completed" && finalResponse.status !== "in_progress") {
|
|
2339
|
+
const detail = finalResponse.incomplete_details?.reason;
|
|
2340
|
+
throw new Error(
|
|
2341
|
+
`OpenAI response status ${finalResponse.status}${detail ? ` (${detail})` : ""}`
|
|
2342
|
+
);
|
|
2343
|
+
}
|
|
2344
|
+
latestUsage = extractOpenAiUsageTokens(finalResponse.usage);
|
|
2345
|
+
if (responseParts.length === 0) {
|
|
2346
|
+
const fallback = extractOpenAiResponseParts(finalResponse);
|
|
2347
|
+
blocked = blocked || fallback.blocked;
|
|
2348
|
+
for (const part of fallback.parts) {
|
|
2349
|
+
if (part.type === "text") {
|
|
2350
|
+
pushDelta(part.thought === true ? "thought" : "response", part.text);
|
|
2351
|
+
} else {
|
|
2352
|
+
pushInline(part.data, part.mimeType);
|
|
2353
|
+
}
|
|
2354
|
+
}
|
|
2355
|
+
}
|
|
2356
|
+
});
|
|
2357
|
+
} else if (provider === "chatgpt") {
|
|
2358
|
+
const chatGptInput = toChatGptInput(contents);
|
|
2359
|
+
const reasoningEffort = resolveOpenAiReasoningEffort(
|
|
2360
|
+
request.model,
|
|
2361
|
+
request.openAiReasoningEffort
|
|
2362
|
+
);
|
|
2363
|
+
const openAiTools = toOpenAiTools(request.tools);
|
|
2364
|
+
const requestPayload = {
|
|
2365
|
+
model: modelForProvider,
|
|
2366
|
+
store: false,
|
|
2367
|
+
stream: true,
|
|
2368
|
+
instructions: chatGptInput.instructions ?? "You are a helpful assistant.",
|
|
2369
|
+
input: chatGptInput.input,
|
|
2370
|
+
include: ["reasoning.encrypted_content"],
|
|
2371
|
+
reasoning: { effort: toOpenAiReasoningEffort(reasoningEffort), summary: "detailed" },
|
|
2372
|
+
text: {
|
|
2373
|
+
format: request.openAiTextFormat ?? { type: "text" },
|
|
2374
|
+
verbosity: resolveOpenAiVerbosity(request.model)
|
|
2375
|
+
},
|
|
2376
|
+
...openAiTools ? { tools: openAiTools } : {}
|
|
2377
|
+
};
|
|
2378
|
+
let sawResponseDelta = false;
|
|
2379
|
+
let sawThoughtDelta = false;
|
|
2380
|
+
const result = await collectChatGptCodexResponse({
|
|
2381
|
+
request: requestPayload,
|
|
2382
|
+
signal,
|
|
2383
|
+
onDelta: (delta) => {
|
|
2384
|
+
if (delta.thoughtDelta) {
|
|
2385
|
+
sawThoughtDelta = true;
|
|
2386
|
+
pushDelta("thought", delta.thoughtDelta);
|
|
2387
|
+
}
|
|
2388
|
+
if (delta.textDelta) {
|
|
2389
|
+
sawResponseDelta = true;
|
|
2390
|
+
pushDelta("response", delta.textDelta);
|
|
2391
|
+
}
|
|
2392
|
+
}
|
|
2393
|
+
});
|
|
2394
|
+
blocked = blocked || result.blocked;
|
|
2395
|
+
if (blocked) {
|
|
2396
|
+
queue.push({ type: "blocked" });
|
|
2397
|
+
}
|
|
2398
|
+
if (result.model) {
|
|
2399
|
+
modelVersion = `chatgpt-${result.model}`;
|
|
2400
|
+
queue.push({ type: "model", modelVersion });
|
|
2401
|
+
}
|
|
2402
|
+
latestUsage = extractChatGptUsageTokens(result.usage);
|
|
2403
|
+
const fallbackText = typeof result.text === "string" ? result.text : "";
|
|
2404
|
+
const fallbackThoughts = typeof result.reasoningSummaryText === "string" && result.reasoningSummaryText.length > 0 ? result.reasoningSummaryText : typeof result.reasoningText === "string" ? result.reasoningText : "";
|
|
2405
|
+
if (!sawThoughtDelta && fallbackThoughts.length > 0) {
|
|
2406
|
+
pushDelta("thought", fallbackThoughts);
|
|
2407
|
+
}
|
|
2408
|
+
if (!sawResponseDelta && fallbackText.length > 0) {
|
|
2409
|
+
pushDelta("response", fallbackText);
|
|
2410
|
+
}
|
|
2411
|
+
} else {
|
|
2412
|
+
const geminiContents = contents.map(convertLlmContentToGeminiContent);
|
|
2413
|
+
const config = {
|
|
2414
|
+
maxOutputTokens: 32e3,
|
|
2415
|
+
thinkingConfig: resolveGeminiThinkingConfig(modelForProvider),
|
|
2416
|
+
...request.responseMimeType ? { responseMimeType: request.responseMimeType } : {},
|
|
2417
|
+
...request.responseJsonSchema ? { responseJsonSchema: request.responseJsonSchema } : {},
|
|
2418
|
+
...request.responseModalities ? { responseModalities: Array.from(request.responseModalities) } : {},
|
|
2419
|
+
...request.imageAspectRatio || request.imageSize ? {
|
|
2420
|
+
imageConfig: {
|
|
2421
|
+
...request.imageAspectRatio ? { aspectRatio: request.imageAspectRatio } : {},
|
|
2422
|
+
...request.imageSize ? { imageSize: request.imageSize } : {}
|
|
2423
|
+
}
|
|
2424
|
+
} : {}
|
|
2425
|
+
};
|
|
2426
|
+
const geminiTools = toGeminiTools(request.tools);
|
|
2427
|
+
if (geminiTools) {
|
|
2428
|
+
config.tools = geminiTools;
|
|
2429
|
+
}
|
|
2430
|
+
await runGeminiCall(async (client) => {
|
|
2431
|
+
const stream = await client.models.generateContentStream({
|
|
2432
|
+
model: modelForProvider,
|
|
2433
|
+
contents: geminiContents,
|
|
2434
|
+
config
|
|
2435
|
+
});
|
|
2436
|
+
let latestGrounding;
|
|
2437
|
+
for await (const chunk of stream) {
|
|
2438
|
+
if (chunk.modelVersion) {
|
|
2439
|
+
modelVersion = chunk.modelVersion;
|
|
2440
|
+
queue.push({ type: "model", modelVersion });
|
|
2441
|
+
}
|
|
2442
|
+
if (chunk.promptFeedback?.blockReason) {
|
|
2443
|
+
blocked = true;
|
|
2444
|
+
queue.push({ type: "blocked" });
|
|
2445
|
+
}
|
|
2446
|
+
latestUsage = mergeTokenUpdates(latestUsage, extractGeminiUsageTokens(chunk.usageMetadata));
|
|
2447
|
+
const candidates = chunk.candidates;
|
|
2448
|
+
if (!candidates || candidates.length === 0) {
|
|
2449
|
+
continue;
|
|
2450
|
+
}
|
|
2451
|
+
const primary = candidates[0];
|
|
2452
|
+
if (primary && isModerationFinish(primary.finishReason)) {
|
|
2453
|
+
blocked = true;
|
|
2454
|
+
queue.push({ type: "blocked" });
|
|
2455
|
+
}
|
|
2456
|
+
for (const candidate of candidates) {
|
|
2457
|
+
const candidateContent = candidate.content;
|
|
2458
|
+
if (!candidateContent) {
|
|
2459
|
+
continue;
|
|
2460
|
+
}
|
|
2461
|
+
if (candidate.groundingMetadata) {
|
|
2462
|
+
latestGrounding = candidate.groundingMetadata;
|
|
2463
|
+
}
|
|
2464
|
+
const content2 = convertGeminiContentToLlmContent(candidateContent);
|
|
2465
|
+
if (!responseRole) {
|
|
2466
|
+
responseRole = content2.role;
|
|
2467
|
+
}
|
|
2468
|
+
for (const part of content2.parts) {
|
|
2469
|
+
if (part.type === "text") {
|
|
2470
|
+
pushDelta(part.thought === true ? "thought" : "response", part.text);
|
|
2471
|
+
} else {
|
|
2472
|
+
pushInline(part.data, part.mimeType);
|
|
2473
|
+
}
|
|
2474
|
+
}
|
|
2475
|
+
}
|
|
2476
|
+
}
|
|
2477
|
+
grounding = latestGrounding;
|
|
2478
|
+
});
|
|
2479
|
+
}
|
|
2480
|
+
const mergedParts = mergeConsecutiveTextParts(responseParts);
|
|
2481
|
+
const content = mergedParts.length > 0 ? { role: responseRole ?? "model", parts: mergedParts } : void 0;
|
|
2482
|
+
const { text, thoughts } = extractTextByChannel(content);
|
|
2483
|
+
const costUsd = estimateCallCostUsd({
|
|
2484
|
+
modelId: modelVersion,
|
|
2485
|
+
tokens: latestUsage,
|
|
2486
|
+
responseImages,
|
|
2487
|
+
imageSize: request.imageSize
|
|
2488
|
+
});
|
|
2489
|
+
if (latestUsage) {
|
|
2490
|
+
queue.push({ type: "usage", usage: latestUsage, costUsd, modelVersion });
|
|
2491
|
+
}
|
|
2492
|
+
return {
|
|
2493
|
+
provider,
|
|
2494
|
+
model: request.model,
|
|
2495
|
+
modelVersion,
|
|
2496
|
+
content,
|
|
2497
|
+
text,
|
|
2498
|
+
thoughts,
|
|
2499
|
+
blocked,
|
|
2500
|
+
usage: latestUsage,
|
|
2501
|
+
costUsd,
|
|
2502
|
+
grounding
|
|
2503
|
+
};
|
|
2504
|
+
}
|
|
2505
|
+
// Starts a streaming text call and exposes it as { events, result, abort }.
// `events` is an async iterable of streaming events, `result` resolves to the
// final call output, and `abort` cancels the in-flight provider request.
function streamText(request) {
  const eventQueue = createAsyncQueue();
  const controller = new AbortController();
  const run = async () => {
    try {
      const output = await runTextCall({
        request,
        queue: eventQueue,
        abortController: controller
      });
      // Close the queue so consumers of `events` terminate cleanly.
      eventQueue.close();
      return output;
    } catch (cause) {
      // Normalize non-Error throwables before propagating to both channels.
      const failure = cause instanceof Error ? cause : new Error(String(cause));
      eventQueue.fail(failure);
      throw failure;
    }
  };
  const result = run();
  return {
    events: eventQueue.iterable,
    result,
    abort: () => {
      controller.abort();
    }
  };
}
|
|
2525
|
+
// Convenience wrapper over streamText: drains the event stream (discarding
// every event) and returns the final aggregated result.
async function generateText(request) {
  const call = streamText(request);
  // The stream must be consumed for the underlying call to make progress.
  for await (const ignored of call.events) {
    void ignored;
  }
  return await call.result;
}
|
|
2531
|
+
/**
 * Runs a text call that must produce JSON matching a Zod schema, with retries.
 *
 * Flow per attempt: stream the model response, accumulate "response" channel
 * deltas, repair/parse the text as JSON, optionally normalize it, then
 * validate with `request.schema`. Any failure (stream error, JSON parse,
 * schema validation) is recorded and the attempt is retried up to
 * `maxAttempts` (default 2). On exhaustion an LlmJsonCallError carrying all
 * attempt failures is thrown.
 *
 * Returns `{ value, rawText, result }` where `value` is the schema-parsed
 * payload and `result` is the underlying streamText result.
 */
async function generateJson(request) {
  // At least one attempt; non-integer inputs are floored.
  const maxAttempts = Math.max(1, Math.floor(request.maxAttempts ?? 2));
  // Fall back to the default name if the caller passes blank/whitespace.
  const schemaName = (request.openAiSchemaName ?? "llm-response").trim() || "llm-response";
  const providerInfo = resolveProvider(request.model);
  // OpenAI and ChatGPT variants share the OpenAI structured-output schema dialect.
  const isOpenAiVariant = providerInfo.provider === "openai" || providerInfo.provider === "chatgpt";
  const baseJsonSchema = zodToJsonSchema(request.schema, {
    name: schemaName,
    target: isOpenAiVariant ? "openAi" : "jsonSchema7"
  });
  // OpenAI: unwrap to the schema root; Gemini: add propertyOrdering hints.
  const responseJsonSchema = isOpenAiVariant ? resolveOpenAiSchemaRoot(baseJsonSchema) : addGeminiPropertyOrdering(baseJsonSchema);
  if (isOpenAiVariant && !isJsonSchemaObject(responseJsonSchema)) {
    throw new Error("OpenAI structured outputs require a JSON object schema at the root.");
  }
  // Strict json_schema text format is only sent for the plain "openai" provider.
  const openAiTextFormat = providerInfo.provider === "openai" ? {
    type: "json_schema",
    name: schemaName,
    strict: true,
    schema: normalizeOpenAiSchema(responseJsonSchema)
  } : void 0;
  const failures = [];
  for (let attempt = 1; attempt <= maxAttempts; attempt += 1) {
    let rawText = "";
    try {
      const contents = resolveTextContents(request);
      const call = streamText({
        model: request.model,
        contents,
        tools: request.tools,
        responseMimeType: request.responseMimeType ?? "application/json",
        responseJsonSchema,
        openAiReasoningEffort: request.openAiReasoningEffort,
        ...openAiTextFormat ? { openAiTextFormat } : {},
        signal: request.signal
      });
      // Accumulate only response-channel text (thoughts are ignored here).
      for await (const event of call.events) {
        if (event.type === "delta" && event.channel === "response") {
          rawText += event.text;
        }
      }
      const result = await call.result;
      // Fall back to the aggregated result text if no deltas were received.
      rawText = rawText || result.text;
      // Strip wrappers (e.g. code fences) then repair bare newlines in strings
      // before parsing — models sometimes emit slightly malformed JSON.
      const cleanedText = normalizeJsonText(rawText);
      const repairedText = escapeNewlinesInStrings(cleanedText);
      const payload = JSON.parse(repairedText);
      // Optional caller hook to massage the payload before schema validation.
      const normalized = typeof request.normalizeJson === "function" ? request.normalizeJson(payload) : payload;
      const parsed = request.schema.parse(normalized);
      return { value: parsed, rawText, result };
    } catch (error) {
      const handled = error instanceof Error ? error : new Error(String(error));
      failures.push({ attempt, rawText, error: handled });
      if (attempt >= maxAttempts) {
        throw new LlmJsonCallError(`LLM JSON call failed after ${attempt} attempt(s)`, failures);
      }
    }
  }
  // Unreachable in practice (the loop either returns or throws), kept as a guard.
  throw new LlmJsonCallError("LLM JSON call failed", failures);
}
|
|
2588
|
+
// Default cap on model/tool round-trips in runToolLoop when request.maxSteps is unset.
var DEFAULT_TOOL_LOOP_MAX_STEPS = 8;
|
|
2589
|
+
// Resolves tool-loop input into the same content-list shape used by plain
// text calls; currently a direct delegation to resolveTextContents.
function resolveToolLoopContents(input) {
  const contents = resolveTextContents(input);
  return contents;
}
|
|
2592
|
+
// Converts the tool-definition map into OpenAI Responses-API function tools.
// Each entry becomes a strict function tool with a normalized JSON schema.
function buildOpenAiFunctionTools(tools) {
  const declarations = [];
  for (const [toolName, definition] of Object.entries(tools)) {
    declarations.push({
      type: "function",
      name: toolName,
      description: definition.description ?? void 0,
      parameters: buildOpenAiToolSchema(definition.inputSchema, toolName),
      strict: true
    });
  }
  return declarations;
}
|
|
2602
|
+
// Builds the OpenAI-dialect JSON schema for one tool's input Zod schema.
// Throws when the normalized schema root is not a JSON object, since OpenAI
// function parameters must be an object schema.
function buildOpenAiToolSchema(schema, name) {
  const raw = zodToJsonSchema(schema, { name, target: "openAi" });
  const root = resolveOpenAiSchemaRoot(raw);
  const normalized = normalizeOpenAiSchema(root);
  if (isJsonSchemaObject(normalized)) {
    return normalized;
  }
  throw new Error(`OpenAI tool schema for ${name} must be a JSON object at the root.`);
}
|
|
2610
|
+
// Converts the tool-definition map into the Gemini tools array shape:
// a single entry wrapping all function declarations.
function buildGeminiFunctionDeclarations(tools) {
  const functionDeclarations = [];
  for (const [toolName, definition] of Object.entries(tools)) {
    functionDeclarations.push({
      name: toolName,
      description: definition.description ?? "",
      parametersJsonSchema: buildGeminiToolSchema(definition.inputSchema, toolName)
    });
  }
  return [{ functionDeclarations }];
}
|
|
2619
|
+
// Builds the Gemini-dialect JSON schema for one tool's input Zod schema,
// rejecting any schema whose root is not a JSON object.
function buildGeminiToolSchema(schema, name) {
  const jsonSchema = toGeminiJsonSchema(schema, { name });
  if (isJsonSchemaObject(jsonSchema)) {
    return jsonSchema;
  }
  throw new Error(`Gemini tool schema for ${name} must be a JSON object at the root.`);
}
|
|
2626
|
+
// Concatenates every "reasoning_summary_text" fragment found inside the
// "reasoning" items of an OpenAI response's `output` array. Returns "" for
// anything that is not a well-formed response object.
function extractOpenAiReasoningSummary(response) {
  if (!response || typeof response !== "object") {
    return "";
  }
  const { output } = response;
  if (!Array.isArray(output)) {
    return "";
  }
  const pieces = [];
  for (const item of output) {
    const usable = Boolean(item) && typeof item === "object" && item.type === "reasoning" && Array.isArray(item.content);
    if (!usable) {
      continue;
    }
    for (const entry of item.content) {
      if (!entry || typeof entry !== "object") {
        continue;
      }
      if (entry.type === "reasoning_summary_text" && typeof entry.text === "string") {
        pieces.push(entry.text);
      }
    }
  }
  return pieces.join("");
}
|
|
2661
|
+
/**
 * Executes an agentic tool loop against one of three providers (OpenAI,
 * ChatGPT/Codex, Gemini), chosen from `request.model`.
 *
 * Each iteration sends the conversation (plus prior tool outputs) to the
 * model. If the model returns no function calls, the loop finishes and
 * returns `{ text, thoughts, steps, totalCostUsd }`. Otherwise every function
 * call is executed in parallel via executeToolCall (inside a per-call
 * AsyncLocalStorage context for logging), the outputs are appended to the
 * conversation, and the loop continues — up to `maxSteps` iterations, after
 * which an error is thrown.
 *
 * Throws when no tools are defined, when the resolved prompt is empty, when
 * a provider reports an error, or when the step budget is exhausted.
 */
async function runToolLoop(request) {
  const toolEntries = Object.entries(request.tools);
  if (toolEntries.length === 0) {
    throw new Error("Tool loop requires at least one tool definition.");
  }
  const contents = resolveToolLoopContents(request);
  if (contents.length === 0) {
    throw new Error("Tool loop prompt must not be empty.");
  }
  // At least one step; non-integer values are floored.
  const maxSteps = Math.max(1, Math.floor(request.maxSteps ?? DEFAULT_TOOL_LOOP_MAX_STEPS));
  const providerInfo = resolveProvider(request.model);
  const steps = [];
  let totalCostUsd = 0;
  let finalText = "";
  let finalThoughts = "";
  // ---------- OpenAI Responses API branch ----------
  if (providerInfo.provider === "openai") {
    const openAiFunctionTools = buildOpenAiFunctionTools(request.tools);
    const openAiNativeTools = toOpenAiTools(request.modelTools);
    // Native model tools (if any) are listed before the function tools.
    const openAiTools = openAiNativeTools ? [...openAiNativeTools, ...openAiFunctionTools] : [...openAiFunctionTools];
    const reasoningEffort = resolveOpenAiReasoningEffort(
      providerInfo.model,
      request.openAiReasoningEffort
    );
    const textConfig = {
      format: { type: "text" },
      verbosity: resolveOpenAiVerbosity(providerInfo.model)
    };
    const reasoning = {
      effort: toOpenAiReasoningEffort(reasoningEffort),
      summary: "detailed"
    };
    // Conversation continuity uses previous_response_id; after the first turn
    // `input` carries only the new tool outputs.
    let previousResponseId;
    let input = toOpenAiInput(contents);
    for (let stepIndex = 0; stepIndex < maxSteps; stepIndex += 1) {
      const turn = stepIndex + 1;
      // Bridge the caller's AbortSignal onto a per-step controller.
      const abortController = new AbortController();
      if (request.signal) {
        if (request.signal.aborted) {
          abortController.abort(request.signal.reason);
        } else {
          request.signal.addEventListener(
            "abort",
            () => abortController.abort(request.signal?.reason),
            { once: true }
          );
        }
      }
      const onEvent = request.onEvent;
      let modelVersion = request.model;
      let usageTokens;
      const emitEvent = (ev) => {
        onEvent?.(ev);
      };
      // Stream the response, forwarding text/thought deltas to onEvent,
      // then resolve the fully-materialized response.
      const finalResponse = await runOpenAiCall(async (client) => {
        const stream = client.responses.stream(
          {
            model: providerInfo.model,
            input,
            ...previousResponseId ? { previous_response_id: previousResponseId } : {},
            ...openAiTools.length > 0 ? { tools: openAiTools } : {},
            ...openAiTools.length > 0 ? { parallel_tool_calls: true } : {},
            reasoning,
            text: textConfig,
            include: ["reasoning.encrypted_content"]
          },
          { signal: abortController.signal }
        );
        for await (const event of stream) {
          switch (event.type) {
            case "response.output_text.delta":
              emitEvent({
                type: "delta",
                channel: "response",
                text: typeof event.delta === "string" ? event.delta : ""
              });
              break;
            case "response.reasoning_summary_text.delta":
              emitEvent({
                type: "delta",
                channel: "thought",
                text: typeof event.delta === "string" ? event.delta : ""
              });
              break;
            case "response.refusal.delta":
              emitEvent({ type: "blocked" });
              break;
            default:
              break;
          }
        }
        return await stream.finalResponse();
      });
      modelVersion = typeof finalResponse.model === "string" ? finalResponse.model : request.model;
      emitEvent({ type: "model", modelVersion });
      if (finalResponse.error) {
        const message = typeof finalResponse.error.message === "string" ? finalResponse.error.message : "OpenAI response failed";
        throw new Error(message);
      }
      usageTokens = extractOpenAiUsageTokens(finalResponse.usage);
      // Visible answer text only: thought parts are excluded.
      const responseText = extractOpenAiResponseParts(finalResponse).parts.filter((p) => p.type === "text" && p.thought !== true).map((p) => p.text).join("").trim();
      const reasoningSummary = extractOpenAiReasoningSummary(finalResponse).trim();
      const stepCostUsd = estimateCallCostUsd({
        modelId: modelVersion,
        tokens: usageTokens,
        responseImages: 0
      });
      totalCostUsd += stepCostUsd;
      if (usageTokens) {
        emitEvent({ type: "usage", usage: usageTokens, costUsd: stepCostUsd, modelVersion });
      }
      const functionCalls = extractOpenAiFunctionCalls(finalResponse.output);
      const stepToolCalls = [];
      // No tool calls => this is the final answer; record the step and return.
      if (functionCalls.length === 0) {
        finalText = responseText;
        finalThoughts = reasoningSummary;
        steps.push({
          step: steps.length + 1,
          modelVersion,
          text: responseText || void 0,
          thoughts: reasoningSummary || void 0,
          toolCalls: [],
          usage: usageTokens,
          costUsd: stepCostUsd
        });
        return { text: finalText, thoughts: finalThoughts, steps, totalCostUsd };
      }
      // Pre-parse all tool arguments, then run every call in parallel inside
      // its own AsyncLocalStorage context (tool name/id/turn for logging).
      const callInputs = functionCalls.map((call, index) => {
        const toolIndex = index + 1;
        const toolId = buildToolLogId(turn, toolIndex);
        const toolName = call.name;
        const { value, error: parseError } = parseOpenAiToolArguments(call.arguments);
        return { call, toolName, value, parseError, toolId, turn, toolIndex };
      });
      const callResults = await Promise.all(
        callInputs.map(async (entry) => {
          return await toolCallContextStorage.run(
            {
              toolName: entry.toolName,
              toolId: entry.toolId,
              turn: entry.turn,
              toolIndex: entry.toolIndex
            },
            async () => {
              const { result, outputPayload } = await executeToolCall({
                toolName: entry.toolName,
                tool: request.tools[entry.toolName],
                rawInput: entry.value,
                parseError: entry.parseError
              });
              return { entry, result, outputPayload };
            }
          );
        })
      );
      // Next-turn input: one function_call_output item per executed call.
      const toolOutputs = [];
      for (const { entry, result, outputPayload } of callResults) {
        stepToolCalls.push({ ...result, callId: entry.call.call_id });
        toolOutputs.push({
          type: "function_call_output",
          call_id: entry.call.call_id,
          output: mergeToolOutput(outputPayload)
        });
      }
      steps.push({
        step: steps.length + 1,
        modelVersion,
        text: responseText || void 0,
        thoughts: reasoningSummary || void 0,
        toolCalls: stepToolCalls,
        usage: usageTokens,
        costUsd: stepCostUsd
      });
      previousResponseId = finalResponse.id;
      input = toolOutputs;
    }
    throw new Error(`Tool loop exceeded max steps (${maxSteps}) without final response.`);
  }
  // ---------- ChatGPT (Codex backend) branch ----------
  if (providerInfo.provider === "chatgpt") {
    const openAiFunctionTools = buildOpenAiFunctionTools(request.tools);
    const openAiNativeTools = toOpenAiTools(request.modelTools);
    const openAiTools = openAiNativeTools ? [...openAiNativeTools, ...openAiFunctionTools] : [...openAiFunctionTools];
    const reasoningEffort = resolveOpenAiReasoningEffort(
      request.model,
      request.openAiReasoningEffort
    );
    const toolLoopInput = toChatGptInput(contents);
    // Unlike the OpenAI branch, the full transcript is carried in `input`
    // (store: false below means the server keeps no conversation state).
    let input = [...toolLoopInput.input];
    for (let stepIndex = 0; stepIndex < maxSteps; stepIndex += 1) {
      const turn = stepIndex + 1;
      const response = await collectChatGptCodexResponse({
        request: {
          model: providerInfo.model,
          store: false,
          stream: true,
          instructions: toolLoopInput.instructions ?? "You are a helpful assistant.",
          input,
          include: ["reasoning.encrypted_content"],
          tools: openAiTools,
          tool_choice: "auto",
          parallel_tool_calls: true,
          reasoning: {
            effort: toOpenAiReasoningEffort(reasoningEffort),
            summary: "detailed"
          },
          text: { verbosity: resolveOpenAiVerbosity(request.model) }
        },
        signal: request.signal,
        onDelta: (delta) => {
          if (delta.thoughtDelta) {
            request.onEvent?.({ type: "delta", channel: "thought", text: delta.thoughtDelta });
          }
          if (delta.textDelta) {
            request.onEvent?.({ type: "delta", channel: "response", text: delta.textDelta });
          }
        }
      });
      // Model id is prefixed so cost estimation can use ChatGPT pricing.
      const modelVersion = response.model ? `chatgpt-${response.model}` : request.model;
      const usageTokens = extractChatGptUsageTokens(response.usage);
      const stepCostUsd = estimateCallCostUsd({
        modelId: modelVersion,
        tokens: usageTokens,
        responseImages: 0
      });
      totalCostUsd += stepCostUsd;
      const responseText = (response.text ?? "").trim();
      const reasoningSummaryText = (response.reasoningSummaryText ?? "").trim();
      const functionCalls = response.toolCalls ?? [];
      // No tool calls => final answer.
      if (functionCalls.length === 0) {
        finalText = responseText;
        finalThoughts = reasoningSummaryText;
        steps.push({
          step: steps.length + 1,
          modelVersion,
          text: responseText || void 0,
          thoughts: reasoningSummaryText || void 0,
          toolCalls: [],
          usage: usageTokens,
          costUsd: stepCostUsd
        });
        return { text: finalText, thoughts: finalThoughts, steps, totalCostUsd };
      }
      const toolCalls = [];
      const toolOutputs = [];
      const callInputs = functionCalls.map((call, index) => {
        const toolIndex = index + 1;
        const toolId = buildToolLogId(turn, toolIndex);
        const toolName = call.name;
        const { value, error: parseError } = parseOpenAiToolArguments(call.arguments);
        const ids = normalizeChatGptToolIds({ callId: call.callId, itemId: call.id });
        return { call, toolName, value, parseError, ids, toolId, turn, toolIndex };
      });
      // Same parallel execution pattern as the OpenAI branch.
      const callResults = await Promise.all(
        callInputs.map(async (entry) => {
          return await toolCallContextStorage.run(
            {
              toolName: entry.toolName,
              toolId: entry.toolId,
              turn: entry.turn,
              toolIndex: entry.toolIndex
            },
            async () => {
              const { result, outputPayload } = await executeToolCall({
                toolName: entry.toolName,
                tool: request.tools[entry.toolName],
                rawInput: entry.value,
                parseError: entry.parseError
              });
              return { entry, result, outputPayload };
            }
          );
        })
      );
      // Because the server is stateless here, echo back both the model's
      // function_call item and our function_call_output for each call.
      for (const { entry, result, outputPayload } of callResults) {
        toolCalls.push({ ...result, callId: entry.ids.callId });
        toolOutputs.push({
          type: "function_call",
          id: entry.ids.itemId,
          call_id: entry.ids.callId,
          name: entry.toolName,
          arguments: entry.call.arguments,
          status: "completed"
        });
        toolOutputs.push({
          type: "function_call_output",
          call_id: entry.ids.callId,
          output: mergeToolOutput(outputPayload)
        });
      }
      steps.push({
        step: steps.length + 1,
        modelVersion,
        text: responseText || void 0,
        thoughts: reasoningSummaryText || void 0,
        toolCalls,
        usage: usageTokens,
        costUsd: stepCostUsd
      });
      input = input.concat(toolOutputs);
    }
    throw new Error(`Tool loop exceeded max steps (${maxSteps}) without final response.`);
  }
  // ---------- Gemini branch (default) ----------
  const geminiFunctionTools = buildGeminiFunctionDeclarations(request.tools);
  const geminiNativeTools = toGeminiTools(request.modelTools);
  const geminiTools = geminiNativeTools ? geminiNativeTools.concat(geminiFunctionTools) : geminiFunctionTools;
  // Mutable transcript; model turns and tool results are appended each step.
  const geminiContents = contents.map(convertLlmContentToGeminiContent);
  for (let stepIndex = 0; stepIndex < maxSteps; stepIndex += 1) {
    const config = {
      maxOutputTokens: 32e3,
      tools: geminiTools,
      toolConfig: {
        functionCallingConfig: {
          mode: FunctionCallingConfigMode.VALIDATED
        }
      },
      thinkingConfig: resolveGeminiThinkingConfig(request.model)
    };
    const onEvent = request.onEvent;
    // Stream one model turn, collecting text, thoughts, raw parts, and
    // de-duplicated function calls.
    const response = await runGeminiCall(async (client) => {
      const stream = await client.models.generateContentStream({
        model: request.model,
        contents: geminiContents,
        config
      });
      let responseText = "";
      let thoughtsText = "";
      const modelParts = [];
      const functionCalls = [];
      // Streaming chunks can repeat function calls; de-dup by id when
      // present, otherwise by a (name, args) JSON key.
      const seenFunctionCallIds = /* @__PURE__ */ new Set();
      const seenFunctionCallKeys = /* @__PURE__ */ new Set();
      let latestUsageMetadata;
      let resolvedModelVersion;
      for await (const chunk of stream) {
        if (chunk.modelVersion) {
          resolvedModelVersion = chunk.modelVersion;
          onEvent?.({ type: "model", modelVersion: chunk.modelVersion });
        }
        if (chunk.usageMetadata) {
          latestUsageMetadata = chunk.usageMetadata;
        }
        const candidates = chunk.candidates;
        if (!candidates || candidates.length === 0) {
          continue;
        }
        // Only the first candidate is consumed.
        const primary = candidates[0];
        const parts = primary?.content?.parts;
        if (!parts || parts.length === 0) {
          continue;
        }
        for (const part of parts) {
          modelParts.push(part);
          const call = part.functionCall;
          if (call) {
            const id = typeof call.id === "string" ? call.id : "";
            const shouldAdd = (() => {
              if (id.length > 0) {
                if (seenFunctionCallIds.has(id)) {
                  return false;
                }
                seenFunctionCallIds.add(id);
                return true;
              }
              const key = JSON.stringify({ name: call.name ?? "", args: call.args ?? null });
              if (seenFunctionCallKeys.has(key)) {
                return false;
              }
              seenFunctionCallKeys.add(key);
              return true;
            })();
            if (shouldAdd) {
              functionCalls.push(call);
            }
          }
          if (typeof part.text === "string" && part.text.length > 0) {
            if (part.thought) {
              thoughtsText += part.text;
              onEvent?.({ type: "delta", channel: "thought", text: part.text });
            } else {
              responseText += part.text;
              onEvent?.({ type: "delta", channel: "response", text: part.text });
            }
          }
        }
      }
      return {
        responseText,
        thoughtsText,
        functionCalls,
        modelParts,
        usageMetadata: latestUsageMetadata,
        modelVersion: resolvedModelVersion ?? request.model
      };
    });
    const usageTokens = extractGeminiUsageTokens(response.usageMetadata);
    const modelVersion = response.modelVersion ?? request.model;
    const stepCostUsd = estimateCallCostUsd({
      modelId: modelVersion,
      tokens: usageTokens,
      responseImages: 0
    });
    totalCostUsd += stepCostUsd;
    // No tool calls => final answer.
    if (response.functionCalls.length === 0) {
      finalText = response.responseText.trim();
      finalThoughts = response.thoughtsText.trim();
      steps.push({
        step: steps.length + 1,
        modelVersion,
        text: finalText || void 0,
        thoughts: finalThoughts || void 0,
        toolCalls: [],
        usage: usageTokens,
        costUsd: stepCostUsd
      });
      return { text: finalText, thoughts: finalThoughts, steps, totalCostUsd };
    }
    const toolCalls = [];
    // Append the model turn to history, dropping thought text parts; if
    // nothing remains, reconstruct a minimal turn from text + calls.
    const modelPartsForHistory = response.modelParts.filter(
      (part) => !(typeof part.text === "string" && part.thought === true)
    );
    if (modelPartsForHistory.length > 0) {
      geminiContents.push({ role: "model", parts: modelPartsForHistory });
    } else {
      const parts = [];
      if (response.responseText) {
        parts.push({ text: response.responseText });
      }
      for (const call of response.functionCalls) {
        parts.push({ functionCall: call });
      }
      geminiContents.push({ role: "model", parts });
    }
    const responseParts = [];
    const callInputs = response.functionCalls.map((call, index) => {
      const turn = stepIndex + 1;
      const toolIndex = index + 1;
      const toolId = buildToolLogId(turn, toolIndex);
      const toolName = call.name ?? "unknown";
      const rawInput = call.args ?? {};
      return { call, toolName, rawInput, toolId, turn, toolIndex };
    });
    // Same parallel execution pattern as the other branches (no parseError:
    // Gemini delivers structured args, not a JSON string).
    const callResults = await Promise.all(
      callInputs.map(async (entry) => {
        return await toolCallContextStorage.run(
          {
            toolName: entry.toolName,
            toolId: entry.toolId,
            turn: entry.turn,
            toolIndex: entry.toolIndex
          },
          async () => {
            const { result, outputPayload } = await executeToolCall({
              toolName: entry.toolName,
              tool: request.tools[entry.toolName],
              rawInput: entry.rawInput
            });
            return { entry, result, outputPayload };
          }
        );
      })
    );
    for (const { entry, result, outputPayload } of callResults) {
      toolCalls.push({ ...result, callId: entry.call.id });
      // functionResponse.response must be an object; wrap scalars/arrays.
      const responsePayload = isPlainRecord(outputPayload) ? outputPayload : { output: outputPayload };
      responseParts.push({
        functionResponse: {
          name: entry.toolName,
          response: responsePayload,
          ...entry.call.id ? { id: entry.call.id } : {}
        }
      });
    }
    steps.push({
      step: steps.length + 1,
      modelVersion,
      text: response.responseText.trim() || void 0,
      thoughts: response.thoughtsText.trim() || void 0,
      toolCalls,
      usage: usageTokens,
      costUsd: stepCostUsd
    });
    // Tool results go back to the model as a user turn.
    geminiContents.push({ role: "user", parts: responseParts });
  }
  throw new Error(`Tool loop exceeded max steps (${maxSteps}) without final response.`);
}
|
|
3144
|
+
// Zod schema for the grading verdict returned by gradeGeneratedImage.
var IMAGE_GRADE_SCHEMA = z3.enum(["pass", "fail"]);
|
|
3145
|
+
/**
 * Asks a grading model whether a generated image satisfies its prompt.
 *
 * Builds a user turn containing the grading instructions, the original image
 * prompt, and the image itself (base64 inline data; mime type defaults to
 * image/png), then runs generateJson constrained to the "pass"/"fail" enum.
 *
 * Returns the parsed verdict: "pass" or "fail".
 */
async function gradeGeneratedImage(params) {
  const parts = [
    {
      type: "text",
      text: [
        params.gradingPrompt,
        "",
        "Image prompt to grade:",
        params.imagePrompt,
        "",
        'Respond with the JSON string "pass" or "fail".'
      ].join("\\n")
    },
    {
      // params.image.data is assumed to be a Buffer-like value — it is
      // converted with .toString("base64") for inline transport.
      type: "inlineData",
      data: params.image.data.toString("base64"),
      mimeType: params.image.mimeType ?? "image/png"
    }
  ];
  const { value } = await generateJson({
    model: params.model,
    contents: [{ role: "user", parts }],
    schema: IMAGE_GRADE_SCHEMA
  });
  return value;
}
|
|
3171
|
+
async function generateImages(request) {
|
|
3172
|
+
const maxAttempts = Math.max(1, Math.floor(request.maxAttempts ?? 4));
|
|
3173
|
+
const promptList = Array.from(request.imagePrompts);
|
|
3174
|
+
if (promptList.length === 0) {
|
|
3175
|
+
return [];
|
|
3176
|
+
}
|
|
3177
|
+
const numImages = promptList.length;
|
|
3178
|
+
const promptEntries = promptList.map((rawPrompt, arrayIndex) => {
|
|
3179
|
+
const trimmedPrompt = rawPrompt.trim();
|
|
3180
|
+
if (!trimmedPrompt) {
|
|
3181
|
+
throw new Error(`imagePrompts[${arrayIndex}] must be a non-empty string`);
|
|
3182
|
+
}
|
|
3183
|
+
return { index: arrayIndex + 1, prompt: trimmedPrompt };
|
|
3184
|
+
});
|
|
3185
|
+
const gradingPrompt = request.imageGradingPrompt.trim();
|
|
3186
|
+
if (!gradingPrompt) {
|
|
3187
|
+
throw new Error("imageGradingPrompt must be a non-empty string");
|
|
3188
|
+
}
|
|
3189
|
+
const addText = (parts, text) => {
|
|
3190
|
+
const lastPart = parts[parts.length - 1];
|
|
3191
|
+
if (lastPart !== void 0 && lastPart.type === "text") {
|
|
3192
|
+
lastPart.text = `${lastPart.text}\\n${text}`;
|
|
3193
|
+
} else {
|
|
3194
|
+
parts.push({ type: "text", text });
|
|
3195
|
+
}
|
|
3196
|
+
};
|
|
3197
|
+
const buildInitialPromptParts = () => {
|
|
3198
|
+
const parts = [];
|
|
3199
|
+
addText(
|
|
3200
|
+
parts,
|
|
3201
|
+
[
|
|
3202
|
+
`Please make all ${numImages} requested images:`,
|
|
3203
|
+
"",
|
|
3204
|
+
"Follow the style:",
|
|
3205
|
+
request.stylePrompt
|
|
3206
|
+
].join("\\n")
|
|
3207
|
+
);
|
|
3208
|
+
if (request.styleImages && request.styleImages.length > 0) {
|
|
3209
|
+
addText(
|
|
3210
|
+
parts,
|
|
3211
|
+
"\\nFollow the visual style, composition and the characters from these images:"
|
|
3212
|
+
);
|
|
3213
|
+
for (const styleImage of request.styleImages) {
|
|
3214
|
+
parts.push({
|
|
3215
|
+
type: "inlineData",
|
|
3216
|
+
data: styleImage.data.toString("base64"),
|
|
3217
|
+
mimeType: styleImage.mimeType
|
|
3218
|
+
});
|
|
3219
|
+
}
|
|
3220
|
+
}
|
|
3221
|
+
const lines = ["", "Image descriptions:"];
|
|
3222
|
+
for (const entry of promptEntries) {
|
|
3223
|
+
lines.push(`\\nImage ${entry.index}: ${entry.prompt}`);
|
|
3224
|
+
}
|
|
3225
|
+
lines.push("");
|
|
3226
|
+
lines.push(`Please make all ${numImages} images.`);
|
|
3227
|
+
addText(parts, lines.join("\\n"));
|
|
3228
|
+
return parts;
|
|
3229
|
+
};
|
|
3230
|
+
const buildContinuationPromptParts = (pending) => {
|
|
3231
|
+
const pendingIds = pending.map((entry) => entry.index).join(", ");
|
|
3232
|
+
const lines = [
|
|
3233
|
+
`Please continue generating the remaining images: ${pendingIds}.`,
|
|
3234
|
+
"",
|
|
3235
|
+
"Image descriptions:"
|
|
3236
|
+
];
|
|
3237
|
+
for (const entry of pending) {
|
|
3238
|
+
lines.push(`\\nImage ${entry.index}: ${entry.prompt}`);
|
|
3239
|
+
}
|
|
3240
|
+
lines.push(`\\nPlease make all ${pending.length} remaining images.`);
|
|
3241
|
+
return [{ type: "text", text: lines.join("\\n") }];
|
|
3242
|
+
};
|
|
3243
|
+
const contents = [{ role: "user", parts: buildInitialPromptParts() }];
|
|
3244
|
+
const orderedEntries = [...promptEntries];
|
|
3245
|
+
const resolvedImages = /* @__PURE__ */ new Map();
|
|
3246
|
+
const removeResolvedEntries = (resolved) => {
|
|
3247
|
+
if (resolved.size === 0) {
|
|
3248
|
+
return;
|
|
3249
|
+
}
|
|
3250
|
+
for (let i = promptEntries.length - 1; i >= 0; i -= 1) {
|
|
3251
|
+
const entry = promptEntries[i];
|
|
3252
|
+
if (!entry) {
|
|
3253
|
+
continue;
|
|
3254
|
+
}
|
|
3255
|
+
if (resolved.has(entry.index)) {
|
|
3256
|
+
promptEntries.splice(i, 1);
|
|
3257
|
+
}
|
|
3258
|
+
}
|
|
3259
|
+
};
|
|
3260
|
+
for (let attempt = 1; attempt <= maxAttempts; attempt += 1) {
|
|
3261
|
+
const result = await generateText({
|
|
3262
|
+
model: request.model,
|
|
3263
|
+
contents,
|
|
3264
|
+
responseModalities: ["IMAGE", "TEXT"],
|
|
3265
|
+
imageAspectRatio: request.imageAspectRatio,
|
|
3266
|
+
imageSize: request.imageSize ?? "2K"
|
|
3267
|
+
});
|
|
3268
|
+
if (result.blocked || !result.content) {
|
|
3269
|
+
continue;
|
|
3270
|
+
}
|
|
3271
|
+
const images = extractImages(result.content);
|
|
3272
|
+
if (images.length > 0 && promptEntries.length > 0) {
|
|
3273
|
+
const assignedCount = Math.min(images.length, promptEntries.length);
|
|
3274
|
+
const pendingAssignments = promptEntries.slice(0, assignedCount);
|
|
3275
|
+
const assignedImages = images.slice(0, assignedCount);
|
|
3276
|
+
const gradeResults = await Promise.all(
|
|
3277
|
+
pendingAssignments.map(
|
|
3278
|
+
(entry, index) => gradeGeneratedImage({
|
|
3279
|
+
gradingPrompt,
|
|
3280
|
+
imagePrompt: entry.prompt,
|
|
3281
|
+
image: (() => {
|
|
3282
|
+
const image = assignedImages[index];
|
|
3283
|
+
if (!image) {
|
|
3284
|
+
throw new Error("Image generation returned fewer images than expected.");
|
|
3285
|
+
}
|
|
3286
|
+
return image;
|
|
3287
|
+
})(),
|
|
3288
|
+
model: "gpt-5.2"
|
|
3289
|
+
})
|
|
3290
|
+
)
|
|
3291
|
+
);
|
|
3292
|
+
const passedEntries = /* @__PURE__ */ new Set();
|
|
3293
|
+
for (let i = 0; i < gradeResults.length; i += 1) {
|
|
3294
|
+
const grade = gradeResults[i];
|
|
3295
|
+
const entry = pendingAssignments[i];
|
|
3296
|
+
const image = assignedImages[i];
|
|
3297
|
+
if (!grade || !entry || !image) {
|
|
3298
|
+
continue;
|
|
3299
|
+
}
|
|
3300
|
+
if (grade === "pass") {
|
|
3301
|
+
resolvedImages.set(entry.index, image);
|
|
3302
|
+
passedEntries.add(entry.index);
|
|
3303
|
+
}
|
|
3304
|
+
}
|
|
3305
|
+
removeResolvedEntries(passedEntries);
|
|
3306
|
+
}
|
|
3307
|
+
if (promptEntries.length === 0) {
|
|
3308
|
+
break;
|
|
3309
|
+
}
|
|
3310
|
+
contents.push(result.content);
|
|
3311
|
+
contents.push({ role: "user", parts: buildContinuationPromptParts(promptEntries) });
|
|
3312
|
+
}
|
|
3313
|
+
const orderedImages = [];
|
|
3314
|
+
for (const entry of orderedEntries) {
|
|
3315
|
+
const image = resolvedImages.get(entry.index);
|
|
3316
|
+
if (image) {
|
|
3317
|
+
orderedImages.push(image);
|
|
3318
|
+
}
|
|
3319
|
+
}
|
|
3320
|
+
return orderedImages.slice(0, numImages);
|
|
3321
|
+
}
|
|
3322
|
+
/**
 * Generates images for `request.imagePrompts` in consecutive batches of
 * `request.batchSize`, optionally carrying the last `request.overlapSize`
 * generated images into the next batch's style references so the visual
 * style stays consistent across batches.
 *
 * @param {object} request - Same shape as `generateImages`' request, plus
 *   `batchSize` (prompts per call, > 0) and `overlapSize` (trailing images
 *   reused as style references; <= 0 disables the overlap).
 * @returns {Promise<Array>} Generated images in prompt order.
 * @throws {Error} When `batchSize` is not a positive number.
 */
async function generateImageInBatches(request) {
  const {
    batchSize,
    overlapSize,
    imagePrompts,
    styleImages: baseStyleImagesInput,
    ...rest
  } = request;
  // Normalise to a whole number of prompts per batch. A fractional batchSize
  // would make the loop step (`startIndex += batchSize`) disagree with
  // Array.prototype.slice (which truncates its arguments), re-requesting
  // some prompts; sibling code in this module floors attempt counts the
  // same way.
  const promptsPerBatch = Math.floor(batchSize);
  if (!Number.isFinite(batchSize) || promptsPerBatch <= 0) {
    throw new Error("batchSize must be greater than 0");
  }
  if (imagePrompts.length === 0) {
    return [];
  }
  // Copy so later mutations of the caller's array cannot leak into batches.
  const baseStyleImages = baseStyleImagesInput ? [...baseStyleImagesInput] : [];
  const generatedImages = [];
  const totalPrompts = imagePrompts.length;
  for (let startIndex = 0; startIndex < totalPrompts; startIndex += promptsPerBatch) {
    const endIndex = Math.min(startIndex + promptsPerBatch, totalPrompts);
    const batchPrompts = imagePrompts.slice(startIndex, endIndex);
    // From the second batch on, append the trailing `overlapSize` results as
    // extra style references for continuity.
    let styleImagesForBatch = baseStyleImages;
    if (overlapSize > 0 && generatedImages.length > 0) {
      const overlapImages = generatedImages.slice(
        Math.max(0, generatedImages.length - overlapSize)
      );
      if (overlapImages.length > 0) {
        styleImagesForBatch = [...baseStyleImages, ...overlapImages];
      }
    }
    const batchImages = await generateImages({
      ...rest,
      imagePrompts: batchPrompts,
      styleImages: styleImagesForBatch
    });
    generatedImages.push(...batchImages);
  }
  return generatedImages;
}
|
|
3360
|
+
/**
 * Removes Codex citation markup from `value`.
 *
 * A citation block is U+E200 "cite" U+E202 ... U+E201; whole blocks are
 * removed first, then any orphaned marker characters that remain.
 *
 * @param {string} value - Text that may contain citation markers.
 * @returns {{ text: string, stripped: boolean }} Cleaned text and whether
 *   anything was actually removed.
 */
function stripCodexCitationMarkers(value) {
  const cleaned = value
    .replace(/\uE200cite\uE202[^\uE201]*\uE201/gu, "")
    .replace(/[\uE200\uE201\uE202]/gu, "");
  return { text: cleaned, stripped: cleaned !== value };
}
|
|
3368
|
+
/**
 * Returns true when `value` already contains a markdown "## Sources"
 * heading on a line of its own (trailing whitespace allowed).
 *
 * @param {string} value - Markdown text to inspect.
 * @returns {boolean}
 */
function hasMarkdownSourcesSection(value) {
  // Deliberately no `g` flag: a global regex used with .test() keeps a
  // stateful lastIndex, which silently skips matches if the literal is
  // ever hoisted or reused. `m` anchors ^/$ per line.
  return /^##\s+Sources\s*$/mu.test(value);
}
|
|
3371
|
+
/**
 * Appends a "## Sources" section listing `sources` as `- <url>` bullets to
 * the end of `value`, separated by one blank line.
 *
 * Returns the right-trimmed input unchanged when `sources` is empty or the
 * document already has a "## Sources" heading.
 *
 * @param {string} value - Markdown document.
 * @param {string[]} sources - Source URLs to list.
 * @returns {string}
 */
function appendMarkdownSourcesSection(value, sources) {
  const body = value.trimEnd();
  if (sources.length === 0) {
    return body;
  }
  // Same heading check as hasMarkdownSourcesSection, inlined: skip appending
  // when a "## Sources" line is already present.
  if (/^##\s+Sources\s*$/mu.test(body)) {
    return body;
  }
  const bullets = sources.map((url) => `- <${url}>`);
  return [body, "", "## Sources", ...bullets].join("\n");
}
|
|
3385
|
+
export {
|
|
3386
|
+
LlmJsonCallError,
|
|
3387
|
+
appendMarkdownSourcesSection,
|
|
3388
|
+
configureGemini,
|
|
3389
|
+
convertGooglePartsToLlmParts,
|
|
3390
|
+
encodeChatGptAuthJson,
|
|
3391
|
+
encodeChatGptAuthJsonB64,
|
|
3392
|
+
estimateCallCostUsd,
|
|
3393
|
+
exchangeChatGptOauthCode,
|
|
3394
|
+
generateImageInBatches,
|
|
3395
|
+
generateImages,
|
|
3396
|
+
generateJson,
|
|
3397
|
+
generateText,
|
|
3398
|
+
getChatGptAuthProfile,
|
|
3399
|
+
getCurrentToolCallContext,
|
|
3400
|
+
isGeminiModelId,
|
|
3401
|
+
loadEnvFromFile,
|
|
3402
|
+
loadLocalEnv,
|
|
3403
|
+
parseJsonFromLlmText,
|
|
3404
|
+
refreshChatGptOauthToken,
|
|
3405
|
+
runToolLoop,
|
|
3406
|
+
sanitisePartForLogging,
|
|
3407
|
+
streamText,
|
|
3408
|
+
stripCodexCitationMarkers,
|
|
3409
|
+
toGeminiJsonSchema,
|
|
3410
|
+
tool
|
|
3411
|
+
};
|
|
3412
|
+
//# sourceMappingURL=index.js.map
|