@mariozechner/pi-ai 0.46.0 → 0.48.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/constants.d.ts +6 -0
- package/dist/constants.d.ts.map +1 -0
- package/dist/constants.js +14 -0
- package/dist/constants.js.map +1 -0
- package/dist/index.d.ts +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +1 -1
- package/dist/index.js.map +1 -1
- package/dist/models.generated.d.ts +20 -111
- package/dist/models.generated.d.ts.map +1 -1
- package/dist/models.generated.js +50 -136
- package/dist/models.generated.js.map +1 -1
- package/dist/providers/amazon-bedrock.d.ts.map +1 -1
- package/dist/providers/amazon-bedrock.js +7 -3
- package/dist/providers/amazon-bedrock.js.map +1 -1
- package/dist/providers/google-shared.d.ts.map +1 -1
- package/dist/providers/google-shared.js +2 -1
- package/dist/providers/google-shared.js.map +1 -1
- package/dist/providers/openai-codex-responses.d.ts +0 -2
- package/dist/providers/openai-codex-responses.d.ts.map +1 -1
- package/dist/providers/openai-codex-responses.js +476 -489
- package/dist/providers/openai-codex-responses.js.map +1 -1
- package/dist/providers/openai-completions.d.ts.map +1 -1
- package/dist/providers/openai-completions.js +18 -10
- package/dist/providers/openai-completions.js.map +1 -1
- package/dist/providers/transform-messages.d.ts.map +1 -1
- package/dist/providers/transform-messages.js +7 -0
- package/dist/providers/transform-messages.js.map +1 -1
- package/dist/utils/validation.d.ts +1 -1
- package/dist/utils/validation.d.ts.map +1 -1
- package/dist/utils/validation.js +7 -4
- package/dist/utils/validation.js.map +1 -1
- package/package.json +1 -1
- package/dist/providers/openai-codex/constants.d.ts +0 -21
- package/dist/providers/openai-codex/constants.d.ts.map +0 -1
- package/dist/providers/openai-codex/constants.js +0 -21
- package/dist/providers/openai-codex/constants.js.map +0 -1
- package/dist/providers/openai-codex/index.d.ts +0 -7
- package/dist/providers/openai-codex/index.d.ts.map +0 -1
- package/dist/providers/openai-codex/index.js +0 -7
- package/dist/providers/openai-codex/index.js.map +0 -1
- package/dist/providers/openai-codex/prompts/codex.d.ts +0 -3
- package/dist/providers/openai-codex/prompts/codex.d.ts.map +0 -1
- package/dist/providers/openai-codex/prompts/codex.js +0 -323
- package/dist/providers/openai-codex/prompts/codex.js.map +0 -1
- package/dist/providers/openai-codex/prompts/pi-codex-bridge.d.ts +0 -7
- package/dist/providers/openai-codex/prompts/pi-codex-bridge.d.ts.map +0 -1
- package/dist/providers/openai-codex/prompts/pi-codex-bridge.js +0 -50
- package/dist/providers/openai-codex/prompts/pi-codex-bridge.js.map +0 -1
- package/dist/providers/openai-codex/prompts/system-prompt.d.ts +0 -10
- package/dist/providers/openai-codex/prompts/system-prompt.d.ts.map +0 -1
- package/dist/providers/openai-codex/prompts/system-prompt.js +0 -15
- package/dist/providers/openai-codex/prompts/system-prompt.js.map +0 -1
- package/dist/providers/openai-codex/request-transformer.d.ts +0 -44
- package/dist/providers/openai-codex/request-transformer.d.ts.map +0 -1
- package/dist/providers/openai-codex/request-transformer.js +0 -99
- package/dist/providers/openai-codex/request-transformer.js.map +0 -1
- package/dist/providers/openai-codex/response-handler.d.ts +0 -19
- package/dist/providers/openai-codex/response-handler.d.ts.map +0 -1
- package/dist/providers/openai-codex/response-handler.js +0 -107
- package/dist/providers/openai-codex/response-handler.js.map +0 -1
package/dist/providers/openai-codex-responses.js +476 -489

@@ -1,17 +1,43 @@
 import os from "node:os";
+import { PI_STATIC_INSTRUCTIONS } from "../constants.js";
 import { calculateCost } from "../models.js";
 import { getEnvApiKey } from "../stream.js";
 import { AssistantMessageEventStream } from "../utils/event-stream.js";
 import { parseStreamingJson } from "../utils/json-parse.js";
 import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
-import { CODEX_BASE_URL, JWT_CLAIM_PATH, OPENAI_HEADER_VALUES, OPENAI_HEADERS, URL_PATHS, } from "./openai-codex/constants.js";
-import { getCodexInstructions } from "./openai-codex/prompts/codex.js";
-import { buildCodexPiBridge } from "./openai-codex/prompts/pi-codex-bridge.js";
-import { buildCodexSystemPrompt } from "./openai-codex/prompts/system-prompt.js";
-import { transformRequestBody, } from "./openai-codex/request-transformer.js";
-import { parseCodexError, parseCodexSseStream } from "./openai-codex/response-handler.js";
 import { transformMessages } from "./transform-messages.js";
-
+// ============================================================================
+// Configuration
+// ============================================================================
+const CODEX_URL = "https://chatgpt.com/backend-api/codex/responses";
+const JWT_CLAIM_PATH = "https://api.openai.com/auth";
+const MAX_RETRIES = 3;
+const BASE_DELAY_MS = 1000;
+// ============================================================================
+// Retry Helpers
+// ============================================================================
+function isRetryableError(status, errorText) {
+if (status === 429 || status === 500 || status === 502 || status === 503 || status === 504) {
+return true;
+}
+return /rate.?limit|overloaded|service.?unavailable|upstream.?connect|connection.?refused/i.test(errorText);
+}
+function sleep(ms, signal) {
+return new Promise((resolve, reject) => {
+if (signal?.aborted) {
+reject(new Error("Request was aborted"));
+return;
+}
+const timeout = setTimeout(resolve, ms);
+signal?.addEventListener("abort", () => {
+clearTimeout(timeout);
+reject(new Error("Request was aborted"));
+});
+});
+}
+// ============================================================================
+// Main Stream Function
+// ============================================================================
 export const streamOpenAICodexResponses = (model, context, options) => {
 const stream = new AssistantMessageEventStream();
 (async () => {
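The constants and helpers added above feed the retry loop that appears in the next hunk: up to MAX_RETRIES additional attempts, each delayed by BASE_DELAY_MS * 2 ** attempt and gated by isRetryableError (HTTP 429/500/502/503/504 or rate-limit/overload text). A minimal standalone sketch of the resulting backoff schedule, using only the constants from this diff; the loop below is illustrative, not code from the package:

// Illustration only: the delays the new code passes to sleep() before re-fetching.
const MAX_RETRIES = 3;
const BASE_DELAY_MS = 1000;
for (let attempt = 0; attempt < MAX_RETRIES; attempt++) {
    console.log(`retry ${attempt + 1} after ${BASE_DELAY_MS * 2 ** attempt} ms`);
}
// => retry 1 after 1000 ms, retry 2 after 2000 ms, retry 3 after 4000 ms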
@@ -37,492 +63,422 @@ export const streamOpenAICodexResponses = (model, context, options) => {
 if (!apiKey) {
 throw new Error(`No API key for provider: ${model.provider}`);
 }
-const accountId =
-const
-const
-const
-
-
-
-
-
-
-};
-if (options?.maxTokens) {
-params.max_output_tokens = options.maxTokens;
-}
-if (options?.temperature !== undefined) {
-params.temperature = options.temperature;
-}
-if (context.tools) {
-params.tools = convertTools(context.tools);
-}
-const codexInstructions = getCodexInstructions();
-const bridgeText = buildCodexPiBridge(context.tools);
-const systemPrompt = buildCodexSystemPrompt({
-codexInstructions,
-bridgeText,
-userSystemPrompt: context.systemPrompt,
-});
-params.instructions = systemPrompt.instructions;
-const codexOptions = {
-reasoningEffort: options?.reasoningEffort,
-reasoningSummary: options?.reasoningSummary ?? undefined,
-textVerbosity: options?.textVerbosity,
-include: options?.include,
-};
-const transformedBody = await transformRequestBody(params, codexOptions, systemPrompt);
-const reasoningEffort = transformedBody.reasoning?.effort ?? null;
-const headers = createCodexHeaders(model.headers, accountId, apiKey, options?.sessionId);
-logCodexDebug("codex request", {
-url,
-model: params.model,
-reasoningEffort,
-headers: redactHeaders(headers),
-});
-const response = await fetch(url, {
-method: "POST",
-headers,
-body: JSON.stringify(transformedBody),
-signal: options?.signal,
-});
-logCodexDebug("codex response", {
-url: response.url,
-status: response.status,
-statusText: response.statusText,
-contentType: response.headers.get("content-type") || null,
-cfRay: response.headers.get("cf-ray") || null,
-});
-if (!response.ok) {
-const info = await parseCodexError(response);
-throw new Error(info.friendlyMessage || info.message);
-}
-if (!response.body) {
-throw new Error("No response body");
-}
-stream.push({ type: "start", partial: output });
-let currentItem = null;
-let currentBlock = null;
-const blocks = output.content;
-const blockIndex = () => blocks.length - 1;
-for await (const rawEvent of parseCodexSseStream(response)) {
-const eventType = typeof rawEvent.type === "string" ? rawEvent.type : "";
-if (!eventType)
-continue;
-if (eventType === "response.output_item.added") {
-const item = rawEvent.item;
-if (item.type === "reasoning") {
-currentItem = item;
-currentBlock = { type: "thinking", thinking: "" };
-output.content.push(currentBlock);
-stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
-}
-else if (item.type === "message") {
-currentItem = item;
-currentBlock = { type: "text", text: "" };
-output.content.push(currentBlock);
-stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
-}
-else if (item.type === "function_call") {
-currentItem = item;
-currentBlock = {
-type: "toolCall",
-id: `${item.call_id}|${item.id}`,
-name: item.name,
-arguments: {},
-partialJson: item.arguments || "",
-};
-output.content.push(currentBlock);
-stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
-}
-}
-else if (eventType === "response.reasoning_summary_part.added") {
-if (currentItem && currentItem.type === "reasoning") {
-currentItem.summary = currentItem.summary || [];
-currentItem.summary.push(rawEvent.part);
-}
-}
-else if (eventType === "response.reasoning_summary_text.delta") {
-if (currentItem && currentItem.type === "reasoning" && currentBlock?.type === "thinking") {
-currentItem.summary = currentItem.summary || [];
-const lastPart = currentItem.summary[currentItem.summary.length - 1];
-if (lastPart) {
-const delta = rawEvent.delta || "";
-currentBlock.thinking += delta;
-lastPart.text += delta;
-stream.push({
-type: "thinking_delta",
-contentIndex: blockIndex(),
-delta,
-partial: output,
-});
-}
-}
-}
-else if (eventType === "response.reasoning_summary_part.done") {
-if (currentItem && currentItem.type === "reasoning" && currentBlock?.type === "thinking") {
-currentItem.summary = currentItem.summary || [];
-const lastPart = currentItem.summary[currentItem.summary.length - 1];
-if (lastPart) {
-currentBlock.thinking += "\n\n";
-lastPart.text += "\n\n";
-stream.push({
-type: "thinking_delta",
-contentIndex: blockIndex(),
-delta: "\n\n",
-partial: output,
-});
-}
-}
+const accountId = extractAccountId(apiKey);
+const body = buildRequestBody(model, context, options);
+const headers = buildHeaders(model.headers, accountId, apiKey, options?.sessionId);
+const bodyJson = JSON.stringify(body);
+// Fetch with retry logic for rate limits and transient errors
+let response;
+let lastError;
+for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) {
+if (options?.signal?.aborted) {
+throw new Error("Request was aborted");
 }
-
-
-
-
-
-
-
+try {
+response = await fetch(CODEX_URL, {
+method: "POST",
+headers,
+body: bodyJson,
+signal: options?.signal,
+});
+if (response.ok) {
+break;
 }
-
-
-
-
-
-const delta = rawEvent.delta || "";
-currentBlock.text += delta;
-lastPart.text += delta;
-stream.push({
-type: "text_delta",
-contentIndex: blockIndex(),
-delta,
-partial: output,
-});
-}
+const errorText = await response.text();
+if (attempt < MAX_RETRIES && isRetryableError(response.status, errorText)) {
+const delayMs = BASE_DELAY_MS * 2 ** attempt;
+await sleep(delayMs, options?.signal);
+continue;
 }
+// Parse error for friendly message on final attempt or non-retryable error
+const fakeResponse = new Response(errorText, {
+status: response.status,
+statusText: response.statusText,
+});
+const info = await parseErrorResponse(fakeResponse);
+throw new Error(info.friendlyMessage || info.message);
 }
-
-if (
-
-
-const delta = rawEvent.delta || "";
-currentBlock.text += delta;
-lastPart.refusal += delta;
-stream.push({
-type: "text_delta",
-contentIndex: blockIndex(),
-delta,
-partial: output,
-});
+catch (error) {
+if (error instanceof Error) {
+if (error.name === "AbortError" || error.message === "Request was aborted") {
+throw new Error("Request was aborted");
 }
 }
-
-
-if (
-const
-
-
-stream.push({
-type: "toolcall_delta",
-contentIndex: blockIndex(),
-delta,
-partial: output,
-});
+lastError = error instanceof Error ? error : new Error(String(error));
+// Network errors are retryable
+if (attempt < MAX_RETRIES && !lastError.message.includes("usage limit")) {
+const delayMs = BASE_DELAY_MS * 2 ** attempt;
+await sleep(delayMs, options?.signal);
+continue;
 }
+throw lastError;
 }
-else if (eventType === "response.output_item.done") {
-const item = rawEvent.item;
-if (item.type === "reasoning" && currentBlock?.type === "thinking") {
-currentBlock.thinking = item.summary?.map((s) => s.text).join("\n\n") || "";
-currentBlock.thinkingSignature = JSON.stringify(item);
-stream.push({
-type: "thinking_end",
-contentIndex: blockIndex(),
-content: currentBlock.thinking,
-partial: output,
-});
-currentBlock = null;
-}
-else if (item.type === "message" && currentBlock?.type === "text") {
-currentBlock.text = item.content.map((c) => (c.type === "output_text" ? c.text : c.refusal)).join("");
-currentBlock.textSignature = item.id;
-stream.push({
-type: "text_end",
-contentIndex: blockIndex(),
-content: currentBlock.text,
-partial: output,
-});
-currentBlock = null;
-}
-else if (item.type === "function_call") {
-const toolCall = {
-type: "toolCall",
-id: `${item.call_id}|${item.id}`,
-name: item.name,
-arguments: JSON.parse(item.arguments),
-};
-stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
-}
-}
-else if (eventType === "response.completed" || eventType === "response.done") {
-const response = rawEvent.response;
-if (response?.usage) {
-const cachedTokens = response.usage.input_tokens_details?.cached_tokens || 0;
-output.usage = {
-input: (response.usage.input_tokens || 0) - cachedTokens,
-output: response.usage.output_tokens || 0,
-cacheRead: cachedTokens,
-cacheWrite: 0,
-totalTokens: response.usage.total_tokens || 0,
-cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
-};
-}
-calculateCost(model, output.usage);
-output.stopReason = mapStopReason(response?.status);
-if (output.content.some((b) => b.type === "toolCall") && output.stopReason === "stop") {
-output.stopReason = "toolUse";
-}
-}
-else if (eventType === "error") {
-const code = rawEvent.code || "";
-const message = rawEvent.message || "";
-throw new Error(formatCodexErrorEvent(rawEvent, code, message));
-}
-else if (eventType === "response.failed") {
-throw new Error(formatCodexFailure(rawEvent) ?? "Codex response failed");
-}
 }
+if (!response?.ok) {
+throw lastError ?? new Error("Failed after retries");
+}
+if (!response.body) {
+throw new Error("No response body");
+}
+stream.push({ type: "start", partial: output });
+await processStream(response, output, stream, model);
 if (options?.signal?.aborted) {
 throw new Error("Request was aborted");
 }
-if (output.stopReason === "aborted" || output.stopReason === "error") {
-throw new Error("Codex response failed");
-}
 stream.push({ type: "done", reason: output.stopReason, message: output });
 stream.end();
 }
 catch (error) {
-for (const block of output.content)
-delete block.index;
 output.stopReason = options?.signal?.aborted ? "aborted" : "error";
-output.errorMessage = error instanceof Error ? error.message :
+output.errorMessage = error instanceof Error ? error.message : String(error);
 stream.push({ type: "error", reason: output.stopReason, error: output });
 stream.end();
 }
 })();
 return stream;
 };
-
-
-
-
-
-
-
-
-
-
-
+// ============================================================================
+// Request Building
+// ============================================================================
+function buildRequestBody(model, context, options) {
+const systemPrompt = buildSystemPrompt(context.systemPrompt);
+const messages = convertMessages(model, context);
+// Prepend developer messages
+const developerMessages = systemPrompt.developerMessages.map((text) => ({
+type: "message",
+role: "developer",
+content: [{ type: "input_text", text }],
+}));
+const body = {
+model: model.id,
+store: false,
+stream: true,
+instructions: systemPrompt.instructions,
+input: [...developerMessages, ...messages],
+text: { verbosity: options?.textVerbosity || "medium" },
+include: ["reasoning.encrypted_content"],
+prompt_cache_key: options?.sessionId,
+tool_choice: "auto",
+parallel_tool_calls: true,
+};
+if (options?.temperature !== undefined) {
+body.temperature = options.temperature;
 }
-
-
-
+if (context.tools) {
+body.tools = context.tools.map((tool) => ({
+type: "function",
+name: tool.name,
+description: tool.description,
+parameters: tool.parameters,
+strict: null,
+}));
 }
-
-
-
-
-
-if (!CODEX_DEBUG)
-return;
-if (details) {
-console.error(`[codex] ${message}`, details);
-return;
+if (options?.reasoningEffort !== undefined) {
+body.reasoning = {
+effort: clampReasoningEffort(model.id, options.reasoningEffort),
+summary: options.reasoningSummary ?? "auto",
+};
 }
-
+return body;
 }
-function
-
-
-
-
-
-
-
-
-lower.includes("session") ||
-lower.includes("conversation") ||
-lower === "cookie") {
-redacted[key] = "[redacted]";
-continue;
+function buildSystemPrompt(userSystemPrompt) {
+// PI_STATIC_INSTRUCTIONS is whitelisted and must be in the instructions field.
+// User's system prompt goes in developer messages, with the static prefix stripped.
+const staticPrefix = PI_STATIC_INSTRUCTIONS.trim();
+const developerMessages = [];
+if (userSystemPrompt?.trim()) {
+let dynamicPart = userSystemPrompt.trim();
+if (dynamicPart.startsWith(staticPrefix)) {
+dynamicPart = dynamicPart.slice(staticPrefix.length).trim();
 }
-
+if (dynamicPart)
+developerMessages.push(dynamicPart);
 }
-return
+return { instructions: staticPrefix, developerMessages };
 }
-function
-
+function clampReasoningEffort(modelId, effort) {
+const id = modelId.includes("/") ? modelId.split("/").pop() : modelId;
+if (id.startsWith("gpt-5.2") && effort === "minimal")
+return "low";
+if (id === "gpt-5.1" && effort === "xhigh")
+return "high";
+if (id === "gpt-5.1-codex-mini")
+return effort === "high" || effort === "xhigh" ? "high" : "medium";
+return effort;
 }
-
-
-
-
-
-
-
-
+// ============================================================================
+// Message Conversion
+// ============================================================================
+function convertMessages(model, context) {
+const messages = [];
+const transformed = transformMessages(context.messages, model);
+for (const msg of transformed) {
+if (msg.role === "user") {
+messages.push(convertUserMessage(msg, model));
+}
+else if (msg.role === "assistant") {
+messages.push(...convertAssistantMessage(msg));
+}
+else if (msg.role === "toolResult") {
+messages.push(...convertToolResult(msg, model));
+}
 }
-
-
+return messages.filter(Boolean);
+}
+function convertUserMessage(msg, model) {
+if (typeof msg.content === "string") {
+return {
+role: "user",
+content: [{ type: "input_text", text: sanitizeSurrogates(msg.content) }],
+};
 }
+const content = msg.content.map((item) => {
+if (item.type === "text") {
+return { type: "input_text", text: sanitizeSurrogates(item.text || "") };
+}
+return {
+type: "input_image",
+detail: "auto",
+image_url: `data:${item.mimeType};base64,${item.data}`,
+};
+});
+const filtered = model.input.includes("image") ? content : content.filter((c) => c.type !== "input_image");
+return filtered.length > 0 ? { role: "user", content: filtered } : null;
 }
-function
-const
-const
-
-
-
+function convertAssistantMessage(msg) {
+const output = [];
+for (const block of msg.content) {
+if (block.type === "thinking" && msg.stopReason !== "error" && block.thinkingSignature) {
+output.push(JSON.parse(block.thinkingSignature));
+}
+else if (block.type === "text") {
+output.push({
+type: "message",
+role: "assistant",
+content: [{ type: "output_text", text: sanitizeSurrogates(block.text), annotations: [] }],
+status: "completed",
+});
+}
+else if (block.type === "toolCall" && msg.stopReason !== "error") {
+const [callId, id] = block.id.split("|");
+output.push({
+type: "function_call",
+id,
+call_id: callId,
+name: block.name,
+arguments: JSON.stringify(block.arguments),
+});
+}
 }
-return
+return output;
 }
-function
-
-
-
-
-
-
+function convertToolResult(msg, model) {
+const output = [];
+const textResult = msg.content
+.filter((c) => c.type === "text")
+.map((c) => c.text || "")
+.join("\n");
+const hasImages = msg.content.some((c) => c.type === "image");
+output.push({
+type: "function_call_output",
+call_id: msg.toolCallId.split("|")[0],
+output: sanitizeSurrogates(textResult || "(see attached image)"),
+});
+if (hasImages && model.input.includes("image")) {
+const imageParts = msg.content
+.filter((c) => c.type === "image")
+.map((c) => ({
+type: "input_image",
+detail: "auto",
+image_url: `data:${c.mimeType};base64,${c.data}`,
+}));
+output.push({
+role: "user",
+content: [{ type: "input_text", text: "Attached image(s) from tool result:" }, ...imageParts],
+});
 }
-
-h2 = Math.imul(h2 ^ (h2 >>> 16), 2246822507) ^ Math.imul(h1 ^ (h1 >>> 13), 3266489909);
-return (h2 >>> 0).toString(36) + (h1 >>> 0).toString(36);
+return output;
 }
-
-
-
-
-
-
-
-
-
-
-
+// ============================================================================
+// Response Processing
+// ============================================================================
+async function processStream(response, output, stream, model) {
+let currentItem = null;
+let currentBlock = null;
+const blockIndex = () => output.content.length - 1;
+for await (const event of parseSSE(response)) {
+const type = event.type;
+switch (type) {
+case "response.output_item.added": {
+const item = event.item;
+if (item.type === "reasoning") {
+currentItem = item;
+currentBlock = { type: "thinking", thinking: "" };
+output.content.push(currentBlock);
+stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
+}
+else if (item.type === "message") {
+currentItem = item;
+currentBlock = { type: "text", text: "" };
+output.content.push(currentBlock);
+stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
+}
+else if (item.type === "function_call") {
+currentItem = item;
+currentBlock = {
+type: "toolCall",
+id: `${item.call_id}|${item.id}`,
+name: item.name,
+arguments: {},
+partialJson: item.arguments || "",
+};
+output.content.push(currentBlock);
+stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output });
+}
+break;
+}
+case "response.reasoning_summary_part.added": {
+if (currentItem?.type === "reasoning") {
+currentItem.summary = currentItem.summary || [];
+currentItem.summary.push(event.part);
+}
+break;
 }
-
-
-
-
-
-
-
+case "response.reasoning_summary_text.delta": {
+if (currentItem?.type === "reasoning" && currentBlock?.type === "thinking") {
+const delta = event.delta || "";
+const lastPart = currentItem.summary?.[currentItem.summary.length - 1];
+if (lastPart) {
+currentBlock.thinking += delta;
+lastPart.text += delta;
+stream.push({ type: "thinking_delta", contentIndex: blockIndex(), delta, partial: output });
 }
-
-
-detail: "auto",
-image_url: `data:${item.mimeType};base64,${item.data}`,
-};
-});
-const filteredContent = !model.input.includes("image")
-? content.filter((c) => c.type !== "input_image")
-: content;
-if (filteredContent.length === 0)
-continue;
-messages.push({
-role: "user",
-content: filteredContent,
-});
+}
+break;
 }
-
-
-
-
-
-
-
-output.push(reasoningItem);
+case "response.reasoning_summary_part.done": {
+if (currentItem?.type === "reasoning" && currentBlock?.type === "thinking") {
+const lastPart = currentItem.summary?.[currentItem.summary.length - 1];
+if (lastPart) {
+currentBlock.thinking += "\n\n";
+lastPart.text += "\n\n";
+stream.push({ type: "thinking_delta", contentIndex: blockIndex(), delta: "\n\n", partial: output });
 }
 }
-
-
-
-
-
+break;
+}
+case "response.content_part.added": {
+if (currentItem?.type === "message") {
+currentItem.content = currentItem.content || [];
+const part = event.part;
+if (part && (part.type === "output_text" || part.type === "refusal")) {
+currentItem.content.push(part);
+}
+}
+break;
+}
+case "response.output_text.delta": {
+if (currentItem?.type === "message" && currentBlock?.type === "text") {
+const lastPart = currentItem.content[currentItem.content.length - 1];
+if (lastPart?.type === "output_text") {
+const delta = event.delta || "";
+currentBlock.text += delta;
+lastPart.text += delta;
+stream.push({ type: "text_delta", contentIndex: blockIndex(), delta, partial: output });
 }
-
-
+}
+break;
+}
+case "response.refusal.delta": {
+if (currentItem?.type === "message" && currentBlock?.type === "text") {
+const lastPart = currentItem.content[currentItem.content.length - 1];
+if (lastPart?.type === "refusal") {
+const delta = event.delta || "";
+currentBlock.text += delta;
+lastPart.refusal += delta;
+stream.push({ type: "text_delta", contentIndex: blockIndex(), delta, partial: output });
 }
-
-
-
-
-
-
+}
+break;
+}
+case "response.function_call_arguments.delta": {
+if (currentItem?.type === "function_call" && currentBlock?.type === "toolCall") {
+const delta = event.delta || "";
+currentBlock.partialJson += delta;
+currentBlock.arguments = parseStreamingJson(currentBlock.partialJson);
+stream.push({ type: "toolcall_delta", contentIndex: blockIndex(), delta, partial: output });
+}
+break;
+}
+case "response.output_item.done": {
+const item = event.item;
+if (item.type === "reasoning" && currentBlock?.type === "thinking") {
+currentBlock.thinking = item.summary?.map((s) => s.text).join("\n\n") || "";
+currentBlock.thinkingSignature = JSON.stringify(item);
+stream.push({
+type: "thinking_end",
+contentIndex: blockIndex(),
+content: currentBlock.thinking,
+partial: output,
 });
+currentBlock = null;
 }
-else if (
-
-
-
-
-
-
+else if (item.type === "message" && currentBlock?.type === "text") {
+currentBlock.text = item.content.map((c) => (c.type === "output_text" ? c.text : c.refusal)).join("");
+currentBlock.textSignature = item.id;
+stream.push({
+type: "text_end",
+contentIndex: blockIndex(),
+content: currentBlock.text,
+partial: output,
 });
+currentBlock = null;
+}
+else if (item.type === "function_call") {
+const toolCall = {
+type: "toolCall",
+id: `${item.call_id}|${item.id}`,
+name: item.name,
+arguments: JSON.parse(item.arguments),
+};
+stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output });
 }
+break;
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-output: sanitizeSurrogates(hasText ? textResult : "(see attached image)"),
-});
-if (hasImages && model.input.includes("image")) {
-const contentParts = [];
-contentParts.push({
-type: "input_text",
-text: "Attached image(s) from tool result:",
-});
-for (const block of msg.content) {
-if (block.type === "image") {
-contentParts.push({
-type: "input_image",
-detail: "auto",
-image_url: `data:${block.mimeType};base64,${block.data}`,
-});
-}
+case "response.completed":
+case "response.done": {
+const resp = event.response;
+if (resp?.usage) {
+const cached = resp.usage.input_tokens_details?.cached_tokens || 0;
+output.usage = {
+input: (resp.usage.input_tokens || 0) - cached,
+output: resp.usage.output_tokens || 0,
+cacheRead: cached,
+cacheWrite: 0,
+totalTokens: resp.usage.total_tokens || 0,
+cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
+};
+calculateCost(model, output.usage);
 }
-
-
-
-}
+output.stopReason = mapStopReason(resp?.status);
+if (output.content.some((b) => b.type === "toolCall") && output.stopReason === "stop") {
+output.stopReason = "toolUse";
+}
+break;
+}
+case "error": {
+const code = event.code || "";
+const message = event.message || "";
+throw new Error(`Codex error: ${message || code || JSON.stringify(event)}`);
+}
+case "response.failed": {
+const msg = event.response?.error?.message;
+throw new Error(msg || "Codex response failed");
 }
 }
-msgIndex++;
 }
-return messages;
-}
-function convertTools(tools) {
-return tools.map((tool) => ({
-type: "function",
-name: tool.name,
-description: tool.description,
-parameters: tool.parameters,
-strict: null,
-}));
 }
 function mapStopReason(status) {
-if (!status)
-return "stop";
 switch (status) {
 case "completed":
 return "stop";
@@ -531,70 +487,101 @@ function mapStopReason(status) {
 case "failed":
 case "cancelled":
 return "error";
-case "in_progress":
-case "queued":
-return "stop";
 default:
 return "stop";
 }
 }
-
-
-
+// ============================================================================
+// SSE Parsing
+// ============================================================================
+async function* parseSSE(response) {
+if (!response.body)
+return;
+const reader = response.body.getReader();
+const decoder = new TextDecoder();
+let buffer = "";
+while (true) {
+const { done, value } = await reader.read();
+if (done)
+break;
+buffer += decoder.decode(value, { stream: true });
+let idx = buffer.indexOf("\n\n");
+while (idx !== -1) {
+const chunk = buffer.slice(0, idx);
+buffer = buffer.slice(idx + 2);
+const dataLines = chunk
+.split("\n")
+.filter((l) => l.startsWith("data:"))
+.map((l) => l.slice(5).trim());
+if (dataLines.length > 0) {
+const data = dataLines.join("\n").trim();
+if (data && data !== "[DONE]") {
+try {
+yield JSON.parse(data);
+}
+catch { }
+}
+}
+idx = buffer.indexOf("\n\n");
+}
 }
-return null;
-}
-function getString(value) {
-return typeof value === "string" ? value : undefined;
-}
-function truncate(text, limit) {
-if (text.length <= limit)
-return text;
-return `${text.slice(0, limit)}...[truncated ${text.length - limit}]`;
 }
-
-
-
-
-const
-
-
-if (code)
-meta.push(`code=${code}`);
-if (status)
-meta.push(`status=${status}`);
-if (message) {
-const metaText = meta.length ? ` (${meta.join(", ")})` : "";
-return `Codex response failed: ${message}${metaText}`;
-}
-if (meta.length) {
-return `Codex response failed (${meta.join(", ")})`;
-}
+// ============================================================================
+// Error Handling
+// ============================================================================
+async function parseErrorResponse(response) {
+const raw = await response.text();
+let message = raw || response.statusText || "Request failed";
+let friendlyMessage;
 try {
-
-
-
-
+const parsed = JSON.parse(raw);
+const err = parsed?.error;
+if (err) {
+const code = err.code || err.type || "";
+if (/usage_limit_reached|usage_not_included|rate_limit_exceeded/i.test(code) || response.status === 429) {
+const plan = err.plan_type ? ` (${err.plan_type.toLowerCase()} plan)` : "";
+const mins = err.resets_at
+? Math.max(0, Math.round((err.resets_at * 1000 - Date.now()) / 60000))
+: undefined;
+const when = mins !== undefined ? ` Try again in ~${mins} min.` : "";
+friendlyMessage = `You have hit your ChatGPT usage limit${plan}.${when}`.trim();
+}
+message = err.message || friendlyMessage || message;
+}
 }
+catch { }
+return { message, friendlyMessage };
 }
-
-
-
-
-}
-const meta = [];
-if (code)
-meta.push(`code=${code}`);
-if (message)
-meta.push(`message=${message}`);
-if (meta.length > 0) {
-return `Codex error event (${meta.join(", ")})`;
-}
+// ============================================================================
+// Auth & Headers
+// ============================================================================
+function extractAccountId(token) {
 try {
-
+const parts = token.split(".");
+if (parts.length !== 3)
+throw new Error("Invalid token");
+const payload = JSON.parse(Buffer.from(parts[1], "base64").toString("utf-8"));
+const accountId = payload?.[JWT_CLAIM_PATH]?.chatgpt_account_id;
+if (!accountId)
+throw new Error("No account ID in token");
+return accountId;
 }
 catch {
-
+throw new Error("Failed to extract accountId from token");
+}
+}
+function buildHeaders(initHeaders, accountId, token, sessionId) {
+const headers = new Headers(initHeaders);
+headers.set("Authorization", `Bearer ${token}`);
+headers.set("chatgpt-account-id", accountId);
+headers.set("OpenAI-Beta", "responses=experimental");
+headers.set("originator", "pi");
+headers.set("User-Agent", `pi (${os.platform()} ${os.release()}; ${os.arch()})`);
+headers.set("accept", "text/event-stream");
+headers.set("content-type", "application/json");
+if (sessionId) {
+headers.set("session_id", sessionId);
 }
+return headers;
 }
 //# sourceMappingURL=openai-codex-responses.js.map
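The parseSSE generator added in the last hunk splits the response body on blank lines, keeps only data: fields, and JSON-parses each payload while skipping the [DONE] sentinel. A small standalone sketch of that splitting applied to a made-up two-frame buffer (the frame contents are hypothetical, for illustration only):

// Illustration of the frame splitting parseSSE performs; the input below is made up.
const buffer =
    'data: {"type":"response.output_text.delta","delta":"Hel"}\n\n' +
    'data: {"type":"response.output_text.delta","delta":"lo"}\n\n' +
    'data: [DONE]\n\n';
for (const chunk of buffer.split("\n\n")) {
    const data = chunk
        .split("\n")
        .filter((l) => l.startsWith("data:"))
        .map((l) => l.slice(5).trim())
        .join("\n")
        .trim();
    if (!data || data === "[DONE]") continue;
    console.log(JSON.parse(data)); // two delta events: "Hel", then "lo"
}

Each parsed object is then dispatched by processStream according to its type field.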