@oh-my-pi/pi-ai 3.15.1 → 3.20.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/bun-imports.d.ts +14 -0
- package/src/cli.ts +16 -1
- package/src/index.ts +2 -0
- package/src/models.generated.ts +22 -73
- package/src/models.ts +16 -9
- package/src/providers/google-shared.ts +1 -1
- package/src/providers/google-vertex.ts +355 -0
- package/src/providers/openai-codex/constants.ts +25 -0
- package/src/providers/openai-codex/prompts/codex-instructions.md +105 -0
- package/src/providers/openai-codex/prompts/codex.ts +217 -0
- package/src/providers/openai-codex/prompts/pi-codex-bridge.ts +48 -0
- package/src/providers/openai-codex/request-transformer.ts +328 -0
- package/src/providers/openai-codex/response-handler.ts +133 -0
- package/src/providers/openai-codex-responses.ts +619 -0
- package/src/stream.ts +116 -7
- package/src/types.ts +9 -1
- package/src/utils/oauth/index.ts +14 -0
- package/src/utils/oauth/openai-codex.ts +334 -0
- package/src/utils/oauth/types.ts +7 -1
|
@@ -0,0 +1,328 @@
|
|
|
1
|
+
import { TOOL_REMAP_MESSAGE } from "./prompts/codex";
|
|
2
|
+
import { CODEX_PI_BRIDGE } from "./prompts/pi-codex-bridge";
|
|
3
|
+
|
|
4
|
+
/** Reasoning settings attached to a Codex Responses API request. */
export interface ReasoningConfig {
  /**
   * Reasoning effort level. "none" and "xhigh" are only honored by model
   * families that support them; getReasoningConfig clamps unsupported values.
   */
  effort: "none" | "minimal" | "low" | "medium" | "high" | "xhigh";
  /** Reasoning-summary mode requested from the API. */
  summary: "auto" | "concise" | "detailed" | "off" | "on";
}
|
|
8
|
+
|
|
9
|
+
/** Caller-tunable knobs applied when transforming a request for the Codex backend. */
export interface CodexRequestOptions {
  /** Requested reasoning effort; clamped per model family (see getReasoningConfig). */
  reasoningEffort?: ReasoningConfig["effort"];
  /** Requested reasoning-summary mode; null/undefined falls back to "auto". */
  reasoningSummary?: ReasoningConfig["summary"] | null;
  /** Output text verbosity; defaults to "medium" when omitted. */
  textVerbosity?: "low" | "medium" | "high";
  /** Extra response fields to request; defaults to ["reasoning.encrypted_content"]. */
  include?: string[];
}
|
|
15
|
+
|
|
16
|
+
/**
 * Loosely-typed item in a Responses API `input` array. Only the fields the
 * transformer inspects are declared; shapes vary by item `type`
 * ("message", "function_call", "function_call_output", "item_reference", ...).
 */
export interface InputItem {
  /** Server-assigned item id; stripped before resending (store=false). */
  id?: string | null;
  /** Item kind discriminator, e.g. "message" or "function_call". */
  type?: string | null;
  /** Message role ("user", "assistant", "developer") — only on message items. */
  role?: string;
  /** Message content; string or array of content parts depending on role/type. */
  content?: unknown;
  /** Correlates a function_call with its function_call_output. */
  call_id?: string | null;
  /** Tool name — present on function_call items. */
  name?: string;
  /** Tool result payload — present on function_call_output items. */
  output?: unknown;
  /** JSON-encoded tool arguments — present on function_call items. */
  arguments?: string;
}
|
|
26
|
+
|
|
27
|
+
/**
 * Mutable Responses API request body. transformRequestBody rewrites this
 * in place before it is sent to the Codex backend. The index signature
 * allows unknown passthrough fields to survive untouched.
 */
export interface RequestBody {
  /** Model id; normalized to a canonical Codex model before sending. */
  model: string;
  /** Forced to false — Codex requests are not stored server-side. */
  store?: boolean;
  /** Forced to true — the Codex endpoint is consumed as SSE. */
  stream?: boolean;
  /** System instructions; replaced with the Codex instructions. */
  instructions?: string;
  /** Conversation items (messages, tool calls, tool outputs). */
  input?: InputItem[];
  /** Tool definitions; presence triggers injection of a bridge/remap message. */
  tools?: unknown;
  temperature?: number;
  /** Reasoning settings; overwritten with the resolved ReasoningConfig. */
  reasoning?: Partial<ReasoningConfig>;
  /** Text output options. */
  text?: {
    verbosity?: "low" | "medium" | "high";
  };
  /** Response fields to include; defaults to ["reasoning.encrypted_content"]. */
  include?: string[];
  prompt_cache_key?: string;
  /** Deleted by the transformer before sending. */
  max_output_tokens?: number;
  /** Deleted by the transformer before sending. */
  max_completion_tokens?: number;
  [key: string]: unknown;
}
|
|
45
|
+
|
|
46
|
+
// Maps caller-supplied model ids — including effort-suffixed aliases
// ("gpt-5.2-high") and legacy gpt-5 names — to the canonical model id the
// Codex backend accepts. Looked up exactly first, then case-insensitively;
// unmatched ids fall through to the fuzzy rules in normalizeModel.
const MODEL_MAP: Record<string, string> = {
  "gpt-5.1-codex": "gpt-5.1-codex",
  "gpt-5.1-codex-low": "gpt-5.1-codex",
  "gpt-5.1-codex-medium": "gpt-5.1-codex",
  "gpt-5.1-codex-high": "gpt-5.1-codex",
  "gpt-5.1-codex-max": "gpt-5.1-codex-max",
  "gpt-5.1-codex-max-low": "gpt-5.1-codex-max",
  "gpt-5.1-codex-max-medium": "gpt-5.1-codex-max",
  "gpt-5.1-codex-max-high": "gpt-5.1-codex-max",
  "gpt-5.1-codex-max-xhigh": "gpt-5.1-codex-max",
  "gpt-5.2": "gpt-5.2",
  "gpt-5.2-none": "gpt-5.2",
  "gpt-5.2-low": "gpt-5.2",
  "gpt-5.2-medium": "gpt-5.2",
  "gpt-5.2-high": "gpt-5.2",
  "gpt-5.2-xhigh": "gpt-5.2",
  "gpt-5.2-codex": "gpt-5.2-codex",
  "gpt-5.2-codex-low": "gpt-5.2-codex",
  "gpt-5.2-codex-medium": "gpt-5.2-codex",
  "gpt-5.2-codex-high": "gpt-5.2-codex",
  "gpt-5.2-codex-xhigh": "gpt-5.2-codex",
  "gpt-5.1-codex-mini": "gpt-5.1-codex-mini",
  "gpt-5.1-codex-mini-medium": "gpt-5.1-codex-mini",
  "gpt-5.1-codex-mini-high": "gpt-5.1-codex-mini",
  "gpt-5.1": "gpt-5.1",
  "gpt-5.1-none": "gpt-5.1",
  "gpt-5.1-low": "gpt-5.1",
  "gpt-5.1-medium": "gpt-5.1",
  "gpt-5.1-high": "gpt-5.1",
  "gpt-5.1-chat-latest": "gpt-5.1",
  // Legacy gpt-5 aliases upgraded to their 5.1 equivalents.
  "gpt-5-codex": "gpt-5.1-codex",
  "codex-mini-latest": "gpt-5.1-codex-mini",
  "gpt-5-codex-mini": "gpt-5.1-codex-mini",
  "gpt-5-codex-mini-medium": "gpt-5.1-codex-mini",
  "gpt-5-codex-mini-high": "gpt-5.1-codex-mini",
  "gpt-5": "gpt-5.1",
  "gpt-5-mini": "gpt-5.1",
  "gpt-5-nano": "gpt-5.1",
};
|
|
85
|
+
|
|
86
|
+
function getNormalizedModel(modelId: string): string | undefined {
|
|
87
|
+
if (MODEL_MAP[modelId]) return MODEL_MAP[modelId];
|
|
88
|
+
const lowerModelId = modelId.toLowerCase();
|
|
89
|
+
const match = Object.keys(MODEL_MAP).find((key) => key.toLowerCase() === lowerModelId);
|
|
90
|
+
return match ? MODEL_MAP[match] : undefined;
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
export function normalizeModel(model: string | undefined): string {
|
|
94
|
+
if (!model) return "gpt-5.1";
|
|
95
|
+
|
|
96
|
+
const modelId = model.includes("/") ? model.split("/").pop()! : model;
|
|
97
|
+
const mappedModel = getNormalizedModel(modelId);
|
|
98
|
+
if (mappedModel) return mappedModel;
|
|
99
|
+
|
|
100
|
+
const normalized = modelId.toLowerCase();
|
|
101
|
+
|
|
102
|
+
if (normalized.includes("gpt-5.2-codex") || normalized.includes("gpt 5.2 codex")) {
|
|
103
|
+
return "gpt-5.2-codex";
|
|
104
|
+
}
|
|
105
|
+
if (normalized.includes("gpt-5.2") || normalized.includes("gpt 5.2")) {
|
|
106
|
+
return "gpt-5.2";
|
|
107
|
+
}
|
|
108
|
+
if (normalized.includes("gpt-5.1-codex-max") || normalized.includes("gpt 5.1 codex max")) {
|
|
109
|
+
return "gpt-5.1-codex-max";
|
|
110
|
+
}
|
|
111
|
+
if (normalized.includes("gpt-5.1-codex-mini") || normalized.includes("gpt 5.1 codex mini")) {
|
|
112
|
+
return "gpt-5.1-codex-mini";
|
|
113
|
+
}
|
|
114
|
+
if (
|
|
115
|
+
normalized.includes("codex-mini-latest") ||
|
|
116
|
+
normalized.includes("gpt-5-codex-mini") ||
|
|
117
|
+
normalized.includes("gpt 5 codex mini")
|
|
118
|
+
) {
|
|
119
|
+
return "codex-mini-latest";
|
|
120
|
+
}
|
|
121
|
+
if (normalized.includes("gpt-5.1-codex") || normalized.includes("gpt 5.1 codex")) {
|
|
122
|
+
return "gpt-5.1-codex";
|
|
123
|
+
}
|
|
124
|
+
if (normalized.includes("gpt-5.1") || normalized.includes("gpt 5.1")) {
|
|
125
|
+
return "gpt-5.1";
|
|
126
|
+
}
|
|
127
|
+
if (normalized.includes("codex")) {
|
|
128
|
+
return "gpt-5.1-codex";
|
|
129
|
+
}
|
|
130
|
+
if (normalized.includes("gpt-5") || normalized.includes("gpt 5")) {
|
|
131
|
+
return "gpt-5.1";
|
|
132
|
+
}
|
|
133
|
+
|
|
134
|
+
return "gpt-5.1";
|
|
135
|
+
}
|
|
136
|
+
|
|
137
|
+
// Resolves the reasoning effort/summary pair for a (normalized) model name,
// clamping any caller-requested effort down to what that model family
// supports. Clamp order matters: codex-mini rules run first, then the
// xhigh/none downgrades, then the codex "minimal" bump.
function getReasoningConfig(modelName: string | undefined, options: CodexRequestOptions = {}): ReasoningConfig {
  const normalizedName = modelName?.toLowerCase() ?? "";

  // Classify the model family by substring; both "-" and " " separated
  // spellings are accepted. "codex-mini-latest" is already covered by the
  // "codex-mini" check but is listed for clarity.
  const isGpt52Codex = normalizedName.includes("gpt-5.2-codex") || normalizedName.includes("gpt 5.2 codex");
  const isGpt52General = (normalizedName.includes("gpt-5.2") || normalizedName.includes("gpt 5.2")) && !isGpt52Codex;
  const isCodexMax = normalizedName.includes("codex-max") || normalizedName.includes("codex max");
  const isCodexMini =
    normalizedName.includes("codex-mini") ||
    normalizedName.includes("codex mini") ||
    normalizedName.includes("codex_mini") ||
    normalizedName.includes("codex-mini-latest");
  const isCodex = normalizedName.includes("codex") && !isCodexMini;
  // "mini"/"nano" general models default lower — but codex-mini has its own rules.
  const isLightweight = !isCodexMini && (normalizedName.includes("nano") || normalizedName.includes("mini"));
  const isGpt51General =
    (normalizedName.includes("gpt-5.1") || normalizedName.includes("gpt 5.1")) &&
    !isCodex &&
    !isCodexMax &&
    !isCodexMini;

  // Capability flags per family.
  const supportsXhigh = isGpt52General || isGpt52Codex || isCodexMax;
  const supportsNone = isGpt52General || isGpt51General;

  // Family-specific default when the caller does not request an effort.
  const defaultEffort: ReasoningConfig["effort"] = isCodexMini
    ? "medium"
    : supportsXhigh
      ? "high"
      : isLightweight
        ? "minimal"
        : "medium";

  let effort = options.reasoningEffort || defaultEffort;

  // codex-mini only accepts "medium" or "high": raise low-end requests,
  // lower "xhigh", and force anything else to "medium".
  if (isCodexMini) {
    if (effort === "minimal" || effort === "low" || effort === "none") {
      effort = "medium";
    }
    if (effort === "xhigh") {
      effort = "high";
    }
    if (effort !== "high" && effort !== "medium") {
      effort = "medium";
    }
  }

  // Downgrade "xhigh" for families that do not support it.
  if (!supportsXhigh && effort === "xhigh") {
    effort = "high";
  }

  // Upgrade "none" for families that do not support it.
  if (!supportsNone && effort === "none") {
    effort = "low";
  }

  // Codex (non-mini) models do not use "minimal"; bump it to "low".
  if (isCodex && effort === "minimal") {
    effort = "low";
  }

  return {
    effort,
    // null/undefined both fall back to "auto".
    summary: options.reasoningSummary ?? "auto",
  };
}
|
|
198
|
+
|
|
199
|
+
function filterInput(input: InputItem[] | undefined): InputItem[] | undefined {
|
|
200
|
+
if (!Array.isArray(input)) return input;
|
|
201
|
+
|
|
202
|
+
return input
|
|
203
|
+
.filter((item) => item.type !== "item_reference")
|
|
204
|
+
.map((item) => {
|
|
205
|
+
if (item.id != null) {
|
|
206
|
+
const { id: _id, ...rest } = item;
|
|
207
|
+
return rest as InputItem;
|
|
208
|
+
}
|
|
209
|
+
return item;
|
|
210
|
+
});
|
|
211
|
+
}
|
|
212
|
+
|
|
213
|
+
function addCodexBridgeMessage(
|
|
214
|
+
input: InputItem[] | undefined,
|
|
215
|
+
hasTools: boolean,
|
|
216
|
+
systemPrompt?: string,
|
|
217
|
+
): InputItem[] | undefined {
|
|
218
|
+
if (!hasTools || !Array.isArray(input)) return input;
|
|
219
|
+
|
|
220
|
+
const bridgeText = systemPrompt ? `${CODEX_PI_BRIDGE}\n\n${systemPrompt}` : CODEX_PI_BRIDGE;
|
|
221
|
+
|
|
222
|
+
const bridgeMessage: InputItem = {
|
|
223
|
+
type: "message",
|
|
224
|
+
role: "developer",
|
|
225
|
+
content: [
|
|
226
|
+
{
|
|
227
|
+
type: "input_text",
|
|
228
|
+
text: bridgeText,
|
|
229
|
+
},
|
|
230
|
+
],
|
|
231
|
+
};
|
|
232
|
+
|
|
233
|
+
return [bridgeMessage, ...input];
|
|
234
|
+
}
|
|
235
|
+
|
|
236
|
+
function addToolRemapMessage(input: InputItem[] | undefined, hasTools: boolean): InputItem[] | undefined {
|
|
237
|
+
if (!hasTools || !Array.isArray(input)) return input;
|
|
238
|
+
|
|
239
|
+
const toolRemapMessage: InputItem = {
|
|
240
|
+
type: "message",
|
|
241
|
+
role: "developer",
|
|
242
|
+
content: [
|
|
243
|
+
{
|
|
244
|
+
type: "input_text",
|
|
245
|
+
text: TOOL_REMAP_MESSAGE,
|
|
246
|
+
},
|
|
247
|
+
],
|
|
248
|
+
};
|
|
249
|
+
|
|
250
|
+
return [toolRemapMessage, ...input];
|
|
251
|
+
}
|
|
252
|
+
|
|
253
|
+
export async function transformRequestBody(
|
|
254
|
+
body: RequestBody,
|
|
255
|
+
codexInstructions: string,
|
|
256
|
+
options: CodexRequestOptions = {},
|
|
257
|
+
codexMode = true,
|
|
258
|
+
systemPrompt?: string,
|
|
259
|
+
): Promise<RequestBody> {
|
|
260
|
+
const normalizedModel = normalizeModel(body.model);
|
|
261
|
+
|
|
262
|
+
body.model = normalizedModel;
|
|
263
|
+
body.store = false;
|
|
264
|
+
body.stream = true;
|
|
265
|
+
body.instructions = codexInstructions;
|
|
266
|
+
|
|
267
|
+
if (body.input && Array.isArray(body.input)) {
|
|
268
|
+
body.input = filterInput(body.input);
|
|
269
|
+
|
|
270
|
+
if (codexMode) {
|
|
271
|
+
body.input = addCodexBridgeMessage(body.input, !!body.tools, systemPrompt);
|
|
272
|
+
} else {
|
|
273
|
+
body.input = addToolRemapMessage(body.input, !!body.tools);
|
|
274
|
+
}
|
|
275
|
+
|
|
276
|
+
if (body.input) {
|
|
277
|
+
const functionCallIds = new Set(
|
|
278
|
+
body.input
|
|
279
|
+
.filter((item) => item.type === "function_call" && typeof item.call_id === "string")
|
|
280
|
+
.map((item) => item.call_id as string),
|
|
281
|
+
);
|
|
282
|
+
|
|
283
|
+
body.input = body.input.map((item) => {
|
|
284
|
+
if (item.type === "function_call_output" && typeof item.call_id === "string") {
|
|
285
|
+
const callId = item.call_id as string;
|
|
286
|
+
if (!functionCallIds.has(callId)) {
|
|
287
|
+
const itemRecord = item as unknown as Record<string, unknown>;
|
|
288
|
+
const toolName = typeof itemRecord.name === "string" ? itemRecord.name : "tool";
|
|
289
|
+
let text = "";
|
|
290
|
+
try {
|
|
291
|
+
const output = itemRecord.output;
|
|
292
|
+
text = typeof output === "string" ? output : JSON.stringify(output);
|
|
293
|
+
} catch {
|
|
294
|
+
text = String(itemRecord.output ?? "");
|
|
295
|
+
}
|
|
296
|
+
if (text.length > 16000) {
|
|
297
|
+
text = `${text.slice(0, 16000)}\n...[truncated]`;
|
|
298
|
+
}
|
|
299
|
+
return {
|
|
300
|
+
type: "message",
|
|
301
|
+
role: "assistant",
|
|
302
|
+
content: `[Previous ${toolName} result; call_id=${callId}]: ${text}`,
|
|
303
|
+
} as InputItem;
|
|
304
|
+
}
|
|
305
|
+
}
|
|
306
|
+
return item;
|
|
307
|
+
});
|
|
308
|
+
}
|
|
309
|
+
}
|
|
310
|
+
|
|
311
|
+
const reasoningConfig = getReasoningConfig(normalizedModel, options);
|
|
312
|
+
body.reasoning = {
|
|
313
|
+
...body.reasoning,
|
|
314
|
+
...reasoningConfig,
|
|
315
|
+
};
|
|
316
|
+
|
|
317
|
+
body.text = {
|
|
318
|
+
...body.text,
|
|
319
|
+
verbosity: options.textVerbosity || "medium",
|
|
320
|
+
};
|
|
321
|
+
|
|
322
|
+
body.include = options.include || ["reasoning.encrypted_content"];
|
|
323
|
+
|
|
324
|
+
delete body.max_output_tokens;
|
|
325
|
+
delete body.max_completion_tokens;
|
|
326
|
+
|
|
327
|
+
return body;
|
|
328
|
+
}
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
/** One rate-limit window reported by the Codex backend (via x-codex-* headers). */
export type CodexRateLimit = {
  /** Percentage of the window's quota already consumed. */
  used_percent?: number;
  /** Length of the rate-limit window, in minutes. */
  window_minutes?: number;
  /** When the window resets — a unix timestamp, presumably in seconds (parseCodexError multiplies by 1000); confirm. */
  resets_at?: number;
};
|
|
6
|
+
|
|
7
|
+
/** Rate-limit windows extracted from the x-codex-primary-* / x-codex-secondary-* headers. */
export type CodexRateLimits = {
  primary?: CodexRateLimit;
  secondary?: CodexRateLimit;
};
|
|
11
|
+
|
|
12
|
+
/** Structured error extracted from a failed Codex HTTP response. */
export type CodexErrorInfo = {
  /** Best available error message (body error.message, friendly message, or raw text). */
  message: string;
  /** HTTP status code of the failed response. */
  status: number;
  /** Human-readable usage-limit explanation, set for quota/429 failures. */
  friendlyMessage?: string;
  /** Rate-limit windows parsed from response headers, when present. */
  rateLimits?: CodexRateLimits;
  /** Raw response body text, for diagnostics. */
  raw?: string;
};
|
|
19
|
+
|
|
20
|
+
export async function parseCodexError(response: Response): Promise<CodexErrorInfo> {
|
|
21
|
+
const raw = await response.text();
|
|
22
|
+
let message = raw || response.statusText || "Request failed";
|
|
23
|
+
let friendlyMessage: string | undefined;
|
|
24
|
+
let rateLimits: CodexRateLimits | undefined;
|
|
25
|
+
|
|
26
|
+
try {
|
|
27
|
+
const parsed = JSON.parse(raw) as { error?: Record<string, unknown> };
|
|
28
|
+
const err = parsed?.error ?? {};
|
|
29
|
+
|
|
30
|
+
const headers = response.headers;
|
|
31
|
+
const primary = {
|
|
32
|
+
used_percent: toNumber(headers.get("x-codex-primary-used-percent")),
|
|
33
|
+
window_minutes: toInt(headers.get("x-codex-primary-window-minutes")),
|
|
34
|
+
resets_at: toInt(headers.get("x-codex-primary-reset-at")),
|
|
35
|
+
};
|
|
36
|
+
const secondary = {
|
|
37
|
+
used_percent: toNumber(headers.get("x-codex-secondary-used-percent")),
|
|
38
|
+
window_minutes: toInt(headers.get("x-codex-secondary-window-minutes")),
|
|
39
|
+
resets_at: toInt(headers.get("x-codex-secondary-reset-at")),
|
|
40
|
+
};
|
|
41
|
+
rateLimits =
|
|
42
|
+
primary.used_percent !== undefined || secondary.used_percent !== undefined
|
|
43
|
+
? { primary, secondary }
|
|
44
|
+
: undefined;
|
|
45
|
+
|
|
46
|
+
const code = String((err as { code?: string; type?: string }).code ?? (err as { type?: string }).type ?? "");
|
|
47
|
+
const resetsAt = (err as { resets_at?: number }).resets_at ?? primary.resets_at ?? secondary.resets_at;
|
|
48
|
+
const mins = resetsAt ? Math.max(0, Math.round((resetsAt * 1000 - Date.now()) / 60000)) : undefined;
|
|
49
|
+
|
|
50
|
+
if (/usage_limit_reached|usage_not_included|rate_limit_exceeded/i.test(code) || response.status === 429) {
|
|
51
|
+
const planType = (err as { plan_type?: string }).plan_type;
|
|
52
|
+
const plan = planType ? ` (${String(planType).toLowerCase()} plan)` : "";
|
|
53
|
+
const when = mins !== undefined ? ` Try again in ~${mins} min.` : "";
|
|
54
|
+
friendlyMessage = `You have hit your ChatGPT usage limit${plan}.${when}`.trim();
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
const errMessage = (err as { message?: string }).message;
|
|
58
|
+
message = errMessage || friendlyMessage || message;
|
|
59
|
+
} catch {
|
|
60
|
+
// raw body not JSON
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
return {
|
|
64
|
+
message,
|
|
65
|
+
status: response.status,
|
|
66
|
+
friendlyMessage,
|
|
67
|
+
rateLimits,
|
|
68
|
+
raw: raw,
|
|
69
|
+
};
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
export async function* parseCodexSseStream(response: Response): AsyncGenerator<Record<string, unknown>> {
|
|
73
|
+
if (!response.body) {
|
|
74
|
+
return;
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
const reader = response.body.getReader();
|
|
78
|
+
const decoder = new TextDecoder();
|
|
79
|
+
let buffer = "";
|
|
80
|
+
|
|
81
|
+
while (true) {
|
|
82
|
+
const { done, value } = await reader.read();
|
|
83
|
+
if (done) break;
|
|
84
|
+
buffer += decoder.decode(value, { stream: true });
|
|
85
|
+
|
|
86
|
+
let index = buffer.indexOf("\n\n");
|
|
87
|
+
while (index !== -1) {
|
|
88
|
+
const chunk = buffer.slice(0, index);
|
|
89
|
+
buffer = buffer.slice(index + 2);
|
|
90
|
+
const event = parseSseChunk(chunk);
|
|
91
|
+
if (event) yield event;
|
|
92
|
+
index = buffer.indexOf("\n\n");
|
|
93
|
+
}
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
if (buffer.trim()) {
|
|
97
|
+
const event = parseSseChunk(buffer);
|
|
98
|
+
if (event) yield event;
|
|
99
|
+
}
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
function parseSseChunk(chunk: string): Record<string, unknown> | null {
|
|
103
|
+
const lines = chunk.split("\n");
|
|
104
|
+
const dataLines: string[] = [];
|
|
105
|
+
|
|
106
|
+
for (const line of lines) {
|
|
107
|
+
if (line.startsWith("data:")) {
|
|
108
|
+
dataLines.push(line.slice(5).trim());
|
|
109
|
+
}
|
|
110
|
+
}
|
|
111
|
+
|
|
112
|
+
if (dataLines.length === 0) return null;
|
|
113
|
+
const data = dataLines.join("\n").trim();
|
|
114
|
+
if (!data || data === "[DONE]") return null;
|
|
115
|
+
|
|
116
|
+
try {
|
|
117
|
+
return JSON.parse(data) as Record<string, unknown>;
|
|
118
|
+
} catch {
|
|
119
|
+
return null;
|
|
120
|
+
}
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
function toNumber(v: string | null): number | undefined {
|
|
124
|
+
if (v == null) return undefined;
|
|
125
|
+
const n = Number(v);
|
|
126
|
+
return Number.isFinite(n) ? n : undefined;
|
|
127
|
+
}
|
|
128
|
+
|
|
129
|
+
function toInt(v: string | null): number | undefined {
|
|
130
|
+
if (v == null) return undefined;
|
|
131
|
+
const n = parseInt(v, 10);
|
|
132
|
+
return Number.isFinite(n) ? n : undefined;
|
|
133
|
+
}
|