zidane 4.0.2 → 4.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +196 -614
- package/dist/agent-BoV5Twdl.d.ts +2347 -0
- package/dist/agent-BoV5Twdl.d.ts.map +1 -0
- package/dist/contexts-3Arvn7yR.js +321 -0
- package/dist/contexts-3Arvn7yR.js.map +1 -0
- package/dist/contexts.d.ts +2 -25
- package/dist/contexts.js +2 -10
- package/dist/errors-D1lhd6mX.js +118 -0
- package/dist/errors-D1lhd6mX.js.map +1 -0
- package/dist/index-28otmfLX.d.ts +400 -0
- package/dist/index-28otmfLX.d.ts.map +1 -0
- package/dist/index-BfSdALzk.d.ts +113 -0
- package/dist/index-BfSdALzk.d.ts.map +1 -0
- package/dist/index-DPsd0qwm.d.ts +254 -0
- package/dist/index-DPsd0qwm.d.ts.map +1 -0
- package/dist/index.d.ts +5 -95
- package/dist/index.js +141 -271
- package/dist/index.js.map +1 -0
- package/dist/interpolate-CukJwP2G.js +887 -0
- package/dist/interpolate-CukJwP2G.js.map +1 -0
- package/dist/mcp-8wClKY-3.js +771 -0
- package/dist/mcp-8wClKY-3.js.map +1 -0
- package/dist/mcp.d.ts +2 -4
- package/dist/mcp.js +2 -13
- package/dist/messages-z5Pq20p7.js +1020 -0
- package/dist/messages-z5Pq20p7.js.map +1 -0
- package/dist/presets-Cs7_CsMk.js +39 -0
- package/dist/presets-Cs7_CsMk.js.map +1 -0
- package/dist/presets.d.ts +2 -43
- package/dist/presets.js +2 -17
- package/dist/providers-CX-R-Oy-.js +969 -0
- package/dist/providers-CX-R-Oy-.js.map +1 -0
- package/dist/providers.d.ts +2 -4
- package/dist/providers.js +3 -23
- package/dist/session/sqlite.d.ts +7 -12
- package/dist/session/sqlite.d.ts.map +1 -0
- package/dist/session/sqlite.js +67 -79
- package/dist/session/sqlite.js.map +1 -0
- package/dist/session-Cn68UASv.js +440 -0
- package/dist/session-Cn68UASv.js.map +1 -0
- package/dist/session.d.ts +2 -4
- package/dist/session.js +3 -27
- package/dist/skills.d.ts +3 -322
- package/dist/skills.js +24 -47
- package/dist/skills.js.map +1 -0
- package/dist/stats-DoKUtF5T.js +58 -0
- package/dist/stats-DoKUtF5T.js.map +1 -0
- package/dist/tools-DpeWKzP1.js +3941 -0
- package/dist/tools-DpeWKzP1.js.map +1 -0
- package/dist/tools.d.ts +3 -95
- package/dist/tools.js +2 -40
- package/dist/tui.d.ts +533 -0
- package/dist/tui.d.ts.map +1 -0
- package/dist/tui.js +2004 -0
- package/dist/tui.js.map +1 -0
- package/dist/types-Bx_F8jet.js +39 -0
- package/dist/types-Bx_F8jet.js.map +1 -0
- package/dist/types.d.ts +4 -55
- package/dist/types.js +4 -28
- package/package.json +38 -4
- package/dist/agent-BAHrGtqu.d.ts +0 -2425
- package/dist/chunk-4ILGBQ23.js +0 -803
- package/dist/chunk-4LPBN547.js +0 -3540
- package/dist/chunk-64LLNY7F.js +0 -28
- package/dist/chunk-6STZTA4N.js +0 -830
- package/dist/chunk-7GQ7P6DM.js +0 -566
- package/dist/chunk-IC7FT4OD.js +0 -37
- package/dist/chunk-JCOB6IYO.js +0 -22
- package/dist/chunk-JH6IAAFA.js +0 -28
- package/dist/chunk-LNN5UTS2.js +0 -97
- package/dist/chunk-PMCQOMV4.js +0 -490
- package/dist/chunk-UD25QF3H.js +0 -304
- package/dist/chunk-W57VY6DJ.js +0 -834
- package/dist/sandbox-D7v6Wy62.d.ts +0 -28
- package/dist/skills-use-DwZrNmcw.d.ts +0 -80
- package/dist/types-Bai5rKpa.d.ts +0 -89
- package/dist/validation-Pm--dQEU.d.ts +0 -185
|
@@ -0,0 +1,969 @@
|
|
|
1
|
+
import { o as matchesContextExceeded } from "./errors-D1lhd6mX.js";
|
|
2
|
+
import { d as toolResultsMessage, f as userMessage, i as toAnthropic, n as fromAnthropic, s as assistantMessage, u as openaiCompat } from "./messages-z5Pq20p7.js";
|
|
3
|
+
import { resolve } from "node:path";
|
|
4
|
+
import { existsSync, readFileSync, renameSync, writeFileSync } from "node:fs";
|
|
5
|
+
import { getOAuthApiKey } from "@mariozechner/pi-ai/oauth";
|
|
6
|
+
import { getModel } from "@mariozechner/pi-ai";
|
|
7
|
+
import { streamOpenAICodexResponses } from "@mariozechner/pi-ai/openai-codex-responses";
|
|
8
|
+
//#region src/providers/oauth.ts
|
|
9
|
+
/**
 * Compute the creds-file path lazily, at each call, instead of freezing it at
 * module load. Hosts that `chdir` between import and first OAuth use (test
 * suites, multi-project CLIs) previously saw a stale path captured at import.
 */
function credentialsFilePath() {
  const workingDir = process.cwd();
  return resolve(workingDir, ".credentials.json");
}
|
|
17
|
+
/**
 * POSIX mode for credentials on disk (owner read+write only) — refresh tokens
 * and API keys leak if the file is world-readable on a shared box.
 * Ignored on Windows.
 */
// Octal literal instead of the former decimal 384 — same value, but the
// permission bits are now legible at a glance.
const CREDENTIALS_FILE_MODE = 0o600;
|
|
23
|
+
/**
 * In-process mutex table keyed by provider id. Concurrent `resolveOAuthApiKey`
 * calls for the same provider (typical when a host fans out multiple streams
 * at once) coalesce onto a single refresh. Without this, N concurrent calls
 * each hit `getOAuthApiKey`, producing N writes to `.credentials.json` with
 * last-write-wins semantics — and, in worst-case interleaving, a corrupted
 * file.
 *
 * Maps lock key → in-flight promise; `withRefreshLock` removes each entry in
 * a `finally` once its promise settles.
 */
const refreshLocks = /* @__PURE__ */ new Map();
|
|
32
|
+
/**
 * Load the shared `.credentials.json` store.
 *
 * Yields `{}` both when the file is absent and when it exists but is not a
 * parseable plain JSON object. Tolerating corruption matters: the
 * refresh-then-persist path writes atomically today, but older builds wrote
 * in place and a crash could leave a truncated file. Treating a bad file as
 * "no stored creds" lets users simply re-auth instead of hand-deleting it.
 */
function readOAuthCredentials() {
  const credsPath = credentialsFilePath();
  if (!existsSync(credsPath)) return {};
  let parsed;
  try {
    parsed = JSON.parse(readFileSync(credsPath, "utf-8"));
  } catch {
    return {};
  }
  const isPlainObject = parsed !== null && typeof parsed === "object" && !Array.isArray(parsed);
  return isPlainObject ? parsed : {};
}
|
|
53
|
+
/**
 * Persist `.credentials.json` atomically with owner-only permissions.
 *
 * The payload is first written to a unique sibling temp file and then renamed
 * over the target. `rename(2)` is atomic within a filesystem, so concurrent
 * readers observe either the previous file or the new one — never a torn
 * write. Relevant when several processes share one creds file.
 */
function writeOAuthCredentials(credentials) {
  const target = credentialsFilePath();
  const serialized = JSON.stringify(credentials, null, 2);
  const scratch = `${target}.${process.pid}.${Date.now()}.tmp`;
  writeFileSync(scratch, serialized, { mode: CREDENTIALS_FILE_MODE });
  renameSync(scratch, target);
}
|
|
67
|
+
/**
 * Extract an OAuth credential triple (plus any requested extra keys) from a
 * provider params bag. Returns `undefined` unless `access` and `refresh` are
 * strings and `expires` is a number; extra keys are copied only when their
 * value is defined.
 */
function credentialsFromParams(params, extraKeys = []) {
  const hasTriple =
    typeof params?.access === "string" &&
    typeof params.refresh === "string" &&
    typeof params.expires === "number";
  if (!hasTriple) return undefined;
  const credentials = {
    access: params.access,
    refresh: params.refresh,
    expires: params.expires
  };
  for (const key of extraKeys) {
    if (params[key] !== undefined) credentials[key] = params[key];
  }
  return credentials;
}
|
|
77
|
+
/**
 * Resolve an API key for an OAuth-capable provider, trying sources in order:
 * explicit `apiKey` param → full credential triple in params → bare `access`
 * token in params → env var (`options.envKey`) → the shared credentials file.
 * Refreshes via `getOAuthApiKey` are serialized per source+provider through
 * `withRefreshLock`; file-sourced refreshes are persisted back to disk.
 * Fires `callbacks.onOAuthRefresh` whenever new credentials are issued.
 */
async function resolveOAuthApiKey(options, callbacks) {
  // 1. A literal API key in params wins outright — no OAuth machinery needed.
  if (typeof options.params?.apiKey === "string") return options.params.apiKey;
  // 2. A full access/refresh/expires triple in params: refresh in-memory only
  //    (never written to the shared file), serialized via the lock table.
  const paramsCredentials = credentialsFromParams(options.params, options.extraCredentialKeys);
  if (paramsCredentials) return await withRefreshLock(`params:${options.providerId}`, () => resolveCredentialSource("params", paramsCredentials));
  // 3. A bare access token (no refresh/expires) is used as-is — it cannot be refreshed.
  if (typeof options.params?.access === "string") return options.params.access;
  // 4. Environment-variable override.
  if (options.envKey && process.env[options.envKey]) return process.env[options.envKey];
  // 5. Shared credentials file; refreshed tokens are persisted back through
  //    the (injectable, for tests) read/write hooks.
  const readCredentials = options.readCredentials ?? readOAuthCredentials;
  const writeCredentials = options.writeCredentials ?? writeOAuthCredentials;
  return await withRefreshLock(`file:${options.providerId}`, async () => {
    const allCredentials = readCredentials();
    const storedCredentials = allCredentials[options.providerId];
    if (!storedCredentials) throw new Error(options.missingError);
    return await resolveCredentialSource("file", storedCredentials, allCredentials, writeCredentials);
  });
  // Declared after the returns above, but hoisted; closes over `options`/`callbacks`.
  async function resolveCredentialSource(source, current, allCredentials, persistCredentials) {
    try {
      const result = await (options.getOAuthApiKey ?? getOAuthApiKey)(options.providerId, { [options.providerId]: current });
      if (!result) throw new Error(options.missingError);
      // Reference inequality signals the helper minted fresh credentials.
      if (result.newCredentials !== current) {
        // Only the file-backed source has somewhere durable to persist to.
        if (source === "file" && allCredentials && persistCredentials) {
          allCredentials[options.providerId] = result.newCredentials;
          persistCredentials(allCredentials);
        }
        // Notify the host with defensive copies of both credential snapshots.
        await callbacks?.onOAuthRefresh?.({
          provider: options.provider,
          providerId: options.providerId,
          source,
          previousCredentials: { ...current },
          credentials: { ...result.newCredentials }
        });
      }
      return result.apiKey;
    } catch (err) {
      // Wrap every failure in the provider-specific re-auth guidance message.
      const reason = err instanceof Error ? err.message : String(err);
      throw new Error(options.refreshError(reason));
    }
  }
}
|
|
115
|
+
/**
 * Coalesce concurrent refresh calls onto a single in-flight promise per key.
 * The key combines the source (`params` vs `file`) and provider id, so a
 * per-agent param-only refresh does not starve a separate file-backed one.
 * The table entry is cleared once the promise settles, success or failure.
 */
async function withRefreshLock(key, fn) {
  const inFlight = refreshLocks.get(key);
  if (inFlight) return inFlight;
  const run = async () => {
    try {
      return await fn();
    } finally {
      refreshLocks.delete(key);
    }
  };
  const task = run();
  refreshLocks.set(key, task);
  return task;
}
|
|
133
|
+
//#endregion
|
|
134
|
+
//#region src/providers/anthropic.ts
// Memoized Anthropic SDK constructor — populated on first successful dynamic import.
let _sdkCtor = null;
/**
 * Lazily import the optional `@anthropic-ai/sdk` peer dependency and cache its
 * default-export constructor. Throws a descriptive install hint (with the
 * original failure attached as `cause`) when the package is not installed.
 */
async function loadAnthropicSdk() {
  if (_sdkCtor) return _sdkCtor;
  try {
    _sdkCtor = (await import("@anthropic-ai/sdk")).default;
    return _sdkCtor;
  } catch (err) {
    throw new Error("The `anthropic` provider requires the `@anthropic-ai/sdk` package, which is an optional peer dependency. Install it with your package manager (e.g. `bun add @anthropic-ai/sdk`).", err instanceof Error ? { cause: err } : void 0);
  }
}
|
|
145
|
+
/** Beta flags sent unconditionally on the OAuth path (Claude Code parity). */
const OAUTH_DEFAULT_BETAS = ["claude-code-20250219", "oauth-2025-04-20"];
/**
 * Compose the `anthropic-beta` header value: OAuth defaults first, then any
 * caller-supplied extras; first occurrence wins (order-preserving de-dupe,
 * non-string/empty extras dropped). Returns `undefined` when nothing applies
 * (non-OAuth with no extras).
 */
function resolveAnthropicBetas(isOAuth, extraBetas) {
  const picked = [];
  const known = new Set();
  const add = (flag) => {
    if (known.has(flag)) return;
    known.add(flag);
    picked.push(flag);
  };
  if (isOAuth) for (const flag of OAUTH_DEFAULT_BETAS) add(flag);
  if (extraBetas) {
    for (const flag of extraBetas) {
      if (typeof flag === "string" && flag.length > 0) add(flag);
    }
  }
  return picked.length > 0 ? picked.join(",") : undefined;
}
|
|
169
|
+
/**
 * Resolve the effective Anthropic key, in precedence order: explicit `apiKey`
 * param → `access` token param → `ANTHROPIC_API_KEY` env var → stored OAuth
 * access token in `.credentials.json`. Throws when no source yields a key.
 */
function getConfiguredApiKey(anthropicParams) {
  const fromParams = anthropicParams?.apiKey || anthropicParams?.access;
  if (fromParams) return fromParams;
  const fromEnv = process.env.ANTHROPIC_API_KEY;
  if (fromEnv) return fromEnv;
  const stored = readOAuthCredentials().anthropic?.access;
  if (typeof stored === "string" && stored.length > 0) return stored;
  throw new Error("No API key found. Run `bun run auth` first.");
}
|
|
177
|
+
/**
 * Instantiate the Anthropic SDK client. OAuth tokens are passed through
 * `authToken` with Claude-Code-parity headers; plain API keys use the
 * standard `apiKey` path. Any computed `anthropic-beta` header is attached
 * in either mode, and an optional `baseURL` override is honored.
 */
function createClient(SDK, apiKey, isOAuth, baseURL, extraBetas) {
  const baseOptions = baseURL ? { baseURL } : {};
  const betaHeader = resolveAnthropicBetas(isOAuth, extraBetas);
  if (!isOAuth) {
    const headerOptions = betaHeader ? { defaultHeaders: { "anthropic-beta": betaHeader } } : {};
    return new SDK({
      apiKey,
      ...headerOptions,
      ...baseOptions
    });
  }
  const defaultHeaders = {
    "anthropic-dangerous-direct-browser-access": "true",
    "user-agent": "zidane/2.0.0",
    "x-app": "cli"
  };
  if (betaHeader) defaultHeaders["anthropic-beta"] = betaHeader;
  return new SDK({
    apiKey: null,
    authToken: apiKey,
    dangerouslyAllowBrowser: true,
    defaultHeaders,
    ...baseOptions
  });
}
|
|
202
|
+
/**
 * Map `ThinkingLevel` budgeted tiers to Anthropic's `output_config.effort`
 * enum. Anthropic exposes `low | medium | high | xhigh | max`; we surface the
 * three middle levels plus a `minimal` alias that collapses to `low` (the
 * closest equivalent — Anthropic does not have a sub-`low` tier).
 */
const EFFORT_FOR_LEVEL = {
  minimal: "low",
  low: "low",
  medium: "medium",
  high: "high"
};
/**
 * Decide how a `ThinkingLevel` translates into Anthropic's request shape.
 * Pure and synchronous — exported so tests can assert routing without
 * standing up the SDK.
 *
 * Routing rules:
 * - `'off'` → `null` (no thinking field, no effort hint).
 * - `'adaptive'` → adaptive thinking, model-chosen budget. A positive
 *   `customBudget` is carried as `maxTokensCap` so the request builder caps
 *   `max_tokens` — adaptive has no native budget knob, but capping the
 *   response envelope soft-bounds the thinking inside it.
 * - Budgeted tiers without `customBudget` → adaptive plus an `effort` hint.
 * - Budgeted tiers with `customBudget` → explicit-budget `enabled` path; the
 *   caller opted into precise budget control and accepts the Anthropic
 *   deprecation warning on opus 4.6+. `'adaptive'` is the sole exception:
 *   it never falls back to `enabled`.
 */
function planAnthropicThinking(level, customBudget) {
  switch (level) {
    case "off":
      return null;
    case "adaptive": {
      const hasCap = typeof customBudget === "number" && customBudget > 0;
      return hasCap ? { kind: "adaptive", maxTokensCap: customBudget } : { kind: "adaptive" };
    }
    default:
      if (customBudget !== undefined) {
        return {
          kind: "enabled",
          budgetTokens: customBudget,
          maxTokensBump: customBudget
        };
      }
      return { kind: "adaptive", effort: EFFORT_FOR_LEVEL[level] };
  }
}
|
|
253
|
+
/**
 * Translate Anthropic's native `stop_reason` into the zidane
 * `TurnFinishReason` union.
 *
 * `pause_turn` and `model_context_window_exceeded` are 4.6+ stop reasons that
 * pre-Z21 collapsed to `'other'` and silently terminated the run. They now
 * map to `'pause'` and `'length'` respectively, and the surrounding caller
 * adjusts the `done` flag so the loop can recover.
 */
const STOP_REASON_TO_FINISH = new Map([
  ["end_turn", "stop"],
  ["stop_sequence", "stop"],
  ["tool_use", "tool-calls"],
  ["max_tokens", "length"],
  ["model_context_window_exceeded", "length"],
  ["refusal", "content-filter"],
  ["pause_turn", "pause"]
]);
function mapStopReason(stopReason) {
  if (!stopReason) return undefined;
  return STOP_REASON_TO_FINISH.get(stopReason) ?? "other";
}
|
|
274
|
+
// Shared cache-control marker; reused by reference on every breakpoint.
const EPHEMERAL = { type: "ephemeral" };
/**
 * Mutate an Anthropic request in place to add cache breakpoints on the three
 * stable prefixes:
 * 1. System prompt — last text block.
 * 2. Tool definitions — last tool.
 * 3. Conversation — last content block of the last message.
 *
 * Each breakpoint tells Anthropic to cache the prefix ending at that block;
 * subsequent turns reuse the cached prefix and pay only for the delta. Safe
 * no-op when any prefix is empty (no tools, empty system, etc.).
 */
function applyAnthropicCacheBreakpoints(params) {
  if (typeof params.system === "string") {
    // String system prompt: promote to a single text block so it can carry cache_control.
    if (params.system.length > 0) params.system = [{
      type: "text",
      text: params.system,
      cache_control: EPHEMERAL
    }];
  } else if (Array.isArray(params.system) && params.system.length > 0) {
    // Block-array system prompt: mark only the final block; map() copies so
    // caller-owned block objects are never mutated.
    const lastIdx = params.system.length - 1;
    params.system = params.system.map((block, i) => i === lastIdx ? {
      ...block,
      cache_control: EPHEMERAL
    } : block);
  }
  if (params.tools && params.tools.length > 0) {
    // Tool prefix: breakpoint on the last tool definition, again via copies.
    const lastIdx = params.tools.length - 1;
    params.tools = params.tools.map((tool, i) => i === lastIdx ? {
      ...tool,
      cache_control: EPHEMERAL
    } : tool);
  }
  if (params.messages.length === 0) return;
  const lastMsgIdx = params.messages.length - 1;
  const lastMsg = params.messages[lastMsgIdx];
  if (typeof lastMsg.content === "string") {
    // String content: promote to a text block (empty string → nothing to cache).
    if (lastMsg.content.length === 0) return;
    params.messages[lastMsgIdx] = {
      ...lastMsg,
      content: [{
        type: "text",
        text: lastMsg.content,
        cache_control: EPHEMERAL
      }]
    };
    return;
  }
  if (!Array.isArray(lastMsg.content) || lastMsg.content.length === 0) return;
  const blocks = lastMsg.content;
  // Walk back past trailing thinking/redacted_thinking blocks so the
  // breakpoint lands on a non-thinking block. NOTE(review): presumably
  // thinking blocks cannot carry cache_control — confirm against the API.
  let targetIdx = blocks.length - 1;
  while (targetIdx >= 0 && isThinkingBlock(blocks[targetIdx])) targetIdx -= 1;
  if (targetIdx < 0) return;
  const nextBlocks = blocks.slice();
  nextBlocks[targetIdx] = {
    ...nextBlocks[targetIdx],
    cache_control: EPHEMERAL
  };
  params.messages[lastMsgIdx] = {
    ...lastMsg,
    content: nextBlocks
  };
}
// True for content blocks that represent (possibly redacted) model thinking.
function isThinkingBlock(block) {
  return block.type === "thinking" || block.type === "redacted_thinking";
}
|
|
340
|
+
/**
 * Duck-type check for an Anthropic SDK `APIError` — avoids a runtime
 * dependency on `@anthropic-ai/sdk` so `classifyError` stays usable even when
 * the optional peer dep isn't loaded (e.g. host code calling it on an
 * unrelated provider's error).
 *
 * Anthropic's APIError shape: `.status: number` plus `.error` (the parsed
 * body, object or null). Plain `Error`s carry neither property.
 */
function looksLikeAnthropicApiError(err) {
  const isObject = err !== null && typeof err === "object";
  return isObject && typeof err.status === "number" && "error" in err;
}
|
|
354
|
+
/**
 * Classify an Anthropic SDK / HTTP error for typed-error wrapping.
 *
 * - `prompt is too long` (400 invalid_request_error) → `context_exceeded`.
 * - Any other Anthropic `APIError`-shaped value → `provider_error` carrying
 *   the native status/type code; 429 and 5xx (except 501) are marked
 *   retryable.
 * - Unknown errors → `null` (the loop wraps those in `AgentProviderError`
 *   generically).
 *
 * Anthropic's wire error shape is `{ type: 'error', error: { type, message } }`;
 * the SDK stores the parsed body on `err.error`. Both levels are walked so
 * callers get the most specific `providerCode` available.
 */
function classifyAnthropicError(err) {
  if (!err || typeof err !== "object") return null;
  const candidate = err;
  if (candidate.name === "AbortError") return { kind: "aborted" };
  if (!looksLikeAnthropicApiError(candidate)) return null;
  const innerType = candidate.error?.error?.type;
  const outerType = candidate.error?.type;
  const nativeType = innerType && innerType !== "error" ? innerType : outerType;
  const message = candidate.error?.error?.message ?? candidate.error?.message ?? candidate.message ?? "";
  if (matchesContextExceeded(message)) {
    return {
      kind: "context_exceeded",
      providerCode: nativeType ?? "invalid_request_error",
      message
    };
  }
  const status = candidate.status;
  const classified = {
    kind: "provider_error",
    providerCode: nativeType ?? (status ? String(status) : undefined),
    message
  };
  if (typeof status === "number") {
    classified.retryable = status === 429 || (status >= 500 && status !== 501);
  }
  return classified;
}
|
|
389
|
+
/**
 * Build a user `SessionMessage` from multimodal prompt parts.
 *
 * - Text parts → text blocks (empty text is dropped).
 * - Image parts → base64 image blocks.
 * - Document parts with `encoding: 'text'` → inlined as a text block wrapped
 *   in an `<attachment>` header so the model sees clearly-delimited content.
 * - Document parts with any other encoding → serialized as an
 *   `<attachment ... encoding="base64">` text block. Native Anthropic
 *   document/PDF blocks are not yet wired through the SessionContentBlock
 *   union, so true PDF ingestion requires preprocessing to text first.
 *
 * Mirrors `defaultPromptMessage`'s output shape; the presence of
 * `promptMessage` on the provider tells the agent loop to route PromptPart[]
 * here instead of throwing on base64 docs.
 */
function anthropicPromptMessage(parts) {
  const toAttachmentBlock = (part, base64) => {
    const attrs = [
      part.name ? `name="${part.name}"` : undefined,
      `media_type="${part.mediaType}"`,
      base64 ? `encoding="base64"` : undefined
    ].filter(Boolean).join(" ");
    return {
      type: "text",
      text: `<attachment ${attrs}>\n${part.data}\n</attachment>`
    };
  };
  const content = [];
  for (const part of parts) {
    switch (part.type) {
      case "text":
        if (part.text.length > 0) content.push({ type: "text", text: part.text });
        break;
      case "image":
        content.push({ type: "image", mediaType: part.mediaType, data: part.data });
        break;
      default:
        content.push(toAttachmentBlock(part, part.encoding !== "text"));
    }
  }
  return { role: "user", content };
}
|
|
442
|
+
/**
 * Build the Anthropic provider object.
 *
 * OAuth mode is detected at construction time from the configured key prefix
 * ("sk-ant-oat"). In OAuth mode, `stream` swaps in the Claude Code system
 * prompt and relays the caller's system prompt as a leading user/assistant
 * exchange instead. Credentials refreshed from params during a call are kept
 * in `runtimeCredentials` so later `stream` calls reuse them rather than the
 * stale constructor params.
 */
function anthropic(anthropicParams) {
  const isOAuth = getConfiguredApiKey(anthropicParams).includes("sk-ant-oat");
  const defaultModel = anthropicParams?.defaultModel || "claude-opus-4-6";
  // Seed in-memory credentials only when a complete triple was supplied.
  let runtimeCredentials = typeof anthropicParams?.access === "string" && typeof anthropicParams.refresh === "string" && typeof anthropicParams.expires === "number" ? {
    access: anthropicParams.access,
    refresh: anthropicParams.refresh,
    expires: anthropicParams.expires
  } : void 0;
  return {
    name: "anthropic",
    meta: {
      defaultModel,
      isOAuth,
      capabilities: {
        vision: true,
        imageInToolResult: true
      }
    },
    // Convert the generic tool shape to Anthropic's (`input_schema` key).
    formatTools(tools) {
      return tools.map((t) => ({
        name: t.name,
        description: t.description,
        input_schema: t.inputSchema
      }));
    },
    userMessage(content) {
      return {
        role: "user",
        content: [{
          type: "text",
          text: content
        }]
      };
    },
    assistantMessage(content) {
      return {
        role: "assistant",
        content: [{
          type: "text",
          text: content
        }]
      };
    },
    // Tool results travel as user-role `tool_result` blocks keyed by call id.
    toolResultsMessage(results) {
      return {
        role: "user",
        content: results.map((r) => ({
          type: "tool_result",
          callId: r.id,
          output: r.content
        }))
      };
    },
    promptMessage: anthropicPromptMessage,
    classifyError: classifyAnthropicError,
    async stream(options, callbacks) {
      const SDK = await loadAnthropicSdk();
      // Resolve the key per call: in-memory refreshed creds take precedence
      // over the original constructor params.
      const apiKey = await resolveOAuthApiKey({
        provider: "anthropic",
        providerId: "anthropic",
        params: runtimeCredentials ? {
          ...anthropicParams,
          ...runtimeCredentials
        } : anthropicParams,
        envKey: "ANTHROPIC_API_KEY",
        missingError: "No API key found. Run `bun run auth` first.",
        refreshError: (reason) => `Anthropic OAuth token refresh failed. Run \`bun run auth --anthropic\` again. ${reason}`
      }, {
        ...callbacks,
        async onOAuthRefresh(ctx) {
          // Param-sourced refreshes are never written to disk; remember them
          // in memory for subsequent calls, then forward to the host.
          if (ctx.source === "params") runtimeCredentials = {
            access: ctx.credentials.access,
            refresh: ctx.credentials.refresh,
            expires: ctx.credentials.expires
          };
          await callbacks.onOAuthRefresh?.(ctx);
        }
      });
      // OAuth detection is re-checked on the resolved key here, which may
      // differ from the constructor-time `isOAuth` if the key source changed.
      const client = createClient(SDK, apiKey, apiKey.includes("sk-ant-oat"), anthropicParams?.baseURL, anthropicParams?.extraBetas);
      // OAuth path: fixed Claude Code system prompt; caller's system prompt is
      // smuggled in as a leading user/assistant exchange instead.
      const system = isOAuth ? `You are Claude Code, Anthropic's official CLI for Claude.` : options.system;
      const messages = isOAuth && options.system ? [
        {
          role: "user",
          content: [{
            type: "text",
            text: options.system
          }]
        },
        {
          role: "assistant",
          content: [{
            type: "text",
            text: "Understood. I will proceed with these instructions above the rest of my system prompt."
          }]
        },
        ...options.messages
      ] : [...options.messages];
      const thinking = options.thinking ?? "off";
      const modelId = options.model;
      // Extra body params are spread first so the explicit fields below win.
      const params = {
        ...anthropicParams?.extraBodyParams ?? {},
        model: modelId,
        max_tokens: options.maxTokens,
        system,
        tools: options.tools,
        messages: messages.map((m) => toAnthropic(m)),
        stream: true
      };
      if (anthropicParams?.contextManagement) params.context_management = anthropicParams.contextManagement;
      // Prompt caching is opt-out (`cache: false`).
      if (options.cache !== false) applyAnthropicCacheBreakpoints(params);
      const plan = planAnthropicThinking(thinking, options.thinkingBudget);
      if (plan) {
        if (plan.kind === "enabled") {
          // Explicit budget: grow max_tokens so thinking doesn't eat the
          // response envelope.
          params.thinking = {
            type: "enabled",
            budget_tokens: plan.budgetTokens
          };
          params.max_tokens = plan.maxTokensBump + params.max_tokens;
        } else {
          params.thinking = { type: "adaptive" };
          if (plan.effort) params.output_config = { effort: plan.effort };
          // Adaptive has no budget knob; cap the envelope instead.
          if (typeof plan.maxTokensCap === "number" && plan.maxTokensCap > 0) params.max_tokens = Math.min(params.max_tokens, plan.maxTokensCap);
        }
        // NOTE(review): temperature forced to 1 whenever thinking is on —
        // presumably an API requirement; confirm against Anthropic docs.
        params.temperature = 1;
      }
      // Map the generic toolChoice union to Anthropic's tool_choice shapes.
      if (options.toolChoice) if (options.toolChoice.type === "tool" && options.toolChoice.name) params.tool_choice = {
        type: "tool",
        name: options.toolChoice.name
      };
      else if (options.toolChoice.type === "required") params.tool_choice = { type: "any" };
      else params.tool_choice = { type: "auto" };
      const s = client.messages.stream(params, { signal: options.signal });
      // Accumulate streamed text locally while forwarding deltas to the host.
      let text = "";
      s.on("text", (delta) => {
        text += delta;
        callbacks.onText(delta);
      });
      if (callbacks.onThinking) s.on("thinking", (delta) => {
        callbacks.onThinking(delta);
      });
      const response = await s.finalMessage();
      const toolCalls = response.content.filter((b) => b.type === "tool_use").map((b) => ({
        id: b.id,
        name: b.name,
        input: b.input
      }));
      const finishReason = mapStopReason(response.stop_reason);
      // pause_turn must never report done — the loop resumes the turn.
      const isPause = response.stop_reason === "pause_turn";
      return {
        assistantMessage: fromAnthropic({
          role: "assistant",
          content: response.content
        }),
        text,
        toolCalls,
        done: !isPause && (response.stop_reason === "end_turn" || toolCalls.length === 0),
        usage: {
          input: response.usage.input_tokens,
          output: response.usage.output_tokens,
          cacheCreation: response.usage.cache_creation_input_tokens ?? void 0,
          cacheRead: response.usage.cache_read_input_tokens ?? void 0,
          ...finishReason ? { finishReason } : {},
          modelId: response.model ?? options.model
        }
      };
    }
  };
}
|
|
610
|
+
//#endregion
|
|
611
|
+
//#region src/providers/cerebras.ts
|
|
612
|
+
const BASE_URL$1 = "https://api.cerebras.ai/v1";
|
|
613
|
+
/**
 * Resolve the Cerebras API key.
 *
 * An explicit, non-empty `params.apiKey` takes precedence; otherwise the
 * CEREBRAS_API_KEY environment variable is used.
 * @throws {Error} when neither source provides a non-empty key.
 */
function getApiKey$1(params) {
	const explicit = params?.apiKey;
	if (typeof explicit === "string" && explicit.length > 0) return explicit;
	const fromEnv = process.env.CEREBRAS_API_KEY;
	if (fromEnv) return fromEnv;
	throw new Error("No Cerebras API key found. Pass `apiKey` or set CEREBRAS_API_KEY in your environment.");
}
|
|
618
|
+
/**
 * Cerebras provider.
 *
 * Delegates to {@link openaiCompat}, supplying the Cerebras base URL,
 * resolved API key, and a default model.
 */
function cerebras(params) {
	const capabilities = params?.capabilities ?? {
		vision: false,
		imageInToolResult: false
	};
	return openaiCompat({
		name: "cerebras",
		apiKey: getApiKey$1(params),
		baseURL: BASE_URL$1,
		defaultModel: params?.defaultModel || "zai-glm-4.7",
		capabilities
	});
}
|
|
636
|
+
//#endregion
|
|
637
|
+
//#region src/providers/openai.ts
|
|
638
|
+
/** Registry id under which OpenAI Codex models are looked up. */
const PROVIDER_ID = "openai-codex";
/** Model used when neither `params.defaultModel` nor `options.model` is given. */
const DEFAULT_MODEL = "gpt-5.4";
|
|
640
|
+
/**
 * Look up `modelId` in the OpenAI Codex model registry.
 *
 * Unknown ids fall back to the default model's registry entry with its
 * `id`/`name` overwritten by the requested id, so pricing/limits come from
 * the default while the requested model string is still sent to the API.
 * @throws {Error} if even the default model is missing from the registry.
 */
function resolveModel(modelId) {
	const registered = getModel(PROVIDER_ID, modelId);
	if (registered) return registered;
	const fallback = getModel(PROVIDER_ID, DEFAULT_MODEL);
	if (!fallback) throw new Error(`OpenAI Codex model registry is missing the default model: ${DEFAULT_MODEL}`);
	return {
		...fallback,
		id: modelId,
		name: modelId
	};
}
|
|
651
|
+
/**
 * Build a zeroed usage record (tokens and cost) for synthesized
 * assistant messages that never hit the API.
 * @returns A fresh object on every call — safe to mutate.
 */
function emptyUsage() {
	const zeroed = {
		input: 0,
		output: 0,
		cacheRead: 0,
		cacheWrite: 0
	};
	return {
		...zeroed,
		totalTokens: 0,
		cost: {
			...zeroed,
			total: 0
		}
	};
}
|
|
667
|
+
/**
 * Map internal tool definitions to the wire shape expected by the
 * Codex Responses API (`inputSchema` becomes `parameters`).
 */
function formatTools(tools) {
	return tools.map(({ name, description, inputSchema }) => ({
		name,
		description,
		parameters: inputSchema
	}));
}
|
|
674
|
+
/**
 * Convert internal chat messages into pi-ai message records.
 *
 * - Messages containing `tool_result` blocks are expanded into one
 *   `toolResult` record per result (other blocks in that message are dropped).
 * - User messages collapse to a plain string when they are a single text
 *   block; otherwise images (first) and texts become a content array.
 * - Remaining messages are treated as assistant turns; Anthropic-signed
 *   thinking blocks are skipped since their signatures don't transfer.
 */
function toPiMessages(messages, modelId) {
	const result = [];
	for (const msg of messages) {
		const toolResults = msg.content.filter((b) => b.type === "tool_result");
		if (toolResults.length > 0) {
			for (const r of toolResults) {
				let content;
				if (typeof r.output === "string") {
					content = [{ type: "text", text: r.output }];
				} else {
					content = r.output.map((block) =>
						block.type === "image"
							? { type: "image", data: block.data, mimeType: block.mediaType }
							: { type: "text", text: block.text }
					);
				}
				result.push({
					role: "toolResult",
					toolCallId: r.callId,
					toolName: "",
					content,
					isError: r.isError ?? false,
					timestamp: Date.now()
				});
			}
			continue;
		}
		const texts = msg.content.filter((b) => b.type === "text");
		const images = msg.content.filter((b) => b.type === "image");
		if (msg.role === "user") {
			if (images.length === 0 && texts.length === 1) {
				result.push({
					role: "user",
					content: texts[0].text,
					timestamp: Date.now()
				});
			} else {
				result.push({
					role: "user",
					content: [
						...images.map((img) => ({ type: "image", data: img.data, mimeType: img.mediaType })),
						...texts.map((block) => ({ type: "text", text: block.text }))
					],
					timestamp: Date.now()
				});
			}
			continue;
		}
		const content = [];
		for (const block of msg.content) {
			if (block.type === "text") {
				content.push({ type: "text", text: block.text });
			} else if (block.type === "thinking") {
				// Anthropic signatures cannot be replayed through this API.
				if (block.signatureProducer === "anthropic") continue;
				content.push({
					type: "thinking",
					thinking: block.text,
					thinkingSignature: block.signature
				});
			} else if (block.type === "tool_call") {
				content.push({
					type: "toolCall",
					id: block.id,
					name: block.name,
					arguments: block.input
				});
			}
		}
		result.push({
			role: "assistant",
			content,
			api: "openai-codex-responses",
			provider: PROVIDER_ID,
			model: modelId,
			usage: emptyUsage(),
			stopReason: "stop",
			timestamp: Date.now()
		});
	}
	return result;
}
|
|
758
|
+
/**
 * Convert a pi-ai assistant message back into the internal assistant
 * message shape (text / thinking / tool_call blocks).
 * Thinking signatures are tagged as produced by "openai" when present.
 */
function fromPiAssistantMessage(message) {
	const blocks = [];
	for (const block of message.content) {
		switch (block.type) {
			case "text":
				blocks.push({ type: "text", text: block.text });
				break;
			case "thinking": {
				const entry = {
					type: "thinking",
					text: block.thinking
				};
				if (typeof block.thinkingSignature === "string") {
					entry.signature = block.thinkingSignature;
					entry.signatureProducer = "openai";
				}
				blocks.push(entry);
				break;
			}
			case "toolCall":
				blocks.push({
					type: "tool_call",
					id: block.id,
					name: block.name,
					input: block.arguments
				});
				break;
		}
	}
	return {
		role: "assistant",
		content: blocks
	};
}
|
|
785
|
+
/** Collect every `toolCall` block from a pi-ai assistant message. */
function extractToolCalls(message) {
	const calls = [];
	for (const block of message.content) {
		if (block.type !== "toolCall") continue;
		calls.push({
			id: block.id,
			name: block.name,
			input: block.arguments
		});
	}
	return calls;
}
|
|
792
|
+
/** Concatenate the text of every `text` block in a pi-ai message. */
function extractText(message) {
	let text = "";
	for (const block of message.content) {
		if (block.type === "text") text += block.text;
	}
	return text;
}
|
|
795
|
+
/**
 * Convert pi-ai usage numbers into the internal per-turn usage shape.
 * Zero cache/cost values are normalized to `undefined`; `finishReason`
 * is only included when truthy.
 */
function toTurnUsage(usage, finishReason, modelId) {
	const turn = {
		input: usage.input,
		output: usage.output,
		cacheRead: usage.cacheRead || void 0,
		cacheCreation: usage.cacheWrite || void 0,
		cost: usage.cost.total || void 0
	};
	if (finishReason) turn.finishReason = finishReason;
	turn.modelId = modelId;
	return turn;
}
|
|
806
|
+
/**
 * Classify an OpenAI Codex error. pi-ai surfaces errors either as thrown
 * `Error`s (wrapping `event.error.errorMessage`) or via stream event types.
 *
 * @returns `{ kind: "aborted" }`, `{ kind: "context_exceeded", ... }`,
 *   `{ kind: "provider_error", ... }`, or `null` when unclassifiable.
 */
function classifyOpenAIError(err) {
	if (!err || typeof err !== "object") return null;
	const candidate = err;
	if (candidate.name === "AbortError") return { kind: "aborted" };
	const message = candidate.message ?? "";
	const providerCode = candidate.code ?? candidate.type;
	if (providerCode === "context_length_exceeded" || matchesContextExceeded(message)) {
		return {
			kind: "context_exceeded",
			providerCode: providerCode ?? "context_length_exceeded",
			message
		};
	}
	if (message.length > 0) {
		return {
			kind: "provider_error",
			providerCode,
			message
		};
	}
	return null;
}
|
|
828
|
+
/**
 * Mutate the outgoing Responses API payload with request-level overrides.
 * Currently only translates `options.toolChoice` into the wire `tool_choice`
 * field; a named tool becomes a function target, "required" maps directly,
 * and anything else falls back to "auto".
 * @returns The same payload object, for chaining.
 */
function applyPayloadOverrides(payload, options) {
	const body = payload;
	const choice = options.toolChoice;
	if (!choice) return body;
	if (choice.type === "tool" && choice.name) {
		body.tool_choice = {
			type: "function",
			name: choice.name
		};
	} else if (choice.type === "required") {
		body.tool_choice = "required";
	} else {
		body.tool_choice = "auto";
	}
	return body;
}
|
|
838
|
+
/**
 * OpenAI Codex provider (OAuth-based, Responses API).
 *
 * @param params Optional settings: an OAuth credential triple (`access`,
 *   `refresh`, `expires`, plus optional `accountId`), a `defaultModel`
 *   override, and a `transport` passed through to the streaming layer.
 * @returns A provider object exposing metadata, message formatters, error
 *   classification, and an async `stream(options, callbacks)` method.
 */
function openai(params) {
	const defaultModel = params?.defaultModel || DEFAULT_MODEL;
	// Mutable OAuth credentials, seeded only when the full access/refresh/expires
	// triple was provided in params. Updated in place on token refresh (see
	// onOAuthRefresh below) so later turns reuse the fresh token.
	let runtimeCredentials = typeof params?.access === "string" && typeof params.refresh === "string" && typeof params.expires === "number" ? {
		access: params.access,
		refresh: params.refresh,
		expires: params.expires,
		...params.accountId ? { accountId: params.accountId } : {}
	} : void 0;
	return {
		name: "openai",
		meta: {
			defaultModel,
			isOAuth: true,
			capabilities: {
				vision: true,
				imageInToolResult: true
			}
		},
		formatTools,
		userMessage,
		assistantMessage,
		toolResultsMessage,
		classifyError: classifyOpenAIError,
		async stream(options, callbacks) {
			const modelId = options.model || defaultModel;
			const model = resolveModel(modelId);
			// Resolve a bearer token: refreshed runtime credentials override the
			// original params; OPENAI_CODEX_API_KEY serves as the env fallback.
			const apiKey = await resolveOAuthApiKey({
				provider: "openai",
				providerId: PROVIDER_ID,
				params: runtimeCredentials ? {
					...params,
					...runtimeCredentials
				} : params,
				envKey: "OPENAI_CODEX_API_KEY",
				extraCredentialKeys: ["accountId"],
				missingError: "No OpenAI Codex OAuth token found. Run `bun run auth --openai` first.",
				refreshError: (reason) => `OpenAI Codex OAuth token refresh failed. Run \`bun run auth --openai\` again. ${reason}`
			}, {
				...callbacks,
				// Capture refreshed tokens for subsequent turns, then forward
				// the refresh event to the caller's handler (if any).
				async onOAuthRefresh(ctx) {
					if (ctx.source === "params") runtimeCredentials = {
						access: ctx.credentials.access,
						refresh: ctx.credentials.refresh,
						expires: ctx.credentials.expires,
						...typeof ctx.credentials.accountId === "string" ? { accountId: ctx.credentials.accountId } : {}
					};
					await callbacks.onOAuthRefresh?.(ctx);
				}
			});
			const context = {
				systemPrompt: options.system,
				messages: toPiMessages(options.messages, modelId),
				tools: options.tools
			};
			// "off" and "adaptive" are not forwarded as reasoning efforts.
			const reasoningLevel = options.thinking && options.thinking !== "off" && options.thinking !== "adaptive" ? options.thinking : void 0;
			const stream = streamOpenAICodexResponses(model, context, {
				apiKey,
				maxTokens: options.maxTokens,
				signal: options.signal,
				transport: params?.transport,
				reasoningEffort: reasoningLevel,
				reasoningSummary: reasoningLevel ? "auto" : void 0,
				onPayload: (payload) => applyPayloadOverrides(payload, options)
			});
			let finalMessage;
			let text = "";
			let thinking = "";
			// Drain the event stream, forwarding deltas to the callbacks.
			for await (const event of stream) if (event.type === "text_delta") {
				text += event.delta;
				callbacks.onText(event.delta);
			} else if (event.type === "thinking_delta") {
				thinking += event.delta;
				callbacks.onThinking?.(event.delta);
			} else if (event.type === "thinking_end") {
				// Emit any tail of the final thinking content that was not
				// already delivered through thinking_delta events.
				const delta = event.content.startsWith(thinking) ? event.content.slice(thinking.length) : thinking ? "" : event.content;
				if (delta) {
					thinking += delta;
					callbacks.onThinking?.(delta);
				}
			} else if (event.type === "done") finalMessage = event.message;
			else if (event.type === "error") throw new Error(event.error.errorMessage || "OpenAI Codex API error");
			// If no "done" event arrived, fall back to the stream's final result.
			finalMessage ??= await stream.result();
			// Fall back to the final message's text when no deltas were seen.
			text ||= extractText(finalMessage);
			const toolCalls = extractToolCalls(finalMessage);
			const assistantTurn = fromPiAssistantMessage(finalMessage);
			const finishReason = toolCalls.length > 0 ? "tool-calls" : "stop";
			return {
				assistantMessage: assistantTurn,
				text,
				toolCalls,
				done: toolCalls.length === 0,
				usage: toTurnUsage(finalMessage.usage, finishReason, modelId)
			};
		}
	};
}
|
|
934
|
+
//#endregion
|
|
935
|
+
//#region src/providers/openrouter.ts
|
|
936
|
+
/** Base URL for the OpenRouter REST API. */
const BASE_URL = "https://openrouter.ai/api/v1";
|
|
937
|
+
/**
 * Resolve the OpenRouter API key.
 *
 * An explicit, non-empty `params.apiKey` takes precedence; otherwise the
 * OPENROUTER_API_KEY environment variable is used.
 * @throws {Error} when neither source provides a non-empty key.
 */
function getApiKey(params) {
	const provided = params?.apiKey;
	if (typeof provided === "string" && provided.length > 0) return provided;
	const envKey = process.env.OPENROUTER_API_KEY;
	if (envKey) return envKey;
	throw new Error("No OpenRouter API key found. Pass `apiKey` or set OPENROUTER_API_KEY in your environment.");
}
|
|
942
|
+
/**
 * OpenRouter provider.
 *
 * Delegates to {@link openaiCompat} with the OpenRouter base URL, default
 * model, and the attribution headers OpenRouter asks clients to send.
 */
function openrouter(params) {
	const attributionHeaders = {
		"HTTP-Referer": "https://github.com/Tahul/zidane",
		"X-Title": "zidane"
	};
	const capabilities = params?.capabilities ?? {
		vision: true,
		imageInToolResult: false
	};
	return openaiCompat({
		name: "openrouter",
		apiKey: getApiKey(params),
		baseURL: BASE_URL,
		defaultModel: params?.defaultModel || "anthropic/claude-sonnet-4-6",
		extraHeaders: attributionHeaders,
		capabilities,
		cacheBreakpoints: true,
		supportsReasoning: true
	});
}
|
|
966
|
+
//#endregion
|
|
967
|
+
export { anthropic as i, openai as n, cerebras as r, openrouter as t };
|
|
968
|
+
|
|
969
|
+
//# sourceMappingURL=providers-CX-R-Oy-.js.map
|