zidane 4.0.2 → 4.1.4

This diff shows the changes between publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (77)
  1. package/README.md +196 -614
  2. package/dist/agent-BoV5Twdl.d.ts +2347 -0
  3. package/dist/agent-BoV5Twdl.d.ts.map +1 -0
  4. package/dist/contexts-3Arvn7yR.js +321 -0
  5. package/dist/contexts-3Arvn7yR.js.map +1 -0
  6. package/dist/contexts.d.ts +2 -25
  7. package/dist/contexts.js +2 -10
  8. package/dist/errors-D1lhd6mX.js +118 -0
  9. package/dist/errors-D1lhd6mX.js.map +1 -0
  10. package/dist/index-28otmfLX.d.ts +400 -0
  11. package/dist/index-28otmfLX.d.ts.map +1 -0
  12. package/dist/index-BfSdALzk.d.ts +113 -0
  13. package/dist/index-BfSdALzk.d.ts.map +1 -0
  14. package/dist/index-DPsd0qwm.d.ts +254 -0
  15. package/dist/index-DPsd0qwm.d.ts.map +1 -0
  16. package/dist/index.d.ts +5 -95
  17. package/dist/index.js +141 -271
  18. package/dist/index.js.map +1 -0
  19. package/dist/interpolate-CukJwP2G.js +887 -0
  20. package/dist/interpolate-CukJwP2G.js.map +1 -0
  21. package/dist/mcp-8wClKY-3.js +771 -0
  22. package/dist/mcp-8wClKY-3.js.map +1 -0
  23. package/dist/mcp.d.ts +2 -4
  24. package/dist/mcp.js +2 -13
  25. package/dist/messages-z5Pq20p7.js +1020 -0
  26. package/dist/messages-z5Pq20p7.js.map +1 -0
  27. package/dist/presets-Cs7_CsMk.js +39 -0
  28. package/dist/presets-Cs7_CsMk.js.map +1 -0
  29. package/dist/presets.d.ts +2 -43
  30. package/dist/presets.js +2 -17
  31. package/dist/providers-CX-R-Oy-.js +969 -0
  32. package/dist/providers-CX-R-Oy-.js.map +1 -0
  33. package/dist/providers.d.ts +2 -4
  34. package/dist/providers.js +3 -23
  35. package/dist/session/sqlite.d.ts +7 -12
  36. package/dist/session/sqlite.d.ts.map +1 -0
  37. package/dist/session/sqlite.js +67 -79
  38. package/dist/session/sqlite.js.map +1 -0
  39. package/dist/session-Cn68UASv.js +440 -0
  40. package/dist/session-Cn68UASv.js.map +1 -0
  41. package/dist/session.d.ts +2 -4
  42. package/dist/session.js +3 -27
  43. package/dist/skills.d.ts +3 -322
  44. package/dist/skills.js +24 -47
  45. package/dist/skills.js.map +1 -0
  46. package/dist/stats-DoKUtF5T.js +58 -0
  47. package/dist/stats-DoKUtF5T.js.map +1 -0
  48. package/dist/tools-DpeWKzP1.js +3941 -0
  49. package/dist/tools-DpeWKzP1.js.map +1 -0
  50. package/dist/tools.d.ts +3 -95
  51. package/dist/tools.js +2 -40
  52. package/dist/tui.d.ts +533 -0
  53. package/dist/tui.d.ts.map +1 -0
  54. package/dist/tui.js +2004 -0
  55. package/dist/tui.js.map +1 -0
  56. package/dist/types-Bx_F8jet.js +39 -0
  57. package/dist/types-Bx_F8jet.js.map +1 -0
  58. package/dist/types.d.ts +4 -55
  59. package/dist/types.js +4 -28
  60. package/package.json +38 -4
  61. package/dist/agent-BAHrGtqu.d.ts +0 -2425
  62. package/dist/chunk-4ILGBQ23.js +0 -803
  63. package/dist/chunk-4LPBN547.js +0 -3540
  64. package/dist/chunk-64LLNY7F.js +0 -28
  65. package/dist/chunk-6STZTA4N.js +0 -830
  66. package/dist/chunk-7GQ7P6DM.js +0 -566
  67. package/dist/chunk-IC7FT4OD.js +0 -37
  68. package/dist/chunk-JCOB6IYO.js +0 -22
  69. package/dist/chunk-JH6IAAFA.js +0 -28
  70. package/dist/chunk-LNN5UTS2.js +0 -97
  71. package/dist/chunk-PMCQOMV4.js +0 -490
  72. package/dist/chunk-UD25QF3H.js +0 -304
  73. package/dist/chunk-W57VY6DJ.js +0 -834
  74. package/dist/sandbox-D7v6Wy62.d.ts +0 -28
  75. package/dist/skills-use-DwZrNmcw.d.ts +0 -80
  76. package/dist/types-Bai5rKpa.d.ts +0 -89
  77. package/dist/validation-Pm--dQEU.d.ts +0 -185
package/dist/messages-z5Pq20p7.js
@@ -0,0 +1,1020 @@
+ import { o as matchesContextExceeded } from "./errors-D1lhd6mX.js";
+ //#region src/providers/openai-compat.ts
+ const TOOL_RESULTS_TAG = "__zidane_tool_results__";
+ const ASSISTANT_TOOL_CALLS_TAG = "__zidane_assistant_tc__";
+ /**
+ * Ceiling on how many bytes may accumulate between two newline boundaries when
+ * parsing an SSE stream. A broken or malicious server that emits an unbounded
+ * stream of non-newline bytes would otherwise grow `buffer` without end and
+ * eventually OOM the host. 8 MB is safely above the largest tool-call arg
+ * JSON we reasonably expect and still catches a pathological stream in ~1 s on
+ * a 10 MB/s connection.
+ */
+ const SSE_MAX_BUFFER_BYTES = 8 * 1024 * 1024;
+ var OpenAICompatStreamError = class extends Error {
+ constructor(message) {
+ super(message);
+ this.name = "OpenAICompatStreamError";
+ }
+ };
+ async function consumeSSE(response, callbacks, signal) {
+ const reader = response.body.getReader();
+ const decoder = new TextDecoder();
+ let buffer = "";
+ let text = "";
+ let thinking = "";
+ let finishReason = "stop";
+ let usage = {
+ input: 0,
+ output: 0
+ };
+ const tcMap = /* @__PURE__ */ new Map();
+ const reasoningMap = /* @__PURE__ */ new Map();
+ let sawReasoningDetails = false;
+ try {
+ while (true) {
+ if (signal?.aborted) break;
+ const { done, value } = await reader.read();
+ if (done) break;
+ buffer += decoder.decode(value, { stream: true });
+ if (buffer.length > SSE_MAX_BUFFER_BYTES) throw new OpenAICompatStreamError(`SSE buffer exceeded ${SSE_MAX_BUFFER_BYTES} bytes without a line boundary — upstream may be streaming non-SSE data.`);
+ const lines = buffer.split("\n");
+ buffer = lines.pop() || "";
+ for (const line of lines) {
+ if (!line.startsWith("data: ")) continue;
+ const data = line.slice(6).trim();
+ if (data === "[DONE]") continue;
+ let chunk;
+ try {
+ chunk = JSON.parse(data);
+ } catch {
+ continue;
+ }
+ const choice = chunk.choices?.[0];
+ if (!choice) continue;
+ const fr = choice.finish_reason;
+ if (fr) finishReason = fr;
+ const delta = choice.delta;
+ const reasoningDeltaArr = delta?.reasoning_details;
+ if (reasoningDeltaArr && reasoningDeltaArr.length > 0) {
+ sawReasoningDetails = true;
+ for (const item of reasoningDeltaArr) {
+ const idx = typeof item.index === "number" ? item.index : 0;
+ const existing = reasoningMap.get(idx) ?? {};
+ if (typeof item.text === "string") {
+ existing.text = (existing.text ?? "") + item.text;
+ thinking += item.text;
+ callbacks.onThinking?.(item.text);
+ }
+ if (typeof item.summary === "string") {
+ existing.summary = (existing.summary ?? "") + item.summary;
+ thinking += item.summary;
+ callbacks.onThinking?.(item.summary);
+ }
+ for (const key of [
+ "type",
+ "signature",
+ "data",
+ "format",
+ "id"
+ ]) {
+ const v = item[key];
+ if (typeof v === "string") existing[key] = v;
+ }
+ reasoningMap.set(idx, existing);
+ }
+ }
+ if (!sawReasoningDetails) {
+ const thinkingDelta = delta?.reasoning_content ?? delta?.reasoning;
+ if (thinkingDelta) {
+ thinking += thinkingDelta;
+ callbacks.onThinking?.(thinkingDelta);
+ }
+ }
+ const contentDelta = delta?.content;
+ if (contentDelta) {
+ text += contentDelta;
+ callbacks.onText(contentDelta);
+ }
+ const toolCallsDelta = delta?.tool_calls;
+ if (toolCallsDelta) for (const tc of toolCallsDelta) {
+ const existing = tcMap.get(tc.index);
+ if (existing) {
+ if (tc.function?.arguments) existing.args += tc.function.arguments;
+ } else tcMap.set(tc.index, {
+ id: tc.id || `call_${tc.index}`,
+ name: tc.function?.name || "",
+ args: tc.function?.arguments || ""
+ });
+ }
+ const chunkUsage = chunk.usage;
+ if (chunkUsage) {
+ const cachedRead = chunkUsage.prompt_tokens_details?.cached_tokens;
+ const cachedWrite = chunkUsage.prompt_tokens_details?.cache_creation_input_tokens ?? chunkUsage.prompt_tokens_details?.cache_write_tokens ?? chunkUsage.cache_creation_input_tokens;
+ usage = {
+ input: chunkUsage.prompt_tokens ?? 0,
+ output: chunkUsage.completion_tokens ?? 0,
+ cost: chunkUsage.total_cost ?? void 0,
+ ...typeof cachedRead === "number" && cachedRead > 0 ? { cacheRead: cachedRead } : {},
+ ...typeof cachedWrite === "number" && cachedWrite > 0 ? { cacheCreation: cachedWrite } : {}
+ };
+ }
+ }
+ }
+ } finally {
+ reader.releaseLock();
+ }
+ const toolCalls = [];
+ for (const tc of tcMap.values()) {
+ if (!tc.args) {
+ toolCalls.push({
+ id: tc.id,
+ name: tc.name,
+ input: {}
+ });
+ continue;
+ }
+ try {
+ toolCalls.push({
+ id: tc.id,
+ name: tc.name,
+ input: JSON.parse(tc.args)
+ });
+ } catch (err) {
+ throw new OpenAICompatStreamError(`Tool call "${tc.name}" (${tc.id}) arguments were truncated or malformed: ${err.message}`);
+ }
+ }
+ const reasoningDetails = Array.from(reasoningMap.entries()).sort(([a], [b]) => a - b).map(([, item]) => item);
+ return {
+ text,
+ thinking,
+ toolCalls,
+ finishReason,
+ usage,
+ reasoningDetails
+ };
+ }
+ /**
+ * Encode a single image block as an OpenAI `image_url` multi-part entry.
+ */
+ function toImageUrlPart(img) {
+ return {
+ type: "image_url",
+ image_url: { url: `data:${img.mediaType};base64,${img.data}` }
+ };
+ }
+ /**
+ * Summarize a `tool_result` output for the companion-user-message path — text blocks
+ * are joined (separated by `\n`) so the tool message carries all textual context; image
+ * blocks are collected in a flat list for the companion user message.
+ *
+ * Used only on the fallback path; the native path walks `output` in-order to preserve
+ * text↔image interleaving.
+ */
+ function summarizeToolResultOutput(output) {
+ if (typeof output === "string") return {
+ text: output,
+ images: []
+ };
+ const texts = [];
+ const images = [];
+ for (const block of output) if (block.type === "text") texts.push(block.text);
+ else if (block.type === "image") images.push({
+ mediaType: block.mediaType,
+ data: block.data
+ });
+ return {
+ text: texts.join("\n"),
+ images
+ };
+ }
+ function toOAIMessages(system, messages, options = {}) {
+ const out = [{
+ role: "system",
+ content: system
+ }];
+ const nativeImageInTool = options.imageInToolResult === true;
+ const reasoningEnabled = options.supportsReasoning === true;
+ const activeModel = options.model;
+ for (const msg of messages) {
+ const toolResults = msg.content.filter((b) => b.type === "tool_result");
+ const toolCalls = msg.content.filter((b) => b.type === "tool_call");
+ const textBlocks = msg.content.filter((b) => b.type === "text");
+ const imageBlocks = msg.content.filter((b) => b.type === "image");
+ const reasoningDetails = (reasoningEnabled ? msg.content.filter((b) => {
+ if (b.type !== "provider_reasoning") return false;
+ if (b.producer !== "openrouter") return false;
+ if (b.model && activeModel && b.model !== activeModel) return false;
+ return true;
+ }) : []).flatMap((b) => b.details);
+ if (toolResults.length > 0) {
+ for (const tr of toolResults) {
+ if (typeof tr.output === "string") {
+ out.push({
+ role: "tool",
+ tool_call_id: tr.callId,
+ content: tr.output
+ });
+ continue;
+ }
+ if (nativeImageInTool) {
+ const parts = tr.output.map((block) => block.type === "image" ? toImageUrlPart({
+ mediaType: block.mediaType,
+ data: block.data
+ }) : {
+ type: "text",
+ text: block.text
+ });
+ out.push({
+ role: "tool",
+ tool_call_id: tr.callId,
+ content: parts
+ });
+ continue;
+ }
+ const { text, images } = summarizeToolResultOutput(tr.output);
+ if (images.length === 0) {
+ out.push({
+ role: "tool",
+ tool_call_id: tr.callId,
+ content: text
+ });
+ continue;
+ }
+ const noun = images.length === 1 ? "image" : "images";
+ const attachedMarker = `[${images.length} ${noun} attached — see next user message]`;
+ const toolMarker = text.length > 0 ? `${text}\n\n${attachedMarker}` : attachedMarker;
+ out.push({
+ role: "tool",
+ tool_call_id: tr.callId,
+ content: toolMarker
+ });
+ out.push({
+ role: "user",
+ content: [...images.map(toImageUrlPart), {
+ type: "text",
+ text: `(${noun} returned by tool call ${tr.callId})`
+ }]
+ });
+ }
+ continue;
+ }
+ if (toolCalls.length > 0) {
+ const m = {
+ role: "assistant",
+ content: textBlocks.length > 0 ? textBlocks[0].text : null,
+ tool_calls: toolCalls.map((tc) => ({
+ id: tc.id,
+ type: "function",
+ function: {
+ name: tc.name,
+ arguments: JSON.stringify(tc.input)
+ }
+ }))
+ };
+ if (reasoningDetails.length > 0) m.reasoning_details = reasoningDetails;
+ out.push(m);
+ continue;
+ }
+ if (imageBlocks.length > 0) {
+ const parts = imageBlocks.map((img) => ({
+ type: "image_url",
+ image_url: { url: `data:${img.mediaType};base64,${img.data}` }
+ }));
+ for (const b of textBlocks) parts.push({
+ type: "text",
+ text: b.text
+ });
+ const m = {
+ role: msg.role,
+ content: parts
+ };
+ if (msg.role === "assistant" && reasoningDetails.length > 0) m.reasoning_details = reasoningDetails;
+ out.push(m);
+ continue;
+ }
+ let pushed;
+ if (textBlocks.length === 1) pushed = {
+ role: msg.role,
+ content: textBlocks[0].text
+ };
+ else if (textBlocks.length > 1) pushed = {
+ role: msg.role,
+ content: textBlocks.map((b) => ({
+ type: "text",
+ text: b.text
+ }))
+ };
+ else pushed = {
+ role: msg.role,
+ content: null
+ };
+ if (msg.role === "assistant" && reasoningDetails.length > 0) pushed.reasoning_details = reasoningDetails;
+ out.push(pushed);
+ }
+ return out;
+ }
+ const EPHEMERAL = { type: "ephemeral" };
+ /**
+ * Add `cache_control: { type: 'ephemeral' }` breakpoints to the system message's
+ * last text part and the last message's final content part.
+ *
+ * Mutates `messages` in place. Converts plain-string content into a single-element
+ * content array so the cache marker can attach — this shape is accepted verbatim by
+ * OpenRouter's Anthropic and Gemini routes and ignored by routes with automatic
+ * caching (OpenAI, DeepSeek, Grok, Groq, Moonshot).
+ *
+ * Skip conditions (safe no-ops):
+ * - Empty messages array.
+ * - Assistant messages with no text (tool-call-only) — attaching a cache marker to a
+ * `tool_calls` block has no defined semantics, so we skip and let the prior
+ * system/tools breakpoints carry caching.
+ */
+ function applyOAICacheBreakpoints(messages) {
+ if (messages.length === 0) return;
+ const first = messages[0];
+ if (first.role === "system") markLastContentPart(first);
+ const lastIdx = messages.length - 1;
+ if (lastIdx > 0) markLastContentPart(messages[lastIdx]);
+ }
+ /**
+ * Mark the last content part of an OAI message with `cache_control`. Normalizes
+ * string content into a `[{ type: 'text', text, cache_control }]` array so the
+ * marker has a block to attach to.
+ *
+ * No-op for messages without string or array content (tool-call-only assistant
+ * messages fall through; the system/tools breakpoints carry the cache prefix).
+ */
+ function markLastContentPart(msg) {
+ if (typeof msg.content === "string") {
+ if (msg.content.length === 0) return;
+ msg.content = [{
+ type: "text",
+ text: msg.content,
+ cache_control: EPHEMERAL
+ }];
+ return;
+ }
+ if (!Array.isArray(msg.content) || msg.content.length === 0) return;
+ const parts = msg.content;
+ const lastBlockIdx = parts.length - 1;
+ parts[lastBlockIdx] = {
+ ...parts[lastBlockIdx],
+ cache_control: EPHEMERAL
+ };
+ }
+ /**
+ * Return a copy of `tools` with `cache_control` on the last entry.
+ *
+ * OpenRouter accepts the marker alongside the standard `type` + `function` fields
+ * and forwards it when routing to Anthropic/Gemini. Leaves the non-cached tools
+ * unchanged so the caller's reference is not mutated.
+ */
+ function applyOAIToolCacheBreakpoint(tools) {
+ if (tools.length === 0) return tools;
+ const lastIdx = tools.length - 1;
+ return tools.map((tool, i) => i === lastIdx ? {
+ ...tool,
+ cache_control: EPHEMERAL
+ } : tool);
+ }
+ function formatTools(tools) {
+ return tools.map((t) => ({
+ type: "function",
+ function: {
+ name: t.name,
+ description: t.description,
+ parameters: t.inputSchema
+ }
+ }));
+ }
+ function userMessage(content) {
+ return {
+ role: "user",
+ content: [{
+ type: "text",
+ text: content
+ }]
+ };
+ }
+ function assistantMessage(content) {
+ return {
+ role: "assistant",
+ content: [{
+ type: "text",
+ text: content
+ }]
+ };
+ }
+ function toolResultsMessage(results) {
+ return {
+ role: "user",
+ content: results.map((r) => ({
+ type: "tool_result",
+ callId: r.id,
+ output: r.content
+ }))
+ };
+ }
+ function buildAssistantContent(text, toolCalls, thinking, reasoning) {
+ const content = [];
+ if (reasoning && reasoning.details.length > 0) {
+ const block = {
+ type: "provider_reasoning",
+ producer: reasoning.producer,
+ details: reasoning.details
+ };
+ if (reasoning.model) block.model = reasoning.model;
+ content.push(block);
+ }
+ if (thinking) content.push({
+ type: "thinking",
+ text: thinking
+ });
+ if (text) content.push({
+ type: "text",
+ text
+ });
+ for (const tc of toolCalls) content.push({
+ type: "tool_call",
+ id: tc.id,
+ name: tc.name,
+ input: tc.input
+ });
+ return {
+ role: "assistant",
+ content
+ };
+ }
+ /**
+ * HTTP error thrown when an OpenAI-compatible endpoint returns a non-OK response.
+ *
+ * The body is best-effort JSON-parsed; `error.message` / `error.code` / `error.type`
+ * are extracted for clean downstream classification.
+ */
+ var OpenAICompatHttpError = class extends Error {
+ status;
+ providerCode;
+ bodyText;
+ constructor(status, bodyText) {
+ let message = bodyText;
+ let code;
+ try {
+ const parsed = JSON.parse(bodyText);
+ message = parsed?.error?.message ?? bodyText;
+ code = parsed?.error?.code ?? parsed?.error?.type;
+ } catch {}
+ super(`HTTP ${status}: ${message}`);
+ this.name = "OpenAICompatHttpError";
+ this.status = status;
+ this.providerCode = code;
+ this.bodyText = bodyText;
+ }
+ };
+ const TRAILING_SLASH_RE = /\/$/;
+ /**
+ * Classify an OpenAI-compatible error into `ClassifiedError`.
+ *
+ * Recognizes:
+ * - `AbortError` (from fetch) → `aborted`.
+ * - `OpenAICompatHttpError` with a context-exceeded code or message → `context_exceeded`.
+ * - Any other `OpenAICompatHttpError` → `provider_error`.
+ *
+ * Returns `null` for unrecognized error shapes (the loop falls back to `AgentProviderError`).
+ */
+ function classifyOpenAICompatError(err) {
+ if (!err || typeof err !== "object") return null;
+ if (err.name === "AbortError") return { kind: "aborted" };
+ if (err instanceof OpenAICompatStreamError) return {
+ kind: "provider_error",
+ providerCode: "stream_error",
+ message: err.message,
+ retryable: true
+ };
+ if (!(err instanceof OpenAICompatHttpError)) return null;
+ const code = err.providerCode;
+ const msg = err.message;
+ if (code === "context_length_exceeded" || matchesContextExceeded(msg)) return {
+ kind: "context_exceeded",
+ providerCode: code ?? "context_length_exceeded",
+ message: msg
+ };
+ return {
+ kind: "provider_error",
+ providerCode: code ?? String(err.status),
+ message: msg,
+ retryable: isRetryableHttpStatus(err.status)
+ };
+ }
+ /**
+ * 429 + 5xx (except 501 Not Implemented) are safe to retry with backoff;
+ * 4xx other than 429 are terminal (bad request, auth, not found, etc.).
+ */
+ function isRetryableHttpStatus(status) {
+ if (status === 429) return true;
+ if (status >= 500 && status !== 501) return true;
+ return false;
+ }
+ /**
+ * Map an OpenAI-compatible `finish_reason` string to the zidane `TurnFinishReason` union.
+ */
+ function mapOAIFinishReason(reason) {
+ if (!reason) return void 0;
+ switch (reason) {
+ case "stop": return "stop";
+ case "tool_calls":
+ case "function_call": return "tool-calls";
+ case "length": return "length";
+ case "content_filter": return "content-filter";
+ default: return "other";
+ }
+ }
+ /**
+ * Map zidane's `ThinkingLevel` + optional explicit budget to the OpenRouter
+ * `reasoning` request field. Returns `undefined` when reasoning should not be
+ * sent (off, or no level + no budget).
+ *
+ * - `'low' | 'medium' | 'high'` → `{ effort }`.
+ * - `'minimal'` → `{ effort: 'low' }` (closest match in OpenRouter's vocabulary).
+ * - `'adaptive'` → `{}` (let upstream decide; OpenRouter passes through).
+ * - explicit `thinkingBudget` → `{ max_tokens }` overlaid on the level.
+ */
+ function planOpenRouterReasoning(thinking, thinkingBudget) {
+ if ((!thinking || thinking === "off") && typeof thinkingBudget !== "number") return void 0;
+ const out = {};
+ if (thinking && thinking !== "off" && thinking !== "adaptive") out.effort = thinking === "minimal" ? "low" : thinking;
+ if (typeof thinkingBudget === "number" && thinkingBudget > 0) out.max_tokens = thinkingBudget;
+ return out;
+ }
+ /**
+ * Factory for any OpenAI-compatible HTTP endpoint.
+ *
+ * Speaks the standard `POST /chat/completions` + `stream: true` + SSE dialect.
+ * Thin wrappers (`openrouter`, `cerebras`) call this with pinned defaults.
+ *
+ * @example Baseten (non-standard auth scheme)
+ * ```ts
+ * openaiCompat({
+ * name: 'baseten',
+ * apiKey: process.env.BASETEN_API_KEY!,
+ * baseURL: process.env.BASETEN_PROXY_URL!,
+ * authHeader: { name: 'Authorization', scheme: 'Api-Key' },
+ * })
+ * ```
+ */
+ function openaiCompat(params) {
+ const name = params.name ?? "openai-compat";
+ const defaultModel = params.defaultModel ?? "gpt-4o-mini";
+ const authHeaderName = params.authHeader?.name ?? "Authorization";
+ const authHeaderValue = params.authHeader?.scheme ? `${params.authHeader.scheme} ${params.apiKey}` : params.authHeader ? params.apiKey : `Bearer ${params.apiKey}`;
+ const extraHeaders = params.extraHeaders ?? {};
+ const endpoint = `${params.baseURL.replace(TRAILING_SLASH_RE, "")}/chat/completions`;
+ const capabilities = {
+ vision: params.capabilities?.vision ?? false,
+ imageInToolResult: params.capabilities?.imageInToolResult ?? false
+ };
+ const cacheBreakpointsEnabled = params.cacheBreakpoints === true;
+ const reasoningEnabled = params.supportsReasoning === true;
+ return {
+ name,
+ meta: {
+ defaultModel,
+ capabilities
+ },
+ formatTools,
+ userMessage,
+ assistantMessage,
+ toolResultsMessage,
+ classifyError: classifyOpenAICompatError,
+ async stream(options, callbacks) {
+ const modelId = options.model || defaultModel;
+ const messages = toOAIMessages(options.system, options.messages, {
+ imageInToolResult: capabilities.imageInToolResult === true,
+ supportsReasoning: reasoningEnabled,
+ model: modelId
+ });
+ const shouldCache = cacheBreakpointsEnabled && options.cache !== false;
+ if (shouldCache) applyOAICacheBreakpoints(messages);
+ const maxTokens = options.thinkingBudget ? options.thinkingBudget + options.maxTokens : options.maxTokens;
+ const body = {
+ ...params.extraBodyParams ?? {},
+ model: modelId,
+ messages,
+ max_tokens: maxTokens,
+ stream: true
+ };
+ if (reasoningEnabled) {
+ const reasoning = planOpenRouterReasoning(options.thinking, options.thinkingBudget);
+ if (reasoning) body.reasoning = reasoning;
+ }
+ if (options.tools && options.tools.length > 0) body.tools = shouldCache ? applyOAIToolCacheBreakpoint(options.tools) : options.tools;
+ if (options.toolChoice) if (options.toolChoice.type === "tool" && options.toolChoice.name) body.tool_choice = {
+ type: "function",
+ function: { name: options.toolChoice.name }
+ };
+ else if (options.toolChoice.type === "required") body.tool_choice = "required";
+ else body.tool_choice = "auto";
+ const response = await fetch(endpoint, {
+ method: "POST",
+ headers: {
+ [authHeaderName]: authHeaderValue,
+ "Content-Type": "application/json",
+ ...extraHeaders
+ },
+ body: JSON.stringify(body),
+ signal: options.signal
+ });
+ if (!response.ok) {
+ const errorText = await response.text();
+ throw new OpenAICompatHttpError(response.status, errorText);
+ }
+ const result = await consumeSSE(response, callbacks, options.signal);
+ const finishReason = mapOAIFinishReason(result.finishReason);
+ return {
+ assistantMessage: buildAssistantContent(result.text, result.toolCalls, result.thinking, reasoningEnabled && result.reasoningDetails.length > 0 ? {
+ details: result.reasoningDetails,
+ producer: "openrouter",
+ model: modelId
+ } : void 0),
+ text: result.text,
+ toolCalls: result.toolCalls,
+ done: result.finishReason === "stop" || result.toolCalls.length === 0,
+ usage: {
+ input: result.usage.input,
+ output: result.usage.output,
+ ...result.usage.cacheRead !== void 0 ? { cacheRead: result.usage.cacheRead } : {},
+ ...result.usage.cacheCreation !== void 0 ? { cacheCreation: result.usage.cacheCreation } : {},
+ ...result.usage.cost !== void 0 ? { cost: result.usage.cost } : {},
+ ...finishReason ? { finishReason } : {},
+ modelId
+ }
+ };
+ }
+ };
+ }
+ //#endregion
+ //#region src/session/messages.ts
+ /**
+ * Decode an Anthropic `tool_result.content` wire value back to zidane's canonical
+ * `string | ToolResultContent[]` shape.
+ *
+ * Anthropic accepts three shapes in the wire: `string`, `Array<{type:'text',text}>`,
+ * or `Array<{type:'text'|'image', ...}>`. We collapse to `string` only when every
+ * block is text to keep simple cases readable.
+ */
+ function decodeAnthropicToolResultContent(content) {
+ if (typeof content === "string") return content;
+ if (!Array.isArray(content)) return JSON.stringify(content);
+ const blocks = [];
+ for (const raw of content) {
+ if (!raw || typeof raw !== "object") continue;
+ const b = raw;
+ if (b.type === "text" && typeof b.text === "string") {
+ blocks.push({
+ type: "text",
+ text: b.text
+ });
+ continue;
+ }
+ if (b.type === "image" && b.source && typeof b.source === "object") {
+ const src = b.source;
+ if (src.type === "base64" && typeof src.data === "string" && typeof src.media_type === "string") {
+ blocks.push({
+ type: "image",
+ mediaType: src.media_type,
+ data: src.data
+ });
+ continue;
+ }
+ }
+ blocks.push({
+ type: "text",
+ text: JSON.stringify(raw)
+ });
+ }
+ if (blocks.length === 0) return "";
+ if (!blocks.some((b) => b.type !== "text")) return blocks.map((b) => b.text).join("\n");
+ return blocks;
+ }
+ /**
+ * Encode zidane's canonical `string | ToolResultContent[]` shape to Anthropic's
+ * wire format for `tool_result.content`.
+ */
+ function encodeAnthropicToolResultContent(output) {
+ if (typeof output === "string") return output;
+ return output.map((b) => {
+ if (b.type === "text") return {
+ type: "text",
+ text: b.text
+ };
+ return {
+ type: "image",
+ source: {
+ type: "base64",
+ media_type: b.mediaType,
+ data: b.data
+ }
+ };
+ });
+ }
+ function fromAnthropic(msg) {
+ const role = msg.role;
+ const content = [];
+ if (typeof msg.content === "string") {
+ content.push({
+ type: "text",
+ text: msg.content
+ });
+ return {
+ role,
+ content
+ };
+ }
+ if (Array.isArray(msg.content)) for (const block of msg.content) {
+ if (!block || typeof block !== "object") continue;
+ const b = block;
+ if (b.type === "text") content.push({
+ type: "text",
+ text: b.text
+ });
+ else if (b.type === "image") {
+ const source = b.source;
+ if (source?.type === "base64") content.push({
+ type: "image",
+ mediaType: source.media_type,
+ data: source.data
+ });
+ } else if (b.type === "tool_use") content.push({
+ type: "tool_call",
+ id: b.id,
+ name: b.name,
+ input: b.input
+ });
+ else if (b.type === "tool_result") {
+ const output = decodeAnthropicToolResultContent(b.content);
+ const block = {
+ type: "tool_result",
+ callId: b.tool_use_id,
+ output
+ };
+ if (b.is_error === true) block.isError = true;
+ content.push(block);
+ } else if (b.type === "thinking") {
+ const block = {
+ type: "thinking",
+ text: b.thinking ?? ""
+ };
+ if (typeof b.signature === "string") {
+ block.signature = b.signature;
+ block.signatureProducer = "anthropic";
+ }
+ content.push(block);
+ } else if (b.type === "redacted_thinking") content.push({
+ type: "redacted_thinking",
+ data: b.data ?? ""
+ });
+ }
+ return {
+ role,
+ content
+ };
+ }
+ function fromOpenAI(msg) {
+ const role = msg.role;
+ const content = [];
+ const c = msg.content;
+ if (c == null) return {
+ role,
+ content
+ };
+ if (typeof c === "string") {
+ content.push({
+ type: "text",
+ text: c
+ });
+ return {
+ role,
+ content
+ };
+ }
+ if (typeof c === "object" && !Array.isArray(c) && c._tag === "__zidane_assistant_tc__") {
+ const tagged = c;
+ if (typeof tagged.text === "string" && tagged.text) content.push({
+ type: "text",
+ text: tagged.text
+ });
+ if (Array.isArray(tagged.tool_calls)) for (const raw of tagged.tool_calls) {
+ if (!raw || typeof raw !== "object") continue;
+ const tc = raw;
+ const rawArgs = tc.function?.arguments;
+ const input = rawArgs ? typeof rawArgs === "string" ? JSON.parse(rawArgs) : rawArgs : {};
+ content.push({
+ type: "tool_call",
+ id: tc.id ?? "",
+ name: tc.function?.name ?? "",
+ input
+ });
+ }
+ return {
+ role,
+ content
+ };
+ }
+ if (typeof c === "object" && !Array.isArray(c) && c._tag === "__zidane_tool_results__") {
+ const tagged = c;
+ if (Array.isArray(tagged.results)) for (const raw of tagged.results) {
+ if (!raw || typeof raw !== "object") continue;
+ const r = raw;
+ content.push({
+ type: "tool_result",
+ callId: r.tool_call_id ?? "",
+ output: r.content ?? ""
+ });
+ }
+ return {
+ role,
+ content
+ };
+ }
+ if (Array.isArray(c)) {
+ for (const block of c) {
+ if (!block || typeof block !== "object") continue;
+ const b = block;
+ if (b.type === "text") content.push({
+ type: "text",
+ text: b.text
+ });
+ else if (b.type === "image_url") {
+ const imageUrl = b.image_url?.url;
+ if (imageUrl?.startsWith("data:")) {
+ const [meta, data] = imageUrl.slice(5).split(",", 2);
+ const mediaType = meta.replace(";base64", "");
+ content.push({
+ type: "image",
+ mediaType,
+ data
+ });
+ }
+ }
+ }
+ return {
+ role,
+ content
+ };
+ }
+ return {
+ role,
+ content
+ };
+ }
+ function toAnthropic(msg) {
+ const blocks = msg.content.filter((b) => !(b.type === "thinking" && b.signatureProducer === "openai")).filter((b) => b.type !== "provider_reasoning").map((block) => {
+ switch (block.type) {
+ case "text": return {
+ type: "text",
+ text: block.text
+ };
+ case "image": return {
+ type: "image",
+ source: {
+ type: "base64",
+ media_type: block.mediaType,
+ data: block.data
+ }
+ };
+ case "tool_call": return {
+ type: "tool_use",
+ id: block.id,
+ name: block.name,
+ input: block.input
+ };
+ case "tool_result": {
+ const out = {
+ type: "tool_result",
+ tool_use_id: block.callId,
+ content: encodeAnthropicToolResultContent(block.output)
+ };
+ if (block.isError) out.is_error = true;
+ return out;
+ }
+ case "thinking": {
+ const out = {
+ type: "thinking",
+ thinking: block.text
+ };
+ if (block.signature) out.signature = block.signature;
+ return out;
+ }
+ case "redacted_thinking": return {
+ type: "redacted_thinking",
+ data: block.data
+ };
+ default: return {
+ type: "text",
+ text: ""
+ };
+ }
+ });
+ if (blocks.length === 1 && blocks[0].type === "text") return {
+ role: msg.role,
+ content: blocks[0].text
+ };
+ return {
+ role: msg.role,
+ content: blocks
+ };
+ }
+ function toOpenAI(msg) {
+ const toolCalls = msg.content.filter((b) => b.type === "tool_call");
+ const toolResults = msg.content.filter((b) => b.type === "tool_result");
+ const textBlocks = msg.content.filter((b) => b.type === "text");
+ const imageBlocks = msg.content.filter((b) => b.type === "image");
+ if (toolResults.length > 0) return {
+ role: msg.role,
+ content: {
+ _tag: TOOL_RESULTS_TAG,
+ results: toolResults.map((b) => {
+ const tr = b;
+ return {
+ tool_call_id: tr.callId,
+ content: tr.output
+ };
+ })
+ }
+ };
+ if (toolCalls.length > 0) {
+ const textContent = textBlocks.length > 0 ? textBlocks[0].text : null;
+ return {
+ role: msg.role,
+ content: {
+ _tag: ASSISTANT_TOOL_CALLS_TAG,
+ text: textContent,
+ tool_calls: toolCalls.map((b) => {
+ const tc = b;
+ return {
+ id: tc.id,
+ type: "function",
+ function: {
+ name: tc.name,
+ arguments: JSON.stringify(tc.input)
+ }
+ };
+ })
+ }
+ };
+ }
+ if (imageBlocks.length > 0) {
+ const parts = imageBlocks.map((b) => {
+ const img = b;
+ return {
+ type: "image_url",
+ image_url: { url: `data:${img.mediaType};base64,${img.data}` }
+ };
+ });
+ for (const b of textBlocks) parts.push({
+ type: "text",
+ text: b.text
+ });
+ return {
+ role: msg.role,
+ content: parts
+ };
+ }
+ if (textBlocks.length === 1) return {
+ role: msg.role,
+ content: textBlocks[0].text
+ };
+ if (textBlocks.length > 1) return {
+ role: msg.role,
+ content: textBlocks.map((b) => ({
+ type: "text",
+ text: b.text
+ }))
+ };
+ return {
+ role: msg.role,
+ content: null
+ };
+ }
+ function autoDetectAndConvert(msg) {
+ const c = msg.content;
+ if (c && typeof c === "object" && !Array.isArray(c)) {
+ const tag = c._tag;
+ if (typeof tag === "string" && tag.startsWith("__zidane_")) return fromOpenAI(msg);
+ }
+ if (Array.isArray(c)) {
+ for (const block of c) {
+ if (!block || typeof block !== "object") continue;
+ const b = block;
+ if (b.type === "tool_use" || b.type === "tool_result" && "tool_use_id" in b) return fromAnthropic(msg);
+ if (b.type === "image_url") return fromOpenAI(msg);
+ }
+ return fromAnthropic(msg);
+ }
+ if (typeof c === "string") return fromAnthropic(msg);
+ return fromAnthropic(msg);
+ }
+ //#endregion
+ export { toOpenAI as a, classifyOpenAICompatError as c, toolResultsMessage as d, userMessage as f, toAnthropic as i, mapOAIFinishReason as l, fromAnthropic as n, OpenAICompatHttpError as o, fromOpenAI as r, assistantMessage as s, autoDetectAndConvert as t, openaiCompat as u };
+
+ //# sourceMappingURL=messages-z5Pq20p7.js.map
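
The `openaiCompat` factory above is the core of the new provider chunk. A minimal usage sketch follows; it assumes the factory and its message helpers are re-exported from the package's `providers` entry point (the public import path is not visible in this diff), and the model id is illustrative. The option and callback names (`system`, `messages`, `maxTokens`, `onText`, `onThinking`) are taken from the bundled source above.

```ts
// Sketch: driving the openaiCompat provider shown in the diff above.
// Assumption: the factory is re-exported from a "providers" subpath export.
import { openaiCompat } from "zidane/providers";

const provider = openaiCompat({
  name: "openrouter",
  apiKey: process.env.OPENROUTER_API_KEY!,
  baseURL: "https://openrouter.ai/api/v1",
  supportsReasoning: true, // enables the OpenRouter `reasoning` request field
  cacheBreakpoints: true, // enables the ephemeral cache_control breakpoints
});

// `stream` POSTs to {baseURL}/chat/completions with stream: true and feeds
// SSE deltas to the callbacks as they arrive. onText is required; onThinking
// is optional (only invoked when the upstream emits reasoning deltas).
const result = await provider.stream(
  {
    model: "anthropic/claude-3.5-sonnet", // hypothetical model id
    system: "You are a helpful assistant.",
    messages: [provider.userMessage("Hello!")],
    maxTokens: 1024,
    thinking: "medium",
  },
  {
    onText: (delta) => process.stdout.write(delta),
    onThinking: (delta) => process.stderr.write(delta),
  },
);

console.log(result.usage, result.toolCalls);
```

Note that when a `thinkingBudget` is set, the factory adds it on top of `maxTokens` before sending `max_tokens`, so the visible-output budget is preserved regardless of how much the model spends on reasoning.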
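The cache-breakpoint helpers (`applyOAICacheBreakpoints`, `markLastContentPart`) are internal to this bundle; they do not appear in the export list. The transformation they perform is documented in the JSDoc above and can be shown as data:

```ts
// Before: plain-string system content, as built by toOAIMessages.
const before = { role: "system", content: "You are a helpful assistant." };

// After applyOAICacheBreakpoints: the string is wrapped in a single text part
// so the ephemeral cache_control marker has a block to attach to. Per the
// JSDoc above, OpenRouter forwards this to its Anthropic/Gemini routes and
// routes with automatic caching simply ignore it.
const after = {
  role: "system",
  content: [
    {
      type: "text",
      text: "You are a helpful assistant.",
      cache_control: { type: "ephemeral" },
    },
  ],
};
```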
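`OpenAICompatHttpError` and `classifyOpenAICompatError` are in the export list, so the retry policy encoded by `isRetryableHttpStatus` can be exercised directly. A sketch, again assuming a `providers` subpath export:

```ts
import { classifyOpenAICompatError, OpenAICompatHttpError } from "zidane/providers";

// 429 is retryable; the provider code is lifted out of the JSON error body.
const overloaded = new OpenAICompatHttpError(
  429,
  JSON.stringify({ error: { message: "slow down", code: "rate_limited" } }),
);
console.log(classifyOpenAICompatError(overloaded));
// → { kind: "provider_error", providerCode: "rate_limited",
//     message: "HTTP 429: slow down", retryable: true }

// A context-length failure gets its own kind so the agent loop can compact
// instead of retrying.
const tooLong = new OpenAICompatHttpError(
  400,
  JSON.stringify({ error: { message: "prompt too long", code: "context_length_exceeded" } }),
);
console.log(classifyOpenAICompatError(tooLong)?.kind); // → "context_exceeded"
```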
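Finally, the `src/session/messages.ts` region defines the storage codecs. A round-trip sketch for the OpenAI-flavored pair; the function names come from the export statement above, while the `zidane/session` import path is an assumption (only minified re-export names are visible in the bundle):

```ts
import { toOpenAI, fromOpenAI, autoDetectAndConvert } from "zidane/session";

const canonical = {
  role: "assistant" as const,
  content: [
    { type: "text", text: "Checking the weather." },
    { type: "tool_call", id: "call_1", name: "get_weather", input: { city: "Paris" } },
  ],
};

// toOpenAI stores assistant tool calls under the "__zidane_assistant_tc__"
// tagged object (arguments JSON-stringified) so they survive serialization
// without an OpenAI-specific message type.
const stored = toOpenAI(canonical);

// fromOpenAI reverses the encoding; autoDetectAndConvert sniffs the "__zidane_"
// tag (or block types like tool_use / image_url) and routes to the right codec.
const restored = fromOpenAI(stored);
console.log(restored.content.length === canonical.content.length); // → true
console.log(autoDetectAndConvert(stored).content[1]); // → the tool_call block
```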