zidane 3.2.0 → 3.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -242,7 +242,7 @@ interface AgentBehavior {
242
242
  * `runTurn` is 1-indexed, run-relative (resumed sessions reset).
243
243
  *
244
244
  * No-op when `thinkingBudget` is unset. Honored by every provider that
245
- * respects `thinkingBudget` (anthropic legacy enabled+budget path,
245
+ * respects `thinkingBudget` (anthropic explicit-budget `enabled` path,
246
246
  * adaptive `maxTokensCap`, openai-compat `max_tokens` padding).
247
247
  *
248
248
  * Default: `undefined` (no decay).
@@ -356,8 +356,8 @@ interface AgentBehavior {
356
356
  * model "remembers" stale content and applies a substring edit against
357
357
  * bytes that have moved.
358
358
  *
359
- * Requires a session. Off by default to preserve back-compat turn it on
360
- * for stricter eval-grade runs.
359
+ * Requires a session. Off by default; turn it on for stricter eval-grade
360
+ * runs where silent edit corruption would invalidate the result.
361
361
  *
362
362
  * Default: `false`.
363
363
  */
@@ -392,14 +392,40 @@ interface AgentBehavior {
392
392
  * for elision so the model keeps the freshest tool context. Default: `4`.
393
393
  */
394
394
  compactKeepTurns?: number;
395
- }
396
- interface ImageContent {
397
- type: 'image';
398
- source: {
399
- type: 'base64';
400
- media_type: string;
401
- data: string;
402
- };
395
+ /**
396
+ * Prefix every line of `read_file` output with its 1-indexed line number
397
+ * followed by a tab (`<N>\t<content>`) — the compact `cat -n`-style
398
+ * format Claude Code emits. The `edit` tool strips the prefix from
399
+ * `old_string` / `new_string` so the model can paste back a numbered
400
+ * chunk verbatim without breaking the match.
401
+ *
402
+ * Set `false` to opt out — useful for callers piping `read_file` into
403
+ * downstream parsers that don't recognize the prefix. Per-call
404
+ * `read_file({ lineNumbers: false })` overrides this default.
405
+ *
406
+ * Default: `true`.
407
+ */
408
+ readLineNumbers?: boolean;
409
+ /**
410
+ * Replace older `read_file` `tool_result` blocks with a short stub when
411
+ * a successful `edit` / `multi_edit` / `write_file` later in the same
412
+ * run modified the same path. The replacement is applied to the
413
+ * wire-level message list only — persisted session turns keep the
414
+ * original content.
415
+ *
416
+ * Eliminates the common waste pattern where the model carries the
417
+ * pre-edit file body forward across many turns "in case it needs it".
418
+ * Pairs cleanly with `compactStrategy: 'tail'`: stale reads shrink
419
+ * first, then the byte-threshold compaction fires if anything's left.
420
+ *
421
+ * Detection is conservative — only triggers when the corresponding
422
+ * tool_result confirms success (`Edited …`, `Created …`, `Updated …`).
423
+ * Failed edits and `No change needed` write_file calls do NOT
424
+ * invalidate prior reads.
425
+ *
426
+ * Default: `false`.
427
+ */
428
+ elideStaleReads?: boolean;
403
429
  }
404
430
  /**
405
431
  * One block of a multimodal user prompt.
@@ -506,6 +532,35 @@ type SessionContentBlock = {
506
532
  type: 'thinking';
507
533
  text: string;
508
534
  signature?: string;
535
+ /**
536
+ * Provider that minted `signature`. Signatures are provider-bound (Anthropic
537
+ * HMAC vs. OpenAI `encrypted_content`) and are dropped on cross-provider
538
+ * hops to avoid 400s. Unset means legacy/unknown — forwarded as-is.
539
+ */
540
+ signatureProducer?: 'anthropic' | 'openai';
541
+ } | {
542
+ type: 'redacted_thinking';
543
+ data: string;
544
+ } | {
545
+ /**
546
+ * Opaque round-trip envelope for reasoning state minted by an OpenAI-compat
547
+ * gateway (currently OpenRouter). The gateway expects its own
548
+ * `reasoning_details` array echoed back verbatim on the next turn so the
549
+ * upstream model can resume an extended-reasoning chain across tool calls.
550
+ *
551
+ * Stored opaquely because the items are provider-bound (Anthropic HMAC
552
+ * signatures, OpenAI `encrypted_content`, model-specific summary formats
553
+ * — all flowing through the gateway's normalized envelope).
554
+ */
555
+ type: 'provider_reasoning';
556
+ producer: 'openrouter';
557
+ details: unknown[];
558
+ /**
559
+ * Model id that produced the details. Reasoning is bound to a specific
560
+ * upstream route — a model switch on the next turn invalidates the
561
+ * embedded signatures, so the sender drops the block on mismatch.
562
+ */
563
+ model?: string;
509
564
  };
510
565
  interface SessionMessage {
511
566
  role: 'user' | 'assistant';
@@ -540,11 +595,6 @@ interface AgentRunOptions {
540
595
  prompt?: string | PromptPart[];
541
596
  system?: string;
542
597
  thinking?: ThinkingLevel;
543
- /**
544
- * Legacy image attachments. When `prompt` is a string, these are appended as image
545
- * parts after the text. Ignored when `prompt` is a `PromptPart[]`.
546
- */
547
- images?: ImageContent[];
548
598
  /** Abort signal — when triggered, the agent stops after the current turn */
549
599
  signal?: AbortSignal;
550
600
  /** Behavior overrides for this run (overrides agent defaults) */
@@ -928,6 +978,23 @@ interface OpenAICompatParams {
928
978
  * Default: `false`. The `openrouter` wrapper sets this to `true`.
929
979
  */
930
980
  cacheBreakpoints?: boolean;
981
+ /**
982
+ * Whether this endpoint speaks OpenRouter's normalized reasoning envelope —
983
+ * `reasoning: { effort | max_tokens | exclude }` on requests and structured
984
+ * `reasoning_details[]` on assistant messages, round-tripped to preserve
985
+ * extended-reasoning state across turns.
986
+ *
987
+ * - `true` — map zidane's `behavior.thinking` / `behavior.thinkingBudget` to
988
+ * the request's `reasoning` field, capture `reasoning_details`
989
+ * from streaming responses into `provider_reasoning` blocks, and
990
+ * echo them back on subsequent assistant messages.
991
+ * - `false` — never set the field; drop any stored `provider_reasoning`
992
+ * blocks before sending. Safe default for hosts that strict-
993
+ * validate the request schema.
994
+ *
995
+ * Default: `false`. The `openrouter` wrapper sets this to `true`.
996
+ */
997
+ supportsReasoning?: boolean;
931
998
  /**
932
999
  * Generic pass-through for fields on the Chat Completions request body that
933
1000
  * zidane does not yet type. Spread into the request before the typed core
@@ -1082,8 +1149,8 @@ interface Provider {
1082
1149
  } & Record<string, unknown>;
1083
1150
  /** Format tool specs for this provider */
1084
1151
  formatTools: (tools: ToolSpec[]) => unknown[];
1085
- /** Create a user message (text or with images) */
1086
- userMessage: (content: string, images?: ImageContent[]) => SessionMessage;
1152
+ /** Create a text-only user message. Multimodal content goes through `promptMessage`. */
1153
+ userMessage: (content: string) => SessionMessage;
1087
1154
  /** Create an assistant message (for priming) */
1088
1155
  assistantMessage: (content: string) => SessionMessage;
1089
1156
  /** Create a tool results message to send back */
@@ -1584,13 +1651,13 @@ interface McpConnection {
1584
1651
  */
1585
1652
  declare function normalizeMcpServers(input: unknown): McpServerConfig[];
1586
1653
  /**
1587
- * Lossy flattener — converts MCP `CallToolResult.content` blocks to a single string.
1588
- * Text blocks are extracted; non-text blocks are JSON-stringified.
1654
+ * Lossy flattener — converts MCP `CallToolResult.content` blocks to a single
1655
+ * string. Text blocks are extracted; non-text blocks are JSON-stringified.
1589
1656
  *
1590
- * Prefer {@link normalizeMcpBlocks} for new code it preserves image blocks so the
1591
- * provider can route them through to the model natively (Anthropic tool_result blocks,
1592
- * OpenAI companion-user-message). This function is kept for back-compat and is useful
1593
- * as a logging/display helper where a single string is required.
1657
+ * Use this only at UI / log boundaries that require a string. The agent
1658
+ * loop itself routes through {@link normalizeMcpBlocks} so image blocks
1659
+ * survive into provider-native tool_result content (Anthropic blocks,
1660
+ * OpenAI companion-user-message).
1594
1661
  */
1595
1662
  declare function resultToString(content: unknown[]): string;
1596
1663
  /**
@@ -2173,4 +2240,4 @@ interface Agent {
2173
2240
  }
2174
2241
  declare function createAgent({ provider, name: agentName, system: agentSystem, tools: agentTools, toolAliases, behavior: agentBehavior, execution, mcpServers, session, skills: agentSkills, mcpConnector, eager }: AgentOptions): Agent;
2175
2242
 
2176
- export { type ToolHookContext as $, type Agent as A, type SessionData as B, CONTEXT_EXCEEDED_MESSAGE_PATTERNS as C, type SessionEndStatus as D, type SessionHookContext as E, type SessionMessage as F, type SessionRun as G, type SessionStore as H, type ImageContent as I, type SessionTurn as J, type SkillConfig as K, type SkillResource as L, type McpConnection as M, type SkillsConfig as N, type OAuthRefreshHookContext as O, type PromptDocumentPart as P, type SpawnHookContext as Q, type RemoteStoreOptions as R, type Session as S, type StreamCallbacks as T, type StreamHookContext as U, type StreamOptions as V, type ThinkingLevel as W, type ToolCall as X, type ToolContext as Y, type ToolDef as Z, type ToolExecutionMode as _, AgentAbortedError as a, type ToolMap as a0, type ToolResult as a1, type ToolResultContent as a2, type ToolResultImageContent as a3, type ToolResultTextContent as a4, type ToolSpec as a5, type TurnFinishReason as a6, type TurnResult as a7, type TurnUsage as a8, matchesContextExceeded as a9, fromOpenAI as aA, loadSession as aB, mapOAIFinishReason as aC, normalizeMcpBlocks as aD, normalizeMcpServers as aE, openai as aF, openaiCompat as aG, openrouter as aH, resultToString as aI, toAnthropic as aJ, toOpenAI as aK, toTypedError as aL, toolOutputByteLength as aa, toolResultToText as ab, type ActivationVia as ac, type ActiveSkill as ad, type DeactivationReason as ae, type FileMapAdapter as af, type FileMapStoreOptions as ag, type OpenAICompatAuthHeader as ah, OpenAICompatHttpError as ai, type OpenAICompatParams as aj, type SkillActivationState as ak, type SkillActivationStateOptions as al, type SkillDiagnostic as am, type SkillSource as an, anthropic as ao, autoDetectAndConvert as ap, cerebras as aq, classifyOpenAICompatError as ar, connectMcpServers as as, createAgent as at, createFileMapStore as au, createMemoryStore as av, createRemoteStore as aw, createSession as ax, createSkillActivationState as ay, fromAnthropic as az, type AgentBehavior as b, AgentContextExceededError as c, type AgentHooks as d, type AgentOptions as e, AgentProviderError as f, type AgentRunOptions as g, type AgentStats as h, AgentToolNotAllowedError as i, type AnthropicParams as j, type CerebrasParams as k, type ChildRunStats as l, type ClassifiedError as m, type ClassifiedErrorKind as n, type CreateSessionOptions as o, type McpServerConfig as p, type McpToolHookContext as q, type OpenAIParams as r, type OpenRouterParams as s, type PromptImagePart as t, type PromptPart as u, type PromptTextPart as v, type Provider as w, type ProviderCapabilities as x, type RunHookMap as y, type SessionContentBlock as z };
2243
+ export { type ToolMap as $, type Agent as A, type SessionData as B, CONTEXT_EXCEEDED_MESSAGE_PATTERNS as C, type SessionEndStatus as D, type SessionHookContext as E, type SessionMessage as F, type SessionRun as G, type SessionStore as H, type SessionTurn as I, type SkillConfig as J, type SkillResource as K, type SkillsConfig as L, type McpConnection as M, type SpawnHookContext as N, type OAuthRefreshHookContext as O, type PromptDocumentPart as P, type StreamCallbacks as Q, type RemoteStoreOptions as R, type Session as S, type StreamHookContext as T, type StreamOptions as U, type ThinkingLevel as V, type ToolCall as W, type ToolContext as X, type ToolDef as Y, type ToolExecutionMode as Z, type ToolHookContext as _, AgentAbortedError as a, type ToolResult as a0, type ToolResultContent as a1, type ToolResultImageContent as a2, type ToolResultTextContent as a3, type ToolSpec as a4, type TurnFinishReason as a5, type TurnResult as a6, type TurnUsage as a7, matchesContextExceeded as a8, toolOutputByteLength as a9, loadSession as aA, mapOAIFinishReason as aB, normalizeMcpBlocks as aC, normalizeMcpServers as aD, openai as aE, openaiCompat as aF, openrouter as aG, resultToString as aH, toAnthropic as aI, toOpenAI as aJ, toTypedError as aK, toolResultToText as aa, type ActivationVia as ab, type ActiveSkill as ac, type DeactivationReason as ad, type FileMapAdapter as ae, type FileMapStoreOptions as af, type OpenAICompatAuthHeader as ag, OpenAICompatHttpError as ah, type OpenAICompatParams as ai, type SkillActivationState as aj, type SkillActivationStateOptions as ak, type SkillDiagnostic as al, type SkillSource as am, anthropic as an, autoDetectAndConvert as ao, cerebras as ap, classifyOpenAICompatError as aq, connectMcpServers as ar, createAgent as as, createFileMapStore as at, createMemoryStore as au, createRemoteStore as av, createSession as aw, createSkillActivationState as ax, fromAnthropic as ay, fromOpenAI as az, type AgentBehavior as b, AgentContextExceededError as c, type AgentHooks as d, type AgentOptions as e, AgentProviderError as f, type AgentRunOptions as g, type AgentStats as h, AgentToolNotAllowedError as i, type AnthropicParams as j, type CerebrasParams as k, type ChildRunStats as l, type ClassifiedError as m, type ClassifiedErrorKind as n, type CreateSessionOptions as o, type McpServerConfig as p, type McpToolHookContext as q, type OpenAIParams as r, type OpenRouterParams as s, type PromptImagePart as t, type PromptPart as u, type PromptTextPart as v, type Provider as w, type ProviderCapabilities as x, type RunHookMap as y, type SessionContentBlock as z };
@@ -1,19 +1,19 @@
1
1
  import {
2
+ createSpawnTool,
2
3
  edit,
3
4
  listFiles,
4
5
  multiEdit,
5
6
  readFile,
6
7
  shell,
7
- spawn,
8
8
  writeFile
9
- } from "./chunk-6JIVVEQQ.js";
9
+ } from "./chunk-Z2E5QN5X.js";
10
10
 
11
11
  // src/presets/basic.ts
12
12
  var basicTools = { shell, readFile, writeFile, listFiles, edit, multiEdit };
13
13
  var basic_default = definePreset({
14
14
  name: "basic",
15
15
  system: "You are a helpful assistant with access to shell, file reading, file writing, surgical and multi-edit tools, directory listing, and sub-agent spawning. Prefer `edit` / `multi_edit` for in-place changes and `write_file` for full file overwrites. Use them to accomplish tasks in the project directory.",
16
- tools: { ...basicTools, spawn }
16
+ tools: { ...basicTools, spawn: createSpawnTool() }
17
17
  });
18
18
 
19
19
  // src/presets/index.ts
@@ -21,6 +21,8 @@ async function consumeSSE(response, callbacks, signal) {
21
21
  let finishReason = "stop";
22
22
  let usage = { input: 0, output: 0 };
23
23
  const tcMap = /* @__PURE__ */ new Map();
24
+ const reasoningMap = /* @__PURE__ */ new Map();
25
+ let sawReasoningDetails = false;
24
26
  try {
25
27
  while (true) {
26
28
  if (signal?.aborted)
@@ -56,10 +58,36 @@ async function consumeSSE(response, callbacks, signal) {
56
58
  if (fr)
57
59
  finishReason = fr;
58
60
  const delta = choice.delta;
59
- const thinkingDelta = delta?.reasoning_content ?? delta?.reasoning;
60
- if (thinkingDelta) {
61
- thinking += thinkingDelta;
62
- callbacks.onThinking?.(thinkingDelta);
61
+ const reasoningDeltaArr = delta?.reasoning_details;
62
+ if (reasoningDeltaArr && reasoningDeltaArr.length > 0) {
63
+ sawReasoningDetails = true;
64
+ for (const item of reasoningDeltaArr) {
65
+ const idx = typeof item.index === "number" ? item.index : 0;
66
+ const existing = reasoningMap.get(idx) ?? {};
67
+ if (typeof item.text === "string") {
68
+ existing.text = (existing.text ?? "") + item.text;
69
+ thinking += item.text;
70
+ callbacks.onThinking?.(item.text);
71
+ }
72
+ if (typeof item.summary === "string") {
73
+ existing.summary = (existing.summary ?? "") + item.summary;
74
+ thinking += item.summary;
75
+ callbacks.onThinking?.(item.summary);
76
+ }
77
+ for (const key of ["type", "signature", "data", "format", "id"]) {
78
+ const v = item[key];
79
+ if (typeof v === "string")
80
+ existing[key] = v;
81
+ }
82
+ reasoningMap.set(idx, existing);
83
+ }
84
+ }
85
+ if (!sawReasoningDetails) {
86
+ const thinkingDelta = delta?.reasoning_content ?? delta?.reasoning;
87
+ if (thinkingDelta) {
88
+ thinking += thinkingDelta;
89
+ callbacks.onThinking?.(thinkingDelta);
90
+ }
63
91
  }
64
92
  const contentDelta = delta?.content;
65
93
  if (contentDelta) {
@@ -113,7 +141,8 @@ async function consumeSSE(response, callbacks, signal) {
113
141
  );
114
142
  }
115
143
  }
116
- return { text, thinking, toolCalls, finishReason, usage };
144
+ const reasoningDetails = Array.from(reasoningMap.entries()).sort(([a], [b]) => a - b).map(([, item]) => item);
145
+ return { text, thinking, toolCalls, finishReason, usage, reasoningDetails };
117
146
  }
118
147
  function toImageUrlPart(img) {
119
148
  return {
@@ -137,11 +166,23 @@ function summarizeToolResultOutput(output) {
137
166
  function toOAIMessages(system, messages, options = {}) {
138
167
  const out = [{ role: "system", content: system }];
139
168
  const nativeImageInTool = options.imageInToolResult === true;
169
+ const reasoningEnabled = options.supportsReasoning === true;
170
+ const activeModel = options.model;
140
171
  for (const msg of messages) {
141
172
  const toolResults = msg.content.filter((b) => b.type === "tool_result");
142
173
  const toolCalls = msg.content.filter((b) => b.type === "tool_call");
143
174
  const textBlocks = msg.content.filter((b) => b.type === "text");
144
175
  const imageBlocks = msg.content.filter((b) => b.type === "image");
176
+ const reasoningBlocks = reasoningEnabled ? msg.content.filter((b) => {
177
+ if (b.type !== "provider_reasoning")
178
+ return false;
179
+ if (b.producer !== "openrouter")
180
+ return false;
181
+ if (b.model && activeModel && b.model !== activeModel)
182
+ return false;
183
+ return true;
184
+ }) : [];
185
+ const reasoningDetails = reasoningBlocks.flatMap((b) => b.details);
145
186
  if (toolResults.length > 0) {
146
187
  for (const tr of toolResults) {
147
188
  if (typeof tr.output === "string") {
@@ -176,7 +217,7 @@ ${attachedMarker}` : attachedMarker;
176
217
  }
177
218
  if (toolCalls.length > 0) {
178
219
  const textContent = textBlocks.length > 0 ? textBlocks[0].text : null;
179
- out.push({
220
+ const m = {
180
221
  role: "assistant",
181
222
  content: textContent,
182
223
  tool_calls: toolCalls.map((tc) => ({
@@ -184,7 +225,10 @@ ${attachedMarker}` : attachedMarker;
184
225
  type: "function",
185
226
  function: { name: tc.name, arguments: JSON.stringify(tc.input) }
186
227
  }))
187
- });
228
+ };
229
+ if (reasoningDetails.length > 0)
230
+ m.reasoning_details = reasoningDetails;
231
+ out.push(m);
188
232
  continue;
189
233
  }
190
234
  if (imageBlocks.length > 0) {
@@ -195,16 +239,23 @@ ${attachedMarker}` : attachedMarker;
195
239
  for (const b of textBlocks) {
196
240
  parts.push({ type: "text", text: b.text });
197
241
  }
198
- out.push({ role: msg.role, content: parts });
242
+ const m = { role: msg.role, content: parts };
243
+ if (msg.role === "assistant" && reasoningDetails.length > 0)
244
+ m.reasoning_details = reasoningDetails;
245
+ out.push(m);
199
246
  continue;
200
247
  }
248
+ let pushed;
201
249
  if (textBlocks.length === 1) {
202
- out.push({ role: msg.role, content: textBlocks[0].text });
250
+ pushed = { role: msg.role, content: textBlocks[0].text };
203
251
  } else if (textBlocks.length > 1) {
204
- out.push({ role: msg.role, content: textBlocks.map((b) => ({ type: "text", text: b.text })) });
252
+ pushed = { role: msg.role, content: textBlocks.map((b) => ({ type: "text", text: b.text })) };
205
253
  } else {
206
- out.push({ role: msg.role, content: null });
254
+ pushed = { role: msg.role, content: null };
207
255
  }
256
+ if (msg.role === "assistant" && reasoningDetails.length > 0)
257
+ pushed.reasoning_details = reasoningDetails;
258
+ out.push(pushed);
208
259
  }
209
260
  return out;
210
261
  }
@@ -246,20 +297,7 @@ function formatTools(tools) {
246
297
  function: { name: t.name, description: t.description, parameters: t.inputSchema }
247
298
  }));
248
299
  }
249
- function userMessage(content, images) {
250
- if (images?.length) {
251
- return {
252
- role: "user",
253
- content: [
254
- ...images.map((img) => ({
255
- type: "image",
256
- mediaType: img.source.media_type,
257
- data: img.source.data
258
- })),
259
- { type: "text", text: content }
260
- ]
261
- };
262
- }
300
+ function userMessage(content) {
263
301
  return { role: "user", content: [{ type: "text", text: content }] };
264
302
  }
265
303
  function assistantMessage(content) {
@@ -275,8 +313,18 @@ function toolResultsMessage(results) {
275
313
  }))
276
314
  };
277
315
  }
278
- function buildAssistantContent(text, toolCalls, thinking) {
316
+ function buildAssistantContent(text, toolCalls, thinking, reasoning) {
279
317
  const content = [];
318
+ if (reasoning && reasoning.details.length > 0) {
319
+ const block = {
320
+ type: "provider_reasoning",
321
+ producer: reasoning.producer,
322
+ details: reasoning.details
323
+ };
324
+ if (reasoning.model)
325
+ block.model = reasoning.model;
326
+ content.push(block);
327
+ }
280
328
  if (thinking)
281
329
  content.push({ type: "thinking", text: thinking });
282
330
  if (text)
@@ -362,6 +410,18 @@ function mapOAIFinishReason(reason) {
362
410
  return "other";
363
411
  }
364
412
  }
413
+ function planOpenRouterReasoning(thinking, thinkingBudget) {
414
+ if ((!thinking || thinking === "off") && typeof thinkingBudget !== "number")
415
+ return void 0;
416
+ const out = {};
417
+ if (thinking && thinking !== "off" && thinking !== "adaptive") {
418
+ out.effort = thinking === "minimal" ? "low" : thinking;
419
+ }
420
+ if (typeof thinkingBudget === "number" && thinkingBudget > 0) {
421
+ out.max_tokens = thinkingBudget;
422
+ }
423
+ return out;
424
+ }
365
425
  function openaiCompat(params) {
366
426
  const name = params.name ?? "openai-compat";
367
427
  const defaultModel = params.defaultModel ?? "gpt-4o-mini";
@@ -374,6 +434,7 @@ function openaiCompat(params) {
374
434
  imageInToolResult: params.capabilities?.imageInToolResult ?? false
375
435
  };
376
436
  const cacheBreakpointsEnabled = params.cacheBreakpoints === true;
437
+ const reasoningEnabled = params.supportsReasoning === true;
377
438
  return {
378
439
  name,
379
440
  meta: { defaultModel, capabilities },
@@ -385,7 +446,9 @@ function openaiCompat(params) {
385
446
  async stream(options, callbacks) {
386
447
  const modelId = options.model || defaultModel;
387
448
  const messages = toOAIMessages(options.system, options.messages, {
388
- imageInToolResult: capabilities.imageInToolResult === true
449
+ imageInToolResult: capabilities.imageInToolResult === true,
450
+ supportsReasoning: reasoningEnabled,
451
+ model: modelId
389
452
  });
390
453
  const shouldCache = cacheBreakpointsEnabled && options.cache !== false;
391
454
  if (shouldCache) {
@@ -401,6 +464,11 @@ function openaiCompat(params) {
401
464
  max_tokens: maxTokens,
402
465
  stream: true
403
466
  };
467
+ if (reasoningEnabled) {
468
+ const reasoning = planOpenRouterReasoning(options.thinking, options.thinkingBudget);
469
+ if (reasoning)
470
+ body.reasoning = reasoning;
471
+ }
404
472
  if (options.tools && options.tools.length > 0) {
405
473
  body.tools = shouldCache ? applyOAIToolCacheBreakpoint(options.tools) : options.tools;
406
474
  }
@@ -429,7 +497,12 @@ function openaiCompat(params) {
429
497
  const result = await consumeSSE(response, callbacks, options.signal);
430
498
  const finishReason = mapOAIFinishReason(result.finishReason);
431
499
  return {
432
- assistantMessage: buildAssistantContent(result.text, result.toolCalls, result.thinking),
500
+ assistantMessage: buildAssistantContent(
501
+ result.text,
502
+ result.toolCalls,
503
+ result.thinking,
504
+ reasoningEnabled && result.reasoningDetails.length > 0 ? { details: result.reasoningDetails, producer: "openrouter", model: modelId } : void 0
505
+ ),
433
506
  text: result.text,
434
507
  toolCalls: result.toolCalls,
435
508
  done: result.finishReason === "stop" || result.toolCalls.length === 0,
@@ -513,9 +586,26 @@ function fromAnthropic(msg) {
513
586
  content.push({ type: "tool_call", id: b.id, name: b.name, input: b.input });
514
587
  } else if (b.type === "tool_result") {
515
588
  const output = decodeAnthropicToolResultContent(b.content);
516
- content.push({ type: "tool_result", callId: b.tool_use_id, output });
589
+ const block2 = {
590
+ type: "tool_result",
591
+ callId: b.tool_use_id,
592
+ output
593
+ };
594
+ if (b.is_error === true)
595
+ block2.isError = true;
596
+ content.push(block2);
517
597
  } else if (b.type === "thinking") {
518
- content.push({ type: "thinking", text: b.thinking, signature: b.signature });
598
+ const block2 = {
599
+ type: "thinking",
600
+ text: b.thinking ?? ""
601
+ };
602
+ if (typeof b.signature === "string") {
603
+ block2.signature = b.signature;
604
+ block2.signatureProducer = "anthropic";
605
+ }
606
+ content.push(block2);
607
+ } else if (b.type === "redacted_thinking") {
608
+ content.push({ type: "redacted_thinking", data: b.data ?? "" });
519
609
  }
520
610
  }
521
611
  }
@@ -582,7 +672,7 @@ function fromOpenAI(msg) {
582
672
  return { role, content };
583
673
  }
584
674
  function toAnthropic(msg) {
585
- const blocks = msg.content.map((block) => {
675
+ const blocks = msg.content.filter((b) => !(b.type === "thinking" && b.signatureProducer === "openai")).filter((b) => b.type !== "provider_reasoning").map((block) => {
586
676
  switch (block.type) {
587
677
  case "text":
588
678
  return { type: "text", text: block.text };
@@ -590,10 +680,24 @@ function toAnthropic(msg) {
590
680
  return { type: "image", source: { type: "base64", media_type: block.mediaType, data: block.data } };
591
681
  case "tool_call":
592
682
  return { type: "tool_use", id: block.id, name: block.name, input: block.input };
593
- case "tool_result":
594
- return { type: "tool_result", tool_use_id: block.callId, content: encodeAnthropicToolResultContent(block.output) };
595
- case "thinking":
596
- return { type: "thinking", thinking: block.text, signature: block.signature };
683
+ case "tool_result": {
684
+ const out = {
685
+ type: "tool_result",
686
+ tool_use_id: block.callId,
687
+ content: encodeAnthropicToolResultContent(block.output)
688
+ };
689
+ if (block.isError)
690
+ out.is_error = true;
691
+ return out;
692
+ }
693
+ case "thinking": {
694
+ const out = { type: "thinking", thinking: block.text };
695
+ if (block.signature)
696
+ out.signature = block.signature;
697
+ return out;
698
+ }
699
+ case "redacted_thinking":
700
+ return { type: "redacted_thinking", data: block.data };
597
701
  default:
598
702
  return { type: "text", text: "" };
599
703
  }
@@ -1,6 +1,6 @@
1
1
  import {
2
2
  validateSkillForWrite
3
- } from "./chunk-J4ZOSNSH.js";
3
+ } from "./chunk-X3VOTPVM.js";
4
4
 
5
5
  // src/skills/index.ts
6
6
  function defineSkill(config) {
@@ -5,7 +5,7 @@ import {
5
5
  toAnthropic,
6
6
  toolResultsMessage,
7
7
  userMessage
8
- } from "./chunk-QX7TDFD4.js";
8
+ } from "./chunk-4ILGBQ23.js";
9
9
  import {
10
10
  matchesContextExceeded
11
11
  } from "./chunk-LNN5UTS2.js";
@@ -13,7 +13,7 @@ import {
13
13
  // src/providers/oauth.ts
14
14
  import { existsSync, readFileSync, renameSync, writeFileSync } from "fs";
15
15
  import { resolve } from "path";
16
- import { getOAuthApiKey } from "@yaelg/pi-ai/oauth";
16
+ import { getOAuthApiKey } from "@mariozechner/pi-ai/oauth";
17
17
  function credentialsFilePath() {
18
18
  return resolve(process.cwd(), ".credentials.json");
19
19
  }
@@ -365,20 +365,7 @@ function anthropic(anthropicParams) {
365
365
  input_schema: t.inputSchema
366
366
  }));
367
367
  },
368
- userMessage(content, images) {
369
- if (images && images.length > 0) {
370
- return {
371
- role: "user",
372
- content: [
373
- ...images.map((img) => ({
374
- type: "image",
375
- mediaType: img.source.media_type,
376
- data: img.source.data
377
- })),
378
- { type: "text", text: content }
379
- ]
380
- };
381
- }
368
+ userMessage(content) {
382
369
  return { role: "user", content: [{ type: "text", text: content }] };
383
370
  },
384
371
  assistantMessage(content) {
@@ -533,8 +520,8 @@ function cerebras(params) {
533
520
  }
534
521
 
535
522
  // src/providers/openai.ts
536
- import { getModel } from "@yaelg/pi-ai";
537
- import { streamOpenAICodexResponses } from "@yaelg/pi-ai/openai-codex-responses";
523
+ import { getModel } from "@mariozechner/pi-ai";
524
+ import { streamOpenAICodexResponses } from "@mariozechner/pi-ai/openai-codex-responses";
538
525
  var PROVIDER_ID = "openai-codex";
539
526
  var DEFAULT_MODEL = "gpt-5.4";
540
527
  function resolveModel(modelId) {
@@ -603,6 +590,8 @@ function toPiMessages(messages, modelId) {
603
590
  if (block.type === "text") {
604
591
  content.push({ type: "text", text: block.text });
605
592
  } else if (block.type === "thinking") {
593
+ if (block.signatureProducer === "anthropic")
594
+ continue;
606
595
  content.push({ type: "thinking", thinking: block.text, thinkingSignature: block.signature });
607
596
  } else if (block.type === "tool_call") {
608
597
  content.push({ type: "toolCall", id: block.id, name: block.name, arguments: block.input });
@@ -627,7 +616,15 @@ function fromPiAssistantMessage(message) {
627
616
  if (block.type === "text") {
628
617
  content.push({ type: "text", text: block.text });
629
618
  } else if (block.type === "thinking") {
630
- content.push({ type: "thinking", text: block.thinking, signature: block.thinkingSignature });
619
+ const out = {
620
+ type: "thinking",
621
+ text: block.thinking
622
+ };
623
+ if (typeof block.thinkingSignature === "string") {
624
+ out.signature = block.thinkingSignature;
625
+ out.signatureProducer = "openai";
626
+ }
627
+ content.push(out);
631
628
  } else if (block.type === "toolCall") {
632
629
  content.push({ type: "tool_call", id: block.id, name: block.name, input: block.arguments });
633
630
  }
@@ -820,7 +817,12 @@ function openrouter(params) {
820
817
  // silently ignores them for routes that cache automatically. Safe to turn on
821
818
  // by default — the caller can still flip `behavior.cache = false` to opt out
822
819
  // without needing to re-instantiate the provider.
823
- cacheBreakpoints: true
820
+ cacheBreakpoints: true,
821
+ // OpenRouter speaks the normalized `reasoning` request field and round-trips
822
+ // structured `reasoning_details` on assistant messages. Captured into
823
+ // `provider_reasoning` blocks and echoed back to preserve extended-reasoning
824
+ // state across turns on the same upstream route.
825
+ supportsReasoning: true
824
826
  });
825
827
  }
826
828
 
@@ -252,10 +252,9 @@ function installAllowedToolsGate(hooks, state) {
252
252
  }
253
253
 
254
254
  // src/skills/catalog.ts
255
- function buildCatalog(skills, optionsOrReadToolName = {}) {
255
+ function buildCatalog(skills, options = {}) {
256
256
  if (skills.length === 0)
257
257
  return "";
258
- const options = typeof optionsOrReadToolName === "string" ? { skillsToolRegistered: false, readToolName: optionsOrReadToolName } : optionsOrReadToolName;
259
258
  const skillsToolRegistered = options.skillsToolRegistered ?? true;
260
259
  const readToolName = options.readToolName ?? "read_file";
261
260
  const entries = skills.map((skill) => {
@@ -744,10 +743,6 @@ import { mkdtempSync, rmSync } from "fs";
744
743
  import { tmpdir } from "os";
745
744
  import { join as join3 } from "path";
746
745
  async function resolveSkills(config) {
747
- const { skills } = await resolveSkillsWithCleanup(config);
748
- return skills;
749
- }
750
- async function resolveSkillsWithCleanup(config) {
751
746
  const sourcedPaths = [];
752
747
  let writeDir;
753
748
  if (!config.skipDefaultPaths) {
@@ -828,6 +823,5 @@ export {
828
823
  writeSkillToDisk,
829
824
  writeSkillsToDisk,
830
825
  resolveSkills,
831
- resolveSkillsWithCleanup,
832
826
  interpolateShellCommands
833
827
  };