reasonix 0.5.13 → 0.5.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli/index.js CHANGED
@@ -210,6 +210,12 @@ var DeepSeekClient = class {
210
210
  if (opts.temperature !== void 0) payload.temperature = opts.temperature;
211
211
  if (opts.maxTokens !== void 0) payload.max_tokens = opts.maxTokens;
212
212
  if (opts.responseFormat) payload.response_format = opts.responseFormat;
213
+ if (opts.thinking) {
214
+ payload.extra_body = { thinking: { type: opts.thinking } };
215
+ }
216
+ if (opts.reasoningEffort) {
217
+ payload.reasoning_effort = opts.reasoningEffort;
218
+ }
213
219
  return payload;
214
220
  }
215
221
  /**
@@ -424,6 +430,13 @@ async function harvest(reasoningContent, client, options = {}, signal) {
424
430
  responseFormat: { type: "json_object" },
425
431
  temperature: 0,
426
432
  maxTokens: 600,
433
+ // Pin mode + effort so a future default-model swap (e.g. someone
434
+ // sets `options.model = "deepseek-v4-pro"`) can't accidentally
435
+ // turn this micro-extraction into a multi-thousand-reasoning-
436
+ // token call. DeepSeek ignores these on non-thinking models, so
437
+ // the request stays valid regardless of the chosen model.
438
+ thinking: "disabled",
439
+ reasoningEffort: "high",
427
440
  signal
428
441
  });
429
442
  return parsePlanState(resp.content, maxItems, maxItemLen);
@@ -1642,6 +1655,11 @@ function deleteSession(name) {
1642
1655
  const path = sessionPath(name);
1643
1656
  try {
1644
1657
  unlinkSync(path);
1658
+ const sidecar = path.replace(/\.jsonl$/, ".pending.json");
1659
+ try {
1660
+ unlinkSync(sidecar);
1661
+ } catch {
1662
+ }
1645
1663
  return true;
1646
1664
  } catch {
1647
1665
  return false;
@@ -1669,13 +1687,18 @@ function countLines(path) {
1669
1687
 
1670
1688
  // src/telemetry.ts
1671
1689
  var DEEPSEEK_PRICING = {
1672
- "deepseek-chat": { inputCacheHit: 0.028, inputCacheMiss: 0.28, output: 0.42 },
1673
- "deepseek-reasoner": { inputCacheHit: 0.028, inputCacheMiss: 0.28, output: 0.42 }
1690
+ "deepseek-v4-flash": { inputCacheHit: 0.028, inputCacheMiss: 0.139, output: 0.278 },
1691
+ "deepseek-v4-pro": { inputCacheHit: 0.139, inputCacheMiss: 1.667, output: 3.333 },
1692
+ // Compat aliases — priced as v4-flash per the deprecation notice.
1693
+ "deepseek-chat": { inputCacheHit: 0.028, inputCacheMiss: 0.139, output: 0.278 },
1694
+ "deepseek-reasoner": { inputCacheHit: 0.028, inputCacheMiss: 0.139, output: 0.278 }
1674
1695
  };
1675
1696
  var CLAUDE_SONNET_PRICING = { input: 3, output: 15 };
1676
1697
  var DEEPSEEK_CONTEXT_TOKENS = {
1677
- "deepseek-chat": 131072,
1678
- "deepseek-reasoner": 131072
1698
+ "deepseek-v4-flash": 1e6,
1699
+ "deepseek-v4-pro": 1e6,
1700
+ "deepseek-chat": 1e6,
1701
+ "deepseek-reasoner": 1e6
1679
1702
  };
1680
1703
  var DEFAULT_CONTEXT_TOKENS = 131072;
1681
1704
  function costUsd(model, usage) {
@@ -1773,6 +1796,8 @@ var CacheFirstLoop = class {
1773
1796
  harvestOptions;
1774
1797
  branchEnabled;
1775
1798
  branchOptions;
1799
+ /** See ReconfigurableOptions — mutable so `/effort` can flip mid-session. */
1800
+ reasoningEffort;
1776
1801
  sessionName;
1777
1802
  /**
1778
1803
  * Hook list, mutable so `/hooks reload` can swap it without
@@ -1798,7 +1823,8 @@ var CacheFirstLoop = class {
1798
1823
  this.client = opts.client;
1799
1824
  this.prefix = opts.prefix;
1800
1825
  this.tools = opts.tools ?? new ToolRegistry();
1801
- this.model = opts.model ?? "deepseek-chat";
1826
+ this.model = opts.model ?? "deepseek-v4-pro";
1827
+ this.reasoningEffort = opts.reasoningEffort ?? "max";
1802
1828
  this.maxToolIters = opts.maxToolIters ?? 64;
1803
1829
  this.hooks = opts.hooks ?? [];
1804
1830
  this.hookCwd = opts.hookCwd ?? process.cwd();
@@ -1914,6 +1940,7 @@ var CacheFirstLoop = class {
1914
1940
  configure(opts) {
1915
1941
  if (opts.model !== void 0) this.model = opts.model;
1916
1942
  if (opts.stream !== void 0) this._streamPreference = opts.stream;
1943
+ if (opts.reasoningEffort !== void 0) this.reasoningEffort = opts.reasoningEffort;
1917
1944
  if (opts.branch !== void 0) {
1918
1945
  if (typeof opts.branch === "number") {
1919
1946
  this.branchOptions = { budget: opts.branch };
@@ -2003,7 +2030,7 @@ var CacheFirstLoop = class {
2003
2030
  content: `aborted at iter ${iter}/${this.maxToolIters} \u2014 stopped without producing a summary (press \u2191 + Enter or /retry to resume)`
2004
2031
  };
2005
2032
  const stoppedMsg = "[aborted by user (Esc) \u2014 no summary produced. Ask again or /retry when ready; prior tool output is still in the log.]";
2006
- this.appendAndPersist({ role: "assistant", content: stoppedMsg });
2033
+ this.appendAndPersist(this.syntheticAssistantMessage(stoppedMsg));
2007
2034
  yield {
2008
2035
  turn: this._turn,
2009
2036
  role: "assistant_final",
@@ -2092,7 +2119,9 @@ var CacheFirstLoop = class {
2092
2119
  model: this.model,
2093
2120
  messages,
2094
2121
  tools: toolSpecs.length ? toolSpecs : void 0,
2095
- signal
2122
+ signal,
2123
+ thinking: thinkingModeForModel(this.model),
2124
+ reasoningEffort: this.reasoningEffort
2096
2125
  },
2097
2126
  {
2098
2127
  ...this.branchOptions,
@@ -2139,11 +2168,14 @@ var CacheFirstLoop = class {
2139
2168
  };
2140
2169
  } else if (this.stream) {
2141
2170
  const callBuf = /* @__PURE__ */ new Map();
2171
+ const readyIndices = /* @__PURE__ */ new Set();
2142
2172
  for await (const chunk of this.client.stream({
2143
2173
  model: this.model,
2144
2174
  messages,
2145
2175
  tools: toolSpecs.length ? toolSpecs : void 0,
2146
- signal
2176
+ signal,
2177
+ thinking: thinkingModeForModel(this.model),
2178
+ reasoningEffort: this.reasoningEffort
2147
2179
  })) {
2148
2180
  if (chunk.contentDelta) {
2149
2181
  assistantContent += chunk.contentDelta;
@@ -2174,13 +2206,18 @@ var CacheFirstLoop = class {
2174
2206
  if (d.argumentsDelta)
2175
2207
  cur.function.arguments = (cur.function.arguments ?? "") + d.argumentsDelta;
2176
2208
  callBuf.set(d.index, cur);
2209
+ if (!readyIndices.has(d.index) && cur.function.name && looksLikeCompleteJson(cur.function.arguments ?? "")) {
2210
+ readyIndices.add(d.index);
2211
+ }
2177
2212
  if (cur.function.name) {
2178
2213
  yield {
2179
2214
  turn: this._turn,
2180
2215
  role: "tool_call_delta",
2181
2216
  content: "",
2182
2217
  toolName: cur.function.name,
2183
- toolCallArgsChars: (cur.function.arguments ?? "").length
2218
+ toolCallArgsChars: (cur.function.arguments ?? "").length,
2219
+ toolCallIndex: d.index,
2220
+ toolCallReadyCount: readyIndices.size
2184
2221
  };
2185
2222
  }
2186
2223
  }
@@ -2192,7 +2229,9 @@ var CacheFirstLoop = class {
2192
2229
  model: this.model,
2193
2230
  messages,
2194
2231
  tools: toolSpecs.length ? toolSpecs : void 0,
2195
- signal
2232
+ signal,
2233
+ thinking: thinkingModeForModel(this.model),
2234
+ reasoningEffort: this.reasoningEffort
2196
2235
  });
2197
2236
  assistantContent = resp.content;
2198
2237
  reasoningContent = resp.reasoningContent ?? "";
@@ -2231,7 +2270,9 @@ var CacheFirstLoop = class {
2231
2270
  reasoningContent || null,
2232
2271
  assistantContent || null
2233
2272
  );
2234
- this.appendAndPersist(this.assistantMessage(assistantContent, repairedCalls));
2273
+ this.appendAndPersist(
2274
+ this.assistantMessage(assistantContent, repairedCalls, reasoningContent)
2275
+ );
2235
2276
  yield {
2236
2277
  turn: this._turn,
2237
2278
  role: "assistant_final",
@@ -2383,7 +2424,9 @@ ${reason}`;
2383
2424
  model: this.model,
2384
2425
  messages,
2385
2426
  // no tools → model is forced to answer in text
2386
- signal: this._turnAbort.signal
2427
+ signal: this._turnAbort.signal,
2428
+ thinking: thinkingModeForModel(this.model),
2429
+ reasoningEffort: this.reasoningEffort
2387
2430
  });
2388
2431
  const rawContent = resp.content?.trim() ?? "";
2389
2432
  const cleaned = stripHallucinatedToolMarkup(rawContent);
@@ -2393,7 +2436,7 @@ ${reason}`;
2393
2436
 
2394
2437
  ${summary}`;
2395
2438
  const summaryStats = this.stats.record(this._turn, this.model, resp.usage ?? new Usage());
2396
- this.appendAndPersist({ role: "assistant", content: summary });
2439
+ this.appendAndPersist(this.assistantMessage(summary, [], resp.reasoningContent ?? void 0));
2397
2440
  yield {
2398
2441
  turn: this._turn,
2399
2442
  role: "assistant_final",
@@ -2422,12 +2465,41 @@ ${summary}`;
2422
2465
  }
2423
2466
  return final;
2424
2467
  }
2425
- assistantMessage(content, toolCalls) {
2468
+ assistantMessage(content, toolCalls, reasoningContent) {
2426
2469
  const msg = { role: "assistant", content };
2427
2470
  if (toolCalls.length > 0) msg.tool_calls = toolCalls;
2471
+ if (reasoningContent && reasoningContent.length > 0) {
2472
+ msg.reasoning_content = reasoningContent;
2473
+ }
2474
+ return msg;
2475
+ }
2476
+ /**
2477
+ * Build a synthetic assistant message we insert into the log without
2478
+ * a real API round trip (abort notices, future system injections).
2479
+ * Reasoner models reject follow-up requests whose assistant history
2480
+ * is missing `reasoning_content`, so we stamp an empty-string
2481
+ * placeholder on reasoner sessions to satisfy the validator. V3
2482
+ * doesn't care — field stays absent there.
2483
+ */
2484
+ syntheticAssistantMessage(content) {
2485
+ const msg = { role: "assistant", content };
2486
+ if (isThinkingModeModel(this.model)) {
2487
+ msg.reasoning_content = "";
2488
+ }
2428
2489
  return msg;
2429
2490
  }
2430
2491
  };
2492
+ function isThinkingModeModel(model) {
2493
+ if (model.includes("reasoner")) return true;
2494
+ if (model === "deepseek-v4-flash" || model === "deepseek-v4-pro") return true;
2495
+ return false;
2496
+ }
2497
+ function thinkingModeForModel(model) {
2498
+ if (model === "deepseek-chat") return "disabled";
2499
+ if (model.includes("reasoner")) return "enabled";
2500
+ if (model === "deepseek-v4-flash" || model === "deepseek-v4-pro") return "enabled";
2501
+ return void 0;
2502
+ }
2431
2503
  function stripHallucinatedToolMarkup(s) {
2432
2504
  let out = s;
2433
2505
  out = out.replace(/<|DSML|function_calls>[\s\S]*?<\/?|DSML|function_calls>/g, "");
@@ -2443,6 +2515,15 @@ function safeParseToolArgs(raw) {
2443
2515
  return raw;
2444
2516
  }
2445
2517
  }
2518
+ function looksLikeCompleteJson(s) {
2519
+ if (!s || !s.trim()) return false;
2520
+ try {
2521
+ JSON.parse(s);
2522
+ return true;
2523
+ } catch {
2524
+ return false;
2525
+ }
2526
+ }
2446
2527
  function* hookWarnings(outcomes, turn) {
2447
2528
  for (const o of outcomes) {
2448
2529
  if (o.decision === "pass") continue;
@@ -3449,7 +3530,7 @@ function registerPlanTool(registry, opts = {}) {
3449
3530
  // src/tools/subagent.ts
3450
3531
  var DEFAULT_MAX_RESULT_CHARS2 = 8e3;
3451
3532
  var DEFAULT_MAX_ITERS = 16;
3452
- var DEFAULT_SUBAGENT_MODEL = "deepseek-chat";
3533
+ var DEFAULT_SUBAGENT_MODEL = "deepseek-v4-pro";
3453
3534
  var SUBAGENT_TOOL_NAME = "spawn_subagent";
3454
3535
  var NEVER_INHERITED_TOOLS = /* @__PURE__ */ new Set([SUBAGENT_TOOL_NAME, "submit_plan"]);
3455
3536
  async function spawnSubagent(opts) {
@@ -3457,11 +3538,14 @@ async function spawnSubagent(opts) {
3457
3538
  const maxToolIters = opts.maxToolIters ?? DEFAULT_MAX_ITERS;
3458
3539
  const maxResultChars = opts.maxResultChars ?? DEFAULT_MAX_RESULT_CHARS2;
3459
3540
  const sink = opts.sink;
3541
+ const skillName = opts.skillName;
3460
3542
  const startedAt = Date.now();
3461
3543
  const taskPreview = opts.task.length > 30 ? `${opts.task.slice(0, 30)}\u2026` : opts.task;
3462
3544
  sink?.current?.({
3463
3545
  kind: "start",
3464
3546
  task: taskPreview,
3547
+ skillName,
3548
+ model,
3465
3549
  iter: 0,
3466
3550
  elapsedMs: 0
3467
3551
  });
@@ -3491,6 +3575,8 @@ async function spawnSubagent(opts) {
3491
3575
  sink?.current?.({
3492
3576
  kind: "progress",
3493
3577
  task: taskPreview,
3578
+ skillName,
3579
+ model,
3494
3580
  iter: toolIter,
3495
3581
  elapsedMs: Date.now() - startedAt
3496
3582
  });
@@ -3513,17 +3599,22 @@ async function spawnSubagent(opts) {
3513
3599
  const elapsedMs = Date.now() - startedAt;
3514
3600
  const turns = childLoop.stats.turns.length;
3515
3601
  const costUsd2 = childLoop.stats.totalCost;
3602
+ const usage = aggregateChildUsage(childLoop);
3516
3603
  const truncated = final.length > maxResultChars ? `${final.slice(0, maxResultChars)}
3517
3604
 
3518
3605
  [\u2026truncated ${final.length - maxResultChars} chars; ask the subagent for a tighter summary if you need more.]` : final;
3519
3606
  sink?.current?.({
3520
3607
  kind: "end",
3521
3608
  task: taskPreview,
3609
+ skillName,
3610
+ model,
3522
3611
  iter: toolIter,
3523
3612
  elapsedMs,
3524
3613
  summary: errorMessage ? void 0 : truncated.slice(0, 120),
3525
3614
  error: errorMessage,
3526
- turns
3615
+ turns,
3616
+ costUsd: costUsd2,
3617
+ usage
3527
3618
  });
3528
3619
  return {
3529
3620
  success: !errorMessage,
@@ -3532,9 +3623,23 @@ async function spawnSubagent(opts) {
3532
3623
  turns,
3533
3624
  toolIters: toolIter,
3534
3625
  elapsedMs,
3535
- costUsd: costUsd2
3626
+ costUsd: costUsd2,
3627
+ model,
3628
+ skillName,
3629
+ usage
3536
3630
  };
3537
3631
  }
3632
+ function aggregateChildUsage(loop) {
3633
+ const agg = new Usage();
3634
+ for (const t of loop.stats.turns) {
3635
+ agg.promptTokens += t.usage.promptTokens;
3636
+ agg.completionTokens += t.usage.completionTokens;
3637
+ agg.totalTokens += t.usage.totalTokens;
3638
+ agg.promptCacheHitTokens += t.usage.promptCacheHitTokens;
3639
+ agg.promptCacheMissTokens += t.usage.promptCacheMissTokens;
3640
+ }
3641
+ return agg;
3642
+ }
3538
3643
  function formatSubagentResult(r) {
3539
3644
  if (!r.success) {
3540
3645
  return JSON.stringify({
@@ -5529,6 +5634,8 @@ function appendUsage(input) {
5529
5634
  costUsd: costUsd(input.model, input.usage),
5530
5635
  claudeEquivUsd: claudeEquivalentCost(input.usage)
5531
5636
  };
5637
+ if (input.kind === "subagent") record.kind = "subagent";
5638
+ if (input.subagent) record.subagent = input.subagent;
5532
5639
  const path = input.path ?? defaultUsageLogPath();
5533
5640
  try {
5534
5641
  mkdirSync5(dirname7(path), { recursive: true });
@@ -5602,6 +5709,10 @@ function aggregateUsage(records, opts = {}) {
5602
5709
  const sessionCounts = /* @__PURE__ */ new Map();
5603
5710
  let firstSeen = null;
5604
5711
  let lastSeen = null;
5712
+ const skillCounts = /* @__PURE__ */ new Map();
5713
+ let subagentTotal = 0;
5714
+ let subagentCost = 0;
5715
+ let subagentDuration = 0;
5605
5716
  for (const r of records) {
5606
5717
  addToBucket(all, r);
5607
5718
  if (r.ts >= today.since) addToBucket(today, r);
@@ -5612,15 +5723,34 @@ function aggregateUsage(records, opts = {}) {
5612
5723
  sessionCounts.set(sessKey, (sessionCounts.get(sessKey) ?? 0) + 1);
5613
5724
  if (firstSeen === null || r.ts < firstSeen) firstSeen = r.ts;
5614
5725
  if (lastSeen === null || r.ts > lastSeen) lastSeen = r.ts;
5726
+ if (r.kind === "subagent") {
5727
+ subagentTotal += 1;
5728
+ subagentCost += r.costUsd;
5729
+ const dur = r.subagent?.durationMs ?? 0;
5730
+ subagentDuration += dur;
5731
+ const key = r.subagent?.skillName?.trim() || "(adhoc)";
5732
+ const prev = skillCounts.get(key) ?? { count: 0, costUsd: 0, durationMs: 0 };
5733
+ prev.count += 1;
5734
+ prev.costUsd += r.costUsd;
5735
+ prev.durationMs += dur;
5736
+ skillCounts.set(key, prev);
5737
+ }
5615
5738
  }
5616
5739
  const byModel = Array.from(modelCounts.entries()).map(([model, turns]) => ({ model, turns })).sort((a, b) => b.turns - a.turns);
5617
5740
  const bySession = Array.from(sessionCounts.entries()).map(([session, turns]) => ({ session, turns })).sort((a, b) => b.turns - a.turns);
5741
+ const subagents = subagentTotal > 0 ? {
5742
+ total: subagentTotal,
5743
+ costUsd: subagentCost,
5744
+ totalDurationMs: subagentDuration,
5745
+ bySkill: Array.from(skillCounts.entries()).map(([skillName, v]) => ({ skillName, ...v })).sort((a, b) => b.count - a.count)
5746
+ } : void 0;
5618
5747
  return {
5619
5748
  buckets: [today, week, month, all],
5620
5749
  byModel,
5621
5750
  bySession,
5622
5751
  firstSeen,
5623
- lastSeen
5752
+ lastSeen,
5753
+ subagents
5624
5754
  };
5625
5755
  }
5626
5756
  function formatLogSize(path = defaultUsageLogPath()) {
@@ -5637,7 +5767,7 @@ function formatLogSize(path = defaultUsageLogPath()) {
5637
5767
  }
5638
5768
 
5639
5769
  // src/cli/commands/chat.tsx
5640
- import { existsSync as existsSync10, statSync as statSync6 } from "fs";
5770
+ import { existsSync as existsSync11, statSync as statSync6 } from "fs";
5641
5771
  import { render } from "ink";
5642
5772
  import React17, { useState as useState7 } from "react";
5643
5773
 
@@ -5714,6 +5844,58 @@ function capLines(lines, maxLines, indent) {
5714
5844
  return head;
5715
5845
  }
5716
5846
 
5847
+ // src/code/pending-edits.ts
5848
+ import { existsSync as existsSync9, mkdirSync as mkdirSync6, readFileSync as readFileSync11, unlinkSync as unlinkSync3, writeFileSync as writeFileSync5 } from "fs";
5849
+ import { dirname as dirname8, join as join9 } from "path";
5850
+ function pendingEditsPath(sessionName) {
5851
+ return join9(sessionsDir(), `${sanitizeName(sessionName)}.pending.json`);
5852
+ }
5853
+ function savePendingEdits(sessionName, blocks) {
5854
+ if (!sessionName) return;
5855
+ const path = pendingEditsPath(sessionName);
5856
+ try {
5857
+ if (blocks.length === 0) {
5858
+ if (existsSync9(path)) unlinkSync3(path);
5859
+ return;
5860
+ }
5861
+ mkdirSync6(dirname8(path), { recursive: true });
5862
+ writeFileSync5(path, JSON.stringify(blocks, null, 2), "utf8");
5863
+ } catch {
5864
+ }
5865
+ }
5866
+ function loadPendingEdits(sessionName) {
5867
+ if (!sessionName) return null;
5868
+ const path = pendingEditsPath(sessionName);
5869
+ if (!existsSync9(path)) return null;
5870
+ let raw;
5871
+ try {
5872
+ raw = readFileSync11(path, "utf8");
5873
+ } catch {
5874
+ return null;
5875
+ }
5876
+ try {
5877
+ const parsed = JSON.parse(raw);
5878
+ if (!Array.isArray(parsed)) return null;
5879
+ const out = [];
5880
+ for (const item of parsed) {
5881
+ if (item && typeof item === "object" && typeof item.path === "string" && typeof item.search === "string" && typeof item.replace === "string" && typeof item.offset === "number") {
5882
+ out.push(item);
5883
+ }
5884
+ }
5885
+ return out;
5886
+ } catch {
5887
+ return null;
5888
+ }
5889
+ }
5890
+ function clearPendingEdits(sessionName) {
5891
+ if (!sessionName) return;
5892
+ const path = pendingEditsPath(sessionName);
5893
+ try {
5894
+ if (existsSync9(path)) unlinkSync3(path);
5895
+ } catch {
5896
+ }
5897
+ }
5898
+
5717
5899
  // src/tools/skills.ts
5718
5900
  function registerSkillTools(registry, opts = {}) {
5719
5901
  const store = new SkillStore({
@@ -5835,8 +6017,8 @@ function PlanStateBlock({ planState }) {
5835
6017
  }
5836
6018
 
5837
6019
  // src/cli/ui/markdown.tsx
5838
- import { readFileSync as readFileSync11, statSync as statSync5 } from "fs";
5839
- import { isAbsolute as isAbsolute4, join as join9 } from "path";
6020
+ import { readFileSync as readFileSync12, statSync as statSync5 } from "fs";
6021
+ import { isAbsolute as isAbsolute4, join as join10 } from "path";
5840
6022
  import { Box as Box3, Text as Text3 } from "ink";
5841
6023
  import React3 from "react";
5842
6024
  var SUPERSCRIPT = {
@@ -5914,7 +6096,8 @@ function parseCitationUrl(url) {
5914
6096
  function validateCitation(url, projectRoot) {
5915
6097
  const parts = parseCitationUrl(url);
5916
6098
  if (!parts || !parts.path) return { ok: false, reason: "empty path" };
5917
- const fullPath = isAbsolute4(parts.path) ? parts.path : join9(projectRoot, parts.path);
6099
+ const normalized = parts.path.replace(/^[/\\]+/, "");
6100
+ const fullPath = isAbsolute4(normalized) ? normalized : join10(projectRoot, normalized);
5918
6101
  let stat;
5919
6102
  try {
5920
6103
  stat = statSync5(fullPath);
@@ -5925,7 +6108,7 @@ function validateCitation(url, projectRoot) {
5925
6108
  if (parts.startLine === void 0) return { ok: true };
5926
6109
  let lineCount;
5927
6110
  try {
5928
- lineCount = readFileSync11(fullPath, "utf8").split("\n").length;
6111
+ lineCount = readFileSync12(fullPath, "utf8").split("\n").length;
5929
6112
  } catch {
5930
6113
  return { ok: false, reason: "unreadable" };
5931
6114
  }
@@ -6417,13 +6600,15 @@ function StreamingAssistant({ event }) {
6417
6600
  label = `R1 reasoning \xB7 ${event.reasoning?.length ?? 0} chars of thought`;
6418
6601
  labelColor = "cyan";
6419
6602
  } else if (toolCallOnly) {
6420
- label = `assembling tool call <${toolCallBuild.name}> \xB7 ${toolCallBuild.chars} chars of arguments`;
6603
+ label = `assembling tool call${formatToolCallIndex(toolCallBuild)} <${toolCallBuild.name}> \xB7 ${toolCallBuild.chars} chars of arguments${formatReadyTail(toolCallBuild)}`;
6421
6604
  labelColor = "magenta";
6422
6605
  } else {
6423
6606
  const parts = [`writing response \xB7 ${event.text.length} chars`];
6424
6607
  if (event.reasoning) parts.push(`after ${event.reasoning.length} chars of reasoning`);
6425
6608
  if (toolCallBuild) {
6426
- parts.push(`building tool call <${toolCallBuild.name}> \xB7 ${toolCallBuild.chars} chars`);
6609
+ parts.push(
6610
+ `building tool call${formatToolCallIndex(toolCallBuild)} <${toolCallBuild.name}> \xB7 ${toolCallBuild.chars} chars${formatReadyTail(toolCallBuild)}`
6611
+ );
6427
6612
  }
6428
6613
  label = parts.join(" \xB7 ");
6429
6614
  labelColor = "green";
@@ -6439,6 +6624,16 @@ function Pulse() {
6439
6624
  const frames = ["\u280B", "\u2819", "\u2839", "\u2838", "\u283C", "\u2834", "\u2826", "\u2827", "\u2807", "\u280F"];
6440
6625
  return /* @__PURE__ */ React5.createElement(Text4, { color: "cyan" }, frames[Math.floor(tick / 4) % frames.length]);
6441
6626
  }
6627
+ function formatToolCallIndex(tb) {
6628
+ if (!tb || tb.index === void 0) return "";
6629
+ if (tb.index === 0 && (tb.readyCount ?? 0) === 0) return "";
6630
+ return ` (call ${tb.index + 1})`;
6631
+ }
6632
+ function formatReadyTail(tb) {
6633
+ const n = tb?.readyCount ?? 0;
6634
+ if (n <= 0) return "";
6635
+ return ` \xB7 ${n} ready`;
6636
+ }
6442
6637
  function lastLine(s, maxChars) {
6443
6638
  const flat = s.replace(/\s+/g, " ").trim();
6444
6639
  if (!flat) return "";
@@ -6576,12 +6771,34 @@ function findNextEnabled(items, from, step) {
6576
6771
 
6577
6772
  // src/cli/ui/PlanConfirm.tsx
6578
6773
  var DEFAULT_MAX_RENDERED = 2400;
6579
- function PlanConfirm({ plan, onChoose, maxRenderedChars, projectRoot }) {
6774
+ var PICKER_CHROME_ROWS = 18;
6775
+ var MARKDOWN_EXPANSION = 2;
6776
+ var MIN_BODY_ROWS = 4;
6777
+ function clampBodyByLines(text, maxLines) {
6778
+ const lines = text.split("\n");
6779
+ if (lines.length <= maxLines) return text;
6780
+ const kept = lines.slice(0, maxLines).join("\n");
6781
+ const dropped = lines.length - maxLines;
6782
+ return `${kept}
6783
+
6784
+ \u2026 (${dropped} more lines truncated \u2014 resize the terminal to see more, or /tool for the full proposal)`;
6785
+ }
6786
+ function PlanConfirmInner({
6787
+ plan,
6788
+ onChoose,
6789
+ maxRenderedChars,
6790
+ projectRoot,
6791
+ terminalRows
6792
+ }) {
6580
6793
  const cap = maxRenderedChars ?? DEFAULT_MAX_RENDERED;
6581
- const tooLong = plan.length > cap;
6582
- const visible = tooLong ? `${plan.slice(0, cap)}
6794
+ const charTrunc = plan.length > cap;
6795
+ const charCapped = charTrunc ? `${plan.slice(0, cap)}
6583
6796
 
6584
6797
  \u2026 (${plan.length - cap} chars truncated \u2014 use /tool to view the full proposal)` : plan;
6798
+ const rows = terminalRows ?? process.stdout?.rows ?? 24;
6799
+ const renderedBudget = Math.max(MIN_BODY_ROWS * MARKDOWN_EXPANSION, rows - PICKER_CHROME_ROWS);
6800
+ const sourceLineBudget = Math.max(MIN_BODY_ROWS, Math.floor(renderedBudget / MARKDOWN_EXPANSION));
6801
+ const visible = clampBodyByLines(charCapped, sourceLineBudget);
6585
6802
  const hasOpenQuestions = /^#{1,6}\s*(open[-\s]?questions?|risks?|unknowns?|assumptions?|unclear)/im.test(plan) || /^#{1,6}\s*(待确认|开放问题|风险|未知|假设|不确定)/im.test(plan);
6586
6803
  return /* @__PURE__ */ React7.createElement(Box6, { flexDirection: "column", borderStyle: "round", borderColor: "cyan", paddingX: 1, marginY: 1 }, /* @__PURE__ */ React7.createElement(Box6, null, /* @__PURE__ */ React7.createElement(Text6, { bold: true, color: "cyan" }, "\u25B8 plan submitted \u2014 awaiting your review")), /* @__PURE__ */ React7.createElement(Box6, null, /* @__PURE__ */ React7.createElement(Text6, { color: "cyan", dimColor: true }, "\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500")), /* @__PURE__ */ React7.createElement(Box6, { marginTop: 1, flexDirection: "column" }, /* @__PURE__ */ React7.createElement(Markdown, { text: visible, projectRoot })), hasOpenQuestions ? /* @__PURE__ */ React7.createElement(Box6, { marginTop: 1 }, /* @__PURE__ */ React7.createElement(Text6, { color: "yellow" }, "\u25B2 the plan has open questions or flagged risks \u2014 pick", " ", /* @__PURE__ */ React7.createElement(Text6, { bold: true }, "Refine / answer questions"), " to write concrete answers before the model moves on.")) : null, /* @__PURE__ */ React7.createElement(Box6, { marginTop: 1 }, /* @__PURE__ */ React7.createElement(
6587
6804
  SingleSelect,
@@ -6610,6 +6827,7 @@ function PlanConfirm({ plan, onChoose, maxRenderedChars, projectRoot }) {
6610
6827
  }
6611
6828
  )));
6612
6829
  }
6830
+ var PlanConfirm = React7.memo(PlanConfirmInner);
6613
6831
 
6614
6832
  // src/cli/ui/PlanRefineInput.tsx
6615
6833
  import { Box as Box7, Text as Text7, useInput as useInput2 } from "ink";
@@ -6994,6 +7212,7 @@ function StatsPanel({
6994
7212
  prefixHash,
6995
7213
  harvestOn,
6996
7214
  branchBudget,
7215
+ reasoningEffort,
6997
7216
  planMode,
6998
7217
  balance,
6999
7218
  updateAvailable,
@@ -7014,6 +7233,7 @@ function StatsPanel({
7014
7233
  harvestOn,
7015
7234
  branchOn,
7016
7235
  branchBudget: branchBudget ?? 1,
7236
+ reasoningEffort,
7017
7237
  planMode,
7018
7238
  turns: summary.turns,
7019
7239
  updateAvailable,
@@ -7046,13 +7266,14 @@ function Header({
7046
7266
  harvestOn,
7047
7267
  branchOn,
7048
7268
  branchBudget,
7269
+ reasoningEffort,
7049
7270
  planMode,
7050
7271
  turns,
7051
7272
  updateAvailable,
7052
7273
  narrow,
7053
7274
  busy
7054
7275
  }) {
7055
- return /* @__PURE__ */ React13.createElement(Box12, { justifyContent: "space-between" }, /* @__PURE__ */ React13.createElement(Box12, null, /* @__PURE__ */ React13.createElement(Wordmark, { busy }), /* @__PURE__ */ React13.createElement(Text12, { dimColor: true }, ` v${VERSION}`), /* @__PURE__ */ React13.createElement(Text12, { dimColor: true }, " \xB7 "), /* @__PURE__ */ React13.createElement(Text12, { color: "yellow" }, model), narrow ? null : /* @__PURE__ */ React13.createElement(React13.Fragment, null, /* @__PURE__ */ React13.createElement(Text12, { dimColor: true }, " \xB7 "), /* @__PURE__ */ React13.createElement(Text12, { dimColor: true }, prefixHash)), harvestOn ? /* @__PURE__ */ React13.createElement(Text12, { color: "magenta" }, " \xB7 harvest") : null, branchOn ? /* @__PURE__ */ React13.createElement(Text12, { color: "blue" }, " \xB7 branch", branchBudget) : null, planMode ? /* @__PURE__ */ React13.createElement(Text12, { color: "red", bold: true }, " \xB7 PLAN") : null), /* @__PURE__ */ React13.createElement(Text12, null, updateAvailable ? /* @__PURE__ */ React13.createElement(Text12, { color: "yellow", bold: true }, `update: ${updateAvailable} \xB7 `) : null, /* @__PURE__ */ React13.createElement(Text12, { dimColor: true }, narrow ? `turn ${turns}` : `turn ${turns} \xB7 /help`)));
7276
+ return /* @__PURE__ */ React13.createElement(Box12, { justifyContent: "space-between" }, /* @__PURE__ */ React13.createElement(Box12, null, /* @__PURE__ */ React13.createElement(Wordmark, { busy }), /* @__PURE__ */ React13.createElement(Text12, { dimColor: true }, ` v${VERSION}`), /* @__PURE__ */ React13.createElement(Text12, { dimColor: true }, " \xB7 "), /* @__PURE__ */ React13.createElement(Text12, { color: "yellow" }, model), narrow ? null : /* @__PURE__ */ React13.createElement(React13.Fragment, null, /* @__PURE__ */ React13.createElement(Text12, { dimColor: true }, " \xB7 "), /* @__PURE__ */ React13.createElement(Text12, { dimColor: true }, prefixHash)), harvestOn ? /* @__PURE__ */ React13.createElement(Text12, { color: "magenta" }, " \xB7 harvest") : null, branchOn ? /* @__PURE__ */ React13.createElement(Text12, { color: "blue" }, " \xB7 branch", branchBudget) : null, reasoningEffort === "max" ? /* @__PURE__ */ React13.createElement(Text12, { color: "green" }, " \xB7 max") : null, reasoningEffort === "high" ? /* @__PURE__ */ React13.createElement(Text12, { color: "yellow" }, " \xB7 high") : null, planMode ? /* @__PURE__ */ React13.createElement(Text12, { color: "red", bold: true }, " \xB7 PLAN") : null), /* @__PURE__ */ React13.createElement(Text12, null, updateAvailable ? /* @__PURE__ */ React13.createElement(Text12, { color: "yellow", bold: true }, `update: ${updateAvailable} \xB7 `) : null, /* @__PURE__ */ React13.createElement(Text12, { dimColor: true }, narrow ? `turn ${turns}` : `turn ${turns} \xB7 /help`)));
7056
7277
  }
7057
7278
  function InlineMetrics({
7058
7279
  summary,
@@ -7146,6 +7367,178 @@ function formatBangUserMessage(cmd, output) {
7146
7367
  ${output}`;
7147
7368
  }
7148
7369
 
7370
+ // src/cli/ui/mcp-browse.ts
7371
// Render a human-readable catalog of every resource advertised by the
// connected MCP servers, grouped per server and capped at 20 entries per
// server. Returns a single newline-joined string, or a fallback sentence
// when no server exposes anything.
function formatResourceList(servers) {
  const out = [];
  let shown = 0;
  for (const srv of servers) {
    const res = srv.report.resources;
    if (!res.supported || res.items.length === 0) continue;
    out.push(`[${srv.label}] ${res.items.length} resource(s):`);
    for (const item of res.items.slice(0, 20)) {
      // Show the display name only when it adds information over the URI.
      const displayName = item.name && item.name !== item.uri ? ` ${item.name}` : "";
      const mimeSuffix = item.mimeType ? ` \xB7 ${item.mimeType}` : "";
      out.push(` \xB7 ${item.uri}${displayName}${mimeSuffix}`);
      shown++;
    }
    const hidden = res.items.length - 20;
    if (hidden > 0) out.push(` (+${hidden} more)`);
    out.push("");
  }
  if (shown === 0) {
    return "No resources on any connected MCP server (or no servers connected). `/mcp` shows the current set.";
  }
  out.push("Read one: `/resource <uri>` \u2014 or use Tab in the picker.");
  return out.join("\n");
}
7394
// Render a catalog of every prompt advertised by the connected MCP
// servers, grouped per server and capped at 20 entries per server.
// Arguments are summarized as `name*` (required) / `name?` (optional).
// Returns a fallback sentence when nothing is available.
function formatPromptList(servers) {
  const out = [];
  let shown = 0;
  for (const srv of servers) {
    const prompts = srv.report.prompts;
    if (!prompts.supported || prompts.items.length === 0) continue;
    out.push(`[${srv.label}] ${prompts.items.length} prompt(s):`);
    for (const prompt of prompts.items.slice(0, 20)) {
      const descSuffix = prompt.description ? ` \u2014 ${prompt.description}` : "";
      const hasArgs = prompt.arguments && prompt.arguments.length > 0;
      const argSummary = hasArgs
        ? ` (args: ${prompt.arguments.map((a) => a.name + (a.required ? "*" : "?")).join(", ")})`
        : "";
      out.push(` \xB7 ${prompt.name}${argSummary}${descSuffix}`);
      shown++;
    }
    const hidden = prompts.items.length - 20;
    if (hidden > 0) out.push(` (+${hidden} more)`);
    out.push("");
  }
  if (shown === 0) {
    return "No prompts on any connected MCP server (or no servers connected). `/mcp` shows the current set.";
  }
  out.push(
    "Fetch one: `/prompt <name>` \u2014 args are not supported yet; prompts with required args will surface an error from the server."
  );
  return out.join("\n");
}
7419
// Locate the first server whose resource catalog contains `uri`.
// Servers that do not support resources are skipped. Returns null when
// no server matches.
function findServerForResource(servers, uri) {
  const owner = servers.find(
    (srv) => srv.report.resources.supported && srv.report.resources.items.some((item) => item.uri === uri)
  );
  return owner ?? null;
}
7426
// Locate the first server whose prompt catalog contains `name`.
// Servers that do not support prompts are skipped. Returns null when no
// server matches.
function findServerForPrompt(servers, name) {
  const owner = servers.find(
    (srv) => srv.report.prompts.supported && srv.report.prompts.items.some((item) => item.name === name)
  );
  return owner ?? null;
}
7433
// Render a readResource result: a header with the content-block count,
// then each block introduced by "\u2014 block N [\xB7 mime]" followed by its
// rendered body. The trailing blank line is trimmed.
function formatResourceContents(uri, result) {
  const out = [`Resource ${uri} (${result.contents.length} content block(s)):`, ""];
  for (const [idx, block] of result.contents.entries()) {
    const mimeTag = block.mimeType ? ` \xB7 ${block.mimeType}` : "";
    out.push(`\u2014 block ${idx + 1}${mimeTag}`);
    out.push(formatOneResourceContent(block));
    out.push("");
  }
  return out.join("\n").trimEnd();
}
7444
// Render a single resource content block. Text blocks are returned
// verbatim, truncated at 8000 chars with an explanatory tail; binary
// (base64 `blob`) blocks are summarized with an approximate byte size
// instead of dumping the payload.
function formatOneResourceContent(c) {
  if (!("text" in c)) {
    const byteCount = typeof c.blob === "string" ? approximateBase64ByteSize(c.blob) : 0;
    return `[binary \xB7 ~${byteCount.toLocaleString()} bytes \xB7 base64]`;
  }
  const MAX = 8e3;
  if (c.text.length <= MAX) return c.text;
  const omitted = c.text.length - MAX;
  return `${c.text.slice(0, MAX)}

[\u2026truncated ${omitted} chars; full contents available via McpClient.readResource in library mode.]`;
}
7457
// Estimate the decoded byte length of a base64 string without decoding
// it: 3 bytes per 4 chars, minus 1 byte per trailing '=' padding char.
// Clamped to >= 0 so a degenerate input (e.g. "=" or "==") cannot
// report a negative size (the unclamped formula goes to -1/-2 there).
function approximateBase64ByteSize(b64) {
  const padding = b64.endsWith("==") ? 2 : b64.endsWith("=") ? 1 : 0;
  return Math.max(0, Math.floor(b64.length * 3 / 4) - padding);
}
7461
// Render a getPrompt result: a header line (prompt name plus optional
// description), the message count, then each message as
// "\u2014 N. role" followed by its rendered body. Trailing blank line trimmed.
function formatPromptMessages(name, result) {
  const descSuffix = result.description ? ` \u2014 ${result.description}` : "";
  const out = [`Prompt ${name}${descSuffix}`, `(${result.messages.length} message(s))`, ""];
  for (const [idx, message] of result.messages.entries()) {
    out.push(`\u2014 ${idx + 1}. ${message.role}`);
    out.push(formatOnePromptMessage(message));
    out.push("");
  }
  return out.join("\n").trimEnd();
}
7475
// Render one prompt message's content block. Text blocks are returned
// verbatim; embedded resources get a "[resource: uri]" header plus the
// rendered resource body; anything else collapses to a typed placeholder.
function formatOnePromptMessage(m) {
  const content = m.content;
  if (content.type === "text" && typeof content.text === "string") {
    return content.text;
  }
  if (content.type === "resource" && content.resource) {
    const body = formatOneResourceContent(content.resource);
    return `[resource: ${content.resource.uri}]
${body}`;
  }
  return `[non-text content: ${content.type ?? "unknown"}]`;
}
7484
// Implements the `/resource` and `/prompt` slash commands.
//
// kind          "resource" | "prompt" \u2014 which catalog to browse.
// arg           optional resource URI / prompt name; empty \u2192 list mode.
// servers       connected-server descriptors ({ label, client, report });
//               `client` may be absent for display-only entries.
// setHistorical React state setter for the scrollback; every outcome
//               (listing, fetched contents, or a warning) is pushed there.
//
// Never throws: fetch failures are caught and surfaced as warnings.
async function handleMcpBrowseSlash(kind, arg, servers, setHistorical) {
  const ts = Date.now();
  // `prev.length` keeps ids unique when several messages land in the same ms.
  const push = (role, text) => {
    setHistorical((prev) => [...prev, { id: `mcp-${role}-${ts}-${prev.length}`, role, text }]);
  };
  // Fix: a non-Error throw (string/object) used to render as "undefined"
  // via `err.message`; stringify it instead. Errors are unchanged.
  const errText = (err) => err instanceof Error ? err.message : String(err);
  if (!arg) {
    push("info", kind === "resource" ? formatResourceList(servers) : formatPromptList(servers));
    return;
  }
  if (kind === "resource") {
    const server2 = findServerForResource(servers, arg);
    if (!server2) {
      push(
        "warning",
        `no server exposes resource "${arg}". \`/resource\` with no arg lists what's available.`
      );
      return;
    }
    const client2 = server2.client;
    if (!client2) {
      push(
        "warning",
        `server [${server2.label}] is not connected (display-only). Resource read requires a live MCP client.`
      );
      return;
    }
    try {
      const result = await client2.readResource(arg);
      push("info", formatResourceContents(arg, result));
    } catch (err) {
      push("warning", `readResource failed: ${errText(err)}`);
    }
    return;
  }
  const server = findServerForPrompt(servers, arg);
  if (!server) {
    push(
      "warning",
      `no server exposes prompt "${arg}". \`/prompt\` with no arg lists what's available.`
    );
    return;
  }
  const client = server.client;
  if (!client) {
    push(
      "warning",
      `server [${server.label}] is not connected (display-only). Prompt fetch requires a live MCP client.`
    );
    return;
  }
  try {
    const result = await client.getPrompt(arg);
    push("info", formatPromptMessages(arg, result));
  } catch (err) {
    push("warning", `getPrompt failed: ${errText(err)}`);
  }
}
7541
+
7149
7542
  // src/cli/ui/paste-collapse.ts
7150
7543
  var DEFAULT_PASTE_LINE_THRESHOLD = 40;
7151
7544
  var DEFAULT_PASTE_CHAR_THRESHOLD = 2e3;
@@ -7182,7 +7575,7 @@ function formatBytes(n) {
7182
7575
  import { spawnSync } from "child_process";
7183
7576
 
7184
7577
  // src/cli/commands/stats.ts
7185
- import { existsSync as existsSync9, readFileSync as readFileSync12 } from "fs";
7578
+ import { existsSync as existsSync10, readFileSync as readFileSync13 } from "fs";
7186
7579
  function statsCommand(opts) {
7187
7580
  if (opts.transcript) {
7188
7581
  transcriptSummary(opts.transcript);
@@ -7191,11 +7584,11 @@ function statsCommand(opts) {
7191
7584
  dashboard(opts);
7192
7585
  }
7193
7586
  function transcriptSummary(path) {
7194
- if (!existsSync9(path)) {
7587
+ if (!existsSync10(path)) {
7195
7588
  console.error(`no such transcript: ${path}`);
7196
7589
  process.exit(1);
7197
7590
  }
7198
- const lines = readFileSync12(path, "utf8").split(/\r?\n/).filter(Boolean);
7591
+ const lines = readFileSync13(path, "utf8").split(/\r?\n/).filter(Boolean);
7199
7592
  let assistantTurns = 0;
7200
7593
  let toolCalls = 0;
7201
7594
  let lastTurn = 0;
@@ -7254,6 +7647,28 @@ function renderDashboard(agg, logPath) {
7254
7647
  if (agg.firstSeen) {
7255
7648
  lines.push(`tracked since: ${new Date(agg.firstSeen).toISOString().slice(0, 10)}`);
7256
7649
  }
7650
+ if (agg.subagents) {
7651
+ lines.push("");
7652
+ lines.push(renderSubagentSection(agg.subagents));
7653
+ }
7654
+ return lines.join("\n");
7655
+ }
7656
// Render the subagent-activity section of the stats dashboard: a totals
// line (runs, cost, wall time) plus up to 5 per-skill rows, with a
// "(+N more)" tail when skills were cut off. `pad` is the file's column
// alignment helper (width, optional "right" justification).
function renderSubagentSection(sub) {
  const totalSeconds = (sub.totalDurationMs / 1e3).toFixed(1);
  const out = [
    `subagent activity: ${sub.total} run(s) \xB7 $${sub.costUsd.toFixed(6)} \xB7 ${totalSeconds}s total`
  ];
  const shown = sub.bySkill.slice(0, 5);
  for (const skill of shown) {
    const skillSeconds = (skill.durationMs / 1e3).toFixed(1);
    out.push(
      ` ${pad(skill.skillName, 18)} ${pad(`${skill.count}`, 4, "right")} $${skill.costUsd.toFixed(6)} ${skillSeconds}s`
    );
  }
  const hidden = sub.bySkill.length - shown.length;
  if (hidden > 0) out.push(` (+${hidden} more)`);
  return out.join("\n");
}
7259
7674
  function header() {
@@ -7316,7 +7731,25 @@ var SLASH_COMMANDS = [
7316
7731
  summary: "run N parallel samples per turn (N>=2)",
7317
7732
  argCompleter: ["off", "2", "3", "4", "5"]
7318
7733
  },
7734
+ {
7735
+ cmd: "effort",
7736
+ argsHint: "<high|max>",
7737
+ summary: "reasoning_effort cap \u2014 max is default (agent-class), high is cheaper/faster",
7738
+ argCompleter: ["max", "high"]
7739
+ },
7319
7740
  { cmd: "mcp", summary: "list MCP servers + tools attached to this session" },
7741
+ {
7742
+ cmd: "resource",
7743
+ argsHint: "[uri]",
7744
+ summary: "browse + read MCP resources (no arg \u2192 list URIs; <uri> \u2192 fetch contents)",
7745
+ argCompleter: "mcp-resources"
7746
+ },
7747
+ {
7748
+ cmd: "prompt",
7749
+ argsHint: "[name]",
7750
+ summary: "browse + fetch MCP prompts (no arg \u2192 list names; <name> \u2192 render prompt)",
7751
+ argCompleter: "mcp-prompts"
7752
+ },
7320
7753
  { cmd: "tool", argsHint: "[N]", summary: "dump full output of the Nth tool call (1=latest)" },
7321
7754
  {
7322
7755
  cmd: "memory",
@@ -7461,6 +7894,11 @@ function handleSlash(cmd, args, loop, ctx = {}) {
7461
7894
  " Tab insert the highlighted item without submitting",
7462
7895
  " Enter insert and (slash) run it, (@) keep editing",
7463
7896
  "",
7897
+ "MCP exploration:",
7898
+ " /mcp servers + tool/resource/prompt counts",
7899
+ " /resource [uri] browse & read resources exposed by your MCP servers",
7900
+ " /prompt [name] browse & fetch prompts exposed by your MCP servers",
7901
+ "",
7464
7902
  "Useful slashes: /help \xB7 /context \xB7 /stats \xB7 /compact \xB7 /new \xB7 /exit"
7465
7903
  ].join("\n")
7466
7904
  };
@@ -7476,7 +7914,10 @@ function handleSlash(cmd, args, loop, ctx = {}) {
7476
7914
  " /model <id> deepseek-chat or deepseek-reasoner",
7477
7915
  " /harvest [on|off] Pillar 2: structured plan-state extraction",
7478
7916
  " /branch <N|off> run N parallel samples (N>=2), pick most confident",
7917
+ " /effort <high|max> reasoning_effort cap (max=agent default, high=cheaper)",
7479
7918
  " /mcp list MCP servers + tools attached to this session",
7919
+ " /resource [uri] browse + read MCP resources (no arg \u2192 list URIs; <uri> \u2192 fetch)",
7920
+ " /prompt [name] browse + fetch MCP prompts (no arg \u2192 list names; <name> \u2192 render)",
7480
7921
  " /setup (exit + reconfigure) \u2192 run `reasonix setup`",
7481
7922
  " /compact [tokens] shrink large tool results in history (default 4000 tokens/result)",
7482
7923
  " /think dump the most recent turn's full R1 reasoning (reasoner only)",
@@ -7529,6 +7970,8 @@ function handleSlash(cmd, args, loop, ctx = {}) {
7529
7970
  }
7530
7971
  if (servers.length > 0) {
7531
7972
  const lines2 = [];
7973
+ let anyResources = false;
7974
+ let anyPrompts = false;
7532
7975
  for (const s of servers) {
7533
7976
  const { report } = s;
7534
7977
  const serverName = report.serverInfo.name || "(unknown)";
@@ -7537,11 +7980,20 @@ function handleSlash(cmd, args, loop, ctx = {}) {
7537
7980
  lines2.push(` tools ${s.toolCount}`);
7538
7981
  appendSection(lines2, "resources", report.resources);
7539
7982
  appendSection(lines2, "prompts ", report.prompts);
7983
+ if (report.resources.supported && report.resources.items.length > 0) anyResources = true;
7984
+ if (report.prompts.supported && report.prompts.items.length > 0) anyPrompts = true;
7540
7985
  lines2.push("");
7541
7986
  }
7542
- lines2.push(
7543
- "Chat mode consumes tools today; resources+prompts are surfaced here for awareness."
7544
- );
7987
+ if (anyResources || anyPrompts) {
7988
+ const hints = [];
7989
+ if (anyResources) hints.push("`/resource` to browse+read");
7990
+ if (anyPrompts) hints.push("`/prompt` to browse+fetch");
7991
+ lines2.push(hints.join(" \xB7 "));
7992
+ } else {
7993
+ lines2.push(
7994
+ "Chat mode consumes tools today; resources+prompts are surfaced here for awareness."
7995
+ );
7996
+ }
7545
7997
  lines2.push(
7546
7998
  "Full catalog: `reasonix mcp list` \xB7 deeper diagnosis: `reasonix mcp inspect <spec>`."
7547
7999
  );
@@ -7832,7 +8284,7 @@ ${entry.text}`
7832
8284
  const planLine = ctx.planMode ? " plan ON \u2014 writes gated (submit_plan + approval)" : "";
7833
8285
  const lines = [
7834
8286
  ` model ${loop.model}`,
7835
- ` flags harvest=${loop.harvestEnabled ? "on" : "off"} \xB7 branch=${branchBudget > 1 ? branchBudget : "off"} \xB7 stream=${loop.stream ? "on" : "off"}`,
8287
+ ` flags harvest=${loop.harvestEnabled ? "on" : "off"} \xB7 branch=${branchBudget > 1 ? branchBudget : "off"} \xB7 stream=${loop.stream ? "on" : "off"} \xB7 effort=${loop.reasoningEffort}`,
7836
8288
  ctxLine,
7837
8289
  mcpLine,
7838
8290
  sessionLine
@@ -7921,6 +8373,19 @@ ${entry.text}`
7921
8373
  loop.configure({ branch: n });
7922
8374
  return { info: `branch \u2192 ${n} (harvest auto-enabled; streaming disabled)` };
7923
8375
  }
8376
+ case "effort": {
8377
+ const raw = (args[0] ?? "").toLowerCase();
8378
+ if (raw === "") {
8379
+ return {
8380
+ info: `reasoning_effort \u2192 ${loop.reasoningEffort} (use /effort high for cheaper/faster, /effort max for the agent-class default)`
8381
+ };
8382
+ }
8383
+ if (raw !== "high" && raw !== "max") {
8384
+ return { info: "usage: /effort <high|max>" };
8385
+ }
8386
+ loop.configure({ reasoningEffort: raw });
8387
+ return { info: `reasoning_effort \u2192 ${raw}` };
8388
+ }
7924
8389
  default:
7925
8390
  return { unknown: true, info: `unknown command: /${cmd} (try /help)` };
7926
8391
  }
@@ -8476,8 +8941,30 @@ function App({
8476
8941
  if (!partial) return all.slice(0, 40);
8477
8942
  return all.filter((m) => m.toLowerCase().includes(needle)).slice(0, 40);
8478
8943
  }
8944
+ if (completer === "mcp-resources") {
8945
+ const uris = [];
8946
+ const servers = mcpServers ?? [];
8947
+ for (const s of servers) {
8948
+ if (!s.report.resources.supported) continue;
8949
+ for (const r of s.report.resources.items) uris.push(r.uri);
8950
+ }
8951
+ if (partial && uris.some((u) => u.toLowerCase() === needle)) return null;
8952
+ if (!partial) return uris.slice(0, 40);
8953
+ return uris.filter((u) => u.toLowerCase().includes(needle)).slice(0, 40);
8954
+ }
8955
+ if (completer === "mcp-prompts") {
8956
+ const names = [];
8957
+ const servers = mcpServers ?? [];
8958
+ for (const s of servers) {
8959
+ if (!s.report.prompts.supported) continue;
8960
+ for (const p of s.report.prompts.items) names.push(p.name);
8961
+ }
8962
+ if (partial && names.some((n) => n.toLowerCase() === needle)) return null;
8963
+ if (!partial) return names.slice(0, 40);
8964
+ return names.filter((n) => n.toLowerCase().includes(needle)).slice(0, 40);
8965
+ }
8479
8966
  return null;
8480
- }, [slashArgContext, models]);
8967
+ }, [slashArgContext, models, mcpServers]);
8481
8968
  useEffect2(() => {
8482
8969
  setSlashArgSelected((prev) => {
8483
8970
  if (!slashArgMatches || slashArgMatches.length === 0) return 0;
@@ -8512,7 +8999,10 @@ function App({
8512
8999
  // Per-skill model override (frontmatter `model: ...`),
8513
9000
  // else falls through to spawnSubagent's default.
8514
9001
  model: skill.model,
8515
- sink: subagentSinkRef.current
9002
+ sink: subagentSinkRef.current,
9003
+ // Stamped onto every event so the TUI sink + usage log can
9004
+ // attribute the run to a skill without extra bookkeeping.
9005
+ skillName: skill.name
8516
9006
  });
8517
9007
  return formatSubagentResult(result);
8518
9008
  }
@@ -8606,7 +9096,8 @@ function App({
8606
9096
  }
8607
9097
  setSubagentActivity(null);
8608
9098
  const seconds = ((ev.elapsedMs ?? 0) / 1e3).toFixed(1);
8609
- const summary2 = ev.error ? `\u232C subagent "${ev.task}" failed after ${seconds}s \xB7 ${ev.iter ?? 0} tool call(s) \u2014 ${ev.error}` : `\u232C subagent "${ev.task}" done in ${seconds}s \xB7 ${ev.iter ?? 0} tool call(s) \xB7 ${ev.turns ?? 0} turn(s)`;
9099
+ const costTail = ev.costUsd !== void 0 && ev.costUsd > 0 ? ` \xB7 $${ev.costUsd.toFixed(4)}` : "";
9100
+ const summary2 = ev.error ? `\u232C subagent "${ev.task}" failed after ${seconds}s \xB7 ${ev.iter ?? 0} tool call(s) \u2014 ${ev.error}` : `\u232C subagent "${ev.task}" done in ${seconds}s \xB7 ${ev.iter ?? 0} tool call(s) \xB7 ${ev.turns ?? 0} turn(s)${costTail}`;
8610
9101
  setHistorical((prev) => [
8611
9102
  ...prev,
8612
9103
  {
@@ -8615,11 +9106,25 @@ function App({
8615
9106
  text: summary2
8616
9107
  }
8617
9108
  ]);
9109
+ if (!ev.error && ev.usage && ev.model) {
9110
+ appendUsage({
9111
+ session: session ?? null,
9112
+ model: ev.model,
9113
+ usage: ev.usage,
9114
+ kind: "subagent",
9115
+ subagent: {
9116
+ skillName: ev.skillName,
9117
+ taskPreview: ev.task.slice(0, 60),
9118
+ toolIters: ev.iter ?? 0,
9119
+ durationMs: ev.elapsedMs ?? 0
9120
+ }
9121
+ });
9122
+ }
8618
9123
  };
8619
9124
  return () => {
8620
9125
  subagentSinkRef.current.current = null;
8621
9126
  };
8622
- }, []);
9127
+ }, [session]);
8623
9128
  const sessionBannerShown = useRef2(false);
8624
9129
  useEffect2(() => {
8625
9130
  if (sessionBannerShown.current) return;
@@ -8652,7 +9157,21 @@ function App({
8652
9157
  }
8653
9158
  ]);
8654
9159
  }
8655
- }, [session, loop]);
9160
+ if (session && codeMode) {
9161
+ const restored = loadPendingEdits(session);
9162
+ if (restored && restored.length > 0) {
9163
+ pendingEdits.current = restored;
9164
+ setHistorical((prev) => [
9165
+ ...prev,
9166
+ {
9167
+ id: `sys-pending-${Date.now()}`,
9168
+ role: "info",
9169
+ text: `\u25B8 restored ${restored.length} pending edit block(s) from an interrupted prior run \u2014 /apply to commit or /discard to drop.`
9170
+ }
9171
+ ]);
9172
+ }
9173
+ }
9174
+ }, [session, loop, codeMode]);
8656
9175
  useInput4((_input, key) => {
8657
9176
  if (key.escape && busy) {
8658
9177
  if (abortedThisTurn.current) return;
@@ -8746,14 +9265,16 @@ function App({
8746
9265
  const anyApplied = results.some((r) => r.status === "applied" || r.status === "created");
8747
9266
  if (anyApplied) lastEditSnapshots.current = snaps;
8748
9267
  pendingEdits.current = [];
9268
+ clearPendingEdits(session ?? null);
8749
9269
  return formatEditResults(results);
8750
- }, [codeMode]);
9270
+ }, [codeMode, session]);
8751
9271
  const codeDiscard = useCallback(() => {
8752
9272
  const count = pendingEdits.current.length;
8753
9273
  if (count === 0) return "nothing pending to discard.";
8754
9274
  pendingEdits.current = [];
9275
+ clearPendingEdits(session ?? null);
8755
9276
  return `\u25B8 discarded ${count} pending edit block(s). Nothing was written to disk.`;
8756
- }, []);
9277
+ }, [session]);
8757
9278
  const prefixHash = loop.prefix.fingerprint;
8758
9279
  const writeTranscript = useCallback(
8759
9280
  (ev) => {
@@ -8833,7 +9354,7 @@ function App({
8833
9354
  ...prev,
8834
9355
  { id: `bang-o-${Date.now()}`, role: "info", text: formatted }
8835
9356
  ]);
8836
- loop.log.append({
9357
+ loop.appendAndPersist({
8837
9358
  role: "user",
8838
9359
  content: formatBangUserMessage(bangCmd, formatted)
8839
9360
  });
@@ -8851,6 +9372,18 @@ function App({
8851
9372
  }
8852
9373
  return;
8853
9374
  }
9375
+ const mcpBrowseMatch = /^\/(resource|prompt)(?:\s+([\s\S]*))?$/.exec(text);
9376
+ if (mcpBrowseMatch) {
9377
+ const kind = mcpBrowseMatch[1];
9378
+ const arg = mcpBrowseMatch[2]?.trim() ?? "";
9379
+ promptHistory.current.push(text);
9380
+ setHistorical((prev) => [
9381
+ ...prev,
9382
+ { id: `mcp-u-${Date.now()}`, role: "user", text, leadSeparator: prev.length > 0 }
9383
+ ]);
9384
+ await handleMcpBrowseSlash(kind, arg, mcpServers ?? [], setHistorical);
9385
+ return;
9386
+ }
8854
9387
  const slash = parseSlash(text);
8855
9388
  if (slash) {
8856
9389
  const result = handleSlash(slash.cmd, slash.args, loop, {
@@ -8899,10 +9432,18 @@ function App({
8899
9432
  text: result.info
8900
9433
  }
8901
9434
  ]);
9435
+ if (codeMode) {
9436
+ pendingEdits.current = [];
9437
+ clearPendingEdits(session ?? null);
9438
+ }
8902
9439
  return;
8903
9440
  }
8904
9441
  if (result.clear) {
8905
9442
  setHistorical([]);
9443
+ if (codeMode) {
9444
+ pendingEdits.current = [];
9445
+ clearPendingEdits(session ?? null);
9446
+ }
8906
9447
  return;
8907
9448
  }
8908
9449
  if (result.info) {
@@ -9017,7 +9558,9 @@ function App({
9017
9558
  if (ev.toolName) {
9018
9559
  toolCallBuildBuf.current = {
9019
9560
  name: ev.toolName,
9020
- chars: ev.toolCallArgsChars ?? 0
9561
+ chars: ev.toolCallArgsChars ?? 0,
9562
+ index: ev.toolCallIndex,
9563
+ readyCount: ev.toolCallReadyCount
9021
9564
  };
9022
9565
  }
9023
9566
  } else if (ev.role === "branch_start") {
@@ -9076,6 +9619,7 @@ function App({
9076
9619
  const blocks = parseEditBlocks(finalText);
9077
9620
  if (blocks.length > 0) {
9078
9621
  pendingEdits.current = blocks;
9622
+ savePendingEdits(session ?? null, blocks);
9079
9623
  setHistorical((prev) => [
9080
9624
  ...prev,
9081
9625
  {
@@ -9313,6 +9857,14 @@ ${body}`;
9313
9857
  },
9314
9858
  [pendingPlan, togglePlanMode, busy, loop, handleSubmit]
9315
9859
  );
9860
+ const handlePlanConfirmRef = useRef2(handlePlanConfirm);
9861
+ useEffect2(() => {
9862
+ handlePlanConfirmRef.current = handlePlanConfirm;
9863
+ }, [handlePlanConfirm]);
9864
+ const stableHandlePlanConfirm = useCallback(
9865
+ async (choice) => handlePlanConfirmRef.current(choice),
9866
+ []
9867
+ );
9316
9868
  const handleStagedInputSubmit = useCallback(
9317
9869
  async (feedback) => {
9318
9870
  const staged = stagedInput;
@@ -9366,7 +9918,7 @@ Stay in plan mode \u2014 address the feedback (explore more if needed), then sub
9366
9918
  if (stagedInput?.plan) setPendingPlan(stagedInput.plan);
9367
9919
  setStagedInput(null);
9368
9920
  }, [stagedInput]);
9369
- return /* @__PURE__ */ React14.createElement(TickerProvider, { disabled: PLAIN_UI }, /* @__PURE__ */ React14.createElement(Box13, { flexDirection: "column" }, /* @__PURE__ */ React14.createElement(
9921
+ return /* @__PURE__ */ React14.createElement(TickerProvider, { disabled: PLAIN_UI || !!pendingPlan || !!pendingShell }, /* @__PURE__ */ React14.createElement(Box13, { flexDirection: "column" }, /* @__PURE__ */ React14.createElement(
9370
9922
  StatsPanel,
9371
9923
  {
9372
9924
  summary,
@@ -9374,6 +9926,7 @@ Stay in plan mode \u2014 address the feedback (explore more if needed), then sub
9374
9926
  prefixHash,
9375
9927
  harvestOn: loop.harvestEnabled,
9376
9928
  branchBudget: loop.branchOptions.budget,
9929
+ reasoningEffort: loop.reasoningEffort,
9377
9930
  planMode,
9378
9931
  balance,
9379
9932
  busy,
@@ -9386,7 +9939,14 @@ Stay in plan mode \u2014 address the feedback (explore more if needed), then sub
9386
9939
  onSubmit: handleStagedInputSubmit,
9387
9940
  onCancel: handleStagedInputCancel
9388
9941
  }
9389
- ) : pendingPlan ? /* @__PURE__ */ React14.createElement(PlanConfirm, { plan: pendingPlan, onChoose: handlePlanConfirm, projectRoot: hookCwd }) : pendingShell ? /* @__PURE__ */ React14.createElement(
9942
+ ) : pendingPlan ? /* @__PURE__ */ React14.createElement(
9943
+ PlanConfirm,
9944
+ {
9945
+ plan: pendingPlan,
9946
+ onChoose: stableHandlePlanConfirm,
9947
+ projectRoot: hookCwd
9948
+ }
9949
+ ) : pendingShell ? /* @__PURE__ */ React14.createElement(
9390
9950
  ShellConfirm,
9391
9951
  {
9392
9952
  command: pendingShell,
@@ -9717,7 +10277,8 @@ async function chatCommand(opts) {
9717
10277
  label,
9718
10278
  spec: raw,
9719
10279
  toolCount: bridge.registeredNames.length,
9720
- report
10280
+ report,
10281
+ client: mcp2
9721
10282
  });
9722
10283
  } catch (err) {
9723
10284
  const reason = err.message;
@@ -9747,7 +10308,7 @@ async function chatCommand(opts) {
9747
10308
  const prior = loadSessionMessages(opts.session);
9748
10309
  if (prior.length > 0) {
9749
10310
  const p = sessionPath(opts.session);
9750
- const mtime = existsSync10(p) ? statSync6(p).mtime : /* @__PURE__ */ new Date();
10311
+ const mtime = existsSync11(p) ? statSync6(p).mtime : /* @__PURE__ */ new Date();
9751
10312
  sessionPreview = { messageCount: prior.length, lastActive: mtime };
9752
10313
  }
9753
10314
  } else if (opts.session && opts.forceNew) {
@@ -9801,7 +10362,7 @@ async function codeCommand(opts = {}) {
9801
10362
  `
9802
10363
  );
9803
10364
  await chatCommand({
9804
- model: opts.model ?? "deepseek-reasoner",
10365
+ model: opts.model ?? "deepseek-v4-pro",
9805
10366
  harvest: opts.harvest ?? false,
9806
10367
  system: codeSystemPrompt2(rootDir),
9807
10368
  transcript: opts.transcript,
@@ -9814,7 +10375,7 @@ async function codeCommand(opts = {}) {
9814
10375
  }
9815
10376
 
9816
10377
  // src/cli/commands/diff.ts
9817
- import { writeFileSync as writeFileSync5 } from "fs";
10378
+ import { writeFileSync as writeFileSync6 } from "fs";
9818
10379
  import { basename as basename2 } from "path";
9819
10380
  import { render as render2 } from "ink";
9820
10381
  import React20 from "react";
@@ -9960,7 +10521,7 @@ async function diffCommand(opts) {
9960
10521
  if (wantMarkdown) {
9961
10522
  console.log(renderSummaryTable(report));
9962
10523
  const md = renderMarkdown(report);
9963
- writeFileSync5(opts.mdPath, md, "utf8");
10524
+ writeFileSync6(opts.mdPath, md, "utf8");
9964
10525
  console.log(`
9965
10526
  markdown report written to ${opts.mdPath}`);
9966
10527
  return;
@@ -10483,21 +11044,26 @@ import React23, { useState as useState10 } from "react";
10483
11044
 
10484
11045
  // src/cli/ui/presets.ts
10485
11046
  var PRESETS = {
11047
+ // `deepseek-chat` / `deepseek-reasoner` are retained as the fast /
11048
+ // smart models because they're deprecated-but-working compat aliases
11049
+ // for v4-flash's non-thinking and thinking modes respectively. Same
11050
+ // billing, smaller config churn for existing users. `max` promotes
11051
+ // to v4-pro — 12× flash on input/output, reserved for hard tasks.
10486
11052
  fast: { model: "deepseek-chat", harvest: false, branch: 1 },
10487
11053
  smart: { model: "deepseek-reasoner", harvest: true, branch: 1 },
10488
- max: { model: "deepseek-reasoner", harvest: true, branch: 3 }
11054
+ max: { model: "deepseek-v4-pro", harvest: true, branch: 3 }
10489
11055
  };
10490
11056
  var PRESET_DESCRIPTIONS = {
10491
11057
  fast: {
10492
- headline: "deepseek-chat, no reasoning harvest, no branching",
11058
+ headline: "deepseek-chat (= v4-flash non-thinking), no harvest, no branching",
10493
11059
  cost: "~1\xA2 per 100 turns \xB7 default"
10494
11060
  },
10495
11061
  smart: {
10496
- headline: "deepseek-reasoner + Pillar 2 harvest",
10497
- cost: "~10\xD7 cost vs fast \xB7 slower \xB7 better on multi-step tasks"
11062
+ headline: "deepseek-reasoner (= v4-flash thinking) + Pillar 2 harvest",
11063
+ cost: "same price as fast \xB7 slower \xB7 better on multi-step tasks"
10498
11064
  },
10499
11065
  max: {
10500
- headline: "reasoner + harvest + self-consistency (3 branches)",
11066
+ headline: "deepseek-v4-pro + harvest + self-consistency (3 branches)",
10501
11067
  cost: "~30\xD7 cost vs fast \xB7 slowest \xB7 for hard single-shots"
10502
11068
  }
10503
11069
  };