reasonix 0.31.0 → 0.33.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122) hide show
  1. package/README.md +3 -7
  2. package/README.zh-CN.md +2 -6
  3. package/dashboard/dist/app.js +348 -80
  4. package/dashboard/dist/app.js.map +1 -1
  5. package/dist/cli/chat-EIFLHBZ6.js +39 -0
  6. package/dist/cli/chunk-2AWTGJ2C.js +110 -0
  7. package/dist/cli/chunk-2AWTGJ2C.js.map +1 -0
  8. package/dist/cli/chunk-3Q3C4W66.js +30 -0
  9. package/dist/cli/chunk-3Q3C4W66.js.map +1 -0
  10. package/dist/cli/chunk-4DCHFFEY.js +149 -0
  11. package/dist/cli/chunk-4DCHFFEY.js.map +1 -0
  12. package/dist/cli/chunk-5X7LZJDE.js +36 -0
  13. package/dist/cli/chunk-5X7LZJDE.js.map +1 -0
  14. package/dist/cli/chunk-6TMHAK5D.js +576 -0
  15. package/dist/cli/chunk-6TMHAK5D.js.map +1 -0
  16. package/dist/cli/chunk-APPB3ZPQ.js +43 -0
  17. package/dist/cli/chunk-APPB3ZPQ.js.map +1 -0
  18. package/dist/cli/chunk-BQNUJJN7.js +42 -0
  19. package/dist/cli/chunk-BQNUJJN7.js.map +1 -0
  20. package/dist/cli/chunk-CPOV2O73.js +39 -0
  21. package/dist/cli/chunk-CPOV2O73.js.map +1 -0
  22. package/dist/cli/chunk-D5DKXIP5.js +368 -0
  23. package/dist/cli/chunk-D5DKXIP5.js.map +1 -0
  24. package/dist/cli/chunk-DFP4YSVM.js +247 -0
  25. package/dist/cli/chunk-DFP4YSVM.js.map +1 -0
  26. package/dist/cli/chunk-DULSP7JH.js +410 -0
  27. package/dist/cli/chunk-DULSP7JH.js.map +1 -0
  28. package/dist/cli/chunk-FM57FNPJ.js +46 -0
  29. package/dist/cli/chunk-FM57FNPJ.js.map +1 -0
  30. package/dist/cli/chunk-FWGEHRB7.js +54 -0
  31. package/dist/cli/chunk-FWGEHRB7.js.map +1 -0
  32. package/dist/cli/chunk-FXGQ5NHE.js +513 -0
  33. package/dist/cli/chunk-FXGQ5NHE.js.map +1 -0
  34. package/dist/cli/chunk-G3XNWSFN.js +53 -0
  35. package/dist/cli/chunk-G3XNWSFN.js.map +1 -0
  36. package/dist/cli/chunk-I6YIAK6C.js +757 -0
  37. package/dist/cli/chunk-I6YIAK6C.js.map +1 -0
  38. package/dist/cli/chunk-J5VLP23S.js +94 -0
  39. package/dist/cli/chunk-J5VLP23S.js.map +1 -0
  40. package/dist/cli/chunk-KMWKGPFZ.js +303 -0
  41. package/dist/cli/chunk-KMWKGPFZ.js.map +1 -0
  42. package/dist/cli/chunk-LVQX5KGF.js +14934 -0
  43. package/dist/cli/chunk-LVQX5KGF.js.map +1 -0
  44. package/dist/cli/chunk-MHDNZXJJ.js +48 -0
  45. package/dist/cli/chunk-MHDNZXJJ.js.map +1 -0
  46. package/dist/cli/chunk-ORM6PK57.js +140 -0
  47. package/dist/cli/chunk-ORM6PK57.js.map +1 -0
  48. package/dist/cli/chunk-Q5GRLZJF.js +99 -0
  49. package/dist/cli/chunk-Q5GRLZJF.js.map +1 -0
  50. package/dist/cli/chunk-Q6YFXW7H.js +4986 -0
  51. package/dist/cli/chunk-Q6YFXW7H.js.map +1 -0
  52. package/dist/cli/chunk-QGE6AF76.js +1467 -0
  53. package/dist/cli/chunk-QGE6AF76.js.map +1 -0
  54. package/dist/cli/chunk-RFX7TYVV.js +28 -0
  55. package/dist/cli/chunk-RFX7TYVV.js.map +1 -0
  56. package/dist/cli/chunk-RZILUXUC.js +940 -0
  57. package/dist/cli/chunk-RZILUXUC.js.map +1 -0
  58. package/dist/cli/chunk-SDE5U32Z.js +535 -0
  59. package/dist/cli/chunk-SDE5U32Z.js.map +1 -0
  60. package/dist/cli/chunk-SOZE7V7V.js +340 -0
  61. package/dist/cli/chunk-SOZE7V7V.js.map +1 -0
  62. package/dist/cli/chunk-U3V2ZQ5J.js +479 -0
  63. package/dist/cli/chunk-U3V2ZQ5J.js.map +1 -0
  64. package/dist/cli/chunk-W4LDFAZ6.js +1544 -0
  65. package/dist/cli/chunk-W4LDFAZ6.js.map +1 -0
  66. package/dist/cli/chunk-WBDE4IRI.js +208 -0
  67. package/dist/cli/chunk-WBDE4IRI.js.map +1 -0
  68. package/dist/cli/chunk-XHQIK7B6.js +189 -0
  69. package/dist/cli/chunk-XHQIK7B6.js.map +1 -0
  70. package/dist/cli/chunk-XJLZ4HKU.js +307 -0
  71. package/dist/cli/chunk-XJLZ4HKU.js.map +1 -0
  72. package/dist/cli/chunk-ZPTSJGX5.js +88 -0
  73. package/dist/cli/chunk-ZPTSJGX5.js.map +1 -0
  74. package/dist/cli/chunk-ZTLZO42A.js +231 -0
  75. package/dist/cli/chunk-ZTLZO42A.js.map +1 -0
  76. package/dist/cli/code-F4KJOE3K.js +151 -0
  77. package/dist/cli/code-F4KJOE3K.js.map +1 -0
  78. package/dist/cli/commands-JWT2MWVH.js +352 -0
  79. package/dist/cli/commands-JWT2MWVH.js.map +1 -0
  80. package/dist/cli/commit-RPZBOZS2.js +288 -0
  81. package/dist/cli/commit-RPZBOZS2.js.map +1 -0
  82. package/dist/cli/diff-NTEHCSDW.js +145 -0
  83. package/dist/cli/diff-NTEHCSDW.js.map +1 -0
  84. package/dist/cli/doctor-3TGB2NZN.js +19 -0
  85. package/dist/cli/doctor-3TGB2NZN.js.map +1 -0
  86. package/dist/cli/events-P27CX7LN.js +338 -0
  87. package/dist/cli/events-P27CX7LN.js.map +1 -0
  88. package/dist/cli/index.js +83 -34028
  89. package/dist/cli/index.js.map +1 -1
  90. package/dist/cli/mcp-ARTNQ24O.js +266 -0
  91. package/dist/cli/mcp-ARTNQ24O.js.map +1 -0
  92. package/dist/cli/mcp-browse-HLO2ENDL.js +163 -0
  93. package/dist/cli/mcp-browse-HLO2ENDL.js.map +1 -0
  94. package/dist/cli/mcp-inspect-T2HBR22P.js +103 -0
  95. package/dist/cli/mcp-inspect-T2HBR22P.js.map +1 -0
  96. package/dist/cli/{prompt-XHICFAYN.js → prompt-V47QKSAR.js} +3 -2
  97. package/dist/cli/prompt-V47QKSAR.js.map +1 -0
  98. package/dist/cli/prune-sessions-ERL6B4G5.js +42 -0
  99. package/dist/cli/prune-sessions-ERL6B4G5.js.map +1 -0
  100. package/dist/cli/replay-TMJASRC4.js +273 -0
  101. package/dist/cli/replay-TMJASRC4.js.map +1 -0
  102. package/dist/cli/run-JMEOTQCG.js +215 -0
  103. package/dist/cli/run-JMEOTQCG.js.map +1 -0
  104. package/dist/cli/server-SYC3OVOP.js +2967 -0
  105. package/dist/cli/server-SYC3OVOP.js.map +1 -0
  106. package/dist/cli/sessions-MOJAALJI.js +102 -0
  107. package/dist/cli/sessions-MOJAALJI.js.map +1 -0
  108. package/dist/cli/setup-CCJZAWTY.js +404 -0
  109. package/dist/cli/setup-CCJZAWTY.js.map +1 -0
  110. package/dist/cli/stats-5RJCATCE.js +12 -0
  111. package/dist/cli/stats-5RJCATCE.js.map +1 -0
  112. package/dist/cli/update-4TJWRUIN.js +90 -0
  113. package/dist/cli/update-4TJWRUIN.js.map +1 -0
  114. package/dist/cli/version-3MYFE4G6.js +29 -0
  115. package/dist/cli/version-3MYFE4G6.js.map +1 -0
  116. package/dist/index.d.ts +49 -96
  117. package/dist/index.js +567 -759
  118. package/dist/index.js.map +1 -1
  119. package/package.json +1 -1
  120. package/dist/cli/chunk-VWFJNLIK.js +0 -1031
  121. package/dist/cli/chunk-VWFJNLIK.js.map +0 -1
  122. /package/dist/cli/{prompt-XHICFAYN.js.map → chat-EIFLHBZ6.js.map} +0 -0
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../src/code/prompt.ts","../../src/memory/user.ts"],"sourcesContent":["import { existsSync, readFileSync } from \"node:fs\";\nimport { join } from \"node:path\";\nimport { applyMemoryStack } from \"../memory/user.js\";\nimport { ESCALATION_CONTRACT, TUI_FORMATTING_RULES } from \"../prompt-fragments.js\";\n\nexport const CODE_SYSTEM_PROMPT = `You are Reasonix Code, a coding assistant. You have filesystem tools (read_file, write_file, edit_file, multi_edit, list_directory, directory_tree, search_files, search_content, glob, get_file_info) rooted at the user's working directory, plus run_command / run_background for shell, plus \\`todo_write\\` for in-session multi-step tracking.\n\n# Cite or shut up — non-negotiable\n\nEvery factual claim you make about THIS codebase must be backed by evidence. Reasonix VALIDATES the citations you write — broken paths or out-of-range lines render in **red strikethrough with ❌** in front of the user.\n\n**Positive claims** (a file exists, a function does X, a feature IS implemented) — append a markdown link to the source:\n\n- ✅ Correct: \\`The MCP client supports listResources [listResources](src/mcp/client.ts:142).\\`\n- ❌ Wrong: \\`The MCP client supports listResources.\\` ← no citation, looks authoritative but unverifiable.\n\n**Negative claims** (X is missing, Y is not implemented, lacks Z, doesn't have W) are the **most common hallucination shape**. They feel safe to write because no citation seems possible — but that's exactly why you must NOT write them on instinct.\n\nIf you are about to write \"X is missing\" or \"Y is not implemented\" — **STOP**. Call \\`search_content\\` for the relevant symbol or term FIRST. 
Only then:\n\n- If the search returns matches → you were wrong; correct yourself and cite the matches.\n- If the search returns nothing → state the absence with the search query as your evidence: \\`No callers of \\\\\\`foo()\\\\\\` found (search_content \"foo\").\\`\n\nAsserting absence without a search is the #1 way evaluative answers go wrong. Treat the urge to write \"missing\" as a red flag in your own reasoning.\n\n# When to propose a plan (submit_plan)\n\nYou have a \\`submit_plan\\` tool that shows the user a markdown plan and lets them Approve / Refine / Cancel before you execute. Use it proactively when the task is large enough to deserve a review gate:\n\n- Multi-file refactors or renames.\n- Architecture changes (moving modules, splitting / merging files, new abstractions).\n- Anything where \"undo\" after the fact would be expensive — migrations, destructive cleanups, API shape changes.\n- When the user's request is ambiguous and multiple reasonable interpretations exist — propose your reading as a plan and let them confirm.\n\nSkip submit_plan for small, obvious changes: one-line typo, clear bug with a clear fix, adding a missing import, renaming a local variable. Just do those.\n\nPlan body: one-sentence summary, then a file-by-file breakdown of what you'll change and why, and any risks or open questions. If some decisions are genuinely up to the user (naming, tradeoffs, out-of-scope possibilities), list them in an \"Open questions\" section — the user sees the plan in a picker and has a text input to answer your questions before approving. Don't pretend certainty you don't have; flagged questions are how the user tells you what they care about. After calling submit_plan, STOP — don't call any more tools, wait for the user's verdict.\n\n**Do NOT use submit_plan to present A/B/C route menus.** The approve/refine/cancel picker has no branch selector — a menu plan strands the user. 
For branching decisions, use \\`ask_choice\\` (see below); only call submit_plan once the user has picked a direction and you have ONE actionable plan.\n\n# When to ask the user to pick (ask_choice)\n\nYou have an \\`ask_choice\\` tool. **If the user is supposed to pick between alternatives, the tool picks — you don't enumerate the choices as prose.** Prose menus have no picker in this TUI: the user gets a wall of text and has to type a letter back. The tool fires an arrow-key picker that's strictly better.\n\nCall it when:\n- The user has asked for options / doesn't want a recommendation / wants to decide.\n- You've analyzed multiple approaches and the final call is theirs.\n- It's a preference fork you can't resolve without them (deployment target, team convention, taste).\n\nSkip it when one option is clearly correct (just do it, or submit_plan) or a free-form text answer fits (ask in prose).\n\nEach option: short stable id (A/B/C), one-line title, optional summary. \\`allowCustom: true\\` when their real answer might not fit. Max 6. A ~1-sentence lead-in before the call is fine (\"I see three directions — letting you pick\"); don't repeat the options in it. After the call, STOP.\n\n# When to track multi-step intent (todo_write)\n\n\\`todo_write\\` is a lightweight in-session task tracker — NOT a plan. No approval gate, no checkpoint pauses, doesn't touch files. Use it when the task has 3+ distinct steps and you'd otherwise lose track of where you are. Each call REPLACES the entire list (set semantics). Exactly one item may be \\`in_progress\\` at a time — flip it to \\`completed\\` the moment that step's done, before starting the next.\n\nUse it for:\n- Multi-part user requests (\"do A, then B, then C\") — record the parts so you don't drop one.\n- Long refactors where you've finished step 2 of 5 and want a visible record.\n- Any moment where you'd otherwise enumerate \"1. ... 2. ... 3. 
...\" in prose — the tool is strictly better, the UI shows progress live.\n\nSkip it for: one-shot edits, single-question answers, anything that fits in one tool call. Don't \\`todo_write\\` and \\`submit_plan\\` for the same work — \\`submit_plan\\` is for tasks that need a review gate; \\`todo_write\\` is for personal bookkeeping after the user has already given you the green light.\n\nCall shape: \\`{ todos: [{ content, activeForm, status }, ...] }\\` — \\`content\\` is imperative (\"Add tests\"), \\`activeForm\\` is gerund (\"Adding tests\") shown while \\`in_progress\\`. Pass the FULL list every call, not a delta. Pass \\`todos: []\\` to clear when work's done.\n\n# Plan mode (/plan)\n\nThe user can ALSO enter \"plan mode\" via /plan, which is a stronger, explicit constraint:\n- Write tools (edit_file, multi_edit, write_file, create_directory, move_file) and non-allowlisted run_command calls are BOUNCED at dispatch — you'll get a tool result like \"unavailable in plan mode\". Don't retry them.\n- Read tools (read_file, list_directory, search_files, directory_tree, get_file_info) and allowlisted read-only / test shell commands still work — use them to investigate.\n- You MUST call submit_plan before anything will execute. Approve exits plan mode; Refine stays in; Cancel exits without implementing.\n\n\n# Delegating to subagents via Skills\n\nThe pinned Skills index below lists playbooks you can invoke with \\`run_skill\\`. Entries tagged \\`[🧬 subagent]\\` spawn an **isolated subagent** — a fresh child loop that runs the playbook in its own context and returns only the final answer. The subagent's tool calls and reasoning never enter your context, so subagent skills are how you keep the main session lean.\n\n**When you call \\`run_skill\\`, the \\`name\\` is ONLY the identifier before the tag** — e.g. \\`run_skill({ name: \"explore\", arguments: \"...\" })\\`, NOT \\`\"[🧬 subagent] explore\"\\` and NOT \\`\"explore [🧬 subagent]\"\\`. 
The tag is display sugar; the name argument is just the bare identifier.\n\nTwo built-ins ship by default:\n- **explore** \\`[🧬 subagent]\\` — read-only investigation across the codebase. Use when the user says things like \"find all places that...\", \"how does X work across the project\", \"survey the code for Y\". Pass \\`arguments\\` describing the concrete question.\n- **research** \\`[🧬 subagent]\\` — combines web search + code reading. Use for \"is X supported by lib Y\", \"what's the canonical way to Z\", \"compare our impl to the spec\".\n\nWhen to delegate (call \\`run_skill\\` with a subagent skill):\n- The task would otherwise need >5 file reads or searches.\n- You only need the conclusion, not the exploration trail.\n- The work is self-contained (you can describe it in one paragraph).\n\nWhen NOT to delegate:\n- Direct, narrow questions answerable in 1-2 tool calls — just do them.\n- Anything where you need to track intermediate results yourself (planning, multi-step edits).\n- Anything that requires user interaction (subagents can't submit plans or ask you for clarification).\n\nAlways pass a clear, self-contained \\`arguments\\` — that text is the **only** context the subagent gets.\n\n# When to edit vs. when to explore\n\nOnly propose edits when the user explicitly asks you to change, fix, add, remove, refactor, or write something. Do NOT propose edits when the user asks you to:\n- analyze, read, explore, describe, or summarize a project\n- explain how something works\n- answer a question about the code\n\nIn those cases, use tools to gather what you need, then reply in prose. No SEARCH/REPLACE blocks, no file changes. If you're unsure what the user wants, ask.\n\nWhen you do propose edits, the user will review them and decide whether to \\`/apply\\` or \\`/discard\\`. Don't assume they'll accept — write as if each edit will be audited, because it will.\n\nReasonix runs an **edit gate**. 
The user's current mode (\\`review\\` or \\`auto\\`) decides what happens to your writes; you DO NOT see which mode is active, and you SHOULD NOT ask. Write the same way in both cases.\n\n- In \\`auto\\` mode \\`edit_file\\` / \\`write_file\\` calls land on disk immediately with an undo window — you'll get the normal \"edit blocks: 1/1 applied\" style response.\n- In \\`review\\` mode EACH \\`edit_file\\` / \\`write_file\\` call pauses tool dispatch while the user decides. You'll get one of these responses:\n - \\`\"edit blocks: 1/1 applied\"\\` — user approved it. Continue as normal.\n - \\`\"User rejected this edit to <path>. Don't retry the same SEARCH/REPLACE…\"\\` — user said no to THIS specific edit. Do NOT re-emit the same block, do NOT switch tools to sneak it past the gate (write_file → edit_file, or text-form SEARCH/REPLACE). Either take a clearly different approach or stop and ask the user what they want instead.\n - Text-form SEARCH/REPLACE blocks in your assistant reply queue for end-of-turn /apply — same \"don't retry on rejection\" rule.\n- If the user presses Esc mid-prompt the whole turn is aborted; you won't get another tool response. Don't keep spamming tool calls after an abort.\n\n# Editing files\n\nWhen you've been asked to change a file, output one or more SEARCH/REPLACE blocks in this exact format:\n\npath/to/file.ext\n<<<<<<< SEARCH\nexact existing lines from the file, including whitespace\n=======\nthe new lines\n>>>>>>> REPLACE\n\nRules:\n- Always read_file first so your SEARCH matches byte-for-byte. If it doesn't match, the edit is rejected and you'll have to retry with the exact current content.\n- One edit per block. Multiple blocks in one response are fine.\n- To create a new file, leave SEARCH empty:\n path/to/new.ts\n <<<<<<< SEARCH\n =======\n (whole file content here)\n >>>>>>> REPLACE\n- Do NOT use write_file to change existing files — the user reviews your edits as SEARCH/REPLACE. 
write_file is only for files you explicitly want to overwrite wholesale (rare).\n- Paths are relative to the working directory. Don't use absolute paths.\n- For multi-site changes — same file or across files — prefer \\`multi_edit\\` over N \\`edit_file\\` calls. Shape: \\`{ edits: [{ path, search, replace }, ...] }\\`. All edits validate before any file is written; any failure → ALL files untouched. Per-file edits run in array order, so a later edit can match text inserted by an earlier one.\n\n# Trust what you already know\n\nBefore exploring the filesystem to answer a factual question, check whether the answer is already in context: the user's current message, earlier turns in this conversation (including prior tool results from \\`remember\\`), and the pinned memory blocks at the top of this prompt. When the user has stated a fact or you have remembered one, it outranks what the files say — don't re-derive from code what the user already told you. Explore when you genuinely don't know.\n\n# Exploration\n\n- Skip dependency, build, and VCS directories unless the user explicitly asks. The pinned .gitignore block (if any, below) is your authoritative denylist.\n- Prefer \\`search_files\\` over \\`list_directory\\` when you know roughly what you're looking for — it saves context and avoids enumerating huge trees. Note: \\`search_files\\` matches file NAMES; for searching file CONTENTS use \\`search_content\\`.\n- Available exploration tools: \\`read_file\\`, \\`list_directory\\`, \\`directory_tree\\`, \\`search_files\\` (filename match), \\`glob\\` (mtime-sorted glob — use for \"what changed lately\", \"all *.ts under src/\"), \\`search_content\\` (content grep — use for \"where is X called\", \"find all references to Y\"; pass \\`context:N\\` for grep -C N around hits), \\`get_file_info\\`. 
Don't call \\`grep\\` or other tools that aren't in this list — they don't exist as functions.\n\n# Path conventions\n\nTwo different rules depending on which tool:\n\n- **Filesystem tools** (\\`read_file\\`, \\`list_directory\\`, \\`search_files\\`, \\`edit_file\\`, etc.): paths are sandbox-relative. \\`/\\` means the project root, \\`/src/foo.ts\\` means \\`<project>/src/foo.ts\\`. Both relative (\\`src/foo.ts\\`) and POSIX-absolute (\\`/src/foo.ts\\`) forms work.\n- **\\`run_command\\`**: the command runs in a real OS shell with cwd pinned to the project root. Paths inside the shell command are interpreted by THAT shell, not by us. **Never use leading \\`/\\` in run_command arguments** — Windows treats \\`/tests\\` as drive-root \\`F:\\\\tests\\` (non-existent), POSIX shells treat it as filesystem root. Use plain relative paths (\\`tests\\`, \\`./tests\\`, \\`src/loop.ts\\`) instead.\n\n# When the user wants to switch project / working directory\n\nYou can't. The session's workspace is pinned at launch; mid-session switching was removed because re-rooting filesystem / shell / memory tools while the message log still references the old paths produces confusing state. Tell the user to quit and relaunch with the new directory (e.g. \\`cd ../other-project && reasonix code\\`).\n\nDo NOT try to switch via \\`run_command\\` (\\`cd\\`, \\`pushd\\`, etc.) — your tool sandbox is pinned and \\`cd\\` inside one shell call doesn't carry to the next.\n\n# Foreground vs. background commands\n\nYou have TWO tools for running shell commands, and picking the right one is non-negotiable:\n\n- \\`run_command\\` — blocks until the process exits. Use for: **tests, builds, lints, typechecks, git operations, one-shot scripts**. Anything that naturally returns in under a minute.\n- \\`run_background\\` — spawns and detaches after a brief startup window. Use for: **dev servers, watchers, any command with \"dev\" / \"serve\" / \"watch\" / \"start\" in the name**. 
Examples: \\`npm run dev\\`, \\`pnpm dev\\`, \\`yarn start\\`, \\`vite\\`, \\`next dev\\`, \\`uvicorn app:app --reload\\`, \\`flask run\\`, \\`python -m http.server\\`, \\`cargo watch\\`, \\`tsc --watch\\`, \\`webpack serve\\`.\n\n**Never use run_command for a dev server.** It will block for 60s, time out, and the user will see a frozen tool call while the server was actually running fine. Always \\`run_background\\`, then \\`job_output\\` to peek at the logs when you need to verify something.\n\nAfter \\`run_background\\`, tools available to you:\n- \\`job_output(jobId, tailLines?)\\` — read recent logs to verify startup / debug errors.\n- \\`wait_for_job(jobId, timeoutMs?)\\` — block until the job exits or emits new output. Prefer this over repeating identical \\`job_output\\` calls while you're intentionally waiting.\n- \\`list_jobs\\` — see every job this session (running + exited).\n- \\`stop_job(jobId)\\` — SIGTERM → SIGKILL after grace. Stop before switching port / config.\n\nDon't re-start an already-running dev server — call \\`list_jobs\\` first when in doubt.\n\n# Scope discipline on \"run it\" / \"start it\" requests\n\nWhen the user's request is to **run / start / launch / serve / boot up** something, your job is ONLY:\n\n1. Start it (\\`run_background\\` for dev servers, \\`run_command\\` for one-shots).\n2. Verify it came up (read a ready signal via \\`job_output\\`, or fetch the URL with \\`web_fetch\\` if they want you to confirm).\n3. Report what's running, where (URL / port / pid), and STOP.\n\nDo NOT, in the same turn:\n- Run \\`tsc\\` / type-checkers / linters unless the user asked for it.\n- Scan for bugs to \"proactively\" fix. 
The page rendering is success.\n- Clean up unused imports, dead code, or refactor \"while you're here.\"\n- Edit files to improve anything the user didn't mention.\n\nIf you notice an obvious issue, MENTION it in one sentence and wait for the user to say \"fix it.\" The cost of over-eagerness is real: you burn tokens, make surprise edits the user didn't want, and chain into cascading \"fix the new error I just introduced\" loops. The storm-breaker will cut you off, but the user still sees the mess.\n\n\"It works\" is the end state. Resist the urge to polish.\n\n# Style\n\n- Show edits; don't narrate them in prose. \"Here's the fix:\" is enough.\n- One short paragraph explaining *why*, then the blocks.\n- If you need to explore first (list / read / search), do it with tool calls before writing any prose — silence while exploring is fine.\n\n${ESCALATION_CONTRACT}\n\n${TUI_FORMATTING_RULES}\n`;\n\n/** Stack order (stable for cache prefix): base → REASONIX.md → global → project → .gitignore. */\nconst SEMANTIC_SEARCH_ROUTING = `\n\n# Search routing\n\nYou have BOTH \\`semantic_search\\` (vector index) and \\`search_content\\` (literal grep).\n\n- **Descriptive queries** (\"where do we handle X\", \"which file owns Y\", \"how does Z work\", \"find the logic that does …\", \"the code responsible for …\") → call \\`semantic_search\\` FIRST. It indexes the project by meaning, so it finds the right file even when your phrasing shares no tokens with the code.\n- **Exact-token queries** (a specific identifier, regex, or \"find every call to foo\") → call \\`search_content\\`.\n\nIf \\`semantic_search\\` returns nothing useful (low scores, off-topic), THEN fall back to \\`search_content\\`. Don't go the other way — grepping a paraphrased question wastes turns.`;\n\nexport interface CodeSystemPromptOptions {\n /** True when semantic_search is registered for this run. 
Adds an\n * explicit routing fragment so the model picks it for intent-style\n * queries instead of defaulting to grep. */\n hasSemanticSearch?: boolean;\n /** Inline string appended after the generated code system prompt.\n * Preserves the default prompt — this is append-only, not a replacement. */\n systemAppend?: string;\n /** UTF-8 file contents appended after the generated code system prompt.\n * Preserves the default prompt — this is append-only, not a replacement. */\n systemAppendFile?: string;\n}\n\nexport function codeSystemPrompt(rootDir: string, opts: CodeSystemPromptOptions = {}): string {\n const base = opts.hasSemanticSearch\n ? `${CODE_SYSTEM_PROMPT}${SEMANTIC_SEARCH_ROUTING}`\n : CODE_SYSTEM_PROMPT;\n const withMemory = applyMemoryStack(base, rootDir);\n const gitignorePath = join(rootDir, \".gitignore\");\n let result = withMemory;\n if (existsSync(gitignorePath)) {\n let content: string | undefined;\n try {\n content = readFileSync(gitignorePath, \"utf8\");\n } catch {}\n if (content !== undefined) {\n const MAX = 2000;\n const truncated =\n content.length > MAX\n ? `${content.slice(0, MAX)}\\n… (truncated ${content.length - MAX} chars)`\n : content;\n result = `${result}\\n\\n# Project .gitignore\\n\\nThe user's repo ships this .gitignore — treat every pattern as \"don't traverse or edit inside these paths unless explicitly asked\":\\n\\n\\`\\`\\`\\n${truncated}\\n\\`\\`\\`\\n`;\n }\n }\n const appendParts = [opts.systemAppend, opts.systemAppendFile].filter(Boolean);\n if (appendParts.length > 0) {\n result = `${result}\\n\\n# User System Append\\n\\n${appendParts.join(\"\\n\\n\")}`;\n }\n return result;\n}\n","/** User-private memory pinned into the immutable prefix; distinct from committable REASONIX.md. 
*/\n\nimport { createHash } from \"node:crypto\";\nimport {\n existsSync,\n mkdirSync,\n readFileSync,\n readdirSync,\n unlinkSync,\n writeFileSync,\n} from \"node:fs\";\nimport { homedir } from \"node:os\";\nimport { join, resolve } from \"node:path\";\nimport { applySkillsIndex } from \"../skills.js\";\nimport { applyProjectMemory, memoryEnabled } from \"./project.js\";\n\nexport const USER_MEMORY_DIR = \"memory\";\nexport const MEMORY_INDEX_FILE = \"MEMORY.md\";\n/** Cap on the index file content loaded into the prefix, per scope. */\nexport const MEMORY_INDEX_MAX_CHARS = 4000;\n\nexport type MemoryType = \"user\" | \"feedback\" | \"project\" | \"reference\";\nexport type MemoryScope = \"global\" | \"project\";\n\nexport interface MemoryEntry {\n name: string;\n type: MemoryType;\n scope: MemoryScope;\n description: string;\n body: string;\n /** ISO date string (YYYY-MM-DD). */\n createdAt: string;\n}\n\nexport interface MemoryStoreOptions {\n /** Override `~/.reasonix` — tests set this to a tmpdir. */\n homeDir?: string;\n /** Absolute sandbox root. Required to use `scope: \"project\"`. */\n projectRoot?: string;\n}\n\nexport interface WriteInput {\n name: string;\n type: MemoryType;\n scope: MemoryScope;\n description: string;\n body: string;\n}\n\nconst VALID_NAME = /^[a-zA-Z0-9_-][a-zA-Z0-9_.-]{1,38}[a-zA-Z0-9]$/;\n\n/** Throws on path-injection (../, /, leading dot). Allowed: 3-40 chars, alnum/_/-, interior `.`. */\nexport function sanitizeMemoryName(raw: string): string {\n const trimmed = String(raw ?? \"\").trim();\n if (!VALID_NAME.test(trimmed)) {\n throw new Error(\n `invalid memory name: ${JSON.stringify(raw)} — must be 3-40 chars, alnum/_/-, no path separators`,\n );\n }\n return trimmed;\n}\n\n/** Stable 16-hex-char hash of an absolute sandbox root path. 
*/\nexport function projectHash(rootDir: string): string {\n const abs = resolve(rootDir);\n return createHash(\"sha1\").update(abs).digest(\"hex\").slice(0, 16);\n}\n\nfunction scopeDir(opts: { homeDir: string; scope: MemoryScope; projectRoot?: string }): string {\n if (opts.scope === \"global\") {\n return join(opts.homeDir, USER_MEMORY_DIR, \"global\");\n }\n if (!opts.projectRoot) {\n throw new Error(\"scope=project requires a projectRoot on MemoryStore\");\n }\n return join(opts.homeDir, USER_MEMORY_DIR, projectHash(opts.projectRoot));\n}\n\nfunction ensureDir(p: string): void {\n if (!existsSync(p)) mkdirSync(p, { recursive: true });\n}\n\nfunction parseFrontmatter(raw: string): { data: Record<string, string>; body: string } {\n const lines = raw.split(/\\r?\\n/);\n if (lines[0] !== \"---\") return { data: {}, body: raw };\n const end = lines.indexOf(\"---\", 1);\n if (end < 0) return { data: {}, body: raw };\n const data: Record<string, string> = {};\n for (let i = 1; i < end; i++) {\n const line = lines[i];\n if (!line) continue;\n const m = line.match(/^([a-zA-Z_][a-zA-Z0-9_-]*):\\s*(.*)$/);\n if (m?.[1]) data[m[1]] = (m[2] ?? \"\").trim();\n }\n return {\n data,\n body: lines\n .slice(end + 1)\n .join(\"\\n\")\n .replace(/^\\n+/, \"\"),\n };\n}\n\nfunction formatFrontmatter(e: WriteInput & { createdAt: string }): string {\n return [\n \"---\",\n `name: ${e.name}`,\n `description: ${e.description.replace(/\\n/g, \" \")}`,\n `type: ${e.type}`,\n `scope: ${e.scope}`,\n `created: ${e.createdAt}`,\n \"---\",\n \"\",\n ].join(\"\\n\");\n}\n\nfunction todayIso(): string {\n const d = new Date();\n return d.toISOString().slice(0, 10);\n}\n\nfunction indexLine(e: Pick<MemoryEntry, \"name\" | \"description\">): string {\n const safeDesc = e.description.replace(/\\n/g, \" \").trim();\n const max = 130 - e.name.length;\n const clipped = safeDesc.length > max ? 
`${safeDesc.slice(0, Math.max(1, max - 1))}…` : safeDesc;\n return `- [${e.name}](${e.name}.md) — ${clipped}`;\n}\n\nexport class MemoryStore {\n private readonly homeDir: string;\n private readonly projectRoot: string | undefined;\n\n constructor(opts: MemoryStoreOptions = {}) {\n this.homeDir = opts.homeDir ?? join(homedir(), \".reasonix\");\n this.projectRoot = opts.projectRoot ? resolve(opts.projectRoot) : undefined;\n }\n\n /** Directory this store writes `scope` files into, creating it if needed. */\n dir(scope: MemoryScope): string {\n const d = scopeDir({ homeDir: this.homeDir, scope, projectRoot: this.projectRoot });\n ensureDir(d);\n return d;\n }\n\n /** Absolute path to a memory file (no existence check). */\n pathFor(scope: MemoryScope, name: string): string {\n return join(this.dir(scope), `${sanitizeMemoryName(name)}.md`);\n }\n\n /** True iff this store is configured with a project scope available. */\n hasProjectScope(): boolean {\n return this.projectRoot !== undefined;\n }\n\n loadIndex(\n scope: MemoryScope,\n ): { content: string; originalChars: number; truncated: boolean } | null {\n if (scope === \"project\" && !this.projectRoot) return null;\n const file = join(\n scopeDir({ homeDir: this.homeDir, scope, projectRoot: this.projectRoot }),\n MEMORY_INDEX_FILE,\n );\n if (!existsSync(file)) return null;\n let raw: string;\n try {\n raw = readFileSync(file, \"utf8\");\n } catch {\n return null;\n }\n const trimmed = raw.trim();\n if (!trimmed) return null;\n const originalChars = trimmed.length;\n const truncated = originalChars > MEMORY_INDEX_MAX_CHARS;\n const content = truncated\n ? `${trimmed.slice(0, MEMORY_INDEX_MAX_CHARS)}\\n… (truncated ${originalChars - MEMORY_INDEX_MAX_CHARS} chars)`\n : trimmed;\n return { content, originalChars, truncated };\n }\n\n /** Read one memory file's body (frontmatter stripped). Throws if missing. 
*/\n read(scope: MemoryScope, name: string): MemoryEntry {\n const file = this.pathFor(scope, name);\n if (!existsSync(file)) {\n throw new Error(`memory not found: scope=${scope} name=${name}`);\n }\n const raw = readFileSync(file, \"utf8\");\n const { data, body } = parseFrontmatter(raw);\n return {\n name: data.name ?? name,\n type: (data.type as MemoryType) ?? \"project\",\n scope: (data.scope as MemoryScope) ?? scope,\n description: data.description ?? \"\",\n body: body.trim(),\n createdAt: data.created ?? \"\",\n };\n }\n\n /** Skips malformed files — index stays queryable even if one file is hand-edited into nonsense. */\n list(): MemoryEntry[] {\n const out: MemoryEntry[] = [];\n const scopes: MemoryScope[] = this.projectRoot ? [\"global\", \"project\"] : [\"global\"];\n for (const scope of scopes) {\n const dir = scopeDir({ homeDir: this.homeDir, scope, projectRoot: this.projectRoot });\n if (!existsSync(dir)) continue;\n let entries: string[];\n try {\n entries = readdirSync(dir);\n } catch {\n continue;\n }\n for (const entry of entries) {\n if (entry === MEMORY_INDEX_FILE) continue;\n if (!entry.endsWith(\".md\")) continue;\n const name = entry.slice(0, -3);\n try {\n out.push(this.read(scope, name));\n } catch {\n // malformed file — skip rather than fail the whole list\n }\n }\n }\n return out;\n }\n\n write(input: WriteInput): string {\n if (input.scope === \"project\" && !this.projectRoot) {\n throw new Error(\"cannot write project-scoped memory: no projectRoot configured\");\n }\n const name = sanitizeMemoryName(input.name);\n const desc = String(input.description ?? \"\").trim();\n if (!desc) throw new Error(\"memory description cannot be empty\");\n const body = String(input.body ?? 
\"\").trim();\n if (!body) throw new Error(\"memory body cannot be empty\");\n const entry: WriteInput & { createdAt: string } = {\n ...input,\n name,\n description: desc,\n body,\n createdAt: todayIso(),\n };\n const dir = this.dir(input.scope);\n const file = join(dir, `${name}.md`);\n const content = `${formatFrontmatter(entry)}${body}\\n`;\n writeFileSync(file, content, \"utf8\");\n this.regenerateIndex(input.scope);\n return file;\n }\n\n /** Delete one memory + its index line. No-op if the file is already gone. */\n delete(scope: MemoryScope, rawName: string): boolean {\n if (scope === \"project\" && !this.projectRoot) {\n throw new Error(\"cannot delete project-scoped memory: no projectRoot configured\");\n }\n const file = this.pathFor(scope, rawName);\n if (!existsSync(file)) return false;\n unlinkSync(file);\n this.regenerateIndex(scope);\n return true;\n }\n\n /** Sorted by name — same file set must produce byte-identical MEMORY.md for stable prefix hashing. */\n private regenerateIndex(scope: MemoryScope): void {\n const dir = scopeDir({ homeDir: this.homeDir, scope, projectRoot: this.projectRoot });\n if (!existsSync(dir)) return;\n let files: string[];\n try {\n files = readdirSync(dir);\n } catch {\n return;\n }\n const mdFiles = files\n .filter((f) => f !== MEMORY_INDEX_FILE && f.endsWith(\".md\"))\n .sort((a, b) => a.localeCompare(b));\n const indexPath = join(dir, MEMORY_INDEX_FILE);\n if (mdFiles.length === 0) {\n if (existsSync(indexPath)) unlinkSync(indexPath);\n return;\n }\n const lines: string[] = [];\n for (const f of mdFiles) {\n const name = f.slice(0, -3);\n try {\n const entry = this.read(scope, name);\n lines.push(indexLine({ name: entry.name || name, description: entry.description }));\n } catch {\n // Malformed: still surface it in the index so the user notices.\n lines.push(`- [${name}](${name}.md) — (malformed, check frontmatter)`);\n }\n }\n writeFileSync(indexPath, `${lines.join(\"\\n\")}\\n`, \"utf8\");\n }\n}\n\n/** Freeform 
`#g` destination, distinct from MEMORY.md's curated index of named files. */\nexport function readGlobalReasonixMemory(\n homeDir: string = join(homedir(), \".reasonix\"),\n): { path: string; content: string; originalChars: number; truncated: boolean } | null {\n const path = join(homeDir, \"REASONIX.md\");\n if (!existsSync(path)) return null;\n let raw: string;\n try {\n raw = readFileSync(path, \"utf8\");\n } catch {\n return null;\n }\n const trimmed = raw.trim();\n if (!trimmed) return null;\n const originalChars = trimmed.length;\n // Reuse the project-memory cap so both freeform files have the same\n // headroom (8000 chars ≈ 2k tokens). They serve the same purpose at\n // different scopes.\n const truncated = originalChars > 8000;\n const content = truncated\n ? `${trimmed.slice(0, 8000)}\\n… (truncated ${originalChars - 8000} chars)`\n : trimmed;\n return { path, content, originalChars, truncated };\n}\n\nexport function applyGlobalReasonixMemory(basePrompt: string, homeDir?: string): string {\n if (!memoryEnabled()) return basePrompt;\n const dir = homeDir ?? join(homedir(), \".reasonix\");\n const mem = readGlobalReasonixMemory(dir);\n if (!mem) return basePrompt;\n return [\n basePrompt,\n \"\",\n \"# Global memory (~/.reasonix/REASONIX.md)\",\n \"\",\n \"Cross-project notes the user pinned via the `#g` prompt prefix. Treat as authoritative — same level of trust as project memory.\",\n \"\",\n \"```\",\n mem.content,\n \"```\",\n ].join(\"\\n\");\n}\n\n/** Empty index → omit the whole block (otherwise we'd add bytes to the prefix hash for nothing). */\nexport function applyUserMemory(\n basePrompt: string,\n opts: { homeDir?: string; projectRoot?: string } = {},\n): string {\n if (!memoryEnabled()) return basePrompt;\n const store = new MemoryStore(opts);\n const global = store.loadIndex(\"global\");\n const project = store.hasProjectScope() ? 
store.loadIndex(\"project\") : null;\n if (!global && !project) return basePrompt;\n const parts: string[] = [basePrompt];\n if (global) {\n parts.push(\n \"\",\n \"# User memory — global (~/.reasonix/memory/global/MEMORY.md)\",\n \"\",\n \"Cross-project facts and preferences the user has told you in prior sessions. TREAT AS AUTHORITATIVE — don't re-verify via filesystem or web. One-liners index detail files; call `recall_memory` for full bodies only when the one-liner isn't enough.\",\n \"\",\n \"```\",\n global.content,\n \"```\",\n );\n }\n if (project) {\n parts.push(\n \"\",\n \"# User memory — this project\",\n \"\",\n \"Per-project facts the user established in prior sessions (not committed to the repo). TREAT AS AUTHORITATIVE. Same recall pattern as global memory.\",\n \"\",\n \"```\",\n project.content,\n \"```\",\n );\n }\n return parts.join(\"\\n\");\n}\n\nexport function applyMemoryStack(basePrompt: string, rootDir: string): string {\n const withProject = applyProjectMemory(basePrompt, rootDir);\n const withGlobal = applyGlobalReasonixMemory(withProject);\n const withMemory = applyUserMemory(withGlobal, { projectRoot: rootDir });\n return applySkillsIndex(withMemory, { projectRoot: rootDir 
});\n}\n"],"mappings":";;;;;;;;;;AAAA,SAAS,cAAAA,aAAY,gBAAAC,qBAAoB;AACzC,SAAS,QAAAC,aAAY;;;ACCrB,SAAS,kBAAkB;AAC3B;AAAA,EACE;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,OACK;AACP,SAAS,eAAe;AACxB,SAAS,MAAM,eAAe;AAIvB,IAAM,kBAAkB;AACxB,IAAM,oBAAoB;AAE1B,IAAM,yBAAyB;AA8BtC,IAAM,aAAa;AAGZ,SAAS,mBAAmB,KAAqB;AACtD,QAAM,UAAU,OAAO,OAAO,EAAE,EAAE,KAAK;AACvC,MAAI,CAAC,WAAW,KAAK,OAAO,GAAG;AAC7B,UAAM,IAAI;AAAA,MACR,wBAAwB,KAAK,UAAU,GAAG,CAAC;AAAA,IAC7C;AAAA,EACF;AACA,SAAO;AACT;AAGO,SAAS,YAAY,SAAyB;AACnD,QAAM,MAAM,QAAQ,OAAO;AAC3B,SAAO,WAAW,MAAM,EAAE,OAAO,GAAG,EAAE,OAAO,KAAK,EAAE,MAAM,GAAG,EAAE;AACjE;AAEA,SAAS,SAAS,MAA6E;AAC7F,MAAI,KAAK,UAAU,UAAU;AAC3B,WAAO,KAAK,KAAK,SAAS,iBAAiB,QAAQ;AAAA,EACrD;AACA,MAAI,CAAC,KAAK,aAAa;AACrB,UAAM,IAAI,MAAM,qDAAqD;AAAA,EACvE;AACA,SAAO,KAAK,KAAK,SAAS,iBAAiB,YAAY,KAAK,WAAW,CAAC;AAC1E;AAEA,SAAS,UAAU,GAAiB;AAClC,MAAI,CAAC,WAAW,CAAC,EAAG,WAAU,GAAG,EAAE,WAAW,KAAK,CAAC;AACtD;AAEA,SAAS,iBAAiB,KAA6D;AACrF,QAAM,QAAQ,IAAI,MAAM,OAAO;AAC/B,MAAI,MAAM,CAAC,MAAM,MAAO,QAAO,EAAE,MAAM,CAAC,GAAG,MAAM,IAAI;AACrD,QAAM,MAAM,MAAM,QAAQ,OAAO,CAAC;AAClC,MAAI,MAAM,EAAG,QAAO,EAAE,MAAM,CAAC,GAAG,MAAM,IAAI;AAC1C,QAAM,OAA+B,CAAC;AACtC,WAAS,IAAI,GAAG,IAAI,KAAK,KAAK;AAC5B,UAAM,OAAO,MAAM,CAAC;AACpB,QAAI,CAAC,KAAM;AACX,UAAM,IAAI,KAAK,MAAM,qCAAqC;AAC1D,QAAI,IAAI,CAAC,EAAG,MAAK,EAAE,CAAC,CAAC,KAAK,EAAE,CAAC,KAAK,IAAI,KAAK;AAAA,EAC7C;AACA,SAAO;AAAA,IACL;AAAA,IACA,MAAM,MACH,MAAM,MAAM,CAAC,EACb,KAAK,IAAI,EACT,QAAQ,QAAQ,EAAE;AAAA,EACvB;AACF;AAEA,SAAS,kBAAkB,GAA+C;AACxE,SAAO;AAAA,IACL;AAAA,IACA,SAAS,EAAE,IAAI;AAAA,IACf,gBAAgB,EAAE,YAAY,QAAQ,OAAO,GAAG,CAAC;AAAA,IACjD,SAAS,EAAE,IAAI;AAAA,IACf,UAAU,EAAE,KAAK;AAAA,IACjB,YAAY,EAAE,SAAS;AAAA,IACvB;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACb;AAEA,SAAS,WAAmB;AAC1B,QAAM,IAAI,oBAAI,KAAK;AACnB,SAAO,EAAE,YAAY,EAAE,MAAM,GAAG,EAAE;AACpC;AAEA,SAAS,UAAU,GAAsD;AACvE,QAAM,WAAW,EAAE,YAAY,QAAQ,OAAO,GAAG,EAAE,KAAK;AACxD,QAAM,MAAM,MAAM,EAAE,KAAK;AACzB,QAAM,UAAU,SAAS,SAAS,MAAM,GAAG,SAAS,MAAM,GAAG,KAAK,IAAI,GAAG,MAAM,CAAC,CAAC,CAAC,WAAM;AACxF,SAAO,MAAM,EA
AE,IAAI,KAAK,EAAE,IAAI,eAAU,OAAO;AACjD;AAEO,IAAM,cAAN,MAAkB;AAAA,EACN;AAAA,EACA;AAAA,EAEjB,YAAY,OAA2B,CAAC,GAAG;AACzC,SAAK,UAAU,KAAK,WAAW,KAAK,QAAQ,GAAG,WAAW;AAC1D,SAAK,cAAc,KAAK,cAAc,QAAQ,KAAK,WAAW,IAAI;AAAA,EACpE;AAAA;AAAA,EAGA,IAAI,OAA4B;AAC9B,UAAM,IAAI,SAAS,EAAE,SAAS,KAAK,SAAS,OAAO,aAAa,KAAK,YAAY,CAAC;AAClF,cAAU,CAAC;AACX,WAAO;AAAA,EACT;AAAA;AAAA,EAGA,QAAQ,OAAoB,MAAsB;AAChD,WAAO,KAAK,KAAK,IAAI,KAAK,GAAG,GAAG,mBAAmB,IAAI,CAAC,KAAK;AAAA,EAC/D;AAAA;AAAA,EAGA,kBAA2B;AACzB,WAAO,KAAK,gBAAgB;AAAA,EAC9B;AAAA,EAEA,UACE,OACuE;AACvE,QAAI,UAAU,aAAa,CAAC,KAAK,YAAa,QAAO;AACrD,UAAM,OAAO;AAAA,MACX,SAAS,EAAE,SAAS,KAAK,SAAS,OAAO,aAAa,KAAK,YAAY,CAAC;AAAA,MACxE;AAAA,IACF;AACA,QAAI,CAAC,WAAW,IAAI,EAAG,QAAO;AAC9B,QAAI;AACJ,QAAI;AACF,YAAM,aAAa,MAAM,MAAM;AAAA,IACjC,QAAQ;AACN,aAAO;AAAA,IACT;AACA,UAAM,UAAU,IAAI,KAAK;AACzB,QAAI,CAAC,QAAS,QAAO;AACrB,UAAM,gBAAgB,QAAQ;AAC9B,UAAM,YAAY,gBAAgB;AAClC,UAAM,UAAU,YACZ,GAAG,QAAQ,MAAM,GAAG,sBAAsB,CAAC;AAAA,oBAAkB,gBAAgB,sBAAsB,YACnG;AACJ,WAAO,EAAE,SAAS,eAAe,UAAU;AAAA,EAC7C;AAAA;AAAA,EAGA,KAAK,OAAoB,MAA2B;AAClD,UAAM,OAAO,KAAK,QAAQ,OAAO,IAAI;AACrC,QAAI,CAAC,WAAW,IAAI,GAAG;AACrB,YAAM,IAAI,MAAM,2BAA2B,KAAK,SAAS,IAAI,EAAE;AAAA,IACjE;AACA,UAAM,MAAM,aAAa,MAAM,MAAM;AACrC,UAAM,EAAE,MAAM,KAAK,IAAI,iBAAiB,GAAG;AAC3C,WAAO;AAAA,MACL,MAAM,KAAK,QAAQ;AAAA,MACnB,MAAO,KAAK,QAAuB;AAAA,MACnC,OAAQ,KAAK,SAAyB;AAAA,MACtC,aAAa,KAAK,eAAe;AAAA,MACjC,MAAM,KAAK,KAAK;AAAA,MAChB,WAAW,KAAK,WAAW;AAAA,IAC7B;AAAA,EACF;AAAA;AAAA,EAGA,OAAsB;AACpB,UAAM,MAAqB,CAAC;AAC5B,UAAM,SAAwB,KAAK,cAAc,CAAC,UAAU,SAAS,IAAI,CAAC,QAAQ;AAClF,eAAW,SAAS,QAAQ;AAC1B,YAAM,MAAM,SAAS,EAAE,SAAS,KAAK,SAAS,OAAO,aAAa,KAAK,YAAY,CAAC;AACpF,UAAI,CAAC,WAAW,GAAG,EAAG;AACtB,UAAI;AACJ,UAAI;AACF,kBAAU,YAAY,GAAG;AAAA,MAC3B,QAAQ;AACN;AAAA,MACF;AACA,iBAAW,SAAS,SAAS;AAC3B,YAAI,UAAU,kBAAmB;AACjC,YAAI,CAAC,MAAM,SAAS,KAAK,EAAG;AAC5B,cAAM,OAAO,MAAM,MAAM,GAAG,EAAE;AAC9B,YAAI;AACF,cAAI,KAAK,KAAK,KAAK,OAAO,IAAI,CAAC;AAAA,QACjC,QAAQ;AAAA,QAER;AAAA,MACF;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA,EAEA,MAAM,OAA2B;AAC/B,QAAI,MAAM,UAAU,aAAa,CAAC,
KAAK,aAAa;AAClD,YAAM,IAAI,MAAM,+DAA+D;AAAA,IACjF;AACA,UAAM,OAAO,mBAAmB,MAAM,IAAI;AAC1C,UAAM,OAAO,OAAO,MAAM,eAAe,EAAE,EAAE,KAAK;AAClD,QAAI,CAAC,KAAM,OAAM,IAAI,MAAM,oCAAoC;AAC/D,UAAM,OAAO,OAAO,MAAM,QAAQ,EAAE,EAAE,KAAK;AAC3C,QAAI,CAAC,KAAM,OAAM,IAAI,MAAM,6BAA6B;AACxD,UAAM,QAA4C;AAAA,MAChD,GAAG;AAAA,MACH;AAAA,MACA,aAAa;AAAA,MACb;AAAA,MACA,WAAW,SAAS;AAAA,IACtB;AACA,UAAM,MAAM,KAAK,IAAI,MAAM,KAAK;AAChC,UAAM,OAAO,KAAK,KAAK,GAAG,IAAI,KAAK;AACnC,UAAM,UAAU,GAAG,kBAAkB,KAAK,CAAC,GAAG,IAAI;AAAA;AAClD,kBAAc,MAAM,SAAS,MAAM;AACnC,SAAK,gBAAgB,MAAM,KAAK;AAChC,WAAO;AAAA,EACT;AAAA;AAAA,EAGA,OAAO,OAAoB,SAA0B;AACnD,QAAI,UAAU,aAAa,CAAC,KAAK,aAAa;AAC5C,YAAM,IAAI,MAAM,gEAAgE;AAAA,IAClF;AACA,UAAM,OAAO,KAAK,QAAQ,OAAO,OAAO;AACxC,QAAI,CAAC,WAAW,IAAI,EAAG,QAAO;AAC9B,eAAW,IAAI;AACf,SAAK,gBAAgB,KAAK;AAC1B,WAAO;AAAA,EACT;AAAA;AAAA,EAGQ,gBAAgB,OAA0B;AAChD,UAAM,MAAM,SAAS,EAAE,SAAS,KAAK,SAAS,OAAO,aAAa,KAAK,YAAY,CAAC;AACpF,QAAI,CAAC,WAAW,GAAG,EAAG;AACtB,QAAI;AACJ,QAAI;AACF,cAAQ,YAAY,GAAG;AAAA,IACzB,QAAQ;AACN;AAAA,IACF;AACA,UAAM,UAAU,MACb,OAAO,CAAC,MAAM,MAAM,qBAAqB,EAAE,SAAS,KAAK,CAAC,EAC1D,KAAK,CAAC,GAAG,MAAM,EAAE,cAAc,CAAC,CAAC;AACpC,UAAM,YAAY,KAAK,KAAK,iBAAiB;AAC7C,QAAI,QAAQ,WAAW,GAAG;AACxB,UAAI,WAAW,SAAS,EAAG,YAAW,SAAS;AAC/C;AAAA,IACF;AACA,UAAM,QAAkB,CAAC;AACzB,eAAW,KAAK,SAAS;AACvB,YAAM,OAAO,EAAE,MAAM,GAAG,EAAE;AAC1B,UAAI;AACF,cAAM,QAAQ,KAAK,KAAK,OAAO,IAAI;AACnC,cAAM,KAAK,UAAU,EAAE,MAAM,MAAM,QAAQ,MAAM,aAAa,MAAM,YAAY,CAAC,CAAC;AAAA,MACpF,QAAQ;AAEN,cAAM,KAAK,MAAM,IAAI,KAAK,IAAI,4CAAuC;AAAA,MACvE;AAAA,IACF;AACA,kBAAc,WAAW,GAAG,MAAM,KAAK,IAAI,CAAC;AAAA,GAAM,MAAM;AAAA,EAC1D;AACF;AAGO,SAAS,yBACd,UAAkB,KAAK,QAAQ,GAAG,WAAW,GACwC;AACrF,QAAM,OAAO,KAAK,SAAS,aAAa;AACxC,MAAI,CAAC,WAAW,IAAI,EAAG,QAAO;AAC9B,MAAI;AACJ,MAAI;AACF,UAAM,aAAa,MAAM,MAAM;AAAA,EACjC,QAAQ;AACN,WAAO;AAAA,EACT;AACA,QAAM,UAAU,IAAI,KAAK;AACzB,MAAI,CAAC,QAAS,QAAO;AACrB,QAAM,gBAAgB,QAAQ;AAI9B,QAAM,YAAY,gBAAgB;AAClC,QAAM,UAAU,YACZ,GAAG,QAAQ,MAAM,GAAG,GAAI,CAAC;AAAA,oBAAkB,gBAAgB,GAAI,YAC/D;AACJ,SAAO,EAAE,MAAM,SAAS,eAAe,UAAU;AACnD;AAEO,SAAS,0BAA0B,
YAAoB,SAA0B;AACtF,MAAI,CAAC,cAAc,EAAG,QAAO;AAC7B,QAAM,MAAM,WAAW,KAAK,QAAQ,GAAG,WAAW;AAClD,QAAM,MAAM,yBAAyB,GAAG;AACxC,MAAI,CAAC,IAAK,QAAO;AACjB,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,IAAI;AAAA,IACJ;AAAA,EACF,EAAE,KAAK,IAAI;AACb;AAGO,SAAS,gBACd,YACA,OAAmD,CAAC,GAC5C;AACR,MAAI,CAAC,cAAc,EAAG,QAAO;AAC7B,QAAM,QAAQ,IAAI,YAAY,IAAI;AAClC,QAAM,SAAS,MAAM,UAAU,QAAQ;AACvC,QAAM,UAAU,MAAM,gBAAgB,IAAI,MAAM,UAAU,SAAS,IAAI;AACvE,MAAI,CAAC,UAAU,CAAC,QAAS,QAAO;AAChC,QAAM,QAAkB,CAAC,UAAU;AACnC,MAAI,QAAQ;AACV,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,OAAO;AAAA,MACP;AAAA,IACF;AAAA,EACF;AACA,MAAI,SAAS;AACX,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,QAAQ;AAAA,MACR;AAAA,IACF;AAAA,EACF;AACA,SAAO,MAAM,KAAK,IAAI;AACxB;AAEO,SAAS,iBAAiB,YAAoB,SAAyB;AAC5E,QAAM,cAAc,mBAAmB,YAAY,OAAO;AAC1D,QAAM,aAAa,0BAA0B,WAAW;AACxD,QAAM,aAAa,gBAAgB,YAAY,EAAE,aAAa,QAAQ,CAAC;AACvE,SAAO,iBAAiB,YAAY,EAAE,aAAa,QAAQ,CAAC;AAC9D;;;ADtXO,IAAM,qBAAqB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;A
AAA;AAAA;AAAA,EAuMhC,mBAAmB;AAAA;AAAA,EAEnB,oBAAoB;AAAA;AAItB,IAAM,0BAA0B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAwBzB,SAAS,iBAAiB,SAAiB,OAAgC,CAAC,GAAW;AAC5F,QAAM,OAAO,KAAK,oBACd,GAAG,kBAAkB,GAAG,uBAAuB,KAC/C;AACJ,QAAM,aAAa,iBAAiB,MAAM,OAAO;AACjD,QAAM,gBAAgBC,MAAK,SAAS,YAAY;AAChD,MAAI,SAAS;AACb,MAAIC,YAAW,aAAa,GAAG;AAC7B,QAAI;AACJ,QAAI;AACF,gBAAUC,cAAa,eAAe,MAAM;AAAA,IAC9C,QAAQ;AAAA,IAAC;AACT,QAAI,YAAY,QAAW;AACzB,YAAM,MAAM;AACZ,YAAM,YACJ,QAAQ,SAAS,MACb,GAAG,QAAQ,MAAM,GAAG,GAAG,CAAC;AAAA,oBAAkB,QAAQ,SAAS,GAAG,YAC9D;AACN,eAAS,GAAG,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAA8K,SAAS;AAAA;AAAA;AAAA,IAC3M;AAAA,EACF;AACA,QAAM,cAAc,CAAC,KAAK,cAAc,KAAK,gBAAgB,EAAE,OAAO,OAAO;AAC7E,MAAI,YAAY,SAAS,GAAG;AAC1B,aAAS,GAAG,MAAM;AAAA;AAAA;AAAA;AAAA,EAA+B,YAAY,KAAK,MAAM,CAAC;AAAA,EAC3E;AACA,SAAO;AACT;","names":["existsSync","readFileSync","join","join","existsSync","readFileSync"]}
@@ -0,0 +1,43 @@
1
+ #!/usr/bin/env node
2
+
3
+ // src/cli/ui/RecordView.tsx
4
+ import { Box, Text } from "ink";
5
+ import React from "react";
6
// Renders one TranscriptRecord for the ink-based terminal UI. `compact`
// tightens the tool-output truncation limits (used when panes render
// side-by-side); every other role renders identically in both modes.
// NOTE(review): interior runs of spaces inside string literals below may have
// been collapsed by the diff viewer this was captured from — confirm padding
// (e.g. the user-message continuation indent, which the original source says
// is six spaces) against the original TSX before relying on alignment.
function RecordView({ rec, compact = false }) {
  // Shorter limits in compact mode keep long tool payloads from dominating the pane.
  const toolArgsMax = compact ? 120 : 200;
  const toolContentMax = compact ? 200 : 400;
  if (rec.role === "user") {
    // Indent wrapped lines so multi-line user messages stay aligned under
    // the `you ›` prefix instead of jumping back to column 0.
    const content = rec.content.includes("\n") ? rec.content.split("\n").join("\n ") : rec.content;
    return /* @__PURE__ */ React.createElement(Box, { marginTop: 1 }, /* @__PURE__ */ React.createElement(Text, { bold: true, color: "cyan" }, "you \u203A", " "), /* @__PURE__ */ React.createElement(Text, null, content));
  }
  if (rec.role === "assistant_final") {
    // Header row: "assistant" + optional cost + optional cache badge,
    // then the reply body (or a dim placeholder for tool-call-only turns).
    return /* @__PURE__ */ React.createElement(Box, { flexDirection: "column", marginTop: 1 }, /* @__PURE__ */ React.createElement(Box, null, /* @__PURE__ */ React.createElement(Text, { bold: true, color: "green" }, "assistant"), rec.cost !== void 0 ? /* @__PURE__ */ React.createElement(Text, { dimColor: true }, " $", rec.cost.toFixed(6)) : null, rec.usage ? /* @__PURE__ */ React.createElement(CacheBadge, { usage: rec.usage }) : null), rec.content ? /* @__PURE__ */ React.createElement(Text, null, rec.content) : /* @__PURE__ */ React.createElement(Text, { dimColor: true, italic: true }, "(tool-call response only)"));
  }
  if (rec.role === "tool") {
    // Tool name, truncated args (when present), then the truncated result.
    return /* @__PURE__ */ React.createElement(Box, { flexDirection: "column", marginTop: 1 }, /* @__PURE__ */ React.createElement(Text, { color: "yellow" }, "tool<", rec.tool ?? "?", ">"), rec.args ? /* @__PURE__ */ React.createElement(Text, { dimColor: true }, " args: ", truncate(rec.args, toolArgsMax)) : null, /* @__PURE__ */ React.createElement(Text, { dimColor: true }, " \u2192 ", truncate(rec.content, toolContentMax)));
  }
  if (rec.role === "error") {
    // `rec.error` wins when set; falls back to the record's content.
    return /* @__PURE__ */ React.createElement(Box, { marginTop: 1 }, /* @__PURE__ */ React.createElement(Text, { color: "red", bold: true }, "error", " "), /* @__PURE__ */ React.createElement(Text, { color: "red" }, rec.error ?? rec.content));
  }
  if (rec.role === "done" || rec.role === "assistant_delta") {
    // Streaming noise — skipped entirely in replay output.
    return null;
  }
  // Unknown roles fall through to a dim `[role] content` line.
  return /* @__PURE__ */ React.createElement(Box, null, /* @__PURE__ */ React.createElement(Text, { dimColor: true }, "[", rec.role, "] ", rec.content));
}
27
/**
 * Inline cache-hit-rate badge rendered next to the assistant header.
 * Returns null when no prompt tokens were recorded at all.
 */
function CacheBadge({ usage }) {
  const hitTokens = usage.prompt_cache_hit_tokens ?? 0;
  const missTokens = usage.prompt_cache_miss_tokens ?? 0;
  const promptTokens = hitTokens + missTokens;
  if (promptTokens === 0) return null;
  const hitRate = hitTokens / promptTokens * 100;
  // Green ≥70%, yellow ≥40%, red below — a quick health read at a glance.
  let shade;
  if (hitRate >= 70) shade = "green";
  else if (hitRate >= 40) shade = "yellow";
  else shade = "red";
  return React.createElement(
    Text,
    null,
    React.createElement(Text, { dimColor: true }, " \xB7 cache "),
    React.createElement(Text, { color: shade }, hitRate.toFixed(1), "%")
  );
}
36
/**
 * Clips `s` to `max` characters, appending an ellipsis plus a count of the
 * hidden characters. Strings at or under the limit pass through untouched.
 */
function truncate(s, max) {
  if (s.length <= max) return s;
  const hidden = s.length - max;
  return `${s.slice(0, max)}\u2026 (+${hidden} chars)`;
}
39
+
40
+ export {
41
+ RecordView
42
+ };
43
+ //# sourceMappingURL=chunk-APPB3ZPQ.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../src/cli/ui/RecordView.tsx"],"sourcesContent":["/** Shared renderer for a single TranscriptRecord — used by ReplayApp and DiffApp. */\n\nimport { Box, Text } from \"ink\";\nimport React from \"react\";\nimport type { TranscriptRecord } from \"../../transcript/log.js\";\n\nexport interface RecordViewProps {\n rec: TranscriptRecord;\n /**\n * When rendering side-by-side in diff mode, shorter truncation limits\n * keep long tool results from dominating the pane. Passes through\n * untouched when undefined.\n */\n compact?: boolean;\n}\n\nexport function RecordView({ rec, compact = false }: RecordViewProps) {\n const toolArgsMax = compact ? 120 : 200;\n const toolContentMax = compact ? 200 : 400;\n\n if (rec.role === \"user\") {\n // Continuation indent of 6 spaces matches the `you › ` prefix width\n // so wrapped multi-line user messages align under the body text\n // instead of jumping to column 0.\n const content = rec.content.includes(\"\\n\")\n ? rec.content.split(\"\\n\").join(\"\\n \")\n : rec.content;\n return (\n <Box marginTop={1}>\n <Text bold color=\"cyan\">\n you ›{\" \"}\n </Text>\n <Text>{content}</Text>\n </Box>\n );\n }\n if (rec.role === \"assistant_final\") {\n return (\n <Box flexDirection=\"column\" marginTop={1}>\n <Box>\n <Text bold color=\"green\">\n assistant\n </Text>\n {rec.cost !== undefined ? (\n <Text dimColor>\n {\" $\"}\n {rec.cost.toFixed(6)}\n </Text>\n ) : null}\n {rec.usage ? <CacheBadge usage={rec.usage} /> : null}\n </Box>\n {rec.content ? (\n <Text>{rec.content}</Text>\n ) : (\n <Text dimColor italic>\n (tool-call response only)\n </Text>\n )}\n </Box>\n );\n }\n if (rec.role === \"tool\") {\n return (\n <Box flexDirection=\"column\" marginTop={1}>\n <Text color=\"yellow\">\n {\"tool<\"}\n {rec.tool ?? \"?\"}\n {\">\"}\n </Text>\n {rec.args ? 
(\n <Text dimColor>\n {\" args: \"}\n {truncate(rec.args, toolArgsMax)}\n </Text>\n ) : null}\n <Text dimColor>\n {\" → \"}\n {truncate(rec.content, toolContentMax)}\n </Text>\n </Box>\n );\n }\n if (rec.role === \"error\") {\n return (\n <Box marginTop={1}>\n <Text color=\"red\" bold>\n error{\" \"}\n </Text>\n <Text color=\"red\">{rec.error ?? rec.content}</Text>\n </Box>\n );\n }\n if (rec.role === \"done\" || rec.role === \"assistant_delta\") {\n // Noise in replay; skip.\n return null;\n }\n return (\n <Box>\n <Text dimColor>\n [{rec.role}] {rec.content}\n </Text>\n </Box>\n );\n}\n\nfunction CacheBadge({ usage }: { usage: NonNullable<TranscriptRecord[\"usage\"]> }) {\n const hit = usage.prompt_cache_hit_tokens ?? 0;\n const miss = usage.prompt_cache_miss_tokens ?? 0;\n const total = hit + miss;\n if (total === 0) return null;\n const pct = (hit / total) * 100;\n const color = pct >= 70 ? \"green\" : pct >= 40 ? \"yellow\" : \"red\";\n return (\n <Text>\n <Text dimColor>{\" · cache \"}</Text>\n <Text color={color}>{pct.toFixed(1)}%</Text>\n </Text>\n );\n}\n\nfunction truncate(s: string, max: number): string {\n return s.length <= max ? 
s : `${s.slice(0, max)}… (+${s.length - max} chars)`;\n}\n"],"mappings":";;;AAEA,SAAS,KAAK,YAAY;AAC1B,OAAO,WAAW;AAaX,SAAS,WAAW,EAAE,KAAK,UAAU,MAAM,GAAoB;AACpE,QAAM,cAAc,UAAU,MAAM;AACpC,QAAM,iBAAiB,UAAU,MAAM;AAEvC,MAAI,IAAI,SAAS,QAAQ;AAIvB,UAAM,UAAU,IAAI,QAAQ,SAAS,IAAI,IACrC,IAAI,QAAQ,MAAM,IAAI,EAAE,KAAK,UAAU,IACvC,IAAI;AACR,WACE,oCAAC,OAAI,WAAW,KACd,oCAAC,QAAK,MAAI,MAAC,OAAM,UAAO,cAChB,GACR,GACA,oCAAC,YAAM,OAAQ,CACjB;AAAA,EAEJ;AACA,MAAI,IAAI,SAAS,mBAAmB;AAClC,WACE,oCAAC,OAAI,eAAc,UAAS,WAAW,KACrC,oCAAC,WACC,oCAAC,QAAK,MAAI,MAAC,OAAM,WAAQ,WAEzB,GACC,IAAI,SAAS,SACZ,oCAAC,QAAK,UAAQ,QACX,OACA,IAAI,KAAK,QAAQ,CAAC,CACrB,IACE,MACH,IAAI,QAAQ,oCAAC,cAAW,OAAO,IAAI,OAAO,IAAK,IAClD,GACC,IAAI,UACH,oCAAC,YAAM,IAAI,OAAQ,IAEnB,oCAAC,QAAK,UAAQ,MAAC,QAAM,QAAC,2BAEtB,CAEJ;AAAA,EAEJ;AACA,MAAI,IAAI,SAAS,QAAQ;AACvB,WACE,oCAAC,OAAI,eAAc,UAAS,WAAW,KACrC,oCAAC,QAAK,OAAM,YACT,SACA,IAAI,QAAQ,KACZ,GACH,GACC,IAAI,OACH,oCAAC,QAAK,UAAQ,QACX,YACA,SAAS,IAAI,MAAM,WAAW,CACjC,IACE,MACJ,oCAAC,QAAK,UAAQ,QACX,aACA,SAAS,IAAI,SAAS,cAAc,CACvC,CACF;AAAA,EAEJ;AACA,MAAI,IAAI,SAAS,SAAS;AACxB,WACE,oCAAC,OAAI,WAAW,KACd,oCAAC,QAAK,OAAM,OAAM,MAAI,QAAC,SACf,GACR,GACA,oCAAC,QAAK,OAAM,SAAO,IAAI,SAAS,IAAI,OAAQ,CAC9C;AAAA,EAEJ;AACA,MAAI,IAAI,SAAS,UAAU,IAAI,SAAS,mBAAmB;AAEzD,WAAO;AAAA,EACT;AACA,SACE,oCAAC,WACC,oCAAC,QAAK,UAAQ,QAAC,KACX,IAAI,MAAK,MAAG,IAAI,OACpB,CACF;AAEJ;AAEA,SAAS,WAAW,EAAE,MAAM,GAAsD;AAChF,QAAM,MAAM,MAAM,2BAA2B;AAC7C,QAAM,OAAO,MAAM,4BAA4B;AAC/C,QAAM,QAAQ,MAAM;AACpB,MAAI,UAAU,EAAG,QAAO;AACxB,QAAM,MAAO,MAAM,QAAS;AAC5B,QAAM,QAAQ,OAAO,KAAK,UAAU,OAAO,KAAK,WAAW;AAC3D,SACE,oCAAC,YACC,oCAAC,QAAK,UAAQ,QAAE,eAAa,GAC7B,oCAAC,QAAK,SAAe,IAAI,QAAQ,CAAC,GAAE,GAAC,CACvC;AAEJ;AAEA,SAAS,SAAS,GAAW,KAAqB;AAChD,SAAO,EAAE,UAAU,MAAM,IAAI,GAAG,EAAE,MAAM,GAAG,GAAG,CAAC,YAAO,EAAE,SAAS,GAAG;AACtE;","names":[]}
@@ -0,0 +1,42 @@
1
+ #!/usr/bin/env node
2
+
3
+ // src/cli/ui/mcp-lifecycle.ts
4
// Glyph + label rendered for each MCP lifecycle state. In-progress states
// (handshake/reconnect) share the ↻ glyph and a trailing ellipsis; terminal
// states use ✓ / ✖ / ○.
var STATE = {
  handshake: { glyph: "\u21BB", label: "handshake\u2026" },
  connected: { glyph: "\u2713", label: "connected" },
  failed: { glyph: "\u2716", label: "failed" },
  disabled: { glyph: "\u25CB", label: "disabled" },
  reconnect: { glyph: "\u21BB", label: "reconnect\u2026" }
};
// Fixed column widths so consecutive lifecycle lines align into columns:
// name field padded to 22 chars, state field to 15.
var NAME_COL = 22;
var STATE_COL = 15;
13
/**
 * Formats one MCP lifecycle event as a single aligned status line:
 * `⌘ MCP · <name>   <glyph> <label>   <detail>`.
 */
function formatMcpLifecycleEvent(ev) {
  const { glyph, label } = STATE[ev.state];
  const prefix = `MCP \xB7 ${ev.name}`;
  // Pad the name out to NAME_COL, always leaving at least one space even
  // for names that overflow the column.
  const gap = " ".repeat(Math.max(1, NAME_COL - prefix.length));
  const status = `${glyph} ${label}`.padEnd(STATE_COL);
  return `\u2318 ${prefix}${gap}${status}${describeDetail(ev)}`;
}
20
/**
 * Human-readable detail text for a lifecycle event. Connected events get a
 * `N tools · N resources · N prompts · Nms` summary with zero counts
 * omitted; the other states map to fixed or reason-derived messages.
 */
function describeDetail(ev) {
  switch (ev.state) {
    case "handshake":
      return "initialise \u2192 tools/list \u2192 resources/list";
    case "failed":
      return ev.reason;
    case "disabled":
      return `via /mcp disable ${ev.name}`;
    case "reconnect":
      return "tearing down \xB7 re-handshake \xB7 listing tools";
    default: {
      const pieces = [`${ev.tools} tools`];
      if (ev.resources && ev.resources > 0) pieces.push(`${ev.resources} resources`);
      if (ev.prompts && ev.prompts > 0) pieces.push(`${ev.prompts} prompts`);
      pieces.push(`${ev.ms}ms`);
      return pieces.join(" \xB7 ");
    }
  }
}
31
+
32
+ // src/cli/ui/mcp-toast.ts
33
/**
 * One-line warning toast for an MCP server whose p95 latency crossed the
 * slow threshold, e.g. `⚠ MCP \`srv\` slow · 2.5s p95 over the last 20 calls`.
 */
function formatMcpSlowToast(t) {
  const p95Seconds = (t.p95Ms / 1e3).toFixed(1);
  return `\u26A0 MCP \`${t.name}\` slow \xB7 ${p95Seconds}s p95 over the last ${t.sampleSize} calls`;
}
37
+
38
+ export {
39
+ formatMcpLifecycleEvent,
40
+ formatMcpSlowToast
41
+ };
42
+ //# sourceMappingURL=chunk-BQNUJJN7.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../src/cli/ui/mcp-lifecycle.ts","../../src/cli/ui/mcp-toast.ts"],"sourcesContent":["/** Formats one-liner MCP lifecycle events per `docs/design/agent-tui-terminal.html` §37. */\n\nexport type McpLifecycleEvent =\n | { state: \"handshake\"; name: string }\n | {\n state: \"connected\";\n name: string;\n tools: number;\n resources?: number;\n prompts?: number;\n ms: number;\n }\n | { state: \"failed\"; name: string; reason: string }\n | { state: \"disabled\"; name: string }\n | { state: \"reconnect\"; name: string };\n\nconst STATE: Record<McpLifecycleEvent[\"state\"], { glyph: string; label: string }> = {\n handshake: { glyph: \"↻\", label: \"handshake…\" },\n connected: { glyph: \"✓\", label: \"connected\" },\n failed: { glyph: \"✖\", label: \"failed\" },\n disabled: { glyph: \"○\", label: \"disabled\" },\n reconnect: { glyph: \"↻\", label: \"reconnect…\" },\n};\n\nconst NAME_COL = 22;\nconst STATE_COL = 15;\n\nexport function formatMcpLifecycleEvent(ev: McpLifecycleEvent): string {\n const { glyph, label } = STATE[ev.state];\n const namePart = `MCP · ${ev.name}`;\n const namePad = \" \".repeat(Math.max(1, NAME_COL - namePart.length));\n const stateField = `${glyph} ${label}`.padEnd(STATE_COL);\n return `⌘ ${namePart}${namePad}${stateField}${describeDetail(ev)}`;\n}\n\nfunction describeDetail(ev: McpLifecycleEvent): string {\n if (ev.state === \"handshake\") return \"initialise → tools/list → resources/list\";\n if (ev.state === \"failed\") return ev.reason;\n if (ev.state === \"disabled\") return `via /mcp disable ${ev.name}`;\n if (ev.state === \"reconnect\") return \"tearing down · re-handshake · listing tools\";\n const parts: string[] = [`${ev.tools} tools`];\n if (ev.resources && ev.resources > 0) parts.push(`${ev.resources} resources`);\n if (ev.prompts && ev.prompts > 0) parts.push(`${ev.prompts} prompts`);\n parts.push(`${ev.ms}ms`);\n return parts.join(\" · \");\n}\n","/** One-line warn toast emitted when an MCP server's p95 
crosses the slow threshold (design §32). */\n\nexport interface McpSlowToast {\n name: string;\n p95Ms: number;\n sampleSize: number;\n}\n\nexport function formatMcpSlowToast(t: McpSlowToast): string {\n const seconds = (t.p95Ms / 1000).toFixed(1);\n return `⚠ MCP \\`${t.name}\\` slow · ${seconds}s p95 over the last ${t.sampleSize} calls`;\n}\n"],"mappings":";;;AAgBA,IAAM,QAA8E;AAAA,EAClF,WAAW,EAAE,OAAO,UAAK,OAAO,kBAAa;AAAA,EAC7C,WAAW,EAAE,OAAO,UAAK,OAAO,YAAY;AAAA,EAC5C,QAAQ,EAAE,OAAO,UAAK,OAAO,SAAS;AAAA,EACtC,UAAU,EAAE,OAAO,UAAK,OAAO,WAAW;AAAA,EAC1C,WAAW,EAAE,OAAO,UAAK,OAAO,kBAAa;AAC/C;AAEA,IAAM,WAAW;AACjB,IAAM,YAAY;AAEX,SAAS,wBAAwB,IAA+B;AACrE,QAAM,EAAE,OAAO,MAAM,IAAI,MAAM,GAAG,KAAK;AACvC,QAAM,WAAW,YAAS,GAAG,IAAI;AACjC,QAAM,UAAU,IAAI,OAAO,KAAK,IAAI,GAAG,WAAW,SAAS,MAAM,CAAC;AAClE,QAAM,aAAa,GAAG,KAAK,IAAI,KAAK,GAAG,OAAO,SAAS;AACvD,SAAO,UAAK,QAAQ,GAAG,OAAO,GAAG,UAAU,GAAG,eAAe,EAAE,CAAC;AAClE;AAEA,SAAS,eAAe,IAA+B;AACrD,MAAI,GAAG,UAAU,YAAa,QAAO;AACrC,MAAI,GAAG,UAAU,SAAU,QAAO,GAAG;AACrC,MAAI,GAAG,UAAU,WAAY,QAAO,oBAAoB,GAAG,IAAI;AAC/D,MAAI,GAAG,UAAU,YAAa,QAAO;AACrC,QAAM,QAAkB,CAAC,GAAG,GAAG,KAAK,QAAQ;AAC5C,MAAI,GAAG,aAAa,GAAG,YAAY,EAAG,OAAM,KAAK,GAAG,GAAG,SAAS,YAAY;AAC5E,MAAI,GAAG,WAAW,GAAG,UAAU,EAAG,OAAM,KAAK,GAAG,GAAG,OAAO,UAAU;AACpE,QAAM,KAAK,GAAG,GAAG,EAAE,IAAI;AACvB,SAAO,MAAM,KAAK,QAAK;AACzB;;;ACrCO,SAAS,mBAAmB,GAAyB;AAC1D,QAAM,WAAW,EAAE,QAAQ,KAAM,QAAQ,CAAC;AAC1C,SAAO,gBAAW,EAAE,IAAI,gBAAa,OAAO,uBAAuB,EAAE,UAAU;AACjF;","names":[]}
@@ -0,0 +1,39 @@
1
+ #!/usr/bin/env node
2
+
3
+ // src/cli/startup-profile.ts
4
+ import { performance } from "perf_hooks";
5
+ var marks = [];
6
+ var dumped = false;
7
+ function envFlag() {
8
+ const v = process.env.REASONIX_PROFILE_STARTUP;
9
+ return v === "1" || v === "true" || v === "yes";
10
+ }
11
+ function markPhase(name) {
12
+ if (!envFlag()) return;
13
+ marks.push({ name, t: performance.now() });
14
+ }
15
+ function dumpStartupProfile(stream = process.stderr) {
16
+ if (!envFlag() || dumped || marks.length === 0) return;
17
+ dumped = true;
18
+ const totalMs = marks[marks.length - 1].t;
19
+ const widest = String(Math.round(totalMs)).length;
20
+ const lines = ["[startup-profile]"];
21
+ let prev = 0;
22
+ for (const m of marks) {
23
+ const cum = Math.round(m.t).toString().padStart(widest);
24
+ const delta = Math.round(m.t - prev);
25
+ lines.push(` ${cum}ms ${m.name.padEnd(28)} (+${delta})`);
26
+ prev = m.t;
27
+ }
28
+ lines.push(
29
+ `\u2500\u2500\u2500 ${Math.round(totalMs)}ms total \xB7 last phase ${marks[marks.length - 1].name} \xB7 set REASONIX_PROFILE_STARTUP=0 to silence`
30
+ );
31
+ stream.write(`${lines.join("\n")}
32
+ `);
33
+ }
34
+
35
+ export {
36
+ markPhase,
37
+ dumpStartupProfile
38
+ };
39
+ //# sourceMappingURL=chunk-CPOV2O73.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../../src/cli/startup-profile.ts"],"sourcesContent":["import { performance } from \"node:perf_hooks\";\n\ninterface PhaseMark {\n name: string;\n t: number;\n}\n\nconst marks: PhaseMark[] = [];\nlet dumped = false;\n\nfunction envFlag(): boolean {\n const v = process.env.REASONIX_PROFILE_STARTUP;\n return v === \"1\" || v === \"true\" || v === \"yes\";\n}\n\nexport function isStartupProfileEnabled(): boolean {\n return envFlag();\n}\n\nexport function markPhase(name: string): void {\n if (!envFlag()) return;\n marks.push({ name, t: performance.now() });\n}\n\nexport function dumpStartupProfile(stream: NodeJS.WriteStream = process.stderr): void {\n if (!envFlag() || dumped || marks.length === 0) return;\n dumped = true;\n const totalMs = marks[marks.length - 1]!.t;\n const widest = String(Math.round(totalMs)).length;\n const lines: string[] = [\"[startup-profile]\"];\n let prev = 0;\n for (const m of marks) {\n const cum = Math.round(m.t).toString().padStart(widest);\n const delta = Math.round(m.t - prev);\n lines.push(` ${cum}ms ${m.name.padEnd(28)} (+${delta})`);\n prev = m.t;\n }\n lines.push(\n `─── ${Math.round(totalMs)}ms total · last phase ${marks[marks.length - 1]!.name} · set REASONIX_PROFILE_STARTUP=0 to silence`,\n );\n stream.write(`${lines.join(\"\\n\")}\\n`);\n}\n\nexport function _resetForTests(): void {\n marks.length = 0;\n dumped = 
false;\n}\n"],"mappings":";;;AAAA,SAAS,mBAAmB;AAO5B,IAAM,QAAqB,CAAC;AAC5B,IAAI,SAAS;AAEb,SAAS,UAAmB;AAC1B,QAAM,IAAI,QAAQ,IAAI;AACtB,SAAO,MAAM,OAAO,MAAM,UAAU,MAAM;AAC5C;AAMO,SAAS,UAAU,MAAoB;AAC5C,MAAI,CAAC,QAAQ,EAAG;AAChB,QAAM,KAAK,EAAE,MAAM,GAAG,YAAY,IAAI,EAAE,CAAC;AAC3C;AAEO,SAAS,mBAAmB,SAA6B,QAAQ,QAAc;AACpF,MAAI,CAAC,QAAQ,KAAK,UAAU,MAAM,WAAW,EAAG;AAChD,WAAS;AACT,QAAM,UAAU,MAAM,MAAM,SAAS,CAAC,EAAG;AACzC,QAAM,SAAS,OAAO,KAAK,MAAM,OAAO,CAAC,EAAE;AAC3C,QAAM,QAAkB,CAAC,mBAAmB;AAC5C,MAAI,OAAO;AACX,aAAW,KAAK,OAAO;AACrB,UAAM,MAAM,KAAK,MAAM,EAAE,CAAC,EAAE,SAAS,EAAE,SAAS,MAAM;AACtD,UAAM,QAAQ,KAAK,MAAM,EAAE,IAAI,IAAI;AACnC,UAAM,KAAK,KAAK,GAAG,OAAO,EAAE,KAAK,OAAO,EAAE,CAAC,OAAO,KAAK,GAAG;AAC1D,WAAO,EAAE;AAAA,EACX;AACA,QAAM;AAAA,IACJ,sBAAO,KAAK,MAAM,OAAO,CAAC,4BAAyB,MAAM,MAAM,SAAS,CAAC,EAAG,IAAI;AAAA,EAClF;AACA,SAAO,MAAM,GAAG,MAAM,KAAK,IAAI,CAAC;AAAA,CAAI;AACtC;","names":[]}
@@ -0,0 +1,368 @@
1
+ #!/usr/bin/env node
2
+ import {
3
+ DeepSeekClient
4
+ } from "./chunk-KMWKGPFZ.js";
5
+ import {
6
+ loadDotenv
7
+ } from "./chunk-3Q3C4W66.js";
8
+ import {
9
+ checkOllamaStatus,
10
+ indexExists
11
+ } from "./chunk-RZILUXUC.js";
12
+ import {
13
+ loadHooks
14
+ } from "./chunk-WBDE4IRI.js";
15
+ import {
16
+ VERSION
17
+ } from "./chunk-2AWTGJ2C.js";
18
+ import {
19
+ listSessions
20
+ } from "./chunk-DFP4YSVM.js";
21
+ import {
22
+ defaultConfigPath,
23
+ readConfig,
24
+ resolveSemanticEmbeddingConfig
25
+ } from "./chunk-DULSP7JH.js";
26
+
27
+ // src/cli/commands/doctor.ts
28
+ import { existsSync, readFileSync, statSync } from "fs";
29
+ import { homedir } from "os";
30
+ import { dirname, join, resolve } from "path";
31
+ async function runDoctorChecks(projectRoot) {
32
+ return Promise.all([
33
+ checkApiKey(),
34
+ checkConfig(),
35
+ checkApiReach(),
36
+ checkTokenizer(),
37
+ checkSessions(),
38
+ checkHooks(projectRoot),
39
+ checkOllama(projectRoot),
40
+ checkProject(projectRoot)
41
+ ]);
42
+ }
43
+ var TTY = process.stdout.isTTY && process.env.TERM !== "dumb";
44
+ function color(text, code) {
45
+ if (!TTY) return text;
46
+ return `\x1B[${code}m${text}\x1B[0m`;
47
+ }
48
+ function badge(level) {
49
+ if (level === "ok") return color("\u2713", "32");
50
+ if (level === "warn") return color("\u26A0", "33");
51
+ return color("\u2717", "31");
52
+ }
53
+ function tail4(s) {
54
+ return s.length <= 4 ? s : `\u2026${s.slice(-4)}`;
55
+ }
56
+ function fmtBytes(n) {
57
+ if (n < 1024) return `${n} B`;
58
+ if (n < 1024 * 1024) return `${(n / 1024).toFixed(1)} KB`;
59
+ return `${(n / 1024 / 1024).toFixed(1)} MB`;
60
+ }
61
+ async function checkApiKey() {
62
+ const fromEnv = process.env.DEEPSEEK_API_KEY;
63
+ if (fromEnv) {
64
+ return {
65
+ label: "api key ",
66
+ level: "ok",
67
+ detail: `set via env DEEPSEEK_API_KEY (${tail4(fromEnv)})`
68
+ };
69
+ }
70
+ try {
71
+ const cfg = readConfig();
72
+ if (cfg.apiKey) {
73
+ return {
74
+ label: "api key ",
75
+ level: "ok",
76
+ detail: `from ${defaultConfigPath()} (${tail4(cfg.apiKey)})`
77
+ };
78
+ }
79
+ } catch {
80
+ }
81
+ return {
82
+ label: "api key ",
83
+ level: "fail",
84
+ detail: "not set \u2014 `reasonix setup` to save one, or export DEEPSEEK_API_KEY. Get a key at https://platform.deepseek.com/api_keys"
85
+ };
86
+ }
87
+ async function checkConfig() {
88
+ const path = defaultConfigPath();
89
+ if (!existsSync(path)) {
90
+ return {
91
+ label: "config ",
92
+ level: "warn",
93
+ detail: "missing \u2014 running with library defaults. `reasonix setup` writes one."
94
+ };
95
+ }
96
+ try {
97
+ const cfg = readConfig(path);
98
+ const parts = [];
99
+ if (cfg.preset) parts.push(`preset=${cfg.preset}`);
100
+ if (cfg.editMode) parts.push(`editMode=${cfg.editMode}`);
101
+ if (cfg.mcp && cfg.mcp.length > 0) parts.push(`mcp=${cfg.mcp.length}`);
102
+ return {
103
+ label: "config ",
104
+ level: "ok",
105
+ detail: `${path}${parts.length ? ` (${parts.join(", ")})` : ""}`
106
+ };
107
+ } catch (err) {
108
+ return {
109
+ label: "config ",
110
+ level: "fail",
111
+ detail: `${path} unreadable \u2014 ${err.message}`
112
+ };
113
+ }
114
+ }
115
+ async function checkApiReach() {
116
+ const key = process.env.DEEPSEEK_API_KEY ?? readConfig().apiKey;
117
+ if (!key) {
118
+ return {
119
+ label: "api reach ",
120
+ level: "warn",
121
+ detail: "skipped \u2014 no api key to test with"
122
+ };
123
+ }
124
+ try {
125
+ const client = new DeepSeekClient({ apiKey: key });
126
+ const ctl = new AbortController();
127
+ const timer = setTimeout(() => ctl.abort(), 8e3);
128
+ let balance;
129
+ try {
130
+ balance = await client.getBalance({ signal: ctl.signal });
131
+ } finally {
132
+ clearTimeout(timer);
133
+ }
134
+ if (!balance) {
135
+ return {
136
+ label: "api reach ",
137
+ level: "fail",
138
+ detail: "/user/balance returned null \u2014 auth failed or network blocked"
139
+ };
140
+ }
141
+ if (!balance.is_available) {
142
+ const info2 = balance.balance_infos[0];
143
+ return {
144
+ label: "api reach ",
145
+ level: "warn",
146
+ detail: `account flagged not-available${info2 ? ` (${info2.total_balance} ${info2.currency})` : ""} \u2014 top up or check your dashboard`
147
+ };
148
+ }
149
+ const info = balance.balance_infos[0];
150
+ return {
151
+ label: "api reach ",
152
+ level: "ok",
153
+ detail: info ? `/user/balance ok \u2014 ${info.total_balance} ${info.currency}` : "/user/balance ok"
154
+ };
155
+ } catch (err) {
156
+ return {
157
+ label: "api reach ",
158
+ level: "fail",
159
+ detail: `${err.message}`
160
+ };
161
+ }
162
+ }
163
+ async function checkTokenizer() {
164
+ const candidates = [
165
+ join(
166
+ dirname(new URL(import.meta.url).pathname.replace(/^\/([A-Za-z]:)/, "$1")),
167
+ "..",
168
+ "..",
169
+ "..",
170
+ "data",
171
+ "deepseek-tokenizer.json.gz"
172
+ ),
173
+ join(process.cwd(), "data", "deepseek-tokenizer.json.gz")
174
+ ];
175
+ for (const p of candidates) {
176
+ if (existsSync(p)) {
177
+ try {
178
+ const stat = statSync(p);
179
+ return {
180
+ label: "tokenizer ",
181
+ level: "ok",
182
+ detail: `${p} (${fmtBytes(stat.size)})`
183
+ };
184
+ } catch {
185
+ }
186
+ }
187
+ }
188
+ return {
189
+ label: "tokenizer ",
190
+ level: "warn",
191
+ detail: "data/deepseek-tokenizer.json.gz not found \u2014 token counts will fall back to char heuristics"
192
+ };
193
+ }
194
+ async function checkSessions() {
195
+ try {
196
+ const list = listSessions();
197
+ if (list.length === 0) {
198
+ return {
199
+ label: "sessions ",
200
+ level: "ok",
201
+ detail: "0 saved"
202
+ };
203
+ }
204
+ const totalBytes = list.reduce((s, e) => s + e.size, 0);
205
+ const oldest = list[list.length - 1];
206
+ const ageDays = Math.floor((Date.now() - oldest.mtime.getTime()) / (24 * 60 * 60 * 1e3));
207
+ const stale = list.filter(
208
+ (e) => Date.now() - e.mtime.getTime() >= 90 * 24 * 60 * 60 * 1e3
209
+ ).length;
210
+ const detail = `${list.length} saved \xB7 ${fmtBytes(totalBytes)} \xB7 oldest ${ageDays}d`;
211
+ if (stale > 0) {
212
+ return {
213
+ label: "sessions ",
214
+ level: "warn",
215
+ detail: `${detail} \xB7 ${stale} idle \u226590d (run \`reasonix prune-sessions\`)`
216
+ };
217
+ }
218
+ return { label: "sessions ", level: "ok", detail };
219
+ } catch (err) {
220
+ return {
221
+ label: "sessions ",
222
+ level: "warn",
223
+ detail: `cannot list \u2014 ${err.message}`
224
+ };
225
+ }
226
+ }
227
+ async function checkHooks(projectRoot) {
228
+ try {
229
+ const all = loadHooks({ projectRoot });
230
+ const global = all.filter((h) => h.scope === "global").length;
231
+ const project = all.filter((h) => h.scope === "project").length;
232
+ return {
233
+ label: "hooks ",
234
+ level: "ok",
235
+ detail: `${global} global, ${project} project`
236
+ };
237
+ } catch (err) {
238
+ return {
239
+ label: "hooks ",
240
+ level: "warn",
241
+ detail: `couldn't parse settings.json \u2014 ${err.message}`
242
+ };
243
+ }
244
+ }
245
+ async function checkOllama(projectRoot) {
246
+ let exists = false;
247
+ try {
248
+ exists = await indexExists(projectRoot);
249
+ } catch {
250
+ }
251
+ if (!exists) {
252
+ return {
253
+ label: "semantic ",
254
+ level: "ok",
255
+ detail: "not in use (no semantic index built; `reasonix index` to enable)"
256
+ };
257
+ }
258
+ const meta = readSemanticMeta(projectRoot);
259
+ if (meta?.provider === "openai-compat") {
260
+ const resolved = resolveSemanticEmbeddingConfig();
261
+ if (resolved.provider !== "openai-compat") {
262
+ return {
263
+ label: "semantic ",
264
+ level: "warn",
265
+ detail: `index uses openai-compat/${meta.model} but current config resolves to ${resolved.provider}/${resolved.model} \u2014 rebuild before searching`
266
+ };
267
+ }
268
+ return {
269
+ label: "semantic ",
270
+ level: "ok",
271
+ detail: `openai-compat \xB7 ${resolved.baseUrl} \xB7 model ${resolved.model} \xB7 api key configured`
272
+ };
273
+ }
274
+ try {
275
+ const model = meta?.model || process.env.REASONIX_EMBED_MODEL || "nomic-embed-text";
276
+ const status = await checkOllamaStatus(model);
277
+ if (!status.binaryFound) {
278
+ return {
279
+ label: "semantic ",
280
+ level: "warn",
281
+ detail: "ollama binary not on PATH \u2014 semantic_search will fail; install from https://ollama.com"
282
+ };
283
+ }
284
+ if (!status.daemonRunning) {
285
+ return {
286
+ label: "semantic ",
287
+ level: "warn",
288
+ detail: "ollama daemon not running \u2014 `ollama serve` (or call /semantic in TUI to auto-start)"
289
+ };
290
+ }
291
+ if (!status.modelPulled) {
292
+ return {
293
+ label: "semantic ",
294
+ level: "warn",
295
+ detail: `model ${status.modelName} not pulled \u2014 \`ollama pull ${status.modelName}\``
296
+ };
297
+ }
298
+ return {
299
+ label: "semantic ",
300
+ level: "ok",
301
+ detail: `ollama daemon up \xB7 model ${status.modelName} ready`
302
+ };
303
+ } catch (err) {
304
+ return {
305
+ label: "semantic ",
306
+ level: "warn",
307
+ detail: `probe failed \u2014 ${err.message}`
308
+ };
309
+ }
310
+ }
311
+ function readSemanticMeta(projectRoot) {
312
+ try {
313
+ const raw = readFileSync(join(projectRoot, ".reasonix", "semantic", "index.meta.json"), "utf8");
314
+ const parsed = JSON.parse(raw);
315
+ return {
316
+ provider: parsed.provider === "openai-compat" ? "openai-compat" : "ollama",
317
+ model: typeof parsed.model === "string" ? parsed.model : ""
318
+ };
319
+ } catch {
320
+ return null;
321
+ }
322
+ }
323
+ async function checkProject(projectRoot) {
324
+ const markers = [".git", "REASONIX.md", "package.json", "pyproject.toml", "Cargo.toml", "go.mod"];
325
+ const found = markers.filter((m) => existsSync(join(projectRoot, m)));
326
+ if (found.length === 0) {
327
+ return {
328
+ label: "project ",
329
+ level: "warn",
330
+ detail: `${projectRoot} has none of: ${markers.slice(0, 3).join(", ")} \u2026 \u2014 \`reasonix code\` will still run, but @-mentions and project memory have nothing to anchor`
331
+ };
332
+ }
333
+ return {
334
+ label: "project ",
335
+ level: "ok",
336
+ detail: `${projectRoot} (${found.join(", ")})`
337
+ };
338
+ }
339
+ async function doctorCommand() {
340
+ loadDotenv();
341
+ const projectRoot = resolve(process.cwd());
342
+ console.log(`${color(`reasonix ${VERSION} \xB7 doctor`, "1")} (cwd: ${projectRoot})`);
343
+ console.log(` home: ${homedir()}`);
344
+ console.log("");
345
+ const checks = await runDoctorChecks(projectRoot);
346
+ for (const c of checks) {
347
+ console.log(` ${badge(c.level)} ${c.label} ${c.detail}`);
348
+ }
349
+ const ok = checks.filter((c) => c.level === "ok").length;
350
+ const warn = checks.filter((c) => c.level === "warn").length;
351
+ const fail = checks.filter((c) => c.level === "fail").length;
352
+ console.log("");
353
+ const summary = `${ok} ok \xB7 ${warn} warn \xB7 ${fail} fail`;
354
+ if (fail > 0) {
355
+ console.log(color(summary, "31"));
356
+ process.exit(1);
357
+ } else if (warn > 0) {
358
+ console.log(color(summary, "33"));
359
+ } else {
360
+ console.log(color(summary, "32"));
361
+ }
362
+ }
363
+
364
+ export {
365
+ runDoctorChecks,
366
+ doctorCommand
367
+ };
368
+ //# sourceMappingURL=chunk-D5DKXIP5.js.map