luckyd-code 1.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127) hide show
  1. luckyd_code/__init__.py +54 -0
  2. luckyd_code/__main__.py +5 -0
  3. luckyd_code/_agent_loop.py +551 -0
  4. luckyd_code/_data_dir.py +73 -0
  5. luckyd_code/agent.py +38 -0
  6. luckyd_code/analytics/__init__.py +18 -0
  7. luckyd_code/analytics/reporter.py +195 -0
  8. luckyd_code/analytics/scanner.py +443 -0
  9. luckyd_code/analytics/smells.py +316 -0
  10. luckyd_code/analytics/trends.py +303 -0
  11. luckyd_code/api.py +473 -0
  12. luckyd_code/audit_daemon.py +845 -0
  13. luckyd_code/autonomous_fixer.py +473 -0
  14. luckyd_code/background.py +159 -0
  15. luckyd_code/backup.py +237 -0
  16. luckyd_code/brain/__init__.py +84 -0
  17. luckyd_code/brain/assembler.py +100 -0
  18. luckyd_code/brain/chunker.py +345 -0
  19. luckyd_code/brain/constants.py +73 -0
  20. luckyd_code/brain/embedder.py +163 -0
  21. luckyd_code/brain/graph.py +311 -0
  22. luckyd_code/brain/indexer.py +316 -0
  23. luckyd_code/brain/parser.py +140 -0
  24. luckyd_code/brain/retriever.py +234 -0
  25. luckyd_code/cli.py +894 -0
  26. luckyd_code/cli_commands/__init__.py +1 -0
  27. luckyd_code/cli_commands/audit.py +120 -0
  28. luckyd_code/cli_commands/background.py +83 -0
  29. luckyd_code/cli_commands/brain.py +87 -0
  30. luckyd_code/cli_commands/config.py +75 -0
  31. luckyd_code/cli_commands/dispatcher.py +695 -0
  32. luckyd_code/cli_commands/sessions.py +41 -0
  33. luckyd_code/cli_entry.py +147 -0
  34. luckyd_code/cli_utils.py +112 -0
  35. luckyd_code/config.py +205 -0
  36. luckyd_code/context.py +214 -0
  37. luckyd_code/cost_tracker.py +209 -0
  38. luckyd_code/error_reporter.py +508 -0
  39. luckyd_code/exceptions.py +39 -0
  40. luckyd_code/export.py +126 -0
  41. luckyd_code/feedback_analyzer.py +290 -0
  42. luckyd_code/file_watcher.py +258 -0
  43. luckyd_code/git/__init__.py +11 -0
  44. luckyd_code/git/auto_commit.py +157 -0
  45. luckyd_code/git/tools.py +85 -0
  46. luckyd_code/hooks.py +236 -0
  47. luckyd_code/indexer.py +280 -0
  48. luckyd_code/init.py +39 -0
  49. luckyd_code/keybindings.py +77 -0
  50. luckyd_code/log.py +55 -0
  51. luckyd_code/mcp/__init__.py +6 -0
  52. luckyd_code/mcp/client.py +184 -0
  53. luckyd_code/memory/__init__.py +19 -0
  54. luckyd_code/memory/manager.py +339 -0
  55. luckyd_code/metrics/__init__.py +5 -0
  56. luckyd_code/model_registry.py +131 -0
  57. luckyd_code/orchestrator.py +204 -0
  58. luckyd_code/permissions/__init__.py +1 -0
  59. luckyd_code/permissions/manager.py +103 -0
  60. luckyd_code/planner.py +361 -0
  61. luckyd_code/plugins.py +91 -0
  62. luckyd_code/py.typed +0 -0
  63. luckyd_code/retry.py +57 -0
  64. luckyd_code/router.py +417 -0
  65. luckyd_code/sandbox.py +156 -0
  66. luckyd_code/self_critique.py +2 -0
  67. luckyd_code/self_improve.py +274 -0
  68. luckyd_code/sessions.py +114 -0
  69. luckyd_code/settings.py +72 -0
  70. luckyd_code/skills/__init__.py +8 -0
  71. luckyd_code/skills/review.py +22 -0
  72. luckyd_code/skills/security.py +17 -0
  73. luckyd_code/tasks/__init__.py +1 -0
  74. luckyd_code/tasks/manager.py +102 -0
  75. luckyd_code/templates/icon-192.png +0 -0
  76. luckyd_code/templates/icon-512.png +0 -0
  77. luckyd_code/templates/index.html +1965 -0
  78. luckyd_code/templates/manifest.json +14 -0
  79. luckyd_code/templates/src/app.js +694 -0
  80. luckyd_code/templates/src/body.html +767 -0
  81. luckyd_code/templates/src/cdn.txt +2 -0
  82. luckyd_code/templates/src/style.css +474 -0
  83. luckyd_code/templates/sw.js +31 -0
  84. luckyd_code/templates/test.html +6 -0
  85. luckyd_code/themes.py +48 -0
  86. luckyd_code/tools/__init__.py +97 -0
  87. luckyd_code/tools/agent_tools.py +65 -0
  88. luckyd_code/tools/bash.py +360 -0
  89. luckyd_code/tools/brain_tools.py +137 -0
  90. luckyd_code/tools/browser.py +369 -0
  91. luckyd_code/tools/datetime_tool.py +34 -0
  92. luckyd_code/tools/dockerfile_gen.py +212 -0
  93. luckyd_code/tools/file_ops.py +381 -0
  94. luckyd_code/tools/game_gen.py +360 -0
  95. luckyd_code/tools/git_tools.py +130 -0
  96. luckyd_code/tools/git_worktree.py +63 -0
  97. luckyd_code/tools/path_validate.py +64 -0
  98. luckyd_code/tools/project_gen.py +187 -0
  99. luckyd_code/tools/readme_gen.py +227 -0
  100. luckyd_code/tools/registry.py +157 -0
  101. luckyd_code/tools/shell_detect.py +109 -0
  102. luckyd_code/tools/web.py +89 -0
  103. luckyd_code/tools/youtube.py +187 -0
  104. luckyd_code/tools_bridge.py +144 -0
  105. luckyd_code/undo.py +126 -0
  106. luckyd_code/update.py +60 -0
  107. luckyd_code/verify.py +360 -0
  108. luckyd_code/web_app.py +176 -0
  109. luckyd_code/web_routes/__init__.py +23 -0
  110. luckyd_code/web_routes/background.py +73 -0
  111. luckyd_code/web_routes/brain.py +109 -0
  112. luckyd_code/web_routes/cost.py +12 -0
  113. luckyd_code/web_routes/files.py +133 -0
  114. luckyd_code/web_routes/memories.py +94 -0
  115. luckyd_code/web_routes/misc.py +67 -0
  116. luckyd_code/web_routes/project.py +48 -0
  117. luckyd_code/web_routes/review.py +20 -0
  118. luckyd_code/web_routes/sessions.py +44 -0
  119. luckyd_code/web_routes/settings.py +43 -0
  120. luckyd_code/web_routes/static.py +70 -0
  121. luckyd_code/web_routes/update.py +19 -0
  122. luckyd_code/web_routes/ws.py +237 -0
  123. luckyd_code-1.2.2.dist-info/METADATA +297 -0
  124. luckyd_code-1.2.2.dist-info/RECORD +127 -0
  125. luckyd_code-1.2.2.dist-info/WHEEL +4 -0
  126. luckyd_code-1.2.2.dist-info/entry_points.txt +3 -0
  127. luckyd_code-1.2.2.dist-info/licenses/LICENSE +21 -0
luckyd_code/cli.py ADDED
@@ -0,0 +1,894 @@
1
+ import json
2
+ import os
3
+ import signal
4
+ import subprocess
5
+ import sys
6
+ import io
7
+ import time
8
+ from pathlib import Path
9
+
10
# Force UTF-8 output on Windows consoles so Rich's Unicode glyphs render
# instead of raising UnicodeEncodeError under the legacy cp125x codecs.
if sys.platform == "win32":
    for _name in ("stdout", "stderr"):
        _stream = getattr(sys, _name)
        try:
            # Preferred: keep the existing wrapper (and its buffering) and
            # switch its encoding in place (TextIOWrapper.reconfigure, 3.7+).
            _stream.reconfigure(encoding="utf-8", errors="replace")
        except (AttributeError, ValueError):
            # Fallback: re-wrap the raw buffer. Guard for streams with no
            # .buffer (already-replaced streams under pythonw or pytest
            # capture), where the original unguarded access raised
            # AttributeError at import time.
            _buffer = getattr(_stream, "buffer", None)
            if _buffer is not None:
                setattr(sys, _name, io.TextIOWrapper(_buffer, encoding="utf-8", errors="replace"))
14
+
15
+ from rich.console import Console
16
+
17
+ from ._data_dir import data_path
18
+ from .api import stream_chat, test_connection, _repair_json
19
+ from .config import Config
20
+ from .context import ConversationContext
21
+ from .tools import get_default_registry
22
+ from .tools.agent_tools import set_repl
23
+ from .permissions.manager import check_permission
24
+ from . import memory, settings as cfg
25
+ from .memory import MemoryManager
26
+ from .hooks import get_hook_runner
27
+ from .cost_tracker import CostTracker
28
+ from . import update as updater
29
+ from .log import get_logger
30
+ from .themes import get_theme
31
+ from .mcp.client import MCPManager
32
+ from .background import BackgroundAgent
33
+ from .brain import KnowledgeGraph, Retriever, ContextAssembler
34
+ from .cli_utils import console, init_prompt_session, read_input, play_completion_sound
35
+ from .git.auto_commit import auto_commit, collect_modified_paths
36
+ from .verify import run_verify_pipeline, pipeline_all_passed, pipeline_feedback
37
+
38
+ class Repl:
39
    def __init__(self, config: Config, daemon: bool = False):
        """Wire up the interactive REPL: tools, context, MCP, brain, hooks.

        Args:
            config: Validated runtime configuration (model, provider, limits).
            daemon: When True, the background audit daemon is started
                alongside the REPL (see run()).
        """
        self.config = config
        self.config.validate()
        self.registry = get_default_registry()
        self.context = ConversationContext(
            config.system_prompt,
            max_messages=config.max_context_messages,
        )
        self.session = init_prompt_session()
        self.mcp = MCPManager()
        self.background = BackgroundAgent(config)
        self.background.load_history()
        self.brain = KnowledgeGraph()
        self.brain.load()
        # RAG machinery: retriever is created lazily (see _get_rag_retriever)
        self._rag_retriever: Retriever | None = None
        self._rag_assembler = ContextAssembler()
        self.memory_mgr = MemoryManager()
        self.hooks = get_hook_runner()
        self.cost_tracker = CostTracker()
        self.file_watcher = None
        # Ctrl+C handling state: set by the signal handler, polled by loops
        self._stop_requested = False
        self._first_sigint_at = 0.0
        # Reasoning content captured from the last "done" stream event.
        # Passed to add_assistant_message so the context retains reasoning.
        self._pending_reasoning: str = ""
        # Whether to run the background audit daemon alongside the REPL
        self._daemon_enabled: bool = daemon
        self._audit_daemon_thread = None
        self._audit_daemon = None
        # Register this REPL with agent tools so tools can reach back into it
        set_repl(self)

        # Load theme
        settings = cfg.load_settings()
        self.theme_name = settings.get("theme", "dark")
        # Update console in BOTH cli.py and cli_utils.py so that all
        # Rich output (including prompts and sounds) shares the theme.
        from . import cli_utils as _cli_utils_mod
        _new_console = Console(theme=get_theme(self.theme_name))
        _cli_utils_mod.console = _new_console
        global console
        console = _new_console
81
    def run(self):
        """Main REPL loop: connect, load context, then read/eval until EOF.

        Installs SIGINT/SIGTERM handlers (``_handle_signal`` — defined
        outside this view), runs session hooks, and recovers from chat
        errors by resetting the conversation context.
        """
        self._running = True
        signal.signal(signal.SIGINT, self._handle_signal)
        signal.signal(signal.SIGTERM, self._handle_signal)

        # Run session-start hooks
        self.hooks.run_hook("onSessionStart", {
            "model": self.config.model,
            "provider": self.config.provider,
        })

        # Initialize everything under a single spinner
        ok = False
        msg = ""
        with console.status("Starting...", spinner="dots"):
            # Test API connection
            ok, msg = test_connection(self.config.api_key, self.config.base_url)
            # Load MCP tools
            self._init_mcp()
            # Load memory (project indexing, brain)
            self._load_memory()

        if not ok:
            console.print(f"[error]API connection failed: {msg}[/error]")
            self._prompt_for_api_key()
            # Retry after key entry
            with console.status("Reconnecting...", spinner="dots"):
                ok, msg = test_connection(self.config.api_key, self.config.base_url)
            if ok:
                console.print("[green]Connected![/green]")
            else:
                console.print(f"[error]Still failed: {msg}[/error]")

        # One-line startup banner: version, tool count, indexed symbols
        tool_count = len(self.registry.list_tools()) + len(self.mcp.get_all_tools())
        symbol_summary = f"{self.brain.stats.get('node_count', 0)} symbols" if self.brain.nodes else ""
        console.print(f"[dim]LuckyD Code v{updater.get_version()} — {tool_count} tools{', ' + symbol_summary if symbol_summary else ''} — /help for commands[/dim]")

        # Start background audit daemon if requested via --daemon flag
        if self._daemon_enabled:
            self._start_audit_daemon()


        while True:
            # Reset stop flag for each new prompt cycle
            self._stop_requested = False
            self._first_sigint_at = 0.0

            user_input = read_input(self.session)
            # read_input signals EOF (Ctrl+D / closed stdin) with a sentinel
            if user_input == "__EOF__":
                print()
                break
            # None means the read was interrupted; re-prompt
            if user_input is None:
                print()
                continue

            user_input = user_input.strip()
            if not user_input:
                continue

            # Slash commands bypass the model entirely
            if user_input.startswith("/"):
                from .cli_commands.dispatcher import handle_command
                handle_command(self, user_input)
                continue

            self.context.add_user_message(user_input)
            try:
                self._chat_loop(user_input)
            except Exception as e:
                get_logger().error("Chat error: %s", e, exc_info=True)
                console.print(f"[red]Something went wrong: {e}[/red]")
                # Reset context to recover
                self.context = ConversationContext(
                    self.config.system_prompt,
                    max_messages=self.config.max_context_messages,
                )
        print()
        self.hooks.run_hook("onSessionEnd")
        self._cleanup()
159
+
160
+ def _init_mcp(self):
161
+ """Initialize MCP servers from settings."""
162
+ settings = cfg.load_settings()
163
+ self.mcp.load_from_config(settings)
164
+
165
    def _load_memory(self):
        """Seed the conversation context with persistent memory blocks.

        Inserts, in order after the system prompt: merged CLAUDE.md +
        session memories, project index, and knowledge-graph summary —
        each wrapped in a tag so later passes can detect duplicates.
        Finally restores any auto-saved conversation from the recovery
        file, consuming (deleting) it on success.
        """
        md = memory.load_claude_md()

        # Merge session memories into the project memory block so the model
        # sees one coherent memory context instead of two overlapping blocks
        session_memories = self.memory_mgr.get_all_memories_formatted()
        if md and session_memories:
            merged = md + "\n\n" + session_memories
        elif session_memories:
            merged = session_memories
        else:
            merged = md or ""

        if merged:
            # Index 1 places the block right after the system prompt
            self.context.messages.insert(1, {
                "role": "user",
                "content": f"<claude-md>{merged}</claude-md>",
            })

        # Smart project indexing
        from .indexer import index_project
        project_context = index_project()
        if project_context:
            # Keep project context after the memory block when one exists
            idx = 2 if merged else 1
            has_context = any(
                isinstance(m.get("content"), str) and m["content"].startswith("<project-context>")
                for m in self.context.messages
            )
            if not has_context:
                self.context.messages.insert(idx, {
                    "role": "user",
                    "content": f"<project-context>\n{project_context}\n</project-context>",
                })

        # Load knowledge graph
        if self.brain.nodes:
            brain_summary = self.brain.summarize()
            # Dedupe on the <knowledge-graph> tag; assumes summarize()
            # starts its output with that tag — TODO confirm in brain module
            has_brain = any(
                isinstance(m.get("content"), str) and m["content"].startswith("<knowledge-graph>")
                for m in self.context.messages
            )
            if not has_brain:
                self.context.messages.insert(1, {
                    "role": "user",
                    "content": brain_summary,
                })

        # Silently restore auto-saved conversation from recovery file
        try:
            recovery_file = data_path("recovery.json")
            if recovery_file.exists():
                data = json.loads(recovery_file.read_text(encoding="utf-8"))
                # Only restore when there is more than just the system prompt
                if len(data) > 1:
                    self.context.messages = data
                    get_logger().info("Restored %d messages from recovery", len(data) - 1)
                # Consume the recovery file either way
                recovery_file.unlink(missing_ok=True)
        except Exception:
            # Best-effort: a corrupt recovery file must not block startup
            get_logger().warning("Failed to check recovery file", exc_info=True)
223
+
224
+ def _prompt_for_api_key(self):
225
+ """Prompt user to enter a new API key and save it to .env."""
226
+ from .cli_utils import read_input
227
+ console.print("\n[bold]Enter your DEEPSEEK_API_KEY:[/bold] (or press Enter to skip)")
228
+ new_key = read_input(self.session)
229
+ if new_key and new_key.strip():
230
+ new_key = new_key.strip()
231
+ self.config.api_key = new_key
232
+ env_path = Path(__file__).parent.parent / ".env"
233
+ if env_path.exists():
234
+ lines = env_path.read_text(encoding="utf-8").splitlines()
235
+ found = False
236
+ for i, line in enumerate(lines):
237
+ if line.strip().startswith("DEEPSEEK_API_KEY="):
238
+ lines[i] = f"DEEPSEEK_API_KEY={new_key}"
239
+ found = True
240
+ break
241
+ if not found:
242
+ lines.append(f"DEEPSEEK_API_KEY={new_key}")
243
+ env_path.write_text("\n".join(lines) + "\n", encoding="utf-8")
244
+ else:
245
+ env_path.write_text(f"DEEPSEEK_API_KEY={new_key}\n", encoding="utf-8")
246
+ console.print("[green]Key saved to .env[/green]")
247
+
248
+ def _get_rag_retriever(self) -> Retriever:
249
+ """Lazy init the RAG retriever."""
250
+ if self._rag_retriever is None:
251
+ self._rag_retriever = Retriever()
252
+ return self._rag_retriever
253
+
254
    def _inject_rag_context(self, force: bool = False):
        """Silently search for context relevant to the user's latest message and inject it.

        Args:
            force: When True, any previously injected <rag-context> blocks
                are removed and replaced with fresh results; when False,
                injection is skipped if a block already exists.

        Never raises: all failures are logged and swallowed so RAG issues
        cannot break the chat loop.
        """
        try:
            # Find the most recent plain-text user message
            user_msg = None
            for m in reversed(self.context.messages):
                if m.get("role") == "user" and isinstance(m.get("content"), str):
                    user_msg = m["content"]
                    break

            # Skip trivially short prompts — not worth a retrieval pass
            if not user_msg or len(user_msg.strip()) < 15:
                return

            retriever = self._get_rag_retriever()
            results = retriever.search(user_msg, k=5)
            if not results:
                return

            if force:
                # Drop stale rag blocks before inserting the refreshed one
                self.context.messages = [
                    m for m in self.context.messages
                    if not (isinstance(m.get("content"), str) and m["content"].startswith("<rag-context>"))
                ]

            if not force:
                # Dedupe: only one rag block per conversation unless forced
                has_rag = any(
                    isinstance(m.get("content"), str) and m["content"].startswith("<rag-context>")
                    for m in self.context.messages
                )
                if has_rag:
                    return

            context_block = self._rag_assembler.assemble(results, max_tokens=1500, max_chunks=5)
            if context_block:
                # Insert right after the system prompt
                self.context.messages.insert(1, {
                    "role": "user",
                    "content": f"<rag-context>\n{context_block}\n</rag-context>",
                })
        except Exception:
            get_logger().warning("RAG context injection failed", exc_info=True)
293
+
294
+ def _get_reasoner_model(self) -> str:
295
+ """Return the model name to use for complex/reasoning tasks based on provider."""
296
+ from .router import select_model
297
+ return select_model("complex task", recent_tool_count=6,
298
+ preferred_model=self.config.model)
299
+
300
    def _chat_loop(self, user_prompt: str = ""):
        """Drive one user turn: stream, execute tools, verify, auto-commit.

        Loops up to ``max_iterations`` model/tool rounds. Each round streams
        a response (with model fallback), executes any tool calls, runs the
        verification gate and auto-tests on modified files, and finally
        auto-commits the turn's edits when enabled.

        Args:
            user_prompt: The raw user message, used for the auto-commit
                message at the end of the turn.
        """
        max_iterations = 100
        iteration = 0
        tool_call_count = 0

        # Per-turn state for auto-commit
        all_tool_calls: list[dict] = []
        tool_args_map: dict[str, dict] = {}  # tool_call_id → parsed args
        # Track all files modified this turn for verification
        modified_files_this_turn: list[str] = []

        settings = cfg.load_settings()
        auto_route = settings.get("auto_route", True)
        auto_commit_enabled = settings.get("auto_commit", True)
        verify_enabled = settings.get("verify_edits", True)
        verify_retries = settings.get("verify_retries", 3)
        project_root = str(self.config.working_directory or os.getcwd())

        from .router import resolve_initial_route, escalate_tier
        current_tier = 2
        active_model = self.config.model
        t0 = time.time()
        text_buffer = ""

        while iteration < max_iterations:
            # Honor Ctrl+C / /stop between rounds; keep partial output
            if self._stop_requested:
                if text_buffer:
                    self.context.add_assistant_message(content=text_buffer)
                console.print("\n[dim]⏹ Stopped[/dim]")
                self._stop_requested = False
                return

            iteration += 1
            messages = self.context.get_messages()
            text_buffer = ""
            pending_tool_calls = None
            tool_reasoning = ""

            all_tools = self.registry.list_tools()
            all_tools.extend(self.mcp.get_all_tools())

            # Model routing: initial route on round 1, escalation after
            user_msgs = [m for m in messages if m.get("role") == "user"]
            user_text = user_msgs[-1].get("content", "") if user_msgs else ""
            if auto_route and user_text:
                if iteration == 1 and tool_call_count == 0:
                    result = resolve_initial_route(
                        user_text, tool_call_count, self.config.provider,
                        self.config.model, auto_route,
                        config=self.config,
                    )
                else:
                    result = escalate_tier(
                        user_text, tool_call_count, self.config.provider,
                        self.config.model, active_model, current_tier, auto_route,
                    )
                active_model = result.model
                current_tier = result.tier

            if iteration == 1:
                console.print(f"[dim]... {int(time.time() - t0)}s[/dim]")

            # RAG: inject once up front, then refresh every 3 tool rounds
            if iteration == 1:
                self._inject_rag_context()
                messages = self.context.get_messages()
            elif iteration % 3 == 0 and tool_call_count > 0:
                self._inject_rag_context(force=True)
                messages = self.context.get_messages()

            if iteration == 1 and tool_call_count == 0:
                try:
                    # Use 1/4 of the model's full context budget as the
                    # pre-turn compaction trigger. The old value (28 000)
                    # was far too small for a 800 K-token context window
                    # and caused history to be wiped after just a few exchanges.
                    context_limit = self.context._token_compact_threshold // 4
                    est_tokens = self.context.estimate_tokens()
                    if est_tokens > context_limit:
                        self.context.compact(
                            self.config, active_model, keep_last=6,
                            on_compact=lambda s, c: self.memory_mgr.save_conversation_summary(s, c),
                        )
                        messages = self.context.get_messages()
                except Exception:
                    get_logger().warning("Auto-compaction failed", exc_info=True)

            print()
            text_buffer, pending_tool_calls, tool_reasoning, active_model = self._stream_with_fallback(
                messages, all_tools, active_model, current_tier,
            )

            if pending_tool_calls:
                tool_call_count = self._execute_tool_calls(
                    pending_tool_calls, tool_reasoning, text_buffer,
                    tool_call_count, t0, tool_args_map,
                )
                all_tool_calls.extend(pending_tool_calls)

                # Track files modified by Write/Edit tools
                for tc in pending_tool_calls:
                    if tc["function"]["name"] in ("Write", "Edit"):
                        tid = tc["id"]
                        args = tool_args_map.get(tid, {})
                        fp = args.get("file_path", "")
                        if fp and fp not in modified_files_this_turn:
                            modified_files_this_turn.append(fp)

                # ── VERIFICATION GATE ─────────────────────────────────
                # Run multi-pass verification on all modified files
                if verify_enabled and modified_files_this_turn:
                    self._run_verification_gate(
                        modified_files_this_turn, project_root, verify_retries,
                    )

                # Fix-until-green: auto-run tests after any file write/edit
                test_feedback = self._maybe_run_tests(pending_tool_calls)
                if test_feedback:
                    self.context.add_user_message(test_feedback)
                # Tool round done — loop for the model's next response
                continue

            # No tool calls: this is the final answer for the turn
            if text_buffer:
                self.context.add_assistant_message(
                    content=text_buffer,
                    reasoning_content=self._pending_reasoning or None,
                )
                self._pending_reasoning = ""

            break

        if iteration >= max_iterations:
            console.print(f"[red]Error: Maximum tool call iterations ({max_iterations}) reached. Use /clear to reset.[/red]")
            play_completion_sound(success=False)
        else:
            play_completion_sound(success=True)

        # Auto-commit any files the agent wrote or edited this turn
        if auto_commit_enabled and all_tool_calls:
            modified = collect_modified_paths(all_tool_calls, tool_args_map)
            if modified:
                sha = auto_commit(
                    user_prompt=user_prompt,
                    modified_paths=modified,
                    cwd=self.config.working_directory,
                    enabled=True,
                )
                if sha:
                    console.print(f"[dim]git: auto-committed {len(modified)} file(s) [{sha}][/dim]")
446
+
447
+ def _fallback_models(self, active_model, current_tier):
448
+ """Generator yielding (model, api_key, base_url) for the fallback chain."""
449
+ from .router import DEEPSEEK_FALLBACK_MODELS
450
+
451
+ models_tried = set()
452
+
453
+ models_tried.add(active_model)
454
+ yield active_model, self.config.api_key, self.config.base_url
455
+
456
+ for model in DEEPSEEK_FALLBACK_MODELS:
457
+ if model not in models_tried:
458
+ models_tried.add(model)
459
+ yield model, self.config.api_key, self.config.base_url
460
+
461
    def _stream_with_fallback(self, messages, all_tools, active_model, current_tier):
        """Stream response with model fallback chain.

        Tries each model from ``_fallback_models`` in turn, rendering
        reasoning/text chunks live and handling auth failures by prompting
        once for a new key. Records usage and runs pre/post chat hooks.

        Returns:
            ``(text_buffer, pending_tool_calls, tool_reasoning, model_used)``.
        """
        self.hooks.run_hook("preChat", {
            "model": active_model,
            "message_count": str(self.context.count_messages()),
        })

        text_buffer = ""
        pending_tool_calls = None
        tool_reasoning = ""
        last_error = None
        _reasoning_started = False  # track first reasoning chunk for header

        for attempt_model, api_key, base_url in self._fallback_models(active_model, current_tier):
            stream_failed = False
            auth_retried = False

            # NOTE(review): as written, every path out of this while body
            # ends in a break, so it never actually loops; after an auth
            # retry the request is re-attempted via the NEXT model in the
            # fallback chain (which re-reads the updated key) — confirm
            # that is the intended retry behavior.
            while True:
                stream_failed = False
                _reasoning_started = False
                for event_type, data in stream_chat(
                    messages=messages,
                    tools=all_tools,
                    model=attempt_model,
                    api_key=api_key,
                    base_url=base_url,
                    max_tokens=self.config.max_tokens,
                    temperature=self.config.temperature,
                ):
                    # Check for user-requested stop (Ctrl+C / /stop)
                    if self._stop_requested:
                        stream_failed = True
                        break

                    if event_type == "model_not_found":
                        stream_failed = True
                        break
                    elif event_type == "reasoning":
                        # Stream DeepSeek Reasoner chain-of-thought live in dim style
                        if not _reasoning_started:
                            _reasoning_started = True
                            sys.stdout.write("\n")
                            console.print("[dim]\u22ef thinking[/dim]")
                        console.print(data, style="dim", end="", highlight=False)
                        sys.stdout.flush()
                    elif event_type == "text":
                        if _reasoning_started:
                            # Separator between reasoning and answer
                            sys.stdout.write("\n")
                            console.print("[dim]\u2500\u2500\u2500[/dim]")
                            _reasoning_started = False
                        sys.stdout.write(data)
                        sys.stdout.flush()
                        text_buffer += data
                    elif event_type == "tool_calls":
                        pending_tool_calls, tool_reasoning = data
                    elif event_type == "error":
                        # Heuristic auth detection across provider error shapes
                        is_auth = (
                            "401" in str(data)
                            or "authentication" in str(data).lower()
                            or "unauthorized" in str(data).lower()
                            or "invalid api key" in str(data).lower()
                        )
                        if is_auth and not auth_retried:
                            # Discard partial output, ask for a new key once
                            text_buffer = ""
                            pending_tool_calls = None
                            console.print("\n[red]API key rejected.[/red]")
                            self._prompt_for_api_key()
                            api_key = self.config.api_key
                            auth_retried = True
                            stream_failed = True
                            break
                        console.print(f"[red]API Error: {data}[/red]")
                        stream_failed = True
                        last_error = data
                        break
                    elif event_type == "done":
                        content, reasoning = data
                        # Store reasoning so _chat_loop can include it when
                        # adding the message to context. Do NOT clear
                        # text_buffer here — leave it non-empty so the
                        # loop finishes normally.
                        self._pending_reasoning = reasoning
                        pending_tool_calls = None
                        stream_failed = False
                        break

                if not stream_failed:
                    break
                if auth_retried and stream_failed:
                    auth_retried = False
                    break
                break

            if self._stop_requested:
                break
            if not stream_failed:
                break
            # Non-model errors (network, quota, ...) are not fixed by
            # switching models — give up on the chain
            if last_error and "model" not in str(last_error).lower():
                break
        else:
            # for/else: every fallback model was tried and failed
            detail = f" Last error: {str(last_error)[:200]}" if last_error else ""
            get_logger().error("All models exhausted.%s", detail)

        # Rough token accounting: ~4 chars per token heuristic
        total_chars = sum(len(str(m.get("content", ""))) for m in messages)
        self.cost_tracker.record_usage(
            model=attempt_model,
            input_tokens=max(total_chars // 4, 1),
            output_tokens=max(len(text_buffer) // 4, 1),
        )
        self.hooks.run_hook("postChat", {
            "model": attempt_model,
            "tool_calls": str(len(pending_tool_calls)) if pending_tool_calls else "0",
        })

        return text_buffer, pending_tool_calls, tool_reasoning, attempt_model
577
+
578
    def _execute_tool_calls(self, pending_tool_calls, tool_reasoning, text_buffer,
                            tool_call_count, t0=0, tool_args_map: dict | None = None):
        """Execute tool calls with permissions, hooks, and result rendering.

        For each call: parse arguments, check the permission gate, run
        ``preToolUse`` hooks (which may block the call), dispatch to either
        the MCP manager or the local registry, then record the result in
        the conversation context.

        Args:
            tool_args_map: Optional dict filled with tool_call_id → parsed
                args, used later for auto-commit path tracking.

        Returns:
            The updated running tool-call count for the turn.
        """
        self.context.add_assistant_message(
            text_buffer or None,
            tool_calls=pending_tool_calls,
            reasoning_content=tool_reasoning or None,
        )

        total = len(pending_tool_calls)
        # Streamed text doesn't end with a newline, so force one here to
        # prevent tool stamps from appearing glued to the last word of output.
        sys.stdout.write("\n")
        sys.stdout.flush()
        for i, tc in enumerate(pending_tool_calls, 1):
            name = tc["function"]["name"]
            raw_args = tc["function"]["arguments"]
            try:
                # _repair_json tolerates slightly-malformed model output
                args = json.loads(_repair_json(raw_args)) if raw_args else {}
            except json.JSONDecodeError:
                self.context.add_tool_result(
                    tool_call_id=tc["id"], tool_name=name,
                    result=f"Error: invalid JSON in tool arguments: {raw_args[:200]}",
                )
                continue

            # Record parsed args for auto-commit path tracking
            if tool_args_map is not None:
                tool_args_map[tc["id"]] = args

            if not check_permission(name):
                self.context.add_tool_result(
                    tool_call_id=tc["id"], tool_name=name,
                    result="Permission denied",
                )
                continue

            hook_results = self.hooks.run_hook("preToolUse", {
                "tool_name": name,
                "tool_args": str(args)[:500],
            })
            # Any single hook can veto the tool call
            if any(not r.allow for r in hook_results):
                self.context.add_tool_result(
                    tool_call_id=tc["id"], tool_name=name,
                    result="Tool blocked by preToolUse hook",
                )
                continue

            elapsed = int(time.time() - t0) if t0 else 0
            console.print(f"[dim]{name} ({i}/{total}) {elapsed}s[/dim]")

            # mcp_-prefixed tools are routed to the MCP manager
            if name.startswith("mcp_"):
                result = self.mcp.execute(name, args)
            else:
                result = self.registry.execute(name, args)

            self.hooks.run_hook("postToolUse", {
                "tool_name": name,
                "tool_result_length": str(len(result)),
            })

            self.context.add_tool_result(
                tool_call_id=tc["id"], tool_name=name, result=result,
            )
            tool_call_count += 1

        return tool_call_count
645
+
646
    def _maybe_run_tests(self, tool_calls: list) -> str | None:
        """After Write/Edit tool calls, auto-run the test suite.

        Returns a failure message for the model to fix, or None if tests
        passed (or no test runner was found).
        """
        write_tools = {"Write", "Edit"}
        did_write = any(tc["function"]["name"] in write_tools for tc in tool_calls)
        if not did_write:
            return None

        # Respect the user's auto_test setting (defaults to on)
        settings = cfg.load_settings()
        if not settings.get("auto_test", True):
            return None

        cwd = str(self.config.working_directory or os.getcwd())
        runner_cmd = self._detect_test_runner(cwd)
        if not runner_cmd:
            return None

        console.print(f"[dim]Running tests: {runner_cmd}[/dim]")
        try:
            # shell=True is required: runner_cmd is a self-constructed
            # command string (quoted paths, `2>&1` redirects), not user input
            result = subprocess.run(
                runner_cmd,
                shell=True,
                capture_output=True,
                text=True,
                timeout=60,
                cwd=cwd,
            )
            if result.returncode == 0:
                output_preview = (result.stdout or "").strip()[:300]
                console.print("[dim green]Tests passed[/dim green]"
                              + (f" — {output_preview}" if output_preview else ""))
                return None
            # Tests failed — collect output and feed it back to the model
            full_output = ((result.stdout or "") + (result.stderr or "")).strip()
            truncated = full_output[:3000]
            if len(full_output) > 3000:
                truncated += f"\n... ({len(full_output) - 3000} chars truncated)"
            console.print("[dim red]Tests failed — asking model to fix...[/dim red]")
            return (
                f"Tests failed after your last change. Please fix all failures before continuing.\n"
                f"\n```\n{truncated}\n```"
            )
        except subprocess.TimeoutExpired:
            # A hung suite must not block the chat loop
            console.print("[dim]Test run timed out (60s) — skipping[/dim]")
            return None
        except Exception as e:
            # Best-effort feature: never let the runner itself crash a turn
            get_logger().warning("Test runner error: %s", e)
            return None
697
+
698
+ def _detect_test_runner(self, cwd: str) -> str | None:
699
+ """Detect the appropriate test runner for the project.
700
+
701
+ Checks in priority order: pytest, jest/vitest, cargo, go test.
702
+ Prefers the virtualenv binary on Windows over the global one.
703
+ """
704
+ root = Path(cwd)
705
+
706
+ # Python: pytest
707
+ has_pytest_cfg = (
708
+ (root / "pytest.ini").exists()
709
+ or (root / "pyproject.toml").exists()
710
+ or (root / "setup.cfg").exists()
711
+ or (root / "tests").is_dir()
712
+ )
713
+ if has_pytest_cfg:
714
+ if sys.platform == "win32":
715
+ venv_pytest = root / ".venv" / "Scripts" / "pytest.exe"
716
+ if venv_pytest.exists():
717
+ return f'"{venv_pytest}" -x -q --tb=short'
718
+ else:
719
+ venv_pytest = root / ".venv" / "bin" / "pytest"
720
+ if venv_pytest.exists():
721
+ return f'"{venv_pytest}" -x -q --tb=short'
722
+ return "pytest -x -q --tb=short"
723
+
724
+ # JavaScript/TypeScript: vitest or jest
725
+ pkg = root / "package.json"
726
+ if pkg.exists():
727
+ try:
728
+ import json as _json
729
+ data = _json.loads(pkg.read_text(encoding="utf-8"))
730
+ scripts = data.get("scripts", {})
731
+ if "test" in scripts:
732
+ return "npm test -- --passWithNoTests 2>&1"
733
+ except Exception:
734
+ pass
735
+
736
+ # Rust: cargo test
737
+ if (root / "Cargo.toml").exists():
738
+ return "cargo test 2>&1"
739
+
740
+ # Go: go test
741
+ if (root / "go.mod").exists():
742
+ return "go test ./... 2>&1"
743
+
744
+ return None
745
+
746
def _run_verification_gate(
    self,
    modified_files: list[str],
    project_root: str,
    max_retries: int = 3,
):
    """Run the multi-pass verification pipeline on modified files.

    Only ``.py`` files are checked.  If verification fails, feedback is
    injected into the conversation context as a user message so the model
    can fix the issues on its next turn.

    Args:
        modified_files: Paths of files changed by the last edit.
        project_root: Root directory handed to the verify pipeline.
        max_retries: Extra pipeline runs allowed per file before giving up.
    """
    for file_path in modified_files:
        # The verify pipeline only understands Python sources.
        if not file_path.endswith(".py"):
            continue

        for attempt in range(max_retries + 1):
            results = run_verify_pipeline(
                file_path=file_path,
                project_root=project_root,
                run_lint=False,
                run_consistency=True,
                run_tests=False,  # tests handled separately by _maybe_run_tests
            )

            if pipeline_all_passed(results):
                if attempt == 0:
                    # Only show first-attempt success as dim
                    console.print(f"[dim]✓ verify: {Path(file_path).name}[/dim]")
                else:
                    console.print(
                        f"[dim green]✓ verify: {Path(file_path).name} "
                        f"(fixed after {attempt} attempt(s))[/dim green]"
                    )
                break

            # Build feedback for the model
            feedback = pipeline_feedback(results)
            console.print(
                f"[dim yellow]⚠ verify: {Path(file_path).name} "
                f"(attempt {attempt + 1}/{max_retries + 1})[/dim yellow]"
            )

            if attempt >= max_retries:
                # Retries exhausted: log and surface a final failure message.
                get_logger().warning(
                    "Verification still failing after %d retries for %s",
                    max_retries, file_path,
                )
                # Inject failure as user message so the model sees it next turn
                self.context.add_user_message(
                    f"⚠ Verification failed for {file_path} after {max_retries + 1} "
                    f"attempts. Please fix these issues:\n\n{feedback}"
                )
                break

            # Inject feedback for the model to fix
            # NOTE(review): nothing in this loop modifies file_path between
            # attempts, so a retry can only pass if run_verify_pipeline itself
            # changes state; otherwise each retry just re-runs the pipeline on
            # identical content and injects another, near-duplicate feedback
            # message. Confirm whether the pipeline auto-fixes — if not, this
            # should break after the first injection.
            self.context.add_user_message(
                f"Verification found issues in {file_path}. "
                f"Fix them before proceeding:\n\n{feedback}"
            )
            # Don't break here — the model will fix on the next turn
807
def _handle_command(self, cmd: str):
    """Delegate a command string to the CLI command dispatcher."""
    # Local import: the dispatcher is only needed when a command is run.
    from .cli_commands import dispatcher
    dispatcher.handle_command(self, cmd)
811
def _save_state(self):
    """Persist the current configuration and the selected model name.

    Best-effort: any failure is logged and reported, never raised.
    """
    try:
        self.config.save()
        from . import settings
        settings.save_setting("model_name", self.config.model)
    except Exception:
        get_logger().warning("Failed to save state", exc_info=True)
        console.print("[dim]Failed to save state[/dim]")
821
def _start_audit_daemon(self) -> None:
    """Start AuditDaemon.run_forever() in a background daemon thread."""
    import asyncio
    import threading
    from pathlib import Path as _Path

    from .audit_daemon import AuditDaemon

    root = str(self.config.working_directory or _Path.cwd())
    daemon = AuditDaemon(self.config, project_root=root)
    self._audit_daemon = daemon

    def _drive() -> None:
        # The worker thread needs its own event loop to run the async daemon.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(daemon.run_forever())
        finally:
            loop.close()

    worker = threading.Thread(target=_drive, name="audit-daemon", daemon=True)
    worker.start()
    self._audit_daemon_thread = worker
    console.print("[dim]Audit daemon started in background[/dim]")
845
+ def _stop_audit_daemon(self) -> None:
846
+ """Release the daemon lock so the background thread can exit cleanly."""
847
+ if self._audit_daemon is not None:
848
+ self._audit_daemon._release_lock()
849
+ self._audit_daemon = None
850
+
851
+ def _cleanup(self):
852
+ """Graceful cleanup on shutdown."""
853
+ self._save_state()
854
+ self._auto_save_conversation()
855
+ self._stop_audit_daemon()
856
+ if self.file_watcher and self.file_watcher.is_running:
857
+ self.file_watcher.stop()
858
+ self.mcp.close_all()
859
+
860
+ def _auto_save_conversation(self):
861
+ """Save the last N messages to a recovery file for SIGTERM protection."""
862
+ try:
863
+ recovery_dir = data_path()
864
+ recovery_dir.mkdir(parents=True, exist_ok=True)
865
+ recovery_file = recovery_dir / "recovery.json"
866
+
867
+ msgs = self.context.get_messages()
868
+ if len(msgs) <= 1:
869
+ return
870
+
871
+ keep = [msgs[0]] + msgs[-10:]
872
+ recovery_file.write_text(
873
+ json.dumps(keep, indent=2, default=str),
874
+ encoding="utf-8",
875
+ )
876
+ except Exception:
877
+ get_logger().warning("Auto-save conversation failed", exc_info=True)
878
+
879
+ def _handle_signal(self, signum, frame):
880
+ if signum == signal.SIGINT:
881
+ now = time.time()
882
+ # Double-tap SIGINT within 2 seconds → force exit
883
+ if self._stop_requested and (now - self._first_sigint_at) < 2.0:
884
+ console.print("\n[dim]Exiting...[/dim]")
885
+ self._running = False
886
+ self._cleanup()
887
+ sys.exit(0)
888
+ # First SIGINT → request stop of current operation
889
+ self._stop_requested = True
890
+ self._first_sigint_at = now
891
+ elif signum == signal.SIGTERM:
892
+ self._running = False
893
+ self._cleanup()
894
+ sys.exit(0)