luckyd-code 1.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- luckyd_code/__init__.py +54 -0
- luckyd_code/__main__.py +5 -0
- luckyd_code/_agent_loop.py +551 -0
- luckyd_code/_data_dir.py +73 -0
- luckyd_code/agent.py +38 -0
- luckyd_code/analytics/__init__.py +18 -0
- luckyd_code/analytics/reporter.py +195 -0
- luckyd_code/analytics/scanner.py +443 -0
- luckyd_code/analytics/smells.py +316 -0
- luckyd_code/analytics/trends.py +303 -0
- luckyd_code/api.py +473 -0
- luckyd_code/audit_daemon.py +845 -0
- luckyd_code/autonomous_fixer.py +473 -0
- luckyd_code/background.py +159 -0
- luckyd_code/backup.py +237 -0
- luckyd_code/brain/__init__.py +84 -0
- luckyd_code/brain/assembler.py +100 -0
- luckyd_code/brain/chunker.py +345 -0
- luckyd_code/brain/constants.py +73 -0
- luckyd_code/brain/embedder.py +163 -0
- luckyd_code/brain/graph.py +311 -0
- luckyd_code/brain/indexer.py +316 -0
- luckyd_code/brain/parser.py +140 -0
- luckyd_code/brain/retriever.py +234 -0
- luckyd_code/cli.py +894 -0
- luckyd_code/cli_commands/__init__.py +1 -0
- luckyd_code/cli_commands/audit.py +120 -0
- luckyd_code/cli_commands/background.py +83 -0
- luckyd_code/cli_commands/brain.py +87 -0
- luckyd_code/cli_commands/config.py +75 -0
- luckyd_code/cli_commands/dispatcher.py +695 -0
- luckyd_code/cli_commands/sessions.py +41 -0
- luckyd_code/cli_entry.py +147 -0
- luckyd_code/cli_utils.py +112 -0
- luckyd_code/config.py +205 -0
- luckyd_code/context.py +214 -0
- luckyd_code/cost_tracker.py +209 -0
- luckyd_code/error_reporter.py +508 -0
- luckyd_code/exceptions.py +39 -0
- luckyd_code/export.py +126 -0
- luckyd_code/feedback_analyzer.py +290 -0
- luckyd_code/file_watcher.py +258 -0
- luckyd_code/git/__init__.py +11 -0
- luckyd_code/git/auto_commit.py +157 -0
- luckyd_code/git/tools.py +85 -0
- luckyd_code/hooks.py +236 -0
- luckyd_code/indexer.py +280 -0
- luckyd_code/init.py +39 -0
- luckyd_code/keybindings.py +77 -0
- luckyd_code/log.py +55 -0
- luckyd_code/mcp/__init__.py +6 -0
- luckyd_code/mcp/client.py +184 -0
- luckyd_code/memory/__init__.py +19 -0
- luckyd_code/memory/manager.py +339 -0
- luckyd_code/metrics/__init__.py +5 -0
- luckyd_code/model_registry.py +131 -0
- luckyd_code/orchestrator.py +204 -0
- luckyd_code/permissions/__init__.py +1 -0
- luckyd_code/permissions/manager.py +103 -0
- luckyd_code/planner.py +361 -0
- luckyd_code/plugins.py +91 -0
- luckyd_code/py.typed +0 -0
- luckyd_code/retry.py +57 -0
- luckyd_code/router.py +417 -0
- luckyd_code/sandbox.py +156 -0
- luckyd_code/self_critique.py +2 -0
- luckyd_code/self_improve.py +274 -0
- luckyd_code/sessions.py +114 -0
- luckyd_code/settings.py +72 -0
- luckyd_code/skills/__init__.py +8 -0
- luckyd_code/skills/review.py +22 -0
- luckyd_code/skills/security.py +17 -0
- luckyd_code/tasks/__init__.py +1 -0
- luckyd_code/tasks/manager.py +102 -0
- luckyd_code/templates/icon-192.png +0 -0
- luckyd_code/templates/icon-512.png +0 -0
- luckyd_code/templates/index.html +1965 -0
- luckyd_code/templates/manifest.json +14 -0
- luckyd_code/templates/src/app.js +694 -0
- luckyd_code/templates/src/body.html +767 -0
- luckyd_code/templates/src/cdn.txt +2 -0
- luckyd_code/templates/src/style.css +474 -0
- luckyd_code/templates/sw.js +31 -0
- luckyd_code/templates/test.html +6 -0
- luckyd_code/themes.py +48 -0
- luckyd_code/tools/__init__.py +97 -0
- luckyd_code/tools/agent_tools.py +65 -0
- luckyd_code/tools/bash.py +360 -0
- luckyd_code/tools/brain_tools.py +137 -0
- luckyd_code/tools/browser.py +369 -0
- luckyd_code/tools/datetime_tool.py +34 -0
- luckyd_code/tools/dockerfile_gen.py +212 -0
- luckyd_code/tools/file_ops.py +381 -0
- luckyd_code/tools/game_gen.py +360 -0
- luckyd_code/tools/git_tools.py +130 -0
- luckyd_code/tools/git_worktree.py +63 -0
- luckyd_code/tools/path_validate.py +64 -0
- luckyd_code/tools/project_gen.py +187 -0
- luckyd_code/tools/readme_gen.py +227 -0
- luckyd_code/tools/registry.py +157 -0
- luckyd_code/tools/shell_detect.py +109 -0
- luckyd_code/tools/web.py +89 -0
- luckyd_code/tools/youtube.py +187 -0
- luckyd_code/tools_bridge.py +144 -0
- luckyd_code/undo.py +126 -0
- luckyd_code/update.py +60 -0
- luckyd_code/verify.py +360 -0
- luckyd_code/web_app.py +176 -0
- luckyd_code/web_routes/__init__.py +23 -0
- luckyd_code/web_routes/background.py +73 -0
- luckyd_code/web_routes/brain.py +109 -0
- luckyd_code/web_routes/cost.py +12 -0
- luckyd_code/web_routes/files.py +133 -0
- luckyd_code/web_routes/memories.py +94 -0
- luckyd_code/web_routes/misc.py +67 -0
- luckyd_code/web_routes/project.py +48 -0
- luckyd_code/web_routes/review.py +20 -0
- luckyd_code/web_routes/sessions.py +44 -0
- luckyd_code/web_routes/settings.py +43 -0
- luckyd_code/web_routes/static.py +70 -0
- luckyd_code/web_routes/update.py +19 -0
- luckyd_code/web_routes/ws.py +237 -0
- luckyd_code-1.2.2.dist-info/METADATA +297 -0
- luckyd_code-1.2.2.dist-info/RECORD +127 -0
- luckyd_code-1.2.2.dist-info/WHEEL +4 -0
- luckyd_code-1.2.2.dist-info/entry_points.txt +3 -0
- luckyd_code-1.2.2.dist-info/licenses/LICENSE +21 -0
luckyd_code/__init__.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
"""LuckyD Code — AI coding assistant in your terminal."""
|
|
2
|
+
|
|
3
|
+
__version__ = "1.2.1"
|
|
4
|
+
__author__ = "LuckyD Code"
|
|
5
|
+
__license__ = "MIT"
|
|
6
|
+
|
|
7
|
+
# ---------------------------------------------------------------------------
|
|
8
|
+
# Core symbols — imported eagerly because they are lightweight and always used.
|
|
9
|
+
# ---------------------------------------------------------------------------
|
|
10
|
+
from .cli_entry import main
|
|
11
|
+
from .config import Config
|
|
12
|
+
from .api import stream_chat, test_connection
|
|
13
|
+
from .context import ConversationContext
|
|
14
|
+
from .cost_tracker import CostTracker
|
|
15
|
+
from .hooks import HookRunner, get_hook_runner
|
|
16
|
+
from .router import resolve_initial_route, escalate_tier
|
|
17
|
+
from .model_registry import get_models_by_tier
|
|
18
|
+
|
|
19
|
+
# ---------------------------------------------------------------------------
|
|
20
|
+
# Heavy sub-packages (memory, tools, brain, settings) are imported lazily so
|
|
21
|
+
# that a bare `import luckyd_code` doesn't pull in sentence-transformers,
|
|
22
|
+
# FAISS, Playwright, or the full tool registry at cold-start. They become
|
|
23
|
+
# available as normal attributes the first time they are accessed.
|
|
24
|
+
# ---------------------------------------------------------------------------
|
|
25
|
+
_LAZY_SUBPACKAGES = {"memory", "settings", "tools", "brain"}
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def __getattr__(name: str):
    """PEP 562 hook: load a heavy sub-package on first attribute access."""
    if name not in _LAZY_SUBPACKAGES:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    import importlib

    module = importlib.import_module(f".{name}", package=__name__)
    # Cache on the package so subsequent accesses are instant.
    globals()[name] = module
    return module
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
__all__ = [
    # Eagerly-imported core symbols (see imports above)
    "main",
    "Config",
    "stream_chat",
    "test_connection",
    "ConversationContext",
    "CostTracker",
    "HookRunner",
    "get_hook_runner",
    "resolve_initial_route",
    "escalate_tier",
    "get_models_by_tier",
    # Lazily-loaded sub-packages (resolved via module __getattr__)
    "memory",
    "settings",
    "tools",
    "brain",
]
|
luckyd_code/_agent_loop.py
ADDED
|
@@ -0,0 +1,551 @@
|
|
|
1
|
+
"""Enhanced agentic execution loop — top-1% harness.
|
|
2
|
+
|
|
3
|
+
Architecture:
|
|
4
|
+
1. THINK — Model reasons; context-overflow protection runs before every turn
|
|
5
|
+
2. ACT — Execute tool calls (parallel where possible, results truncated)
|
|
6
|
+
3. VERIFY — Syntax → Lint → Test check on any written files
|
|
7
|
+
4. RECOVER — If verification fails, feed error back and retry (up to N times);
|
|
8
|
+
model escalates to a stronger tier after repeated failures
|
|
9
|
+
|
|
10
|
+
Improvements over the previous version:
|
|
11
|
+
- Stuck-loop detection (same tool+args repeated → inject nudge and break)
|
|
12
|
+
- Turn budget injection (model warned when ≤ 2 turns remain)
|
|
13
|
+
- Mid-loop model escalation on repeated verify failures
|
|
14
|
+
- Tool result truncation (large outputs capped before context injection)
|
|
15
|
+
- Re-read-after-write (file existence + size verified after Write/Edit)
|
|
16
|
+
- Context-overflow protection (auto-compact when token budget exceeded)
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
import hashlib
|
|
22
|
+
import json
|
|
23
|
+
import logging
|
|
24
|
+
import os
|
|
25
|
+
from collections import deque
|
|
26
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
27
|
+
from typing import Callable, Deque, Dict, List, Optional, Any
|
|
28
|
+
|
|
29
|
+
from .api import stream_chat, _repair_json
|
|
30
|
+
from .context import ConversationContext
|
|
31
|
+
|
|
32
|
+
_log = logging.getLogger(__name__)
|
|
33
|
+
|
|
34
|
+
__all__ = ["run_agent_loop", "RunConfig", "LoopResult"]
|
|
35
|
+
|
|
36
|
+
# ── tunables ──────────────────────────────────────────────────────────────────
|
|
37
|
+
_MAX_VERIFY_RETRIES = 3 # verify-retry cycles before giving up
|
|
38
|
+
_MAX_PARALLEL_TOOLS = 4 # max concurrent read-only tool threads
|
|
39
|
+
_MAX_TOOL_RESULT_CHARS = 8_000 # truncate tool results longer than this
|
|
40
|
+
_STUCK_WINDOW = 3 # identical tool-call hashes in a row = stuck
|
|
41
|
+
_TURN_BUDGET_WARN = 2 # inject budget warning when ≤ N turns remain
|
|
42
|
+
# Model escalation ladder — used when verify keeps failing
|
|
43
|
+
_ESCALATION_LADDER = [
|
|
44
|
+
"deepseek-v4-flash",
|
|
45
|
+
"deepseek-v4-pro",
|
|
46
|
+
]
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
# ── config / result ────────────────────────────────────────────────────────────
|
|
50
|
+
|
|
51
|
+
class RunConfig:
    """Configuration for an agent loop run.

    Callback contract (all optional):
        on_text(chunk)             — streamed assistant text chunk
        on_tool_start(name, i, n)  — tool ``name``, call i of n in this batch
        on_tool_end(name, result)  — raw (pre-truncation) tool result
        on_verify(feedback)        — human-readable verify-pipeline report
    """

    # __slots__ keeps per-run config objects small and typo-proof.
    __slots__ = (
        "max_turns", "label", "verify_edits", "max_verify_retries",
        "run_tests", "test_runner_cmd", "project_root",
        "on_text", "on_tool_start", "on_tool_end",
        "on_verify",
    )

    def __init__(
        self,
        max_turns: int = 10,
        label: str = "agent",
        verify_edits: bool = False,
        max_verify_retries: int = _MAX_VERIFY_RETRIES,
        run_tests: bool = False,
        test_runner_cmd: Optional[str] = None,
        project_root: str = "",
        on_text: Optional[Callable[[str], None]] = None,
        on_tool_start: Optional[Callable[[str, int, int], None]] = None,
        on_tool_end: Optional[Callable[[str, str], None]] = None,
        on_verify: Optional[Callable[[str], None]] = None,
    ):
        self.max_turns = max_turns                    # max THINK/ACT iterations
        self.label = label                            # human-readable agent name
        self.verify_edits = verify_edits              # run verify pipeline after writes
        self.max_verify_retries = max_verify_retries  # recovery cycles before giving up
        self.run_tests = run_tests                    # include test stage in verify
        # Command line for the test stage; semantics of None defined by
        # .verify.run_verify_pipeline — presumably auto-detection; TODO confirm.
        self.test_runner_cmd = test_runner_cmd
        self.project_root = project_root              # passed through to verify pipeline
        self.on_text = on_text
        self.on_tool_start = on_tool_start
        self.on_tool_end = on_tool_end
        self.on_verify = on_verify
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
class LoopResult:
    """Aggregated outcome of one agent-loop run."""

    __slots__ = ("text", "tool_calls_executed", "files_modified",
                 "verification_passed", "escalated_model")

    def __init__(self):
        # Final assistant text; filled in when the loop terminates.
        self.text: str = ""
        # Number of tool invocations performed across all turns.
        self.tool_calls_executed: int = 0
        # Paths touched by Write/Edit tools, in execution order.
        self.files_modified: list[str] = []
        # Set to False when the verify pipeline still fails after all retries.
        self.verification_passed: bool = True
        # Name of the stronger model, when escalation happened mid-run.
        self.escalated_model: Optional[str] = None
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
# ── helpers ───────────────────────────────────────────────────────────────────
|
|
103
|
+
|
|
104
|
+
def _truncate_tool_result(result: str) -> str:
|
|
105
|
+
"""Cap a tool result at _MAX_TOOL_RESULT_CHARS to protect context budget."""
|
|
106
|
+
if len(result) <= _MAX_TOOL_RESULT_CHARS:
|
|
107
|
+
return result
|
|
108
|
+
head = result[:_MAX_TOOL_RESULT_CHARS // 2]
|
|
109
|
+
tail = result[-(_MAX_TOOL_RESULT_CHARS // 4):]
|
|
110
|
+
trimmed = len(result) - len(head) - len(tail)
|
|
111
|
+
return (
|
|
112
|
+
f"{head}\n\n"
|
|
113
|
+
f"[... {trimmed:,} characters trimmed — output too large ...]\n\n"
|
|
114
|
+
f"{tail}"
|
|
115
|
+
)
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def _tool_call_hash(tc: dict) -> str:
|
|
119
|
+
"""Stable hash of (tool_name, arguments) for stuck detection."""
|
|
120
|
+
name = tc.get("function", {}).get("name", "")
|
|
121
|
+
args = tc.get("function", {}).get("arguments", "")
|
|
122
|
+
return hashlib.md5(f"{name}:{args}".encode("utf-8")).hexdigest()
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def _verify_write(file_path: str) -> Optional[str]:
|
|
126
|
+
"""Confirm a write actually landed — return error string or None if ok."""
|
|
127
|
+
try:
|
|
128
|
+
stat = os.stat(file_path)
|
|
129
|
+
if stat.st_size == 0:
|
|
130
|
+
return f"Write produced an empty file: {file_path}"
|
|
131
|
+
return None
|
|
132
|
+
except FileNotFoundError:
|
|
133
|
+
return f"Write failed — file not found after write: {file_path}"
|
|
134
|
+
except OSError as e:
|
|
135
|
+
return f"Could not verify write for {file_path}: {e}"
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
def _escalate_model(current_model: str) -> Optional[str]:
|
|
139
|
+
"""Return the next model up the escalation ladder, or None if at the top."""
|
|
140
|
+
try:
|
|
141
|
+
idx = _ESCALATION_LADDER.index(current_model)
|
|
142
|
+
if idx + 1 < len(_ESCALATION_LADDER):
|
|
143
|
+
return _ESCALATION_LADDER[idx + 1]
|
|
144
|
+
except ValueError:
|
|
145
|
+
pass
|
|
146
|
+
return None
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
# ── parallel tool execution ───────────────────────────────────────────────────
|
|
150
|
+
|
|
151
|
+
def _ingest_tool_result(
    name: str,
    result: str,
    args: dict,
    tc_id: str,
    context: ConversationContext,
    modified_files: list[str],
) -> None:
    """Truncate, record, and post-validate one tool result."""
    context.add_tool_result(
        tool_call_id=tc_id,
        tool_name=name,
        result=_truncate_tool_result(result),
    )
    # Only file-writing tools need the re-read-after-write check.
    if name not in ("Write", "Edit"):
        return
    fp = args.get("file_path") or args.get("path", "")
    if not fp:
        return
    modified_files.append(fp)
    problem = _verify_write(fp)
    if problem:
        context.add_user_message(
            f"⚠️ Write verification failed: {problem}\n"
            "Please retry the write operation."
        )
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
def _execute_tool_calls_parallel(
    pending_tool_calls: list,
    registry,
    context: ConversationContext,
    on_start: Optional[Callable[[str, int, int], None]] = None,
    on_end: Optional[Callable[[str, str], None]] = None,
) -> list[str]:
    """Execute tool calls, parallelising independent read-only ones.

    Write-conflict tools run sequentially to prevent race conditions.
    All tool results are truncated before being added to the context.
    Write/Edit results trigger a re-read-after-write check.

    Returns the list of file paths modified by Write/Edit calls.
    """
    WRITE_CONFLICT_TOOLS = {"Write", "Edit", "Bash", "GitCommit", "GitPush", "GitAdd"}
    modified_files: list[str] = []
    total = len(pending_tool_calls)

    def _run_one(tc: dict, idx: int) -> tuple[int, str, str, dict]:
        """Execute a single tool call. Returns (orig_idx, name, result, args)."""
        name = tc["function"]["name"]
        raw_args = tc["function"]["arguments"]
        try:
            args = json.loads(_repair_json(raw_args)) if raw_args else {}
        except json.JSONDecodeError:
            # NOTE(review): this early return skips on_start/on_end for the
            # failed call — UI callbacks never see it; confirm that's intended.
            return idx, name, f"Error: invalid JSON in tool arguments: {raw_args[:200]}", {}
        if on_start:
            on_start(name, idx + 1, total)
        result = registry.execute(name, args)
        if on_end:
            # on_end receives the raw, pre-truncation result.
            on_end(name, result)
        return idx, name, result, args

    # Separate into parallel (read-only) and sequential (write) groups
    parallel_group: list[tuple[int, dict]] = []
    sequential_group: list[tuple[int, dict]] = []
    for i, tc in enumerate(pending_tool_calls):
        name = tc["function"]["name"]
        if name in WRITE_CONFLICT_TOOLS:
            sequential_group.append((i, tc))
        else:
            parallel_group.append((i, tc))

    # Run read-only tools in parallel.
    # Results are ingested in *completion* order, not request order; the
    # tool_call_id passed to add_tool_result keeps each result matched to
    # its call, so ordering of the context messages should not matter —
    # TODO confirm downstream consumers don't assume request order.
    if parallel_group:
        with ThreadPoolExecutor(max_workers=min(len(parallel_group), _MAX_PARALLEL_TOOLS)) as ex:
            futures = {
                ex.submit(_run_one, tc, orig_idx): orig_idx
                for orig_idx, tc in parallel_group
            }
            for future in as_completed(futures):
                try:
                    orig_idx, name, result, args = future.result()
                    _ingest_tool_result(
                        name, result, args,
                        pending_tool_calls[orig_idx]["id"],
                        context, modified_files,
                    )
                except Exception as e:
                    # Broad catch: also swallows failures raised while
                    # ingesting the result, not just tool execution errors.
                    _log.warning("Parallel tool execution failed: %s", e)

    # Run write-conflict tools sequentially (original relative order preserved)
    for orig_idx, tc in sequential_group:
        _, name, result, args = _run_one(tc, orig_idx)
        _ingest_tool_result(
            name, result, args, tc["id"],
            context, modified_files,
        )

    return modified_files
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
# ── verification / recovery ───────────────────────────────────────────────────
|
|
248
|
+
|
|
249
|
+
def _check_files_verification(
    files_modified: list[str],
    run_cfg: RunConfig,
    context: ConversationContext,
) -> tuple[bool, list[str]]:
    """Run the verify pipeline on each modified file.

    Returns (all_passed, failed_files).
    Split out of _verify_and_recover to lower its cyclomatic complexity.
    """
    # Local import — presumably avoids a circular import with .verify;
    # TODO confirm before hoisting to module level.
    from .verify import run_verify_pipeline, pipeline_all_passed, pipeline_feedback

    all_passed = True
    failed_files: list[str] = []

    for fp in files_modified:
        results = run_verify_pipeline(
            file_path=fp,
            project_root=run_cfg.project_root,
            run_lint=False,  # lint stage intentionally disabled here
            run_consistency=True,
            run_tests=run_cfg.run_tests,
            test_runner_cmd=run_cfg.test_runner_cmd,
        )
        feedback = pipeline_feedback(results)
        # Surface the report to the UI even when the file passed.
        if run_cfg.on_verify:
            run_cfg.on_verify(feedback)
        if not pipeline_all_passed(results):
            all_passed = False
            failed_files.append(fp)
            # Feed the failure back to the model as a user message so the
            # next turn can attempt a fix.
            context.add_user_message(
                f"Verification failed for {fp}:\n\n{feedback}\n\n"
                "Please fix the issues and try again."
            )

    return all_passed, failed_files
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
def _verify_and_recover(
    context: ConversationContext,
    config,
    tools: list,
    active_model: str,
    files_modified: list[str],
    run_cfg: RunConfig,
    registry=None,
) -> tuple[bool, str]:
    """Run verification on modified files and retry on failure.

    Returns (passed, active_model) — the model may have been escalated
    during recovery attempts.

    NOTE(review): ``registry`` defaults to None, but if a recovery turn
    produces tool calls they are executed through it — a None registry
    would crash inside _execute_tool_calls_parallel. Confirm all callers
    that enable verify_edits pass a registry.
    """
    if not run_cfg.verify_edits or not files_modified:
        return True, active_model

    # Up to max_verify_retries recovery cycles, plus the initial check.
    for retry in range(run_cfg.max_verify_retries + 1):
        all_passed, failed_files = _check_files_verification(
            files_modified, run_cfg, context,
        )

        if all_passed:
            return True, active_model

        if retry >= run_cfg.max_verify_retries:
            _log.warning(
                "Verification still failing after %d retries for: %s",
                run_cfg.max_verify_retries, failed_files,
            )
            return False, active_model

        # Mid-loop model escalation — try a stronger model on retry
        escalated = _escalate_model(active_model)
        if escalated and escalated != active_model:
            active_model = escalated
            _log.info("Escalating to %s for verify-recovery retry %d", active_model, retry + 1)
            context.add_user_message(
                f"[System: escalating to {active_model} for better recovery]"
            )

        _log.info("Verification retry %d/%d for: %s",
                  retry + 1, run_cfg.max_verify_retries, failed_files)

        turn_text, pending_tool_calls, tool_reasoning, error = _stream_turn(
            context.get_messages(), tools, active_model, config, run_cfg,
        )
        if error:
            _log.error("Verify-recover stream error: %s", error)
            return False, active_model

        if pending_tool_calls:
            context.add_assistant_message(
                turn_text or None,
                tool_calls=pending_tool_calls,
                reasoning_content=tool_reasoning or None,
            )
            # NOTE(review): recovery-turn tool calls do not forward the
            # on_tool_start/on_tool_end callbacks — UI stays silent here.
            new_modified = _execute_tool_calls_parallel(
                pending_tool_calls, registry=registry, context=context,
            )
            files_modified.extend(new_modified)
            continue
        else:
            # Model answered without tools: record it and stop retrying;
            # verification is not re-run, so this path reports failure.
            context.add_assistant_message(content=turn_text)
            break

    return False, active_model
|
|
354
|
+
|
|
355
|
+
|
|
356
|
+
# ── helpers for the main loop ─────────────────────────────────────────────────
|
|
357
|
+
|
|
358
|
+
def _stream_turn(
    messages: list,
    tools: list,
    active_model: str,
    config,
    rc: RunConfig,
) -> tuple:
    """Stream one model turn. Returns (turn_text, pending_tool_calls, tool_reasoning, error_msg)."""
    pending_tool_calls = None
    tool_reasoning = ""
    turn_text = ""

    for event_type, data in stream_chat(
        messages=messages,
        tools=tools,
        model=active_model,
        api_key=config.api_key,
        base_url=config.base_url,
        max_tokens=config.max_tokens,
        temperature=config.temperature,
    ):
        if event_type == "text":
            # Incremental chunk: accumulate and forward to the UI callback.
            turn_text += data
            if rc.on_text:
                rc.on_text(data)
        elif event_type == "done":
            # "done" carries a tuple whose first element replaces the
            # accumulated text — presumably the canonical final string
            # from .api.stream_chat; TODO confirm the event payload shape.
            turn_text = data[0]
        elif event_type == "tool_calls":
            pending_tool_calls, tool_reasoning = data
        elif event_type in ("error", "model_not_found"):
            # Abort the turn: empty text, no tool calls, labelled error.
            return "", None, "", f"[{rc.label}] Error: {data}"

    return turn_text, pending_tool_calls, tool_reasoning, None
|
|
391
|
+
|
|
392
|
+
|
|
393
|
+
def _process_tool_calls_turn(
    pending_tool_calls: list,
    turn_text: str,
    tool_reasoning: str,
    context: ConversationContext,
    registry,
    config,
    tools: list,
    active_model: str,
    rc: RunConfig,
    result: LoopResult,
    recent_hashes: Deque[str],
) -> tuple[bool, str]:
    """Handle tool calls: stuck detection, execution, verification.

    Returns (should_break, active_model).
    """
    # Stuck-loop detection: fingerprint the whole batch of calls.
    batch_hash = hashlib.md5(
        "|".join(_tool_call_hash(tc) for tc in pending_tool_calls).encode()
    ).hexdigest()

    # recent_hashes is a deque(maxlen=_STUCK_WINDOW); seeing this hash
    # _STUCK_WINDOW - 1 times already means the same batch is repeating.
    if list(recent_hashes).count(batch_hash) >= _STUCK_WINDOW - 1:
        _log.warning("[%s] Stuck loop detected — same tool batch repeated %d times",
                     rc.label, _STUCK_WINDOW)
        # The repeated batch is still recorded and executed one final time —
        # presumably so every tool_call message has a matching result in the
        # context (API protocol requirement); TODO confirm.
        context.add_assistant_message(
            turn_text or None,
            tool_calls=pending_tool_calls,
            reasoning_content=tool_reasoning or None,
        )
        _execute_tool_calls_parallel(
            pending_tool_calls, registry, context,
            on_start=rc.on_tool_start, on_end=rc.on_tool_end,
        )
        context.add_user_message(
            "You appear to be stuck in a loop repeating the same tool calls. "
            "Stop and explain what you have accomplished so far, what is "
            "blocking you, and what the user should do next."
        )
        return True, active_model

    recent_hashes.append(batch_hash)

    # Record the assistant turn (text + tool calls) before executing tools.
    context.add_assistant_message(
        turn_text or None,
        tool_calls=pending_tool_calls,
        reasoning_content=tool_reasoning or None,
    )

    modified = _execute_tool_calls_parallel(
        pending_tool_calls, registry, context,
        on_start=rc.on_tool_start,
        on_end=rc.on_tool_end,
    )
    result.tool_calls_executed += len(pending_tool_calls)
    result.files_modified.extend(modified)

    # Verification gate after file writes
    if modified and rc.verify_edits:
        passed, active_model = _verify_and_recover(
            context, config, tools, active_model, modified, rc, registry,
        )
        if not passed:
            result.verification_passed = False
        # Record escalation whenever the active model diverged from config.
        if active_model != config.model:
            result.escalated_model = active_model

    return False, active_model
|
|
461
|
+
|
|
462
|
+
|
|
463
|
+
# ── main loop ─────────────────────────────────────────────────────────────────
|
|
464
|
+
|
|
465
|
+
def run_agent_loop(
    context: ConversationContext,
    config,
    tools: List[Dict[str, Any]],
    registry,
    max_turns: int = 10,
    label: str = "agent",
    on_text: Optional[Callable[[str], None]] = None,
    run_config: Optional[RunConfig] = None,
) -> str:
    """Run the agentic loop with verification and recovery.

    Improvements active in this version:
    ✓ Stuck-loop detection breaks infinite tool-call cycles
    ✓ Turn budget injected when ≤ 2 turns remain
    ✓ Model escalates to stronger tier on repeated verify failures
    ✓ Tool results truncated to protect context window
    ✓ Re-read-after-write confirms writes landed
    ✓ Context auto-compacted mid-loop if token budget exceeded

    Args:
        context: Conversation context (pre-loaded with user message).
        config: App config (api_key, base_url, model, etc.).
        tools: OpenAI-format tool schemas.
        registry: ToolRegistry instance.
        max_turns: Max tool-call iterations before stopping.
        label: Human label for this agent (e.g. "researcher", "coder").
        on_text: Optional callback for streamed text chunks.
        run_config: Optional RunConfig for verification settings.
            When provided, the bare max_turns/label/on_text args are ignored.

    Returns:
        Final text response from the agent.
    """
    rc = run_config or RunConfig(label=label, max_turns=max_turns, on_text=on_text)
    result = LoopResult()
    text_buffer = ""

    # The model we're currently using — may escalate during verify-recovery
    active_model: str = config.model

    # Stuck-loop detection: track hashes of recent tool-call batches
    recent_hashes: Deque[str] = deque(maxlen=_STUCK_WINDOW)

    # Budget warning: only inject once to avoid duplicate messages
    _budget_warning_sent = False

    for turn in range(rc.max_turns):
        turns_remaining = rc.max_turns - turn

        # ── context-overflow protection ──────────────────────────────────────
        # NOTE(review): reaches into a private attribute of
        # ConversationContext (_token_compact_threshold) — consider a
        # public accessor on the context instead.
        if context.estimate_tokens() > context._token_compact_threshold * 0.85:
            _log.info("[%s] Context near limit — auto-compacting before turn %d",
                      rc.label, turn + 1)
            context.compact(config, active_model, keep_last=8)

        # ── turn budget warning ──────────────────────────────────────────────
        if turns_remaining <= _TURN_BUDGET_WARN and not _budget_warning_sent:
            context.add_user_message(
                f"[System: {turns_remaining} turn(s) remaining. "
                "Wrap up your work and return a final answer now.]"
            )
            _budget_warning_sent = True

        # ── stream one turn ──────────────────────────────────────────────────
        turn_text, pending_tool_calls, tool_reasoning, error = _stream_turn(
            context.get_messages(), tools, active_model, config, rc,
        )
        if error:
            # Stream failure aborts the whole loop; the error string itself
            # is returned as the agent's answer.
            return error

        # ── tool calls ───────────────────────────────────────────────────────
        if pending_tool_calls:
            should_break, active_model = _process_tool_calls_turn(
                pending_tool_calls, turn_text, tool_reasoning,
                context, registry, config, tools, active_model, rc, result,
                recent_hashes,
            )
            if should_break:
                break
            continue

        # ── no tool calls → agent is done ────────────────────────────────────
        text_buffer = turn_text
        break

    # If every turn produced tool calls (budget exhausted or stuck-break),
    # text_buffer is still "" and the fallback string below is returned.
    result.text = text_buffer.strip() or f"({rc.label}: no response)"
    return result.text
|
luckyd_code/_data_dir.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
"""Shared data directory for LuckyD Code.
|
|
2
|
+
|
|
3
|
+
All persistent state lives under ``~/.luckyd-code/`` (user-global)
|
|
4
|
+
or ``<project>/.luckyd-code/`` (project-local).
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
import os
|
|
9
|
+
import shutil
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
_logger = logging.getLogger("luckyd_code._data_dir")
|
|
13
|
+
|
|
14
|
+
# ---------- user-global paths ----------
|
|
15
|
+
|
|
16
|
+
DATA_DIR = Path.home() / ".luckyd-code"
|
|
17
|
+
|
|
18
|
+
_LEGACY_DIR = Path.home() / ".deepseek-code"
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def ensure_data_dir() -> Path:
    """Create the user-global data dir, migrating legacy contents on first run."""
    needs_migration = _LEGACY_DIR.exists() and not DATA_DIR.exists()
    if needs_migration:
        _migrate_from_legacy()
    DATA_DIR.mkdir(parents=True, exist_ok=True)
    return DATA_DIR
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def data_path(*parts: str) -> Path:
    """Resolve *parts* inside the user-global data dir, creating it first."""
    base = ensure_data_dir()
    return base.joinpath(*parts)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def legacy_path(*parts: str) -> Path:
    """Resolve *parts* under the legacy user-global dir (no creation, no migration)."""
    return _LEGACY_DIR.joinpath(*parts)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
# ---------- project-local paths ----------

# Directory name created inside a project root for per-project state.
_PROJECT_DATA_NAME = ".luckyd-code"
# Pre-rename directory name; auto-migrated by _ensure_project_data_dir().
_LEGACY_PROJECT_DATA_NAME = ".deepseek-code"
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _ensure_project_data_dir(project_root: str | Path | None = None) -> Path:
    """Return the project-local data dir, creating it (and migrating any
    legacy directory's contents) on first use."""
    base = Path(project_root) if project_root else Path(os.getcwd())
    target = base / _PROJECT_DATA_NAME
    legacy = base / _LEGACY_PROJECT_DATA_NAME
    if legacy.exists() and not target.exists():
        try:
            _logger.info("Migrating project data from %s to %s", legacy, target)
            shutil.copytree(legacy, target, dirs_exist_ok=True)
        except Exception:
            # Migration is best-effort; a failure is logged, not raised.
            _logger.warning(
                "Could not auto-migrate project data from %s", legacy, exc_info=True
            )
    target.mkdir(parents=True, exist_ok=True)
    return target
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def project_data_path(*parts: str, root: str | Path | None = None) -> Path:
    """Resolve *parts* inside the project-local data dir, creating it first."""
    return _ensure_project_data_dir(root).joinpath(*parts)
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def project_legacy_path(*parts: str, root: str | Path | None = None) -> Path:
    """Resolve *parts* under the legacy project-local dir (no creation)."""
    # Truthiness check (not `is None`) mirrors the original: an empty-string
    # root also falls back to the current working directory.
    base = Path(root) if root else Path(os.getcwd())
    return base.joinpath(_LEGACY_PROJECT_DATA_NAME, *parts)
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def _migrate_from_legacy() -> None:
    """Best-effort copy of the legacy ~/.deepseek-code tree into DATA_DIR.

    Failures are logged and swallowed so a broken migration never blocks
    startup; ensure_data_dir() will still create a fresh DATA_DIR afterwards.
    """
    try:
        _logger.info("Migrating data from %s to %s", _LEGACY_DIR, DATA_DIR)
        shutil.copytree(_LEGACY_DIR, DATA_DIR, dirs_exist_ok=True)
        _logger.info("Migration complete")
    except Exception:
        # Deliberate broad catch: migration is best-effort; details go to the log.
        _logger.warning("Could not auto-migrate from %s", _LEGACY_DIR, exc_info=True)
|