luckyd-code 1.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- luckyd_code/__init__.py +54 -0
- luckyd_code/__main__.py +5 -0
- luckyd_code/_agent_loop.py +551 -0
- luckyd_code/_data_dir.py +73 -0
- luckyd_code/agent.py +38 -0
- luckyd_code/analytics/__init__.py +18 -0
- luckyd_code/analytics/reporter.py +195 -0
- luckyd_code/analytics/scanner.py +443 -0
- luckyd_code/analytics/smells.py +316 -0
- luckyd_code/analytics/trends.py +303 -0
- luckyd_code/api.py +473 -0
- luckyd_code/audit_daemon.py +845 -0
- luckyd_code/autonomous_fixer.py +473 -0
- luckyd_code/background.py +159 -0
- luckyd_code/backup.py +237 -0
- luckyd_code/brain/__init__.py +84 -0
- luckyd_code/brain/assembler.py +100 -0
- luckyd_code/brain/chunker.py +345 -0
- luckyd_code/brain/constants.py +73 -0
- luckyd_code/brain/embedder.py +163 -0
- luckyd_code/brain/graph.py +311 -0
- luckyd_code/brain/indexer.py +316 -0
- luckyd_code/brain/parser.py +140 -0
- luckyd_code/brain/retriever.py +234 -0
- luckyd_code/cli.py +894 -0
- luckyd_code/cli_commands/__init__.py +1 -0
- luckyd_code/cli_commands/audit.py +120 -0
- luckyd_code/cli_commands/background.py +83 -0
- luckyd_code/cli_commands/brain.py +87 -0
- luckyd_code/cli_commands/config.py +75 -0
- luckyd_code/cli_commands/dispatcher.py +695 -0
- luckyd_code/cli_commands/sessions.py +41 -0
- luckyd_code/cli_entry.py +147 -0
- luckyd_code/cli_utils.py +112 -0
- luckyd_code/config.py +205 -0
- luckyd_code/context.py +214 -0
- luckyd_code/cost_tracker.py +209 -0
- luckyd_code/error_reporter.py +508 -0
- luckyd_code/exceptions.py +39 -0
- luckyd_code/export.py +126 -0
- luckyd_code/feedback_analyzer.py +290 -0
- luckyd_code/file_watcher.py +258 -0
- luckyd_code/git/__init__.py +11 -0
- luckyd_code/git/auto_commit.py +157 -0
- luckyd_code/git/tools.py +85 -0
- luckyd_code/hooks.py +236 -0
- luckyd_code/indexer.py +280 -0
- luckyd_code/init.py +39 -0
- luckyd_code/keybindings.py +77 -0
- luckyd_code/log.py +55 -0
- luckyd_code/mcp/__init__.py +6 -0
- luckyd_code/mcp/client.py +184 -0
- luckyd_code/memory/__init__.py +19 -0
- luckyd_code/memory/manager.py +339 -0
- luckyd_code/metrics/__init__.py +5 -0
- luckyd_code/model_registry.py +131 -0
- luckyd_code/orchestrator.py +204 -0
- luckyd_code/permissions/__init__.py +1 -0
- luckyd_code/permissions/manager.py +103 -0
- luckyd_code/planner.py +361 -0
- luckyd_code/plugins.py +91 -0
- luckyd_code/py.typed +0 -0
- luckyd_code/retry.py +57 -0
- luckyd_code/router.py +417 -0
- luckyd_code/sandbox.py +156 -0
- luckyd_code/self_critique.py +2 -0
- luckyd_code/self_improve.py +274 -0
- luckyd_code/sessions.py +114 -0
- luckyd_code/settings.py +72 -0
- luckyd_code/skills/__init__.py +8 -0
- luckyd_code/skills/review.py +22 -0
- luckyd_code/skills/security.py +17 -0
- luckyd_code/tasks/__init__.py +1 -0
- luckyd_code/tasks/manager.py +102 -0
- luckyd_code/templates/icon-192.png +0 -0
- luckyd_code/templates/icon-512.png +0 -0
- luckyd_code/templates/index.html +1965 -0
- luckyd_code/templates/manifest.json +14 -0
- luckyd_code/templates/src/app.js +694 -0
- luckyd_code/templates/src/body.html +767 -0
- luckyd_code/templates/src/cdn.txt +2 -0
- luckyd_code/templates/src/style.css +474 -0
- luckyd_code/templates/sw.js +31 -0
- luckyd_code/templates/test.html +6 -0
- luckyd_code/themes.py +48 -0
- luckyd_code/tools/__init__.py +97 -0
- luckyd_code/tools/agent_tools.py +65 -0
- luckyd_code/tools/bash.py +360 -0
- luckyd_code/tools/brain_tools.py +137 -0
- luckyd_code/tools/browser.py +369 -0
- luckyd_code/tools/datetime_tool.py +34 -0
- luckyd_code/tools/dockerfile_gen.py +212 -0
- luckyd_code/tools/file_ops.py +381 -0
- luckyd_code/tools/game_gen.py +360 -0
- luckyd_code/tools/git_tools.py +130 -0
- luckyd_code/tools/git_worktree.py +63 -0
- luckyd_code/tools/path_validate.py +64 -0
- luckyd_code/tools/project_gen.py +187 -0
- luckyd_code/tools/readme_gen.py +227 -0
- luckyd_code/tools/registry.py +157 -0
- luckyd_code/tools/shell_detect.py +109 -0
- luckyd_code/tools/web.py +89 -0
- luckyd_code/tools/youtube.py +187 -0
- luckyd_code/tools_bridge.py +144 -0
- luckyd_code/undo.py +126 -0
- luckyd_code/update.py +60 -0
- luckyd_code/verify.py +360 -0
- luckyd_code/web_app.py +176 -0
- luckyd_code/web_routes/__init__.py +23 -0
- luckyd_code/web_routes/background.py +73 -0
- luckyd_code/web_routes/brain.py +109 -0
- luckyd_code/web_routes/cost.py +12 -0
- luckyd_code/web_routes/files.py +133 -0
- luckyd_code/web_routes/memories.py +94 -0
- luckyd_code/web_routes/misc.py +67 -0
- luckyd_code/web_routes/project.py +48 -0
- luckyd_code/web_routes/review.py +20 -0
- luckyd_code/web_routes/sessions.py +44 -0
- luckyd_code/web_routes/settings.py +43 -0
- luckyd_code/web_routes/static.py +70 -0
- luckyd_code/web_routes/update.py +19 -0
- luckyd_code/web_routes/ws.py +237 -0
- luckyd_code-1.2.2.dist-info/METADATA +297 -0
- luckyd_code-1.2.2.dist-info/RECORD +127 -0
- luckyd_code-1.2.2.dist-info/WHEEL +4 -0
- luckyd_code-1.2.2.dist-info/entry_points.txt +3 -0
- luckyd_code-1.2.2.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,473 @@
|
|
|
1
|
+
"""Autonomous Fixer - generates patches, validates them, and creates PRs.
|
|
2
|
+
|
|
3
|
+
Takes a Diagnosis from feedback_analyzer.py, generates a code fix via LLM,
|
|
4
|
+
applies it in a git worktree for isolation, runs the test suite, and
|
|
5
|
+
optionally creates a draft PR on GitHub.
|
|
6
|
+
|
|
7
|
+
All work is done locally. The user's API key is used for LLM calls.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations

import json
import os
import re
import subprocess
import sys
import tempfile
import urllib.parse
import uuid
from dataclasses import dataclass
from pathlib import Path
from typing import Optional

import httpx

from .feedback_analyzer import Diagnosis, _call_llm
|
|
26
|
+
|
|
27
|
+
FIX_SYSTEM_PROMPT = """You are a senior software engineer fixing a bug in the **DeepSeek Code** project.
|
|
28
|
+
|
|
29
|
+
You will receive a diagnosis of a bug and the current source code of the affected files.
|
|
30
|
+
Generate the EXACT code change needed to fix the bug.
|
|
31
|
+
|
|
32
|
+
RULES:
|
|
33
|
+
- Only change DeepSeek Code's own source code (luckyd_code/ or tests/)
|
|
34
|
+
- Make MINIMAL changes - fix the bug, nothing else
|
|
35
|
+
- Preserve all existing behavior
|
|
36
|
+
- Do NOT change imports unless absolutely necessary
|
|
37
|
+
- Output ONLY a unified diff (diff -u format)
|
|
38
|
+
- If multiple files need changes, include all diffs separated by "--- FILE ---"
|
|
39
|
+
|
|
40
|
+
Output format:
|
|
41
|
+
```diff
|
|
42
|
+
--- a/path/to/file.py
|
|
43
|
+
+++ b/path/to/file.py
|
|
44
|
+
@@ -line,count +line,count @@ context
|
|
45
|
+
-old line
|
|
46
|
+
+new line
|
|
47
|
+
```"""
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
@dataclass
class FixResult:
    """Result of an autonomous fix attempt.

    Produced by full_autonomous_pipeline(); fields are filled in
    progressively as the pipeline advances, so later fields may be empty
    when an earlier stage failed.
    """
    # The LLM diagnosis (from feedback_analyzer) this fix was generated for.
    diagnosis: Diagnosis
    # True only when the diff applied cleanly AND validation passed.
    success: bool
    # Branch the fix lives on (empty if the diff never applied).
    branch_name: str = ""
    # URL of the created PR, or a pre-filled "new PR" link (may be empty).
    pr_url: str = ""
    # The unified diff produced by the LLM.
    diff: str = ""
    # Captured output of the validation run (syntax checks + pytest).
    test_output: str = ""
    # Human-readable reason when success is False.
    error: str = ""
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def _git(*args: str, cwd: str | None = None) -> tuple[int, str, str]:
|
|
63
|
+
"""Run a git command, return (exit_code, stdout, stderr)."""
|
|
64
|
+
try:
|
|
65
|
+
r = subprocess.run(
|
|
66
|
+
["git"] + list(args),
|
|
67
|
+
capture_output=True, text=True, timeout=30,
|
|
68
|
+
cwd=cwd,
|
|
69
|
+
)
|
|
70
|
+
return r.returncode, r.stdout.strip(), r.stderr.strip()
|
|
71
|
+
except Exception as e:
|
|
72
|
+
return -1, "", str(e)
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def _read_file_safe(path: str, project_root: str) -> str:
|
|
76
|
+
"""Read a file, returning its contents or an error string.
|
|
77
|
+
|
|
78
|
+
Enforces that the resolved path stays within *project_root* so a
|
|
79
|
+
manipulated LLM response cannot read arbitrary files (e.g. ../../.env).
|
|
80
|
+
"""
|
|
81
|
+
try:
|
|
82
|
+
root = Path(project_root).resolve()
|
|
83
|
+
full = (root / path).resolve()
|
|
84
|
+
if not str(full).startswith(str(root) + os.sep) and full != root:
|
|
85
|
+
return f"[BLOCKED: path '{path}' escapes project root]"
|
|
86
|
+
if not full.exists():
|
|
87
|
+
return f"[FILE NOT FOUND: {path}]"
|
|
88
|
+
content = full.read_text(encoding="utf-8")
|
|
89
|
+
lines = content.split("\n")
|
|
90
|
+
if len(lines) > 300:
|
|
91
|
+
content = "\n".join(lines[:300]) + f"\n... (truncated, {len(lines)} total lines)"
|
|
92
|
+
return content
|
|
93
|
+
except Exception as e:
|
|
94
|
+
return f"[ERROR reading {path}: {e}]"
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def generate_fix(
    diagnosis: Diagnosis,
    api_key: str,
    project_root: str = "",
    base_url: str = "https://api.deepseek.com/v1",
    model: str = "deepseek-v4-flash",
) -> str:
    """Ask the LLM for a unified diff that fixes a diagnosed bug.

    Builds a prompt from the diagnosis plus the (truncated) source of every
    affected file, sends it with FIX_SYSTEM_PROMPT, and extracts the diff
    from the reply.

    Args:
        diagnosis: The Diagnosis from feedback_analyzer.
        api_key: DeepSeek API key.
        project_root: Project root directory.
        base_url: API base URL.
        model: Model to use.

    Returns:
        A unified diff string, or empty string on failure.
    """
    root = project_root or str(Path(__file__).resolve().parent.parent)

    # Collect the current source of each file named in the diagnosis.
    sections = [
        f"\n### {fpath}\n```python\n{_read_file_safe(fpath, root)}\n```\n"
        for fpath in diagnosis.affected_files
    ]
    file_contents = "".join(sections) or "(No affected files could be read.)"

    user_message = f"""## Bug Diagnosis

**Error:** {diagnosis.error_type}: {diagnosis.error_message}
**Root Cause:** {diagnosis.root_cause}
**Suggested Fix:** {diagnosis.fix_suggestion}
**Confidence:** {diagnosis.confidence}

## Current Code{file_contents}

Generate the exact diff to fix this bug."""

    raw = _call_llm(
        system_prompt=FIX_SYSTEM_PROMPT,
        user_message=user_message,
        api_key=api_key,
        base_url=base_url,
        model=model,
    )

    # _call_llm signals failure with an "ERROR:"-prefixed string rather
    # than raising; treat that as "no diff available".
    if raw.startswith("ERROR:"):
        return ""
    return _extract_diff(raw)
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
def _extract_diff(llm_response: str) -> str:
|
|
154
|
+
"""Extract unified diff from an LLM response that may have markdown fences."""
|
|
155
|
+
if not llm_response:
|
|
156
|
+
return ""
|
|
157
|
+
|
|
158
|
+
m = re.search(r'```diff\s*\n(.*?)```', llm_response, re.DOTALL)
|
|
159
|
+
if m:
|
|
160
|
+
return m.group(1).strip()
|
|
161
|
+
|
|
162
|
+
m = re.search(r'```\s*\n(.*?)```', llm_response, re.DOTALL)
|
|
163
|
+
if m and ('--- a/' in m.group(1) or '+++ b/' in m.group(1)):
|
|
164
|
+
return m.group(1).strip()
|
|
165
|
+
|
|
166
|
+
if '--- a/' in llm_response:
|
|
167
|
+
return llm_response.strip()
|
|
168
|
+
|
|
169
|
+
return ""
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
def apply_fix_in_worktree(
    diff: str,
    project_root: str = "",
) -> tuple[str, str]:
    """Apply *diff* inside an isolated git worktree (temp directory).

    Uses ``git worktree add`` so the user's working copy is NEVER touched.
    The fix is applied inside a throw-away directory in the system temp
    folder, on a fresh ``autofix/error-<id>`` branch.

    Returns:
        ``(worktree_path, branch_name)`` on success, or
        ``("", error_message)`` on failure.  On failure the temp worktree
        and branch are removed, so the main repository is left untouched.
    """
    if not project_root:
        project_root = str(Path(__file__).resolve().parent.parent)

    if not diff:
        return "", "Empty diff - nothing to apply"

    # Check we're in a git repo before creating anything.
    exit_code, _, stderr = _git("rev-parse", "--is-inside-work-tree", cwd=project_root)
    if exit_code != 0:
        return "", f"Not in a git repository: {stderr}"

    worktree_id = uuid.uuid4().hex[:8]
    worktree_path = str(Path(tempfile.gettempdir()) / f"deepseek-autofix-{worktree_id}")
    branch_name = f"autofix/error-{worktree_id}"

    # Create a real isolated worktree in a temp directory on a new branch.
    # This leaves the user's working copy completely untouched.
    exit_code, _, stderr = _git(
        "worktree", "add", worktree_path, "-b", branch_name,
        cwd=project_root,
    )
    if exit_code != 0:
        return "", f"Failed to create git worktree: {stderr}"

    def _cleanup() -> None:
        # Roll back everything created above; the main repo never changed.
        _git("worktree", "remove", "--force", worktree_path, cwd=project_root)
        _git("branch", "-D", branch_name, cwd=project_root)

    # Write the diff to a temp file and apply it inside the worktree.
    diff_file = Path(tempfile.gettempdir()) / f"deepseek-fix-{worktree_id}.diff"
    diff_file.write_text(diff, encoding="utf-8")

    try:
        result = subprocess.run(
            ["git", "apply", str(diff_file)],
            capture_output=True, text=True, timeout=30,
            cwd=worktree_path,
        )
        if result.returncode != 0:
            # Retry with --reject so cleanly-applying hunks still land.
            result2 = subprocess.run(
                ["git", "apply", "--reject", str(diff_file)],
                capture_output=True, text=True, timeout=30,
                cwd=worktree_path,
            )
            if result2.returncode != 0:
                _cleanup()
                return "", f"Failed to apply diff: {result.stderr.strip()}"
    except Exception as e:
        # BUGFIX: previously a timeout/OSError here propagated and leaked
        # the temp worktree and branch.  Tear them down and report instead.
        _cleanup()
        return "", f"Failed to apply diff: {e}"
    finally:
        diff_file.unlink(missing_ok=True)

    return worktree_path, branch_name
|
|
234
|
+
|
|
235
|
+
|
|
236
|
+
def validate_fix(
    worktree_path: str,
    branch_name: str = "",
) -> tuple[bool, str]:
    """Run the verification pipeline on the fix.

    Two stages: (1) byte-compile every changed ``.py`` file to catch
    syntax errors early, then (2) run the project's pytest suite inside
    the worktree so the patched code is what gets exercised.

    Args:
        worktree_path: Path to the git worktree where the diff was applied.
        branch_name: Kept for API compatibility, not used internally.

    Returns:
        ``(passed, output_text)``.
    """
    import py_compile  # hoisted out of the loop below

    results: list[str] = []

    # 1. Syntax-check changed files.
    # The diff is applied but NOT yet committed, so compare unstaged working
    # tree changes — not HEAD~1 which would compare two committed snapshots.
    _, stdout, _ = _git("diff", "--name-only", cwd=worktree_path)
    changed_files = [f for f in stdout.split("\n") if f.endswith(".py") and f.strip()]

    if not changed_files:
        # Also check staged (pre-committed) changes
        _, stdout, _ = _git("diff", "--name-only", "--cached", cwd=worktree_path)
        changed_files = [f for f in stdout.split("\n") if f.endswith(".py") and f.strip()]

    for fpath in changed_files:
        full = Path(worktree_path) / fpath
        if not full.exists():
            continue
        try:
            py_compile.compile(str(full), doraise=True)
            results.append(f" [OK] Syntax: {fpath}")
        except py_compile.PyCompileError as e:
            results.append(f" [FAIL] Syntax: {fpath} - {e}")
            return False, "\n".join(results)

    # 2. Run test suite inside the worktree so the patched code is exercised
    results.append(" Running tests...")
    try:
        proc = subprocess.run(
            # BUGFIX: use the running interpreter, not a bare "python",
            # which may be absent from PATH (python3-only installs, Windows).
            [sys.executable, "-m", "pytest", "tests/", "-x", "--timeout=60", "-q"],
            capture_output=True, text=True, timeout=120,
            cwd=worktree_path,
        )
        combined = (proc.stdout + proc.stderr).strip()
        # Keep only the tail so the report stays a manageable size.
        results.append(combined[-2000:] if len(combined) > 2000 else combined)
        if proc.returncode == 0:
            results.append(" [OK] All tests passed")
            return True, "\n".join(results)
        results.append(f" [FAIL] Tests failed (exit {proc.returncode})")
        return False, "\n".join(results)
    except subprocess.TimeoutExpired:
        results.append(" [FAIL] Tests timed out (120s)")
        return False, "\n".join(results)
    except Exception as e:
        results.append(f" [FAIL] Could not run tests: {e}")
        return False, "\n".join(results)
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
def create_pr(
    branch_name: str,
    diagnosis: Diagnosis,
    diff: str,
    test_passed: bool,
    test_output: str,
    project_root: str = "",
) -> str:
    """Create a pull request via GitHub CLI or a pre-filled browser URL.

    Pushes *branch_name* to origin, then shells out to ``gh pr create``.
    If the push fails, ``gh`` is missing, or anything else goes wrong,
    a pre-filled GitHub "new PR" URL is returned instead.

    Returns PR URL on success, or a pre-filled new-PR URL as fallback.
    """
    root = project_root or str(Path(__file__).resolve().parent.parent)

    test_badge = "[OK] tests passed" if test_passed else "[FAIL] tests failing"
    title = f"autofix: {diagnosis.error_type}: {diagnosis.error_message[:60]}"

    body = f"""## Autonomous Fix

{diagnosis.to_markdown()}

### Changes
```diff
{diff[:4000]}
```

### Validation
**Tests:** {test_badge}
```
{test_output[:2000]}
```

---
*Generated by DeepSeek Code's autonomous self-improvement system.
Please review carefully before merging.*
"""

    # Try GitHub CLI first; every failure mode falls through to the URL.
    try:
        push_rc, _, _ = _git("push", "-u", "origin", branch_name, cwd=root)
        if push_rc != 0:
            return _pr_fallback_url(title, body, branch_name)

        gh_cmd = [
            "gh", "pr", "create",
            "--title", title,
            "--body", body,
            "--base", "main",
            "--head", branch_name,
        ]
        proc = subprocess.run(
            gh_cmd,
            capture_output=True, text=True, timeout=30,
            cwd=root,
        )
        if proc.returncode == 0:
            pr_url = proc.stdout.strip()
            if pr_url:
                return pr_url
    except Exception:
        # gh not installed, timed out, or any other failure — ignore and
        # fall back to the browser URL below.
        pass

    return _pr_fallback_url(title, body, branch_name)
|
|
361
|
+
|
|
362
|
+
|
|
363
|
+
def _pr_fallback_url(title: str, body: str, branch_name: str = "") -> str:
|
|
364
|
+
"""Build a pre-filled GitHub new-PR URL (no API needed)."""
|
|
365
|
+
repo_path = "Dylanchess0320/DeepSeek-Code"
|
|
366
|
+
try:
|
|
367
|
+
result = subprocess.run(
|
|
368
|
+
["git", "remote", "get-url", "origin"],
|
|
369
|
+
capture_output=True, text=True, timeout=5,
|
|
370
|
+
)
|
|
371
|
+
remote = result.stdout.strip()
|
|
372
|
+
if "github.com" in remote:
|
|
373
|
+
m = re.search(r'github\.com[:/]([^/]+/[^/]+?)(?:\.git)?$', remote)
|
|
374
|
+
if m:
|
|
375
|
+
repo_path = m.group(1)
|
|
376
|
+
except Exception:
|
|
377
|
+
pass
|
|
378
|
+
|
|
379
|
+
if len(body) > 60000:
|
|
380
|
+
body = body[:60000] + "\n... (truncated)"
|
|
381
|
+
|
|
382
|
+
q_title = urllib.parse.quote(title, safe="")
|
|
383
|
+
q_body = urllib.parse.quote(body, safe="")
|
|
384
|
+
# Use the actual branch name in the compare URL, not the PR title
|
|
385
|
+
q_branch = urllib.parse.quote(branch_name, safe="") if branch_name else urllib.parse.quote(title[:50], safe="")
|
|
386
|
+
|
|
387
|
+
return f"https://github.com/{repo_path}/compare/main...{q_branch}?expand=1&title={q_title}&body={q_body}"
|
|
388
|
+
|
|
389
|
+
|
|
390
|
+
def full_autonomous_pipeline(
    exc: BaseException,
    api_key: str,
    project_root: str = "",
    base_url: str = "https://api.deepseek.com/v1",
    model: str = "deepseek-v4-flash",
    create_pr_flag: bool = False,
) -> FixResult:
    """Run the complete autonomous fix pipeline.

    Five sequential stages: diagnose the exception via LLM, generate a
    diff, apply it in an isolated git worktree, validate (syntax check +
    pytest), and optionally open a PR.  The temp worktree is always torn
    down at the end; the branch survives only when validation passed.

    Args:
        exc: The unhandled exception.
        api_key: DeepSeek API key.
        project_root: Project root (defaults to this package's parent).
        base_url: API base URL.
        model: LLM model.
        create_pr_flag: If True, create a PR on success.

    Returns:
        FixResult with full details.
    """
    from .feedback_analyzer import analyze_error

    if not project_root:
        project_root = str(Path(__file__).resolve().parent.parent)

    # Step 1: Diagnose
    diagnosis = analyze_error(exc, api_key, base_url, model, project_root)
    if not diagnosis:
        # Synthesize a minimal low-confidence Diagnosis so callers always
        # receive a FixResult with the basic error identity filled in.
        return FixResult(
            diagnosis=Diagnosis(
                error_type=type(exc).__name__,
                error_message=str(exc),
                root_cause="",
                affected_files=[],
                fix_suggestion="",
                confidence="low",
            ),
            success=False,
            error="LLM diagnosis failed",
        )

    # Step 2: Generate fix
    diff = generate_fix(diagnosis, api_key, project_root, base_url, model)
    if not diff:
        return FixResult(
            diagnosis=diagnosis,
            success=False,
            error="LLM fix generation failed",
        )

    # Step 3: Apply in an isolated worktree — user's working copy stays clean
    worktree_path, branch = apply_fix_in_worktree(diff, project_root)
    if not worktree_path:
        # On failure apply_fix_in_worktree returns ("", error_message), so
        # `branch` holds the error text here, not a branch name.
        return FixResult(
            diagnosis=diagnosis,
            success=False,
            diff=diff,
            error=f"Failed to apply fix: {branch}",
        )

    # Step 4: Validate inside the worktree so the patched code is what's tested
    passed, test_output = validate_fix(worktree_path, branch)

    result = FixResult(
        diagnosis=diagnosis,
        success=passed,
        branch_name=branch,
        diff=diff,
        test_output=test_output,
    )

    # Step 5: Create PR if requested and tests passed
    if create_pr_flag and passed:
        result.pr_url = create_pr(branch, diagnosis, diff, passed, test_output, project_root)

    # Always tear down the temp worktree directory.
    # If validation failed, also delete the branch — nothing worth keeping.
    # If validation passed, keep the branch so the user can merge/push it.
    _git("worktree", "remove", "--force", worktree_path, cwd=project_root)
    if not passed and branch:
        _git("branch", "-D", branch, cwd=project_root)

    return result
|
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
"""Background autonomous agent - persistent background tasks with progress reporting."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import threading
|
|
5
|
+
import time
|
|
6
|
+
from datetime import datetime
|
|
7
|
+
|
|
8
|
+
from .agent import SubAgent
|
|
9
|
+
from .log import get_logger
|
|
10
|
+
|
|
11
|
+
from ._data_dir import data_path
|
|
12
|
+
|
|
13
|
+
BACKGROUND_DIR = data_path("background")
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class BackgroundTask:
    """Represents a background task with status tracking.

    A plain mutable record; its fields are mutated by BackgroundAgent.
    """

    def __init__(self, task_id: str, description: str):
        # Unique ID, e.g. "bg_<unix-time>_<seq>" (assigned by start_task).
        self.task_id: str = task_id
        # Free-text description of the work the sub-agent should perform.
        self.description: str = description
        # Lifecycle: pending -> running -> done | error.
        self.status: str = "pending"  # pending, running, done, error
        # Full agent output; populated when the task finishes successfully.
        self.result: str = ""
        # Exception text; populated when the task fails.
        self.error: str = ""
        # ISO-8601 timestamps as strings; None until set.
        self.started_at: str | None = None
        self.finished_at: str | None = None
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class BackgroundAgent:
|
|
30
|
+
"""Manages autonomous background agents that work independently."""
|
|
31
|
+
|
|
32
|
+
def __init__(self, config):
|
|
33
|
+
self.config = config
|
|
34
|
+
self.tasks: dict[str, BackgroundTask] = {}
|
|
35
|
+
self.threads: dict[str, threading.Thread] = {}
|
|
36
|
+
self._lock = threading.Lock()
|
|
37
|
+
BACKGROUND_DIR.mkdir(parents=True, exist_ok=True)
|
|
38
|
+
|
|
39
|
+
def start_task(self, description: str) -> str:
|
|
40
|
+
"""Start a new background task and return its ID."""
|
|
41
|
+
task_id = f"bg_{int(time.time())}_{len(self.tasks)}"
|
|
42
|
+
task = BackgroundTask(task_id, description)
|
|
43
|
+
task.status = "pending"
|
|
44
|
+
task.started_at = datetime.now().isoformat()
|
|
45
|
+
|
|
46
|
+
with self._lock:
|
|
47
|
+
self.tasks[task_id] = task
|
|
48
|
+
|
|
49
|
+
thread = threading.Thread(
|
|
50
|
+
target=self._run_task,
|
|
51
|
+
args=(task_id, description),
|
|
52
|
+
daemon=True,
|
|
53
|
+
)
|
|
54
|
+
self.threads[task_id] = thread
|
|
55
|
+
thread.start()
|
|
56
|
+
|
|
57
|
+
return task_id
|
|
58
|
+
|
|
59
|
+
def _run_task(self, task_id: str, description: str):
|
|
60
|
+
"""Run a task in the background thread."""
|
|
61
|
+
with self._lock:
|
|
62
|
+
self.tasks[task_id].status = "running"
|
|
63
|
+
|
|
64
|
+
try:
|
|
65
|
+
agent = SubAgent(self.config, description)
|
|
66
|
+
result = agent.run()
|
|
67
|
+
|
|
68
|
+
with self._lock:
|
|
69
|
+
self.tasks[task_id].status = "done"
|
|
70
|
+
self.tasks[task_id].result = result
|
|
71
|
+
self.tasks[task_id].finished_at = datetime.now().isoformat()
|
|
72
|
+
|
|
73
|
+
# Save to disk
|
|
74
|
+
self._save_task(task_id)
|
|
75
|
+
|
|
76
|
+
except Exception as e:
|
|
77
|
+
with self._lock:
|
|
78
|
+
self.tasks[task_id].status = "error"
|
|
79
|
+
self.tasks[task_id].error = str(e)
|
|
80
|
+
self.tasks[task_id].finished_at = datetime.now().isoformat()
|
|
81
|
+
self._save_task(task_id)
|
|
82
|
+
|
|
83
|
+
def get_status(self, task_id: str | None = None) -> list[dict[str, object]]:
|
|
84
|
+
"""Get status of tasks. If task_id is None, return all."""
|
|
85
|
+
with self._lock:
|
|
86
|
+
if task_id:
|
|
87
|
+
tasks = [self.tasks.get(task_id)]
|
|
88
|
+
else:
|
|
89
|
+
tasks = list(self.tasks.values())
|
|
90
|
+
|
|
91
|
+
result = []
|
|
92
|
+
for t in tasks:
|
|
93
|
+
if t is None:
|
|
94
|
+
continue
|
|
95
|
+
result.append({
|
|
96
|
+
"id": t.task_id,
|
|
97
|
+
"description": t.description[:100],
|
|
98
|
+
"status": t.status,
|
|
99
|
+
"started_at": t.started_at or "",
|
|
100
|
+
"finished_at": t.finished_at or "",
|
|
101
|
+
"result_preview": t.result[:200] if t.result else "",
|
|
102
|
+
"error": t.error,
|
|
103
|
+
})
|
|
104
|
+
return result
|
|
105
|
+
|
|
106
|
+
def get_result(self, task_id: str) -> str | None:
|
|
107
|
+
"""Get the full result of a completed task."""
|
|
108
|
+
with self._lock:
|
|
109
|
+
task = self.tasks.get(task_id)
|
|
110
|
+
if task and task.status == "done":
|
|
111
|
+
return task.result
|
|
112
|
+
return None
|
|
113
|
+
|
|
114
|
+
def _save_task(self, task_id: str):
|
|
115
|
+
"""Persist task result to disk."""
|
|
116
|
+
with self._lock:
|
|
117
|
+
task = self.tasks.get(task_id)
|
|
118
|
+
if not task:
|
|
119
|
+
return
|
|
120
|
+
|
|
121
|
+
data = {
|
|
122
|
+
"id": task.task_id,
|
|
123
|
+
"description": task.description,
|
|
124
|
+
"status": task.status,
|
|
125
|
+
"result": task.result,
|
|
126
|
+
"error": task.error,
|
|
127
|
+
"started_at": task.started_at,
|
|
128
|
+
"finished_at": task.finished_at,
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
path = BACKGROUND_DIR / f"{task_id}.json"
|
|
132
|
+
try:
|
|
133
|
+
path.write_text(json.dumps(data, indent=2), encoding="utf-8")
|
|
134
|
+
except Exception:
|
|
135
|
+
get_logger().warning("Failed to save background task %s", task_id, exc_info=True)
|
|
136
|
+
|
|
137
|
+
def load_history(self):
|
|
138
|
+
"""Load past background tasks from disk."""
|
|
139
|
+
if not BACKGROUND_DIR.exists():
|
|
140
|
+
return
|
|
141
|
+
for f in sorted(BACKGROUND_DIR.glob("bg_*.json")):
|
|
142
|
+
try:
|
|
143
|
+
data = json.loads(f.read_text(encoding="utf-8"))
|
|
144
|
+
task = BackgroundTask(data["id"], data["description"])
|
|
145
|
+
task.status = data["status"]
|
|
146
|
+
task.result = data.get("result", "")
|
|
147
|
+
task.error = data.get("error", "")
|
|
148
|
+
task.started_at = data.get("started_at")
|
|
149
|
+
task.finished_at = data.get("finished_at")
|
|
150
|
+
with self._lock:
|
|
151
|
+
self.tasks[data["id"]] = task
|
|
152
|
+
except (json.JSONDecodeError, KeyError):
|
|
153
|
+
get_logger().warning("Corrupt background task file %s — deleting", f.name)
|
|
154
|
+
try:
|
|
155
|
+
f.unlink()
|
|
156
|
+
except OSError:
|
|
157
|
+
pass
|
|
158
|
+
except Exception:
|
|
159
|
+
get_logger().warning("Failed to load background task %s", f.name, exc_info=True)
|