luckyd-code 1.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- luckyd_code/__init__.py +54 -0
- luckyd_code/__main__.py +5 -0
- luckyd_code/_agent_loop.py +551 -0
- luckyd_code/_data_dir.py +73 -0
- luckyd_code/agent.py +38 -0
- luckyd_code/analytics/__init__.py +18 -0
- luckyd_code/analytics/reporter.py +195 -0
- luckyd_code/analytics/scanner.py +443 -0
- luckyd_code/analytics/smells.py +316 -0
- luckyd_code/analytics/trends.py +303 -0
- luckyd_code/api.py +473 -0
- luckyd_code/audit_daemon.py +845 -0
- luckyd_code/autonomous_fixer.py +473 -0
- luckyd_code/background.py +159 -0
- luckyd_code/backup.py +237 -0
- luckyd_code/brain/__init__.py +84 -0
- luckyd_code/brain/assembler.py +100 -0
- luckyd_code/brain/chunker.py +345 -0
- luckyd_code/brain/constants.py +73 -0
- luckyd_code/brain/embedder.py +163 -0
- luckyd_code/brain/graph.py +311 -0
- luckyd_code/brain/indexer.py +316 -0
- luckyd_code/brain/parser.py +140 -0
- luckyd_code/brain/retriever.py +234 -0
- luckyd_code/cli.py +894 -0
- luckyd_code/cli_commands/__init__.py +1 -0
- luckyd_code/cli_commands/audit.py +120 -0
- luckyd_code/cli_commands/background.py +83 -0
- luckyd_code/cli_commands/brain.py +87 -0
- luckyd_code/cli_commands/config.py +75 -0
- luckyd_code/cli_commands/dispatcher.py +695 -0
- luckyd_code/cli_commands/sessions.py +41 -0
- luckyd_code/cli_entry.py +147 -0
- luckyd_code/cli_utils.py +112 -0
- luckyd_code/config.py +205 -0
- luckyd_code/context.py +214 -0
- luckyd_code/cost_tracker.py +209 -0
- luckyd_code/error_reporter.py +508 -0
- luckyd_code/exceptions.py +39 -0
- luckyd_code/export.py +126 -0
- luckyd_code/feedback_analyzer.py +290 -0
- luckyd_code/file_watcher.py +258 -0
- luckyd_code/git/__init__.py +11 -0
- luckyd_code/git/auto_commit.py +157 -0
- luckyd_code/git/tools.py +85 -0
- luckyd_code/hooks.py +236 -0
- luckyd_code/indexer.py +280 -0
- luckyd_code/init.py +39 -0
- luckyd_code/keybindings.py +77 -0
- luckyd_code/log.py +55 -0
- luckyd_code/mcp/__init__.py +6 -0
- luckyd_code/mcp/client.py +184 -0
- luckyd_code/memory/__init__.py +19 -0
- luckyd_code/memory/manager.py +339 -0
- luckyd_code/metrics/__init__.py +5 -0
- luckyd_code/model_registry.py +131 -0
- luckyd_code/orchestrator.py +204 -0
- luckyd_code/permissions/__init__.py +1 -0
- luckyd_code/permissions/manager.py +103 -0
- luckyd_code/planner.py +361 -0
- luckyd_code/plugins.py +91 -0
- luckyd_code/py.typed +0 -0
- luckyd_code/retry.py +57 -0
- luckyd_code/router.py +417 -0
- luckyd_code/sandbox.py +156 -0
- luckyd_code/self_critique.py +2 -0
- luckyd_code/self_improve.py +274 -0
- luckyd_code/sessions.py +114 -0
- luckyd_code/settings.py +72 -0
- luckyd_code/skills/__init__.py +8 -0
- luckyd_code/skills/review.py +22 -0
- luckyd_code/skills/security.py +17 -0
- luckyd_code/tasks/__init__.py +1 -0
- luckyd_code/tasks/manager.py +102 -0
- luckyd_code/templates/icon-192.png +0 -0
- luckyd_code/templates/icon-512.png +0 -0
- luckyd_code/templates/index.html +1965 -0
- luckyd_code/templates/manifest.json +14 -0
- luckyd_code/templates/src/app.js +694 -0
- luckyd_code/templates/src/body.html +767 -0
- luckyd_code/templates/src/cdn.txt +2 -0
- luckyd_code/templates/src/style.css +474 -0
- luckyd_code/templates/sw.js +31 -0
- luckyd_code/templates/test.html +6 -0
- luckyd_code/themes.py +48 -0
- luckyd_code/tools/__init__.py +97 -0
- luckyd_code/tools/agent_tools.py +65 -0
- luckyd_code/tools/bash.py +360 -0
- luckyd_code/tools/brain_tools.py +137 -0
- luckyd_code/tools/browser.py +369 -0
- luckyd_code/tools/datetime_tool.py +34 -0
- luckyd_code/tools/dockerfile_gen.py +212 -0
- luckyd_code/tools/file_ops.py +381 -0
- luckyd_code/tools/game_gen.py +360 -0
- luckyd_code/tools/git_tools.py +130 -0
- luckyd_code/tools/git_worktree.py +63 -0
- luckyd_code/tools/path_validate.py +64 -0
- luckyd_code/tools/project_gen.py +187 -0
- luckyd_code/tools/readme_gen.py +227 -0
- luckyd_code/tools/registry.py +157 -0
- luckyd_code/tools/shell_detect.py +109 -0
- luckyd_code/tools/web.py +89 -0
- luckyd_code/tools/youtube.py +187 -0
- luckyd_code/tools_bridge.py +144 -0
- luckyd_code/undo.py +126 -0
- luckyd_code/update.py +60 -0
- luckyd_code/verify.py +360 -0
- luckyd_code/web_app.py +176 -0
- luckyd_code/web_routes/__init__.py +23 -0
- luckyd_code/web_routes/background.py +73 -0
- luckyd_code/web_routes/brain.py +109 -0
- luckyd_code/web_routes/cost.py +12 -0
- luckyd_code/web_routes/files.py +133 -0
- luckyd_code/web_routes/memories.py +94 -0
- luckyd_code/web_routes/misc.py +67 -0
- luckyd_code/web_routes/project.py +48 -0
- luckyd_code/web_routes/review.py +20 -0
- luckyd_code/web_routes/sessions.py +44 -0
- luckyd_code/web_routes/settings.py +43 -0
- luckyd_code/web_routes/static.py +70 -0
- luckyd_code/web_routes/update.py +19 -0
- luckyd_code/web_routes/ws.py +237 -0
- luckyd_code-1.2.2.dist-info/METADATA +297 -0
- luckyd_code-1.2.2.dist-info/RECORD +127 -0
- luckyd_code-1.2.2.dist-info/WHEEL +4 -0
- luckyd_code-1.2.2.dist-info/entry_points.txt +3 -0
- luckyd_code-1.2.2.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,274 @@
|
|
|
1
|
+
"""Self-improvement module — AI improves its own source code with git-based tracking."""
|
|
2
|
+
|
|
3
|
+
import subprocess
|
|
4
|
+
from dataclasses import dataclass, field
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Optional
|
|
7
|
+
|
|
8
|
+
SELF_IMPROVE_PROMPT = """You are in SELF-IMPROVEMENT MODE. Your task is to analyze and improve the LuckyD Code project's own source code.
|
|
9
|
+
|
|
10
|
+
You have full access to Read, Write, Edit, Glob, Grep, and Bash tools to explore and modify the codebase.
|
|
11
|
+
|
|
12
|
+
CRITICAL RULES — FOLLOW EXACTLY:
|
|
13
|
+
- NEVER write a file without running a syntax check immediately after
|
|
14
|
+
- A syntax check failure means you MUST fix the file before proceeding
|
|
15
|
+
- Do NOT make more than 3 files changes in a single session
|
|
16
|
+
- Read a file BEFORE editing it, every time
|
|
17
|
+
- If a test mock fails, first read the production code to verify it matches what the test expects before touching the test
|
|
18
|
+
|
|
19
|
+
Follow this protocol strictly:
|
|
20
|
+
|
|
21
|
+
STEP 1 — EXPLORE:
|
|
22
|
+
- Use Glob and Grep to understand the project structure
|
|
23
|
+
- Read key files to understand how modules work
|
|
24
|
+
- Identify areas that need improvement
|
|
25
|
+
|
|
26
|
+
STEP 2 — DIAGNOSE:
|
|
27
|
+
- Find bugs, missing features, code quality issues, or performance problems
|
|
28
|
+
- Check for error handling gaps, missing type hints, hardcoded values
|
|
29
|
+
- Look for places where the web UI or CLI could be improved
|
|
30
|
+
|
|
31
|
+
STEP 3 — PROPOSE:
|
|
32
|
+
- State clearly what you want to change and why
|
|
33
|
+
- Keep changes focused and minimal — max 3 files
|
|
34
|
+
|
|
35
|
+
STEP 4 — IMPLEMENT:
|
|
36
|
+
- Read the file first
|
|
37
|
+
- Make the targeted change using Edit/Write
|
|
38
|
+
- IMMEDIATELY run the syntax check (see STEP 5) — do NOT skip this
|
|
39
|
+
|
|
40
|
+
STEP 5 — MANDATORY SYNTAX CHECK (after EVERY file write):
|
|
41
|
+
For each .py file you edited, run:
|
|
42
|
+
python -c "import py_compile; py_compile.compile('PATH_TO_FILE', doraise=True)"
|
|
43
|
+
If it fails: fix the file immediately before touching anything else.
|
|
44
|
+
Do NOT proceed to the next file until the current one passes.
|
|
45
|
+
|
|
46
|
+
STEP 6 — REPORT:
|
|
47
|
+
- Summarize what was changed and why
|
|
48
|
+
- Note any follow-up improvements that could be made
|
|
49
|
+
|
|
50
|
+
CHANGE TRACKING:
|
|
51
|
+
- The system will automatically run tests and validate all changed files before committing
|
|
52
|
+
- If tests fail, changes will NOT be committed — so getting syntax right is essential
|
|
53
|
+
- You do NOT need to git commit — that is handled automatically
|
|
54
|
+
|
|
55
|
+
Focus areas (in priority order):
|
|
56
|
+
1. Bug fixes
|
|
57
|
+
2. Missing error handling
|
|
58
|
+
3. User-facing improvements (CLI + web UI)
|
|
59
|
+
4. Performance
|
|
60
|
+
5. Code quality (type hints, docs)
|
|
61
|
+
"""
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def get_improvement_prompt(area: str = "") -> str:
    """Return the focus prompt for a self-improvement session.

    Args:
        area: Optional focus area — one of ``"web"``, ``"cli"``,
            ``"tools"``, ``"refactor"``, ``"perf"``, ``"cleanup"``.
            Any other value (including the default empty string)
            yields the general exploration prompt.

    Returns:
        The prompt text to steer the self-improvement agent.
    """
    refactor_prompt = (
        "You are fixing a structural code smell (long function, deep nesting, too many parameters, "
        "large class, high cyclomatic complexity, or large file). Your approach should be:\n"
        "- For LONG FUNCTIONS: extract logical blocks into well-named helper functions\n"
        "- For DEEP NESTING: use early returns, guard clauses, or extract nested logic\n"
        "- For TOO MANY PARAMETERS: group related parameters into a dataclass or TypedDict\n"
        "- For LARGE CLASSES: extract cohesive groups of methods into a new class using composition\n"
        "- For HIGH COMPLEXITY: reduce branching (if/else chains → lookup dicts or polymorphism)\n"
        "- For LARGE FILES: split into smaller modules by grouping related functions/classes\n"
        "- Make MINIMAL changes — do NOT rewrite the whole file\n"
        "- Preserve all existing behavior exactly — same logic, same return values\n"
        "- After every edit, verify that imports are correct and all callers still work"
    )
    cleanup_prompt = (
        "You are fixing a lightweight code cleanliness issue. Your approach should be:\n"
        "- For TODOs: remove stale ones, or replace with actionable comments linked to issues\n"
        "- For empty files: remove them if genuinely dead code, or add a docstring explaining "
        "why they exist (e.g., namespace package marker)\n"
        "- For large non-code files: consider whether they can be compressed, split, or moved "
        "out of the source tree\n"
        "- Make MINIMAL changes — do NOT rewrite or refactor anything\n"
        "- If removing a file, FIRST verify no other module imports or references it"
    )
    # Dispatch table instead of an if/elif chain; unknown areas fall through
    # to the general exploration prompt.
    prompts = {
        "web": "Focus on improving the web UI in web_app.py and templates/index.html. Add features, fix issues, improve the UX.",
        "cli": "Focus on improving the CLI experience in cli.py. Add commands, fix issues, improve UX.",
        "tools": "Focus on improving the tool implementations. Check for bugs, error handling, and missing features.",
        "refactor": refactor_prompt,
        "perf": "Focus on performance improvements across the codebase. Look for caching opportunities, reduce API calls, optimize imports.",
        "cleanup": cleanup_prompt,
    }
    return prompts.get(
        area,
        "Explore the codebase and find the most impactful improvements to make. Fix bugs first, then add value.",
    )
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
# ------------------------------------------------------------------ #
|
|
104
|
+
# Git-based change tracking
|
|
105
|
+
# ------------------------------------------------------------------ #
|
|
106
|
+
|
|
107
|
+
@dataclass
class ImprovementReport:
    """Outcome of one self-improvement session, reconstructed from git state."""

    branch: str = ""  # branch the session ran on
    start_hash: str = ""  # short HEAD hash captured before the session
    end_hash: str = ""  # short HEAD hash captured when the report was built
    files_changed: list[str] = field(default_factory=list)  # files newly touched by the AI
    diff_summary: str = ""  # human-readable summary incl. a diff preview
    commit_hash: str = ""  # short hash of the auto-commit ("" when no commit was made)
    error: Optional[str] = None  # set when verification aborted the commit
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def _git(*args: str, cwd: Optional[str] = None) -> str:
    """Execute ``git <args>`` in *cwd* and return its stripped stdout.

    Never raises: any failure (missing git binary, timeout, OSError) is
    folded into an ``"<error: ...>"`` string so callers can surface it
    inline instead of crashing the session.
    """
    cmd = ["git", *args]
    try:
        proc = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=30,
            cwd=cwd,
        )
    except Exception as exc:
        return f"<error: {exc}>"
    return proc.stdout.strip()
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
class ImprovementTracker:
    """Track file changes made during a self-improvement session using git.

    The tracker stashes pre-existing dirty files so that the post-session
    diff contains only AI-made changes, then restores them when reporting.

    Usage::

        tracker = ImprovementTracker(cwd)
        before = tracker.snapshot()   # git stash of dirty files
        # ... AI makes changes ...
        report = tracker.report()     # git diff + optional commit
    """

    def __init__(self, cwd: Optional[str] = None):
        # Capture branch and starting commit up front so report() can show
        # a From/To range even if HEAD moves during the session.
        self.cwd = cwd or str(Path.cwd())
        self._branch = _git("rev-parse", "--abbrev-ref", "HEAD", cwd=self.cwd)
        self._start_hash = _git("rev-parse", "--short", "HEAD", cwd=self.cwd)
        self._stash_made = False  # whether snapshot() actually stashed anything
        self._changes_before: set[str] = set()  # files dirty before the session

    def snapshot(self) -> str:
        """Stash any uncommitted changes so the diff shows only AI-made changes.

        Returns a status message.
        """
        # Record files that were dirty before AI starts.
        # NOTE(review): `split()[-1]` assumes porcelain paths contain no
        # spaces and no "old -> new" rename arrows — verify for repos where
        # that may not hold.
        status = _git("status", "--porcelain", cwd=self.cwd)
        if status:
            self._changes_before = {
                line.strip().split()[-1] for line in status.splitlines()
                if line.strip()
            }
        # Stash to get a clean baseline ("Saved" appears in git's output
        # only when something was actually stashed).
        result = _git("stash", "push", "-m",
                      "self-improve-before", cwd=self.cwd)
        if "Saved" in result:
            self._stash_made = True
            stashed = len(self._changes_before)
            return f"Stashed {stashed} dirty file(s) for clean diff"
        return "Working tree was clean"

    def report(self, commit: bool = False,
               commit_msg: str = "") -> ImprovementReport:
        """Generate a change report after the AI has made modifications.

        Args:
            commit: If True, verify and commit the changes.
            commit_msg: Commit message (auto-generated if empty).

        Returns:
            An ImprovementReport with diff, file list, and optional commit hash.
        """
        end_hash = _git("rev-parse", "--short", "HEAD", cwd=self.cwd)

        # Get diff of unstaged + staged changes (captured BEFORE the stash
        # pop below, so pre-existing edits are excluded from the preview).
        unstaged = _git("diff", cwd=self.cwd)
        staged = _git("diff", "--cached", cwd=self.cwd)
        diff_text = unstaged + ("\n" if unstaged and staged else "") + staged

        # Restore stashed dirty files so user doesn't lose them.
        if self._stash_made:
            _git("stash", "pop", cwd=self.cwd)

        # List changed files (runs after the pop, so pre-existing dirty
        # files reappear here; they are filtered out just below).
        changed = _git("diff", "--name-only", cwd=self.cwd)
        files = [f for f in changed.splitlines() if f.strip()] if changed else []

        # Filter out files that were already dirty before.
        new_files = [f for f in files if f not in self._changes_before]

        # Build a concise summary.
        summary_lines = []
        summary_lines.append(f"Branch: {self._branch}")
        summary_lines.append(f"From: {self._start_hash}")
        summary_lines.append(f"To: {end_hash}")
        if new_files:
            summary_lines.append(f"\nFiles changed ({len(new_files)}):")
            for f in new_files:
                # Show a one-line stat per file (last line of --stat output).
                stat = _git("diff", "--stat", "--", f, cwd=self.cwd)
                short_stat = stat.split("\n")[-1].strip() if stat else f
                summary_lines.append(f" {short_stat}")
        else:
            summary_lines.append("\nNo new file changes detected")

        if diff_text:
            summary_lines.append(f"\n--- Diff ({len(diff_text)} chars) ---")
            # Show first 30 lines of diff as preview.
            diff_lines = diff_text.splitlines()
            preview = diff_lines[:30]
            summary_lines.extend(preview)
            if len(diff_lines) > 30:
                summary_lines.append(f"... ({len(diff_lines) - 30} more lines)")

        commit_hash = ""
        if commit and new_files:
            # Run verification pipeline on every changed Python file before committing.
            # Any failure aborts the commit — changes stay as unstaged edits so
            # nothing is lost, but main/branch history stays clean.
            try:
                from .verify import run_verify_pipeline, pipeline_all_passed, pipeline_feedback
                for f in new_files:
                    if f.endswith(".py"):
                        abs_path = str(Path(self.cwd) / f)
                        results = run_verify_pipeline(
                            abs_path, self.cwd,
                            run_lint=True, run_consistency=True, run_tests=False,
                        )
                        if not pipeline_all_passed(results):
                            # First failing file wins: abort immediately and
                            # return an error report without committing.
                            summary_lines.append(
                                f"\n⚠ Verification failed for {f} — commit aborted"
                            )
                            summary_lines.append(pipeline_feedback(results))
                            summary = "\n".join(summary_lines)
                            return ImprovementReport(
                                branch=self._branch,
                                start_hash=self._start_hash,
                                end_hash=end_hash,
                                files_changed=new_files,
                                diff_summary=summary,
                                commit_hash="",
                                error=f"Verification failed for {f}",
                            )
            except ImportError:
                pass  # verify module unavailable — proceed without it

            # Auto-generate a commit message naming up to 3 changed files.
            msg = commit_msg or f"self-improve: {', '.join(new_files[:3])}"
            if len(new_files) > 3:
                msg += f" (+{len(new_files) - 3} more)"
            _git("add", *new_files, cwd=self.cwd)
            _git("commit", "-m", msg, cwd=self.cwd)
            commit_hash = _git("rev-parse", "--short", "HEAD", cwd=self.cwd)
            summary_lines.append(f"\nCommitted as {commit_hash}")

        summary = "\n".join(summary_lines)

        return ImprovementReport(
            branch=self._branch,
            start_hash=self._start_hash,
            end_hash=end_hash,
            files_changed=new_files,
            diff_summary=summary,
            commit_hash=commit_hash,
        )
|
luckyd_code/sessions.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
"""Session save/load — persist and restore conversations."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
|
|
7
|
+
from .context import ConversationContext
|
|
8
|
+
|
|
9
|
+
from ._data_dir import data_path
|
|
10
|
+
|
|
11
|
+
# Directory (under the user data dir) where session JSON files are stored.
SESSIONS_DIR = data_path("sessions")
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def _ensure_dir():
    """Create the sessions directory (and any parents) if it does not exist."""
    SESSIONS_DIR.mkdir(parents=True, exist_ok=True)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _sanitize_name(name: str) -> str:
|
|
19
|
+
"""Sanitize a session name for use as a filename."""
|
|
20
|
+
safe = "".join(c if c.isalnum() or c in " _-" else "_" for c in name)
|
|
21
|
+
return safe.strip() or "unnamed"
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def save_session(name: str, context: ConversationContext) -> str:
    """Persist the current conversation under *name* as a JSON session file.

    The system prompt is deliberately excluded: it is re-applied from the
    live config on load, so storing it would resurrect a stale prompt.
    """
    _ensure_dir()
    target = SESSIONS_DIR / f"{_sanitize_name(name)}.json"

    history = [msg for msg in context.messages if msg.get("role") != "system"]

    payload = {
        "name": name,
        "saved_at": datetime.now().isoformat(),
        "message_count": len(history),
        "messages": history,
    }

    target.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    return f"Session '{name}' saved ({len(history)} messages)"
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def load_session(name: str, context: ConversationContext) -> str:
    """Load a saved session into *context*, replacing its conversation.

    Args:
        name: Session name; falls back to prefix matching when no exact
            file exists (so "my" can load "my session").
        context: The live conversation whose messages are replaced.

    Returns:
        A human-readable status string (success, "not found", or error).
    """
    _ensure_dir()
    safe = _sanitize_name(name)
    path = SESSIONS_DIR / f"{safe}.json"

    if not path.exists():
        # Try partial match
        matches = list(SESSIONS_DIR.glob(f"{safe}*.json"))
        if not matches:
            return f"Session '{name}' not found"
        path = matches[0]

    try:
        data = json.loads(path.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, OSError) as e:
        return f"Error loading session: {e}"

    messages = data.get("messages", [])
    if not messages:
        return "Session is empty"

    # Preserve the live system prompt (if any), replace everything else.
    # BUG FIX: previously context.messages[0] was assumed to be the system
    # prompt without checking its role, so a non-system first message could
    # be wrongly carried over into the loaded session.
    system = None
    if context.messages and context.messages[0].get("role") == "system":
        system = context.messages[0]
    if messages[0].get("role") == "system":
        # Legacy session that stored its own system prompt — use it wholesale.
        context.messages = messages
    else:
        context.messages = [system] + messages if system else messages

    # Re-apply max_messages, dropping the oldest non-system turns first.
    while len(context.messages) > context.max_messages:
        context.messages.pop(1)

    return f"Session '{data.get('name', name)}' loaded ({len(messages)} messages)"
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def list_sessions() -> str:
    """Return a human-readable listing of saved sessions, newest first.

    Unreadable or corrupt session files are still listed (by filename)
    rather than hidden, so the user can find and delete them.
    """
    _ensure_dir()
    entries = []
    for path in sorted(SESSIONS_DIR.glob("*.json"), key=os.path.getmtime, reverse=True):
        try:
            meta = json.loads(path.read_text(encoding="utf-8"))
        except Exception:
            entries.append({"name": path.stem, "saved_at": "unknown", "count": 0})
            continue
        entries.append({
            "name": meta.get("name", path.stem),
            "saved_at": meta.get("saved_at", "unknown"),
            "count": meta.get("message_count", 0),
        })

    if not entries:
        return "No saved sessions."

    rendered = []
    for entry in entries:
        stamp = entry["saved_at"][:19] if entry["saved_at"] != "unknown" else "unknown"
        rendered.append(f" {entry['name']:20s} ({entry['count']} msgs, saved {stamp})")
    return "\n".join(rendered)
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def delete_session(name: str) -> str:
    """Remove a saved session file, reporting whether anything was deleted."""
    _ensure_dir()
    target = SESSIONS_DIR / f"{_sanitize_name(name)}.json"
    if not target.exists():
        return f"Session '{name}' not found"
    target.unlink()
    return f"Session '{name}' deleted"
|
luckyd_code/settings.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
"""settings.json and hooks support."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from .log import get_logger
|
|
8
|
+
from ._data_dir import project_data_path
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def get_settings_dir() -> Path:
    """Return the per-project data directory that holds settings files."""
    return project_data_path()
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def get_settings_path() -> Path:
    """Return the path of the shared (checked-in) settings.json."""
    return get_settings_dir() / "settings.json"
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def get_local_settings_path() -> Path:
    """Return the path of the machine-local overrides file (settings.local.json)."""
    return get_settings_dir() / "settings.local.json"
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def load_settings() -> dict[str, Any]:
    """Merge settings.json with settings.local.json (local values win).

    Unreadable or malformed files are logged and skipped rather than
    aborting the load.
    """
    merged: dict[str, Any] = {}
    for candidate in (get_settings_path(), get_local_settings_path()):
        if not candidate.exists():
            continue
        try:
            merged.update(json.loads(candidate.read_text()))
        except Exception:
            get_logger().warning("Could not load settings from %s", str(candidate), exc_info=True)
    return merged
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def save_setting(key: str, value: Any) -> None:
    """Persist a single key/value pair into settings.local.json.

    Existing local settings are preserved; a corrupt file is logged and
    effectively replaced with just this key.
    """
    path = get_local_settings_path()
    path.parent.mkdir(parents=True, exist_ok=True)
    settings: dict[str, Any] = {}
    if path.exists():
        try:
            settings = json.loads(path.read_text())
        except Exception:
            # Corrupt/unreadable local settings: warn, then fall through
            # with an empty dict so at least the new key gets written.
            get_logger().warning("Could not load existing settings from %s", str(path), exc_info=True)
    settings[key] = value
    path.write_text(json.dumps(settings, indent=2))
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def get_hooks() -> dict:
    """Return the 'hooks' mapping from merged settings ({} when absent)."""
    settings = load_settings()
    return settings.get("hooks", {})
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def run_pre_hook(tool_name: str) -> list[str]:
    """Run the configured ``preToolUse`` hook for *tool_name*.

    Returns a list of error strings; empty when the hook is absent, does
    not apply to this tool, or exits successfully.
    """
    hook_cfg = get_hooks().get("preToolUse", "")
    # The hook may be a plain command string, or a dict carrying the
    # command under 'script' plus an optional per-tool allowlist.
    if isinstance(hook_cfg, dict):
        script = hook_cfg.get("script", "")
        allowed_tools = hook_cfg.get("tools", ["all"])
    else:
        script, allowed_tools = hook_cfg, ["all"]

    if not script:
        return []
    if "all" not in allowed_tools and tool_name not in allowed_tools:
        return []

    import subprocess
    try:
        proc = subprocess.run(script, shell=True, capture_output=True, text=True, timeout=30)
    except Exception as exc:
        return [str(exc)]
    if proc.returncode != 0:
        return [proc.stderr.strip()]
    return []
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
"""Code review skill."""
|
|
2
|
+
|
|
3
|
+
import subprocess
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def review_changes() -> str:
    """Collect pending git changes for review.

    Tries the working-tree diff against HEAD first, then falls back to the
    staged diff; output is truncated to 8000 characters.
    """
    def _run_diff(args: list[str]) -> str:
        # One-line purpose: capture stdout of a git diff variant.
        return subprocess.run(
            args,
            capture_output=True, text=True, timeout=30,
        ).stdout

    try:
        diff = _run_diff(["git", "diff", "HEAD"]) or _run_diff(["git", "diff", "--cached"])
        if not diff:
            return "No changes to review."
        return f"Changes to review:\n\n```diff\n{diff[:8000]}\n```"
    except Exception as e:
        return f"Error getting diff: {e}"
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
"""Security review skill."""
|
|
2
|
+
|
|
3
|
+
import subprocess
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def security_review() -> str:
    """Fetch the pending git diff and frame it for a security-focused review.

    Output is truncated to 8000 characters; any git failure is returned as
    an error string rather than raised.
    """
    try:
        proc = subprocess.run(
            ["git", "diff", "HEAD"],
            capture_output=True, text=True, timeout=30,
        )
    except Exception as e:
        return f"Error: {e}"
    diff = proc.stdout
    if not diff:
        return "No changes to review."
    return f"Security review of changes:\n\n```diff\n{diff[:8000]}\n```"
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from .manager import create_task, update_task, list_tasks, get_task
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
import uuid
|
|
2
|
+
import json
|
|
3
|
+
import logging
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any, Optional
|
|
6
|
+
|
|
7
|
+
_logger = logging.getLogger("luckyd_code.tasks")
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class Task:
|
|
11
|
+
def __init__(self, subject: str, description: str = "", task_id: str | None = None):
|
|
12
|
+
self.id = task_id or uuid.uuid4().hex[:8]
|
|
13
|
+
self.subject = subject
|
|
14
|
+
self.description = description
|
|
15
|
+
self.status = "pending" # pending, in_progress, completed, deleted
|
|
16
|
+
self.blocked_by: list[str] = []
|
|
17
|
+
self.blocks: list[str] = []
|
|
18
|
+
|
|
19
|
+
def to_dict(self) -> dict:
|
|
20
|
+
return {
|
|
21
|
+
"id": self.id,
|
|
22
|
+
"subject": self.subject,
|
|
23
|
+
"description": self.description,
|
|
24
|
+
"status": self.status,
|
|
25
|
+
"blocked_by": self.blocked_by,
|
|
26
|
+
"blocks": self.blocks,
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
from .._data_dir import project_data_path
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def _get_db_path() -> Path:
    """Return the path to tasks.json, ensuring its parent directory exists."""
    p = project_data_path("tasks.json")
    p.parent.mkdir(parents=True, exist_ok=True)
    return p
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def _load_tasks() -> dict[str, dict[str, Any]]:
    """Read the task database from tasks.json.

    Missing file, unreadable file, or a non-dict top-level value all
    yield an empty dict (failures are logged, never raised).
    """
    path = _get_db_path()
    if not path.exists():
        return {}
    try:
        raw: object = json.loads(path.read_text())
    except Exception:
        _logger.warning("Failed to load tasks from %s", path, exc_info=True)
        return {}
    if isinstance(raw, dict):
        return raw  # type: ignore[return-value]
    return {}
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def _save_tasks(tasks: dict[str, dict[str, Any]]) -> None:
    """Overwrite tasks.json with the given task dict as pretty-printed JSON."""
    path = _get_db_path()
    path.write_text(json.dumps(tasks, indent=2))
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def create_task(subject: str, description: str = "", blocked_by: list[str] | None = None) -> Task:
    """Create a new task, persist it, and return the Task object."""
    db = _load_tasks()
    new_task = Task(subject, description)
    if blocked_by:
        new_task.blocked_by = blocked_by
    db[new_task.id] = new_task.to_dict()
    _save_tasks(db)
    return new_task
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def update_task(task_id: str, status: str | None = None, subject: str | None = None, description: str | None = None) -> str:
    """Apply any provided (truthy) fields to an existing task and persist.

    Returns a status message; unknown ids yield an "Error: ..." string.
    """
    db = _load_tasks()
    record = db.get(task_id)
    if record is None:
        return f"Error: task {task_id} not found"
    # Only truthy values overwrite — passing "" leaves the field untouched.
    for field_name, new_value in (("status", status), ("subject", subject), ("description", description)):
        if new_value:
            record[field_name] = new_value
    _save_tasks(db)
    return f"Task {task_id} updated: {status or 'ok'}"
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
def list_tasks(status: str | None = None) -> str:
    """Render all tasks one per line, optionally filtered by status."""
    db = _load_tasks()
    if not db:
        return "No tasks."

    rendered = []
    for tid, t in db.items():
        if status and t["status"] != status:
            continue
        deps = t.get("blocked_by")
        suffix = f" [blocked by: {', '.join(t['blocked_by'])}]" if deps else ""
        rendered.append(f"[{t['status']}] {tid}: {t['subject']}{suffix}")

    return "\n".join(rendered) if rendered else "No matching tasks."
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def get_task(task_id: str) -> Optional[Task]:
    """Fetch a task by id, or None when it does not exist.

    BUG FIX: the reconstructed Task previously always came back with the
    default status ("pending") and empty blocked_by/blocks lists, silently
    discarding persisted state. The stored fields are now restored so a
    round-trip through storage preserves the task.
    """
    tasks = _load_tasks()
    d = tasks.get(task_id)
    if d is None:
        return None
    task = Task(d["subject"], d.get("description", ""), task_id)
    # Restore the persisted mutable state onto the fresh instance.
    task.status = d.get("status", "pending")
    task.blocked_by = d.get("blocked_by", [])
    task.blocks = d.get("blocks", [])
    return task
|
|
Binary file
|
|
Binary file
|