luckyd-code 1.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- luckyd_code/__init__.py +54 -0
- luckyd_code/__main__.py +5 -0
- luckyd_code/_agent_loop.py +551 -0
- luckyd_code/_data_dir.py +73 -0
- luckyd_code/agent.py +38 -0
- luckyd_code/analytics/__init__.py +18 -0
- luckyd_code/analytics/reporter.py +195 -0
- luckyd_code/analytics/scanner.py +443 -0
- luckyd_code/analytics/smells.py +316 -0
- luckyd_code/analytics/trends.py +303 -0
- luckyd_code/api.py +473 -0
- luckyd_code/audit_daemon.py +845 -0
- luckyd_code/autonomous_fixer.py +473 -0
- luckyd_code/background.py +159 -0
- luckyd_code/backup.py +237 -0
- luckyd_code/brain/__init__.py +84 -0
- luckyd_code/brain/assembler.py +100 -0
- luckyd_code/brain/chunker.py +345 -0
- luckyd_code/brain/constants.py +73 -0
- luckyd_code/brain/embedder.py +163 -0
- luckyd_code/brain/graph.py +311 -0
- luckyd_code/brain/indexer.py +316 -0
- luckyd_code/brain/parser.py +140 -0
- luckyd_code/brain/retriever.py +234 -0
- luckyd_code/cli.py +894 -0
- luckyd_code/cli_commands/__init__.py +1 -0
- luckyd_code/cli_commands/audit.py +120 -0
- luckyd_code/cli_commands/background.py +83 -0
- luckyd_code/cli_commands/brain.py +87 -0
- luckyd_code/cli_commands/config.py +75 -0
- luckyd_code/cli_commands/dispatcher.py +695 -0
- luckyd_code/cli_commands/sessions.py +41 -0
- luckyd_code/cli_entry.py +147 -0
- luckyd_code/cli_utils.py +112 -0
- luckyd_code/config.py +205 -0
- luckyd_code/context.py +214 -0
- luckyd_code/cost_tracker.py +209 -0
- luckyd_code/error_reporter.py +508 -0
- luckyd_code/exceptions.py +39 -0
- luckyd_code/export.py +126 -0
- luckyd_code/feedback_analyzer.py +290 -0
- luckyd_code/file_watcher.py +258 -0
- luckyd_code/git/__init__.py +11 -0
- luckyd_code/git/auto_commit.py +157 -0
- luckyd_code/git/tools.py +85 -0
- luckyd_code/hooks.py +236 -0
- luckyd_code/indexer.py +280 -0
- luckyd_code/init.py +39 -0
- luckyd_code/keybindings.py +77 -0
- luckyd_code/log.py +55 -0
- luckyd_code/mcp/__init__.py +6 -0
- luckyd_code/mcp/client.py +184 -0
- luckyd_code/memory/__init__.py +19 -0
- luckyd_code/memory/manager.py +339 -0
- luckyd_code/metrics/__init__.py +5 -0
- luckyd_code/model_registry.py +131 -0
- luckyd_code/orchestrator.py +204 -0
- luckyd_code/permissions/__init__.py +1 -0
- luckyd_code/permissions/manager.py +103 -0
- luckyd_code/planner.py +361 -0
- luckyd_code/plugins.py +91 -0
- luckyd_code/py.typed +0 -0
- luckyd_code/retry.py +57 -0
- luckyd_code/router.py +417 -0
- luckyd_code/sandbox.py +156 -0
- luckyd_code/self_critique.py +2 -0
- luckyd_code/self_improve.py +274 -0
- luckyd_code/sessions.py +114 -0
- luckyd_code/settings.py +72 -0
- luckyd_code/skills/__init__.py +8 -0
- luckyd_code/skills/review.py +22 -0
- luckyd_code/skills/security.py +17 -0
- luckyd_code/tasks/__init__.py +1 -0
- luckyd_code/tasks/manager.py +102 -0
- luckyd_code/templates/icon-192.png +0 -0
- luckyd_code/templates/icon-512.png +0 -0
- luckyd_code/templates/index.html +1965 -0
- luckyd_code/templates/manifest.json +14 -0
- luckyd_code/templates/src/app.js +694 -0
- luckyd_code/templates/src/body.html +767 -0
- luckyd_code/templates/src/cdn.txt +2 -0
- luckyd_code/templates/src/style.css +474 -0
- luckyd_code/templates/sw.js +31 -0
- luckyd_code/templates/test.html +6 -0
- luckyd_code/themes.py +48 -0
- luckyd_code/tools/__init__.py +97 -0
- luckyd_code/tools/agent_tools.py +65 -0
- luckyd_code/tools/bash.py +360 -0
- luckyd_code/tools/brain_tools.py +137 -0
- luckyd_code/tools/browser.py +369 -0
- luckyd_code/tools/datetime_tool.py +34 -0
- luckyd_code/tools/dockerfile_gen.py +212 -0
- luckyd_code/tools/file_ops.py +381 -0
- luckyd_code/tools/game_gen.py +360 -0
- luckyd_code/tools/git_tools.py +130 -0
- luckyd_code/tools/git_worktree.py +63 -0
- luckyd_code/tools/path_validate.py +64 -0
- luckyd_code/tools/project_gen.py +187 -0
- luckyd_code/tools/readme_gen.py +227 -0
- luckyd_code/tools/registry.py +157 -0
- luckyd_code/tools/shell_detect.py +109 -0
- luckyd_code/tools/web.py +89 -0
- luckyd_code/tools/youtube.py +187 -0
- luckyd_code/tools_bridge.py +144 -0
- luckyd_code/undo.py +126 -0
- luckyd_code/update.py +60 -0
- luckyd_code/verify.py +360 -0
- luckyd_code/web_app.py +176 -0
- luckyd_code/web_routes/__init__.py +23 -0
- luckyd_code/web_routes/background.py +73 -0
- luckyd_code/web_routes/brain.py +109 -0
- luckyd_code/web_routes/cost.py +12 -0
- luckyd_code/web_routes/files.py +133 -0
- luckyd_code/web_routes/memories.py +94 -0
- luckyd_code/web_routes/misc.py +67 -0
- luckyd_code/web_routes/project.py +48 -0
- luckyd_code/web_routes/review.py +20 -0
- luckyd_code/web_routes/sessions.py +44 -0
- luckyd_code/web_routes/settings.py +43 -0
- luckyd_code/web_routes/static.py +70 -0
- luckyd_code/web_routes/update.py +19 -0
- luckyd_code/web_routes/ws.py +237 -0
- luckyd_code-1.2.2.dist-info/METADATA +297 -0
- luckyd_code-1.2.2.dist-info/RECORD +127 -0
- luckyd_code-1.2.2.dist-info/WHEEL +4 -0
- luckyd_code-1.2.2.dist-info/entry_points.txt +3 -0
- luckyd_code-1.2.2.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,316 @@
|
|
|
1
|
+
"""Code smell detector — identifies common anti-patterns and quality issues."""
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
from .scanner import ProjectMetrics, PYTHON_EXTENSIONS
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
# ── Smell type definitions ───────────────────────────────────────────────────
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@dataclass
class Smell:
    """A single detected code smell at a specific location.

    Instances are produced by SmellDetector and aggregated into lists
    for reporting; `severity` and `kind` are free-form strings rather
    than enums, so consumers compare them by value.
    """

    file: str             # path of the offending file, as passed to the detector
    line: int             # 1-based line number the smell is reported at
    kind: str  # e.g. "long_function", "deep_nesting", "large_file"
    severity: str  # "info", "warning", "error"
    message: str          # human-readable description of the problem
    suggestion: str = ""  # optional remediation hint; may be empty
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
# ── Individual detectors ─────────────────────────────────────────────────────
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class SmellDetector:
    """Detect code smells in individual files and across whole projects.

    Thresholds are class attributes so callers can tune sensitivity via a
    subclass (or instance attributes) without changing call sites.
    """

    # ── Thresholds ───────────────────────────────────────────────────────
    LONG_FUNCTION_LINES = 50    # warn when a function body exceeds this
    LONG_FILE_LINES = 500       # warn above this; error above 2x
    DEEP_NESTING = 4            # maximum tolerated indentation depth
    HIGH_COMPLEXITY = 15        # cyclomatic-complexity warning level
    MANY_PARAMS = 6             # warn when a function takes more parameters
    FILE_SIZE_MB_WARNING = 0.5  # warn when a file exceeds this size on disk
    DUPLICATION_MIN_LINES = 5   # reserved for duplicate-block detection
    BIG_CLASS_LINES = 300       # warn when a class body exceeds this

    def __init__(self):
        # Accumulator for the file currently under analysis;
        # reset at the top of every detect_file() call.
        self.smells: list[Smell] = []

    def detect_file(self, file_path: str, content: str | None = None) -> list[Smell]:
        """Detect smells in a single file.

        Args:
            file_path: Path of the file to analyze.
            content: Optional pre-loaded text; read from disk when None.

        Returns:
            All smells found in the file; empty list if it cannot be read.
        """
        self.smells = []
        fp = Path(file_path)

        if content is None:
            try:
                content = fp.read_text(encoding="utf-8", errors="replace")
            except (OSError, UnicodeDecodeError):
                return []

        lines = content.splitlines()

        # Large file: severity escalates with size. (Previously a file over
        # 2x the threshold was reported TWICE — warning and error — for the
        # same "large_file" kind; now only the stronger finding is emitted.)
        if len(lines) > self.LONG_FILE_LINES * 2:
            self.smells.append(Smell(
                file=str(fp), line=len(lines),
                kind="large_file", severity="error",
                message=f"File is {len(lines)} lines — extremely large",
                suggestion="Split into multiple files immediately.",
            ))
        elif len(lines) > self.LONG_FILE_LINES:
            self.smells.append(Smell(
                file=str(fp), line=len(lines),
                kind="large_file", severity="warning",
                message=f"File is {len(lines)} lines (threshold: {self.LONG_FILE_LINES})",
                suggestion="Consider splitting into smaller modules.",
            ))

        # AST-based checks for Python; regex/indent heuristics otherwise.
        if fp.suffix.lower() in PYTHON_EXTENSIONS:
            self._detect_python_smells(content, fp, lines)
        else:
            self._detect_generic_smells(content, fp, lines)

        self._detect_deep_nesting(content, fp, lines)

        # On-disk size check (skipped silently when content was passed in
        # for a path that does not exist).
        try:
            size_mb = fp.stat().st_size / (1024 * 1024)
            if size_mb > self.FILE_SIZE_MB_WARNING:
                self.smells.append(Smell(
                    file=str(fp), line=1,
                    kind="large_file_bytes", severity="warning",
                    message=f"File size is {size_mb:.1f}MB",
                    suggestion="Consider splitting or compressing assets.",
                ))
        except OSError:
            pass

        return self.smells

    def _detect_python_smells(self, content: str, fp: Path, lines: list[str]):
        """Python-specific smell detection via the `ast` module.

        Appends findings to self.smells; a syntax error short-circuits with a
        single "syntax_error" smell since nothing else can be analyzed.
        """
        import ast

        try:
            tree = ast.parse(content, filename=str(fp))
        except SyntaxError:
            self.smells.append(Smell(
                file=str(fp), line=1,
                kind="syntax_error", severity="error",
                message="File has a syntax error and cannot be parsed.",
                suggestion="Fix the syntax error.",
            ))
            return

        for node in ast.walk(tree):
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                # Long functions (end_lineno can be None on exotic nodes).
                func_lines = node.end_lineno - node.lineno if node.end_lineno else 0
                if func_lines > self.LONG_FUNCTION_LINES:
                    self.smells.append(Smell(
                        file=str(fp), line=node.lineno,
                        kind="long_function", severity="warning",
                        message=f"'{node.name}' is {func_lines} lines long",
                        suggestion=f"Break into smaller functions (<={self.LONG_FUNCTION_LINES} lines).",
                    ))

                # Many parameters. Count positional-only and keyword-only
                # parameters too — the original counted only `args.args`,
                # so `def f(a, b, /, c, *, d, e, f, g)` slipped through.
                params = (
                    len(node.args.posonlyargs)
                    + len(node.args.args)
                    + len(node.args.kwonlyargs)
                )
                if params > self.MANY_PARAMS:
                    self.smells.append(Smell(
                        file=str(fp), line=node.lineno,
                        kind="too_many_params", severity="warning",
                        message=f"'{node.name}' has {params} parameters",
                        suggestion="Use a config object/dataclass or split the function.",
                    ))

                # Mutable default arguments. kw_defaults may contain None
                # for keyword-only params without a default; isinstance
                # handles that safely.
                for default in node.args.defaults + node.args.kw_defaults:
                    if isinstance(default, (ast.List, ast.Dict, ast.Set)):
                        self.smells.append(Smell(
                            file=str(fp), line=node.lineno,
                            kind="mutable_default", severity="warning",
                            message=(
                                f"Mutable default argument in '{node.name}()' "
                                f"({default.__class__.__name__.lower()})"
                            ),
                            suggestion="Use None as default and set the mutable value in the function body.",
                        ))

            # Large classes
            if isinstance(node, ast.ClassDef):
                class_lines = node.end_lineno - node.lineno if node.end_lineno else 0
                if class_lines > self.BIG_CLASS_LINES:
                    self.smells.append(Smell(
                        file=str(fp), line=node.lineno,
                        kind="large_class", severity="warning",
                        message=f"'{node.name}' is {class_lines} lines",
                        suggestion="Split into smaller classes or use composition.",
                    ))

            # Bare except (node.type is None only for a literal `except:`)
            if isinstance(node, ast.ExceptHandler) and node.type is None:
                self.smells.append(Smell(
                    file=str(fp), line=node.lineno,
                    kind="bare_except", severity="warning",
                    message="Bare 'except:' clause catches everything including SystemExit",
                    suggestion="Specify exception types: 'except ValueError:'",
                ))

    def _detect_generic_smells(self, content: str, fp: Path, lines: list[str]):
        """Heuristic smell detection for non-Python languages.

        Function length is estimated by distance between lines matching a
        definition keyword — a rough proxy that ignores nesting.
        """
        in_func = False
        func_start = 0

        # Definition keywords across common languages (Rust/Go/JS/etc.).
        func_start_re = re.compile(
            r"^\s*(?:def |async def |fn |func |function |pub fn )",
        )

        bare_except_re = re.compile(r"^\s*except\s*:", re.MULTILINE)
        # Only genuinely catch-all forms: C++ `catch (...)`, Java/C#
        # `catch (Exception ...)` / `catch (Throwable ...)`, and the bare
        # `catch {` of JS (ES2019)/C#. The previous pattern `catch\s*\(`
        # flagged EVERY try/catch regardless of the exception type.
        catch_all_re = re.compile(
            r"catch\s*(?:\{|\(\s*\.\.\.\s*\)|\(\s*(?:Exception|Throwable)\b)",
            re.MULTILINE,
        )

        for i, line in enumerate(lines):
            if func_start_re.match(line):
                # A new definition closes the previous one; measure it.
                if in_func and (i - func_start) > self.LONG_FUNCTION_LINES:
                    self.smells.append(Smell(
                        file=str(fp), line=func_start + 1,
                        kind="long_function", severity="warning",
                        message=f"Function is {i - func_start} lines long",
                        suggestion=f"Break into smaller functions (<={self.LONG_FUNCTION_LINES} lines).",
                    ))
                in_func = True
                func_start = i

        # The last function runs to end-of-file under this heuristic.
        if in_func and (len(lines) - func_start) > self.LONG_FUNCTION_LINES:
            self.smells.append(Smell(
                file=str(fp), line=func_start + 1,
                kind="long_function", severity="warning",
                message=f"Function is {len(lines) - func_start} lines long",
                suggestion=f"Break into smaller functions (<={self.LONG_FUNCTION_LINES} lines).",
            ))

        # Bare except / catch-all (one finding per file, bare-except wins).
        if bare_except_re.search(content):
            self.smells.append(Smell(
                file=str(fp), line=1,
                kind="bare_except", severity="warning",
                message="Bare 'except:' found (catches everything)",
                suggestion="Specify exception types.",
            ))
        elif catch_all_re.search(content):
            self.smells.append(Smell(
                file=str(fp), line=1,
                kind="bare_except", severity="warning",
                message="Catch-all 'catch(' found (catches every exception)",
                suggestion="Catch specific exception types.",
            ))

    def _detect_deep_nesting(self, content: str, fp: Path, lines: list[str]):
        """Flag lines indented deeper than DEEP_NESTING levels.

        Assumes 4-space indentation; reports at most 3 occurrences per file
        to avoid flooding the output.
        """
        for i, line in enumerate(lines):
            if not line.strip():
                continue
            indent = len(line) - len(line.lstrip())
            nesting_level = indent // 4
            if nesting_level > self.DEEP_NESTING:
                self.smells.append(Smell(
                    file=str(fp), line=i + 1,
                    kind="deep_nesting", severity="info",
                    message=f"Nesting level {nesting_level} (>{self.DEEP_NESTING})",
                    suggestion="Consider extracting nested logic or using early returns.",
                ))
                # Only report the first few occurrences per file.
                if len([s for s in self.smells if s.kind == "deep_nesting" and s.file == str(fp)]) >= 3:
                    break

    def detect_project(self, pm: ProjectMetrics) -> list[Smell]:
        """Detect smells across an entire project's pre-computed metrics.

        Works purely from `pm` (no file I/O). Returns the project-level
        findings plus whatever self.smells currently holds — empty on a
        fresh detector.
        """
        smells = []

        # Files with high cyclomatic complexity
        for path, complexity in pm.complexity_breakdown.items():
            if complexity > self.HIGH_COMPLEXITY:
                smells.append(Smell(
                    file=path, line=1,
                    kind="high_complexity", severity="warning",
                    message=f"Cyclomatic complexity is {complexity}",
                    suggestion="Refactor to reduce branching and nesting.",
                ))

        # Files with high TODO density (>10 TODOs per 100 code lines)
        for fm in pm.file_metrics:
            if fm.lines_code > 0:
                todo_density = fm.todo_count / (fm.lines_code / 100)
                if todo_density > 10:
                    smells.append(Smell(
                        file=fm.path, line=1,
                        kind="high_todo_density", severity="info",
                        message=f"{fm.todo_count} TODOs in {fm.lines_code} lines",
                        suggestion="Address or triage outstanding TODOs.",
                    ))

        # Files with content but no code lines (comments/blank only)
        for fm in pm.file_metrics:
            if fm.lines_code == 0 and fm.lines_total > 0:
                smells.append(Smell(
                    file=fm.path, line=1,
                    kind="empty_file", severity="info",
                    message="File has no code lines",
                    suggestion="Remove empty files or add content.",
                ))

        # Large files (from metrics, without re-reading the files)
        for fm in pm.file_metrics:
            if fm.lines_total > self.LONG_FILE_LINES:
                smells.append(Smell(
                    file=fm.path, line=fm.lines_total,
                    kind="large_file", severity="warning",
                    message=f"File is {fm.lines_total} lines (threshold: {self.LONG_FILE_LINES})",
                    suggestion="Consider splitting into smaller modules.",
                ))

        return smells + self.smells
|
|
291
|
+
|
|
292
|
+
|
|
293
|
+
# ── Convenience ──────────────────────────────────────────────────────────────
|
|
294
|
+
|
|
295
|
+
|
|
296
|
+
def detect_smells(path: str | None = None) -> list[Smell]:
    """Convenience wrapper: detect smells in a file, directory, or project.

    With no *path* the whole current project is scanned; a file path is
    analyzed directly; a directory path is scanned as a sub-project.
    Anything else yields an empty list.
    """
    detector = SmellDetector()

    if path is None:
        # Whole-project scan rooted at the current working directory.
        from .scanner import scan_project

        return detector.detect_project(scan_project())

    target = Path(path)
    if target.is_file():
        return detector.detect_file(str(target))
    if target.is_dir():
        from .scanner import CodebaseScanner

        metrics = CodebaseScanner(str(target)).scan()
        return detector.detect_project(metrics)

    # Nonexistent or exotic path: nothing to report.
    return []
|
|
@@ -0,0 +1,303 @@
|
|
|
1
|
+
"""Trend tracking — snapshot project metrics over time and analyze changes."""
|
|
2
|
+
|
|
3
|
+
import json
import time
from dataclasses import asdict, dataclass, field, fields
from pathlib import Path
from typing import Any

from .._data_dir import project_data_path
from .scanner import ProjectMetrics, scan_project
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def _get_db_path() -> Path:
    """Return the analytics database path, ensuring its parent directory exists."""
    db_file = project_data_path("analytics.json")
    db_file.parent.mkdir(parents=True, exist_ok=True)
    return db_file
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@dataclass
class TrendPoint:
    """A single snapshot of project-wide metrics in the trend history.

    Serialized to/from JSON-compatible dicts whose keys are exactly the
    field names below, so serialization is derived from the field list
    instead of being maintained by hand (the original duplicated every
    field name three times, a drift hazard when fields change).
    """

    timestamp: float           # epoch seconds the scan was taken (pm.scanned_at)
    source_files: int
    total_lines: int
    total_code_lines: int
    total_todos: int
    total_fixmes: int
    total_functions: int
    total_classes: int
    avg_complexity: float
    health_score: float
    total_size_bytes: int
    languages: dict[str, int]  # file count per detected language

    @classmethod
    def from_metrics(cls, pm: "ProjectMetrics") -> "TrendPoint":
        """Build a snapshot from a full project scan result."""
        return cls(
            timestamp=pm.scanned_at,
            source_files=pm.source_files,
            total_lines=pm.total_lines,
            total_code_lines=pm.total_code_lines,
            total_todos=pm.total_todos,
            total_fixmes=pm.total_fixmes,
            total_functions=pm.total_functions,
            total_classes=pm.total_classes,
            avg_complexity=pm.avg_complexity,
            health_score=pm.health_score,
            total_size_bytes=pm.total_size_bytes,
            languages=dict(pm.files_by_language),
        )

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a JSON-compatible dict (keys mirror field names)."""
        # asdict yields exactly the mapping the hand-written version built,
        # and deep-copies `languages` so callers can't mutate the snapshot.
        return asdict(self)

    @classmethod
    def from_dict(cls, d: dict[str, Any]) -> "TrendPoint":
        """Deserialize from a dict.

        `languages` is optional (defaults to {}) for backward compatibility
        with older databases; any other missing field raises KeyError, which
        the loader treats as a corrupt database.
        """
        kwargs = {f.name: d[f.name] for f in fields(cls) if f.name != "languages"}
        kwargs["languages"] = d.get("languages", {})
        return cls(**kwargs)
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
@dataclass
class TrendReport:
    """Analysis of changes between two snapshots.

    Produced by TrendTracker.compare(). Deltas are (newer - older), so a
    negative value means the metric decreased over the compared interval.
    """

    points: list[TrendPoint] = field(default_factory=list)  # the snapshots compared
    delta_files: int = 0           # change in source-file count
    delta_lines: int = 0           # change in code-line count
    delta_todos: int = 0           # change in TODO count
    delta_fixmes: int = 0          # change in FIXME count
    delta_health: float = 0.0      # change in health score
    delta_complexity: float = 0.0  # change in average cyclomatic complexity
    direction: str = ""  # "improving", "declining", "stable"
    summary: str = ""              # human-readable explanation of the direction
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
class TrendTracker:
    """Track project metrics over time with snapshots persisted to JSON."""

    def __init__(self):
        self.db_path = _get_db_path()
        # Lazy in-memory cache of snapshots; None until load() has run.
        self._points: list[TrendPoint] | None = None

    def load(self) -> list[TrendPoint]:
        """Load all snapshots from disk (cached after the first call)."""
        if self._points is not None:
            return self._points

        self._points = []
        if self.db_path.exists():
            try:
                data = json.loads(self.db_path.read_text())
                self._points = [TrendPoint.from_dict(p) for p in data.get("snapshots", [])]
            except (OSError, json.JSONDecodeError, KeyError):
                # Unreadable or corrupt database: start fresh rather than
                # crash. (OSError was previously unhandled, so a permission
                # error on the DB file aborted the whole load.)
                self._points = []

        return self._points

    def save(self, points: list[TrendPoint] | None = None):
        """Persist snapshots to disk, optionally replacing the cached list.

        Args:
            points: New snapshot list to adopt; when None the current cache
                (or an empty list) is written.
        """
        if points is not None:
            self._points = points
        if self._points is None:
            self._points = []

        payload = {
            "snapshots": [p.to_dict() for p in self._points],
            "updated_at": time.time(),
        }
        self.db_path.write_text(json.dumps(payload, indent=2))

    def snapshot(self) -> TrendPoint:
        """Scan the project now, append the snapshot, and persist it."""
        point = TrendPoint.from_metrics(scan_project())

        points = self.load()
        points.append(point)
        self.save(points)

        return point

    def get_all(self) -> list[TrendPoint]:
        """Return all snapshots sorted oldest-first."""
        return sorted(self.load(), key=lambda p: p.timestamp)

    def get_latest(self) -> TrendPoint | None:
        """Return the most recent snapshot, or None when none exist."""
        points = self.get_all()
        return points[-1] if points else None

    def compare(self, index_a: int = -2, index_b: int = -1) -> TrendReport:
        """Compare two snapshots by index into the time-sorted list.

        Defaults to the last two. Raises IndexError for out-of-range
        indices (when at least 2 snapshots exist); with fewer than 2
        snapshots a "stable" report explains the lack of data.
        """
        points = self.get_all()

        if len(points) < 2:
            return TrendReport(
                points=points,
                direction="stable",
                summary="Not enough data for comparison (need at least 2 snapshots).",
            )

        a = points[index_a]
        b = points[index_b]

        report = TrendReport(
            points=[a, b],
            delta_files=b.source_files - a.source_files,
            delta_lines=b.total_code_lines - a.total_code_lines,
            delta_todos=b.total_todos - a.total_todos,
            delta_fixmes=b.total_fixmes - a.total_fixmes,
            delta_health=b.health_score - a.health_score,
            delta_complexity=b.avg_complexity - a.avg_complexity,
        )

        # Classify each delta; complexity/health use dead bands so tiny
        # fluctuations don't flip the direction.
        improvements: list[str] = []
        declines: list[str] = []

        if report.delta_todos < 0:
            improvements.append(f"TODOs decreased by {abs(report.delta_todos)}")
        elif report.delta_todos > 0:
            declines.append(f"TODOs increased by {report.delta_todos}")

        if report.delta_fixmes < 0:
            improvements.append(f"FIXMEs decreased by {abs(report.delta_fixmes)}")
        elif report.delta_fixmes > 0:
            declines.append(f"FIXMEs increased by {report.delta_fixmes}")

        if report.delta_complexity < -0.5:
            improvements.append(f"Avg complexity decreased ({report.delta_complexity:.1f})")
        elif report.delta_complexity > 0.5:
            declines.append(f"Avg complexity increased (+{report.delta_complexity:.1f})")

        if report.delta_health > 1:
            improvements.append(f"Health score improved (+{report.delta_health:.1f})")
        elif report.delta_health < -1:
            declines.append(f"Health score declined ({report.delta_health:.1f})")

        # NOTE(review): code growth counts as an "improvement", which biases
        # the overall direction toward improving — confirm this is intended.
        if report.delta_lines > 0:
            improvements.append(f"Codebase grew by {report.delta_lines} lines")

        # Direction: majority vote, with ties going to "improving".
        if not improvements and not declines:
            report.direction = "stable"
            report.summary = "No significant changes detected."
        elif len(improvements) >= len(declines):
            report.direction = "improving"
            report.summary = "Improvements: " + "; ".join(improvements)
            if declines:
                report.summary += " | Concerns: " + "; ".join(declines)
        else:
            report.direction = "declining"
            report.summary = "Concerns: " + "; ".join(declines)
            if improvements:
                report.summary += " | Improvements: " + "; ".join(improvements)

        return report

    def trend_summary(self) -> str:
        """Generate a human-readable summary comparing first and last snapshots."""
        points = self.get_all()
        if len(points) < 2:
            return "Not enough data for trends (take at least 2 snapshots)."

        first = points[0]
        last = points[-1]

        days_span = (last.timestamp - first.timestamp) / 86400  # 86400 s per day

        lines = []
        lines.append("=== Project Trends ===")
        lines.append(f"Snapshots: {len(points)} over {days_span:.1f} days")
        lines.append("")

        # File count
        delta_files = last.source_files - first.source_files
        lines.append(f"Source files: {first.source_files} -> {last.source_files} ({delta_files:+d})")

        # Lines
        delta_lines = last.total_code_lines - first.total_code_lines
        lines.append(f"Code lines: {first.total_code_lines} -> {last.total_code_lines} ({delta_lines:+})")

        # TODOs
        delta_todos = last.total_todos - first.total_todos
        lines.append(f"TODOs: {first.total_todos} -> {last.total_todos} ({delta_todos:+d})")

        # FIXMEs
        delta_fixmes = last.total_fixmes - first.total_fixmes
        lines.append(f"FIXMEs: {first.total_fixmes} -> {last.total_fixmes} ({delta_fixmes:+d})")

        # Health. An unchanged score used to be labeled "declining";
        # report zero delta as "stable" instead.
        delta_health = last.health_score - first.health_score
        if delta_health > 0:
            direction = "improving"
        elif delta_health < 0:
            direction = "declining"
        else:
            direction = "stable"
        lines.append(f"Health: {first.health_score:.1f} -> {last.health_score:.1f} ({delta_health:+.1f}, {direction})")

        # Complexity
        delta_comp = last.avg_complexity - first.avg_complexity
        lines.append(f"Avg complexity: {first.avg_complexity:.1f} -> {last.avg_complexity:.1f} ({delta_comp:+.1f})")

        # Languages that appeared or disappeared between the endpoints
        all_langs = set(first.languages) | set(last.languages)
        added = [lang for lang in all_langs if lang not in first.languages]
        removed = [lang for lang in all_langs if lang not in last.languages]
        if added:
            lines.append(f"Languages added: {', '.join(added)}")
        if removed:
            lines.append(f"Languages removed: {', '.join(removed)}")

        return "\n".join(lines)

    def clear(self):
        """Delete all snapshots, both the in-memory cache and the on-disk DB."""
        self._points = []
        if self.db_path.exists():
            self.db_path.unlink()
|
|
289
|
+
|
|
290
|
+
|
|
291
|
+
# ── Convenience functions ───────────────────────────────────────────────────
|
|
292
|
+
|
|
293
|
+
|
|
294
|
+
def snapshot_project() -> TrendPoint:
    """Record and return a snapshot of the current project's metrics."""
    return TrendTracker().snapshot()
|
|
298
|
+
|
|
299
|
+
|
|
300
|
+
def get_trends() -> str:
    """Return a human-readable trend summary for the current project."""
    return TrendTracker().trend_summary()
|