ctrlcode-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ctrlcode/__init__.py +8 -0
- ctrlcode/agents/__init__.py +29 -0
- ctrlcode/agents/cleanup.py +388 -0
- ctrlcode/agents/communication.py +439 -0
- ctrlcode/agents/observability.py +421 -0
- ctrlcode/agents/react_loop.py +297 -0
- ctrlcode/agents/registry.py +211 -0
- ctrlcode/agents/result_parser.py +242 -0
- ctrlcode/agents/workflow.py +723 -0
- ctrlcode/analysis/__init__.py +28 -0
- ctrlcode/analysis/ast_diff.py +163 -0
- ctrlcode/analysis/bug_detector.py +149 -0
- ctrlcode/analysis/code_graphs.py +329 -0
- ctrlcode/analysis/semantic.py +205 -0
- ctrlcode/analysis/static.py +183 -0
- ctrlcode/analysis/synthesizer.py +281 -0
- ctrlcode/analysis/tests.py +189 -0
- ctrlcode/cleanup/__init__.py +16 -0
- ctrlcode/cleanup/auto_merge.py +350 -0
- ctrlcode/cleanup/doc_gardening.py +388 -0
- ctrlcode/cleanup/pr_automation.py +330 -0
- ctrlcode/cleanup/scheduler.py +356 -0
- ctrlcode/config.py +380 -0
- ctrlcode/embeddings/__init__.py +6 -0
- ctrlcode/embeddings/embedder.py +192 -0
- ctrlcode/embeddings/vector_store.py +213 -0
- ctrlcode/fuzzing/__init__.py +24 -0
- ctrlcode/fuzzing/analyzer.py +280 -0
- ctrlcode/fuzzing/budget.py +112 -0
- ctrlcode/fuzzing/context.py +665 -0
- ctrlcode/fuzzing/context_fuzzer.py +506 -0
- ctrlcode/fuzzing/derived_orchestrator.py +732 -0
- ctrlcode/fuzzing/oracle_adapter.py +135 -0
- ctrlcode/linters/__init__.py +11 -0
- ctrlcode/linters/hand_rolled_utils.py +221 -0
- ctrlcode/linters/yolo_parsing.py +217 -0
- ctrlcode/metrics/__init__.py +6 -0
- ctrlcode/metrics/dashboard.py +283 -0
- ctrlcode/metrics/tech_debt.py +663 -0
- ctrlcode/paths.py +68 -0
- ctrlcode/permissions.py +179 -0
- ctrlcode/providers/__init__.py +15 -0
- ctrlcode/providers/anthropic.py +138 -0
- ctrlcode/providers/base.py +77 -0
- ctrlcode/providers/openai.py +197 -0
- ctrlcode/providers/parallel.py +104 -0
- ctrlcode/server.py +871 -0
- ctrlcode/session/__init__.py +6 -0
- ctrlcode/session/baseline.py +57 -0
- ctrlcode/session/manager.py +967 -0
- ctrlcode/skills/__init__.py +10 -0
- ctrlcode/skills/builtin/commit.toml +29 -0
- ctrlcode/skills/builtin/docs.toml +25 -0
- ctrlcode/skills/builtin/refactor.toml +33 -0
- ctrlcode/skills/builtin/review.toml +28 -0
- ctrlcode/skills/builtin/test.toml +28 -0
- ctrlcode/skills/loader.py +111 -0
- ctrlcode/skills/registry.py +139 -0
- ctrlcode/storage/__init__.py +19 -0
- ctrlcode/storage/history_db.py +708 -0
- ctrlcode/tools/__init__.py +220 -0
- ctrlcode/tools/bash.py +112 -0
- ctrlcode/tools/browser.py +352 -0
- ctrlcode/tools/executor.py +153 -0
- ctrlcode/tools/explore.py +486 -0
- ctrlcode/tools/mcp.py +108 -0
- ctrlcode/tools/observability.py +561 -0
- ctrlcode/tools/registry.py +193 -0
- ctrlcode/tools/todo.py +291 -0
- ctrlcode/tools/update.py +266 -0
- ctrlcode/tools/webfetch.py +147 -0
- ctrlcode-0.1.0.dist-info/METADATA +93 -0
- ctrlcode-0.1.0.dist-info/RECORD +75 -0
- ctrlcode-0.1.0.dist-info/WHEEL +4 -0
- ctrlcode-0.1.0.dist-info/entry_points.txt +3 -0
ctrlcode/agents/__init__.py
ADDED
@@ -0,0 +1,29 @@
+"""Multi-agent system for ctrl+code."""
+
+from .registry import AgentRegistry
+from .communication import AgentMessage, AgentBus, AgentCoordinator
+from .workflow import TaskGraph, MultiAgentWorkflow, WorkflowOrchestrator
+from .observability import (
+    ObservabilityTools,
+    TestOutputParser,
+    PerformanceParser,
+    LogParser,
+)
+from .cleanup import CleanupAgent, GoldenPrinciplesScanner, CodeSmellDetector
+
+__all__ = [
+    "AgentRegistry",
+    "AgentMessage",
+    "AgentBus",
+    "AgentCoordinator",
+    "TaskGraph",
+    "MultiAgentWorkflow",
+    "WorkflowOrchestrator",
+    "ObservabilityTools",
+    "TestOutputParser",
+    "PerformanceParser",
+    "LogParser",
+    "CleanupAgent",
+    "GoldenPrinciplesScanner",
+    "CodeSmellDetector",
+]
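Since the package root re-exports these names, downstream code can import them from ctrlcode.agents directly. A minimal sketch; AgentRegistry's constructor signature is assumed, not shown in this diff:

```python
# Hypothetical usage of the re-exported names.
from ctrlcode.agents import AgentRegistry, CleanupAgent

registry = AgentRegistry()  # constructor arguments assumed; not visible in this diff
cleanup = CleanupAgent()    # takes no arguments (see cleanup.py below)
```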
ctrlcode/agents/cleanup.py
ADDED
@@ -0,0 +1,388 @@
+"""Cleanup agent implementation for continuous tech debt paydown."""
+
+import re
+from typing import Any
+from dataclasses import dataclass
+from datetime import datetime
+
+
+@dataclass
+class Violation:
+    """Code violation found during scan."""
+
+    principle: str
+    file: str
+    line: int
+    snippet: str
+    severity: str  # "high" | "medium" | "low"
+    fix_suggestion: str
+    auto_fixable: bool
+
+
+@dataclass
+class CleanupMetrics:
+    """Metrics from cleanup scan."""
+
+    scan_date: str
+    violations_by_principle: dict[str, int]
+    code_smells: dict[str, int]
+    stale_docs: int
+    total_issues: int
+    high_priority: int
+    auto_fixable: int
+
+
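For illustration, a Violation record as the scanner below would construct it; all field values here are invented:

```python
from ctrlcode.agents.cleanup import Violation

# Illustrative values only, not taken from a real scan.
v = Violation(
    principle="no-bare-excepts",
    file="app/main.py",
    line=42,
    snippet="try:\n    run()\nexcept:",
    severity="high",
    fix_suggestion="Replace with specific exception type and add logging",
    auto_fixable=False,
)
```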
+class GoldenPrinciplesScanner:
+    """Scanner for golden principle violations."""
+
+    PRINCIPLES = {
+        "no-bare-excepts": {
+            "pattern": r"except\s*:",
+            "description": "Bare except clause",
+            "severity": "high",
+        },
+        "no-print-statements": {
+            "pattern": r"\bprint\s*\(",
+            "description": "Print statement in production code",
+            "severity": "medium",
+        },
+        "prefer-shared-utils": {
+            "patterns": [
+                r"for\s+\w+\s+in\s+range\(\d+\):\s*try:",  # Hand-rolled retry
+                r"def\s+retry_\w+\(",  # Custom retry function
+            ],
+            "description": "Hand-rolled utility (use shared library)",
+            "severity": "medium",
+        },
+    }
+
+    def scan_principle(
+        self,
+        principle: str,
+        code_content: str,
+        file_path: str
+    ) -> list[Violation]:
+        """
+        Scan code for a specific principle violation.
+
+        Args:
+            principle: Principle name (e.g., "no-bare-excepts")
+            code_content: File content to scan
+            file_path: File path for reporting
+
+        Returns:
+            List of violations found
+        """
+        violations = []
+
+        if principle not in self.PRINCIPLES:
+            return violations
+
+        rule = self.PRINCIPLES[principle]
+        patterns = rule.get("patterns", [rule.get("pattern")])
+
+        for pattern in patterns:
+            for match in re.finditer(pattern, code_content, re.MULTILINE):
+                line_num = code_content[:match.start()].count('\n') + 1
+
+                # Extract snippet (3 lines context)
+                lines = code_content.splitlines()
+                start_line = max(0, line_num - 2)
+                end_line = min(len(lines), line_num + 1)
+                snippet = '\n'.join(lines[start_line:end_line])
+
+                violations.append(Violation(
+                    principle=principle,
+                    file=file_path,
+                    line=line_num,
+                    snippet=snippet,
+                    severity=rule["severity"],
+                    fix_suggestion=self._get_fix_suggestion(principle),
+                    auto_fixable=self._is_auto_fixable(principle)
+                ))
+
+        return violations
+
+    def _get_fix_suggestion(self, principle: str) -> str:
+        """Get fix suggestion for principle."""
+        suggestions = {
+            "no-bare-excepts": "Replace with specific exception type and add logging",
+            "no-print-statements": "Replace with logger.info() or logger.debug()",
+            "prefer-shared-utils": "Use tenacity or other established library",
+        }
+        return suggestions.get(principle, "Review and refactor")
+
+    def _is_auto_fixable(self, principle: str) -> bool:
+        """Determine if violation is auto-fixable."""
+        # Only simple cases are auto-fixable
+        auto_fixable = {"no-print-statements"}
+        return principle in auto_fixable
+
+
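A minimal usage sketch for the scanner above; the sample source string is invented, and file_path is used only for reporting:

```python
from ctrlcode.agents.cleanup import GoldenPrinciplesScanner

scanner = GoldenPrinciplesScanner()
sample = "try:\n    risky()\nexcept:\n    pass\n"  # invented file content

# "no-bare-excepts" matches the bare "except:" in the sample.
for v in scanner.scan_principle("no-bare-excepts", sample, "app/example.py"):
    print(f"{v.file}:{v.line} [{v.severity}] {v.fix_suggestion}")
```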
+class CodeSmellDetector:
+    """Detector for code smells and anti-patterns."""
+
+    def detect_long_functions(
+        self,
+        code_content: str,
+        file_path: str,
+        threshold: int = 50
+    ) -> list[dict[str, Any]]:
+        """
+        Find functions exceeding line count threshold.
+
+        Args:
+            code_content: File content
+            file_path: File path
+            threshold: Maximum lines per function
+
+        Returns:
+            List of long function issues
+        """
+        issues = []
+        lines = code_content.splitlines()
+
+        # Find function definitions
+        func_pattern = r'^(\s*)def\s+(\w+)\s*\('
+
+        for i, line in enumerate(lines):
+            match = re.match(func_pattern, line)
+            if not match:
+                continue
+
+            indent = len(match.group(1))
+            func_name = match.group(2)
+            start_line = i + 1
+
+            # Count lines until next function at same indent level
+            func_lines = 0
+            for j in range(i + 1, len(lines)):
+                if re.match(r'^\s*def\s+', lines[j]):
+                    # Check indent level
+                    next_indent = len(lines[j]) - len(lines[j].lstrip())
+                    if next_indent <= indent:
+                        break
+                func_lines += 1
+
+            if func_lines > threshold:
+                issues.append({
+                    "type": "long_function",
+                    "file": file_path,
+                    "line": start_line,
+                    "function": func_name,
+                    "lines": func_lines,
+                    "threshold": threshold,
+                    "suggestion": f"Extract subfunctions from {func_name} ({func_lines} lines)"
+                })
+
+        return issues
+
+    def detect_duplicated_imports(
+        self,
+        code_content: str,
+        file_path: str
+    ) -> list[dict[str, Any]]:
+        """
+        Find duplicate import statements.
+
+        Args:
+            code_content: File content
+            file_path: File path
+
+        Returns:
+            List of duplicate import issues
+        """
+        issues = []
+        imports_seen = {}
+
+        import_pattern = r'^(from\s+[\w.]+\s+)?import\s+([\w,\s]+)'
+
+        for i, line in enumerate(code_content.splitlines()):
+            match = re.match(import_pattern, line.strip())
+            if not match:
+                continue
+
+            import_stmt = match.group(0)
+
+            if import_stmt in imports_seen:
+                issues.append({
+                    "type": "duplicate_import",
+                    "file": file_path,
+                    "line": i + 1,
+                    "import": import_stmt,
+                    "previous_line": imports_seen[import_stmt],
+                    "suggestion": "Remove duplicate import"
+                })
+            else:
+                imports_seen[import_stmt] = i + 1
+
+        return issues
+
+
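A quick sketch of the smell detector on synthetic input; the source text and path are invented:

```python
from ctrlcode.agents.cleanup import CodeSmellDetector

detector = CodeSmellDetector()
source = "import os\nimport sys\nimport os\n"  # second "import os" is a duplicate

for issue in detector.detect_duplicated_imports(source, "pkg/mod.py"):
    print(issue["line"], issue["import"], "-", issue["suggestion"])

# Long-function detection uses a configurable line threshold (default 50).
long_funcs = detector.detect_long_functions(source, "pkg/mod.py", threshold=50)
```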
+class DocFreshnessChecker:
+    """Checker for documentation freshness."""
+
+    def check_markdown_links(
+        self,
+        content: str,
+        file_path: str
+    ) -> list[dict[str, Any]]:
+        """
+        Find potentially broken markdown links.
+
+        Args:
+            content: Markdown content
+            file_path: File path
+
+        Returns:
+            List of link issues to verify
+        """
+        issues = []
+
+        # Find markdown links [text](url)
+        link_pattern = r'\[([^\]]+)\]\(([^\)]+)\)'
+
+        for match in re.finditer(link_pattern, content):
+            link_text = match.group(1)
+            link_url = match.group(2)
+            line_num = content[:match.start()].count('\n') + 1
+
+            # Flag internal links for verification (target existence is not checked here)
+            if not link_url.startswith(('http://', 'https://', '#')):
+                # Internal file link
+                issues.append({
+                    "type": "internal_link",
+                    "file": file_path,
+                    "line": line_num,
+                    "link": link_url,
+                    "text": link_text,
+                    "suggestion": "Verify link target exists"
+                })
+
+        return issues
+
+
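As a sketch, the checker flags relative links for verification while skipping http(s) URLs and anchors; the markdown below is invented:

```python
from ctrlcode.agents.cleanup import DocFreshnessChecker

checker = DocFreshnessChecker()
md = "See [the config guide](docs/config.md) and [the site](https://example.com)."

# Only docs/config.md is reported; the https URL and "#anchor" links are skipped.
for issue in checker.check_markdown_links(md, "README.md"):
    print(issue["line"], issue["link"], "-", issue["suggestion"])
```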
+class CleanupAgent:
+    """Coordinates cleanup scans and refactoring tasks."""
+
+    def __init__(self):
+        """Initialize cleanup agent."""
+        self.principles_scanner = GoldenPrinciplesScanner()
+        self.smell_detector = CodeSmellDetector()
+        self.doc_checker = DocFreshnessChecker()
+
+    def scan_golden_principles(
+        self,
+        files: list[tuple[str, str]],
+        principles: list[str] | None = None
+    ) -> list[Violation]:
+        """
+        Scan files for golden principle violations.
+
+        Args:
+            files: List of (file_path, content) tuples
+            principles: Optional list of specific principles to check
+
+        Returns:
+            List of violations found
+        """
+        all_violations = []
+
+        principles_to_check = principles or list(
+            self.principles_scanner.PRINCIPLES.keys()
+        )
+
+        for file_path, content in files:
+            for principle in principles_to_check:
+                violations = self.principles_scanner.scan_principle(
+                    principle,
+                    content,
+                    file_path
+                )
+                all_violations.extend(violations)
+
+        return all_violations
+
+    def scan_code_smells(
+        self,
+        files: list[tuple[str, str]]
+    ) -> dict[str, list[dict[str, Any]]]:
+        """
+        Scan files for code smells.
+
+        Args:
+            files: List of (file_path, content) tuples
+
+        Returns:
+            Dict mapping smell type to list of issues
+        """
+        all_issues = {
+            "long_functions": [],
+            "duplicate_imports": [],
+        }
+
+        for file_path, content in files:
+            # Detect long functions
+            long_funcs = self.smell_detector.detect_long_functions(
+                content,
+                file_path
+            )
+            all_issues["long_functions"].extend(long_funcs)
+
+            # Detect duplicate imports
+            dup_imports = self.smell_detector.detect_duplicated_imports(
+                content,
+                file_path
+            )
+            all_issues["duplicate_imports"].extend(dup_imports)
+
+        return all_issues
+
+    def generate_metrics(
+        self,
+        violations: list[Violation],
+        code_smells: dict[str, list[dict[str, Any]]],
+        stale_docs: int
+    ) -> CleanupMetrics:
+        """
+        Generate cleanup metrics summary.
+
+        Args:
+            violations: List of principle violations
+            code_smells: Code smell issues by type
+            stale_docs: Count of stale documents
+
+        Returns:
+            CleanupMetrics summary
+        """
+        violations_by_principle = {}
+        for violation in violations:
+            violations_by_principle[violation.principle] = \
+                violations_by_principle.get(violation.principle, 0) + 1
+
+        code_smell_counts = {
+            smell_type: len(issues)
+            for smell_type, issues in code_smells.items()
+        }
+
+        total_violations = len(violations)
+        total_smells = sum(code_smell_counts.values())
+        total_issues = total_violations + total_smells + stale_docs
+
+        high_priority = sum(
+            1 for v in violations if v.severity == "high"
+        )
+
+        auto_fixable = sum(
+            1 for v in violations if v.auto_fixable
+        )
+
+        return CleanupMetrics(
+            scan_date=datetime.utcnow().isoformat(),
+            violations_by_principle=violations_by_principle,
+            code_smells=code_smell_counts,
+            stale_docs=stale_docs,
+            total_issues=total_issues,
+            high_priority=high_priority,
+            auto_fixable=auto_fixable
+        )
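A minimal end-to-end sketch of the agent; the file contents are invented, and the stale_docs count is supplied by the caller (e.g. derived from DocFreshnessChecker results):

```python
from ctrlcode.agents.cleanup import CleanupAgent

agent = CleanupAgent()
files = [
    ("app/main.py", "print('debug')\ntry:\n    run()\nexcept:\n    pass\n"),
]

violations = agent.scan_golden_principles(files)  # all principles by default
smells = agent.scan_code_smells(files)            # long functions + duplicate imports
metrics = agent.generate_metrics(violations, smells, stale_docs=0)

# Expect one medium "no-print-statements" (auto-fixable) and one high "no-bare-excepts".
print(metrics.total_issues, metrics.high_priority, metrics.auto_fixable)
```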