ctrlcode-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ctrlcode/__init__.py +8 -0
- ctrlcode/agents/__init__.py +29 -0
- ctrlcode/agents/cleanup.py +388 -0
- ctrlcode/agents/communication.py +439 -0
- ctrlcode/agents/observability.py +421 -0
- ctrlcode/agents/react_loop.py +297 -0
- ctrlcode/agents/registry.py +211 -0
- ctrlcode/agents/result_parser.py +242 -0
- ctrlcode/agents/workflow.py +723 -0
- ctrlcode/analysis/__init__.py +28 -0
- ctrlcode/analysis/ast_diff.py +163 -0
- ctrlcode/analysis/bug_detector.py +149 -0
- ctrlcode/analysis/code_graphs.py +329 -0
- ctrlcode/analysis/semantic.py +205 -0
- ctrlcode/analysis/static.py +183 -0
- ctrlcode/analysis/synthesizer.py +281 -0
- ctrlcode/analysis/tests.py +189 -0
- ctrlcode/cleanup/__init__.py +16 -0
- ctrlcode/cleanup/auto_merge.py +350 -0
- ctrlcode/cleanup/doc_gardening.py +388 -0
- ctrlcode/cleanup/pr_automation.py +330 -0
- ctrlcode/cleanup/scheduler.py +356 -0
- ctrlcode/config.py +380 -0
- ctrlcode/embeddings/__init__.py +6 -0
- ctrlcode/embeddings/embedder.py +192 -0
- ctrlcode/embeddings/vector_store.py +213 -0
- ctrlcode/fuzzing/__init__.py +24 -0
- ctrlcode/fuzzing/analyzer.py +280 -0
- ctrlcode/fuzzing/budget.py +112 -0
- ctrlcode/fuzzing/context.py +665 -0
- ctrlcode/fuzzing/context_fuzzer.py +506 -0
- ctrlcode/fuzzing/derived_orchestrator.py +732 -0
- ctrlcode/fuzzing/oracle_adapter.py +135 -0
- ctrlcode/linters/__init__.py +11 -0
- ctrlcode/linters/hand_rolled_utils.py +221 -0
- ctrlcode/linters/yolo_parsing.py +217 -0
- ctrlcode/metrics/__init__.py +6 -0
- ctrlcode/metrics/dashboard.py +283 -0
- ctrlcode/metrics/tech_debt.py +663 -0
- ctrlcode/paths.py +68 -0
- ctrlcode/permissions.py +179 -0
- ctrlcode/providers/__init__.py +15 -0
- ctrlcode/providers/anthropic.py +138 -0
- ctrlcode/providers/base.py +77 -0
- ctrlcode/providers/openai.py +197 -0
- ctrlcode/providers/parallel.py +104 -0
- ctrlcode/server.py +871 -0
- ctrlcode/session/__init__.py +6 -0
- ctrlcode/session/baseline.py +57 -0
- ctrlcode/session/manager.py +967 -0
- ctrlcode/skills/__init__.py +10 -0
- ctrlcode/skills/builtin/commit.toml +29 -0
- ctrlcode/skills/builtin/docs.toml +25 -0
- ctrlcode/skills/builtin/refactor.toml +33 -0
- ctrlcode/skills/builtin/review.toml +28 -0
- ctrlcode/skills/builtin/test.toml +28 -0
- ctrlcode/skills/loader.py +111 -0
- ctrlcode/skills/registry.py +139 -0
- ctrlcode/storage/__init__.py +19 -0
- ctrlcode/storage/history_db.py +708 -0
- ctrlcode/tools/__init__.py +220 -0
- ctrlcode/tools/bash.py +112 -0
- ctrlcode/tools/browser.py +352 -0
- ctrlcode/tools/executor.py +153 -0
- ctrlcode/tools/explore.py +486 -0
- ctrlcode/tools/mcp.py +108 -0
- ctrlcode/tools/observability.py +561 -0
- ctrlcode/tools/registry.py +193 -0
- ctrlcode/tools/todo.py +291 -0
- ctrlcode/tools/update.py +266 -0
- ctrlcode/tools/webfetch.py +147 -0
- ctrlcode-0.1.0.dist-info/METADATA +93 -0
- ctrlcode-0.1.0.dist-info/RECORD +75 -0
- ctrlcode-0.1.0.dist-info/WHEEL +4 -0
- ctrlcode-0.1.0.dist-info/entry_points.txt +3 -0

ctrlcode/analysis/tests.py
@@ -0,0 +1,189 @@
"""Test execution for code variants."""

import tempfile
import subprocess
from dataclasses import dataclass, field
from pathlib import Path


@dataclass
class TestResult:
    """Result of test execution."""

    variant_id: str
    passed: bool
    total_tests: int = 0
    passed_tests: int = 0
    failed_tests: int = 0
    errors: list[str] = field(default_factory=list)
    output: str = ""
    execution_time: float = 0.0

    @property
    def pass_rate(self) -> float:
        """Calculate test pass rate."""
        if self.total_tests == 0:
            return 0.0
        return self.passed_tests / self.total_tests


class TestExecutor:
    """Executes tests against code variants."""

    def __init__(self, test_template: str | None = None):
        """
        Initialize test executor.

        Args:
            test_template: Optional template for generating tests
        """
        self.test_template = test_template

    async def run_tests(
        self,
        code: str,
        variant_id: str,
        test_code: str | None = None
    ) -> TestResult:
        """
        Run tests against code.

        Args:
            code: Code to test
            variant_id: Identifier for variant
            test_code: Optional test code (if None, auto-generate basic tests)

        Returns:
            TestResult with execution results
        """
        # Create temp directory for test
        with tempfile.TemporaryDirectory() as tmpdir:
            tmppath = Path(tmpdir)

            # Write code to file
            code_file = tmppath / "code.py"
            code_file.write_text(code)

            # Write test file
            if test_code:
                test_file = tmppath / "test_code.py"
                test_file.write_text(test_code)
            else:
                # Generate basic tests
                test_file = tmppath / "test_code.py"
                basic_tests = self._generate_basic_tests(code)
                test_file.write_text(basic_tests)

            # Run pytest
            result = subprocess.run(
                ["pytest", str(test_file), "-v", "--tb=short"],
                capture_output=True,
                text=True,
                cwd=str(tmppath),
                timeout=30,  # 30 second timeout
            )

            # Parse results
            return self._parse_pytest_output(
                variant_id=variant_id,
                returncode=result.returncode,
                stdout=result.stdout,
                stderr=result.stderr,
            )

    def _generate_basic_tests(self, code: str) -> str:
        """
        Generate basic smoke tests for code.

        Args:
            code: Code to test

        Returns:
            Test code as string
        """
        # Very basic: just try to import and call functions
        return """
import sys
import pytest
from code import *

def test_imports():
    '''Test that code imports without errors.'''
    assert True

def test_no_syntax_errors():
    '''Verify code compiles.'''
    import code
    assert code is not None

# Add more specific tests based on code analysis
"""

    def _parse_pytest_output(
        self,
        variant_id: str,
        returncode: int,
        stdout: str,
        stderr: str,
    ) -> TestResult:
        """
        Parse pytest output.

        Args:
            variant_id: Variant identifier
            returncode: Process return code
            stdout: Standard output
            stderr: Standard error

        Returns:
            Parsed test result
        """
        passed = returncode == 0
        output = stdout + "\n" + stderr

        # Parse test counts from output
        # Pytest output format: "X passed, Y failed in Z.XXs"
        import re

        total_tests = 0
        passed_tests = 0
        failed_tests = 0
        errors = []

        # Look for summary line
        summary_pattern = r"(\d+) passed"
        passed_match = re.search(summary_pattern, output)
        if passed_match:
            passed_tests = int(passed_match.group(1))

        failed_pattern = r"(\d+) failed"
        failed_match = re.search(failed_pattern, output)
        if failed_match:
            failed_tests = int(failed_match.group(1))

        total_tests = passed_tests + failed_tests

        # Extract error messages
        if not passed:
            # Look for FAILED lines
            for line in output.split("\n"):
                if "FAILED" in line or "ERROR" in line:
                    errors.append(line.strip())

        # Extract execution time
        time_pattern = r"in ([\d.]+)s"
        time_match = re.search(time_pattern, output)
        execution_time = 0.0
        if time_match:
            execution_time = float(time_match.group(1))

        return TestResult(
            variant_id=variant_id,
            passed=passed,
            total_tests=total_tests,
            passed_tests=passed_tests,
            failed_tests=failed_tests,
            errors=errors,
            output=output,
            execution_time=execution_time,
        )
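
A minimal usage sketch for the executor above, not part of the package: the sample code string and variant id are invented for illustration, the import path is inferred from the wheel layout (ctrlcode/analysis/tests.py), and pytest must be installed and on PATH for the subprocess call to succeed.

import asyncio

from ctrlcode.analysis.tests import TestExecutor

async def main() -> None:
    executor = TestExecutor()
    # No test_code is supplied, so the executor falls back to its generated smoke tests.
    result = await executor.run_tests(
        code="def add(a, b):\n    return a + b\n",
        variant_id="variant-1",
    )
    print(result.passed, result.pass_rate, result.execution_time)

asyncio.run(main())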

ctrlcode/cleanup/__init__.py
@@ -0,0 +1,16 @@
"""Cleanup automation for code quality maintenance."""

from .scheduler import CleanupScheduler
from .pr_automation import PRAutomation, PRConfig
from .auto_merge import AutoMerge, AutoMergeConfig
from .doc_gardening import DocGardener, DocHealthReport

__all__ = [
    "CleanupScheduler",
    "PRAutomation",
    "PRConfig",
    "AutoMerge",
    "AutoMergeConfig",
    "DocGardener",
    "DocHealthReport",
]
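
Because these names are re-exported here, callers can import the cleanup entry points from ctrlcode.cleanup directly rather than from the individual submodules; a tiny illustrative sketch, not part of the package:

from ctrlcode.cleanup import AutoMergeConfig

# Same class as ctrlcode.cleanup.auto_merge.AutoMergeConfig; see the full hunk below.
config = AutoMergeConfig(enabled=True, wait_minutes=30, dry_run=True)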

ctrlcode/cleanup/auto_merge.py
@@ -0,0 +1,350 @@
"""Auto-merge capability for cleanup PRs."""

import json
import logging
import subprocess
from dataclasses import dataclass
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any

logger = logging.getLogger(__name__)


@dataclass
class AutoMergeConfig:
    """Configuration for auto-merge."""

    enabled: bool = False
    wait_minutes: int = 60
    max_files: int = 5
    max_lines_changed: int = 100
    excluded_paths: list[str] | None = None
    dry_run: bool = False


class AutoMerge:
    """Auto-merge logic for cleanup PRs."""

    def __init__(self, workspace_root: Path, config: AutoMergeConfig | None = None, audit_log_path: Path | None = None):
        """
        Initialize auto-merge.

        Args:
            workspace_root: Root directory of workspace
            config: Auto-merge configuration
            audit_log_path: Path to audit log file
        """
        self.workspace_root = Path(workspace_root)
        self.config = config or AutoMergeConfig()
        self.audit_log_path = audit_log_path or (self.workspace_root / ".ctrlcode" / "auto_merge_audit.jsonl")
        self.audit_log_path.parent.mkdir(parents=True, exist_ok=True)

    def check_and_merge_pr(self, pr_number: int) -> dict[str, Any]:
        """
        Check if PR meets auto-merge criteria and merge if eligible.

        Args:
            pr_number: PR number to check

        Returns:
            Dict with merge status and details
        """
        if not self.config.enabled:
            return {"status": "skipped", "reason": "Auto-merge disabled"}

        try:
            # Get PR details
            pr_info = self._get_pr_info(pr_number)

            if not pr_info:
                return {"status": "error", "reason": "Failed to get PR info"}

            # Check eligibility
            eligibility = self._check_eligibility(pr_info)

            if not eligibility["eligible"]:
                return {
                    "status": "ineligible",
                    "reason": eligibility["reason"],
                    "pr_number": pr_number,
                }

            # Wait for review period
            if self.config.wait_minutes > 0:
                created_at_str = pr_info["created_at"].replace("Z", "+00:00")
                created_at = datetime.fromisoformat(created_at_str)

                # Strip timezone for simpler comparison
                if created_at.tzinfo:
                    created_at = created_at.replace(tzinfo=None)

                wait_until = created_at + timedelta(minutes=self.config.wait_minutes)
                now = datetime.now()

                if now < wait_until:
                    remaining = (wait_until - now).total_seconds() / 60
                    return {
                        "status": "waiting",
                        "reason": f"Waiting {remaining:.0f}m for review period",
                        "pr_number": pr_number,
                    }

            # Dry run mode
            if self.config.dry_run:
                self._log_audit(pr_number, "dry_run", pr_info)
                return {
                    "status": "dry_run",
                    "message": "Would merge PR (dry run mode)",
                    "pr_number": pr_number,
                }

            # Merge the PR
            merge_result = self._merge_pr(pr_number)

            self._log_audit(pr_number, "merged", pr_info, merge_result)

            return {
                "status": "merged",
                "pr_number": pr_number,
                "merge_sha": merge_result.get("merge_sha"),
            }

        except Exception as e:
            logger.error(f"Failed to auto-merge PR #{pr_number}: {e}", exc_info=True)
            return {"status": "error", "reason": str(e), "pr_number": pr_number}

    def _get_pr_info(self, pr_number: int) -> dict[str, Any] | None:
        """Get PR information from GitHub."""
        try:
            result = subprocess.run(
                [
                    "gh",
                    "pr",
                    "view",
                    str(pr_number),
                    "--json",
                    "number,title,state,author,labels,createdAt,files,additions,deletions,statusCheckRollup",
                ],
                cwd=self.workspace_root,
                capture_output=True,
                text=True,
                timeout=30,
            )

            if result.returncode != 0:
                logger.error(f"Failed to get PR info: {result.stderr}")
                return None

            pr_data = json.loads(result.stdout)

            return {
                "number": pr_data["number"],
                "title": pr_data["title"],
                "state": pr_data["state"],
                "author": pr_data["author"]["login"],
                "labels": [label["name"] for label in pr_data.get("labels", [])],
                "created_at": pr_data["createdAt"],
                "files": pr_data.get("files", []),
                "additions": pr_data.get("additions", 0),
                "deletions": pr_data.get("deletions", 0),
                "checks": pr_data.get("statusCheckRollup", []),
            }

        except Exception as e:
            logger.error(f"Error getting PR info: {e}")
            return None

    def _check_eligibility(self, pr_info: dict[str, Any]) -> dict[str, Any]:
        """
        Check if PR meets auto-merge criteria.

        Args:
            pr_info: PR information

        Returns:
            Dict with eligible status and reason
        """
        # Check if PR is open
        if pr_info["state"] != "OPEN":
            return {"eligible": False, "reason": "PR is not open"}

        # Check for auto-merge label
        if "auto-merge-candidate" not in pr_info["labels"]:
            return {"eligible": False, "reason": "Missing auto-merge-candidate label"}

        # Check file count
        file_count = len(pr_info["files"])
        if file_count > self.config.max_files:
            return {
                "eligible": False,
                "reason": f"Too many files changed ({file_count} > {self.config.max_files})",
            }

        # Check lines changed
        lines_changed = pr_info["additions"] + pr_info["deletions"]
        if lines_changed > self.config.max_lines_changed:
            return {
                "eligible": False,
                "reason": f"Too many lines changed ({lines_changed} > {self.config.max_lines_changed})",
            }

        # Check excluded paths
        if self.config.excluded_paths:
            for file in pr_info["files"]:
                file_path = file.get("path", "")
                for excluded in self.config.excluded_paths:
                    if excluded in file_path:
                        return {
                            "eligible": False,
                            "reason": f"Changes in excluded path: {excluded}",
                        }

        # Check CI status
        checks_status = self._check_ci_status(pr_info["checks"])
        if not checks_status["all_passed"]:
            return {"eligible": False, "reason": checks_status["reason"]}

        return {"eligible": True, "reason": "All criteria met"}

    def _check_ci_status(self, checks: list[dict[str, Any]]) -> dict[str, Any]:
        """
        Check CI status for all checks.

        Args:
            checks: List of status checks

        Returns:
            Dict with all_passed status and reason
        """
        if not checks:
            # No checks configured - allow merge
            return {"all_passed": True, "reason": "No checks configured"}

        failed_checks = []
        pending_checks = []

        for check in checks:
            # Handle both check runs and status contexts
            status = check.get("status") or check.get("state")
            conclusion = check.get("conclusion")

            if status == "COMPLETED" and conclusion == "SUCCESS":
                continue
            elif status in ("PENDING", "IN_PROGRESS", "QUEUED"):
                pending_checks.append(check.get("name", "unknown"))
            else:
                failed_checks.append(check.get("name", "unknown"))

        if failed_checks:
            return {
                "all_passed": False,
                "reason": f"Failed checks: {', '.join(failed_checks)}",
            }

        if pending_checks:
            return {
                "all_passed": False,
                "reason": f"Pending checks: {', '.join(pending_checks)}",
            }

        return {"all_passed": True, "reason": "All checks passed"}

    def _merge_pr(self, pr_number: int) -> dict[str, Any]:
        """
        Merge the PR using gh CLI.

        Args:
            pr_number: PR number to merge

        Returns:
            Dict with merge result
        """
        result = subprocess.run(
            [
                "gh",
                "pr",
                "merge",
                str(pr_number),
                "--auto",
                "--squash",  # Squash commits for clean history
                "--delete-branch",  # Clean up after merge
            ],
            cwd=self.workspace_root,
            capture_output=True,
            text=True,
            timeout=30,
        )

        if result.returncode != 0:
            raise RuntimeError(f"Merge failed: {result.stderr}")

        # Parse merge SHA from output
        merge_sha = None
        for line in result.stdout.split("\n"):
            if "merged" in line.lower():
                # Extract SHA if present
                parts = line.split()
                for part in parts:
                    if len(part) == 40 and all(c in "0123456789abcdef" for c in part):
                        merge_sha = part
                        break

        return {"merge_sha": merge_sha, "output": result.stdout}

    def _log_audit(
        self,
        pr_number: int,
        action: str,
        pr_info: dict[str, Any],
        merge_result: dict[str, Any] | None = None,
    ):
        """
        Log audit entry for auto-merge action.

        Args:
            pr_number: PR number
            action: Action taken (merged, dry_run, rejected)
            pr_info: PR information
            merge_result: Merge result if applicable
        """
        audit_entry = {
            "timestamp": datetime.now().isoformat(),
            "pr_number": pr_number,
            "action": action,
            "pr_title": pr_info.get("title"),
            "pr_author": pr_info.get("author"),
            "files_changed": len(pr_info.get("files", [])),
            "lines_changed": pr_info.get("additions", 0) + pr_info.get("deletions", 0),
        }

        if merge_result:
            audit_entry["merge_sha"] = merge_result.get("merge_sha")

        with open(self.audit_log_path, "a") as f:
            f.write(json.dumps(audit_entry) + "\n")

        logger.info(f"Audit log: {action} PR #{pr_number}")

    def get_audit_log(self, limit: int = 100) -> list[dict[str, Any]]:
        """
        Get recent audit log entries.

        Args:
            limit: Maximum number of entries to return

        Returns:
            List of audit entries
        """
        if not self.audit_log_path.exists():
            return []

        entries = []
        with open(self.audit_log_path) as f:
            for line in f:
                try:
                    entries.append(json.loads(line))
                except Exception:
                    continue

        return entries[-limit:]
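
A minimal usage sketch for AutoMerge, not part of the package: the repository path, PR number, and excluded paths are invented for illustration, and the gh CLI must be installed and authenticated for _get_pr_info and _merge_pr to succeed.

from pathlib import Path

from ctrlcode.cleanup import AutoMerge, AutoMergeConfig

config = AutoMergeConfig(
    enabled=True,
    wait_minutes=30,
    max_files=5,
    max_lines_changed=100,
    excluded_paths=["migrations/", ".github/"],
    dry_run=True,  # log what would happen without merging
)

auto_merge = AutoMerge(workspace_root=Path("/path/to/repo"), config=config)
result = auto_merge.check_and_merge_pr(pr_number=123)
print(result["status"], result.get("reason") or result.get("message"))

# Dry-run and merge decisions are appended to .ctrlcode/auto_merge_audit.jsonl:
for entry in auto_merge.get_audit_log(limit=10):
    print(entry["timestamp"], entry["action"], entry["pr_number"])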