codebase-intel 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- codebase_intel/__init__.py +3 -0
- codebase_intel/analytics/__init__.py +1 -0
- codebase_intel/analytics/benchmark.py +406 -0
- codebase_intel/analytics/feedback.py +496 -0
- codebase_intel/analytics/tracker.py +439 -0
- codebase_intel/cli/__init__.py +1 -0
- codebase_intel/cli/main.py +740 -0
- codebase_intel/contracts/__init__.py +1 -0
- codebase_intel/contracts/auto_generator.py +438 -0
- codebase_intel/contracts/evaluator.py +531 -0
- codebase_intel/contracts/models.py +433 -0
- codebase_intel/contracts/registry.py +225 -0
- codebase_intel/core/__init__.py +1 -0
- codebase_intel/core/config.py +248 -0
- codebase_intel/core/exceptions.py +454 -0
- codebase_intel/core/types.py +375 -0
- codebase_intel/decisions/__init__.py +1 -0
- codebase_intel/decisions/miner.py +297 -0
- codebase_intel/decisions/models.py +302 -0
- codebase_intel/decisions/store.py +411 -0
- codebase_intel/drift/__init__.py +1 -0
- codebase_intel/drift/detector.py +443 -0
- codebase_intel/graph/__init__.py +1 -0
- codebase_intel/graph/builder.py +391 -0
- codebase_intel/graph/parser.py +1232 -0
- codebase_intel/graph/query.py +377 -0
- codebase_intel/graph/storage.py +736 -0
- codebase_intel/mcp/__init__.py +1 -0
- codebase_intel/mcp/server.py +710 -0
- codebase_intel/orchestrator/__init__.py +1 -0
- codebase_intel/orchestrator/assembler.py +649 -0
- codebase_intel-0.1.0.dist-info/METADATA +361 -0
- codebase_intel-0.1.0.dist-info/RECORD +36 -0
- codebase_intel-0.1.0.dist-info/WHEEL +4 -0
- codebase_intel-0.1.0.dist-info/entry_points.txt +2 -0
- codebase_intel-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,531 @@
|
|
|
1
|
+
"""Contract evaluator — checks code against quality contracts.
|
|
2
|
+
|
|
3
|
+
This is where contracts meet reality. The evaluator takes a file (or set of files)
|
|
4
|
+
and checks them against all applicable contracts, producing a report of violations.
|
|
5
|
+
|
|
6
|
+
Two modes:
|
|
7
|
+
1. Pre-generation: agent reads contracts BEFORE writing code (guidance mode)
|
|
8
|
+
2. Post-generation: code is checked AFTER writing (validation mode)
|
|
9
|
+
|
|
10
|
+
Edge cases:
|
|
11
|
+
- Rule regex doesn't compile: skip that rule, log error
|
|
12
|
+
- Rule matches in comments/strings (false positive): basic heuristic to
|
|
13
|
+
exclude matches inside comments and string literals
|
|
14
|
+
- Threshold metric not computable: some metrics need AST data (complexity),
|
|
15
|
+
others just need line count. Degrade gracefully if AST unavailable.
|
|
16
|
+
- File is too large for detailed analysis: report size threshold violation
|
|
17
|
+
but skip per-function checks
|
|
18
|
+
- Contract applies but file is unparseable (syntax errors): report what
|
|
19
|
+
we can (line count, import checks) without AST-dependent rules
|
|
20
|
+
- Multiple contracts have conflicting rules: detect and report conflicts
|
|
21
|
+
separately from violations
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
from __future__ import annotations
|
|
25
|
+
|
|
26
|
+
import logging
|
|
27
|
+
import re
|
|
28
|
+
from dataclasses import dataclass, field
|
|
29
|
+
from pathlib import Path
|
|
30
|
+
from typing import TYPE_CHECKING
|
|
31
|
+
|
|
32
|
+
from codebase_intel.contracts.models import (
|
|
33
|
+
ContractRule,
|
|
34
|
+
QualityContract,
|
|
35
|
+
RuleKind,
|
|
36
|
+
)
|
|
37
|
+
from codebase_intel.core.exceptions import ContractViolationError, ErrorContext
|
|
38
|
+
from codebase_intel.core.types import ContractSeverity, GraphNode, Language, NodeKind
|
|
39
|
+
|
|
40
|
+
if TYPE_CHECKING:
|
|
41
|
+
from codebase_intel.contracts.registry import ContractRegistry
|
|
42
|
+
from codebase_intel.graph.storage import GraphStorage
|
|
43
|
+
|
|
44
|
+
logger = logging.getLogger(__name__)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
@dataclass
class Violation:
    """A single contract violation found in the code."""

    contract_id: str  # ID of the contract whose rule was violated
    rule_id: str  # ID of the specific rule within that contract
    rule_name: str  # Human-readable rule name (shown in reports)
    severity: ContractSeverity  # ERROR blocks; WARNING/INFO are advisory
    file_path: Path  # File in which the violation was found
    line: int | None = None  # 1-based line number, when known
    message: str = ""  # Description of what was violated
    fix_suggestion: str | None = None  # Optional remediation hint from the rule
    matched_text: str | None = None  # The violating code fragment
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
@dataclass
class EvaluationReport:
    """Complete evaluation report for a set of files."""

    files_checked: int = 0
    rules_evaluated: int = 0
    violations: list[Violation] = field(default_factory=list)
    conflicts: list[str] = field(default_factory=list)
    skipped_rules: list[str] = field(default_factory=list)
    errors: list[str] = field(default_factory=list)

    @property
    def error_count(self) -> int:
        """Number of ERROR-severity violations."""
        return len([v for v in self.violations if v.severity == ContractSeverity.ERROR])

    @property
    def warning_count(self) -> int:
        """Number of WARNING-severity violations."""
        return len([v for v in self.violations if v.severity == ContractSeverity.WARNING])

    @property
    def has_blocking_violations(self) -> bool:
        """True if any ERROR-level violations exist."""
        return self.error_count > 0

    def to_context_string(self) -> str:
        """Serialize for inclusion in agent context."""
        if not self.violations:
            return "No contract violations found."

        # Group violations per file so the output reads file-by-file.
        grouped: dict[Path, list[Violation]] = {}
        for violation in self.violations:
            grouped.setdefault(violation.file_path, []).append(violation)

        out: list[str] = [
            f"## Contract Evaluation: {self.error_count} errors, {self.warning_count} warnings",
            "",
        ]

        for path, file_violations in grouped.items():
            out.append(f"### {path.name}")
            for violation in file_violations:
                badge = "ERROR" if violation.severity == ContractSeverity.ERROR else "WARN"
                loc = f":{violation.line}" if violation.line else ""
                out.append(f"- [{badge}] {violation.rule_name}{loc}: {violation.message}")
                if violation.fix_suggestion:
                    out.append(f"  Fix: {violation.fix_suggestion}")
            out.append("")

        if self.conflicts:
            out.append("### Contract Conflicts")
            out.extend(f"- {conflict}" for conflict in self.conflicts)

        return "\n".join(out)
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
class ContractEvaluator:
|
|
120
|
+
"""Evaluates code against quality contracts."""
|
|
121
|
+
|
|
122
|
+
    def __init__(
        self,
        registry: ContractRegistry,
        storage: GraphStorage | None = None,
    ) -> None:
        """Create an evaluator.

        Args:
            registry: Source of quality contracts to evaluate against.
            storage: Optional code-graph storage; when present, enables
                graph-dependent checks (per-function metrics, import checks).
        """
        self._registry = registry
        self._storage = storage
        # Regex cache keyed by rule id; a value of None records that the
        # pattern failed to compile, so the rule is skipped without retrying.
        self._compiled_patterns: dict[str, re.Pattern[str] | None] = {}
|
|
130
|
+
|
|
131
|
+
    async def evaluate_files(
        self,
        file_paths: list[Path],
        *,
        strict_mode: bool = False,
    ) -> EvaluationReport:
        """Evaluate a set of files against all applicable contracts.

        Args:
            file_paths: Files to check.
            strict_mode: When True, WARNING violations are escalated to ERROR.

        Returns:
            An EvaluationReport aggregating violations, conflicts, and
            per-file read errors.

        Edge cases:
        - File doesn't exist (deleted since task started): skip with warning
        - File is binary: skip
        - File has no applicable contracts: still count as checked (0 violations)
        """
        report = EvaluationReport()
        contracts = self._registry.get_all()

        for fp in file_paths:
            if not fp.exists():
                report.errors.append(f"File not found: {fp}")
                continue

            try:
                content = fp.read_text(encoding="utf-8")
            except (UnicodeDecodeError, OSError) as exc:
                # UnicodeDecodeError covers binary files; OSError covers
                # permission/IO failures. Record and move on either way.
                report.errors.append(f"Cannot read {fp}: {exc}")
                continue

            # Unreadable files are NOT counted as checked (see above).
            report.files_checked += 1
            language = self._detect_language(fp)
            is_test = self._is_test_file(fp)

            # Get graph nodes for this file (if graph available)
            file_nodes: list[GraphNode] = []
            if self._storage:
                file_nodes = await self._storage.get_nodes_by_file(fp)

            for contract in contracts:
                rules = contract.rules_for_file(
                    fp, language=language, is_test=is_test
                )
                for rule in rules:
                    report.rules_evaluated += 1
                    violations = self._evaluate_rule(
                        rule, contract.id, fp, content, file_nodes
                    )
                    if strict_mode:
                        # Escalate in place: strict mode treats warnings
                        # as blocking errors.
                        for v in violations:
                            if v.severity == ContractSeverity.WARNING:
                                v.severity = ContractSeverity.ERROR
                    report.violations.extend(violations)

        # Check for contract conflicts
        report.conflicts = self._detect_conflicts(contracts, file_paths)

        return report
|
|
186
|
+
|
|
187
|
+
def evaluate_for_guidance(
|
|
188
|
+
self,
|
|
189
|
+
file_paths: list[Path],
|
|
190
|
+
) -> list[QualityContract]:
|
|
191
|
+
"""Get applicable contracts for pre-generation guidance.
|
|
192
|
+
|
|
193
|
+
Returns contracts (not violations) so the agent knows the rules
|
|
194
|
+
BEFORE writing code.
|
|
195
|
+
|
|
196
|
+
Edge case: file doesn't exist yet (agent will create it). Use the
|
|
197
|
+
intended path to match contracts by scope pattern.
|
|
198
|
+
"""
|
|
199
|
+
contracts = self._registry.get_all()
|
|
200
|
+
applicable: list[QualityContract] = []
|
|
201
|
+
|
|
202
|
+
for contract in contracts:
|
|
203
|
+
for fp in file_paths:
|
|
204
|
+
language = self._detect_language(fp)
|
|
205
|
+
if contract.scope.matches(fp, language):
|
|
206
|
+
applicable.append(contract)
|
|
207
|
+
break
|
|
208
|
+
|
|
209
|
+
# Sort by priority (highest first)
|
|
210
|
+
return sorted(applicable, key=lambda c: c.priority, reverse=True)
|
|
211
|
+
|
|
212
|
+
def _evaluate_rule(
|
|
213
|
+
self,
|
|
214
|
+
rule: ContractRule,
|
|
215
|
+
contract_id: str,
|
|
216
|
+
file_path: Path,
|
|
217
|
+
content: str,
|
|
218
|
+
file_nodes: list[GraphNode],
|
|
219
|
+
) -> list[Violation]:
|
|
220
|
+
"""Evaluate a single rule against a file.
|
|
221
|
+
|
|
222
|
+
Dispatches to type-specific evaluation methods.
|
|
223
|
+
"""
|
|
224
|
+
if rule.kind == RuleKind.PATTERN:
|
|
225
|
+
return self._evaluate_pattern_rule(rule, contract_id, file_path, content)
|
|
226
|
+
elif rule.kind == RuleKind.THRESHOLD:
|
|
227
|
+
return self._evaluate_threshold_rule(
|
|
228
|
+
rule, contract_id, file_path, content, file_nodes
|
|
229
|
+
)
|
|
230
|
+
elif rule.kind == RuleKind.ARCHITECTURAL:
|
|
231
|
+
return self._evaluate_architectural_rule(
|
|
232
|
+
rule, contract_id, file_path, content, file_nodes
|
|
233
|
+
)
|
|
234
|
+
elif rule.kind == RuleKind.AI_ANTIPATTERN:
|
|
235
|
+
return self._evaluate_ai_antipattern(
|
|
236
|
+
rule, contract_id, file_path, content, file_nodes
|
|
237
|
+
)
|
|
238
|
+
return []
|
|
239
|
+
|
|
240
|
+
def _evaluate_pattern_rule(
|
|
241
|
+
self,
|
|
242
|
+
rule: ContractRule,
|
|
243
|
+
contract_id: str,
|
|
244
|
+
file_path: Path,
|
|
245
|
+
content: str,
|
|
246
|
+
) -> list[Violation]:
|
|
247
|
+
"""Evaluate a regex-based pattern rule.
|
|
248
|
+
|
|
249
|
+
Edge cases:
|
|
250
|
+
- Regex matches inside comments: basic heuristic to skip comment lines
|
|
251
|
+
- Regex matches inside strings: much harder to detect, accept some false positives
|
|
252
|
+
- Pattern is invalid regex: compile once, cache, skip if invalid
|
|
253
|
+
- Multi-line patterns: use re.MULTILINE flag
|
|
254
|
+
"""
|
|
255
|
+
violations: list[Violation] = []
|
|
256
|
+
|
|
257
|
+
if rule.pattern:
|
|
258
|
+
compiled = self._compile_pattern(rule.id, rule.pattern)
|
|
259
|
+
if compiled:
|
|
260
|
+
for line_no, line in enumerate(content.split("\n"), 1):
|
|
261
|
+
# Basic comment filtering
|
|
262
|
+
stripped = line.strip()
|
|
263
|
+
if stripped.startswith(("#", "//", "/*", "*")):
|
|
264
|
+
continue
|
|
265
|
+
|
|
266
|
+
match = compiled.search(line)
|
|
267
|
+
if match:
|
|
268
|
+
violations.append(Violation(
|
|
269
|
+
contract_id=contract_id,
|
|
270
|
+
rule_id=rule.id,
|
|
271
|
+
rule_name=rule.name,
|
|
272
|
+
severity=rule.effective_severity,
|
|
273
|
+
file_path=file_path,
|
|
274
|
+
line=line_no,
|
|
275
|
+
message=rule.description,
|
|
276
|
+
fix_suggestion=rule.fix_suggestion,
|
|
277
|
+
matched_text=match.group(0),
|
|
278
|
+
))
|
|
279
|
+
|
|
280
|
+
if rule.anti_pattern:
|
|
281
|
+
# Check that the pattern IS present (absence = violation)
|
|
282
|
+
compiled = self._compile_pattern(f"{rule.id}_anti", rule.anti_pattern)
|
|
283
|
+
if compiled and not compiled.search(content):
|
|
284
|
+
violations.append(Violation(
|
|
285
|
+
contract_id=contract_id,
|
|
286
|
+
rule_id=rule.id,
|
|
287
|
+
rule_name=rule.name,
|
|
288
|
+
severity=rule.effective_severity,
|
|
289
|
+
file_path=file_path,
|
|
290
|
+
message=f"Required pattern not found: {rule.anti_pattern}",
|
|
291
|
+
fix_suggestion=rule.fix_suggestion,
|
|
292
|
+
))
|
|
293
|
+
|
|
294
|
+
return violations
|
|
295
|
+
|
|
296
|
+
def _evaluate_threshold_rule(
|
|
297
|
+
self,
|
|
298
|
+
rule: ContractRule,
|
|
299
|
+
contract_id: str,
|
|
300
|
+
file_path: Path,
|
|
301
|
+
content: str,
|
|
302
|
+
file_nodes: list[GraphNode],
|
|
303
|
+
) -> list[Violation]:
|
|
304
|
+
"""Evaluate a measurable threshold rule.
|
|
305
|
+
|
|
306
|
+
Supported metrics:
|
|
307
|
+
- max_lines: total file line count
|
|
308
|
+
- max_function_lines: per-function line count (requires graph nodes)
|
|
309
|
+
- max_complexity: cyclomatic complexity (future — requires AST analysis)
|
|
310
|
+
|
|
311
|
+
Edge case: metric requires graph data but graph is unavailable →
|
|
312
|
+
skip rule with a note in skipped_rules.
|
|
313
|
+
"""
|
|
314
|
+
violations: list[Violation] = []
|
|
315
|
+
metric = rule.threshold_metric
|
|
316
|
+
threshold = rule.threshold_value
|
|
317
|
+
|
|
318
|
+
if metric is None or threshold is None:
|
|
319
|
+
return []
|
|
320
|
+
|
|
321
|
+
if metric == "max_lines":
|
|
322
|
+
line_count = content.count("\n") + 1
|
|
323
|
+
if line_count > threshold:
|
|
324
|
+
violations.append(Violation(
|
|
325
|
+
contract_id=contract_id,
|
|
326
|
+
rule_id=rule.id,
|
|
327
|
+
rule_name=rule.name,
|
|
328
|
+
severity=rule.effective_severity,
|
|
329
|
+
file_path=file_path,
|
|
330
|
+
message=f"File has {line_count} lines (max: {int(threshold)})",
|
|
331
|
+
fix_suggestion=rule.fix_suggestion,
|
|
332
|
+
))
|
|
333
|
+
|
|
334
|
+
elif metric == "max_function_lines":
|
|
335
|
+
# Check each function/method node
|
|
336
|
+
for node in file_nodes:
|
|
337
|
+
if node.kind in (NodeKind.FUNCTION, NodeKind.METHOD) and node.line_range:
|
|
338
|
+
func_lines = node.line_range.span
|
|
339
|
+
if func_lines > threshold:
|
|
340
|
+
violations.append(Violation(
|
|
341
|
+
contract_id=contract_id,
|
|
342
|
+
rule_id=rule.id,
|
|
343
|
+
rule_name=rule.name,
|
|
344
|
+
severity=rule.effective_severity,
|
|
345
|
+
file_path=file_path,
|
|
346
|
+
line=node.line_range.start,
|
|
347
|
+
message=(
|
|
348
|
+
f"Function '{node.name}' has {func_lines} lines "
|
|
349
|
+
f"(max: {int(threshold)})"
|
|
350
|
+
),
|
|
351
|
+
fix_suggestion=rule.fix_suggestion,
|
|
352
|
+
))
|
|
353
|
+
|
|
354
|
+
return violations
|
|
355
|
+
|
|
356
|
+
def _evaluate_architectural_rule(
|
|
357
|
+
self,
|
|
358
|
+
rule: ContractRule,
|
|
359
|
+
contract_id: str,
|
|
360
|
+
file_path: Path,
|
|
361
|
+
content: str,
|
|
362
|
+
file_nodes: list[GraphNode],
|
|
363
|
+
) -> list[Violation]:
|
|
364
|
+
"""Evaluate an architectural rule (structural constraints).
|
|
365
|
+
|
|
366
|
+
These rules typically need graph data to check dependency direction,
|
|
367
|
+
layer violations, etc. Without a graph, we can still do basic
|
|
368
|
+
import-based checks.
|
|
369
|
+
"""
|
|
370
|
+
# Architectural rules are complex — for now, basic pattern matching
|
|
371
|
+
# on imports. Full evaluation requires graph traversal (future).
|
|
372
|
+
violations: list[Violation] = []
|
|
373
|
+
|
|
374
|
+
if rule.pattern:
|
|
375
|
+
return self._evaluate_pattern_rule(rule, contract_id, file_path, content)
|
|
376
|
+
|
|
377
|
+
return violations
|
|
378
|
+
|
|
379
|
+
def _evaluate_ai_antipattern(
|
|
380
|
+
self,
|
|
381
|
+
rule: ContractRule,
|
|
382
|
+
contract_id: str,
|
|
383
|
+
file_path: Path,
|
|
384
|
+
content: str,
|
|
385
|
+
file_nodes: list[GraphNode],
|
|
386
|
+
) -> list[Violation]:
|
|
387
|
+
"""Evaluate AI-specific anti-pattern rules.
|
|
388
|
+
|
|
389
|
+
These are heuristic-based and accept some false positives in exchange
|
|
390
|
+
for catching common AI mistakes.
|
|
391
|
+
|
|
392
|
+
Edge cases:
|
|
393
|
+
- "No restating comments" might flag legitimate explanatory comments
|
|
394
|
+
→ severity is INFO, not ERROR, so it's guidance not blocking
|
|
395
|
+
- "No hallucinated imports" needs the graph to verify → degrade
|
|
396
|
+
to pattern check without graph
|
|
397
|
+
"""
|
|
398
|
+
violations: list[Violation] = []
|
|
399
|
+
|
|
400
|
+
# Delegate to pattern matching if a pattern is defined
|
|
401
|
+
if rule.pattern:
|
|
402
|
+
return self._evaluate_pattern_rule(rule, contract_id, file_path, content)
|
|
403
|
+
|
|
404
|
+
# Rule-specific heuristics
|
|
405
|
+
if rule.id == "no-hallucinated-imports" and self._storage:
|
|
406
|
+
violations.extend(
|
|
407
|
+
self._check_hallucinated_imports(
|
|
408
|
+
rule, contract_id, file_path, content, file_nodes
|
|
409
|
+
)
|
|
410
|
+
)
|
|
411
|
+
|
|
412
|
+
if rule.id == "no-over-abstraction":
|
|
413
|
+
violations.extend(
|
|
414
|
+
self._check_over_abstraction(
|
|
415
|
+
rule, contract_id, file_path, content, file_nodes
|
|
416
|
+
)
|
|
417
|
+
)
|
|
418
|
+
|
|
419
|
+
return violations
|
|
420
|
+
|
|
421
|
+
    def _check_hallucinated_imports(
        self,
        rule: ContractRule,
        contract_id: str,
        file_path: Path,
        content: str,
        file_nodes: list[GraphNode],
    ) -> list[Violation]:
        """Check for imports that don't resolve to real files or packages.

        Currently a placeholder that always returns no violations.

        Edge cases (for the eventual implementation):
        - Standard library imports: always valid (we maintain a known-stdlib list)
        - Third-party imports: check against installed packages (best effort)
        - Relative imports: resolve against file position
        - Dynamic imports: can't check statically, skip
        """
        # This is a placeholder — full implementation would use the graph
        # to verify that import edges resolve to actual nodes
        return []
|
|
440
|
+
|
|
441
|
+
    def _check_over_abstraction(
        self,
        rule: ContractRule,
        contract_id: str,
        file_path: Path,
        content: str,
        file_nodes: list[GraphNode],
    ) -> list[Violation]:
        """Detect potential over-abstraction patterns.

        Currently a placeholder that always returns no violations.

        Planned heuristics:
        - Base class with only one subclass
        - Factory function that returns only one type
        - Utility function called from only one place
        - Interface with only one implementation

        This requires graph data to check usage counts.
        """
        # Placeholder — requires graph traversal for caller count analysis
        return []
|
|
461
|
+
|
|
462
|
+
def _detect_conflicts(
|
|
463
|
+
self,
|
|
464
|
+
contracts: list[QualityContract],
|
|
465
|
+
file_paths: list[Path],
|
|
466
|
+
) -> list[str]:
|
|
467
|
+
"""Detect rules from different contracts that conflict.
|
|
468
|
+
|
|
469
|
+
Edge case: two contracts both have a "max function lines" rule
|
|
470
|
+
but with different thresholds. The stricter one wins.
|
|
471
|
+
|
|
472
|
+
Edge case: "no helper functions" + "max 50 lines per function"
|
|
473
|
+
→ inherently conflicting for complex logic. Report but don't resolve.
|
|
474
|
+
"""
|
|
475
|
+
conflicts: list[str] = []
|
|
476
|
+
|
|
477
|
+
# Check for duplicate threshold rules with different values
|
|
478
|
+
threshold_rules: dict[str, list[tuple[str, float, int]]] = {}
|
|
479
|
+
for contract in contracts:
|
|
480
|
+
for rule in contract.rules:
|
|
481
|
+
if rule.kind == RuleKind.THRESHOLD and rule.threshold_metric:
|
|
482
|
+
threshold_rules.setdefault(rule.threshold_metric, []).append(
|
|
483
|
+
(contract.id, rule.threshold_value or 0, contract.priority)
|
|
484
|
+
)
|
|
485
|
+
|
|
486
|
+
for metric, rules in threshold_rules.items():
|
|
487
|
+
if len(rules) > 1:
|
|
488
|
+
values = set(r[1] for r in rules)
|
|
489
|
+
if len(values) > 1:
|
|
490
|
+
details = ", ".join(
|
|
491
|
+
f"{r[0]}: {r[1]}" for r in sorted(rules, key=lambda x: -x[2])
|
|
492
|
+
)
|
|
493
|
+
conflicts.append(
|
|
494
|
+
f"Conflicting {metric} thresholds: {details}. "
|
|
495
|
+
f"Highest-priority contract wins."
|
|
496
|
+
)
|
|
497
|
+
|
|
498
|
+
return conflicts
|
|
499
|
+
|
|
500
|
+
def _compile_pattern(self, cache_key: str, pattern: str) -> re.Pattern[str] | None:
|
|
501
|
+
"""Compile and cache a regex pattern.
|
|
502
|
+
|
|
503
|
+
Edge case: invalid regex → log error, return None, skip this rule.
|
|
504
|
+
"""
|
|
505
|
+
if cache_key in self._compiled_patterns:
|
|
506
|
+
return self._compiled_patterns[cache_key]
|
|
507
|
+
|
|
508
|
+
try:
|
|
509
|
+
compiled = re.compile(pattern, re.MULTILINE)
|
|
510
|
+
self._compiled_patterns[cache_key] = compiled
|
|
511
|
+
return compiled
|
|
512
|
+
except re.error as exc:
|
|
513
|
+
logger.error("Invalid regex in rule %s: %s — %s", cache_key, pattern, exc)
|
|
514
|
+
self._compiled_patterns[cache_key] = None
|
|
515
|
+
return None
|
|
516
|
+
|
|
517
|
+
    def _detect_language(self, file_path: Path) -> Language:
        """Detect language from file extension."""
        # NOTE(review): local import presumably avoids a circular import
        # with codebase_intel.graph.parser — confirm before hoisting it
        # to module level.
        from codebase_intel.graph.parser import detect_language
        return detect_language(file_path)
|
|
521
|
+
|
|
522
|
+
def _is_test_file(self, file_path: Path) -> bool:
|
|
523
|
+
"""Check if file is a test file."""
|
|
524
|
+
name = file_path.stem.lower()
|
|
525
|
+
parts = [p.lower() for p in file_path.parts]
|
|
526
|
+
return (
|
|
527
|
+
name.startswith("test_")
|
|
528
|
+
or name.endswith("_test")
|
|
529
|
+
or name.endswith("_spec")
|
|
530
|
+
or any(p in ("tests", "test", "__tests__", "spec") for p in parts)
|
|
531
|
+
)
|