llm-code-validator 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,3 @@
1
+ """Deterministic API-drift checker for Python source code."""
2
+
3
+ __version__ = "0.1.0"
@@ -0,0 +1,141 @@
1
+ from __future__ import annotations
2
+
3
+ import argparse
4
+ import json
5
+ import platform
6
+ import statistics
7
+ import time
8
+ import tracemalloc
9
+ from datetime import date
10
+ from pathlib import Path
11
+
12
+ from .core import check_file, check_source, iter_python_files
13
+ from .versioning import build_version_context
14
+
15
+
16
def run_benchmark(paths: list[str]) -> dict[str, object]:
    """Benchmark drift checking over *paths* and return a JSON-ready report.

    Walks every Python file under ``paths``, times each ``check_file`` call,
    and reports throughput, latency percentiles, and peak memory. The
    precision/recall fields are ``None`` because no labeled data is involved
    (see ``run_labeled_benchmark`` for labeled runs).
    """
    files = iter_python_files(paths)
    version_context = build_version_context(paths)
    timings: list[float] = []
    diagnostics = 0
    tracemalloc.start()
    try:
        start = time.perf_counter()
        for path in files:
            file_start = time.perf_counter()
            result = check_file(path, version_context)
            timings.append(time.perf_counter() - file_start)
            diagnostics += len(result.diagnostics)
        total = time.perf_counter() - start
        _, peak = tracemalloc.get_traced_memory()
    finally:
        # Always stop tracing so a failing check_file does not leave the
        # process-global tracemalloc hook enabled for the rest of the run.
        tracemalloc.stop()

    p50 = statistics.median(timings) if timings else 0.0
    # quantiles(n=20)[18] is the 95th percentile; it needs enough samples, so
    # fall back to max() (or 0.0) for small runs.
    p95 = statistics.quantiles(timings, n=20)[18] if len(timings) >= 20 else (max(timings) if timings else 0.0)
    files_per_second = len(files) / total if total else 0.0
    return {
        "files": len(files),
        "diagnostics": diagnostics,
        "total_seconds": total,
        "p50_ms": p50 * 1000,
        "p95_ms": p95 * 1000,
        "files_per_second": files_per_second,
        "peak_ram_mb": peak / (1024 * 1024),
        "hardware": platform.machine(),
        "os": platform.platform(),
        "python_version": platform.python_version(),
        "precision": None,
        "recall": None,
        "false_positives": None,
        "false_negatives": None,
    }
51
+
52
+
53
def run_labeled_benchmark(dataset_path: str | Path) -> dict[str, object]:
    """Benchmark against a labeled JSON dataset and compute precision/recall.

    The dataset is a JSON list of cases, each with ``id``, ``code``, optional
    ``path``, and ``expected_diagnostics`` entries carrying ``library`` and
    ``symbol``. Matching is done on (library, symbol) pairs per case.
    """
    dataset_file = Path(dataset_path)
    cases = json.loads(dataset_file.read_text(encoding="utf-8"))
    timings: list[float] = []
    true_positives = 0
    false_positives = 0
    false_negatives = 0
    false_positive_examples: list[dict[str, str]] = []
    false_negative_examples: list[dict[str, str]] = []
    total_expected = 0
    total_diagnostics = 0

    tracemalloc.start()
    try:
        start = time.perf_counter()
        for case in cases:
            case_start = time.perf_counter()
            result = check_source(case["code"], case.get("path") or f"{case['id']}.py")
            timings.append(time.perf_counter() - case_start)
            expected = {(item["library"], item["symbol"]) for item in case.get("expected_diagnostics", [])}
            actual = {(diagnostic.library, diagnostic.symbol) for diagnostic in result.diagnostics}
            total_expected += len(expected)
            total_diagnostics += len(actual)
            true_positives += len(expected & actual)
            case_false_positives = actual - expected
            case_false_negatives = expected - actual
            false_positives += len(case_false_positives)
            false_negatives += len(case_false_negatives)
            # Sorted iteration keeps the example lists deterministic across runs.
            for library, symbol in sorted(case_false_positives):
                false_positive_examples.append(
                    {"case_id": case["id"], "library": library, "symbol": symbol, "reason": "unexpected diagnostic"}
                )
            for library, symbol in sorted(case_false_negatives):
                false_negative_examples.append(
                    {"case_id": case["id"], "library": library, "symbol": symbol, "reason": "missing rule or extraction gap"}
                )
        total = time.perf_counter() - start
        _, peak = tracemalloc.get_traced_memory()
    finally:
        # Always stop tracing so a failing case does not leave the
        # process-global tracemalloc hook enabled for the rest of the run.
        tracemalloc.stop()

    # Degenerate denominators (no predictions / no expectations) score 1.0.
    precision = true_positives / (true_positives + false_positives) if true_positives + false_positives else 1.0
    recall = true_positives / (true_positives + false_negatives) if true_positives + false_negatives else 1.0
    p50 = statistics.median(timings) if timings else 0.0
    p95 = statistics.quantiles(timings, n=20)[18] if len(timings) >= 20 else (max(timings) if timings else 0.0)
    return {
        "dataset": str(dataset_file),
        "benchmark_date": date.today().isoformat(),
        "cases": len(cases),
        "files": len(cases),
        "diagnostics": total_diagnostics,
        "expected_diagnostics": total_expected,
        "true_positives": true_positives,
        "false_positives": false_positives,
        "false_negatives": false_negatives,
        "false_positive_examples": false_positive_examples,
        "false_negative_examples": false_negative_examples,
        "precision": precision,
        "recall": recall,
        "total_seconds": total,
        "p50_ms": p50 * 1000,
        "p95_ms": p95 * 1000,
        "files_per_second": len(cases) / total if total else 0.0,
        "peak_ram_mb": peak / (1024 * 1024),
        "hardware": platform.machine(),
        "os": platform.platform(),
        "python_version": platform.python_version(),
    }
119
+
120
+
121
def main(argv: list[str] | None = None) -> int:
    """Run the benchmark CLI: labeled mode with --dataset, path mode otherwise."""
    parser = argparse.ArgumentParser(prog="python -m llm_code_validator.benchmark")
    parser.add_argument("paths", nargs="*")
    parser.add_argument("--dataset", help="Run a labeled benchmark dataset JSON file.")
    parser.add_argument("--output", help="Write JSON benchmark output to a file.")
    namespace = parser.parse_args(argv)

    if namespace.dataset:
        payload = run_labeled_benchmark(namespace.dataset)
    elif namespace.paths:
        payload = run_benchmark(namespace.paths)
    else:
        # parser.error raises SystemExit, so payload is always bound below.
        parser.error("provide one or more paths or --dataset")

    rendered = json.dumps(payload, indent=2, sort_keys=True)
    if namespace.output:
        Path(namespace.output).write_text(rendered + "\n", encoding="utf-8")
    print(rendered)
    return 0
138
+
139
+
140
# Module is executable: `python -m llm_code_validator.benchmark ...`
if __name__ == "__main__":
    raise SystemExit(main())
@@ -0,0 +1,105 @@
1
+ from __future__ import annotations
2
+
3
+ import argparse
4
+ import sys
5
+
6
+ from .core import CheckResult, check_paths, check_stdin, staged_python_files
7
+ from .fixes import fix_file
8
+ from .formatting import format_github, format_json, format_text
9
+ from .signatures import validate_signature_database
10
+ from .versioning import build_version_context
11
+
12
+
13
def check_staged(requirements: str | None = None, python_version: str | None = None) -> CheckResult:
    """Check the Python files currently staged in git.

    ``check_paths`` already returns an empty "no files" result for an empty
    list, so no special-casing is needed. (The previous `else check_paths([])`
    branch also silently dropped *requirements*/*python_version*.)
    """
    return check_paths(staged_python_files(), requirements=requirements, python_version=python_version)
16
+
17
+
18
def build_parser() -> argparse.ArgumentParser:
    """Construct the llm-code-validator argument parser with its subcommands."""
    parser = argparse.ArgumentParser(prog="llm-code-validator")
    subcommands = parser.add_subparsers(dest="command", required=True)

    # `check`: scan files/dirs (or stdin / the git index) for drift.
    check_cmd = subcommands.add_parser("check", help="Check Python files for known API drift.")
    check_cmd.add_argument("paths", nargs="*", help="Files or directories to scan. Use '-' for stdin.")
    check_cmd.add_argument("--staged", action="store_true", help="Check staged Python files from git.")
    check_cmd.add_argument("--format", choices=["text", "json", "github"], default="text")
    check_cmd.add_argument("--requirements", help="Requirements file used for version assumptions.")
    check_cmd.add_argument("--python-version", help="Target Python version label for result context.")
    check_cmd.add_argument("--show-low-confidence", action="store_true", help="Show lower-confidence diagnostics.")

    # `fix`: dry-run by default, writes only with --write.
    fix_cmd = subcommands.add_parser("fix", help="Preview or apply deterministic safe fixes.")
    fix_cmd.add_argument("paths", nargs="+", help="Python files to fix.")
    fix_cmd.add_argument("--write", action="store_true", help="Write safe fixes to disk.")
    fix_cmd.add_argument("--requirements", help="Requirements file used for version assumptions.")
    fix_cmd.add_argument("--python-version", help="Target Python version label for result context.")

    # `validate-signatures`: lint the rule database itself.
    validate_cmd = subcommands.add_parser("validate-signatures", help="Validate the signature database.")
    validate_cmd.add_argument("--path", help="Path to library_signatures.json.")
    validate_cmd.add_argument(
        "--require-official-evidence",
        action="store_true",
        help="Require diagnostic rules to use source_url or release_note instead of generic notes.",
    )
    return parser
44
+
45
+
46
def _render(result: CheckResult, output_format: str) -> str:
    """Serialize *result* in the requested output format ("text" by default)."""
    renderers = {"json": format_json, "github": format_github}
    renderer = renderers.get(output_format, format_text)
    return renderer(result)
52
+
53
+
54
def main(argv: list[str] | None = None) -> int:
    """CLI entry point.

    Exit codes: 0 success, 1 when diagnostics / skipped fixes / validation
    errors are found, 2 for unexpected runtime failures.
    """
    parser = build_parser()
    args = parser.parse_args(argv)

    try:
        if args.command == "check":
            if args.staged:
                result = check_paths(
                    staged_python_files(),
                    requirements=args.requirements,
                    python_version=args.python_version,
                    show_low_confidence=args.show_low_confidence,
                )
            elif args.paths == ["-"]:
                # NOTE(review): '-' is treated as stdin only when it is the
                # sole path argument; mixed with other paths it falls through
                # and is scanned as a literal filename — confirm intended.
                result = check_stdin(args.requirements, args.python_version, args.show_low_confidence)
            elif args.paths:
                result = check_paths(args.paths, args.requirements, args.python_version, args.show_low_confidence)
            else:
                parser.error("check requires a path, '-', or --staged")  # raises SystemExit
            output = _render(result, args.format)
            if output:
                print(output)
            # Non-zero exit signals drift was found (useful for CI gating).
            return 1 if result.diagnostics else 0
        if args.command == "fix":
            version_context = build_version_context(args.paths, args.requirements, args.python_version)
            exit_code = 0
            for path in args.paths:
                result = fix_file(path, write=args.write, version_context=version_context)
                for preview in result.previews:
                    print(preview)
                for skipped in result.skipped:
                    print(skipped)
                if result.skipped:
                    # Any skipped (unsafe) fix marks the whole run as incomplete.
                    exit_code = 1
            return exit_code
        if args.command == "validate-signatures":
            errors = validate_signature_database(args.path, args.require_official_evidence)
            if errors:
                for error in errors:
                    print(error, file=sys.stderr)
                return 1
            print("OK: signature database is valid")
            return 0
    except Exception as exc:
        # Top-level boundary: turn unexpected failures into exit code 2 with
        # a short message instead of a traceback.
        print(f"llm-code-validator: {exc}", file=sys.stderr)
        return 2

    # Unreachable while subparsers are required=True; keeps the return total.
    return 2
102
+
103
+
104
# Allow direct execution of the CLI module.
if __name__ == "__main__":
    raise SystemExit(main())
@@ -0,0 +1,359 @@
1
+ from __future__ import annotations
2
+
3
+ import ast
4
+ import subprocess
5
+ import sys
6
+ from pathlib import Path
7
+
8
+ from .diagnostics import CheckResult, Diagnostic, Fix
9
+ from .signatures import SignatureRule, find_rule, load_signatures
10
+ from .versioning import VersionContext, build_version_context
11
+
12
+
13
+ STDIN_PATH = "<stdin>"
14
+ EXCLUDED_DIR_NAMES = {
15
+ ".eggs",
16
+ ".git",
17
+ ".hg",
18
+ ".mypy_cache",
19
+ ".nox",
20
+ ".pytest_cache",
21
+ ".ruff_cache",
22
+ ".svn",
23
+ ".tox",
24
+ ".venv",
25
+ "__pycache__",
26
+ "dist",
27
+ "node_modules",
28
+ "site-packages",
29
+ "venv",
30
+ }
31
+ EXCLUDED_DIR_PARTS = {"Lib", "site-packages"}
32
+
33
+
34
class _CallExtractor(ast.NodeVisitor):
    """AST visitor that records library imports, attribute accesses, and calls.

    Populates ``self.calls`` with tuples of
    ``(library, symbol, line, col, confidence, keyword_arg_names)`` for later
    matching against the signature database, and ``self.dynamic_imports`` with
    ``__import__`` / ``importlib.import_module`` sites. Confidence decays as
    aliasing gets more indirect: direct import 1.0, assignment from an aliased
    value inherits the source's confidence (default 0.85), assignment from a
    tracked function's return value 0.75.
    """

    def __init__(self) -> None:
        # Used name -> top-level library it resolves to (e.g. "np" -> "numpy").
        self.aliases: dict[str, str] = {}
        # Used name -> how confident we are in that resolution.
        self.alias_confidence: dict[str, float] = {}
        # (library, symbol, line, col, confidence, keyword-arg names).
        self.calls: list[tuple[str, str, int, int, float, set[str]]] = []
        # (library, line, col) for dynamic-import call sites.
        self.dynamic_imports: list[tuple[str, int, int]] = []
        # Function name -> library its return value appears to come from.
        self.returns: dict[str, str] = {}

    def visit_Import(self, node: ast.Import) -> None:
        # `import a.b as c` binds "c" to library "a" with full confidence.
        for alias in node.names:
            library = alias.name.split(".")[0]
            used_name = alias.asname or alias.name
            self.aliases[used_name] = library
            self.alias_confidence[used_name] = 1.0
            # Record the import itself so module-level rules can match it.
            self.calls.append((library, alias.name, node.lineno, node.col_offset, 1.0, set()))
        self.generic_visit(node)

    def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
        # Relative imports refer to the project itself, not a tracked library.
        if node.level:
            return
        module = node.module or ""
        library = module.split(".")[0]
        for alias in node.names:
            used_name = alias.asname or alias.name
            self.aliases[used_name] = library
            self.alias_confidence[used_name] = 1.0
            qualified_name = f"{module}.{alias.name}" if module else alias.name
            self.calls.append((library, qualified_name, node.lineno, node.col_offset, 1.0, set()))
        self.generic_visit(node)

    def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
        # If the function returns a value rooted in a tracked alias, remember
        # it so assignments from its call sites can inherit the library.
        # Only the first Return found in the walk is considered.
        for child in ast.walk(node):
            if isinstance(child, ast.Return):
                root = _root_name(child.value) if child.value else None
                if root in self.aliases:
                    self.returns[node.name] = self.aliases[root]
                break
        self.generic_visit(node)

    def visit_Assign(self, node: ast.Assign) -> None:
        # Propagate aliases through simple single-target assignments.
        if len(node.targets) == 1 and isinstance(node.targets[0], ast.Name):
            target = node.targets[0].id
            # For `x = lib.thing(...)` inspect the called expression itself.
            value = node.value.func if isinstance(node.value, ast.Call) else node.value
            root = _root_name(value)
            if root in self.aliases:
                self.aliases[target] = self.aliases[root]
                self.alias_confidence[target] = self.alias_confidence.get(root, 0.85)
            elif isinstance(node.value, ast.Call) and isinstance(node.value.func, ast.Name):
                # `x = helper()` where helper() is known to return a tracked
                # library object: propagate with lower confidence.
                function_name = node.value.func.id
                if function_name in self.returns:
                    self.aliases[target] = self.returns[function_name]
                    self.alias_confidence[target] = 0.75
        self.generic_visit(node)

    def visit_Attribute(self, node: ast.Attribute) -> None:
        # Any attribute access rooted in a tracked alias becomes a candidate
        # symbol, normalized so it starts with the real library name.
        root = _root_name(node)
        if root in self.aliases:
            symbol = _attribute_name(node)
            if "." in symbol:
                _, tail = symbol.split(".", 1)
                symbol = f"{self.aliases[root]}.{tail}"
            self.calls.append(
                (
                    self.aliases[root],
                    symbol,
                    node.lineno,
                    node.col_offset,
                    self.alias_confidence.get(root, 0.75),
                    set(),
                )
            )
        self.generic_visit(node)

    def visit_Call(self, node: ast.Call) -> None:
        # Record dynamic imports, which can hide API usage from static rules.
        if isinstance(node.func, ast.Name) and node.func.id == "__import__":
            library = _first_string_arg(node)
            if library:
                self.dynamic_imports.append((library.split(".")[0], node.lineno, node.col_offset))
        elif (
            isinstance(node.func, ast.Attribute)
            and node.func.attr == "import_module"
            and isinstance(node.func.value, ast.Name)
            and node.func.value.id == "importlib"
        ):
            library = _first_string_arg(node)
            if library:
                self.dynamic_imports.append((library.split(".")[0], node.lineno, node.col_offset))
        root = _root_name(node.func)
        if root in self.aliases:
            symbol = _call_symbol(node.func, self.aliases[root])
            if symbol:
                # Keyword names let rules flag removed/renamed parameters.
                keywords = {keyword.arg for keyword in node.keywords if keyword.arg}
                self.calls.append(
                    (
                        self.aliases[root],
                        symbol,
                        node.lineno,
                        node.col_offset,
                        self.alias_confidence.get(root, 0.75),
                        keywords,
                    )
                )
        self.generic_visit(node)
137
+
138
+
139
+ def _root_name(node: ast.AST) -> str | None:
140
+ current = node
141
+ while isinstance(current, ast.Attribute):
142
+ current = current.value
143
+ if isinstance(current, ast.Call):
144
+ return _root_name(current.func)
145
+ if isinstance(current, ast.Name):
146
+ return current.id
147
+ return None
148
+
149
+
150
+ def _attribute_name(node: ast.Attribute) -> str:
151
+ parts = [node.attr]
152
+ current = node.value
153
+ while isinstance(current, ast.Attribute):
154
+ parts.append(current.attr)
155
+ current = current.value
156
+ if isinstance(current, ast.Name):
157
+ parts.append(current.id)
158
+ return ".".join(reversed(parts))
159
+
160
+
161
def _call_symbol(node: ast.AST, library: str) -> str | None:
    """Map a call target to a library-qualified symbol name.

    A bare name is returned as-is; a dotted chain has its first segment
    replaced by *library* so aliased roots normalize to the real library.
    """
    if isinstance(node, ast.Name):
        return node.id
    if not isinstance(node, ast.Attribute):
        return None
    dotted = _attribute_name(node)
    _head, sep, tail = dotted.partition(".")
    return f"{library}.{tail}" if sep else dotted
171
+
172
+
173
+ def _first_string_arg(node: ast.Call) -> str | None:
174
+ if not node.args:
175
+ return None
176
+ first = node.args[0]
177
+ if isinstance(first, ast.Constant) and isinstance(first.value, str):
178
+ return first.value
179
+ return None
180
+
181
+
182
def _diagnostic(
    path: str,
    line: int,
    column: int,
    library: str,
    rule: SignatureRule,
    version_context: VersionContext,
    confidence: float = 1.0,
) -> Diagnostic:
    """Build an LCV001 drift diagnostic for *rule* at the given location."""
    return Diagnostic(
        path=path,
        line=line,
        column=column + 1,  # AST columns are 0-based; diagnostics are 1-based
        code="LCV001",
        severity=rule.severity,
        library=library,
        symbol=rule.symbol,
        message=rule.message,
        evidence=rule.evidence,
        replacement=rule.replacement,
        confidence=confidence,
        version_assumption=version_context.assumption_for(library, rule.version_assumption),
        fix=Fix(replacement=rule.replacement, safety=rule.fix_safety),
    )
207
+
208
+
209
def check_source(
    source: str,
    path: str | None = None,
    version_context: VersionContext | None = None,
    show_low_confidence: bool = False,
) -> CheckResult:
    """Check a single source string for known API drift.

    Returns a CheckResult covering one checked file. A syntax error produces
    a single LCV900 diagnostic instead of raising; dynamic imports produce
    LCV910 warnings since they can hide API usage from this static pass.
    """
    display_path = path or STDIN_PATH
    # Fall back to a defaults-only context; this also triggers the warning below.
    version_context = version_context or VersionContext(None, {}, used_defaults=True)
    try:
        tree = ast.parse(source)
    except SyntaxError as exc:
        diagnostic = Diagnostic(
            path=display_path,
            line=exc.lineno or 1,
            column=exc.offset or 1,
            code="LCV900",
            severity="error",
            library="python",
            symbol="syntax",
            message=f"Python syntax error: {exc.msg}",
            confidence=1.0,
        )
        return CheckResult(checked_files=1, diagnostics=[diagnostic])

    extractor = _CallExtractor()
    extractor.visit(tree)
    signatures = load_signatures()
    diagnostics: list[Diagnostic] = []
    # De-duplicate: the same symbol can be recorded as both an attribute
    # access and a call on one line; report once per (library, line, symbol).
    seen: set[tuple[str, int, str]] = set()

    for library, symbol, line, column, confidence, keywords in extractor.calls:
        rule = find_rule(library, symbol, signatures, keywords)
        if not rule:
            continue
        # Indirect aliases carry lower confidence; hide them unless requested.
        if confidence < 0.8 and not show_low_confidence:
            continue
        key = (library, line, rule.symbol)
        if key in seen:
            continue
        seen.add(key)
        diagnostics.append(_diagnostic(display_path, line, column, library, rule, version_context, confidence))

    for library, line, column in extractor.dynamic_imports:
        diagnostics.append(
            Diagnostic(
                path=display_path,
                line=line,
                column=column + 1,
                code="LCV910",
                severity="warning",
                library=library,
                symbol="dynamic-import",
                message=f"Dynamic import of {library!r} may hide API usage from static checks.",
                confidence=0.6,
            )
        )

    warnings = []
    if version_context.used_defaults:
        warnings.append("No requirements file was evaluated; diagnostics use default signature version assumptions.")
    return CheckResult(
        checked_files=1,
        diagnostics=diagnostics,
        warnings=warnings,
    )
274
+
275
+
276
def check_file(
    path: str | Path,
    version_context: VersionContext | None = None,
    show_low_confidence: bool = False,
) -> CheckResult:
    """Read *path* and check it; unreadable files yield an LCV901 diagnostic."""
    file_path = Path(path)
    try:
        source = file_path.read_text(encoding="utf-8")
    except UnicodeDecodeError:
        # Replace undecodable bytes so one bad character does not abort the scan.
        source = file_path.read_text(encoding="utf-8", errors="replace")
    except OSError as exc:
        read_failure = Diagnostic(
            path=str(file_path),
            line=1,
            column=1,
            code="LCV901",
            severity="error",
            library="filesystem",
            symbol="read",
            message=f"Could not read file: {exc}",
            confidence=1.0,
        )
        return CheckResult(checked_files=0, diagnostics=[read_failure])
    return check_source(source, str(file_path), version_context, show_low_confidence)
300
+
301
+
302
def iter_python_files(paths: list[str]) -> list[Path]:
    """Expand files and directories into a flat list of checkable ``.py`` files."""
    collected: list[Path] = []
    for raw in paths:
        candidate = Path(raw)
        if candidate.is_dir():
            matches = [
                found
                for found in candidate.rglob("*.py")
                if found.is_file() and not _is_excluded_python_path(found)
            ]
            collected.extend(sorted(matches))
        elif candidate.is_file() and candidate.suffix == ".py":
            collected.append(candidate)
    return collected
311
+
312
+
313
def _is_excluded_python_path(path: Path) -> bool:
    """True when any path segment marks a vendored, cache, or build tree."""
    segments = set(path.parts)
    if not segments.isdisjoint(EXCLUDED_DIR_NAMES):
        return True
    # Windows-style interpreter layout: both "Lib" and "site-packages" present.
    return EXCLUDED_DIR_PARTS <= segments
318
+
319
+
320
def merge_results(results: list[CheckResult]) -> CheckResult:
    """Fold per-file results into one, de-duplicating and sorting warnings."""
    total_files = 0
    all_diagnostics: list[Diagnostic] = []
    warning_set: set[str] = set()
    for item in results:
        total_files += item.checked_files
        all_diagnostics.extend(item.diagnostics)
        warning_set.update(item.warnings)
    return CheckResult(
        checked_files=total_files,
        diagnostics=all_diagnostics,
        warnings=sorted(warning_set),
    )
326
+
327
+
328
def check_paths(
    paths: list[str],
    requirements: str | None = None,
    python_version: str | None = None,
    show_low_confidence: bool = False,
) -> CheckResult:
    """Check every Python file reachable from *paths* under one version context."""
    files = iter_python_files(paths)
    if not files:
        return CheckResult(checked_files=0, warnings=["No Python files were found."])
    context = build_version_context(paths, requirements, python_version)
    per_file = [check_file(candidate, context, show_low_confidence) for candidate in files]
    return merge_results(per_file)
339
+
340
+
341
def staged_python_files() -> list[str]:
    """List staged ``.py`` files (added/copied/modified/renamed) via git.

    Raises RuntimeError when git exits non-zero (e.g. not a repository).
    """
    completed = subprocess.run(
        ["git", "diff", "--cached", "--name-only", "--diff-filter=ACMR", "--", "*.py"],
        text=True,
        capture_output=True,
        check=False,
    )
    if completed.returncode != 0:
        message = completed.stderr.strip() or "Could not read staged files from git."
        raise RuntimeError(message)
    stripped = (entry.strip() for entry in completed.stdout.splitlines())
    return [entry for entry in stripped if entry]
351
+
352
+
353
def check_stdin(
    requirements: str | None = None,
    python_version: str | None = None,
    show_low_confidence: bool = False,
) -> CheckResult:
    """Check source piped on standard input, reported under the '<stdin>' path."""
    context = build_version_context(None, requirements, python_version)
    source = sys.stdin.read()
    return check_source(source, STDIN_PATH, context, show_low_confidence)
@@ -0,0 +1,61 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass, field
4
+ from typing import Any
5
+
6
+
7
@dataclass(frozen=True)
class Fix:
    """A suggested remediation attached to a diagnostic."""

    replacement: str | None = None  # replacement text, when a rewrite exists
    safety: str = "no_fix"  # how safely the fix can be auto-applied

    def to_dict(self) -> dict[str, Any]:
        """Return a JSON-serializable view of this fix."""
        return {"replacement": self.replacement, "safety": self.safety}
14
+
15
+
16
@dataclass(frozen=True)
class Diagnostic:
    """One finding at a specific source location."""

    path: str
    line: int
    column: int
    code: str  # e.g. "LCV001"
    severity: str
    library: str
    symbol: str
    message: str
    evidence: str | None = None
    replacement: str | None = None
    confidence: float = 1.0
    version_assumption: str | None = None
    fix: Fix = field(default_factory=Fix)

    def to_dict(self) -> dict[str, Any]:
        """Return a JSON-serializable view of this diagnostic."""
        simple_fields = (
            "path",
            "line",
            "column",
            "code",
            "severity",
            "library",
            "symbol",
            "message",
            "evidence",
            "replacement",
            "confidence",
            "version_assumption",
        )
        payload: dict[str, Any] = {name: getattr(self, name) for name in simple_fields}
        payload["fix"] = self.fix.to_dict()
        return payload
48
+
49
+
50
@dataclass(frozen=True)
class CheckResult:
    """Aggregate outcome of checking one or more files."""

    checked_files: int  # number of files actually checked
    diagnostics: list[Diagnostic] = field(default_factory=list)
    warnings: list[str] = field(default_factory=list)

    def to_dict(self) -> dict[str, Any]:
        """Return a JSON-serializable view of this result."""
        return {
            "checked_files": self.checked_files,
            "diagnostics": [entry.to_dict() for entry in self.diagnostics],
            "warnings": list(self.warnings),
        }