python-checkup 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. python_checkup/__init__.py +9 -0
  2. python_checkup/__main__.py +3 -0
  3. python_checkup/analysis_request.py +35 -0
  4. python_checkup/analyzer_catalog.py +100 -0
  5. python_checkup/analyzers/__init__.py +54 -0
  6. python_checkup/analyzers/bandit.py +158 -0
  7. python_checkup/analyzers/basedpyright.py +103 -0
  8. python_checkup/analyzers/cached.py +106 -0
  9. python_checkup/analyzers/dependency_vulns.py +298 -0
  10. python_checkup/analyzers/deptry.py +142 -0
  11. python_checkup/analyzers/detect_secrets.py +101 -0
  12. python_checkup/analyzers/mypy.py +217 -0
  13. python_checkup/analyzers/radon.py +150 -0
  14. python_checkup/analyzers/registry.py +69 -0
  15. python_checkup/analyzers/ruff.py +256 -0
  16. python_checkup/analyzers/typos.py +80 -0
  17. python_checkup/analyzers/vulture.py +151 -0
  18. python_checkup/cache.py +244 -0
  19. python_checkup/cli.py +763 -0
  20. python_checkup/config.py +87 -0
  21. python_checkup/dedup.py +119 -0
  22. python_checkup/dependencies/discovery.py +192 -0
  23. python_checkup/detection.py +298 -0
  24. python_checkup/diff.py +130 -0
  25. python_checkup/discovery.py +180 -0
  26. python_checkup/formatters/__init__.py +0 -0
  27. python_checkup/formatters/badge.py +38 -0
  28. python_checkup/formatters/json_fmt.py +22 -0
  29. python_checkup/formatters/terminal.py +396 -0
  30. python_checkup/mcp/__init__.py +3 -0
  31. python_checkup/mcp/installer.py +119 -0
  32. python_checkup/mcp/server.py +411 -0
  33. python_checkup/models.py +114 -0
  34. python_checkup/plan.py +109 -0
  35. python_checkup/progress.py +95 -0
  36. python_checkup/runner.py +438 -0
  37. python_checkup/scoring/__init__.py +0 -0
  38. python_checkup/scoring/engine.py +397 -0
  39. python_checkup/skills/SKILL.md +416 -0
  40. python_checkup/skills/__init__.py +0 -0
  41. python_checkup/skills/agents.py +98 -0
  42. python_checkup/skills/installer.py +248 -0
  43. python_checkup/skills/rule_db.py +806 -0
  44. python_checkup/web/__init__.py +0 -0
  45. python_checkup/web/server.py +285 -0
  46. python_checkup/web/static/__init__.py +0 -0
  47. python_checkup/web/static/index.html +959 -0
  48. python_checkup/web/template.py +26 -0
  49. python_checkup-0.0.1.dist-info/METADATA +250 -0
  50. python_checkup-0.0.1.dist-info/RECORD +53 -0
  51. python_checkup-0.0.1.dist-info/WHEEL +4 -0
  52. python_checkup-0.0.1.dist-info/entry_points.txt +14 -0
  53. python_checkup-0.0.1.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,217 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import json
5
+ import logging
6
+ import re
7
+ from pathlib import Path
8
+
9
+ from python_checkup.analysis_request import AnalysisRequest
10
+ from python_checkup.models import Category, Diagnostic, Severity
11
+
12
# Shared package logger.
logger = logging.getLogger("python_checkup")

# Fallback regex for mypy < 1.7 (no JSON output support)
# Groups: file, line, optional column, level (error|warning|note),
# message, optional trailing [error-code].
MYPY_LINE_RE = re.compile(
    r"^(.+?):(\d+):(?:(\d+):)?\s*(error|warning|note):\s*(.+?)(?:\s+\[(.+)\])?$"
)
18
+
19
+
20
class MypyAnalyzer:
    """Type checking via mypy."""

    @property
    def name(self) -> str:
        return "mypy"

    @property
    def category(self) -> Category:
        return Category.TYPE_SAFETY

    async def is_available(self) -> bool:
        """Check if mypy is importable."""
        try:
            import mypy.api  # noqa: F401

            return True
        except ImportError:
            return False

    async def analyze(
        self,
        request: AnalysisRequest,
    ) -> list[Diagnostic]:
        """Run mypy on the given files.

        Strategy:
        1. Try --output json first (mypy 1.7+)
        2. If that flag is unrecognized, fall back to text parsing
        3. Run in a thread pool since mypy.api.run() is blocking
        """
        files = request.files
        config = request.config_dict()

        if not files:
            return []

        timeout: int = 60
        raw_timeout = config.get("timeout")
        if isinstance(raw_timeout, int | float):
            timeout = int(raw_timeout)

        result = await self._execute(
            self._build_args(files, use_json=True), timeout
        )
        if result is None:
            return []
        stdout, stderr, exit_code = result

        # Exit code 2 = fatal error (bad args, can't find files, etc.)
        if exit_code == 2:
            if (
                "unrecognized" not in stderr.lower()
                and "error: argument" not in stderr.lower()
            ):
                logger.error("mypy fatal error: %s", stderr)
                return []

            # mypy < 1.7 rejects --output json; retry in text mode.
            logger.debug(
                "mypy doesn't support --output json, falling back to text parsing"
            )
            result = await self._execute(
                self._build_args(files, use_json=False), timeout, " (fallback)"
            )
            if result is None:
                return []
            stdout, stderr, exit_code = result

            if exit_code == 2:
                logger.error("mypy fatal error: %s", stderr)
                return []
            return self._parse_text_output(stdout)

        # Exit code 0 = clean, 1 = type errors found
        if not stdout.strip():
            return []

        return self._parse_json_output(stdout)

    async def _execute(
        self,
        args: list[str],
        timeout: int,
        label: str = "",
    ) -> tuple[str, str, int] | None:
        """Run mypy in a worker thread; return (stdout, stderr, exit_code).

        Returns None on timeout. ``label`` is appended to the timeout log
        message (e.g. " (fallback)") to distinguish retry attempts.
        """
        loop = asyncio.get_running_loop()
        try:
            return await asyncio.wait_for(
                loop.run_in_executor(None, self._run_mypy, args),
                timeout=timeout,
            )
        except asyncio.TimeoutError:
            logger.warning("mypy timed out after %ds%s", timeout, label)
            return None

    def _build_args(
        self,
        files: list[Path],
        use_json: bool,
    ) -> list[str]:
        """Assemble the mypy.api.run() argument vector for ``files``."""
        args: list[str] = []

        if use_json:
            args.extend(["--output", "json"])

        args.extend(
            [
                "--no-color-output",
                "--no-error-summary",
                "--show-absolute-path",
                "--incremental",  # Use .mypy_cache/ for speed
            ]
        )

        # Pass file paths
        args.extend(str(f) for f in files)

        return args

    @staticmethod
    def _run_mypy(args: list[str]) -> tuple[str, str, int]:
        """Call mypy.api.run() -- must run in thread pool (blocking)."""
        from mypy.api import run

        return run(args)

    def _parse_json_output(self, stdout: str) -> list[Diagnostic]:
        """Parse mypy JSON Lines output (one JSON object per line)."""
        diagnostics: list[Diagnostic] = []

        for line in stdout.strip().splitlines():
            line = line.strip()
            if not line:
                continue

            try:
                item = json.loads(line)
            except json.JSONDecodeError:
                logger.debug("Skipping non-JSON mypy output line: %s", line[:80])
                continue

            severity = _map_mypy_severity(item.get("severity", "error"))
            # "code" may be present but null in mypy's JSON output; fall back
            # to "misc" rather than producing a "mypy-None" rule id.
            code = item.get("code") or "misc"

            diagnostics.append(
                Diagnostic(
                    file_path=Path(item["file"]),
                    line=item.get("line", 0),
                    column=item.get("column", 0),
                    severity=severity,
                    rule_id=f"mypy-{code}",
                    tool="mypy",
                    category=Category.TYPE_SAFETY,
                    message=item.get("message", ""),
                    fix=item.get("hint"),
                    end_line=item.get("end_line"),
                    end_column=item.get("end_column"),
                )
            )

        return diagnostics

    def _parse_text_output(self, stdout: str) -> list[Diagnostic]:
        """Fallback: parse mypy's traditional text output.

        Format: file.py:10:5: error: message [error-code]
        """
        diagnostics: list[Diagnostic] = []

        for line in stdout.strip().splitlines():
            match = MYPY_LINE_RE.match(line)
            if not match:
                continue

            filepath, lineno, colno, level, message, code = match.groups()

            # Skip notes -- they're supplementary info, not actionable findings
            if level == "note":
                continue

            severity = _map_mypy_severity(level)
            code = code or "misc"

            diagnostics.append(
                Diagnostic(
                    file_path=Path(filepath),
                    line=int(lineno),
                    column=int(colno) if colno else 0,
                    severity=severity,
                    rule_id=f"mypy-{code}",
                    tool="mypy",
                    category=Category.TYPE_SAFETY,
                    message=message.strip(),
                )
            )

        return diagnostics
208
+
209
+
210
def _map_mypy_severity(level: str) -> Severity:
    """Translate a mypy message level into a python-checkup Severity."""
    if level == "error":
        return Severity.ERROR
    if level == "warning":
        return Severity.WARNING
    # "note" and anything unexpected are informational.
    return Severity.INFO
@@ -0,0 +1,150 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import logging
5
+ from pathlib import Path
6
+ from typing import Any
7
+
8
+ from python_checkup.analysis_request import AnalysisRequest
9
+ from python_checkup.models import Category, Diagnostic, Severity
10
+
11
# Shared package logger.
logger = logging.getLogger("python_checkup")

# Only report functions at or above this grade as diagnostics.
# Grade C (CC 11-20) is common in real-world code and not actionable
# enough to penalise. We start warning at D (CC 21-30).
# Grade letters compare lexicographically (A < B < ... < F), so a plain
# string comparison against this threshold is safe.
DIAGNOSTIC_THRESHOLD = "D"  # CC >= 21

# Map CC grades to severity
CC_GRADE_SEVERITY: dict[str, Severity] = {
    "D": Severity.WARNING,  # 21-30: complex
    "E": Severity.ERROR,  # 31-40: very complex
    "F": Severity.ERROR,  # 41+: unmaintainable
}
24
+
25
+
26
class RadonAnalyzer:
    """Complexity analysis via Radon."""

    @property
    def name(self) -> str:
        return "radon"

    @property
    def category(self) -> Category:
        return Category.COMPLEXITY

    async def is_available(self) -> bool:
        """Check if radon is importable."""
        try:
            import radon.complexity  # noqa: F401
            import radon.metrics  # noqa: F401

            return True
        except ImportError:
            return False

    async def analyze(
        self,
        request: AnalysisRequest,
    ) -> list[Diagnostic]:
        """Analyze request.files for complexity diagnostics.

        Offloads to a thread pool because Radon is CPU-bound Python code.
        """
        files = request.files
        if not files:
            return []

        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(
            None,
            self._analyze_sync,
            files,
            request.metadata,
        )

    def _analyze_sync(
        self,
        files: list[Path],
        metadata: dict[str, object],
    ) -> list[Diagnostic]:
        """Synchronous analysis -- runs in thread pool.

        Emits one diagnostic per function/method/class whose cyclomatic
        complexity grade is at or above DIAGNOSTIC_THRESHOLD, and stashes
        per-file Maintainability Index scores under
        ``metadata["_radon_mi_scores"]`` for the scoring engine.
        """
        from radon.metrics import mi_visit

        diagnostics: list[Diagnostic] = []
        mi_scores: list[float] = []

        for file_path in files:
            try:
                # Python source is UTF-8 by default (PEP 3120); reading with
                # the platform locale encoding mis-decodes on e.g. Windows.
                source = file_path.read_text(encoding="utf-8", errors="ignore")
            except OSError as e:
                logger.debug("Cannot read %s: %s", file_path, e)
                continue

            if not source.strip():
                continue

            # 1. Cyclomatic complexity per function/method
            try:
                diagnostics.extend(self._cc_diagnostics(file_path, source))
            except Exception as e:
                # Best-effort: one unparsable file must not abort the run.
                logger.debug("Radon CC failed on %s: %s", file_path, e)

            # 2. Maintainability Index per file
            try:
                mi: float = mi_visit(source, multi=True)
                mi_scores.append(mi)
            except Exception as e:
                logger.debug("Radon MI failed on %s: %s", file_path, e)

        # Store MI scores in request metadata for the scoring engine
        metadata["_radon_mi_scores"] = mi_scores

        return diagnostics

    def _cc_diagnostics(
        self,
        file_path: Path,
        source: str,
    ) -> list[Diagnostic]:
        """Cyclomatic-complexity diagnostics for a single file's source."""
        from radon.complexity import cc_rank, cc_visit

        results: list[Diagnostic] = []
        for block in cc_visit(source):
            grade = cc_rank(block.complexity)
            # Grade letters compare lexicographically (A < ... < F).
            if grade < DIAGNOSTIC_THRESHOLD:
                continue

            results.append(
                Diagnostic(
                    file_path=file_path,
                    line=block.lineno,
                    column=0,
                    severity=CC_GRADE_SEVERITY.get(grade, Severity.WARNING),
                    rule_id=f"CC-{grade}",
                    tool="radon",
                    category=Category.COMPLEXITY,
                    message=(
                        f"{_block_type_label(block)} '{block.name}' has cyclomatic "
                        f"complexity of {block.complexity} (grade {grade})"
                    ),
                    fix=_cc_fix_suggestion(block.complexity, grade),
                    end_line=getattr(block, "endline", None),
                )
            )
        return results
127
+
128
+
129
+ def _block_type_label(block: Any) -> str:
130
+ if hasattr(block, "is_method") and block.is_method:
131
+ return f"Method ({block.classname})"
132
+ elif hasattr(block, "is_method"):
133
+ return "Function"
134
+ else:
135
+ return "Class"
136
+
137
+
138
+ def _cc_fix_suggestion(complexity: int, grade: str) -> str:
139
+ if grade == "F":
140
+ return (
141
+ f"Complexity {complexity} is dangerously high. "
142
+ "Extract helper functions, replace conditional chains with "
143
+ "a dispatch dict or Strategy pattern, and eliminate nested loops."
144
+ )
145
+ # D and E
146
+ return (
147
+ f"Complexity {complexity} makes this hard to test and maintain. "
148
+ "Consider extracting helper functions or using early returns "
149
+ "to reduce nesting."
150
+ )
@@ -0,0 +1,69 @@
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ from importlib.metadata import entry_points
5
+
6
+ from python_checkup.analyzers import Analyzer
7
+
8
# Shared package logger.
logger = logging.getLogger("python_checkup")
9
+
10
+
11
+ async def discover_analyzers() -> list[Analyzer]:
12
+ """Discover all analyzers whose underlying tools are available.
13
+
14
+ Loads built-in and third-party analyzers via entry points, verifies
15
+ protocol conformance, checks availability, and returns only those
16
+ that are ready to run.
17
+ """
18
+ eps = entry_points(group="python-checkup.analyzers")
19
+ available: list[Analyzer] = []
20
+ unavailable: list[str] = []
21
+
22
+ for ep in eps:
23
+ try:
24
+ analyzer_cls = ep.load()
25
+ analyzer = analyzer_cls()
26
+
27
+ # Verify the plugin satisfies the Analyzer protocol
28
+ if not isinstance(analyzer, Analyzer):
29
+ logger.warning(
30
+ "Plugin '%s' does not implement the Analyzer protocol, skipping",
31
+ ep.name,
32
+ )
33
+ unavailable.append(ep.name)
34
+ continue
35
+
36
+ if await analyzer.is_available():
37
+ available.append(analyzer)
38
+ logger.debug("Analyzer available: %s", ep.name)
39
+ else:
40
+ unavailable.append(ep.name)
41
+ logger.debug("Analyzer not available: %s", ep.name)
42
+ except Exception:
43
+ logger.debug("Failed to load analyzer plugin '%s'", ep.name, exc_info=True)
44
+ unavailable.append(ep.name)
45
+
46
+ return available
47
+
48
+
49
+ async def get_analyzer(name: str) -> Analyzer | None:
50
+ """Load a specific analyzer by name."""
51
+ eps = entry_points(group="python-checkup.analyzers")
52
+ for ep in eps:
53
+ if ep.name == name:
54
+ try:
55
+ analyzer_cls = ep.load()
56
+ analyzer = analyzer_cls()
57
+
58
+ if not isinstance(analyzer, Analyzer):
59
+ logger.warning(
60
+ "Plugin '%s' does not implement the Analyzer protocol",
61
+ name,
62
+ )
63
+ return None
64
+
65
+ if await analyzer.is_available():
66
+ return analyzer
67
+ except Exception:
68
+ logger.debug("Failed to load analyzer '%s'", name, exc_info=True)
69
+ return None
@@ -0,0 +1,256 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import json
5
+ import logging
6
+ import shutil
7
+ from pathlib import Path
8
+
9
+ from python_checkup.analysis_request import AnalysisRequest
10
+ from python_checkup.models import Category, Diagnostic, Severity
11
+
12
# Shared package logger.
logger = logging.getLogger("python_checkup")


# Map Ruff rule prefixes to python-checkup categories
RULE_PREFIX_TO_CATEGORY: dict[str, Category] = {
    # Security (flake8-bandit)
    "S": Category.SECURITY,
    # Complexity (mccabe)
    "C9": Category.COMPLEXITY,
    # Dead code (eradicate)
    "ERA": Category.DEAD_CODE,
    # Framework-specific rules map to quality
    "DJ": Category.QUALITY,
    "FAST": Category.QUALITY,
    "ASYNC": Category.QUALITY,
}

# Ruff rules that are errors (not warnings)
# E9xx are syntax errors, F-prefixed are important
# NOTE(review): this set is not referenced anywhere in this module --
# _severity_for_rule hardcodes the same prefixes. Confirm no external
# importers before removing it.
ERROR_PREFIXES = {"E9", "F"}
32
+
33
+
34
+ def _get_framework_rules(framework: object | None) -> list[str]:
35
+ """Get framework-specific rule extensions."""
36
+ framework_rules: dict[str, list[str]] = {
37
+ "django": ["DJ", "S610", "S611"],
38
+ "fastapi": ["FAST", "ASYNC"],
39
+ "flask": ["S201"],
40
+ }
41
+ return framework_rules.get(str(framework), []) if framework else []
42
+
43
+
44
class RuffAnalyzer:
    """Linting via Ruff."""

    @property
    def name(self) -> str:
        return "ruff"

    @property
    def category(self) -> Category:
        return Category.QUALITY

    async def is_available(self) -> bool:
        """Check if ruff is on PATH."""
        return shutil.which("ruff") is not None

    async def analyze(
        self,
        request: AnalysisRequest,
    ) -> list[Diagnostic]:
        """Run ruff check with JSON output and parse results.

        We respect the project's existing ruff.toml / [tool.ruff] config.
        We do NOT override rule selection -- the user's config is authoritative.
        We only add --output-format json for machine-readable output.

        When a framework is detected (config["framework"]), we extend the
        rule selection with framework-specific rules (DJ for Django, FAST/ASYNC
        for FastAPI, S201 for Flask).

        Diagnostics whose categories are not in ``request.categories`` are
        filtered out so that ``--skip security`` (for example) suppresses
        Ruff's S-prefixed rules alongside Bandit and detect-secrets.
        """
        files = request.files

        if not files:
            return []

        cmd = self._build_ruff_command(request)
        diagnostics = await self._run_ruff_and_parse(cmd, request.config_dict())

        if diagnostics and request.categories:
            diagnostics = [d for d in diagnostics if d.category in request.categories]

        return diagnostics

    def _build_ruff_command(self, request: AnalysisRequest) -> list[str]:
        """Build the ruff command with framework-specific rules."""
        config = request.config_dict()
        cmd = [
            "ruff",
            "check",
            "--output-format",
            "json",
            "--no-cache",
        ]

        framework = config.get("framework")
        extend_rules = _get_framework_rules(framework)

        if extend_rules:
            cmd.extend(["--extend-select", ",".join(extend_rules)])

        cmd.extend(str(f) for f in request.files)

        return cmd

    async def _run_ruff_and_parse(
        self,
        cmd: list[str],
        config: dict[str, object],
    ) -> list[Diagnostic]:
        """Execute ruff and parse JSON output into diagnostics.

        Returns [] on timeout, missing binary, fatal ruff error (exit 2),
        or unparseable output. Exit code 1 just means findings exist.
        """
        timeout: int = 60
        raw_timeout = config.get("timeout")
        if isinstance(raw_timeout, int | float):
            timeout = int(raw_timeout)

        try:
            proc = await asyncio.create_subprocess_exec(
                *cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
        except FileNotFoundError:
            logger.warning("Ruff binary not found")
            return []

        try:
            stdout, stderr = await asyncio.wait_for(
                proc.communicate(),
                timeout=timeout,
            )
        except asyncio.TimeoutError:
            # Fix: previously the still-running subprocess was leaked on
            # timeout. Kill and reap it before giving up.
            proc.kill()
            await proc.wait()
            logger.warning("Ruff timed out")
            return []

        if proc.returncode == 2:
            logger.error("Ruff error: %s", stderr.decode())
            return []

        output = stdout.decode()
        if not output.strip():
            return []

        try:
            raw_diagnostics = json.loads(output)
        except json.JSONDecodeError:
            logger.error("Failed to parse Ruff JSON output")
            return []

        return [_map_diagnostic(d) for d in raw_diagnostics]
154
+
155
+
156
def _map_diagnostic(raw: dict[str, object]) -> Diagnostic:
    """Map a single Ruff JSON diagnostic to our Diagnostic model.

    Ruff JSON output per item:
    {
        "code": "F401",
        "message": "`os` imported but unused",
        "filename": "/path/to/file.py",
        "location": {"row": 1, "column": 8},
        "end_location": {"row": 1, "column": 10},
        "fix": {
            "message": "Remove unused import: `os`",
            "applicability": "safe",
            "edits": [...]
        },
        "url": "https://docs.astral.sh/ruff/rules/unused-import",
        "noqa_row": 1,
        "cell": null
    }
    """
    # "code" can be present but null (e.g. syntax errors); fall back to
    # "UNKNOWN" rather than stringifying None into a "None" rule id.
    code = str(raw.get("code") or "UNKNOWN")
    category = _categorize_rule(code)
    severity = _severity_for_rule(code)

    # Optional auto-fix hint.
    fix_info = raw.get("fix")
    fix_message: str | None = None
    if isinstance(fix_info, dict):
        fix_message = str(fix_info["message"]) if fix_info.get("message") else None

    location = raw.get("location")
    end_location = raw.get("end_location")

    # Location fields may be absent or null; default to 0 / None.
    row = 0
    col = 0
    if isinstance(location, dict):
        row = int(location.get("row") or 0)
        col = int(location.get("column") or 0)

    end_row: int | None = None
    end_col: int | None = None
    if isinstance(end_location, dict):
        end_row = int(end_location.get("row") or 0)
        end_col = int(end_location.get("column") or 0)

    return Diagnostic(
        file_path=Path(str(raw.get("filename", "unknown"))),
        line=row,
        column=col,
        severity=severity,
        rule_id=code,
        tool="ruff",
        category=category,
        message=str(raw.get("message", "")),
        fix=fix_message,
        help_url=str(raw["url"]) if raw.get("url") else None,
        end_line=end_row,
        end_column=end_col,
    )
214
+
215
+
216
def _categorize_rule(code: str) -> Category:
    """Map a Ruff rule code to a python-checkup category.

    Rule prefix conventions:
    - S* -> Security (flake8-bandit)
    - C9* -> Complexity (mccabe)
    - ERA* -> Dead code (eradicate)
    - Everything else -> Quality

    A prefix only matches when the remainder of the code is numeric:
    Ruff codes are <letters><digits>, so a bare startswith("S") would
    wrongly classify SIM/SLF/SLOT (flake8-simplify etc.) as security.
    """
    for prefix, category in RULE_PREFIX_TO_CATEGORY.items():
        rest = code[len(prefix):]
        if code.startswith(prefix) and rest.isdigit():
            return category
    return Category.QUALITY
229
+
230
+
231
def _severity_for_rule(code: str) -> Severity:
    """Determine severity based on rule code.

    - E9xx (syntax errors) and Fxxx (Pyflakes) -> ERROR
    - Sxxx (flake8-bandit security) -> ERROR
    - Style rules (Wxxx, Dxxx, Ixxx) -> INFO
    - Everything else -> WARNING

    Ruff codes are <letter prefix><digits>, so matching requires a numeric
    remainder: a bare startswith("F") would also escalate FAST/FBT/FURB
    rules, and startswith("S") would escalate SIM/SLF/SLOT, none of which
    are Pyflakes or security rules.
    """

    def _matches(prefix: str) -> bool:
        # True when `code` is exactly `prefix` followed by digits.
        rest = code[len(prefix):]
        return code.startswith(prefix) and rest.isdigit()

    # Syntax errors are always errors
    if _matches("E9"):
        return Severity.ERROR

    # Pyflakes rules are errors (unused imports, undefined names, etc.)
    if _matches("F"):
        return Severity.ERROR

    # Security rules are errors
    if _matches("S"):
        return Severity.ERROR

    # Style/formatting are info
    if _matches("W") or _matches("D") or _matches("I"):
        return Severity.INFO

    # Default to warning
    return Severity.WARNING