python-checkup 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. python_checkup/__init__.py +9 -0
  2. python_checkup/__main__.py +3 -0
  3. python_checkup/analysis_request.py +35 -0
  4. python_checkup/analyzer_catalog.py +100 -0
  5. python_checkup/analyzers/__init__.py +54 -0
  6. python_checkup/analyzers/bandit.py +158 -0
  7. python_checkup/analyzers/basedpyright.py +103 -0
  8. python_checkup/analyzers/cached.py +106 -0
  9. python_checkup/analyzers/dependency_vulns.py +298 -0
  10. python_checkup/analyzers/deptry.py +142 -0
  11. python_checkup/analyzers/detect_secrets.py +101 -0
  12. python_checkup/analyzers/mypy.py +217 -0
  13. python_checkup/analyzers/radon.py +150 -0
  14. python_checkup/analyzers/registry.py +69 -0
  15. python_checkup/analyzers/ruff.py +256 -0
  16. python_checkup/analyzers/typos.py +80 -0
  17. python_checkup/analyzers/vulture.py +151 -0
  18. python_checkup/cache.py +244 -0
  19. python_checkup/cli.py +763 -0
  20. python_checkup/config.py +87 -0
  21. python_checkup/dedup.py +119 -0
  22. python_checkup/dependencies/discovery.py +192 -0
  23. python_checkup/detection.py +298 -0
  24. python_checkup/diff.py +130 -0
  25. python_checkup/discovery.py +180 -0
  26. python_checkup/formatters/__init__.py +0 -0
  27. python_checkup/formatters/badge.py +38 -0
  28. python_checkup/formatters/json_fmt.py +22 -0
  29. python_checkup/formatters/terminal.py +396 -0
  30. python_checkup/mcp/__init__.py +3 -0
  31. python_checkup/mcp/installer.py +119 -0
  32. python_checkup/mcp/server.py +411 -0
  33. python_checkup/models.py +114 -0
  34. python_checkup/plan.py +109 -0
  35. python_checkup/progress.py +95 -0
  36. python_checkup/runner.py +438 -0
  37. python_checkup/scoring/__init__.py +0 -0
  38. python_checkup/scoring/engine.py +397 -0
  39. python_checkup/skills/SKILL.md +416 -0
  40. python_checkup/skills/__init__.py +0 -0
  41. python_checkup/skills/agents.py +98 -0
  42. python_checkup/skills/installer.py +248 -0
  43. python_checkup/skills/rule_db.py +806 -0
  44. python_checkup/web/__init__.py +0 -0
  45. python_checkup/web/server.py +285 -0
  46. python_checkup/web/static/__init__.py +0 -0
  47. python_checkup/web/static/index.html +959 -0
  48. python_checkup/web/template.py +26 -0
  49. python_checkup-0.0.1.dist-info/METADATA +250 -0
  50. python_checkup-0.0.1.dist-info/RECORD +53 -0
  51. python_checkup-0.0.1.dist-info/WHEEL +4 -0
  52. python_checkup-0.0.1.dist-info/entry_points.txt +14 -0
  53. python_checkup-0.0.1.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,411 @@
1
from __future__ import annotations

import json
import logging
import sys
from pathlib import Path

from mcp.server.fastmcp import FastMCP

from python_checkup.models import Category, Diagnostic, HealthReport, Severity

# Configure logging to stderr (CRITICAL for stdio servers --
# any stdout output corrupts the JSON-RPC protocol)
logging.basicConfig(
    level=logging.INFO,
    format="%(name)s %(levelname)s: %(message)s",
    stream=sys.stderr,
)
logger = logging.getLogger("python-checkup-mcp")

# Single FastMCP application instance; the tool functions below register
# themselves against it via the @mcp_server.tool() decorator.
mcp_server = FastMCP("python-checkup")
22
+
23
+
24
+ # Tool 1: Full health check
25
+
26
+
27
@mcp_server.tool()
async def python_checkup_diagnose(
    path: str,
    quick: bool = False,
) -> str:
    """Run a complete health check on a Python project or specific files.

    Returns a 0-100 health score with categorized diagnostics covering
    code quality, type safety, security, complexity, and dead code.

    Call this after making significant code changes, before committing,
    or when you want an overview of a project's health. Use quick=True
    to skip slow analyzers (mypy) for sub-3-second results.

    Args:
        path: Absolute path to the project directory or a specific file.
        quick: If True, skip slow analyzers (mypy) for faster results.
    """
    try:
        from python_checkup.config import load_config
        from python_checkup.discovery import discover_python_files
        from python_checkup.runner import run_analysis

        target = Path(path).resolve()
        if not target.exists():
            return f"Error: Path does not exist: {path}"

        # A single file is analyzed in the context of its parent directory;
        # a directory gets a full file discovery pass.
        if target.is_file():
            root = target.parent
            config = load_config(root)
            files = [target]
        else:
            root = target
            config = load_config(root)
            files = discover_python_files(root, config.ignore_files)

        skip: set[str] = {"mypy"} if quick else set()

        report = await run_analysis(
            project_root=root,
            config=config,
            files=files,
            skip_analyzers=skip,
            quiet=True,  # No Rich progress in MCP mode
        )
        return _format_report(report)

    except Exception as e:
        logger.exception("Error in python_checkup_diagnose")
        return f"Error running analysis: {e}"
81
+
82
+
83
+ # Tool 2: Lint specific files
84
+
85
+
86
@mcp_server.tool()
async def python_checkup_lint(
    paths: list[str],
) -> str:
    """Run Ruff linting on specific Python files.

    Returns lint issues with rule IDs, severity, messages, and auto-fix
    suggestions. Use this for quick feedback after editing Python files.
    Faster than a full diagnose since it only runs the linter.

    Args:
        paths: List of absolute file paths to lint.
    """
    try:
        from python_checkup.analysis_request import AnalysisRequest
        from python_checkup.analyzers.ruff import RuffAnalyzer
        from python_checkup.config import load_config
        from python_checkup.plan import PROFILE_DEFAULT

        # Guard: an empty list would otherwise raise IndexError at
        # file_paths[0] below and surface as an unhelpful generic error.
        if not paths:
            return "Error: No file paths provided."

        analyzer = RuffAnalyzer()
        if not await analyzer.is_available():
            return "Error: Ruff is not installed. Run: pip install ruff"

        file_paths = [Path(p).resolve() for p in paths]
        for p in file_paths:
            if not p.exists():
                return f"Error: File not found: {p}"

        # The first file's directory serves as the project root for config
        # discovery; Ruff still receives the explicit file list.
        project_root = file_paths[0].parent
        diagnostics = await analyzer.analyze(
            AnalysisRequest(
                project_root=project_root,
                files=file_paths,
                config=load_config(project_root),
                categories={
                    Category.QUALITY,
                    Category.SECURITY,
                    Category.COMPLEXITY,
                    Category.DEAD_CODE,
                },
                profile=PROFILE_DEFAULT,
            )
        )

        if not diagnostics:
            return "No lint issues found. Code looks clean!"

        return _format_diagnostics(diagnostics, max_items=30)

    except Exception as e:
        logger.exception("Error in python_checkup_lint")
        return f"Error running lint: {e}"
138
+
139
+
140
+ # Tool 3: Type check specific files
141
+
142
+
143
@mcp_server.tool()
async def python_checkup_typecheck(
    paths: list[str],
) -> str:
    """Run type checking (mypy) on specific Python files.

    Returns type errors with locations, error codes, and explanations.
    Use this when writing or modifying typed Python code to catch type
    mismatches, missing annotations, and incompatible assignments.

    Args:
        paths: List of absolute file paths to type-check.
    """
    try:
        from python_checkup.analysis_request import AnalysisRequest
        from python_checkup.analyzers.registry import get_analyzer
        from python_checkup.config import load_config
        from python_checkup.plan import PROFILE_DEFAULT

        # Guard: an empty list would otherwise raise IndexError at
        # file_paths[0] below and surface as an unhelpful generic error.
        if not paths:
            return "Error: No file paths provided."

        analyzer = await get_analyzer("mypy")
        if analyzer is None:
            return "Error: mypy is not installed. Run: pip install python-checkup"

        file_paths = [Path(p).resolve() for p in paths]
        for p in file_paths:
            if not p.exists():
                return f"Error: File not found: {p}"

        # The first file's directory serves as the project root for config
        # discovery; mypy still receives the explicit file list.
        project_root = file_paths[0].parent
        diagnostics = await analyzer.analyze(
            AnalysisRequest(
                project_root=project_root,
                files=file_paths,
                config=load_config(project_root),
                categories={Category.TYPE_SAFETY},
                profile=PROFILE_DEFAULT,
            )
        )

        if not diagnostics:
            return "No type errors found. Types look correct!"

        return _format_diagnostics(diagnostics, max_items=30)

    except Exception as e:
        logger.exception("Error in python_checkup_typecheck")
        return f"Error running type check: {e}"
190
+
191
+
192
+ # Tool 4: Security scan specific files
193
+
194
+
195
@mcp_server.tool()
async def python_checkup_security(
    paths: list[str],
) -> str:
    """Run security analysis on specific Python files.

    Checks for vulnerabilities including SQL injection, hardcoded secrets,
    insecure deserialization, shell injection, and weak cryptography.
    Returns findings with CWE mappings and severity levels.

    Call this before committing code that handles user input, credentials,
    file operations, subprocess calls, or external data.

    Args:
        paths: List of absolute file paths to scan for security issues.
    """
    try:
        from python_checkup.analysis_request import AnalysisRequest
        from python_checkup.analyzers.registry import get_analyzer
        from python_checkup.analyzers.ruff import RuffAnalyzer
        from python_checkup.config import load_config
        from python_checkup.plan import PROFILE_DEFAULT

        # Guard: an empty list would otherwise raise IndexError at
        # file_paths[0] below and surface as an unhelpful generic error.
        if not paths:
            return "Error: No file paths provided."

        file_paths = [Path(p).resolve() for p in paths]
        for p in file_paths:
            if not p.exists():
                return f"Error: File not found: {p}"

        # Both analyzers scan the same files with the same config, so build
        # the request once instead of duplicating it in each branch.
        project_root = file_paths[0].parent
        request = AnalysisRequest(
            project_root=project_root,
            files=file_paths,
            config=load_config(project_root),
            categories={Category.SECURITY},
            profile=PROFILE_DEFAULT,
        )

        results: list[Diagnostic] = []

        # Bandit is optional; skip silently when unavailable.
        bandit = await get_analyzer("bandit")
        if bandit:
            results.extend(await bandit.analyze(request))

        # Ruff's S-rules complement Bandit and are usually installed.
        ruff = RuffAnalyzer()
        if await ruff.is_available():
            all_diags = await ruff.analyze(request)
            results.extend(d for d in all_diags if d.category == Category.SECURITY)

        # Bandit and Ruff overlap on several checks; deduplicate findings
        # that point at the same (file, line, rule_id).
        seen: set[tuple[str, int, str]] = set()
        unique: list[Diagnostic] = []
        for d in results:
            key = (str(d.file_path), d.line, d.rule_id)
            if key not in seen:
                seen.add(key)
                unique.append(d)

        if not unique:
            return "No security issues found. Code looks secure!"

        return _format_diagnostics(unique, max_items=30)

    except Exception as e:
        logger.exception("Error in python_checkup_security")
        return f"Error running security scan: {e}"
276
+
277
+
278
+ # Tool 5: Explain a rule
279
+
280
+
281
@mcp_server.tool()
async def python_checkup_explain_rule(
    rule_id: str,
) -> str:
    """Explain a specific lint/analysis rule by its ID.

    Returns the rule's purpose, an example of code that violates it,
    an example of correct code, and the recommended fix pattern.
    Supports Ruff rules (F401, E501, S101, C901, etc.), Bandit rules
    (B101, B608, etc.), and mypy error codes (return-value, arg-type, etc.).

    Use this to understand a flagged issue before fixing it.

    Args:
        rule_id: The rule ID to explain (e.g., 'S101', 'F401', 'C901', 'B608').
    """
    try:
        from python_checkup.skills.rule_db import explain_rule

        # Delegate entirely to the bundled rule database.
        return explain_rule(rule_id)

    except Exception as e:
        logger.exception("Error in python_checkup_explain_rule")
        return f"Error explaining rule: {e}"
306
+
307
+
308
+ # Output formatting helpers
309
+
310
+
311
def _format_report(report: HealthReport) -> str:
    """Format a HealthReport for MCP output, keeping under token limit."""
    max_issues = 50  # cap issue detail to stay under the response token budget

    category_payload = [
        {
            "category": cs.category.value,
            "score": cs.score,
            "weight": cs.weight,
            "issueCount": cs.issue_count,
            "details": cs.details,
        }
        for cs in report.category_scores
    ]
    project_payload = {
        "pythonVersion": report.project.python_version,
        "framework": report.project.framework,
        "totalFiles": report.project.total_files,
        "totalLines": report.project.total_lines,
    }

    # Key insertion order is deliberate: it is the order clients see.
    data: dict[str, object] = {
        "score": report.score,
        "label": report.label,
        "categoryScores": category_payload,
        "project": project_payload,
        "analyzersUsed": report.analyzers_used,
        "analyzersSkipped": report.analyzers_skipped,
        "durationMs": report.duration_ms,
        "totalIssues": len(report.diagnostics),
    }

    # Errors first, then grouped by file path; truncate to the cap.
    def _rank(d: Diagnostic) -> tuple[int, str]:
        return (0 if d.severity == Severity.ERROR else 1, str(d.file_path))

    top = sorted(report.diagnostics, key=_rank)[:max_issues]
    data["topIssues"] = [
        {
            "file": str(d.file_path),
            "line": d.line,
            "ruleId": d.rule_id,
            "severity": d.severity.value,
            "message": d.message,
            "fix": d.fix,
        }
        for d in top
    ]

    if len(report.diagnostics) > max_issues:
        data["truncated"] = True
        data["note"] = f"Showing top {max_issues} of {len(report.diagnostics)} issues."

    return json.dumps(data, indent=2)
365
+
366
+
367
def _format_diagnostics(
    diagnostics: list[Diagnostic],
    max_items: int = 30,
) -> str:
    """Serialize diagnostics as JSON: errors first, capped at *max_items*."""

    # Errors sort ahead of warnings/info; ties ordered by file path.
    def _rank(d: Diagnostic) -> tuple[int, str]:
        return (0 if d.severity == Severity.ERROR else 1, str(d.file_path))

    shown = [
        {
            "file": str(d.file_path),
            "line": d.line,
            "column": d.column,
            "ruleId": d.rule_id,
            "severity": d.severity.value,
            "message": d.message,
            "fix": d.fix,
            "helpUrl": d.help_url,
        }
        for d in sorted(diagnostics, key=_rank)[:max_items]
    ]

    payload: dict[str, object] = {
        "totalIssues": len(diagnostics),
        "showing": len(shown),
        "issues": shown,
    }

    if len(diagnostics) > max_items:
        payload["truncated"] = True

    return json.dumps(payload, indent=2)
403
+
404
+
405
+ # Entry point
406
+
407
+
408
def start_mcp_server() -> None:
    """Start the MCP server on stdio transport.

    Blocks for the lifetime of the server. stdio transport means JSON-RPC
    flows over stdin/stdout, which is why all logging in this module is
    routed to stderr.
    """
    logger.info("Starting python-checkup MCP server")
    mcp_server.run(transport="stdio")
@@ -0,0 +1,114 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass, field
4
+ from enum import Enum
5
+ from pathlib import Path
6
+
7
+
8
class Severity(Enum):
    """Diagnostic severity levels.

    Values are the lowercase strings emitted in JSON output
    (consumers serialize ``severity.value``).
    """

    ERROR = "error"
    WARNING = "warning"
    INFO = "info"
14
+
15
+
16
class Category(Enum):
    """Analysis categories that map to scored dimensions.

    Values are the snake_case strings used in JSON output; the CLI also
    accepts them (plus a few aliases) in comma-separated category lists.
    """

    QUALITY = "quality"
    TYPE_SAFETY = "type_safety"
    SECURITY = "security"
    COMPLEXITY = "complexity"
    DEAD_CODE = "dead_code"
    DEPENDENCIES = "dependencies"
25
+
26
+
27
@dataclass(frozen=True)
class Diagnostic:
    """A single finding from an analyzer.

    This is the universal exchange type. Every analyzer produces these,
    every consumer (scoring, formatting, MCP) reads them.

    Frozen: instances are immutable and hashable, so they can be shared
    and deduplicated safely.
    """

    file_path: Path  # file the finding points at
    line: int  # line number as reported by the tool
    column: int  # column number as reported by the tool
    severity: Severity
    rule_id: str  # e.g., "S101", "E501", "C901", "mypy-return-value"
    tool: str  # e.g., "ruff", "mypy", "bandit", "radon", "vulture"
    category: Category
    message: str  # human-readable description of the finding
    fix: str | None = None  # Suggested remediation text
    help_url: str | None = None  # URL to rule documentation
    end_line: int | None = None  # optional end of the flagged span
    end_column: int | None = None
47
+
48
+
49
@dataclass
class CategoryScore:
    """Score for a single analysis category."""

    category: Category
    score: int  # 0-100 (truncated, not rounded — 100 means truly perfect)
    weight: int  # Configured weight (0-100)
    issue_count: int  # total findings attributed to this category
    error_count: int = 0
    warning_count: int = 0
    details: str = ""  # Human-readable summary, e.g. "8 errors, 15 warnings"
    status: str = "scored"  # scored, partial
    coverage_note: str = ""  # extra context about coverage, if any
62
+
63
+
64
@dataclass
class CategoryCoverage:
    """Coverage metadata for a single category."""

    category: Category
    status: str  # scored, partial, unavailable, skipped_by_user
    analyzers: list[str] = field(default_factory=list)  # analyzers that contributed
    reason: str = ""  # why the category wasn't fully scored, if applicable
72
+
73
+
74
@dataclass
class CoverageInfo:
    """Explains how complete an analysis run was."""

    profile: str  # plan profile that was executed (quick/default/full)
    confidence: str  # full, partial, limited
    requested_categories: list[Category] = field(default_factory=list)
    scored_categories: list[Category] = field(default_factory=list)
    category_coverage: list[CategoryCoverage] = field(default_factory=list)  # per-category detail
    analyzers_used: list[str] = field(default_factory=list)
    analyzers_missing: list[str] = field(default_factory=list)
    analyzers_optional_unavailable: list[str] = field(default_factory=list)
    partial_reasons: list[str] = field(default_factory=list)  # human-readable caveats
    provenance: list[str] = field(default_factory=list)
88
+
89
+
90
@dataclass
class ProjectInfo:
    """Metadata about the analyzed project."""

    python_version: str | None  # detected Python version, if known
    framework: str | None  # "django-5.1", "fastapi-0.115", "flask-3.1", None
    total_files: int  # number of Python files analyzed
    total_lines: int  # combined line count of those files
    packages: list[str] = field(default_factory=list)  # presumably top-level package names — confirm in detection.py
99
+
100
+
101
@dataclass
class HealthReport:
    """Complete analysis result -- the top-level return type."""

    score: int  # 0-100 weighted overall (truncated, not rounded)
    label: str  # "Healthy", "Needs work", "Critical"
    category_scores: list[CategoryScore]  # per-category breakdown
    diagnostics: list[Diagnostic]  # every individual finding
    project: ProjectInfo
    duration_ms: int  # wall-clock analysis time
    analyzers_used: list[str] = field(default_factory=list)
    analyzers_skipped: list[str] = field(default_factory=list)
    cache_stats: dict[str, int] | None = None  # hits, misses, hit_rate_pct
    coverage: CoverageInfo | None = None  # None when coverage tracking was not produced
python_checkup/plan.py ADDED
@@ -0,0 +1,109 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass, field
4
+
5
+ from python_checkup.models import Category
6
+
7
# Named analysis profiles, from fastest to most thorough.
PROFILE_QUICK = "quick"
PROFILE_DEFAULT = "default"
PROFILE_FULL = "full"

# Type-checking backend identifiers; "auto" defers the concrete choice
# (presumably resolved elsewhere at run time — see the runner/registry).
TYPE_BACKEND_MYPY = "mypy"
TYPE_BACKEND_BASEDPYRIGHT = "basedpyright"
TYPE_BACKEND_AUTO = "auto"

# Every scoreable category; the default plan enables all of them.
ALL_CATEGORIES = {
    Category.QUALITY,
    Category.TYPE_SAFETY,
    Category.SECURITY,
    Category.COMPLEXITY,
    Category.DEAD_CODE,
    Category.DEPENDENCIES,
}
23
+
24
+
25
@dataclass(frozen=True)
class ScanPlan:
    """Concrete execution plan for a single run.

    Frozen so a plan cannot drift once built; typically constructed via
    ``build_scan_plan`` rather than by hand.
    """

    profile: str = PROFILE_DEFAULT  # quick / default / full
    categories: frozenset[Category] = field(
        default_factory=lambda: frozenset(ALL_CATEGORIES)
    )  # categories that will actually run
    skipped_categories: frozenset[Category] = field(default_factory=frozenset)  # requested-but-skipped
    type_backend: str = TYPE_BACKEND_AUTO
    include_optional: bool = False  # whether optional analyzers participate
    apply_fixes: bool = False
    show_fix_suggestions: bool = False
    diff_mode: bool = False

    def includes(self, category: Category) -> bool:
        """Return True if the category is enabled in this plan."""
        return category in self.categories
43
+
44
+
45
+ def parse_categories(value: str | None) -> frozenset[Category] | None:
46
+ """Parse a comma-separated category list."""
47
+ if value is None:
48
+ return None
49
+
50
+ mapping = {
51
+ "quality": Category.QUALITY,
52
+ "types": Category.TYPE_SAFETY,
53
+ "type_safety": Category.TYPE_SAFETY,
54
+ "security": Category.SECURITY,
55
+ "complexity": Category.COMPLEXITY,
56
+ "dead_code": Category.DEAD_CODE,
57
+ "dead-code": Category.DEAD_CODE,
58
+ "dependencies": Category.DEPENDENCIES,
59
+ }
60
+
61
+ categories: set[Category] = set()
62
+ for part in value.split(","):
63
+ key = part.strip().lower()
64
+ if not key:
65
+ continue
66
+ category = mapping.get(key)
67
+ if category is None:
68
+ msg = f"Unknown category: {part}"
69
+ raise ValueError(msg)
70
+ categories.add(category)
71
+
72
+ return frozenset(categories)
73
+
74
+
75
def build_scan_plan(
    *,
    profile: str = PROFILE_DEFAULT,
    only_categories: frozenset[Category] | None = None,
    skip_categories: frozenset[Category] | None = None,
    quick: bool = False,
    include_optional: bool | None = None,
    apply_fixes: bool = False,
    show_fix_suggestions: bool = False,
    diff_mode: bool = False,
    type_backend: str = TYPE_BACKEND_AUTO,
) -> ScanPlan:
    """Build a concrete plan from CLI options.

    ``quick`` overrides ``profile``. ``skip_categories`` is subtracted
    after ``only_categories`` is applied, and ``include_optional`` left
    as None defaults to True only for the "full" profile.
    """
    effective_profile = PROFILE_QUICK if quick else profile

    # An empty or missing only_categories selection means "all".
    enabled = set(only_categories or ALL_CATEGORIES)

    removed: frozenset[Category] = frozenset()
    if skip_categories:
        # Record only the skips that actually removed an enabled category,
        # then drop every skipped category from the run.
        removed = frozenset(skip_categories & enabled)
        enabled.difference_update(skip_categories)

    optional = include_optional
    if optional is None:
        optional = effective_profile == PROFILE_FULL

    return ScanPlan(
        profile=effective_profile,
        categories=frozenset(enabled),
        skipped_categories=removed,
        type_backend=type_backend,
        include_optional=optional,
        apply_fixes=apply_fixes,
        show_fix_suggestions=show_fix_suggestions,
        diff_mode=diff_mode,
    )