python-oop-analyzer 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- oop_analyzer/__init__.py +12 -0
- oop_analyzer/analyzer.py +373 -0
- oop_analyzer/cli.py +160 -0
- oop_analyzer/config.py +155 -0
- oop_analyzer/formatters/__init__.py +31 -0
- oop_analyzer/formatters/base.py +101 -0
- oop_analyzer/formatters/html_formatter.py +222 -0
- oop_analyzer/formatters/json_formatter.py +37 -0
- oop_analyzer/formatters/xml_formatter.py +113 -0
- oop_analyzer/py.typed +0 -0
- oop_analyzer/rules/__init__.py +56 -0
- oop_analyzer/rules/base.py +186 -0
- oop_analyzer/rules/boolean_flag.py +391 -0
- oop_analyzer/rules/coupling.py +616 -0
- oop_analyzer/rules/dictionary_usage.py +526 -0
- oop_analyzer/rules/encapsulation.py +291 -0
- oop_analyzer/rules/functions_to_objects.py +331 -0
- oop_analyzer/rules/null_object.py +472 -0
- oop_analyzer/rules/polymorphism.py +428 -0
- oop_analyzer/rules/reference_exposure.py +348 -0
- oop_analyzer/rules/type_code.py +450 -0
- oop_analyzer/safety.py +163 -0
- python_oop_analyzer-0.1.0.dist-info/METADATA +383 -0
- python_oop_analyzer-0.1.0.dist-info/RECORD +27 -0
- python_oop_analyzer-0.1.0.dist-info/WHEEL +4 -0
- python_oop_analyzer-0.1.0.dist-info/entry_points.txt +2 -0
- python_oop_analyzer-0.1.0.dist-info/licenses/LICENSE +21 -0
oop_analyzer/__init__.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
"""
|
|
2
|
+
OOP Analyzer - A static analysis tool for Python OOP best practices.
|
|
3
|
+
|
|
4
|
+
This tool analyzes Python code to check adherence to Object-Oriented Programming
|
|
5
|
+
principles without executing any code (safe static analysis only).
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from .analyzer import OOPAnalyzer
|
|
9
|
+
from .config import AnalyzerConfig
|
|
10
|
+
|
|
11
|
+
# Package version — keep in sync with the wheel/pyproject metadata.
__version__ = "0.1.0"
# Names re-exported as the package's public API.
__all__ = ["OOPAnalyzer", "AnalyzerConfig"]
|
oop_analyzer/analyzer.py
ADDED
|
@@ -0,0 +1,373 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Core OOP Analyzer engine.
|
|
3
|
+
|
|
4
|
+
This module provides the main analyzer class that orchestrates
|
|
5
|
+
the analysis process using configured rules.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import ast
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
from .config import AnalyzerConfig
|
|
13
|
+
from .formatters import AnalysisReport, get_formatter
|
|
14
|
+
from .rules import RULE_REGISTRY, BaseRule, RuleResult
|
|
15
|
+
from .safety import SafetyValidator
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class OOPAnalyzer:
    """
    Main analyzer class for checking OOP best practices.

    This class:
    1. Safely parses Python code (never executes it)
    2. Runs configured rules against the AST
    3. Aggregates results into a report
    4. Formats output in the requested format
    """

    def __init__(self, config: AnalyzerConfig | None = None):
        """
        Initialize the analyzer.

        Args:
            config: Configuration for the analyzer. Uses defaults if not provided.
        """
        self.config = config or AnalyzerConfig.default()
        self.safety = SafetyValidator(max_file_size=self.config.max_file_size)
        self._rules: dict[str, BaseRule] = {}
        self._initialize_rules()

    def _initialize_rules(self) -> None:
        """Instantiate each enabled rule from the registry with its options."""
        for rule_name in self.config.get_enabled_rules():
            if rule_name in RULE_REGISTRY:
                rule_config = self.config.rules.get(rule_name)
                options = rule_config.options if rule_config else {}
                self._rules[rule_name] = RULE_REGISTRY[rule_name](options)

    def _error_report(self, errors: list[dict[str, Any]]) -> AnalysisReport:
        """Build an empty report carrying only *errors* (shared failure path)."""
        return AnalysisReport(
            files_analyzed=[],
            results={},
            config=self.config.to_dict(),
            errors=errors,
        )

    def analyze_source(self, source: str, file_path: str = "<string>") -> AnalysisReport:
        """
        Analyze Python source code string.

        Args:
            source: Python source code as a string
            file_path: Optional file path for reporting

        Returns:
            AnalysisReport with results from all enabled rules
        """
        errors: list[dict[str, Any]] = []
        results: dict[str, RuleResult] = {}

        # Validate that the source is safe/parseable before doing any work.
        safety_report = self.safety.validate_source_code(source, file_path)
        if not safety_report.is_safe:
            errors.append(
                {
                    "file": file_path,
                    "error": "Failed to parse source",
                    "details": safety_report.issues,
                }
            )
            return self._error_report(errors)

        # Parse the source (static only — the code is never executed).
        tree = self.safety.parse_safely(source, file_path)
        if tree is None:
            errors.append(
                {
                    "file": file_path,
                    "error": "Failed to parse source",
                }
            )
            return self._error_report(errors)

        # Run each enabled rule; a crashing rule is recorded as an error
        # rather than aborting the whole analysis.
        for rule_name, rule in self._rules.items():
            try:
                results[rule_name] = rule.analyze(tree, source, file_path)
            except Exception as e:
                errors.append(
                    {
                        "file": file_path,
                        "rule": rule_name,
                        "error": str(e),
                    }
                )

        return AnalysisReport(
            files_analyzed=[file_path],
            results=results,
            config=self.config.to_dict(),
            errors=errors,
        )

    def analyze_file(self, file_path: str | Path) -> AnalysisReport:
        """
        Analyze a single Python file.

        Args:
            file_path: Path to the Python file

        Returns:
            AnalysisReport with results from all enabled rules
        """
        path = Path(file_path)
        errors: list[dict[str, Any]] = []

        # Validate the file (size limit, readability) before reading it.
        safety_report = self.safety.validate_file_path(path)
        if not safety_report.is_safe:
            errors.append(
                {
                    "file": str(path),
                    "error": "File validation failed",
                    "details": safety_report.issues,
                }
            )
            return self._error_report(errors)

        try:
            source = path.read_text(encoding="utf-8")
        except Exception as e:
            errors.append(
                {
                    "file": str(path),
                    "error": f"Failed to read file: {e}",
                }
            )
            return self._error_report(errors)

        return self.analyze_source(source, str(path))

    def analyze_directory(self, dir_path: str | Path) -> AnalysisReport:
        """
        Analyze all Python files in a directory.

        Args:
            dir_path: Path to the directory

        Returns:
            AnalysisReport with aggregated results from all files
        """
        path = Path(dir_path)
        errors: list[dict[str, Any]] = []
        all_files: list[str] = []
        parsed_files: list[tuple[ast.Module, str, str]] = []

        # Validate directory before walking it.
        safety_report = self.safety.validate_directory(path)
        if not safety_report.is_safe:
            errors.append(
                {
                    "path": str(path),
                    "error": "Directory validation failed",
                    "details": safety_report.issues,
                }
            )
            return self._error_report(errors)

        # Collect candidate files, then apply include/exclude patterns.
        python_files = self.safety.collect_python_files(path)
        python_files = self._filter_files(python_files, path)

        # Parse every file up front so multi-file rules can see all ASTs.
        for file_path in python_files:
            file_safety = self.safety.validate_file_path(file_path)
            if not file_safety.is_safe:
                errors.append(
                    {
                        "file": str(file_path),
                        "error": "File validation failed",
                        "details": file_safety.issues,
                    }
                )
                continue

            try:
                source = file_path.read_text(encoding="utf-8")
                tree = self.safety.parse_safely(source, str(file_path))
                if tree:
                    parsed_files.append((tree, source, str(file_path)))
                    all_files.append(str(file_path))
                else:
                    errors.append(
                        {
                            "file": str(file_path),
                            "error": "Failed to parse file",
                        }
                    )
            except Exception as e:
                errors.append(
                    {
                        "file": str(file_path),
                        "error": f"Failed to read file: {e}",
                    }
                )

        # Run rules that support multi-file analysis; per-rule crashes are
        # recorded but do not abort the remaining rules.
        results: dict[str, RuleResult] = {}
        for rule_name, rule in self._rules.items():
            try:
                results[rule_name] = rule.analyze_multiple(parsed_files)
            except Exception as e:
                errors.append(
                    {
                        "rule": rule_name,
                        "error": str(e),
                    }
                )

        return AnalysisReport(
            files_analyzed=all_files,
            results=results,
            config=self.config.to_dict(),
            errors=errors,
        )

    def analyze_module(self, module_path: str | Path) -> AnalysisReport:
        """
        Analyze a Python module (directory with __init__.py).

        Args:
            module_path: Path to the module directory

        Returns:
            AnalysisReport with results
        """
        path = Path(module_path)

        # A package must carry an __init__.py; otherwise report the problem.
        init_file = path / "__init__.py"
        if not init_file.exists():
            return self._error_report(
                [
                    {
                        "path": str(path),
                        "error": "Not a valid Python module (missing __init__.py)",
                    }
                ]
            )

        return self.analyze_directory(path)

    def analyze(self, path: str | Path) -> AnalysisReport:
        """
        Analyze a path (file, directory, or module).

        Automatically detects the type and calls the appropriate method.

        Args:
            path: Path to analyze

        Returns:
            AnalysisReport with results
        """
        path = Path(path)

        if path.is_file():
            return self.analyze_file(path)
        elif path.is_dir():
            # A directory containing __init__.py is treated as a module.
            if (path / "__init__.py").exists():
                return self.analyze_module(path)
            return self.analyze_directory(path)
        else:
            return self._error_report(
                [
                    {
                        "path": str(path),
                        "error": "Path does not exist",
                    }
                ]
            )

    def format_report(self, report: AnalysisReport, format_name: str | None = None) -> str:
        """
        Format a report in the specified format.

        Args:
            report: The analysis report to format
            format_name: Output format (json, xml, html). Uses config default if not specified.

        Returns:
            Formatted string
        """
        format_name = format_name or self.config.output_format
        formatter_class = get_formatter(format_name)
        formatter = formatter_class()
        return formatter.format(report)

    def _filter_files(self, files: list[Path], base_path: Path) -> list[Path]:
        """Filter files based on include/exclude glob patterns.

        Relative paths are matched in POSIX form ("/" separators) so the
        configured patterns (written with forward slashes, e.g. "**/tests/**")
        behave identically on Windows and Unix.
        """
        import fnmatch

        filtered: list[Path] = []

        for file_path in files:
            try:
                relative = file_path.relative_to(base_path)
            except ValueError:
                # File lies outside base_path; match against the full path.
                relative = file_path

            # as_posix() (not str()) so fnmatch sees "/" even on Windows.
            rel_str = relative.as_posix()

            # Exclusion wins: skip the file if any exclude pattern matches.
            if any(
                fnmatch.fnmatch(rel_str, pattern)
                for pattern in self.config.exclude_patterns
            ):
                continue

            # Include patterns are matched against the bare file name.
            if any(
                fnmatch.fnmatch(file_path.name, pattern)
                for pattern in self.config.include_patterns
            ):
                filtered.append(file_path)

        return filtered
|
oop_analyzer/cli.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Command-line interface for OOP Analyzer.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import argparse
|
|
6
|
+
import sys
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
from .analyzer import OOPAnalyzer
|
|
10
|
+
from .config import AnalyzerConfig
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _build_parser() -> argparse.ArgumentParser:
    """Construct the CLI argument parser (kept separate from run logic)."""
    parser = argparse.ArgumentParser(
        prog="oop-analyzer",
        description="Analyze Python code for OOP best practices",
    )

    parser.add_argument(
        "path",
        type=str,
        nargs="?",
        help="Path to Python file, module, or directory to analyze",
    )

    parser.add_argument(
        "-f",
        "--format",
        choices=["json", "xml", "html"],
        default="json",
        help="Output format (default: json)",
    )

    parser.add_argument(
        "-o",
        "--output",
        type=str,
        help="Output file path (default: stdout)",
    )

    parser.add_argument(
        "-c",
        "--config",
        type=str,
        help="Path to configuration file",
    )

    parser.add_argument(
        "--rules",
        type=str,
        nargs="+",
        help="Enable only specific rules",
    )

    parser.add_argument(
        "--disable-rules",
        type=str,
        nargs="+",
        help="Disable specific rules",
    )

    parser.add_argument(
        "--list-rules",
        action="store_true",
        help="List available rules and exit",
    )

    parser.add_argument(
        "--init-config",
        type=str,
        metavar="FILE",
        help="Generate a default configuration file",
    )

    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="Verbose output",
    )

    return parser


def main() -> int:
    """Main entry point for the CLI.

    Returns a process exit code:
        0 - success (or informational commands like --list-rules)
        1 - usage/config error, or violations were found
        2 - analysis errors occurred
    """
    parser = _build_parser()
    args = parser.parse_args()

    # Handle --list-rules (informational, no path required)
    if args.list_rules:
        print("Available rules:")
        for name, desc in AnalyzerConfig.AVAILABLE_RULES.items():
            print(f"  {name}: {desc}")
        return 0

    # Handle --init-config (writes a default config file and exits)
    if args.init_config:
        config = AnalyzerConfig.default()
        config.save(args.init_config)
        print(f"Configuration saved to: {args.init_config}")
        return 0

    # Load or create configuration
    if args.config:
        try:
            config = AnalyzerConfig.from_file(args.config)
        except FileNotFoundError:
            print(f"Error: Config file not found: {args.config}", file=sys.stderr)
            return 1
        except ValueError as e:
            # json.JSONDecodeError is a ValueError subclass; without this
            # handler a malformed config file crashed with a raw traceback.
            print(f"Error: Invalid config file {args.config}: {e}", file=sys.stderr)
            return 1
    else:
        config = AnalyzerConfig.default()

    # Apply command-line rule overrides
    if args.rules:
        config.enable_only(*args.rules)

    if args.disable_rules:
        for rule in args.disable_rules:
            config.disable_rule(rule)

    # Set output format
    config.output_format = args.format

    # Validate path (required for analysis)
    if not args.path:
        print("Error: path is required for analysis", file=sys.stderr)
        parser.print_help()
        return 1

    path = Path(args.path)
    if not path.exists():
        print(f"Error: Path does not exist: {args.path}", file=sys.stderr)
        return 1

    # Run analysis (verbose chatter goes to stderr so stdout stays parseable)
    if args.verbose:
        print(f"Analyzing: {path}", file=sys.stderr)
        print(f"Enabled rules: {config.get_enabled_rules()}", file=sys.stderr)

    analyzer = OOPAnalyzer(config)
    report = analyzer.analyze(path)

    # Format output
    output = analyzer.format_report(report)

    # Write output
    if args.output:
        output_path = Path(args.output)
        output_path.write_text(output, encoding="utf-8")
        if args.verbose:
            print(f"Report saved to: {output_path}", file=sys.stderr)
    else:
        print(output)

    # Return non-zero if there are errors or violations
    if report.errors:
        return 2
    if report.total_violations > 0:
        return 1
    return 0
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
if __name__ == "__main__":
    # Allow direct execution (e.g. `python -m oop_analyzer.cli`);
    # propagate main()'s exit code to the shell.
    sys.exit(main())
|
oop_analyzer/config.py
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Configuration module for OOP Analyzer.
|
|
3
|
+
|
|
4
|
+
Provides configuration options for selecting which rules to run
|
|
5
|
+
and customizing analyzer behavior.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
from dataclasses import dataclass, field
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass
class RuleConfig:
    """Configuration for a specific rule."""

    # Whether the rule participates in analysis at all.
    enabled: bool = True
    # Reporting severity attached to the rule's findings.
    severity: str = "warning"  # "error", "warning", "info"
    # Rule-specific options forwarded to the rule's constructor.
    options: dict[str, Any] = field(default_factory=dict)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@dataclass
class AnalyzerConfig:
    """
    Configuration for the OOP Analyzer.

    Attributes:
        rules: Dictionary mapping rule names to their configurations
        output_format: Output format ("json", "xml", "html")
        include_patterns: Glob patterns for files to include
        exclude_patterns: Glob patterns for files to exclude
        max_file_size: Maximum file size in bytes to analyze
    """

    rules: dict[str, RuleConfig] = field(default_factory=dict)
    output_format: str = "json"
    include_patterns: list[str] = field(default_factory=lambda: ["*.py"])
    exclude_patterns: list[str] = field(
        default_factory=lambda: ["**/test_*.py", "**/*_test.py", "**/tests/**"]
    )
    max_file_size: int = 10 * 1024 * 1024  # 10 MB

    # Registry of known rule names -> human-readable descriptions.
    # Deliberately unannotated so the dataclass machinery treats it as a
    # class attribute, not a field.
    AVAILABLE_RULES = {
        "encapsulation": "Check for direct property access (tell don't ask)",
        "coupling": "Measure coupling and show dependency graph",
        "null_object": "Detect None usage replaceable by Null Object pattern",
        "polymorphism": "Find if blocks replaceable by polymorphism",
        "functions_to_objects": "Detect functions that could be objects",
        "type_code": "Detect type code conditionals replaceable by polymorphism",
        "reference_exposure": "Detect methods exposing internal mutable state",
        "dictionary_usage": "Detect dictionary usage that should be objects",
        "boolean_flag": "Detect boolean flag parameters causing behavior branching",
    }

    def __post_init__(self) -> None:
        """Ensure every known rule has a (default-enabled) config entry."""
        for rule_name in self.AVAILABLE_RULES:
            if rule_name not in self.rules:
                self.rules[rule_name] = RuleConfig()

    def enable_rule(self, rule_name: str, **options: Any) -> None:
        """Enable a specific rule with optional configuration.

        Raises:
            ValueError: If rule_name is not a known rule.
        """
        if rule_name not in self.AVAILABLE_RULES:
            raise ValueError(f"Unknown rule: {rule_name}")
        self.rules[rule_name] = RuleConfig(enabled=True, options=options)

    def disable_rule(self, rule_name: str) -> None:
        """Disable a specific rule (unknown names are ignored)."""
        if rule_name in self.rules:
            self.rules[rule_name].enabled = False

    def enable_only(self, *rule_names: str) -> None:
        """Enable only the specified rules, disable all others."""
        for rule_name in self.AVAILABLE_RULES:
            if rule_name in rule_names:
                self.rules[rule_name] = RuleConfig(enabled=True)
            else:
                self.rules[rule_name] = RuleConfig(enabled=False)

    def get_enabled_rules(self) -> list[str]:
        """Get list of enabled rule names."""
        return [name for name, config in self.rules.items() if config.enabled]

    def is_rule_enabled(self, rule_name: str) -> bool:
        """Check if a rule is enabled."""
        return rule_name in self.rules and self.rules[rule_name].enabled

    @classmethod
    def from_file(cls, config_path: str | Path) -> "AnalyzerConfig":
        """Load configuration from a JSON file.

        Raises:
            FileNotFoundError: If the file does not exist.
            json.JSONDecodeError: If the file is not valid JSON.
        """
        path = Path(config_path)
        if not path.exists():
            raise FileNotFoundError(f"Config file not found: {path}")

        # Explicit UTF-8: the platform default encoding (e.g. cp1252 on
        # Windows) would make config files non-portable.
        data = json.loads(path.read_text(encoding="utf-8"))

        rules = {}
        for rule_name, rule_data in data.get("rules", {}).items():
            # A bare boolean is shorthand for {"enabled": <bool>}; anything
            # that is neither bool nor dict is silently skipped.
            if isinstance(rule_data, bool):
                rules[rule_name] = RuleConfig(enabled=rule_data)
            elif isinstance(rule_data, dict):
                rules[rule_name] = RuleConfig(
                    enabled=rule_data.get("enabled", True),
                    severity=rule_data.get("severity", "warning"),
                    options=rule_data.get("options", {}),
                )

        return cls(
            rules=rules,
            output_format=data.get("output_format", "json"),
            include_patterns=data.get("include_patterns", ["*.py"]),
            exclude_patterns=data.get(
                "exclude_patterns",
                ["**/test_*.py", "**/*_test.py", "**/tests/**"],
            ),
            max_file_size=data.get("max_file_size", 10 * 1024 * 1024),
        )

    def to_dict(self) -> dict[str, Any]:
        """Convert configuration to dictionary."""
        return {
            "rules": {
                name: {
                    "enabled": config.enabled,
                    "severity": config.severity,
                    "options": config.options,
                }
                for name, config in self.rules.items()
            },
            "output_format": self.output_format,
            "include_patterns": self.include_patterns,
            "exclude_patterns": self.exclude_patterns,
            "max_file_size": self.max_file_size,
        }

    def save(self, config_path: str | Path) -> None:
        """Save configuration to a JSON file (UTF-8, pretty-printed)."""
        path = Path(config_path)
        # Explicit UTF-8 for the same portability reason as from_file().
        path.write_text(json.dumps(self.to_dict(), indent=2), encoding="utf-8")

    @classmethod
    def default(cls) -> "AnalyzerConfig":
        """Create a default configuration with all rules enabled."""
        return cls()

    @classmethod
    def minimal(cls) -> "AnalyzerConfig":
        """Create a minimal configuration with only essential rules."""
        config = cls()
        config.enable_only("encapsulation", "coupling")
        return config
|