thailint 0.5.0__py3-none-any.whl → 0.15.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- src/__init__.py +1 -0
- src/analyzers/__init__.py +4 -3
- src/analyzers/ast_utils.py +54 -0
- src/analyzers/rust_base.py +155 -0
- src/analyzers/rust_context.py +141 -0
- src/analyzers/typescript_base.py +4 -0
- src/cli/__init__.py +30 -0
- src/cli/__main__.py +22 -0
- src/cli/config.py +480 -0
- src/cli/config_merge.py +241 -0
- src/cli/linters/__init__.py +67 -0
- src/cli/linters/code_patterns.py +270 -0
- src/cli/linters/code_smells.py +342 -0
- src/cli/linters/documentation.py +83 -0
- src/cli/linters/performance.py +287 -0
- src/cli/linters/shared.py +331 -0
- src/cli/linters/structure.py +327 -0
- src/cli/linters/structure_quality.py +328 -0
- src/cli/main.py +120 -0
- src/cli/utils.py +395 -0
- src/cli_main.py +37 -0
- src/config.py +38 -25
- src/core/base.py +7 -2
- src/core/cli_utils.py +19 -2
- src/core/config_parser.py +5 -2
- src/core/constants.py +54 -0
- src/core/linter_utils.py +95 -6
- src/core/python_lint_rule.py +101 -0
- src/core/registry.py +1 -1
- src/core/rule_discovery.py +147 -84
- src/core/types.py +13 -0
- src/core/violation_builder.py +78 -15
- src/core/violation_utils.py +69 -0
- src/formatters/__init__.py +22 -0
- src/formatters/sarif.py +202 -0
- src/linter_config/directive_markers.py +109 -0
- src/linter_config/ignore.py +254 -395
- src/linter_config/loader.py +45 -12
- src/linter_config/pattern_utils.py +65 -0
- src/linter_config/rule_matcher.py +89 -0
- src/linters/collection_pipeline/__init__.py +90 -0
- src/linters/collection_pipeline/any_all_analyzer.py +281 -0
- src/linters/collection_pipeline/ast_utils.py +40 -0
- src/linters/collection_pipeline/config.py +75 -0
- src/linters/collection_pipeline/continue_analyzer.py +94 -0
- src/linters/collection_pipeline/detector.py +360 -0
- src/linters/collection_pipeline/filter_map_analyzer.py +402 -0
- src/linters/collection_pipeline/linter.py +420 -0
- src/linters/collection_pipeline/suggestion_builder.py +130 -0
- src/linters/cqs/__init__.py +54 -0
- src/linters/cqs/config.py +55 -0
- src/linters/cqs/function_analyzer.py +201 -0
- src/linters/cqs/input_detector.py +139 -0
- src/linters/cqs/linter.py +159 -0
- src/linters/cqs/output_detector.py +84 -0
- src/linters/cqs/python_analyzer.py +54 -0
- src/linters/cqs/types.py +82 -0
- src/linters/cqs/typescript_cqs_analyzer.py +61 -0
- src/linters/cqs/typescript_function_analyzer.py +192 -0
- src/linters/cqs/typescript_input_detector.py +203 -0
- src/linters/cqs/typescript_output_detector.py +117 -0
- src/linters/cqs/violation_builder.py +94 -0
- src/linters/dry/base_token_analyzer.py +16 -9
- src/linters/dry/block_filter.py +120 -20
- src/linters/dry/block_grouper.py +4 -0
- src/linters/dry/cache.py +104 -10
- src/linters/dry/cache_query.py +4 -0
- src/linters/dry/config.py +54 -11
- src/linters/dry/constant.py +92 -0
- src/linters/dry/constant_matcher.py +223 -0
- src/linters/dry/constant_violation_builder.py +98 -0
- src/linters/dry/duplicate_storage.py +5 -4
- src/linters/dry/file_analyzer.py +4 -2
- src/linters/dry/inline_ignore.py +7 -16
- src/linters/dry/linter.py +183 -48
- src/linters/dry/python_analyzer.py +60 -439
- src/linters/dry/python_constant_extractor.py +100 -0
- src/linters/dry/single_statement_detector.py +417 -0
- src/linters/dry/token_hasher.py +116 -112
- src/linters/dry/typescript_analyzer.py +68 -382
- src/linters/dry/typescript_constant_extractor.py +138 -0
- src/linters/dry/typescript_statement_detector.py +255 -0
- src/linters/dry/typescript_value_extractor.py +70 -0
- src/linters/dry/violation_builder.py +4 -0
- src/linters/dry/violation_filter.py +5 -4
- src/linters/dry/violation_generator.py +71 -14
- src/linters/file_header/atemporal_detector.py +68 -50
- src/linters/file_header/base_parser.py +93 -0
- src/linters/file_header/bash_parser.py +66 -0
- src/linters/file_header/config.py +90 -16
- src/linters/file_header/css_parser.py +70 -0
- src/linters/file_header/field_validator.py +36 -33
- src/linters/file_header/linter.py +140 -144
- src/linters/file_header/markdown_parser.py +130 -0
- src/linters/file_header/python_parser.py +14 -58
- src/linters/file_header/typescript_parser.py +73 -0
- src/linters/file_header/violation_builder.py +13 -12
- src/linters/file_placement/config_loader.py +3 -1
- src/linters/file_placement/directory_matcher.py +4 -0
- src/linters/file_placement/linter.py +66 -34
- src/linters/file_placement/pattern_matcher.py +41 -6
- src/linters/file_placement/pattern_validator.py +31 -12
- src/linters/file_placement/rule_checker.py +12 -7
- src/linters/lazy_ignores/__init__.py +43 -0
- src/linters/lazy_ignores/config.py +74 -0
- src/linters/lazy_ignores/directive_utils.py +164 -0
- src/linters/lazy_ignores/header_parser.py +177 -0
- src/linters/lazy_ignores/linter.py +158 -0
- src/linters/lazy_ignores/matcher.py +168 -0
- src/linters/lazy_ignores/python_analyzer.py +209 -0
- src/linters/lazy_ignores/rule_id_utils.py +180 -0
- src/linters/lazy_ignores/skip_detector.py +298 -0
- src/linters/lazy_ignores/types.py +71 -0
- src/linters/lazy_ignores/typescript_analyzer.py +146 -0
- src/linters/lazy_ignores/violation_builder.py +135 -0
- src/linters/lbyl/__init__.py +31 -0
- src/linters/lbyl/config.py +63 -0
- src/linters/lbyl/linter.py +67 -0
- src/linters/lbyl/pattern_detectors/__init__.py +53 -0
- src/linters/lbyl/pattern_detectors/base.py +63 -0
- src/linters/lbyl/pattern_detectors/dict_key_detector.py +107 -0
- src/linters/lbyl/pattern_detectors/division_check_detector.py +232 -0
- src/linters/lbyl/pattern_detectors/file_exists_detector.py +220 -0
- src/linters/lbyl/pattern_detectors/hasattr_detector.py +119 -0
- src/linters/lbyl/pattern_detectors/isinstance_detector.py +119 -0
- src/linters/lbyl/pattern_detectors/len_check_detector.py +173 -0
- src/linters/lbyl/pattern_detectors/none_check_detector.py +146 -0
- src/linters/lbyl/pattern_detectors/string_validator_detector.py +145 -0
- src/linters/lbyl/python_analyzer.py +215 -0
- src/linters/lbyl/violation_builder.py +354 -0
- src/linters/magic_numbers/context_analyzer.py +227 -225
- src/linters/magic_numbers/linter.py +28 -82
- src/linters/magic_numbers/python_analyzer.py +4 -16
- src/linters/magic_numbers/typescript_analyzer.py +9 -12
- src/linters/magic_numbers/typescript_ignore_checker.py +81 -0
- src/linters/method_property/__init__.py +49 -0
- src/linters/method_property/config.py +138 -0
- src/linters/method_property/linter.py +414 -0
- src/linters/method_property/python_analyzer.py +473 -0
- src/linters/method_property/violation_builder.py +119 -0
- src/linters/nesting/linter.py +24 -16
- src/linters/nesting/python_analyzer.py +4 -0
- src/linters/nesting/typescript_analyzer.py +6 -12
- src/linters/nesting/violation_builder.py +1 -0
- src/linters/performance/__init__.py +91 -0
- src/linters/performance/config.py +43 -0
- src/linters/performance/constants.py +49 -0
- src/linters/performance/linter.py +149 -0
- src/linters/performance/python_analyzer.py +365 -0
- src/linters/performance/regex_analyzer.py +312 -0
- src/linters/performance/regex_linter.py +139 -0
- src/linters/performance/typescript_analyzer.py +236 -0
- src/linters/performance/violation_builder.py +160 -0
- src/linters/print_statements/config.py +7 -12
- src/linters/print_statements/linter.py +26 -43
- src/linters/print_statements/python_analyzer.py +91 -93
- src/linters/print_statements/typescript_analyzer.py +15 -25
- src/linters/print_statements/violation_builder.py +12 -14
- src/linters/srp/class_analyzer.py +11 -7
- src/linters/srp/heuristics.py +56 -22
- src/linters/srp/linter.py +15 -16
- src/linters/srp/python_analyzer.py +55 -20
- src/linters/srp/typescript_metrics_calculator.py +110 -50
- src/linters/stateless_class/__init__.py +25 -0
- src/linters/stateless_class/config.py +58 -0
- src/linters/stateless_class/linter.py +349 -0
- src/linters/stateless_class/python_analyzer.py +290 -0
- src/linters/stringly_typed/__init__.py +36 -0
- src/linters/stringly_typed/config.py +189 -0
- src/linters/stringly_typed/context_filter.py +451 -0
- src/linters/stringly_typed/function_call_violation_builder.py +135 -0
- src/linters/stringly_typed/ignore_checker.py +100 -0
- src/linters/stringly_typed/ignore_utils.py +51 -0
- src/linters/stringly_typed/linter.py +376 -0
- src/linters/stringly_typed/python/__init__.py +33 -0
- src/linters/stringly_typed/python/analyzer.py +348 -0
- src/linters/stringly_typed/python/call_tracker.py +175 -0
- src/linters/stringly_typed/python/comparison_tracker.py +257 -0
- src/linters/stringly_typed/python/condition_extractor.py +134 -0
- src/linters/stringly_typed/python/conditional_detector.py +179 -0
- src/linters/stringly_typed/python/constants.py +21 -0
- src/linters/stringly_typed/python/match_analyzer.py +94 -0
- src/linters/stringly_typed/python/validation_detector.py +189 -0
- src/linters/stringly_typed/python/variable_extractor.py +96 -0
- src/linters/stringly_typed/storage.py +620 -0
- src/linters/stringly_typed/storage_initializer.py +45 -0
- src/linters/stringly_typed/typescript/__init__.py +28 -0
- src/linters/stringly_typed/typescript/analyzer.py +157 -0
- src/linters/stringly_typed/typescript/call_tracker.py +335 -0
- src/linters/stringly_typed/typescript/comparison_tracker.py +378 -0
- src/linters/stringly_typed/violation_generator.py +419 -0
- src/orchestrator/core.py +252 -14
- src/orchestrator/language_detector.py +5 -3
- src/templates/thailint_config_template.yaml +196 -0
- src/utils/project_root.py +3 -0
- thailint-0.15.3.dist-info/METADATA +187 -0
- thailint-0.15.3.dist-info/RECORD +226 -0
- thailint-0.15.3.dist-info/entry_points.txt +4 -0
- src/cli.py +0 -1665
- thailint-0.5.0.dist-info/METADATA +0 -1286
- thailint-0.5.0.dist-info/RECORD +0 -96
- thailint-0.5.0.dist-info/entry_points.txt +0 -4
- {thailint-0.5.0.dist-info → thailint-0.15.3.dist-info}/WHEEL +0 -0
- {thailint-0.5.0.dist-info → thailint-0.15.3.dist-info}/licenses/LICENSE +0 -0
src/linters/lazy_ignores/directive_utils.py
@@ -0,0 +1,164 @@
"""
Purpose: Shared utility functions for creating IgnoreDirective objects

Scope: Common directive creation and path normalization for ignore detectors

Overview: Provides shared utility functions used across Python, TypeScript, and test skip
    detectors. Centralizes logic for normalizing file paths, extracting rule IDs from
    regex matches, extracting inline justifications, and creating IgnoreDirective objects
    to avoid code duplication.

Dependencies: re for match handling, pathlib for file paths, types module for dataclasses

Exports: normalize_path, extract_rule_ids, create_directive, create_directive_no_rules,
    extract_inline_justification

Interfaces: Pure utility functions with no state

Implementation: Simple helper functions for directive creation

Suppressions:
    too-many-arguments: create_directive needs all params for proper IgnoreDirective construction
    too-many-positional-arguments: Factory function mirrors IgnoreDirective fields
"""

import re
from pathlib import Path

from src.linters.lazy_ignores.types import IgnoreDirective, IgnoreType

# Pattern for inline justification: space-dash-space followed by text
INLINE_JUSTIFICATION_PATTERN = re.compile(r"\s+-\s+(.+)$")


def normalize_path(file_path: Path | str | None) -> Path:
    """Normalize file path to Path object.

    Args:
        file_path: Path object, string path, or None

    Returns:
        Path object, defaults to Path("unknown") if None
    """
    if file_path is None:
        return Path("unknown")
    if isinstance(file_path, str):
        return Path(file_path)
    return file_path


def extract_inline_justification(raw_text: str) -> str | None:
    """Extract inline justification from raw directive text.

    Looks for the pattern " - " (space-dash-space) followed by justification text.
    This allows inline justifications like:
        # noqa: PLR0912 - state machine inherently complex
        # type: ignore[arg-type] - library has typing bug

    Args:
        raw_text: The raw comment text containing the ignore directive

    Returns:
        The justification text if found, None otherwise.
        Returns None for empty/whitespace-only justifications.
    """
    match = INLINE_JUSTIFICATION_PATTERN.search(raw_text)
    if not match:
        return None

    justification = match.group(1).strip()
    return justification if justification else None


def _get_captured_group(match: re.Match[str]) -> str | None:
    """Get the first captured group from a regex match if it exists.

    Args:
        match: Regex match object

    Returns:
        Captured group text or None if no capture groups
    """
    if match.lastindex is None or match.lastindex < 1:
        return None
    return match.group(1)


def extract_rule_ids(match: re.Match[str]) -> list[str]:
    """Extract rule IDs from regex match group 1.

    Args:
        match: Regex match object with optional group 1 containing rule IDs

    Returns:
        List of rule ID strings, empty if no specific rules
    """
    group = _get_captured_group(match)
    if not group:
        return []

    ids = [rule_id.strip() for rule_id in group.split(",")]
    return [rule_id for rule_id in ids if rule_id]


def create_directive(  # pylint: disable=too-many-arguments,too-many-positional-arguments
    match: re.Match[str],
    ignore_type: IgnoreType,
    line_num: int,
    file_path: Path,
    rule_ids: tuple[str, ...] | None = None,
    full_line: str | None = None,
) -> IgnoreDirective:
    """Create an IgnoreDirective from a regex match.

    Args:
        match: Regex match object
        ignore_type: Type of ignore pattern
        line_num: 1-indexed line number
        file_path: Path to source file
        rule_ids: Optional tuple of rule IDs; if None, extracts from match group 1
        full_line: Optional full line text for extracting inline justification

    Returns:
        IgnoreDirective for this match
    """
    if rule_ids is None:
        rule_ids = tuple(extract_rule_ids(match))

    # Use full line from match position to capture inline justification
    if full_line is not None:
        raw_text = full_line[match.start() :].strip()
    else:
        raw_text = match.group(0).strip()

    inline_justification = extract_inline_justification(raw_text)

    return IgnoreDirective(
        ignore_type=ignore_type,
        rule_ids=rule_ids,
        line=line_num,
        column=match.start() + 1,
        raw_text=raw_text,
        file_path=file_path,
        inline_justification=inline_justification,
    )


def create_directive_no_rules(
    match: re.Match[str],
    ignore_type: IgnoreType,
    line_num: int,
    file_path: Path,
) -> IgnoreDirective:
    """Create an IgnoreDirective without rule IDs (for patterns like test skips).

    Args:
        match: Regex match object
        ignore_type: Type of ignore pattern
        line_num: 1-indexed line number
        file_path: Path to source file

    Returns:
        IgnoreDirective with empty rule_ids tuple
    """
    return create_directive(match, ignore_type, line_num, file_path, rule_ids=())
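
For orientation, a minimal usage sketch of these helpers. The noqa regex below is a stand-in for illustration; the real detection patterns live in python_analyzer.py and may differ.

import re

from src.linters.lazy_ignores.directive_utils import (
    extract_inline_justification,
    extract_rule_ids,
    normalize_path,
)

# Hypothetical noqa pattern for illustration only; group 1 captures the rule list.
NOQA = re.compile(r"# noqa(?::\s*([A-Z0-9, ]+))?")

line = "x = eval(data)  # noqa: PLR0912, S307 - state machine inherently complex"
match = NOQA.search(line)
assert match is not None

print(extract_rule_ids(match))             # ['PLR0912', 'S307']
print(extract_inline_justification(line))  # 'state machine inherently complex'
print(normalize_path(None))                # unknown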
src/linters/lazy_ignores/header_parser.py
@@ -0,0 +1,177 @@
"""
Purpose: Parse Suppressions section from file headers

Scope: Python docstrings and TypeScript JSDoc comment header parsing

Overview: Provides SuppressionsParser class for extracting the Suppressions section from
    file headers. Parses Python triple-quoted docstrings and TypeScript JSDoc comments.
    Extracts rule IDs and justifications, normalizing rule IDs for case-insensitive matching.
    Returns dictionary mapping normalized rule IDs to their justifications.

Dependencies: re for pattern matching, Language enum for type safety

Exports: SuppressionsParser

Interfaces: parse(header: str) -> dict[str, str], extract_header(code: str, language: Language)

Implementation: Regex-based section extraction with line-by-line entry parsing
"""

import re

from src.core.constants import Language


class SuppressionsParser:
    """Parses Suppressions section from file headers."""

    # Pattern to find Suppressions section (case-insensitive)
    # Matches "Suppressions:" followed by indented lines
    SUPPRESSIONS_SECTION = re.compile(
        r"Suppressions:\s*\n((?:[ \t]+\S.*\n?)+)",
        re.MULTILINE | re.IGNORECASE,
    )

    # Pattern for JSDoc-style suppressions (* prefixed lines)
    JSDOC_SUPPRESSIONS_SECTION = re.compile(
        r"Suppressions:\s*\n((?:\s*\*\s+\S.*\n?)+)",
        re.MULTILINE | re.IGNORECASE,
    )

    # Pattern to parse individual entries (rule_id: justification)
    # Rule IDs can contain colons (e.g., type:ignore[arg-type])
    # Handles list prefixes: "- ", "* ", "• " and plain indented entries
    # Justification must start with word char or underscore to avoid matching continuation lines
    ENTRY_PATTERN = re.compile(
        r"^\s*[-*•]?\s*(.+):\s+([A-Za-z_].*)$",
        re.MULTILINE,
    )

    def parse(self, header: str) -> dict[str, str]:
        """Parse Suppressions section, return rule_id -> justification mapping.

        Args:
            header: File header content (docstring or JSDoc)

        Returns:
            Dictionary mapping normalized rule IDs to justification strings
        """
        # Try standard Python-style first, then JSDoc-style
        section_match = self.SUPPRESSIONS_SECTION.search(header)
        if not section_match:
            section_match = self.JSDOC_SUPPRESSIONS_SECTION.search(header)

        if not section_match:
            return {}

        entries: dict[str, str] = {}
        section_content = section_match.group(1)

        for match in self.ENTRY_PATTERN.finditer(section_content):
            rule_id = match.group(1).strip()
            justification = match.group(2).strip()

            # Skip entries with empty justification
            if justification:
                normalized_id = self.normalize_rule_id(rule_id)
                entries[normalized_id] = justification

        return entries

    def normalize_rule_id(self, rule_id: str) -> str:
        """Normalize rule ID for case-insensitive matching.

        Strips common list prefixes (-, *, •) and normalizes to lowercase.

        Args:
            rule_id: Original rule ID string

        Returns:
            Normalized rule ID (lowercase, no list prefix)
        """
        normalized = rule_id.lower().strip()
        # Strip common list prefixes (bullet points)
        if normalized.startswith(("- ", "* ", "• ")):
            normalized = normalized[2:]
        elif normalized.startswith(("-", "*", "•")):
            normalized = normalized[1:].lstrip()
        return normalized

    def extract_header(self, code: str, language: str | Language = Language.PYTHON) -> str:
        """Extract the header section from code.

        Args:
            code: Full source code
            language: Programming language (Language enum or string)

        Returns:
            Header content as string, or empty string if not found
        """
        lang = Language(language) if isinstance(language, str) else language
        if lang == Language.PYTHON:
            return self._extract_python_header(code)
        if lang in (Language.TYPESCRIPT, Language.JAVASCRIPT):
            return self._extract_ts_header(code)
        return ""

    def _extract_python_header(self, code: str) -> str:
        """Extract Python docstring header.

        Args:
            code: Python source code

        Returns:
            Docstring content or empty string
        """
        # Match triple-quoted docstring at start of file
        # Skip leading whitespace, comments, and encoding declarations
        stripped = self._skip_leading_comments(code)

        # Try double quotes first
        match = re.match(r'^"""(.*?)"""', stripped, re.DOTALL)
        if match:
            return match.group(0)

        # Try single quotes
        match = re.match(r"^'''(.*?)'''", stripped, re.DOTALL)
        if match:
            return match.group(0)

        return ""

    def _skip_leading_comments(self, code: str) -> str:
        """Skip leading comments and empty lines to find docstring.

        Args:
            code: Python source code

        Returns:
            Code with leading comments/empty lines removed
        """
        lines = code.split("\n")
        for i, line in enumerate(lines):
            stripped = line.strip()
            # Skip empty lines
            if not stripped:
                continue
            # Skip comment lines (including pylint/noqa/type comments)
            if stripped.startswith("#"):
                continue
            # Found non-comment, non-empty line - return from here
            return "\n".join(lines[i:])
        return ""

    def _extract_ts_header(self, code: str) -> str:
        """Extract TypeScript/JavaScript JSDoc header.

        Args:
            code: TypeScript/JavaScript source code

        Returns:
            JSDoc comment content or empty string
        """
        stripped = code.lstrip()
        match = re.match(r"^/\*\*(.*?)\*/", stripped, re.DOTALL)
        if match:
            return match.group(0)
        return ""
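
A minimal sketch of the parser in action, using a made-up module header; the rule IDs and justifications here are illustrative sample data, not part of the package.

from src.linters.lazy_ignores.header_parser import SuppressionsParser

source = '''"""
Purpose: Demo module

Suppressions:
    noqa: regex timeout handling is intentionally broad
    type:ignore[arg-type]: upstream stubs are missing overloads
"""

value = 1
'''

parser = SuppressionsParser()
header = parser.extract_header(source, "python")
print(parser.parse(header))
# {'noqa': 'regex timeout handling is intentionally broad',
#  'type:ignore[arg-type]': 'upstream stubs are missing overloads'}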
src/linters/lazy_ignores/linter.py
@@ -0,0 +1,158 @@
"""
Purpose: Main LazyIgnoresRule class for detecting unjustified linting suppressions

Scope: Orchestration of ignore detection and header suppression validation

Overview: Provides LazyIgnoresRule that cross-references linting ignore directives found
    in code (noqa, type:ignore, pylint:disable, nosec) and test skip patterns with
    Suppressions entries declared in file headers. Detects two types of violations:
    unjustified ignores/skips (directive without header declaration) and orphaned
    suppressions (header declaration without matching ignore in code). Enforces the
    header-based suppression model requiring human approval for all linting bypasses.

Dependencies: PythonIgnoreDetector, TestSkipDetector, SuppressionsParser, IgnoreSuppressionMatcher

Exports: LazyIgnoresRule

Interfaces: check(context: BaseLintContext) -> list[Violation]

Implementation: Delegation to matcher for cross-reference logic, violation builder for messages
"""

from pathlib import Path

from src.core.base import BaseLintContext, BaseLintRule
from src.core.constants import Language
from src.core.types import Violation

from .header_parser import SuppressionsParser
from .matcher import IgnoreSuppressionMatcher
from .python_analyzer import PythonIgnoreDetector
from .skip_detector import TestSkipDetector
from .types import IgnoreDirective
from .violation_builder import build_orphaned_violation, build_unjustified_violation


class LazyIgnoresRule(BaseLintRule):
    """Detects unjustified linting suppressions and orphaned header entries."""

    def __init__(self, check_test_skips: bool = True) -> None:
        """Initialize the lazy ignores rule with detection components.

        Args:
            check_test_skips: Whether to check for unjustified test skips.
        """
        self._python_detector = PythonIgnoreDetector()
        self._test_skip_detector = TestSkipDetector()
        self._suppression_parser = SuppressionsParser()
        self._matcher = IgnoreSuppressionMatcher(self._suppression_parser)
        self._check_test_skips = check_test_skips

    @property
    def rule_id(self) -> str:
        """Unique identifier for this rule."""
        return "lazy-ignores"

    @property
    def rule_name(self) -> str:
        """Human-readable name for this rule."""
        return "Lazy Ignores"

    @property
    def description(self) -> str:
        """Description of what this rule checks."""
        return (
            "Detects linting suppressions (noqa, type:ignore, pylint:disable, nosec) "
            "and test skips without corresponding entries in the file header's "
            "Suppressions section."
        )

    def check(self, context: BaseLintContext) -> list[Violation]:
        """Check for violations in the given context.

        Args:
            context: The lint context containing file information.

        Returns:
            List of violations for unjustified and orphaned suppressions.
        """
        if context.language != Language.PYTHON:
            return []

        if not context.file_content:
            return []

        file_path = str(context.file_path) if context.file_path else "unknown"
        return self.check_content(context.file_content, file_path)

    def check_content(self, code: str, file_path: str) -> list[Violation]:
        """Check code for unjustified ignores and orphaned suppressions.

        Args:
            code: Source code content to analyze.
            file_path: Path to the file being analyzed.

        Returns:
            List of violations for unjustified and orphaned suppressions.
        """
        # Extract and parse header suppressions
        header = self._suppression_parser.extract_header(code, "python")
        suppressions = self._suppression_parser.parse(header)

        # Find all ignore directives in code
        ignores = self._python_detector.find_ignores(code, Path(file_path))

        # Find test skip directives if enabled
        if self._check_test_skips:
            test_skips = self._test_skip_detector.find_skips(code, Path(file_path), "python")
            ignores = list(ignores) + list(test_skips)

        # Build set of normalized rule IDs used in code
        used_rule_ids = self._matcher.collect_used_rule_ids(ignores)

        # Find violations
        violations: list[Violation] = []
        violations.extend(self._find_unjustified(ignores, suppressions, file_path))
        violations.extend(self._find_orphaned(suppressions, used_rule_ids, file_path))

        return violations

    def _find_unjustified(
        self, ignores: list[IgnoreDirective], suppressions: dict[str, str], file_path: str
    ) -> list[Violation]:
        """Find ignore directives without matching header suppressions."""
        violations: list[Violation] = []

        for ignore in ignores:
            unjustified = self._matcher.find_unjustified_rule_ids(ignore, suppressions)
            if unjustified:
                violations.append(
                    build_unjustified_violation(
                        file_path=file_path,
                        line=ignore.line,
                        column=ignore.column,
                        rule_id=", ".join(unjustified),
                        raw_text=ignore.raw_text,
                    )
                )

        return violations

    def _find_orphaned(
        self, suppressions: dict[str, str], used_rule_ids: set[str], file_path: str
    ) -> list[Violation]:
        """Find header suppressions without matching code ignores."""
        violations: list[Violation] = []
        orphaned = self._matcher.find_orphaned_rule_ids(suppressions, used_rule_ids)

        for rule_id, justification in orphaned:
            violations.append(
                build_orphaned_violation(
                    file_path=file_path,
                    header_line=1,  # Header entries are at file start
                    rule_id=rule_id,
                    justification=justification,
                )
            )

        return violations
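
A rough end-to-end sketch, assuming the detectors in python_analyzer.py and skip_detector.py (not shown in this hunk) recognize standard `# nosec` and `# noqa: ...` comments and that IgnoreType values match the directive names:

from src.linters.lazy_ignores.linter import LazyIgnoresRule

code = (
    '"""\n'
    "Purpose: Demo\n"
    "\n"
    "Suppressions:\n"
    "    nosec: subprocess call is reviewed and uses a fixed argv\n"
    '"""\n'
    "import subprocess\n"
    "\n"
    "subprocess.run(['ls'])  # nosec\n"
    "data = transform(raw)  # noqa: PLR0912\n"
)

rule = LazyIgnoresRule(check_test_skips=False)
for violation in rule.check_content(code, "demo.py"):
    print(violation)
# Expected (under the assumptions above): one unjustified-ignore violation for
# `# noqa: PLR0912`, since only `nosec` is declared in the header Suppressions section.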
src/linters/lazy_ignores/matcher.py
@@ -0,0 +1,168 @@
"""
Purpose: Cross-reference matcher for lazy-ignores linter

Scope: Matching ignore directives with header suppressions

Overview: Provides IgnoreSuppressionMatcher class that cross-references linting ignore
    directives found in code with Suppressions entries declared in file headers. Handles
    case-insensitive rule ID normalization and special patterns like type:ignore[code].
    Identifies unjustified ignores (code ignores without header entries) and orphaned
    suppressions (header entries without matching code ignores).

Dependencies: SuppressionsParser for normalization, types for IgnoreDirective and IgnoreType,
    rule_id_utils for pure parsing functions

Exports: IgnoreSuppressionMatcher

Interfaces: find_unjustified(), find_orphaned()

Implementation: Set-based matching with rule ID normalization for case-insensitive comparison
"""

from .header_parser import SuppressionsParser
from .rule_id_utils import (
    comma_list_has_used_rule,
    find_rule_in_suppressions,
    is_type_ignore_format_in_suppressions,
    type_ignore_bracket_has_used_rule,
)
from .types import IgnoreDirective, IgnoreType


class IgnoreSuppressionMatcher:
    """Matches ignore directives with header suppressions."""

    def __init__(self, parser: SuppressionsParser, min_justification_length: int = 10) -> None:
        """Initialize the matcher.

        Args:
            parser: SuppressionsParser for rule ID normalization.
            min_justification_length: Minimum length for valid inline justification.
        """
        self._parser = parser
        self._min_justification_length = min_justification_length

    def collect_used_rule_ids(self, ignores: list[IgnoreDirective]) -> set[str]:
        """Collect all normalized rule IDs used in ignore directives.

        Args:
            ignores: List of ignore directives from code.

        Returns:
            Set of normalized rule IDs that have ignore directives.
        """
        used: set[str] = set()
        for ignore in ignores:
            used.update(self._get_matchable_rule_ids(ignore))
        return used

    def _get_matchable_rule_ids(self, ignore: IgnoreDirective) -> list[str]:
        """Get normalized rule IDs for matching, handling special formats."""
        if not ignore.rule_ids:
            return [self._normalize(ignore.ignore_type.value)]

        ids: list[str] = []
        for rule_id in ignore.rule_ids:
            normalized = self._normalize(rule_id)
            ids.append(normalized)
            if ignore.ignore_type == IgnoreType.TYPE_IGNORE:
                ids.append(f"type:ignore[{normalized}]")
        return ids

    def find_unjustified_rule_ids(
        self, ignore: IgnoreDirective, suppressions: dict[str, str]
    ) -> list[str]:
        """Find which rule IDs in an ignore are not justified.

        Checks inline justification first (higher precedence), then falls back
        to header-based suppressions.

        Args:
            ignore: The ignore directive to check.
            suppressions: Dict of normalized rule IDs to justifications.

        Returns:
            List of unjustified rule IDs (original case preserved).
        """
        if self._has_valid_inline_justification(ignore):
            return []

        if not ignore.rule_ids:
            return self._check_bare_ignore(ignore, suppressions)

        return self._check_rule_specific_ignore(ignore, suppressions)

    def _check_bare_ignore(
        self, ignore: IgnoreDirective, suppressions: dict[str, str]
    ) -> list[str]:
        """Check if a bare ignore (no specific rules) is justified."""
        type_key = self._normalize(ignore.ignore_type.value)
        if type_key in suppressions:
            return []
        return [ignore.ignore_type.value]

    def _check_rule_specific_ignore(
        self, ignore: IgnoreDirective, suppressions: dict[str, str]
    ) -> list[str]:
        """Check which specific rule IDs are not justified."""
        return [
            rule_id
            for rule_id in ignore.rule_ids
            if not self._is_rule_justified(ignore, rule_id, suppressions)
        ]

    def _has_valid_inline_justification(self, ignore: IgnoreDirective) -> bool:
        """Check if the ignore has a valid inline justification.

        Args:
            ignore: The ignore directive to check.

        Returns:
            True if the ignore has an inline justification meeting minimum length.
        """
        if not ignore.inline_justification:
            return False
        return len(ignore.inline_justification) >= self._min_justification_length

    def _is_rule_justified(
        self, ignore: IgnoreDirective, rule_id: str, suppressions: dict[str, str]
    ) -> bool:
        """Check if a specific rule ID is justified in suppressions."""
        normalized = self._normalize(rule_id)
        is_type_ignore = ignore.ignore_type == IgnoreType.TYPE_IGNORE

        if normalized in suppressions:
            return True
        if is_type_ignore and is_type_ignore_format_in_suppressions(normalized, suppressions):
            return True
        return find_rule_in_suppressions(normalized, suppressions, is_type_ignore)

    def find_orphaned_rule_ids(
        self, suppressions: dict[str, str], used_rule_ids: set[str]
    ) -> list[tuple[str, str]]:
        """Find header suppressions without matching code ignores.

        Args:
            suppressions: Dict mapping normalized rule IDs to justifications.
            used_rule_ids: Set of normalized rule IDs used in code.

        Returns:
            List of (rule_id, justification) tuples for orphaned suppressions.
        """
        orphaned: list[tuple[str, str]] = []
        for rule_id, justification in suppressions.items():
            if not self._suppression_is_used(rule_id, used_rule_ids):
                orphaned.append((rule_id.upper(), justification))
        return orphaned

    def _suppression_is_used(self, suppression_key: str, used_rule_ids: set[str]) -> bool:
        """Check if a suppression key is used by any code ignores."""
        if suppression_key in used_rule_ids:
            return True
        if comma_list_has_used_rule(suppression_key, used_rule_ids):
            return True
        return type_ignore_bracket_has_used_rule(suppression_key, used_rule_ids)

    def _normalize(self, rule_id: str) -> str:
        """Normalize a rule ID for case-insensitive matching."""
        return self._parser.normalize_rule_id(rule_id)
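
And a small sketch of the orphaned-suppression side of the matcher; it assumes the rule_id_utils helpers (defined elsewhere in this release) return False for keys without commas or type:ignore brackets:

from src.linters.lazy_ignores.header_parser import SuppressionsParser
from src.linters.lazy_ignores.matcher import IgnoreSuppressionMatcher

matcher = IgnoreSuppressionMatcher(SuppressionsParser(), min_justification_length=10)

# The header declares two suppressions, but only `noqa` still has a matching ignore in code.
suppressions = {
    "noqa": "legacy regex is intentionally permissive",
    "nosec": "reviewed in a prior security audit",
}
used_rule_ids = {"noqa"}

print(matcher.find_orphaned_rule_ids(suppressions, used_rule_ids))
# [('NOSEC', 'reviewed in a prior security audit')]  # under the assumption above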