@paths.design/caws-cli 7.0.1 → 7.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/budget-derivation.js +5 -4
- package/dist/commands/diagnose.js +26 -20
- package/dist/commands/init.js +72 -5
- package/dist/commands/specs.js +40 -1
- package/dist/commands/status.js +2 -2
- package/dist/commands/templates.js +10 -0
- package/dist/commands/tool.js +2 -3
- package/dist/commands/validate.js +12 -0
- package/dist/config/index.js +17 -8
- package/dist/generators/working-spec.js +42 -9
- package/dist/index.js +3 -1
- package/dist/scaffold/cursor-hooks.js +10 -2
- package/dist/scaffold/git-hooks.js +189 -32
- package/dist/scaffold/index.js +105 -17
- package/dist/templates/.caws/tools/README.md +20 -0
- package/dist/templates/.cursor/README.md +311 -0
- package/dist/templates/.cursor/hooks/audit.sh +55 -0
- package/dist/templates/.cursor/hooks/block-dangerous.sh +83 -0
- package/dist/templates/.cursor/hooks/caws-quality-check.sh +52 -0
- package/dist/templates/.cursor/hooks/caws-scope-guard.sh +130 -0
- package/dist/templates/.cursor/hooks/caws-tool-validation.sh +121 -0
- package/dist/templates/.cursor/hooks/format.sh +38 -0
- package/dist/templates/.cursor/hooks/naming-check.sh +64 -0
- package/dist/templates/.cursor/hooks/scan-secrets.sh +46 -0
- package/dist/templates/.cursor/hooks/scope-guard.sh +52 -0
- package/dist/templates/.cursor/hooks/validate-spec.sh +83 -0
- package/dist/templates/.cursor/hooks.json +59 -0
- package/dist/templates/.cursor/rules/00-claims-verification.mdc +144 -0
- package/dist/templates/.cursor/rules/01-working-style.mdc +50 -0
- package/dist/templates/.cursor/rules/02-quality-gates.mdc +370 -0
- package/dist/templates/.cursor/rules/03-naming-and-refactor.mdc +33 -0
- package/dist/templates/.cursor/rules/04-logging-language-style.mdc +23 -0
- package/dist/templates/.cursor/rules/05-safe-defaults-guards.mdc +23 -0
- package/dist/templates/.cursor/rules/06-typescript-conventions.mdc +36 -0
- package/dist/templates/.cursor/rules/07-process-ops.mdc +20 -0
- package/dist/templates/.cursor/rules/08-solid-and-architecture.mdc +16 -0
- package/dist/templates/.cursor/rules/09-docstrings.mdc +89 -0
- package/dist/templates/.cursor/rules/10-documentation-quality-standards.mdc +390 -0
- package/dist/templates/.cursor/rules/11-scope-management-waivers.mdc +385 -0
- package/dist/templates/.cursor/rules/12-implementation-completeness.mdc +516 -0
- package/dist/templates/.cursor/rules/13-language-agnostic-standards.mdc +588 -0
- package/dist/templates/.cursor/rules/README.md +148 -0
- package/dist/templates/.github/copilot/instructions.md +311 -0
- package/dist/templates/.idea/runConfigurations/CAWS_Evaluate.xml +5 -0
- package/dist/templates/.idea/runConfigurations/CAWS_Validate.xml +5 -0
- package/dist/templates/.vscode/launch.json +56 -0
- package/dist/templates/.vscode/settings.json +93 -0
- package/dist/templates/.windsurf/workflows/caws-guided-development.md +92 -0
- package/dist/templates/COMMIT_CONVENTIONS.md +86 -0
- package/dist/templates/OIDC_SETUP.md +300 -0
- package/dist/templates/agents.md +1047 -0
- package/dist/templates/codemod/README.md +1 -0
- package/dist/templates/codemod/test.js +93 -0
- package/dist/templates/docs/README.md +150 -0
- package/dist/templates/scripts/quality-gates/check-god-objects.js +146 -0
- package/dist/templates/scripts/quality-gates/run-quality-gates.js +50 -0
- package/dist/templates/scripts/v3/analysis/todo_analyzer.py +1997 -0
- package/dist/tool-loader.js +6 -1
- package/dist/tool-validator.js +8 -2
- package/dist/utils/detection.js +34 -6
- package/dist/utils/git-lock.js +118 -0
- package/dist/utils/gitignore-updater.js +148 -0
- package/dist/utils/quality-gates.js +47 -7
- package/dist/utils/spec-resolver.js +23 -3
- package/dist/utils/yaml-validation.js +155 -0
- package/dist/validation/spec-validation.js +105 -2
- package/package.json +2 -2
- package/templates/.caws/schemas/waivers.schema.json +30 -0
- package/templates/.caws/schemas/working-spec.schema.json +133 -0
- package/templates/.caws/templates/working-spec.template.yml +74 -0
- package/templates/.caws/tools/README.md +20 -0
- package/templates/.caws/tools/scope-guard.js +208 -0
- package/templates/.caws/tools-allow.json +331 -0
- package/templates/.caws/waivers.yml +19 -0
- package/templates/.cursor/hooks/scope-guard.sh +2 -2
- package/templates/.cursor/hooks/validate-spec.sh +42 -7
- package/templates/apps/tools/caws/COMPLETION_REPORT.md +0 -331
- package/templates/apps/tools/caws/MIGRATION_SUMMARY.md +0 -360
- package/templates/apps/tools/caws/README.md +0 -463
- package/templates/apps/tools/caws/TEST_STATUS.md +0 -365
- package/templates/apps/tools/caws/attest.js +0 -357
- package/templates/apps/tools/caws/ci-optimizer.js +0 -642
- package/templates/apps/tools/caws/config.ts +0 -245
- package/templates/apps/tools/caws/cross-functional.js +0 -876
- package/templates/apps/tools/caws/dashboard.js +0 -1112
- package/templates/apps/tools/caws/flake-detector.ts +0 -362
- package/templates/apps/tools/caws/gates.js +0 -198
- package/templates/apps/tools/caws/gates.ts +0 -271
- package/templates/apps/tools/caws/language-adapters.ts +0 -381
- package/templates/apps/tools/caws/language-support.d.ts +0 -367
- package/templates/apps/tools/caws/language-support.d.ts.map +0 -1
- package/templates/apps/tools/caws/language-support.js +0 -585
- package/templates/apps/tools/caws/legacy-assessment.ts +0 -408
- package/templates/apps/tools/caws/legacy-assessor.js +0 -764
- package/templates/apps/tools/caws/mutant-analyzer.js +0 -734
- package/templates/apps/tools/caws/perf-budgets.ts +0 -349
- package/templates/apps/tools/caws/prompt-lint.js.backup +0 -274
- package/templates/apps/tools/caws/property-testing.js +0 -707
- package/templates/apps/tools/caws/provenance.d.ts +0 -14
- package/templates/apps/tools/caws/provenance.d.ts.map +0 -1
- package/templates/apps/tools/caws/provenance.js +0 -132
- package/templates/apps/tools/caws/provenance.js.backup +0 -73
- package/templates/apps/tools/caws/provenance.ts +0 -211
- package/templates/apps/tools/caws/security-provenance.ts +0 -483
- package/templates/apps/tools/caws/shared/base-tool.ts +0 -281
- package/templates/apps/tools/caws/shared/config-manager.ts +0 -366
- package/templates/apps/tools/caws/shared/gate-checker.ts +0 -849
- package/templates/apps/tools/caws/shared/types.ts +0 -444
- package/templates/apps/tools/caws/shared/validator.ts +0 -305
- package/templates/apps/tools/caws/shared/waivers-manager.ts +0 -174
- package/templates/apps/tools/caws/spec-test-mapper.ts +0 -391
- package/templates/apps/tools/caws/test-quality.js +0 -578
- package/templates/apps/tools/caws/validate.js +0 -76
- package/templates/apps/tools/caws/validate.ts +0 -228
- package/templates/apps/tools/caws/waivers.js +0 -344
- /package/{templates/apps/tools/caws → dist/templates/.caws}/schemas/waivers.schema.json +0 -0
- /package/{templates/apps/tools/caws → dist/templates/.caws}/schemas/working-spec.schema.json +0 -0
- /package/{templates/apps/tools/caws → dist/templates/.caws}/templates/working-spec.template.yml +0 -0
- /package/{templates/apps/tools/caws → dist/templates/.caws/tools}/scope-guard.js +0 -0
- /package/{templates/apps/tools/caws → dist/templates/.caws}/tools-allow.json +0 -0
- /package/{templates/apps/tools/caws → dist/templates/.caws}/waivers.yml +0 -0
|
@@ -0,0 +1,1997 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Hidden TODO Pattern Analyzer
|
|
4
|
+
|
|
5
|
+
@description: Hidden TODO analyzer with better accuracy, context awareness,
|
|
6
|
+
and reduced false positives. Uses semantic analysis and context clues to
|
|
7
|
+
distinguish between hidden TODOs and legitimate documentation.
|
|
8
|
+
|
|
9
|
+
@parameters:
|
|
10
|
+
- root_dir: The root directory to analyze.
|
|
11
|
+
- min_confidence: The minimum confidence score to consider a TODO.
|
|
12
|
+
- output_json: The path to save the JSON report.
|
|
13
|
+
- output_md: The path to save the Markdown report.
|
|
14
|
+
- verbose: Whether to print verbose output.
|
|
15
|
+
- enable_code_stub_scan: Whether to enable code stub detection heuristics.
|
|
16
|
+
|
|
17
|
+
For an example on how to improve the clarity of the TODOs that were found, see the following snippet:
|
|
18
|
+
```rust
|
|
19
|
+
// TODO: Implement ANE initialization with the following requirements:
|
|
20
|
+
// 1. ANE initialization: Initialize Apple Neural Engine framework and resources
|
|
21
|
+
// - Set up ANE device and computation resources
|
|
22
|
+
// - Initialize ANE neural network computation capabilities
|
|
23
|
+
// - Handle ANE initialization error handling and recovery
|
|
24
|
+
// 2. ANE resource setup: Set up ANE resources and memory
|
|
25
|
+
// - Allocate ANE memory and computation buffers
|
|
26
|
+
// - Set up ANE resource management and optimization
|
|
27
|
+
// - Implement ANE resource validation and verification
|
|
28
|
+
// 3. ANE configuration: Configure ANE settings and parameters
|
|
29
|
+
// - Set up ANE computation parameters and settings
|
|
30
|
+
// - Configure ANE performance and optimization settings
|
|
31
|
+
// - Handle ANE configuration validation and verification
|
|
32
|
+
// 4. ANE monitoring: Set up ANE monitoring and management
|
|
33
|
+
// - Initialize ANE performance monitoring
|
|
34
|
+
// - Set up ANE resource monitoring and management
|
|
35
|
+
// - Implement ANE monitoring and reporting
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
The analyzer now includes engineering-grade TODO format suggestions. Use --engineering-suggestions to get
|
|
39
|
+
recommendations for upgrading TODOs to the CAWS-compliant format with completion checklists, acceptance
|
|
40
|
+
criteria, dependencies, and governance information.
|
|
41
|
+
|
|
42
|
+
@author: @darianrosebrook
|
|
43
|
+
@date: 2025-10-17
|
|
44
|
+
@version: 2.0.0
|
|
45
|
+
"""
|
|
46
|
+
|
|
47
|
+
import os
|
|
48
|
+
import re
|
|
49
|
+
import json
|
|
50
|
+
from pathlib import Path
|
|
51
|
+
from collections import defaultdict, Counter
|
|
52
|
+
from typing import Any, Dict, List, Set, Tuple, Optional
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class HiddenTodoAnalyzer:
|
|
56
|
+
def __init__(self, root_dir: str, *, enable_code_stub_scan: bool = True):
    """Build the analyzer's static configuration.

    All detection is regex-driven; this constructor only assembles the
    per-language comment syntax table, the ignore lists, and the tiered
    pattern sets (explicit / high-confidence / medium-confidence TODOs,
    exclusions, and engineering-grade format hints) used elsewhere.

    Args:
        root_dir: Root directory whose files will be analyzed.
        enable_code_stub_scan: Flag stored on the instance; presumably it
            gates the ``code_stub_patterns`` heuristics — the consuming
            code is outside this view, so confirm before relying on it.
    """
    self.root_dir = Path(root_dir)
    self.enable_code_stub_scan = enable_code_stub_scan

    # Language-specific comment patterns.
    # Each entry maps a language name to its file extensions and the
    # regexes that recognize single-line / multi-line comment delimiters
    # (None where the language has no such form).
    self.language_patterns = {
        'rust': {
            'extensions': ['.rs'],
            'single_line': r'^\s*//',
            'multi_line_start': r'^\s*/\*',
            'multi_line_end': r'\*/',
        },
        'javascript': {
            'extensions': ['.js', '.mjs', '.cjs'],
            'single_line': r'^\s*//',
            'multi_line_start': r'^\s*/\*',
            'multi_line_end': r'\*/',
        },
        'typescript': {
            'extensions': ['.ts', '.tsx', '.mts', '.cts'],
            'single_line': r'^\s*//',
            'multi_line_start': r'^\s*/\*',
            'multi_line_end': r'\*/',
        },
        'python': {
            'extensions': ['.py', '.pyi'],
            'single_line': r'^\s*#',
            'multi_line_start': r'^\s*"""',
            'multi_line_end': r'"""',
        },
        'go': {
            'extensions': ['.go'],
            'single_line': r'^\s*//',
            'multi_line_start': r'^\s*/\*',
            'multi_line_end': r'\*/',
        },
        'java': {
            'extensions': ['.java'],
            'single_line': r'^\s*//',
            'multi_line_start': r'^\s*/\*',
            'multi_line_end': r'\*/',
        },
        'csharp': {
            'extensions': ['.cs'],
            'single_line': r'^\s*//',
            'multi_line_start': r'^\s*/\*',
            'multi_line_end': r'\*/',
        },
        'cpp': {
            'extensions': ['.cpp', '.cc', '.cxx', '.c++', '.hpp', '.h', '.hxx'],
            'single_line': r'^\s*//',
            'multi_line_start': r'^\s*/\*',
            'multi_line_end': r'\*/',
        },
        'c': {
            'extensions': ['.c'],
            'single_line': r'^\s*//',
            'multi_line_start': r'^\s*/\*',
            'multi_line_end': r'\*/',
        },
        'php': {
            'extensions': ['.php'],
            'single_line': r'^\s*//',
            'multi_line_start': r'^\s*/\*',
            'multi_line_end': r'\*/',
        },
        'ruby': {
            'extensions': ['.rb'],
            'single_line': r'^\s*#',
            'multi_line_start': r'^\s*=begin',
            'multi_line_end': r'=end',
        },
        'shell': {
            'extensions': ['.sh', '.bash', '.zsh', '.fish'],
            'single_line': r'^\s*#',
            'multi_line_start': None,
            'multi_line_end': None,
        },
        'yaml': {
            'extensions': ['.yaml', '.yml'],
            'single_line': r'^\s*#',
            'multi_line_start': None,
            'multi_line_end': None,
        },
        'json': {
            'extensions': ['.json'],
            'single_line': None,
            'multi_line_start': None,
            'multi_line_end': None,
        },
        'markdown': {
            'extensions': ['.md', '.markdown'],
            'single_line': r'^\s*<!--',
            'multi_line_start': r'^\s*<!--',
            'multi_line_end': r'-->',
        },
    }

    # Comprehensive list of file patterns to ignore.
    # These are searched case-insensitively against the *whole* path string,
    # so both directory names and file suffixes match.
    self.ignored_file_patterns = [
        # Test files
        r'\btest\b',
        r'\btests\b',
        r'_test\.',
        r'_tests\.',
        r'\.test\.',
        r'\.spec\.',
        r'\.specs\.',

        # Build artifacts and generated files
        r'\btarget\b',
        r'\bbuild\b',
        r'\bout\b',
        r'\bdist\b',
        r'\bbin\b',
        r'\.next\b',
        r'generated\.',
        r'bindgen\.',
        r'private\.',
        r'mime_types_generated\.',
        r'named_entities\.',
        r'ascii_case_insensitive_html_attributes\.',

        # Package management and dependencies
        r'\bnode_modules\b',
        r'package-lock\.json$',
        r'package\.json$',
        r'yarn\.lock$',
        r'pnpm-lock\.yaml$',
        r'\bvenv\b',
        r'\bpip\b',
        r'requirements\.txt$',
        r'Pipfile$',
        r'Pipfile\.lock$',
        r'poetry\.lock$',
        r'Cargo\.lock$',
        r'Cargo\.toml$',

        # External libraries and frameworks
        r'libtorch-cpu',
        r'libtorch\b',
        r'\.venv-whisper\b',
        r'whisper_conversion_env',
        r'\bsite-packages\b',
        r'lib/python\d+\.\d+/site-packages',
        r'whisperkit\b',
        r'\.build\b',

        # Model and data directories
        r'models\b',
        r'\bmodels/',
        r'temp\.rs$',
        r'todo_analyzer\.',

        # Version control and IDE
        r'\.git\b',
        r'\.github\b',
        r'\.vscode\b',
        r'\.idea\b',
        r'\.DS_Store$',
        r'\.DS_Store\?$',
        r'\._',
        r'\.Spotlight-V100$',

        # Documentation and examples
        r'\bdocs\b',
        r'\bexamples\b',
        r'\bdoc\b',
        r'\bexample\b',

        # Temporary and cache files
        r'\bcache\b',
        r'\btmp\b',
        r'\btemp\b',
        r'\.tmp$',
        r'\.temp$',
        r'\.cache$',

        # Coverage and analysis reports
        r'\bhtmlcov\b',
        r'\bcoverage\b',
        r'\.coverage$',
        r'coverage\.xml$',
        r'lcov\.info$',

        # OS-specific files
        r'Thumbs\.db$',
        r'desktop\.ini$',
        r'\.fseventsd$',
        r'\.Trashes$',

        # Language-specific build artifacts
        r'\.rlib$',
        r'\.rmeta$',
        r'\.d$',
        r'\.pdb$',
        r'\.o$',
        r'\.obj$',
        r'\.exe$',
        r'\.dll$',
        r'\.so$',
        r'\.dylib$',
        r'\.pyc$',
        r'\.pyo$',
        r'__pycache__',
        r'\.class$',
        r'\.jar$',
        r'\.war$',
        r'\.ear$',

        # Web assets
        r'\.min\.js$',
        r'\.min\.css$',
        r'\.bundle\.js$',
        r'\.chunk\.js$',
        r'\.map$',

        # Configuration files (often generated)
        r'\.env\.local$',
        r'\.env\.production$',
        r'\.env\.development$',
        r'config\.local\.',
        r'config\.prod\.',
        r'config\.dev\.',

        # Logs and reports
        r'\blogs\b',
        r'\.log$',
        r'\breports\b',
        r'\bartifacts\b',
        r'\btemp\b',

        # IDE and editor files
        r'\.swp$',
        r'\.swo$',
        r'~$',
        r'\.bak$',
        r'\.backup$',

        # Additional exclusions from user's search
        r'\.gitignore$',
        r'\.json$',
        r'\.md$',
    ]

    # Explicit TODO patterns (highest priority) - more restrictive
    self.explicit_todo_patterns = {
        'explicit_todos': [
            r'\bTODO\b.*?:',
            r'\bFIXME\b.*?:',
            r'\bHACK\b.*?:',
            r'\bXXX\b.*?:',
            r'\bTEMP\b.*?:.*?(implement|fix|replace|complete|add)',
            r'\bTEMPORARY\b.*?:.*?(implement|fix|replace|complete|add)',
            # User's VSCode search patterns
            r'\bTODO\b(?!(_|\.|anal|\sanal|s))',
            r'\bin\s+a\s+real\b(?!(_|\.|anal|\sanal|s))',
            r'\bsimplified\b(?!(_|\.|anal|\sanal|s))',
            r'\bfor\s+now\b(?!(_|\.|anal|\sanal|s))',
        ]
    }

    # High-confidence hidden TODO patterns (more specific and contextual)
    self.high_confidence_patterns = {
        'incomplete_implementation': [
            r'\bnot\s+yet\s+implemented\b',
            r'\bmissing\s+implementation\b',
            r'\bincomplete\s+implementation\b',
            r'\bpartial\s+implementation\b',
            r'\bunimplemented\b',
            r'\bnot\s+done\b',
            r'\bpending\s+implementation\b',
            r'\bto\s+be\s+implemented\b',
            r'\bwill\s+be\s+implemented\b',
        ],

        'placeholder_code': [
            r'\bplaceholder\s+code\b',
            r'\bplaceholder\s+implementation\b',
            r'\bplaceholder\s+function\b',
            r'\bplaceholder\s+value\b',
            r'\bstub\s+implementation\b',
            r'\bstub\s+function\b',
            r'\bdummy\s+implementation\b',
            r'\bfake\s+implementation\b',
            r'\bexample\s+implementation\b',
            r'\bdemo\s+implementation\b',
            r'\bsample\s+implementation\b',
            r'\btemplate\s+implementation\b',
            r'\bstub\s+implementation\s+for\b',
            r'\bsimplified\s+.*?\s+calculation\b',
            r'\bsimplified\s+.*?\s+implementation\b',
            r'\bfor\s+now\b.*?(just|simply|only)\s+(concatenate|return|use)',
            r'\bin\s+practice\b.*?(would|should|will)\s+(intelligently|properly|correctly)',
        ],

        'temporary_solutions': [
            r'\btemporary\s+solution\b',
            r'\btemporary\s+fix\b',
            r'\btemporary\s+workaround\b',
            r'\bquick\s+fix\b',
            r'\bquick\s+hack\b',
            r'\bworkaround\b',
            r'\bhack\b.*?(fix|solution)',
            r'\bpatch\b.*?(fix|solution)',
            r'\bbypass\b.*?(fix|solution)',
        ],

        'hardcoded_values': [
            r'\bhardcoded\s+value\b',
            r'\bhard-coded\s+value\b',
            r'\bmagic\s+number\b',
            r'\bmagic\s+string\b',
            r'\bconstant\s+value\b.*?(replace|change|make\s+configurable)',
            r'\bdefault\s+value\b.*?(replace|change|make\s+configurable)',
            r'\bhardcoded\s+return\s+value\b',
            r'\bhardcoded\s+result\b',
            r'\bhardcoded\s+efficiency\b',
            r'\bhardcoded\s+percentage\b',
        ],

        'future_improvements': [
            r'\bin\s+production\b.*?(implement|add|fix)',
            r'\bin\s+a\s+real\s+implementation\b',
            r'\beventually\b.*?(implement|add|fix)',
            r'\blater\b.*?(implement|add|fix)',
            r'\bshould\s+be\b.*?(implemented|added|fixed)',
            r'\bwould\s+be\b.*?(implemented|added|fixed)',
            r'\bcould\s+be\b.*?(implemented|added|fixed)',
            r'\bwill\s+be\b.*?(implemented|added|fixed)',
            r'\bin\s+practice\b.*?(would|should|will)\s+(analyze|merge|intelligently)',
            r'\bin\s+practice\b.*?(this\s+would|this\s+should|this\s+will)',
            r'\bfor\s+now\b.*?(just|simply|only)',
        ],
    }

    # Medium-confidence patterns (context-dependent)
    self.medium_confidence_patterns = {
        'basic_implementations': [
            r'\bbasic\s+implementation\b.*?(improve|enhance|replace)',
            r'\bsimple\s+implementation\b.*?(improve|enhance|replace)',
            r'\bminimal\s+implementation\b.*?(improve|enhance|replace)',
            r'\bnaive\s+implementation\b.*?(improve|enhance|replace)',
            r'\brough\s+implementation\b.*?(improve|enhance|replace)',
            r'\bcrude\s+implementation\b.*?(improve|enhance|replace)',
        ],
    }

    # Patterns to exclude (legitimate technical terms and documentation)
    self.exclusion_patterns = [
        # Performance and optimization terms
        r'\bperformance\s+monitoring\b',
        r'\bperformance\s+optimization\b',
        r'\bperformance\s+analysis\b',
        r'\bperformance\s+benchmark\b',
        r'\boptimize\s+for\s+performance\b',
        r'\boptimization\s+strategy\b',
        r'\befficient\s+implementation\b',

        # Simulation and testing terms
        r'\bsimulation\s+environment\b',
        r'\bsimulate\s+network\s+conditions\b',
        r'\bsimulate\s+.*?(behavior|response|data)\b',
        r'\bsimulation\s+.*?(mode|environment)\b',

        # Fallback and error handling
        r'\bfallback\s+mechanism\b',
        r'\bfallback\s+strategy\b',
        r'\bfallback\s+to\b.*?(method|function|implementation)',

        # Authentication and security
        r'\bbasic\s+authentication\b',
        r'\bbasic\s+configuration\b',
        r'\bsimple\s+interface\b',
        r'\bsimple\s+api\b',

        # Mock and testing
        r'\bmock\s+object\b',
        r'\bmock\s+service\b',
        r'\bmock\s+data\b',
        r'\bmock\s+response\b',

        # Documentation patterns
        r'\bcurrent\s+implementation\b.*?(uses|provides|supports)',
        r'\bthis\s+implementation\b.*?(uses|provides|supports)',
        r'\bthe\s+implementation\b.*?(uses|provides|supports)',
        r'\bimplementation\s+uses\b',
        r'\bimplementation\s+provides\b',
        r'\bimplementation\s+supports\b',

        # Architecture and design documentation
        r'\barchitecture\s+note\b',
        r'\bdesign\s+note\b',
        r'\bpattern\s+note\b',
        r'\bdependency\s+injection\b',
        r'\bresource\s+management\b',

        # Console and logging
        r'console\.(log|warn|error|info)',
        r'\blogging\s+implementation\b',

        # TODO system documentation (false positives when documenting TODO system itself)
        r'\btodo\s+template\s+system\b',
        r'\btodo\s+template\b',
        r'\btodo\s+instance\b',
        r'\btodo\s+step\b',
        r'\btodo\s+integration\b',
        r'\btodo\s+system\b',
        r'\btodotemplate\b',
        r'\btodoinstance\b',
        r'\btodostep\b',
        r'\btodointegration\b',
        r'\btodotemplatesystem\b',
        r'\btodoprogress\b',
        r'\btododependency\b',
        r'\btodoqualityenforcer\b',
        r'\btodoworkflowhooks\b',
        r'\btodostatus\b',
        r'\btodopriority\b',
        r'\btodosteptype\b',
        # Rust doc comment patterns when mentioning TODO system types
        r'^\s*//[!]/.*\btodo\b.*(template|instance|step|integration|system)\b',
        r'^\s*///.*\btodo\b.*(template|instance|step|integration|system)\b',
    ]

    # Engineering-grade TODO template patterns (for suggestions)
    self.engineering_grade_patterns = {
        'completion_checklist': [
            r'COMPLETION CHECKLIST:',
            r'COMPLETION CRITERIA:',
            r'CHECKLIST:',
            r'\[ \]',
            r'\[x\]',
        ],
        'acceptance_criteria': [
            r'ACCEPTANCE CRITERIA:',
            r'ACCEPTANCE:',
            r'CRITERIA:',
            r'REQUIREMENTS:',
        ],
        'dependencies': [
            r'DEPENDENCIES:',
            r'DEPENDS ON:',
            r'REQUIRES:',
            r'BLOCKED BY:',
        ],
        'governance': [
            r'CAWS TIER:',
            r'TIER:',
            r'PRIORITY:',
            r'BLOCKING:',
            r'ESTIMATED EFFORT:',
            r'EFFORT:',
            r'GOVERNANCE:',
        ],
        'structured_format': [
            r'// TODO:.*?\n.*?//\s*COMPLETION',
            r'// TODO:.*?\n.*?//\s*ACCEPTANCE',
            r'// TODO:.*?\n.*?//\s*DEPENDENCIES',
        ]
    }

    # Patterns that suggest a TODO needs engineering-grade format
    self.needs_engineering_format_patterns = {
        'vague_todos': [
            r'\bTODO\b.*?(implement|add|fix|complete|do)\b.*?$',
            r'\bFIXME\b.*?(implement|add|fix|complete|do)\b.*?$',
            r'\bHACK\b.*?(implement|add|fix|complete|do)\b.*?$',
        ],
        'missing_structure': [
            r'\bTODO\b.*?(?!.*COMPLETION CHECKLIST)(?!.*ACCEPTANCE CRITERIA)(?!.*DEPENDENCIES).*$',
            r'\bFIXME\b.*?(?!.*COMPLETION CHECKLIST)(?!.*ACCEPTANCE CRITERIA)(?!.*DEPENDENCIES).*$',
        ],
        'single_line_todos': [
            r'^\s*//\s*TODO\b.*?$',
            r'^\s*#\s*TODO\b.*?$',
        ],
        'business_critical': [
            r'\bTODO\b.*?(auth|security|payment|billing|database|persist|save|store)\b',
            r'\bTODO\b.*?(critical|important|essential|required|must)\b',
            r'\bFIXME\b.*?(auth|security|payment|billing|database|persist|save|store)\b',
        ]
    }

    # Context clues that suggest documentation rather than TODO
    self.documentation_indicators = [
        r'@param',
        r'@return',
        r'@throws',
        r'@author',
        r'@date',
        r'@version',
        r'@description',
        r'@example',
        r'@see',
        r'@since',
        r'@deprecated',
        r'\*\s*\*\s*\*',  # JSDoc comment blocks
        r'^\s*/\*\*',  # Start of JSDoc
        r'^\s*# ',  # Markdown headers
        r'^\s*## ',  # Markdown subheaders
        r'^\s*### ',  # Markdown sub-subheaders
    ]

    # Context clues that suggest actual TODO
    self.todo_indicators = [
        r'\btodo\b',
        r'\bfixme\b',
        r'\bhack\b',
        r'\bneed\s+to\b',
        r'\bshould\s+be\b',
        r'\bmust\s+be\b',
        r'\bhas\s+to\b',
        r'\brequired\s+to\b',
        r'\bmissing\b',
        r'\bincomplete\b',
        r'\bpartial\b',
        r'\bunfinished\b',
        r'\bwork\s+in\s+progress\b',
        r'\bwip\b',
    ]

    # Accumulators filled in during analysis (findings per file, counts).
    self.results = defaultdict(list)
    self.file_stats = defaultdict(int)
    self.pattern_stats = defaultdict(int)

    # Heuristic code stub patterns keyed by language
    self.code_stub_patterns = {
        'python': {
            'function_stub': re.compile(r'^\s*def\s+\w+\(.*\):'),
            'pass_stmt': re.compile(r'^\s*pass\s*$'),
            'ellipsis_stmt': re.compile(r'^\s*\.\.\.\s*$'),
            'raise_not_impl': re.compile(r'^\s*raise\s+NotImplementedError'),
            'return_not_impl': re.compile(r'^\s*return\s+(None|NotImplemented)\s*$'),
        },
        'javascript': {
            'function_stub': re.compile(r'^\s*(async\s+)?function\s+\w+\(.*\)\s*{'),
            'arrow_stub': re.compile(r'^\s*const\s+\w+\s*=\s*\(.*\)\s*=>\s*{'),
            'throw_not_impl': re.compile(r"^\s*throw\s+new\s+Error\((\"|')(TODO|Not\s+Implemented)"),
            'return_todo': re.compile(r"^\s*return\s+(null|undefined);\s*//\s*TODO"),
        },
        'typescript': {
            'function_stub': re.compile(r'^\s*(async\s+)?function\s+\w+\(.*\)\s*{'),
            'arrow_stub': re.compile(r'^\s*const\s+\w+\s*=\s*\(.*\)\s*=>\s*{'),
            'throw_not_impl': re.compile(r"^\s*throw\s+new\s+Error\((\"|')(TODO|Not\s+Implemented)"),
            'return_todo': re.compile(r"^\s*return\s+(null|undefined);\s*//\s*TODO"),
        },
    }
|
605
|
+
def should_ignore_file(self, file_path: Path) -> bool:
|
|
606
|
+
"""Check if a file should be ignored based on patterns."""
|
|
607
|
+
path_str = str(file_path)
|
|
608
|
+
|
|
609
|
+
# Check against ignored patterns
|
|
610
|
+
for pattern in self.ignored_file_patterns:
|
|
611
|
+
if re.search(pattern, path_str, re.IGNORECASE):
|
|
612
|
+
return True
|
|
613
|
+
|
|
614
|
+
return False
|
|
615
|
+
|
|
616
|
+
def detect_language(self, file_path: Path) -> Optional[str]:
|
|
617
|
+
"""Detect the programming language of a file based on its extension."""
|
|
618
|
+
suffix = file_path.suffix.lower()
|
|
619
|
+
|
|
620
|
+
for language, config in self.language_patterns.items():
|
|
621
|
+
if suffix in config['extensions']:
|
|
622
|
+
return language
|
|
623
|
+
|
|
624
|
+
return None
|
|
625
|
+
|
|
626
|
+
def is_excluded_pattern(self, comment: str) -> bool:
|
|
627
|
+
"""Check if a comment matches exclusion patterns (legitimate technical terms)."""
|
|
628
|
+
for pattern in self.exclusion_patterns:
|
|
629
|
+
if re.search(pattern, comment, re.IGNORECASE):
|
|
630
|
+
return True
|
|
631
|
+
return False
|
|
632
|
+
|
|
633
|
+
def is_documentation_comment(self, comment: str) -> bool:
    """Heuristically decide whether *comment* is documentation, not a TODO.

    A comment counts as documentation when it matches one of the configured
    documentation indicators, or when it mentions the TODO *system's* own
    domain types (TodoTemplate, TodoStep, ...) without containing an actual
    "TODO:" marker — such prose describes the system rather than flagging work.
    """
    if any(re.search(indicator, comment, re.IGNORECASE)
           for indicator in self.documentation_indicators):
        return True

    # Domain-type names used by the TODO system itself (both spaced and
    # CamelCase-collapsed spellings).
    system_type_names = (
        r'todo\s+template',
        r'todo\s+instance',
        r'todo\s+step',
        r'todo\s+integration',
        r'todo\s+system',
        r'todotemplate',
        r'todoinstance',
        r'todostep',
        r'todointegration',
        r'todotemplatesystem',
        r'todoprogress',
        r'tododependency',
    )
    mentions_system_type = any(
        re.search(name, comment, re.IGNORECASE) for name in system_type_names
    )

    # A "TODO:" with a colon still signals a real work item even when the
    # comment talks about the TODO system's types.
    if mentions_system_type and not re.search(r'\bTODO\s*:\s*', comment, re.IGNORECASE):
        return True

    return False
|
664
|
+
def has_todo_indicators(self, comment: str) -> bool:
    """Return True when *comment* matches any configured TODO indicator."""
    return any(
        re.search(marker, comment, re.IGNORECASE)
        for marker in self.todo_indicators
    )
|
671
|
+
def calculate_context_score(self, comment: str, line_num: int, file_path: Path) -> float:
    """Score how likely *comment* is a real TODO given its surroundings.

    Positive values favour "real TODO"; negative values favour
    "documentation / noise".  The result is rounded to 3 decimals and
    clamped to [-1.0, 1.0].  *line_num* is kept in the signature for
    callers but does not currently influence the score.
    """
    score = 0.0

    # Documentation-looking comments are penalised.
    if self.is_documentation_comment(comment):
        score -= 0.5

    # Explicit TODO markers are rewarded.
    if self.has_todo_indicators(comment):
        score += 0.3

    # Generated output rarely contains actionable TODOs.
    if self.is_generated_file(file_path):
        score -= 0.4

    # Very short comments without markers are unlikely to be TODOs.
    if len(comment.strip()) < 20 and not self.has_todo_indicators(comment):
        score -= 0.2

    # Typical documentation openers are another negative signal.
    for starter in ('note:', 'current', 'this', 'the', 'implementation', 'method', 'function'):
        if comment.lower().startswith(starter):
            score -= 0.2
            break

    return max(-1.0, min(1.0, round(score, 3)))
|
700
|
+
def is_generated_file(self, file_path: Path) -> bool:
    """Return True when the path looks like build output or generated code."""
    # Path fragments that conventionally mark generated/bundled artifacts.
    markers = (
        r'\.next\b',
        r'generated',
        r'build/',
        r'dist/',
        r'target/',
        r'node_modules',
        r'\.min\.',
        r'\.bundle\.',
        r'\.chunk\.',
    )
    haystack = str(file_path)
    return any(re.search(marker, haystack, re.IGNORECASE) for marker in markers)
|
720
|
+
def extract_comments_from_file(self, file_path: Path) -> List[Tuple[int, str]]:
    """Extract all comments from *file_path* as (line_number, text) pairs.

    Single-line comments are reported per line; multi-line comments /
    docstrings are joined into one entry reported at their closing line.
    Files in unknown languages yield an empty list; read errors are
    printed and whatever was collected so far is returned.
    """
    language = self.detect_language(file_path)
    if not language:
        return []

    config = self.language_patterns[language]
    comments = []

    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()

        in_multiline = False
        multiline_content = []

        for line_num, line in enumerate(lines, 1):
            original_line = line
            line = line.strip()

            # Skip empty lines
            if not line:
                continue

            # Handle multi-line comments / docstrings
            start_pattern = config['multi_line_start']
            end_pattern = config['multi_line_end']
            if start_pattern and end_pattern:
                if not in_multiline:
                    start_match = re.search(start_pattern, original_line)
                    if start_match:
                        in_multiline = True
                        multiline_content = []
                        after_start = original_line[start_match.end():]
                        end_match_inline = re.search(end_pattern, after_start)

                        if end_match_inline:
                            # Opens and closes on the same line: flush now.
                            body = after_start[:end_match_inline.start()].strip()
                            if body:
                                multiline_content.append(body)
                            combined = ' '.join(multiline_content).strip()
                            if combined:
                                comments.append((line_num, combined))
                            in_multiline = False
                            multiline_content = []
                        else:
                            stripped_body = after_start.strip()
                            if stripped_body:
                                multiline_content.append(stripped_body)
                        continue
                else:
                    end_match = re.search(end_pattern, original_line)
                    if end_match:
                        # Closing line: flush the accumulated comment text.
                        before_end = original_line[:end_match.start()].strip()
                        if before_end:
                            multiline_content.append(before_end)
                        combined = ' '.join(multiline_content).strip()
                        if combined:
                            comments.append((line_num, combined))
                        in_multiline = False
                        multiline_content = []
                        continue
                    else:
                        inner = original_line.strip()
                        if inner:
                            multiline_content.append(inner)
                        continue

            # Extract single-line comments (only if not in multi-line mode)
            if not in_multiline and config['single_line'] and re.search(config['single_line'], line):
                # BUG FIX: reset `comment` every iteration.  Previously it
                # was only assigned inside the if/elif chain, so a language
                # with no handler below either crashed (UnboundLocalError)
                # or silently reused a stale value from an earlier line.
                comment = None
                if language in ['rust', 'javascript', 'typescript', 'go', 'java', 'csharp', 'cpp', 'c', 'php']:
                    comment = re.sub(r'^\s*//\s*', '', line)
                elif language in ['python', 'ruby', 'shell', 'yaml']:
                    comment = re.sub(r'^\s*#\s*', '', line)
                elif language == 'markdown':
                    comment = re.sub(r'^\s*<!--\s*', '', line)
                    comment = re.sub(r'\s*-->$', '', comment)

                if comment:
                    comments.append((line_num, comment))

    except Exception as e:
        print(f"Error reading {file_path}: {e}")

    return comments
|
807
|
+
def detect_code_stubs(self, file_path: Path, language: str) -> List[Dict[str, Any]]:
    """Find stub implementations in *file_path* for the given language.

    Dispatches to a language-specific scanner and tallies each finding's
    reason in pattern_stats.  Returns [] when stub scanning is disabled,
    the language has no stub patterns, the file cannot be read, or the
    language has no scanner.
    """
    if not self.enable_code_stub_scan:
        return []

    patterns = self.code_stub_patterns.get(language)
    if not patterns:
        return []

    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()
    except Exception as e:
        print(f"Error reading {file_path}: {e}")
        return []

    # Language -> scanner dispatch (JS and TS share one scanner).
    scanners = {
        'python': self._detect_python_code_stubs,
        'javascript': self._detect_js_code_stubs,
        'typescript': self._detect_js_code_stubs,
    }
    scanner = scanners.get(language)
    found = scanner(lines, patterns) if scanner else []

    # Keep aggregate statistics of which stub kinds were seen.
    for entry in found:
        self.pattern_stats[entry['reason']] += 1

    return found
|
835
|
+
def _detect_python_code_stubs(self, lines: List[str], patterns: Dict[str, re.Pattern]) -> List[Dict[str, Any]]:
|
|
836
|
+
stubs: List[Dict[str, Any]] = []
|
|
837
|
+
|
|
838
|
+
for idx, raw_line in enumerate(lines, 1):
|
|
839
|
+
stripped = raw_line.strip()
|
|
840
|
+
if not stripped:
|
|
841
|
+
continue
|
|
842
|
+
|
|
843
|
+
if patterns['function_stub'].match(raw_line):
|
|
844
|
+
stub_entry = self._scan_python_function_body(lines, idx, patterns)
|
|
845
|
+
if stub_entry:
|
|
846
|
+
stubs.append(stub_entry)
|
|
847
|
+
continue
|
|
848
|
+
|
|
849
|
+
if patterns['raise_not_impl'].search(raw_line):
|
|
850
|
+
stubs.append({
|
|
851
|
+
'line': idx,
|
|
852
|
+
'reason': 'python_raise_not_implemented',
|
|
853
|
+
'snippet': stripped,
|
|
854
|
+
'confidence': 0.95,
|
|
855
|
+
'context_score': 0.2,
|
|
856
|
+
})
|
|
857
|
+
continue
|
|
858
|
+
|
|
859
|
+
if patterns['ellipsis_stmt'].match(raw_line):
|
|
860
|
+
stubs.append({
|
|
861
|
+
'line': idx,
|
|
862
|
+
'reason': 'python_ellipsis_stub',
|
|
863
|
+
'snippet': stripped,
|
|
864
|
+
'confidence': 0.85,
|
|
865
|
+
'context_score': 0.15,
|
|
866
|
+
})
|
|
867
|
+
continue
|
|
868
|
+
|
|
869
|
+
return stubs
|
|
870
|
+
|
|
871
|
+
def _scan_python_function_body(self, lines: List[str], start_index: int, patterns: Dict[str, re.Pattern]) -> Optional[Dict[str, Any]]:
|
|
872
|
+
"""Inspect the first meaningful statement in a Python function for stub markers."""
|
|
873
|
+
func_line = lines[start_index - 1]
|
|
874
|
+
func_indent = len(func_line) - len(func_line.lstrip())
|
|
875
|
+
|
|
876
|
+
for idx in range(start_index + 1, len(lines) + 1):
|
|
877
|
+
raw_line = lines[idx - 1]
|
|
878
|
+
stripped = raw_line.strip()
|
|
879
|
+
|
|
880
|
+
if not stripped or stripped.startswith('#'):
|
|
881
|
+
continue
|
|
882
|
+
|
|
883
|
+
current_indent = len(raw_line) - len(raw_line.lstrip())
|
|
884
|
+
if current_indent <= func_indent:
|
|
885
|
+
break
|
|
886
|
+
|
|
887
|
+
if patterns['pass_stmt'].match(raw_line):
|
|
888
|
+
return {
|
|
889
|
+
'line': idx,
|
|
890
|
+
'reason': 'python_pass_stub',
|
|
891
|
+
'snippet': stripped,
|
|
892
|
+
'confidence': 0.82,
|
|
893
|
+
'context_score': 0.1,
|
|
894
|
+
}
|
|
895
|
+
|
|
896
|
+
if patterns['ellipsis_stmt'].match(raw_line):
|
|
897
|
+
return {
|
|
898
|
+
'line': idx,
|
|
899
|
+
'reason': 'python_ellipsis_stub',
|
|
900
|
+
'snippet': stripped,
|
|
901
|
+
'confidence': 0.82,
|
|
902
|
+
'context_score': 0.1,
|
|
903
|
+
}
|
|
904
|
+
|
|
905
|
+
if patterns['raise_not_impl'].search(raw_line):
|
|
906
|
+
return {
|
|
907
|
+
'line': idx,
|
|
908
|
+
'reason': 'python_raise_not_implemented',
|
|
909
|
+
'snippet': stripped,
|
|
910
|
+
'confidence': 0.95,
|
|
911
|
+
'context_score': 0.25,
|
|
912
|
+
}
|
|
913
|
+
|
|
914
|
+
if patterns['return_not_impl'].match(raw_line):
|
|
915
|
+
return {
|
|
916
|
+
'line': idx,
|
|
917
|
+
'reason': 'python_return_placeholder',
|
|
918
|
+
'snippet': stripped,
|
|
919
|
+
'confidence': 0.8,
|
|
920
|
+
'context_score': 0.1,
|
|
921
|
+
}
|
|
922
|
+
|
|
923
|
+
# First substantive line is real implementation
|
|
924
|
+
break
|
|
925
|
+
|
|
926
|
+
return None
|
|
927
|
+
|
|
928
|
+
def _detect_js_code_stubs(self, lines: List[str], patterns: Dict[str, re.Pattern]) -> List[Dict[str, Any]]:
|
|
929
|
+
stubs: List[Dict[str, Any]] = []
|
|
930
|
+
total_lines = len(lines)
|
|
931
|
+
|
|
932
|
+
for idx, raw_line in enumerate(lines, 1):
|
|
933
|
+
stripped = raw_line.strip()
|
|
934
|
+
if not stripped:
|
|
935
|
+
continue
|
|
936
|
+
|
|
937
|
+
if patterns['throw_not_impl'].search(stripped):
|
|
938
|
+
stubs.append({
|
|
939
|
+
'line': idx,
|
|
940
|
+
'reason': 'js_throw_not_implemented',
|
|
941
|
+
'snippet': stripped,
|
|
942
|
+
'confidence': 0.9,
|
|
943
|
+
'context_score': 0.2,
|
|
944
|
+
})
|
|
945
|
+
continue
|
|
946
|
+
|
|
947
|
+
if patterns['return_todo'].search(stripped):
|
|
948
|
+
stubs.append({
|
|
949
|
+
'line': idx,
|
|
950
|
+
'reason': 'js_return_todo',
|
|
951
|
+
'snippet': stripped,
|
|
952
|
+
'confidence': 0.82,
|
|
953
|
+
'context_score': 0.1,
|
|
954
|
+
})
|
|
955
|
+
continue
|
|
956
|
+
|
|
957
|
+
if patterns['function_stub'].match(raw_line) or patterns['arrow_stub'].match(raw_line):
|
|
958
|
+
stub_entry = self._scan_js_function_body(lines, idx, patterns)
|
|
959
|
+
if stub_entry:
|
|
960
|
+
stubs.append(stub_entry)
|
|
961
|
+
|
|
962
|
+
return stubs
|
|
963
|
+
|
|
964
|
+
def _scan_js_function_body(self, lines: List[str], start_index: int, patterns: Dict[str, re.Pattern]) -> Optional[Dict[str, Any]]:
    """Inspect the first executable statement in a JS/TS function body.

    *start_index* is the 1-based line of the function/arrow declaration.
    Brace nesting is tracked textually from the opening line so scanning
    stops once the body closes; returns a stub record for a leading
    throw/return-TODO marker, else None.
    """
    opening_line = lines[start_index - 1]
    # Net brace balance of the declaration line itself; clamped at 0 so a
    # one-line `{ ... }` body does not start the scan at a negative depth.
    initial_brace_count = opening_line.count('{') - opening_line.count('}')
    brace_depth = max(initial_brace_count, 0)

    for idx in range(start_index + 1, len(lines) + 1):
        raw_line = lines[idx - 1]
        stripped = raw_line.strip()

        # Update nesting before any checks.  NOTE(review): braces inside
        # comments/strings are counted too — a known approximation.
        brace_depth += raw_line.count('{')
        brace_depth -= raw_line.count('}')

        # Skip blanks and comment-only lines.
        if not stripped or stripped.startswith('//') or stripped.startswith('/*'):
            continue

        if brace_depth < 0:
            break  # More closers than openers: we are outside the function.

        if patterns['throw_not_impl'].search(stripped):
            return {
                'line': idx,
                'reason': 'js_throw_not_implemented',
                'snippet': stripped,
                'confidence': 0.9,
                'context_score': 0.2,
            }

        if patterns['return_todo'].search(stripped):
            return {
                'line': idx,
                'reason': 'js_return_todo',
                'snippet': stripped,
                'confidence': 0.82,
                'context_score': 0.1,
            }

        if brace_depth <= 0:
            break  # Body closed on this line without a stub marker.

        # Found non-stub statement
        break

    return None
|
1009
|
+
def analyze_comment(self, comment: str, line_num: int, file_path: Path) -> Dict[str, Any]:
    """Analyze a single comment for hidden-TODO signals.

    Returns {} for blank, excluded, or non-matching comments; otherwise a
    dict with the matched pattern categories, the best confidence score,
    the per-match breakdown, and the context score.
    """
    comment = comment.strip()
    if not comment:
        return {}

    # Legitimate technical terms are never TODOs.
    if self.is_excluded_pattern(comment):
        return {}

    context_score = self.calculate_context_score(comment, line_num, file_path)

    matches = defaultdict(list)
    confidence_scores = []

    # Pattern tiers: (category->patterns, base confidence, context weight,
    # breakdown-label override).  Explicit TODOs score highest and their
    # breakdown entries are labelled 'explicit' rather than by category.
    tiers = (
        ({'explicit_todos': self.explicit_todo_patterns['explicit_todos']}, 1.0, 0.3, 'explicit'),
        (self.high_confidence_patterns, 0.9, 0.2, None),
        (self.medium_confidence_patterns, 0.6, 0.1, None),
    )

    for groups, base_confidence, weight, label in tiers:
        for category, patterns in groups.items():
            for pattern in patterns:
                if re.search(pattern, comment, re.IGNORECASE):
                    matches[category].append(pattern)
                    # Nudge the tier's base confidence by the context
                    # signal, clamped to [0.1, 1.0].
                    adjusted = min(1.0, max(0.1, base_confidence + context_score * weight))
                    confidence_scores.append((label or category, adjusted))
                    self.pattern_stats[pattern] += 1

    if not confidence_scores:
        return {}

    return {
        'matches': matches,
        'confidence_score': max(score for _, score in confidence_scores),
        'confidence_breakdown': confidence_scores,
        'context_score': context_score
    }
|
1071
|
+
def analyze_engineering_grade_suggestions(self, comment: str, line_num: int, file_path: Path) -> Dict[str, Any]:
    """Suggest engineering-grade formatting improvements for a TODO comment.

    Only explicit TODO/FIXME/HACK comments are considered.  The returned
    dict records whether structured formatting is needed, which elements
    are missing, a suggested CAWS tier and priority, and a ready-to-paste
    template.  Returns {} for blank or non-TODO comments.
    """
    text = comment.strip()
    if not text:
        return {}

    # Non-TODO comments need no engineering-grade treatment.
    if not re.search(r'\b(TODO|FIXME|HACK)\b', text, re.IGNORECASE):
        return {}

    suggestions = {
        'needs_engineering_format': False,
        'missing_elements': [],
        'suggested_tier': None,
        'priority_level': 'Medium',
        'template_suggestion': None,
        'confidence': 0.0
    }

    def _hits(patterns):
        # True when any of *patterns* matches the comment (case-insensitive).
        return any(re.search(pattern, text, re.IGNORECASE) for pattern in patterns)

    # Already structured?  Any engineering-grade pattern hit counts.
    has_structure = any(
        _hits(patterns) for patterns in self.engineering_grade_patterns.values()
    )
    if not has_structure:
        suggestions['needs_engineering_format'] = True
        suggestions['confidence'] = 0.8

    # Collect which structural elements the comment lacks.
    suggestions['missing_elements'] = [
        element
        for element in ('completion_checklist', 'acceptance_criteria', 'dependencies', 'governance')
        if not _hits(self.engineering_grade_patterns[element])
    ]

    # Pick a CAWS tier from the comment's content.
    if _hits(self.needs_engineering_format_patterns['business_critical']):
        suggestions['suggested_tier'] = 1
        suggestions['priority_level'] = 'Critical'
        suggestions['confidence'] = 0.9
    elif _hits(self.needs_engineering_format_patterns['vague_todos']):
        suggestions['suggested_tier'] = 2
        suggestions['priority_level'] = 'High'
        suggestions['confidence'] = 0.7
    else:
        suggestions['suggested_tier'] = 3
        suggestions['priority_level'] = 'Medium'
        suggestions['confidence'] = 0.6

    # Generate template suggestion
    suggestions['template_suggestion'] = self._generate_template_suggestion(text, suggestions, file_path)

    return suggestions
|
1144
|
+
def _generate_template_suggestion(self, todo_text: str, suggestions: Dict[str, Any], file_path: Path) -> str:
    """Generate a suggested engineering-grade TODO template.

    Splits *todo_text* into its marker (TODO/FIXME/HACK) and description,
    then renders a checklist-style comment template using the suggested
    tier/priority from *suggestions* and a comment prefix matched to the
    file's language.  Returns "" when no TODO marker is present.
    """
    # Extract the main TODO description
    todo_match = re.search(r'\b(TODO|FIXME|HACK)\b[:\s]*(.*?)$', todo_text, re.IGNORECASE)
    if not todo_match:
        return ""

    todo_type = todo_match.group(1).upper()
    description = todo_match.group(2).strip()

    # Determine language-specific comment prefix
    language = self.detect_language(file_path)
    if language in ['rust', 'javascript', 'typescript', 'go', 'java', 'csharp', 'cpp', 'c']:
        comment_prefix = "//"
    elif language in ['python', 'ruby', 'shell', 'yaml']:
        comment_prefix = "#"
    else:
        # Unknown languages default to C-style comments.
        comment_prefix = "//"

    tier = suggestions.get('suggested_tier', 2)
    priority = suggestions.get('priority_level', 'Medium')

    # Continuation lines start at column 0 so the rendered text carries no
    # stray indentation; {{Yes/No}} renders as literal {Yes/No}.
    template = f"""{comment_prefix} {todo_type}: {description}
{comment_prefix} <One-sentence context & why this exists>
{comment_prefix}
{comment_prefix} COMPLETION CHECKLIST:
{comment_prefix} [ ] Primary functionality implemented
{comment_prefix} [ ] API/data structures defined & stable
{comment_prefix} [ ] Error handling + validation aligned with error taxonomy
{comment_prefix} [ ] Tests: Unit ≥80% branch coverage (≥50% mutation if enabled)
{comment_prefix} [ ] Integration tests for external systems/contracts
{comment_prefix} [ ] Documentation: public API + system behavior
{comment_prefix} [ ] Performance/profiled against SLA (CPU/mem/latency throughput)
{comment_prefix} [ ] Security posture reviewed (inputs, authz, sandboxing)
{comment_prefix} [ ] Observability: logs (debug), metrics (SLO-aligned), tracing
{comment_prefix} [ ] Configurability and feature flags defined if relevant
{comment_prefix} [ ] Failure-mode cards documented (degradation paths)
{comment_prefix}
{comment_prefix} ACCEPTANCE CRITERIA:
{comment_prefix} - <User-facing measurable behavior>
{comment_prefix} - <Invariant or schema contract requirements>
{comment_prefix} - <Performance/statistical bounds>
{comment_prefix} - <Interoperation requirements or protocol contract>
{comment_prefix}
{comment_prefix} DEPENDENCIES:
{comment_prefix} - <System or feature this relies on> (Required/Optional)
{comment_prefix} - <Interop/contract references>
{comment_prefix} - File path(s)/module links to dependent code
{comment_prefix}
{comment_prefix} ESTIMATED EFFORT: <Number + confidence range>
{comment_prefix} PRIORITY: {priority}
{comment_prefix} BLOCKING: {{Yes/No}} – If Yes: explicitly list what it blocks
{comment_prefix}
{comment_prefix} GOVERNANCE:
{comment_prefix} - CAWS Tier: {tier} (impacts rigor, provenance, review policy)
{comment_prefix} - Change Budget: <LOC or file count> (if relevant)
{comment_prefix} - Reviewer Requirements: <Roles or domain expertise>"""

    return template
|
1204
|
+
def analyze_file(self, file_path: Path) -> Dict:
    """Analyze a single file for hidden TODO patterns.

    Combines two signal sources: comment analysis (explicit/implicit TODO
    text) and code-stub detection (pass-only bodies, NotImplemented
    throws).  A stub within 3 lines below an already-flagged comment is
    merged into that comment's entry instead of creating a duplicate.
    Returns {} for unknown languages or ignored files.
    """
    language = self.detect_language(file_path)
    if not language:
        return {}

    # Skip ignored files
    if self.should_ignore_file(file_path):
        return {}

    comments = self.extract_comments_from_file(file_path)
    try:
        relative_path = str(file_path.relative_to(self.root_dir))
    except ValueError:
        # file_path lies outside root_dir (e.g. an absolute path passed
        # directly); fall back to the path as given.
        relative_path = str(file_path)

    file_analysis = {
        'file_path': relative_path,
        'language': language,
        'total_comments': len(comments),
        'total_lines': self._count_file_lines(file_path),
        'comment_lines': len(comments),
        'hidden_todos': defaultdict(list),
        'all_comments': []
    }

    for line_num, comment in comments:
        analysis = self.analyze_comment(comment, line_num, file_path)
        engineering_suggestions = self.analyze_engineering_grade_suggestions(comment, line_num, file_path)

        if analysis and analysis['matches']:
            todo_data = {
                'comment': comment,
                'matches': analysis['matches'],
                'confidence_score': analysis['confidence_score'],
                'confidence_breakdown': analysis['confidence_breakdown'],
                'context_score': analysis['context_score']
            }

            # Add engineering-grade suggestions if available
            if engineering_suggestions and engineering_suggestions.get('needs_engineering_format'):
                todo_data['engineering_suggestions'] = engineering_suggestions

            file_analysis['hidden_todos'][line_num] = todo_data

        # Store all comments for analysis
        file_analysis['all_comments'].append({
            'line': line_num,
            'comment': comment
        })

    # Detect stub implementations in code bodies
    for stub in self.detect_code_stubs(file_path, language):
        line_num = stub['line']
        reason = stub['reason']
        snippet = stub['snippet']
        confidence = stub['confidence']
        context = stub.get('context_score', 0.0)

        # Attempt to merge with nearby comment within 3 lines above.
        # The loop keeps overwriting with later qualifying lines, so the
        # closest preceding comment wins; an exact line match short-circuits.
        nearby_comment_line = None
        for existing_line in sorted(file_analysis['hidden_todos'].keys()):
            if existing_line == line_num:
                nearby_comment_line = existing_line
                break
            if existing_line < line_num and line_num - existing_line <= 3:
                nearby_comment_line = existing_line

        target_line = nearby_comment_line if nearby_comment_line is not None else line_num

        if target_line in file_analysis['hidden_todos']:
            # Merge the stub into the existing entry, keeping the strongest
            # confidence/context seen for that line.
            entry = file_analysis['hidden_todos'][target_line]
            entry['matches'].setdefault('code_stubs', []).append(reason)
            entry['confidence_score'] = max(entry['confidence_score'], confidence)
            entry['confidence_breakdown'].append(('code_stub', confidence))
            entry['context_score'] = max(entry['context_score'], context)
            if target_line != line_num:
                related = entry.setdefault('related_stub_lines', [])
                if line_num not in related:
                    related.append(line_num)
        else:
            # Stand-alone stub: record it as its own hidden-TODO entry.
            file_analysis['hidden_todos'][target_line] = {
                'comment': snippet,
                'matches': defaultdict(list, {'code_stubs': [reason]}),
                'confidence_score': confidence,
                'confidence_breakdown': [('code_stub', confidence)],
                'context_score': context,
            }
            file_analysis['all_comments'].append({
                'line': line_num,
                'comment': snippet
            })

    return file_analysis
|
1299
|
+
def _count_file_lines(self, file_path: Path) -> int:
|
|
1300
|
+
"""Count total lines in a file."""
|
|
1301
|
+
try:
|
|
1302
|
+
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
|
|
1303
|
+
return sum(1 for _ in f)
|
|
1304
|
+
except Exception:
|
|
1305
|
+
return 0
|
|
1306
|
+
|
|
1307
|
+
def analyze_directory(self, languages: Optional[List[str]] = None, min_confidence: float = 0.7, v3_only: bool = False) -> Dict:
    """Analyze all files under root_dir for hidden TODO patterns.

    Args:
        languages: optional whitelist of language names to scan.
        min_confidence: findings scoring below this are dropped.
        v3_only: restrict scanning to the iterations/v3/ subtree.

    Returns a report dict with 'summary' counters, per-file results under
    'files', and findings grouped by pattern category under 'patterns'.
    Progress and statistics are printed to stdout as a side effect.
    """
    print(f"Analyzing files with improved patterns in: {self.root_dir}")
    print(f"Minimum confidence threshold: {min_confidence}")

    # Get all files with supported extensions
    all_files = []
    for language, config in self.language_patterns.items():
        if languages and language not in languages:
            continue
        for ext in config['extensions']:
            if v3_only:
                # Only analyze files in iterations/v3/ directory
                v3_dir = self.root_dir / 'iterations' / 'v3'
                if v3_dir.exists():
                    all_files.extend(v3_dir.rglob(f'*{ext}'))
            else:
                all_files.extend(self.root_dir.rglob(f'*{ext}'))

    # Filter out ignored files
    non_ignored_files = [
        f for f in all_files if not self.should_ignore_file(f)]

    print(f"Found {len(all_files)} total files")
    print(f"Found {len(non_ignored_files)} non-ignored files")

    # Count by language
    language_counts = defaultdict(int)
    for file_path in non_ignored_files:
        language = self.detect_language(file_path)
        if language:
            language_counts[language] += 1

    print("Files by language:")
    for lang, count in sorted(language_counts.items()):
        print(f"  {lang}: {count} files")

    # Reset pattern statistics for this run
    self.pattern_stats = defaultdict(int)

    all_results = {
        'summary': {
            'total_files': len(all_files),
            'non_ignored_files': len(non_ignored_files),
            'ignored_files': len(all_files) - len(non_ignored_files),
            'language_counts': dict(language_counts),
            'files_with_hidden_todos': 0,
            'total_hidden_todos': 0,
            'high_confidence_todos': 0,
            'medium_confidence_todos': 0,
            'low_confidence_todos': 0,
            'code_stub_todos': 0,
            'pattern_counts': {},
            'min_confidence_threshold': min_confidence,
        },
        'files': {},
        'patterns': defaultdict(list)
    }

    for file_path in non_ignored_files:
        print(f"Analyzing: {file_path.relative_to(self.root_dir)}")
        file_analysis = self.analyze_file(file_path)

        if file_analysis and file_analysis['hidden_todos']:
            # Filter by confidence threshold
            filtered_todos = {}
            for line_num, data in file_analysis['hidden_todos'].items():
                if data['confidence_score'] >= min_confidence:
                    filtered_todos[line_num] = data

                    # Count by confidence level.  NOTE(review): only
                    # findings that already passed min_confidence reach
                    # these buckets, so 'low_confidence_todos' stays empty
                    # whenever min_confidence >= 0.6 — confirm intended.
                    if data['confidence_score'] >= 0.9:
                        all_results['summary']['high_confidence_todos'] += 1
                    elif data['confidence_score'] >= 0.6:
                        all_results['summary']['medium_confidence_todos'] += 1
                    else:
                        all_results['summary']['low_confidence_todos'] += 1

            if filtered_todos:
                file_analysis['hidden_todos'] = filtered_todos
                all_results['files'][file_analysis['file_path']] = file_analysis
                all_results['summary']['files_with_hidden_todos'] += 1
                all_results['summary']['total_hidden_todos'] += len(filtered_todos)

                # Group by patterns
                for line_num, data in filtered_todos.items():
                    for category, patterns in data['matches'].items():
                        all_results['patterns'][category].append({
                            'file': file_analysis['file_path'],
                            'language': file_analysis['language'],
                            'line': line_num,
                            'comment': data['comment'],
                            'patterns': patterns,
                            'confidence_score': data['confidence_score'],
                            'context_score': data['context_score']
                        })
                        if category == 'code_stubs':
                            all_results['summary']['code_stub_todos'] += 1

    all_results['summary']['pattern_counts'] = dict(self.pattern_stats)

    return all_results
|
1410
|
+
def analyze_files(self, file_paths: List[str], min_confidence: float = 0.7) -> Dict:
    """Analyze specific files for hidden TODO patterns.

    Args:
        file_paths: File paths (strings) to analyze; paths that do not exist
            or are not regular files are skipped with a warning.
        min_confidence: Minimum confidence score (0.0-1.0) a detected TODO
            must reach to be kept in the per-file results.

    Returns:
        Dict with 'summary' (aggregate counters), 'files' (per-file details
        keyed by the path string) and 'patterns' (a defaultdict grouping
        detections by pattern).
    """
    print(f"Analyzing {len(file_paths)} specific files with improved patterns")
    print(f"Minimum confidence threshold: {min_confidence}")

    # Convert string paths to Path objects and filter valid files
    valid_files = []
    for file_path in file_paths:
        path = Path(file_path)
        if path.exists() and path.is_file():
            valid_files.append(path)
        else:
            print(f"Warning: File not found or not accessible: {file_path}")

    if not valid_files:
        print("No valid files to analyze")
        # Empty result skeleton mirroring the populated structure below.
        return {
            'summary': {
                'total_files': 0,
                'non_ignored_files': 0,
                'ignored_files': 0,
                'language_counts': {},
                'files_with_hidden_todos': 0,
                'total_hidden_todos': 0,
                'high_confidence_todos': 0,
                'medium_confidence_todos': 0,
                'low_confidence_todos': 0,
                'code_stub_todos': 0,
                'pattern_counts': {},
                'min_confidence_threshold': min_confidence,
            },
            'files': {},
            'patterns': defaultdict(list)
        }

    # Reset pattern statistics for this run
    self.pattern_stats = defaultdict(int)

    # Count languages
    language_counts = Counter()
    non_ignored_files = []

    for file_path in valid_files:
        language = self.detect_language(file_path)
        if language:
            language_counts[language] += 1
            # Skip ignored files
            if not self.should_ignore_file(file_path):
                non_ignored_files.append(file_path)

    all_results = {
        'summary': {
            'total_files': len(valid_files),
            'non_ignored_files': len(non_ignored_files),
            'ignored_files': len(valid_files) - len(non_ignored_files),
            'language_counts': dict(language_counts),
            'files_with_hidden_todos': 0,
            'total_hidden_todos': 0,
            'high_confidence_todos': 0,
            'medium_confidence_todos': 0,
            'low_confidence_todos': 0,
            'code_stub_todos': 0,
            'pattern_counts': {},
            'min_confidence_threshold': min_confidence,
        },
        'files': {},
        'patterns': defaultdict(list)
    }

    for file_path in non_ignored_files:
        print(f"Analyzing: {file_path}")
        file_analysis = self.analyze_file(file_path)

        if file_analysis and file_analysis['hidden_todos']:
            # Filter by confidence threshold
            filtered_todos = {}
            for line_num, data in file_analysis['hidden_todos'].items():
                if data['confidence_score'] >= min_confidence:
                    filtered_todos[line_num] = data

                # Count by confidence level
                # NOTE(review): these counters run for every detected TODO,
                # not only those passing min_confidence — confirm intended.
                if data['confidence_score'] >= 0.9:
                    all_results['summary']['high_confidence_todos'] += 1
                elif data['confidence_score'] >= 0.6:
                    all_results['summary']['medium_confidence_todos'] += 1
                else:
                    all_results['summary']['low_confidence_todos'] += 1

                # Count patterns
                for pattern in data.get('matches', []):
                    self.pattern_stats[pattern] += 1

            if filtered_todos:
                all_results['summary']['files_with_hidden_todos'] += 1
                all_results['summary']['total_hidden_todos'] += len(filtered_todos)
                all_results['files'][str(file_path)] = {
                    'language': file_analysis['language'],
                    'hidden_todos': filtered_todos,
                    'total_lines': file_analysis['total_lines'],
                    'comment_lines': file_analysis['comment_lines']
                }

                # Add to patterns
                for line_num, data in filtered_todos.items():
                    for pattern in data.get('matches', []):
                        all_results['patterns'][pattern].append({
                            'file': str(file_path),
                            'language': file_analysis['language'],
                            'line': line_num,
                            'comment': data['comment'],
                            'patterns': [pattern],
                            'confidence_score': data['confidence_score'],
                            'context_score': data.get('context_score', 0.0)
                        })

    # Finalize pattern counts
    all_results['summary']['pattern_counts'] = dict(self.pattern_stats)

    return all_results
|
|
1529
|
+
|
|
1530
|
+
def analyze_staged_files_with_dependencies(self, min_confidence: float = 0.7,
                                           dependency_resolution: bool = True) -> Dict:
    """
    Analyze git-staged files for hidden TODOs, with optional dependency resolution.

    Steps:
    1. Asks git for the names of the currently staged files.
    2. Keeps only files whose extension belongs to a supported language.
    3. Runs the standard hidden-TODO analysis over those files.
    4. When enabled, resolves TODO dependencies and classifies blocking TODOs.
    """
    import subprocess

    def _empty_result(resolution_enabled: bool, error: str = None) -> Dict:
        # Shared skeleton for the "nothing staged" and git-failure paths.
        summary = {
            'staged_files': 0,
            'analyzed_files': 0,
            'total_hidden_todos': 0,
            'blocking_todos': 0,
            'non_blocking_todos': 0,
            'dependency_resolution_enabled': resolution_enabled
        }
        if error is not None:
            summary['error'] = error
        return {
            'summary': summary,
            'files': {},
            'dependencies': {},
            'blocking_analysis': {}
        }

    try:
        # Names of files currently staged in the git index.
        proc = subprocess.run(['git', 'diff', '--cached', '--name-only'],
                              capture_output=True, text=True, check=True)
        staged_files = [name.strip() for name in proc.stdout.split('\n') if name.strip()]

        # Restrict to extensions of languages this analyzer knows how to parse.
        supported_extensions = set()
        for lang_config in self.language_patterns.values():
            supported_extensions.update(lang_config['extensions'])
        extension_tuple = tuple(supported_extensions)
        staged_files = [name for name in staged_files if name.endswith(extension_tuple)]

        if not staged_files:
            return _empty_result(dependency_resolution)

        print(f"Found {len(staged_files)} staged files to analyze")

        # Run the standard analysis over the staged set.
        results = self.analyze_files(staged_files, min_confidence)

        # Optionally enrich the results with dependency/blocking information.
        if dependency_resolution:
            dependency_info = self._resolve_todo_dependencies(results)
            results['dependencies'] = dependency_info
            results['blocking_analysis'] = self._analyze_blocking_todos(
                results, dependency_info)

        # Record how many staged files fed this run.
        results['summary']['staged_files'] = len(staged_files)
        results['summary']['analyzed_files'] = len(staged_files)
        results['summary']['dependency_resolution_enabled'] = dependency_resolution

        return results

    except subprocess.CalledProcessError as e:
        print(f"Error getting staged files: {e}")
        return _empty_result(False, error=str(e))
|
|
1607
|
+
|
|
1608
|
+
def _resolve_todo_dependencies(self, analysis_results: Dict) -> Dict:
|
|
1609
|
+
"""
|
|
1610
|
+
Resolve dependencies for TODOs found in staged files.
|
|
1611
|
+
|
|
1612
|
+
Returns dependency information for each TODO.
|
|
1613
|
+
"""
|
|
1614
|
+
dependencies = {}
|
|
1615
|
+
|
|
1616
|
+
for file_path, file_data in analysis_results['files'].items():
|
|
1617
|
+
file_deps = {}
|
|
1618
|
+
|
|
1619
|
+
for line_num, todo_data in file_data['hidden_todos'].items():
|
|
1620
|
+
todo_text = todo_data['comment']
|
|
1621
|
+
todo_deps = self._extract_dependencies_from_todo(todo_text, file_path)
|
|
1622
|
+
file_deps[line_num] = todo_deps
|
|
1623
|
+
|
|
1624
|
+
if file_deps:
|
|
1625
|
+
dependencies[file_path] = file_deps
|
|
1626
|
+
|
|
1627
|
+
return dependencies
|
|
1628
|
+
|
|
1629
|
+
def _extract_dependencies_from_todo(self, todo_text: str, file_path: str) -> Dict:
|
|
1630
|
+
"""
|
|
1631
|
+
Extract dependency information from TODO text using the engineering-grade template.
|
|
1632
|
+
|
|
1633
|
+
Looks for:
|
|
1634
|
+
- DEPENDENCIES section
|
|
1635
|
+
- BLOCKING status
|
|
1636
|
+
- CAWS Tier
|
|
1637
|
+
- Required/Optional dependencies
|
|
1638
|
+
"""
|
|
1639
|
+
dependencies = {
|
|
1640
|
+
'blocking': False,
|
|
1641
|
+
'caws_tier': None,
|
|
1642
|
+
'required_deps': [],
|
|
1643
|
+
'optional_deps': [],
|
|
1644
|
+
'estimated_effort': None,
|
|
1645
|
+
'priority': 'Medium'
|
|
1646
|
+
}
|
|
1647
|
+
|
|
1648
|
+
# Look for DEPENDENCIES section
|
|
1649
|
+
deps_match = re.search(r'DEPENDENCIES:\s*\n((?:- .*\n?)*)', todo_text, re.MULTILINE)
|
|
1650
|
+
if deps_match:
|
|
1651
|
+
deps_text = deps_match.group(1)
|
|
1652
|
+
for line in deps_text.split('\n'):
|
|
1653
|
+
line = line.strip()
|
|
1654
|
+
if line.startswith('- '):
|
|
1655
|
+
dep_text = line[2:].strip()
|
|
1656
|
+
if '(Required)' in dep_text:
|
|
1657
|
+
dependencies['required_deps'].append(dep_text.replace('(Required)', '').strip())
|
|
1658
|
+
elif '(Optional)' in dep_text:
|
|
1659
|
+
dependencies['optional_deps'].append(dep_text.replace('(Optional)', '').strip())
|
|
1660
|
+
else:
|
|
1661
|
+
# Default to required if not specified
|
|
1662
|
+
dependencies['required_deps'].append(dep_text)
|
|
1663
|
+
|
|
1664
|
+
# Look for BLOCKING status
|
|
1665
|
+
blocking_match = re.search(r'BLOCKING:\s*{Yes|No}', todo_text)
|
|
1666
|
+
if blocking_match:
|
|
1667
|
+
dependencies['blocking'] = 'Yes' in blocking_match.group(0)
|
|
1668
|
+
|
|
1669
|
+
# Look for CAWS Tier
|
|
1670
|
+
tier_match = re.search(r'CAWS Tier:\s*(\d+)', todo_text)
|
|
1671
|
+
if tier_match:
|
|
1672
|
+
dependencies['caws_tier'] = int(tier_match.group(1))
|
|
1673
|
+
|
|
1674
|
+
# Look for PRIORITY
|
|
1675
|
+
priority_match = re.search(r'PRIORITY:\s*{Critical|High|Medium|Low}', todo_text)
|
|
1676
|
+
if priority_match:
|
|
1677
|
+
dependencies['priority'] = priority_match.group(0).split(':')[1].strip()
|
|
1678
|
+
|
|
1679
|
+
# Look for ESTIMATED EFFORT
|
|
1680
|
+
effort_match = re.search(r'ESTIMATED EFFORT:\s*([^\\n]+)', todo_text)
|
|
1681
|
+
if effort_match:
|
|
1682
|
+
dependencies['estimated_effort'] = effort_match.group(1).strip()
|
|
1683
|
+
|
|
1684
|
+
return dependencies
|
|
1685
|
+
|
|
1686
|
+
def _analyze_blocking_todos(self, analysis_results: Dict, dependency_info: Dict) -> Dict:
|
|
1687
|
+
"""
|
|
1688
|
+
Analyze which TODOs are blocking based on dependency resolution.
|
|
1689
|
+
"""
|
|
1690
|
+
blocking_analysis = {
|
|
1691
|
+
'blocking_todos': [],
|
|
1692
|
+
'non_blocking_todos': [],
|
|
1693
|
+
'critical_blockers': [],
|
|
1694
|
+
'dependency_summary': {
|
|
1695
|
+
'total_required_deps': 0,
|
|
1696
|
+
'resolved_deps': 0,
|
|
1697
|
+
'unresolved_deps': 0
|
|
1698
|
+
}
|
|
1699
|
+
}
|
|
1700
|
+
|
|
1701
|
+
for file_path, file_data in analysis_results['files'].items():
|
|
1702
|
+
file_deps = dependency_info.get(file_path, {})
|
|
1703
|
+
|
|
1704
|
+
for line_num, todo_data in file_data['hidden_todos'].items():
|
|
1705
|
+
todo_deps = file_deps.get(line_num, {})
|
|
1706
|
+
|
|
1707
|
+
# Determine if TODO is blocking
|
|
1708
|
+
is_blocking = self._is_todo_blocking(todo_deps, file_path)
|
|
1709
|
+
|
|
1710
|
+
todo_info = {
|
|
1711
|
+
'file': file_path,
|
|
1712
|
+
'line': line_num,
|
|
1713
|
+
'text': todo_data['comment'],
|
|
1714
|
+
'confidence': todo_data['confidence_score'],
|
|
1715
|
+
'dependencies': todo_deps,
|
|
1716
|
+
'blocking': is_blocking
|
|
1717
|
+
}
|
|
1718
|
+
|
|
1719
|
+
if is_blocking:
|
|
1720
|
+
blocking_analysis['blocking_todos'].append(todo_info)
|
|
1721
|
+
|
|
1722
|
+
# Check if it's critical
|
|
1723
|
+
if todo_deps.get('priority') == 'Critical' or todo_deps.get('caws_tier') == 1:
|
|
1724
|
+
blocking_analysis['critical_blockers'].append(todo_info)
|
|
1725
|
+
else:
|
|
1726
|
+
blocking_analysis['non_blocking_todos'].append(todo_info)
|
|
1727
|
+
|
|
1728
|
+
# Update dependency summary
|
|
1729
|
+
required_deps = todo_deps.get('required_deps', [])
|
|
1730
|
+
blocking_analysis['dependency_summary']['total_required_deps'] += len(required_deps)
|
|
1731
|
+
# For now, assume all dependencies are unresolved (would need actual resolution logic)
|
|
1732
|
+
blocking_analysis['dependency_summary']['unresolved_deps'] += len(required_deps)
|
|
1733
|
+
|
|
1734
|
+
return blocking_analysis
|
|
1735
|
+
|
|
1736
|
+
def _is_todo_blocking(self, todo_deps: Dict, file_path: str) -> bool:
|
|
1737
|
+
"""
|
|
1738
|
+
Determine if a TODO is blocking based on its dependencies and context.
|
|
1739
|
+
"""
|
|
1740
|
+
# Explicit blocking flag
|
|
1741
|
+
if todo_deps.get('blocking', False):
|
|
1742
|
+
return True
|
|
1743
|
+
|
|
1744
|
+
# High priority or critical tier
|
|
1745
|
+
if todo_deps.get('priority') in ['Critical', 'High']:
|
|
1746
|
+
return True
|
|
1747
|
+
|
|
1748
|
+
if todo_deps.get('caws_tier') == 1:
|
|
1749
|
+
return True
|
|
1750
|
+
|
|
1751
|
+
# Has required dependencies (simplified check)
|
|
1752
|
+
if todo_deps.get('required_deps'):
|
|
1753
|
+
return True
|
|
1754
|
+
|
|
1755
|
+
return False
|
|
1756
|
+
|
|
1757
|
+
def generate_report(self, results: Dict) -> str:
    """Generate a comprehensive Markdown report with enhanced accuracy information.

    Args:
        results: Analysis results as produced by analyze_directory /
            analyze_files: a dict with 'summary', 'files' and 'patterns'.

    Returns:
        The full report as a single newline-joined Markdown string.
    """
    report = []
    report.append("# Improved Hidden TODO Analysis Report (v2.0)")
    report.append("=" * 60)
    report.append("")

    # Summary: top-level counters copied straight from the analysis summary.
    summary = results['summary']
    report.append("## Summary")
    report.append(f"- Total files: {summary['total_files']}")
    report.append(f"- Non-ignored files: {summary['non_ignored_files']}")
    report.append(f"- Ignored files: {summary['ignored_files']}")
    report.append(f"- Files with hidden TODOs: {summary['files_with_hidden_todos']}")
    report.append(f"- Total hidden TODOs found: {summary['total_hidden_todos']}")
    report.append(f"- Code stub detections: {summary.get('code_stub_todos', 0)}")
    report.append(f"- High confidence TODOs (≥0.9): {summary['high_confidence_todos']}")
    report.append(f"- Medium confidence TODOs (≥0.6): {summary['medium_confidence_todos']}")
    report.append(f"- Low confidence TODOs (<0.6): {summary['low_confidence_todos']}")
    report.append(f"- Minimum confidence threshold: {summary['min_confidence_threshold']}")
    report.append("")

    # Language breakdown
    report.append("## Files by Language")
    for lang, count in sorted(summary['language_counts'].items()):
        report.append(f"- **{lang}**: {count} files")
    report.append("")

    # Pattern statistics, most frequent first.
    if summary['pattern_counts']:
        report.append("## Pattern Statistics")
        for pattern, count in sorted(summary['pattern_counts'].items(), key=lambda x: x[1], reverse=True):
            if count > 0:
                report.append(f"- `{pattern}`: {count} occurrences")
        report.append("")

    # Files with most high-confidence hidden TODOs
    if results['files']:
        report.append("## Files with High-Confidence Hidden TODOs")
        file_todo_counts = []
        for file_path, data in results['files'].items():
            high_conf_count = sum(1 for todo in data['hidden_todos'].values()
                                  if todo['confidence_score'] >= 0.9)
            if high_conf_count > 0:
                file_todo_counts.append((file_path, data['language'], high_conf_count))

        # Sort descending by number of high-confidence TODOs.
        file_todo_counts.sort(key=lambda x: x[2], reverse=True)
        for file_path, language, count in file_todo_counts:
            report.append(f"- `{file_path}` ({language}): {count} high-confidence TODOs")
        report.append("")

    # Engineering-grade TODO suggestions: collect TODOs the analyzer flagged
    # as needing the structured engineering-grade format.
    engineering_suggestions = []
    for file_path, file_data in results['files'].items():
        for line_num, todo_data in file_data['hidden_todos'].items():
            if 'engineering_suggestions' in todo_data:
                suggestions = todo_data['engineering_suggestions']
                if suggestions.get('needs_engineering_format'):
                    engineering_suggestions.append({
                        'file': file_path,
                        'line': line_num,
                        'language': file_data['language'],
                        'original_comment': todo_data['comment'],
                        'suggestions': suggestions
                    })

    if engineering_suggestions:
        report.append("## Engineering-Grade TODO Suggestions")
        report.append("")
        report.append("The following TODOs should be upgraded to the engineering-grade format:")
        report.append("")

        for suggestion in engineering_suggestions[:10]:  # Limit to top 10
            report.append(f"### `{suggestion['file']}:{suggestion['line']}` ({suggestion['language']})")
            report.append(f"**Original:** {suggestion['original_comment'][:100]}...")
            report.append(f"**Suggested Tier:** {suggestion['suggestions']['suggested_tier']}")
            report.append(f"**Priority:** {suggestion['suggestions']['priority_level']}")
            report.append(f"**Missing Elements:** {', '.join(suggestion['suggestions']['missing_elements'])}")
            report.append("")
            report.append("**Suggested Template:**")
            report.append("```")
            report.append(suggestion['suggestions']['template_suggestion'])
            report.append("```")
            report.append("")

        if len(engineering_suggestions) > 10:
            report.append(f"... and {len(engineering_suggestions) - 10} more TODOs need engineering-grade format")
            report.append("")

    # Pattern categories with confidence scores
    if results['patterns']:
        report.append("## Pattern Categories by Confidence")
        for category, items in results['patterns'].items():
            if items:
                # Bucket detections by confidence band.
                high_conf_items = [item for item in items if 'confidence_score' in item and item['confidence_score'] >= 0.9]
                medium_conf_items = [item for item in items if 'confidence_score' in item and 0.6 <= item['confidence_score'] < 0.9]
                low_conf_items = [item for item in items if 'confidence_score' in item and item['confidence_score'] < 0.6]

                if high_conf_items or medium_conf_items:
                    report.append(f"### {category.replace('_', ' ').title()} ({len(items)} items)")

                    if high_conf_items:
                        report.append(f"#### High Confidence ({len(high_conf_items)} items)")
                        for item in high_conf_items[:3]:
                            context_info = f" (context: {item['context_score']:.1f})" if 'context_score' in item else ""
                            report.append(f"- `{item['file']}:{item['line']}` ({item['language']}, conf: {item['confidence_score']:.1f}{context_info}): {item['comment'][:80]}...")
                        if len(high_conf_items) > 3:
                            report.append(f"- ... and {len(high_conf_items) - 3} more high-confidence items")

                    if medium_conf_items:
                        report.append(f"#### Medium Confidence ({len(medium_conf_items)} items)")
                        for item in medium_conf_items[:2]:
                            context_info = f" (context: {item['context_score']:.1f})" if 'context_score' in item else ""
                            report.append(f"- `{item['file']}:{item['line']}` ({item['language']}, conf: {item['confidence_score']:.1f}{context_info}): {item['comment'][:80]}...")
                        if len(medium_conf_items) > 2:
                            report.append(f"- ... and {len(medium_conf_items) - 2} more medium-confidence items")

                    if low_conf_items:
                        report.append(f"#### Low Confidence ({len(low_conf_items)} items) - *Consider reviewing for false positives*")

                report.append("")

    return "\n".join(report)
|
|
1880
|
+
|
|
1881
|
+
|
|
1882
|
+
# NOTE(review): self-assignment is a no-op — presumably a leftover
# backward-compatibility alias from a class rename; confirm and remove.
HiddenTodoAnalyzer = HiddenTodoAnalyzer
|
|
1883
|
+
|
|
1884
|
+
|
|
1885
|
+
def main():
    """Command-line entry point: parse arguments, run the analysis, report results.

    Exits with status 1 in --ci-mode when any hidden TODOs are found; all
    other modes only print warnings.
    """
    import argparse
    import sys

    parser = argparse.ArgumentParser(
        description='Analyze files for hidden TODO patterns with improved accuracy')
    parser.add_argument('--root', default='.',
                        help='Root directory to analyze (default: current directory)')
    parser.add_argument('--files', nargs='+',
                        help='Specific files to analyze (instead of scanning directory)')
    parser.add_argument('--languages', nargs='+',
                        help='Specific languages to analyze (e.g., rust python javascript)')
    parser.add_argument(
        '--output-json', help='Output JSON file for detailed results')
    parser.add_argument('--output-md', help='Output Markdown report file')
    parser.add_argument('--min-confidence', type=float, default=0.7,
                        help='Minimum confidence threshold (0.0-1.0, default: 0.7)')
    parser.add_argument('--verbose', '-v',
                        action='store_true', help='Verbose output')
    parser.add_argument('--disable-code-stub-scan',
                        action='store_true', help='Disable code stub detection heuristics')
    parser.add_argument('--ci-mode',
                        action='store_true', help='CI mode - exit with error code if hidden TODOs found')
    parser.add_argument('--warn-only',
                        action='store_true', help='Warning mode - only warn, never fail')
    parser.add_argument('--v3-only',
                        action='store_true', help='Only analyze v3 folder (matches user search scope)')
    parser.add_argument('--staged-only',
                        action='store_true', help='Only analyze staged files with dependency resolution')
    parser.add_argument('--disable-dependency-resolution',
                        action='store_true', help='Disable dependency resolution for staged files')
    parser.add_argument('--engineering-suggestions',
                        action='store_true', help='Include engineering-grade TODO format suggestions')

    args = parser.parse_args()

    analyzer = HiddenTodoAnalyzer(
        args.root,
        enable_code_stub_scan=not args.disable_code_stub_scan,
    )

    # Choose analysis mode: staged files > explicit file list > whole directory.
    if args.staged_only:
        results = analyzer.analyze_staged_files_with_dependencies(
            args.min_confidence,
            dependency_resolution=not args.disable_dependency_resolution
        )
    elif args.files:
        results = analyzer.analyze_files(args.files, args.min_confidence)
    else:
        results = analyzer.analyze_directory(args.languages, args.min_confidence, args.v3_only)

    # Print summary
    summary = results['summary']
    print(f"\n{'='*60}")
    print("IMPROVED HIDDEN TODO ANALYSIS COMPLETE (v2.0)")
    print(f"{'='*60}")
    print(f"Total files: {summary['total_files']}")
    print(f"Non-ignored files: {summary['non_ignored_files']}")
    print(f"Ignored files: {summary['ignored_files']}")
    print(f"Files with hidden TODOs: {summary['files_with_hidden_todos']}")
    print(f"Total hidden TODOs: {summary['total_hidden_todos']}")
    print(f"High confidence (≥0.9): {summary['high_confidence_todos']}")
    print(f"Medium confidence (≥0.6): {summary['medium_confidence_todos']}")
    print(f"Low confidence (<0.6): {summary['low_confidence_todos']}")
    print(f"Confidence threshold: {summary['min_confidence_threshold']}")

    print("\nFiles by language:")
    for lang, count in sorted(summary['language_counts'].items()):
        print(f"  {lang}: {count} files")

    if summary['pattern_counts']:
        print("\nTop patterns found:")
        for pattern, count in sorted(summary['pattern_counts'].items(), key=lambda x: x[1], reverse=True)[:15]:
            if count > 0:
                print(f"  {pattern}: {count}")

    # Save reports
    if args.output_json:
        with open(args.output_json, 'w') as f:
            json.dump(results, f, indent=2)
        print(f"\nDetailed results saved to: {args.output_json}")

    if args.output_md:
        report = analyzer.generate_report(results)
        with open(args.output_md, 'w') as f:
            f.write(report)
        print(f"Report saved to: {args.output_md}")
    else:
        # Print report to console; report generation failures must not mask
        # the analysis results already printed above.
        try:
            print("\n" + analyzer.generate_report(results))
        except Exception as e:
            print(f"⚠️ Could not generate report: {e}")

    # Handle CI mode and warn-only mode
    total_hidden_todos = summary['total_hidden_todos']

    if total_hidden_todos > 0:
        if args.ci_mode:
            print(f"\n❌ CI MODE: Found {total_hidden_todos} hidden TODOs - blocking commit/push")
            # BUG FIX: use sys.exit() instead of the site-provided exit()
            # builtin, which is not guaranteed to exist (e.g. under python -S)
            # and is not intended for use in programs.
            sys.exit(1)
        elif args.warn_only:
            print(f"\n⚠️ WARN MODE: Found {total_hidden_todos} hidden TODOs - proceeding anyway")
        else:
            print(f"\n⚠️ Found {total_hidden_todos} hidden TODOs - consider addressing them")
    else:
        print("\n✅ No hidden TODOs found - good job!")
|
|
1994
|
+
|
|
1995
|
+
|
|
1996
|
+
# Standard script entry guard: run the CLI only when executed directly.
if __name__ == '__main__':
    main()
|