@voodocs/cli 2.5.0 → 2.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +26 -0
- package/lib/cli/__init__.py +7 -1
- package/lib/cli/analyze.py +156 -98
- package/lib/cli/convert.py +131 -0
- package/lib/darkarts/priority_analyzer/__init__.py +0 -0
- package/lib/darkarts/priority_analyzer/analyzer.py +301 -0
- package/lib/darkarts/priority_analyzer/complexity.py +271 -0
- package/lib/darkarts/priority_analyzer/dependencies.py +275 -0
- package/lib/darkarts/priority_analyzer/security.py +200 -0
- package/lib/darkarts/voodocs_lite_dict.py +216 -0
- package/lib/darkarts/voodocs_lite_dict_v2.py +198 -0
- package/lib/darkarts/voodocs_lite_parser.py +343 -0
- package/package.json +5 -1
|
@@ -0,0 +1,301 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Main Priority Analyzer for VooDocs
|
|
3
|
+
|
|
4
|
+
Combines complexity, security, and dependency analysis to prioritize files.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import os
|
|
8
|
+
import re
|
|
9
|
+
from dataclasses import dataclass
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import List, Dict, Optional
|
|
12
|
+
|
|
13
|
+
from .complexity import ComplexityAnalyzer
|
|
14
|
+
from .security import SecurityAnalyzer
|
|
15
|
+
from .dependencies import DependencyAnalyzer
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@dataclass
class FileScore:
    """Score and analysis for a single file.

    Aggregates the weighted priority score with the raw component scores
    and the detailed metrics they were derived from. Instances are
    produced by ``PriorityAnalyzer.analyze_file``.
    """
    filepath: str
    priority_score: float      # weighted combination of the component scores
    complexity_score: int
    security_score: int
    dependency_score: int
    annotation_penalty: int    # <= 0; penalty for missing VooDocs annotations
    priority_level: str        # 'CRITICAL' | 'HIGH' | 'MEDIUM' | 'LOW' | 'MINIMAL'
    reasons: List[str]         # human-readable explanations for the score
    suggestions: List[str]     # actionable follow-up suggestions

    # Detailed metrics
    loc: int = 0
    cyclomatic: int = 0
    functions: int = 0
    # None (rather than a mutable [] default, which dataclasses reject)
    # until the analyzer fills it in with the top matched keywords.
    security_keywords: Optional[List[str]] = None
    import_count: int = 0
    dependent_count: int = 0
    annotation_coverage: float = 0.0
|
40
|
+
|
|
41
|
+
class PriorityAnalyzer:
    """Main analyzer that combines all scoring components.

    Combines complexity (30%), security (40%), dependency (20%) analysis
    and a VooDocs annotation-coverage penalty (10%) into a single weighted
    priority score per file, then maps that score to a priority level.
    """

    # Scoring weights (sum to 1.0)
    COMPLEXITY_WEIGHT = 0.30
    SECURITY_WEIGHT = 0.40
    DEPENDENCY_WEIGHT = 0.20
    ANNOTATION_WEIGHT = 0.10

    # Minimum score required for each priority level (checked highest first)
    PRIORITY_THRESHOLDS = {
        'CRITICAL': 80,
        'HIGH': 60,
        'MEDIUM': 40,
        'LOW': 20,
        'MINIMAL': 0
    }

    # Supported file extensions
    SUPPORTED_EXTENSIONS = {'.py', '.ts', '.tsx', '.js', '.jsx', '.sol'}

    def __init__(self, project_root: Optional[str] = None):
        """
        Initialize priority analyzer.

        Args:
            project_root: Root directory of the project (forwarded to the
                dependency analyzer; may be None).
        """
        self.project_root = project_root
        self.complexity_analyzer = ComplexityAnalyzer()
        self.security_analyzer = SecurityAnalyzer()
        self.dependency_analyzer = DependencyAnalyzer(project_root)

    def analyze_file(self, filepath: str, all_files: Optional[List[str]] = None) -> "FileScore":
        """
        Analyze a single file and calculate its priority score.

        Args:
            filepath: Path to file to analyze
            all_files: List of all files (for dependency analysis)

        Returns:
            FileScore object with complete analysis
        """
        # Run individual analyzers
        complexity = self.complexity_analyzer.analyze_file(filepath)
        security = self.security_analyzer.analyze_file(filepath)
        dependencies = self.dependency_analyzer.analyze_file(filepath, all_files)

        # Check VooDocs annotation coverage
        annotation_coverage, annotation_penalty = self._check_annotation_coverage(filepath)

        # Weighted priority score; annotation_penalty is <= 0, so it can
        # only lower the total.
        priority_score = (
            complexity['total_score'] * self.COMPLEXITY_WEIGHT +
            security['total_score'] * self.SECURITY_WEIGHT +
            dependencies['total_score'] * self.DEPENDENCY_WEIGHT +
            annotation_penalty * self.ANNOTATION_WEIGHT
        )

        # Determine priority level
        priority_level = self._get_priority_level(priority_score)

        # Generate reasons and suggestions
        reasons = self._generate_reasons(complexity, security, dependencies, annotation_coverage)
        suggestions = self._generate_suggestions(complexity, security, dependencies, annotation_coverage)

        # Combine all security keywords, most severe category first
        all_keywords = (
            security.get('critical_keywords', []) +
            security.get('high_keywords', []) +
            security.get('medium_keywords', [])
        )

        return FileScore(
            filepath=filepath,
            priority_score=round(priority_score, 1),
            complexity_score=complexity['total_score'],
            security_score=security['total_score'],
            dependency_score=dependencies['total_score'],
            annotation_penalty=annotation_penalty,
            priority_level=priority_level,
            reasons=reasons,
            suggestions=suggestions,
            loc=complexity.get('loc', 0),
            cyclomatic=complexity.get('cyclomatic', 0),
            functions=complexity.get('functions', 0),
            security_keywords=all_keywords[:10],  # Top 10
            import_count=dependencies.get('import_count', 0),
            dependent_count=dependencies.get('dependent_count', 0),
            annotation_coverage=annotation_coverage
        )

    def analyze_directory(self, dirpath: str, recursive: bool = True,
                          exclude_patterns: Optional[List[str]] = None) -> List["FileScore"]:
        """
        Analyze all files in a directory.

        Args:
            dirpath: Directory path to analyze
            recursive: Whether to scan subdirectories
            exclude_patterns: Patterns to exclude (e.g., ['node_modules', 'dist'])

        Returns:
            List of FileScore objects, sorted by priority (highest first)
        """
        if exclude_patterns is None:
            exclude_patterns = ['node_modules', 'dist', 'build', '.git', '__pycache__']

        # Find all supported files
        all_files = self._find_files(dirpath, recursive, exclude_patterns)

        # Analyze each file; a failure in one file should not abort the scan
        scores = []
        for filepath in all_files:
            try:
                score = self.analyze_file(filepath, all_files)
                scores.append(score)
            except Exception as e:
                print(f"Warning: Failed to analyze {filepath}: {e}")

        # Sort by priority score (highest first)
        scores.sort(key=lambda x: x.priority_score, reverse=True)

        return scores

    def _find_files(self, dirpath: str, recursive: bool, exclude_patterns: List[str]) -> List[str]:
        """Find all supported files in directory.

        Directories whose name contains any exclude pattern are pruned
        from the walk; only files whose suffix is in SUPPORTED_EXTENSIONS
        are returned.
        """
        files = []

        if recursive:
            for root, dirs, filenames in os.walk(dirpath):
                # Prune excluded directories in place so os.walk skips them
                dirs[:] = [d for d in dirs if not any(pattern in d for pattern in exclude_patterns)]

                for filename in filenames:
                    filepath = os.path.join(root, filename)
                    if Path(filepath).suffix in self.SUPPORTED_EXTENSIONS:
                        files.append(filepath)
        else:
            for filename in os.listdir(dirpath):
                filepath = os.path.join(dirpath, filename)
                if os.path.isfile(filepath) and Path(filepath).suffix in self.SUPPORTED_EXTENSIONS:
                    files.append(filepath)

        return files

    def _check_annotation_coverage(self, filepath: str) -> tuple:
        """
        Check VooDocs annotation coverage in file.

        Coverage is a heuristic: the number of @darkarts / @darkarts-lite
        markers divided by a regex-estimated function count.

        Returns:
            Tuple of (coverage_percentage, penalty_score)
        """
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                content = f.read()
        except (OSError, UnicodeDecodeError):
            # Unreadable file: treat as completely unannotated.
            # (Previously a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.)
            return (0.0, -50)

        # Count @darkarts annotations; non-capturing group so findall
        # returns whole matches (only the count is used either way)
        darkarts_count = len(re.findall(r'@darkarts(?:-lite)?', content))

        # Count total functions (rough estimate across languages)
        function_patterns = [
            r'^\s*def\s+\w+\s*\(',  # Python
            r'^\s*(export\s+)?(async\s+)?function\s+\w+\s*\(',  # JS/TS function
            r'^\s*(const|let|var)\s+\w+\s*=\s*(\([^)]*\)|[^=]+)\s*=>',  # Arrow function
            r'^\s*function\s+\w+\s*\(',  # Solidity
        ]

        total_functions = 0
        for pattern in function_patterns:
            total_functions += len(re.findall(pattern, content, re.MULTILINE))

        # Calculate coverage
        if total_functions == 0:
            # No functions detected: any annotation at all counts as full coverage
            coverage = 1.0 if darkarts_count > 0 else 0.0
        else:
            coverage = min(1.0, darkarts_count / total_functions)

        # Map coverage to a penalty (applied with ANNOTATION_WEIGHT)
        if coverage == 0:
            penalty = -50
        elif coverage < 0.5:
            penalty = -25
        else:
            penalty = 0

        return (coverage, penalty)

    def _get_priority_level(self, score: float) -> str:
        """Determine priority level from score (thresholds checked highest first)."""
        if score >= self.PRIORITY_THRESHOLDS['CRITICAL']:
            return 'CRITICAL'
        elif score >= self.PRIORITY_THRESHOLDS['HIGH']:
            return 'HIGH'
        elif score >= self.PRIORITY_THRESHOLDS['MEDIUM']:
            return 'MEDIUM'
        elif score >= self.PRIORITY_THRESHOLDS['LOW']:
            return 'LOW'
        else:
            return 'MINIMAL'

    def _generate_reasons(self, complexity: Dict, security: Dict,
                          dependencies: Dict, annotation_coverage: float) -> List[str]:
        """Generate human-readable reasons for the priority score."""
        reasons = []

        # Complexity reasons
        if complexity['total_score'] >= 60:
            reasons.append(f"High complexity: {complexity['loc']} LOC, {complexity['functions']} functions")
        elif complexity['total_score'] >= 30:
            reasons.append(f"Medium complexity: {complexity['loc']} LOC, {complexity['functions']} functions")

        # Security reasons (only critical keywords are surfaced here)
        if security.get('critical_keywords'):
            keywords = ', '.join(security['critical_keywords'][:5])
            reasons.append(f"Security keywords: {keywords}")

        # Dependency reasons
        if dependencies.get('dependent_count', 0) > 0:
            reasons.append(f"Depended upon by {dependencies['dependent_count']} file(s)")

        # Annotation reasons
        if annotation_coverage == 0:
            reasons.append("No VooDocs annotations found")
        elif annotation_coverage < 0.5:
            reasons.append(f"Partial annotations ({int(annotation_coverage * 100)}% coverage)")

        return reasons

    def _generate_suggestions(self, complexity: Dict, security: Dict,
                              dependencies: Dict, annotation_coverage: float) -> List[str]:
        """Generate actionable suggestions (capped at five)."""
        suggestions = []

        # Security suggestions
        suggestions.extend(self.security_analyzer.get_security_suggestions(security))

        # Dependency suggestions
        suggestions.extend(self.dependency_analyzer.get_dependency_suggestions(dependencies))

        # Complexity suggestions
        if complexity['total_score'] >= 60:
            suggestions.append("Break down complex logic into smaller functions")
            suggestions.append("Document high-complexity sections")

        # Annotation suggestions
        if annotation_coverage == 0:
            suggestions.append("Add @darkarts annotations to all functions")
            suggestions.append("Start with module-level purpose annotation")
        elif annotation_coverage < 1.0:
            suggestions.append("Complete remaining function annotations")

        # Generic fallback when no specific suggestion applies
        if not suggestions:
            suggestions.append("Add comprehensive VooDocs annotations")

        return suggestions[:5]  # Limit to top 5
|
@@ -0,0 +1,271 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Complexity Analyzer for VooDocs Priority System
|
|
3
|
+
|
|
4
|
+
Analyzes file complexity based on:
|
|
5
|
+
- Lines of code (LOC)
|
|
6
|
+
- Cyclomatic complexity (control flow)
|
|
7
|
+
- Function count
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import re
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
from typing import Dict
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class ComplexityAnalyzer:
    """Analyzes code complexity for priority scoring.

    Produces heuristic, regex-based metrics (LOC, weighted control-flow
    counts, function counts) for Python, JS/TS and Solidity sources.
    """

    # Control flow keywords by language.
    # NOTE(review): several patterns are C-style only — e.g. `if (` and
    # `except\s*:` will not match Python's `if x:` or `except Exception:` —
    # so Python control flow is partly undercounted by design of these
    # heuristics.
    CONTROL_FLOW_PATTERNS = {
        'if': r'\bif\s*\(',
        'else': r'\belse\b',
        'elif': r'\b(elif|else\s+if)\s*\(',
        'for': r'\bfor\s*\(',
        'while': r'\bwhile\s*\(',
        'switch': r'\bswitch\s*\(',
        'case': r'\bcase\s+',
        'try': r'\btry\s*\{',
        'catch': r'\bcatch\s*\(',
        'except': r'\bexcept\s*:',
    }

    # Function definition patterns by language
    FUNCTION_PATTERNS = {
        'python': r'^\s*def\s+\w+\s*\(',
        'typescript': r'^\s*(export\s+)?(async\s+)?function\s+\w+\s*\(',
        'javascript': r'^\s*(export\s+)?(async\s+)?function\s+\w+\s*\(',
        'arrow_function': r'^\s*(const|let|var)\s+\w+\s*=\s*(\([^)]*\)|[^=]+)\s*=>',
        'method': r'^\s*(public|private|protected)?\s*(static\s+)?(async\s+)?\w+\s*\(',
        'solidity': r'^\s*function\s+\w+\s*\(',
    }

    def __init__(self):
        """Initialize complexity analyzer (stateless; nothing to configure)."""
        pass

    def analyze_file(self, filepath: str) -> Dict[str, int]:
        """
        Analyze a file's complexity.

        Args:
            filepath: Path to file to analyze

        Returns:
            Dictionary with complexity metrics and scores. On read failure,
            all metrics are 0 and an 'error' key holds the message.
        """
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                content = f.read()
        except Exception as e:
            # Unreadable file: return zeroed metrics rather than raising,
            # so directory scans can continue.
            return {
                'loc': 0,
                'loc_score': 0,
                'cyclomatic': 0,
                'cyclomatic_score': 0,
                'functions': 0,
                'function_score': 0,
                'total_score': 0,
                'error': str(e)
            }

        # Calculate raw metrics
        loc = self._count_lines_of_code(content)
        cyclomatic = self._calculate_cyclomatic_complexity(content)
        functions = self._count_functions(content, filepath)

        # Map metrics to component scores
        loc_score = self._calculate_loc_score(loc)
        cyclomatic_score = self._calculate_cyclomatic_score(cyclomatic)
        function_score = self._calculate_function_score(functions)

        # Total score (capped at 100)
        total_score = min(100, loc_score + cyclomatic_score + function_score)

        return {
            'loc': loc,
            'loc_score': loc_score,
            'cyclomatic': cyclomatic,
            'cyclomatic_score': cyclomatic_score,
            'functions': functions,
            'function_score': function_score,
            'total_score': total_score
        }

    def _count_lines_of_code(self, content: str) -> int:
        """
        Count non-empty, non-comment lines of code.

        Lines inside C-style /* ... */ comments and lines starting with
        '//' or '#' are excluded. This is a heuristic: a line mixing code
        and a /* ... */ comment is skipped entirely, and Python
        triple-quoted docstrings are not treated as comments.

        Args:
            content: File content

        Returns:
            Line count
        """
        loc = 0
        in_multiline_comment = False

        for line in content.split('\n'):
            stripped = line.strip()

            # Skip empty lines
            if not stripped:
                continue

            # Inside a /* ... */ comment: skip lines until the closer.
            # BUGFIX: the original only reset the flag when '*/' appeared
            # on the same line as '/*', so a closer on its own line left
            # the flag stuck and every later code line was uncounted.
            if in_multiline_comment:
                if '*/' in stripped:
                    in_multiline_comment = False
                continue

            # A line opening a /* comment is skipped; the comment remains
            # open only if it is not also closed on this line.
            if '/*' in stripped:
                if '*/' not in stripped:
                    in_multiline_comment = True
                continue

            # Skip single-line comments
            if stripped.startswith('//') or stripped.startswith('#'):
                continue

            # Count as LOC
            loc += 1

        return loc

    def _calculate_cyclomatic_complexity(self, content: str) -> int:
        """
        Calculate cyclomatic complexity by counting control flow statements.

        Each construct is weighted (loops and exception handling heavier
        than branches), and an indentation-based nesting estimate is added.

        Args:
            content: File content

        Returns:
            Cyclomatic complexity score
        """
        complexity = 0

        # Count each type of control flow with a per-construct weight
        for keyword, pattern in self.CONTROL_FLOW_PATTERNS.items():
            count = len(re.findall(pattern, content, re.MULTILINE))

            if keyword in ['if', 'elif', 'else']:
                complexity += count * 2
            elif keyword in ['for', 'while']:
                complexity += count * 3
            elif keyword in ['try', 'catch', 'except']:
                complexity += count * 3
            elif keyword in ['switch', 'case']:
                complexity += count * 2
            else:
                complexity += count

        # Add maximum nesting depth (rough estimate from indentation)
        complexity += self._estimate_nesting_depth(content)

        return complexity

    def _estimate_nesting_depth(self, content: str) -> int:
        """
        Estimate maximum nesting depth by counting indentation.

        Tabs count one level each; otherwise 4-space indents are assumed,
        falling back to 2-space indents when the line isn't a multiple of 4.

        Args:
            content: File content

        Returns:
            Estimated nesting depth
        """
        max_depth = 0

        for line in content.split('\n'):
            if not line.strip():
                continue

            # Count leading whitespace
            leading_spaces = len(line) - len(line.lstrip())

            if '\t' in line[:leading_spaces]:
                depth = line[:leading_spaces].count('\t')
            else:
                # Try 4 spaces per level first, then fall back to 2
                depth = leading_spaces // 4
                if depth == 0 and leading_spaces >= 2:
                    depth = leading_spaces // 2

            max_depth = max(max_depth, depth)

        return max_depth

    def _count_functions(self, content: str, filepath: str) -> int:
        """
        Count function definitions in file.

        The file extension selects which regex patterns apply; unsupported
        extensions yield 0.

        Args:
            content: File content
            filepath: File path (to determine language)

        Returns:
            Function count
        """
        function_count = 0

        # Determine language from extension
        ext = Path(filepath).suffix.lower()

        if ext in ['.py']:
            pattern = self.FUNCTION_PATTERNS['python']
            function_count += len(re.findall(pattern, content, re.MULTILINE))

        elif ext in ['.ts', '.tsx']:
            # TypeScript: regular functions + arrow functions + methods
            function_count += len(re.findall(self.FUNCTION_PATTERNS['typescript'], content, re.MULTILINE))
            function_count += len(re.findall(self.FUNCTION_PATTERNS['arrow_function'], content, re.MULTILINE))
            function_count += len(re.findall(self.FUNCTION_PATTERNS['method'], content, re.MULTILINE))

        elif ext in ['.js', '.jsx']:
            # JavaScript: regular functions + arrow functions
            function_count += len(re.findall(self.FUNCTION_PATTERNS['javascript'], content, re.MULTILINE))
            function_count += len(re.findall(self.FUNCTION_PATTERNS['arrow_function'], content, re.MULTILINE))

        elif ext in ['.sol']:
            # Solidity: function definitions
            function_count += len(re.findall(self.FUNCTION_PATTERNS['solidity'], content, re.MULTILINE))

        return function_count

    def _calculate_loc_score(self, loc: int) -> int:
        """Calculate score based on lines of code (0-80, banded)."""
        if loc < 50:
            return 0
        elif loc < 100:
            return 10
        elif loc < 200:
            return 20
        elif loc < 500:
            return 40
        elif loc < 1000:
            return 60
        else:
            return 80

    def _calculate_cyclomatic_score(self, cyclomatic: int) -> int:
        """Calculate score based on cyclomatic complexity (capped at 100)."""
        return min(100, cyclomatic)

    def _calculate_function_score(self, functions: int) -> int:
        """Calculate score based on function count (0-30, banded)."""
        if functions <= 5:
            return 0
        elif functions <= 10:
            return 5
        elif functions <= 20:
            return 10
        elif functions <= 50:
            return 20
        else:
            return 30