@voodocs/cli 2.5.0 → 2.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/lib/darkarts/priority_analyzer/__init__.py +0 -0
- package/lib/darkarts/priority_analyzer/analyzer.py +301 -0
- package/lib/darkarts/priority_analyzer/complexity.py +271 -0
- package/lib/darkarts/priority_analyzer/dependencies.py +275 -0
- package/lib/darkarts/priority_analyzer/security.py +200 -0
- package/lib/darkarts/voodocs_lite_dict.py +216 -0
- package/lib/darkarts/voodocs_lite_dict_v2.py +198 -0
- package/lib/darkarts/voodocs_lite_parser.py +343 -0
- package/package.json +5 -1
|
@@ -0,0 +1,275 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Dependency Analyzer for VooDocs Priority System
|
|
3
|
+
|
|
4
|
+
Analyzes import/dependency relationships between files.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import re
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Dict, List, Set
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class DependencyAnalyzer:
    """Analyzes file dependencies for priority scoring.

    A file earns a "depends" score from how many local modules it imports
    and a "depended" score from how many other project files import it.
    """

    # Import-statement regexes per language; each pattern's first (only)
    # capture group is the imported module/path.
    IMPORT_PATTERNS = {
        'python': [
            r'^\s*import\s+([^\s]+)',
            r'^\s*from\s+([^\s]+)\s+import',
        ],
        'typescript': [
            r'^\s*import\s+.*\s+from\s+["\']([^"\']+)["\']',
            r'^\s*import\s+["\']([^"\']+)["\']',
            r'^\s*export\s+.*\s+from\s+["\']([^"\']+)["\']',
        ],
        'javascript': [
            r'^\s*import\s+.*\s+from\s+["\']([^"\']+)["\']',
            r'^\s*import\s+["\']([^"\']+)["\']',
            r'^\s*const\s+.*\s*=\s*require\(["\']([^"\']+)["\']\)',
            r'^\s*export\s+.*\s+from\s+["\']([^"\']+)["\']',
        ],
        'solidity': [
            r'^\s*import\s+["\']([^"\']+)["\']',
        ],
    }

    def __init__(self, project_root: str = None):
        """
        Initialize dependency analyzer.

        Args:
            project_root: Root directory of the project, or None.
        """
        self.project_root = Path(project_root) if project_root else None
        # filepath -> list of local imports; avoids re-reading every file
        # each time _find_dependents() scans the project.
        self.file_cache = {}

    def analyze_file(self, filepath: str, all_files: List[str] = None) -> Dict:
        """
        Analyze a file's dependencies.

        Args:
            filepath: Path to file to analyze
            all_files: List of all files in project (for dependent analysis)

        Returns:
            Dictionary with imports, dependents, and sub-scores. On a read
            failure, all counts/scores are zero and an 'error' key is added.
        """
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                content = f.read()
        except Exception as e:
            return {
                'imports': [],
                'import_count': 0,
                'dependents': [],
                'dependent_count': 0,
                'depends_score': 0,
                'depended_score': 0,
                'total_score': 0,
                'error': str(e)
            }

        # Find imports (what this file depends on)
        imports = self._find_imports(content, filepath)

        # Find dependents (what depends on this file)
        dependents = []
        if all_files:
            dependents = self._find_dependents(filepath, all_files)

        # Each import is worth 2 points and each dependent 5, with both
        # directions capped at 50 so total_score stays within 0-100.
        depends_score = min(50, len(imports) * 2)
        depended_score = min(50, len(dependents) * 5)
        total_score = depends_score + depended_score

        return {
            'imports': imports,
            'import_count': len(imports),
            'dependents': dependents,
            'dependent_count': len(dependents),
            'depends_score': depends_score,
            'depended_score': depended_score,
            'total_score': total_score
        }

    def _find_imports(self, content: str, filepath: str) -> List[str]:
        """
        Find all local imports in a file.

        Args:
            content: File content
            filepath: File path (its extension selects the regex set)

        Returns:
            Sorted list of imported module/file paths. External packages are
            filtered out; only relative imports (starting with '.' or '/')
            are kept, plus non-scoped slash paths when a project_root is set.
        """
        imports = set()

        # Determine language from extension
        ext = Path(filepath).suffix.lower()

        # Select appropriate patterns (unknown extensions yield no imports)
        patterns = []
        if ext in ['.py']:
            patterns = self.IMPORT_PATTERNS['python']
        elif ext in ['.ts', '.tsx']:
            patterns = self.IMPORT_PATTERNS['typescript']
        elif ext in ['.js', '.jsx']:
            patterns = self.IMPORT_PATTERNS['javascript']
        elif ext in ['.sol']:
            patterns = self.IMPORT_PATTERNS['solidity']

        # Find all imports
        for pattern in patterns:
            matches = re.findall(pattern, content, re.MULTILINE)
            imports.update(matches)

        # Filter out external packages (keep only local-looking imports)
        local_imports = []
        for imp in imports:
            # Relative imports start with . or /
            if imp.startswith('.') or imp.startswith('/'):
                local_imports.append(imp)
            # Or are within the project structure ('@'-scoped npm packages
            # are excluded even though they contain '/')
            elif self.project_root and not imp.startswith('@') and '/' in imp:
                local_imports.append(imp)

        # local_imports is already a list, so sorted() alone suffices
        return sorted(local_imports)

    def _find_dependents(self, filepath: str, all_files: List[str]) -> List[str]:
        """
        Find all files that import/depend on this file.

        Args:
            filepath: Path to file
            all_files: List of all files in project

        Returns:
            Sorted list of files that depend on this file
        """
        dependents = []

        # Check each file for imports of this file
        for other_file in all_files:
            if other_file == filepath:
                continue

            # Get imports from other file (use cache if available)
            if other_file not in self.file_cache:
                try:
                    with open(other_file, 'r', encoding='utf-8') as f:
                        content = f.read()
                    imports = self._find_imports(content, other_file)
                    self.file_cache[other_file] = imports
                # Unreadable files contribute no imports. Previously a bare
                # `except:` — that also swallowed SystemExit/KeyboardInterrupt.
                except Exception:
                    self.file_cache[other_file] = []

            imports = self.file_cache[other_file]

            # Check if any import matches this file
            for imp in imports:
                if self._import_matches_file(imp, filepath, other_file):
                    dependents.append(other_file)
                    break

        return sorted(dependents)

    def _import_matches_file(self, import_path: str, target_file: str, source_file: str) -> bool:
        """
        Check if an import path matches a target file.

        Args:
            import_path: Import statement path
            target_file: File to check against
            source_file: File containing the import

        Returns:
            True if import matches target file
        """
        # Convert to Path objects
        target = Path(target_file)
        source = Path(source_file)

        # Handle relative imports. NOTE(review): this resolves the import as
        # a filesystem path, which fits JS/TS-style './x' imports; Python's
        # dotted relative imports ('.module') will generally not match.
        if import_path.startswith('.'):
            # Resolve relative to source file's directory
            resolved = (source.parent / import_path).resolve()

            # Exact path match (import already carried the extension)
            if resolved == target.resolve():
                return True

            # Try adding common extensions
            for ext in ['.ts', '.tsx', '.js', '.jsx', '.py', '.sol']:
                if (resolved.parent / (resolved.name + ext)).resolve() == target.resolve():
                    return True

            # Try index files (directory imports)
            for index_name in ['index.ts', 'index.tsx', 'index.js', 'index.jsx', '__init__.py']:
                if (resolved / index_name).resolve() == target.resolve():
                    return True

        # Handle absolute/package-style imports: match when the '/'-separated
        # import path appears inside the resolved target path. Compare in
        # POSIX form so this also works on Windows (the previous
        # replace('/', Path('/').as_posix()) replaced '/' with '/', a no-op).
        else:
            if import_path in target.resolve().as_posix():
                return True

        return False

    def get_dependency_reasons(self, analysis: Dict) -> List[str]:
        """
        Generate human-readable reasons for dependency score.

        Args:
            analysis: Dependency analysis results

        Returns:
            List of reason strings
        """
        reasons = []

        if analysis['dependent_count'] > 0:
            reasons.append(f"Depended upon by {analysis['dependent_count']} file(s)")

        if analysis['import_count'] > 10:
            reasons.append(f"High dependency count ({analysis['import_count']} imports)")
        elif analysis['import_count'] > 5:
            reasons.append(f"Medium dependency count ({analysis['import_count']} imports)")

        if analysis['dependent_count'] >= 10:
            reasons.append("Critical shared module")
        elif analysis['dependent_count'] >= 5:
            reasons.append("Widely-used module")

        return reasons

    def get_dependency_suggestions(self, analysis: Dict) -> List[str]:
        """
        Generate dependency-specific suggestions.

        Args:
            analysis: Dependency analysis results

        Returns:
            List of suggestion strings
        """
        suggestions = []

        if analysis['dependent_count'] >= 5:
            suggestions.append("Document public API and contracts")
            suggestions.append("Add usage examples for common patterns")

        if analysis['import_count'] > 10:
            suggestions.append("Document key dependencies and their roles")
            suggestions.append("Consider documenting dependency graph")

        if analysis['dependent_count'] >= 10:
            suggestions.append("Critical module - prioritize comprehensive documentation")
            suggestions.append("Add integration examples")

        return suggestions
|
|
@@ -0,0 +1,200 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Security Analyzer for VooDocs Priority System
|
|
3
|
+
|
|
4
|
+
Detects security-sensitive code based on keyword analysis.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import re
|
|
8
|
+
from typing import Dict, List, Set
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class SecurityAnalyzer:
    """Analyzes code for security-sensitive keywords and patterns.

    Scoring: 10 points per critical keyword, 5 per high, 2 per medium,
    capped at 100.
    """

    # Critical security keywords (10 points each)
    CRITICAL_KEYWORDS = {
        'password', 'secret', 'token', 'key', 'private',
        'auth', 'authentication', 'authorization',
        'admin', 'root', 'sudo', 'privilege',
        'encrypt', 'decrypt', 'hash', 'crypto',
        'payment', 'credit', 'card', 'billing',
        'sql', 'query', 'execute', 'eval',
        'unsafe', 'dangerous', 'vulnerable',
    }

    # High priority security keywords (5 points each)
    HIGH_KEYWORDS = {
        'user', 'session', 'cookie', 'jwt',
        'login', 'logout', 'signin', 'signup',
        'access', 'permission', 'role', 'grant',
        'sanitize', 'validate', 'escape', 'filter',
        'upload', 'download', 'file', 'path',
        'api', 'endpoint', 'route', 'handler',
    }

    # Medium priority security keywords (2 points each)
    MEDIUM_KEYWORDS = {
        'input', 'output', 'request', 'response',
        'data', 'database', 'db', 'storage',
        'config', 'settings', 'env', 'environment',
        'error', 'exception', 'fail', 'crash',
    }

    def __init__(self):
        """Initialize security analyzer."""
        pass

    def analyze_file(self, filepath: str) -> Dict:
        """
        Analyze a file for security-sensitive keywords.

        Args:
            filepath: Path to file to analyze

        Returns:
            Dictionary with security analysis results. On a read failure,
            keyword lists are empty, scores/counts are zero, and an 'error'
            key holds the exception text.
        """
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                content = f.read()
        except Exception as e:
            # Keep the same key schema as the success path (including
            # 'keyword_count', which was previously missing here) so
            # callers can index results uniformly.
            return {
                'critical_keywords': [],
                'high_keywords': [],
                'medium_keywords': [],
                'total_score': 0,
                'keyword_count': 0,
                'error': str(e)
            }

        # Scan for keywords
        critical_found = self._scan_keywords(content, self.CRITICAL_KEYWORDS)
        high_found = self._scan_keywords(content, self.HIGH_KEYWORDS)
        medium_found = self._scan_keywords(content, self.MEDIUM_KEYWORDS)

        # Calculate score
        score = (
            len(critical_found) * 10 +
            len(high_found) * 5 +
            len(medium_found) * 2
        )

        # Cap at 100
        total_score = min(100, score)

        return {
            'critical_keywords': sorted(list(critical_found)),
            'high_keywords': sorted(list(high_found)),
            'medium_keywords': sorted(list(medium_found)),
            'total_score': total_score,
            'keyword_count': len(critical_found) + len(high_found) + len(medium_found)
        }

    def _scan_keywords(self, content: str, keywords: Set[str]) -> Set[str]:
        """
        Scan content for keywords (case-insensitive).

        Args:
            content: File content
            keywords: Set of keywords to search for

        Returns:
            Set of found keywords
        """
        found = set()
        content_lower = content.lower()

        for keyword in keywords:
            # Use word boundaries to avoid partial matches
            # (e.g. 'token' must not match inside 'tokens'... it won't,
            # since \b requires a non-word char after the keyword)
            pattern = r'\b' + re.escape(keyword) + r'\b'
            if re.search(pattern, content_lower):
                found.add(keyword)

        return found

    def get_security_reasons(self, analysis: Dict) -> List[str]:
        """
        Generate human-readable reasons for security score.

        Args:
            analysis: Security analysis results

        Returns:
            List of reason strings
        """
        reasons = []

        if analysis['critical_keywords']:
            # Show at most five keywords, summarize the rest
            keywords_str = ', '.join(analysis['critical_keywords'][:5])
            if len(analysis['critical_keywords']) > 5:
                keywords_str += f" (+{len(analysis['critical_keywords']) - 5} more)"
            reasons.append(f"Critical security keywords: {keywords_str}")

        if analysis['high_keywords']:
            keywords_str = ', '.join(analysis['high_keywords'][:5])
            if len(analysis['high_keywords']) > 5:
                keywords_str += f" (+{len(analysis['high_keywords']) - 5} more)"
            reasons.append(f"High-priority keywords: {keywords_str}")

        if analysis['total_score'] >= 80:
            reasons.append("Highly security-sensitive code")
        elif analysis['total_score'] >= 50:
            reasons.append("Security-sensitive code")

        return reasons

    def get_security_suggestions(self, analysis: Dict) -> List[str]:
        """
        Generate security-specific suggestions.

        Args:
            analysis: Security analysis results

        Returns:
            List of suggestion strings
        """
        suggestions = []

        critical = set(analysis['critical_keywords'])
        high = set(analysis['high_keywords'])

        # Authentication/Authorization
        if critical & {'auth', 'authentication', 'authorization', 'login', 'logout'}:
            suggestions.append("Document authentication/authorization flow")
            suggestions.append("Specify security invariants and assumptions")

        # Cryptography
        if critical & {'encrypt', 'decrypt', 'hash', 'crypto', 'key', 'secret'}:
            suggestions.append("Document encryption algorithms and key management")
            suggestions.append("Specify cryptographic assumptions")

        # Payment/Financial
        if critical & {'payment', 'credit', 'card', 'billing'}:
            suggestions.append("Document payment processing flow")
            suggestions.append("Add PCI-DSS compliance notes")

        # SQL/Database
        if critical & {'sql', 'query', 'execute'}:
            suggestions.append("Document SQL injection prevention measures")
            suggestions.append("Specify query sanitization rules")

        # Input Validation
        if high & {'sanitize', 'validate', 'escape', 'filter', 'input'}:
            suggestions.append("Document input validation rules")
            suggestions.append("Specify allowed input formats")

        # File Operations
        if high & {'upload', 'download', 'file', 'path'}:
            suggestions.append("Document file validation and size limits")
            suggestions.append("Specify path traversal prevention")

        # Session Management
        if high & {'session', 'cookie', 'jwt', 'token'}:
            suggestions.append("Document session/token lifecycle")
            suggestions.append("Specify expiration and refresh logic")

        # Generic security suggestions (only when nothing specific matched)
        if analysis['total_score'] >= 60 and not suggestions:
            suggestions.append("Add security assumptions and invariants")
            suggestions.append("Document security-critical logic")

        return suggestions
|
|
@@ -0,0 +1,216 @@
|
|
|
1
|
+
"""
|
|
2
|
+
VooDocs Lite - Abbreviation Dictionary
|
|
3
|
+
|
|
4
|
+
Provides bidirectional mapping between full words and abbreviations
|
|
5
|
+
for ultra-compact symbolic notation.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
# Abbreviation dictionary: abbr -> full
|
|
9
|
+
# Abbreviation dictionary: abbr -> full
ABBREVIATIONS = {
    # Storage & Data
    'db': 'database',
    'cfg': 'configuration',
    'var': 'variable',
    'const': 'constant',
    'param': 'parameter',
    'arg': 'argument',
    'id': 'identifier',

    # Common verbs (identity entries: these words pass through unchanged
    # in both directions; kept to document the recognized vocabulary)
    'must': 'must',
    'be': 'be',
    'is': 'is',
    'are': 'are',
    'has': 'has',
    'have': 'have',
    'does': 'does',
    'do': 'do',
    'will': 'will',
    'can': 'can',
    'should': 'should',
    'may': 'may',
    'contains': 'contains',
    'returns': 'returns',
    'expire': 'expire',
    'expires': 'expires',
    'stored': 'stored',
    'signed': 'signed',
    'hashed': 'hashed',
    'valid': 'valid',
    'logged': 'logged',

    # Actions
    # NOTE: 'val' -> 'validate' was removed here because the literal also
    # defined 'val' -> 'value' below; duplicate dict keys are silently
    # overridden, so 'validate' never had an effective abbreviation.
    'init': 'initialize',
    'ver': 'verify',
    'gen': 'generate',
    'cr': 'create',
    'upd': 'update',
    'del': 'delete',
    'mod': 'modify',
    'get': 'retrieve',
    'qry': 'query',
    'chk': 'check',

    # Security
    'auth': 'authentication',
    'authz': 'authorization',
    'pwd': 'password',
    'tok': 'token',

    # Entities
    'usr': 'user',
    'usrs': 'users',

    # Time
    'ts': 'timestamp',
    'exp': 'expiration',

    # Communication
    'resp': 'response',
    'req': 'request',
    'msg': 'message',

    # Status & Errors
    'err': 'error',
    'exc': 'exception',
    'ok': 'success',
    'fail': 'failure',

    # Boolean & Values
    'T': 'true',
    'F': 'false',
    'N': 'null',
    'U': 'undefined',
    'E': 'empty',

    # Types
    'str': 'string',
    'num': 'number',
    'int': 'integer',
    'bool': 'boolean',
    'arr': 'array',
    'obj': 'object',
    'fn': 'function',

    # Blockchain
    'addr': 'address',
    'ctr': 'contract',
    'tx': 'transaction',
    'blk': 'block',
    'bal': 'balance',
    'amt': 'amount',

    # Domain-specific
    'sub': 'subdomain',
    'subs': 'subdomains',
    'reg': 'registry',
    'own': 'owner',
    'mgmt': 'management',

    # Common words
    'w/': 'with',
    'wo/': 'without',
    'svc': 'service',
    'sys': 'system',
    'ops': 'operations',
    'ret': 'returns',
    'res': 'result',
    'val': 'value',
    'vals': 'values',
    'len': 'length',
    'cnt': 'count',
    'max': 'maximum',
    'min': 'minimum',
    'avg': 'average',
    'sum': 'summary',
    'desc': 'description',
    'info': 'information',
    'spec': 'specification',
    'impl': 'implementation',
    'ref': 'reference',
    'def': 'definition',
    'decl': 'declaration',
}

# Reverse mapping: full -> abbr. Where two abbreviations share a full form
# (e.g. 'returns' maps from both 'returns' and 'ret'), the later entry wins.
EXPANSIONS = {v: k for k, v in ABBREVIATIONS.items()}

# Symbol mappings: Lite ASCII symbol -> standard VooDocs symbol
LITE_TO_STANDARD = {
    '>': '⊢',  # Purpose/Postcondition (context-dependent)
    '@': '∂',  # Dependencies
    '!': '⚠',  # Assumptions
    '<': '⊳',  # Preconditions
    '=': '⊨',  # Invariants
    '~': '⚡',  # Complexity
    '#': '🔒',  # Security
}

STANDARD_TO_LITE = {v: k for k, v in LITE_TO_STANDARD.items()}
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
def expand_abbreviation(abbr: str) -> str:
    """Return the full form of *abbr*; unknown abbreviations pass through unchanged."""
    if abbr in ABBREVIATIONS:
        return ABBREVIATIONS[abbr]
    return abbr
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
def compress_word(word: str) -> str:
    """Return the abbreviation for *word* (matched case-insensitively); unknown words pass through unchanged."""
    key = word.lower()
    if key in EXPANSIONS:
        return EXPANSIONS[key]
    return word
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
def expand_text(text: str) -> str:
    """
    Expand abbreviated text to full form.

    Each whitespace-separated token is expanded; a single trailing
    punctuation character is preserved in place.

    Example:
        "usr auth svc w/ JWT gen" -> "user authentication service with JWT generation"
    """
    pieces = []

    for token in text.split():
        # Peel off one trailing punctuation mark, if present, so that
        # e.g. "svc," still expands its word part.
        trailing = token[-1] if token[-1] in '.,;:!?' else ''
        core = token[:-1] if trailing else token
        pieces.append(expand_abbreviation(core) + trailing)

    return ' '.join(pieces)
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def compress_text(text: str) -> str:
    """
    Compress text using abbreviations.

    Each whitespace-separated token is compressed; a single trailing
    punctuation character is preserved in place.

    Example:
        "user authentication service with JWT generation" -> "usr auth svc w/ JWT gen"
    """
    pieces = []

    for token in text.split():
        if token[-1] in '.,;:!?':
            # Compress the word part, then reattach the punctuation mark.
            pieces.append(compress_word(token[:-1]) + token[-1])
        else:
            pieces.append(compress_word(token))

    return ' '.join(pieces)
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
def get_lite_symbol(standard_symbol: str) -> str:
    """Convert a standard VooDocs symbol to its Lite equivalent; unmapped symbols pass through."""
    try:
        return STANDARD_TO_LITE[standard_symbol]
    except KeyError:
        return standard_symbol
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
def get_standard_symbol(lite_symbol: str) -> str:
    """Convert a Lite symbol to its standard VooDocs equivalent; unmapped symbols pass through."""
    try:
        return LITE_TO_STANDARD[lite_symbol]
    except KeyError:
        return lite_symbol
|