@voodocs/cli 0.4.1 → 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54) hide show
  1. package/CHANGELOG.md +322 -0
  2. package/lib/cli/__init__.py +53 -0
  3. package/lib/cli/benchmark.py +311 -0
  4. package/lib/cli/fix.py +244 -0
  5. package/lib/cli/generate.py +310 -0
  6. package/lib/cli/test_cli.py +215 -0
  7. package/lib/cli/validate.py +364 -0
  8. package/lib/darkarts/__init__.py +11 -5
  9. package/lib/darkarts/annotations/__init__.py +11 -3
  10. package/lib/darkarts/annotations/darkarts_parser.py +1 -1
  11. package/lib/darkarts/annotations/types.py +16 -3
  12. package/lib/darkarts/cli_darkarts.py +385 -0
  13. package/lib/darkarts/context/__init__.py +11 -3
  14. package/lib/darkarts/context/ai_integrations.py +7 -21
  15. package/lib/darkarts/context/commands.py +1 -1
  16. package/lib/darkarts/context/diagram.py +8 -22
  17. package/lib/darkarts/context/models.py +7 -22
  18. package/lib/darkarts/context/module_utils.py +1 -1
  19. package/lib/darkarts/context/ui.py +1 -1
  20. package/lib/darkarts/context/validation.py +1 -1
  21. package/lib/darkarts/context/yaml_utils.py +8 -23
  22. package/lib/darkarts/core/__init__.py +12 -2
  23. package/lib/darkarts/core/interface.py +16 -2
  24. package/lib/darkarts/core/loader.py +17 -2
  25. package/lib/darkarts/core/plugin.py +16 -3
  26. package/lib/darkarts/core/registry.py +17 -2
  27. package/lib/darkarts/exceptions.py +17 -3
  28. package/lib/darkarts/plugins/voodocs/__init__.py +12 -2
  29. package/lib/darkarts/plugins/voodocs/ai_native_plugin.py +16 -5
  30. package/lib/darkarts/plugins/voodocs/annotation_validator.py +16 -3
  31. package/lib/darkarts/plugins/voodocs/api_spec_generator.py +16 -3
  32. package/lib/darkarts/plugins/voodocs/documentation_generator.py +16 -3
  33. package/lib/darkarts/plugins/voodocs/html_exporter.py +16 -3
  34. package/lib/darkarts/plugins/voodocs/instruction_generator.py +1 -1
  35. package/lib/darkarts/plugins/voodocs/pdf_exporter.py +16 -3
  36. package/lib/darkarts/plugins/voodocs/test_generator.py +16 -3
  37. package/lib/darkarts/telemetry.py +16 -3
  38. package/lib/darkarts/validation/README.md +147 -0
  39. package/lib/darkarts/validation/__init__.py +91 -0
  40. package/lib/darkarts/validation/autofix.py +297 -0
  41. package/lib/darkarts/validation/benchmark.py +426 -0
  42. package/lib/darkarts/validation/benchmark_wrapper.py +22 -0
  43. package/lib/darkarts/validation/config.py +257 -0
  44. package/lib/darkarts/validation/performance.py +412 -0
  45. package/lib/darkarts/validation/performance_wrapper.py +37 -0
  46. package/lib/darkarts/validation/semantic.py +461 -0
  47. package/lib/darkarts/validation/semantic_wrapper.py +77 -0
  48. package/lib/darkarts/validation/test_validation.py +160 -0
  49. package/lib/darkarts/validation/types.py +97 -0
  50. package/lib/darkarts/validation/watch.py +239 -0
  51. package/package.json +20 -6
  52. package/voodocs_cli.py +28 -0
  53. package/cli.py +0 -1646
  54. package/lib/darkarts/cli.py +0 -128
@@ -0,0 +1,412 @@
1
+ """@darkarts
2
+ ⊢performance:validation.complexity-tracking
3
+ ∂{time,pathlib,typing,dataclasses,json,re,ast,statistics}
4
+ ⚠{python≥3.7,file:readable,executable:for-benchmarking}
5
+ ⊨{∀track→metrics-collected,∀validate→complexity-verified,∀benchmark→accurate,non-invasive:no-code-modification}
6
+ 🔒{read-only:source,write:metrics,¬network,exec:isolated-subprocess}
7
+ ⚡{O(n*m)|n=file-size,m=benchmark-iterations}
8
+
9
+ Performance Tracking and Complexity Validation
10
+
11
+ Validates performance claims in @darkarts annotations with:
12
+ - Complexity extraction (parse ⚡{} section)
13
+ - Static analysis (count loops, recursion, operations)
14
+ - Runtime benchmarking (measure actual performance)
15
+ - Complexity verification (compare claimed vs actual)
16
+ - Performance regression detection (track over time)
17
+ - Optimization suggestions (identify bottlenecks)
18
+ """
19
+
20
+ import time
21
+ import ast
22
+ import re
23
+ import json
24
+ import statistics
25
+ from pathlib import Path
26
+ from typing import Dict, List, Optional, Tuple, Any
27
+ from dataclasses import dataclass, field
28
+ from datetime import datetime
29
+
30
+
31
@dataclass
class ComplexityInfo:
    """Complexity claim parsed out of a @darkarts annotation's ⚡{} section."""
    claimed: str               # Big-O expression as written, e.g. "O(n)", "O(n*d)", "O(1)"
    variables: Dict[str, str]  # meaning of each variable, e.g. {"n": "file-size", "d": "depth"}
    notes: List[str]           # extra qualifiers such as "worst-case:O(2^n)"

    def __str__(self) -> str:
        # The raw claimed string is the canonical display form.
        return self.claimed
40
+
41
+
42
@dataclass
class PerformanceMetrics:
    """Summary statistics over repeated benchmark runs (all values in seconds)."""
    execution_times: List[float]  # raw per-iteration timings
    mean_time: float
    median_time: float
    std_dev: float                # sample standard deviation of execution_times
    min_time: float
    max_time: float

    def __str__(self) -> str:
        # Compact mu/sigma display used in performance reports.
        return f"μ={self.mean_time:.4f}s σ={self.std_dev:.4f}s"
54
+
55
+
56
@dataclass
class ComplexityAnalysis:
    """Results of static (AST-based) complexity analysis of one file."""
    loop_count: int            # total for/while loops found
    nested_loop_depth: int     # deepest observed loop nesting
    recursion_detected: bool   # a function was seen calling itself directly
    function_calls: int        # total call expressions
    estimated_complexity: str  # coarse Big-O class derived from the above

    def __str__(self) -> str:
        return f"{self.estimated_complexity} (loops={self.loop_count}, depth={self.nested_loop_depth})"
67
+
68
+
69
@dataclass
class PerformanceResult:
    """Aggregated outcome of performance validation for a single file."""
    file_path: str                                # analyzed file
    claimed_complexity: Optional[ComplexityInfo]  # parsed ⚡{} claim, if any
    static_analysis: ComplexityAnalysis           # AST-based estimate
    benchmarks: Optional[PerformanceMetrics]      # runtime timings, if requested
    is_valid: bool                                # claim consistent with analysis
    warnings: List[str] = field(default_factory=list)
    suggestions: List[str] = field(default_factory=list)

    def __str__(self) -> str:
        # Human-readable multi-line report; optional sections appear
        # only when the corresponding data is present.
        report = [f"Performance Analysis: {self.file_path}"]
        if self.claimed_complexity:
            report.append(f" Claimed: {self.claimed_complexity}")
        report.append(f" Static: {self.static_analysis}")
        if self.benchmarks:
            report.append(f" Benchmarks: {self.benchmarks}")
        if self.is_valid:
            report.append(" ✅ Complexity claim validated")
        else:
            report.append(" ❌ Complexity claim may be inaccurate")
        report.extend(f" ⚠️ {warning}" for warning in self.warnings)
        report.extend(f" 💡 {suggestion}" for suggestion in self.suggestions)
        return "\n".join(report)
105
+
106
+
107
class ComplexityAnalyzer(ast.NodeVisitor):
    """Statically estimates code complexity from a Python AST.

    Tracks total loops, maximum loop-nesting depth, direct recursion and
    call counts; estimate_complexity() maps these onto a coarse Big-O class.
    """

    def __init__(self):
        self.loop_count = 0            # total for/while loops seen
        self.max_nested_depth = 0      # deepest loop nesting observed
        self.current_depth = 0         # loop nesting depth at current node
        self.recursion_detected = False
        self.function_calls = 0        # total call expressions
        self.function_names = set()    # names of all functions defined
        self.current_function = None   # name of the enclosing function, if any

    def _track_loop(self, node: ast.AST):
        """Shared bookkeeping for any loop construct: count it, update depth."""
        self.loop_count += 1
        self.current_depth += 1
        self.max_nested_depth = max(self.max_nested_depth, self.current_depth)
        self.generic_visit(node)
        self.current_depth -= 1

    def visit_For(self, node: ast.For):
        """Visit for loops."""
        self._track_loop(node)

    def visit_AsyncFor(self, node: ast.AsyncFor):
        """Visit async for loops (fix: previously not counted at all)."""
        self._track_loop(node)

    def visit_While(self, node: ast.While):
        """Visit while loops."""
        self._track_loop(node)

    def _track_function(self, node):
        """Shared bookkeeping for sync/async function definitions."""
        # Save/restore the enclosing function name so nested defs don't
        # corrupt recursion detection for the outer function.
        old_function = self.current_function
        self.current_function = node.name
        self.function_names.add(node.name)
        self.generic_visit(node)
        self.current_function = old_function

    def visit_FunctionDef(self, node: ast.FunctionDef):
        """Visit function definitions."""
        self._track_function(node)

    def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef):
        """Visit async function definitions (fix: previously ignored, so
        recursion inside async functions was never detected)."""
        self._track_function(node)

    def visit_Call(self, node: ast.Call):
        """Visit function calls and detect direct recursion."""
        self.function_calls += 1

        # Direct recursion: a plain-name call matching the enclosing function.
        if isinstance(node.func, ast.Name):
            if node.func.id == self.current_function:
                self.recursion_detected = True

        self.generic_visit(node)

    def estimate_complexity(self) -> str:
        """Estimate Big-O complexity from the collected static metrics.

        Heuristic mapping: recursion -> exponential (pessimistic worst case),
        otherwise nesting depth determines the polynomial degree.
        """
        if self.recursion_detected:
            return "O(2^n)"  # Exponential (worst case for recursion)

        if self.max_nested_depth >= 3:
            return "O(n^3)"
        elif self.max_nested_depth == 2:
            return "O(n^2)"
        elif self.max_nested_depth == 1 or self.loop_count > 0:
            return "O(n)"
        else:
            return "O(1)"
167
+
168
+
169
def parse_complexity_annotation(annotation: str) -> Optional[ComplexityInfo]:
    """
    Parse the ⚡{} section from an annotation.

    Expected format: "O(expr)|var=meaning,var=meaning,note,..." — e.g.
    "O(n*d)|n=file-size,d=depth" (this matches the function's own example
    and the ⚡ annotations used elsewhere in this package).

    Args:
        annotation: The @darkarts annotation string

    Returns:
        ComplexityInfo if found, None otherwise
    """
    # Find the ⚡{...} section
    match = re.search(r'⚡\{([^}]+)\}', annotation)
    if not match:
        return None

    complexity_str = match.group(1)

    # The part before the first '|' is the claimed complexity itself.
    parts = complexity_str.split('|')
    claimed = parts[0].strip()

    variables: Dict[str, str] = {}
    notes: List[str] = []

    # Fix: variable definitions and notes after '|' are comma-separated
    # within a single section (e.g. "n=file-size,d=depth"). The original
    # only split on '|', so "n=file-size,d=depth" collapsed into one
    # variable {"n": "file-size,d=depth"}. Split on ',' as well.
    for part in parts[1:]:
        for entry in part.split(','):
            entry = entry.strip()
            if not entry:
                continue
            if '=' in entry:
                key, value = entry.split('=', 1)
                variables[key.strip()] = value.strip()
            else:
                notes.append(entry)

    return ComplexityInfo(
        claimed=claimed,
        variables=variables,
        notes=notes
    )
209
+
210
+
211
def analyze_complexity(file_path: Path) -> ComplexityAnalysis:
    """
    Perform static complexity analysis on a Python file.

    Args:
        file_path: Path to the Python file

    Returns:
        ComplexityAnalysis results. On any read or parse failure a zeroed
        result with estimated_complexity="Unknown" is returned — this
        best-effort fallback is deliberate, since analysis is advisory.
    """
    try:
        content = file_path.read_text(encoding='utf-8')
        tree = ast.parse(content, filename=str(file_path))

        analyzer = ComplexityAnalyzer()
        analyzer.visit(tree)

        return ComplexityAnalysis(
            loop_count=analyzer.loop_count,
            nested_loop_depth=analyzer.max_nested_depth,
            recursion_detected=analyzer.recursion_detected,
            function_calls=analyzer.function_calls,
            estimated_complexity=analyzer.estimate_complexity()
        )

    except Exception:
        # Fix: removed the unused `as e` binding. The broad catch itself is
        # intentional (unreadable or syntactically invalid files).
        return ComplexityAnalysis(
            loop_count=0,
            nested_loop_depth=0,
            recursion_detected=False,
            function_calls=0,
            estimated_complexity="Unknown"
        )
244
+
245
+
246
def benchmark_file(file_path: Path, iterations: int = 10) -> Optional[PerformanceMetrics]:
    """
    Benchmark a Python file by timing read + compile of its source.

    Note: This is a simplified benchmark. Real benchmarking would need
    to call specific functions with test data.

    Args:
        file_path: Path to the Python file
        iterations: Number of benchmark iterations

    Returns:
        PerformanceMetrics if successful, None if the file cannot be read
        or compiled, or if iterations <= 0.
    """
    # This is a placeholder - real benchmarking would be more sophisticated
    # and would need test data and function identification

    execution_times: List[float] = []

    for _ in range(iterations):
        start = time.perf_counter()

        try:
            # Read inside the timed region so I/O cost is included,
            # preserving the original measurement semantics.
            # Fix: read as UTF-8 explicitly, consistent with the other
            # file reads in this module (platform default may differ).
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
            compile(content, str(file_path), 'exec')
        except Exception:
            # Unreadable or syntactically invalid file: no metrics.
            return None

        end = time.perf_counter()
        execution_times.append(end - start)

    # Fix: guard against iterations <= 0, which previously raised
    # statistics.StatisticsError from mean() on an empty list.
    if not execution_times:
        return None

    return PerformanceMetrics(
        execution_times=execution_times,
        mean_time=statistics.mean(execution_times),
        median_time=statistics.median(execution_times),
        std_dev=statistics.stdev(execution_times) if len(execution_times) > 1 else 0.0,
        min_time=min(execution_times),
        max_time=max(execution_times)
    )
288
+
289
+
290
def validate_complexity(
    claimed: ComplexityInfo,
    static: ComplexityAnalysis
) -> Tuple[bool, List[str], List[str]]:
    """
    Validate claimed complexity against static analysis.

    Args:
        claimed: Claimed complexity from annotation
        static: Static analysis results

    Returns:
        Tuple of (is_valid, warnings, suggestions). Note that unreflected
        recursion produces a warning and suggestion only — it does not
        flip is_valid.
    """
    warnings: List[str] = []
    suggestions: List[str] = []
    is_valid = True

    # Normalize for case-insensitive substring checks below.
    # Fix: removed the unused `estimated_str` local from the original.
    claimed_str = claimed.claimed.upper()

    # Simple validation (could be more sophisticated)
    if "O(1)" in claimed_str and static.loop_count > 0:
        warnings.append(f"Claims O(1) but has {static.loop_count} loops")
        is_valid = False
        suggestions.append(f"Consider updating to {static.estimated_complexity}")

    if "O(N)" in claimed_str and static.nested_loop_depth > 1:
        warnings.append(f"Claims O(n) but has nested loops (depth={static.nested_loop_depth})")
        is_valid = False
        suggestions.append(f"Consider updating to {static.estimated_complexity}")

    if static.recursion_detected and "2^N" not in claimed_str and "EXPONENTIAL" not in claimed_str:
        warnings.append("Recursion detected but not reflected in complexity claim")
        suggestions.append("Consider adding worst-case exponential complexity note")

    return is_valid, warnings, suggestions
328
+
329
+
330
def track_performance(file_path: Path, benchmark: bool = False) -> PerformanceResult:
    """
    Run the complete performance-tracking pipeline for one file.

    Combines annotation parsing, static AST analysis, optional runtime
    benchmarking, and claim validation into a single result.

    Args:
        file_path: Path to the Python file
        benchmark: Whether to run benchmarks (can be slow)

    Returns:
        PerformanceResult with complete analysis
    """
    # Extract the ⚡{} claim from the file's @darkarts docstring, if any.
    # The annotation, when present, is the module docstring opening with
    # """@darkarts at byte 0; index 3 skips the opening triple quote.
    claimed = None
    try:
        source = file_path.read_text(encoding='utf-8')
        if source.startswith('"""@darkarts'):
            closing = source.find('"""', 3)
            if closing != -1:
                claimed = parse_complexity_annotation(source[3:closing])
    except Exception:
        claimed = None  # unreadable file: proceed without a claim

    # Static analysis always runs; benchmarking only on request.
    static = analyze_complexity(file_path)
    benchmarks = benchmark_file(file_path) if benchmark else None

    # Validate the claim when one exists; otherwise report its absence
    # and suggest the statically-estimated complexity.
    if claimed:
        is_valid, warnings, suggestions = validate_complexity(claimed, static)
    else:
        is_valid = True
        warnings = ["No complexity annotation found"]
        suggestions = [f"Consider adding: ⚡{{{static.estimated_complexity}}}"]

    return PerformanceResult(
        file_path=str(file_path),
        claimed_complexity=claimed,
        static_analysis=static,
        benchmarks=benchmarks,
        is_valid=is_valid,
        warnings=warnings,
        suggestions=suggestions
    )
381
+
382
+
383
def save_metrics(result: PerformanceResult, output_path: Path):
    """Save performance metrics to a JSON file.

    Args:
        result: Completed performance analysis to serialize.
        output_path: Destination file (overwritten if it exists).
    """
    data = {
        'file_path': result.file_path,
        'timestamp': datetime.now().isoformat(),
        'claimed_complexity': result.claimed_complexity.claimed if result.claimed_complexity else None,
        'static_analysis': {
            'loop_count': result.static_analysis.loop_count,
            'nested_loop_depth': result.static_analysis.nested_loop_depth,
            'recursion_detected': result.static_analysis.recursion_detected,
            'estimated_complexity': result.static_analysis.estimated_complexity
        },
        'is_valid': result.is_valid,
        'warnings': result.warnings,
        'suggestions': result.suggestions
    }

    # Benchmarks are optional; include the section only when present.
    if result.benchmarks:
        data['benchmarks'] = {
            'mean_time': result.benchmarks.mean_time,
            'median_time': result.benchmarks.median_time,
            'std_dev': result.benchmarks.std_dev
        }

    # Fix: write as UTF-8 explicitly, consistent with the UTF-8 reads
    # elsewhere in this module (platform default encoding may differ).
    with open(output_path, 'w', encoding='utf-8') as f:
        json.dump(data, f, indent=2)
409
+
410
+
411
+
412
+ # CLI interface removed - use darkarts.validation API or voodocs CLI instead
@@ -0,0 +1,37 @@
1
+ """@darkarts
2
+ ⊢validation:performance.wrapper
3
+ ∂{pathlib,typing}
4
+ ⚠{python≥3.7}
5
+ ⊨{∀method→delegates,¬state}
6
+ 🔒{read-only}
7
+ ⚡{O(n):directory-analysis|n=files,O(1):single-file}
8
+ """
9
+
10
from pathlib import Path
from typing import List, Union

from . import performance
from .types import PerformanceResult
15
+
16
+
17
class PerformanceTracker:
    """Performance tracker for @darkarts annotations.

    Stateless facade that delegates to the functional API in the
    `performance` module (per the ¬state guarantee in the module header).
    """

    def analyze_file(self, file_path: Union[str, Path]) -> PerformanceResult:
        """Analyze a single file (static analysis only, no benchmarking).

        Fix: the original used `str | Path`, which raises TypeError at
        import time on Python < 3.10, contradicting the module's claimed
        python≥3.7 support; Union[str, Path] is equivalent and portable.
        """
        return performance.track_performance(Path(file_path), benchmark=False)

    def analyze_directory(self, directory: Union[str, Path], recursive: bool = True) -> List[PerformanceResult]:
        """Analyze all .py files in a directory.

        Args:
            directory: Root directory to scan.
            recursive: Descend into subdirectories when True.

        Returns:
            Results for every file that could be analyzed; files whose
            analysis raises are skipped (deliberate best-effort).
        """
        root = Path(directory)
        pattern = "**/*.py" if recursive else "*.py"
        results: List[PerformanceResult] = []
        for candidate in root.glob(pattern):
            if not candidate.is_file():
                continue
            try:
                results.append(self.analyze_file(candidate))
            except Exception:
                # Skip files that can't be analyzed
                continue
        return results