@voodocs/cli 0.4.2 → 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/CHANGELOG.md +431 -0
  2. package/lib/cli/__init__.py +53 -0
  3. package/lib/cli/benchmark.py +311 -0
  4. package/lib/cli/fix.py +244 -0
  5. package/lib/cli/generate.py +310 -0
  6. package/lib/cli/test_cli.py +215 -0
  7. package/lib/cli/validate.py +364 -0
  8. package/lib/darkarts/__init__.py +11 -5
  9. package/lib/darkarts/annotations/__init__.py +11 -3
  10. package/lib/darkarts/annotations/darkarts_parser.py +1 -1
  11. package/lib/darkarts/annotations/translator.py +32 -5
  12. package/lib/darkarts/annotations/types.py +15 -2
  13. package/lib/darkarts/cli_darkarts.py +143 -15
  14. package/lib/darkarts/context/__init__.py +11 -3
  15. package/lib/darkarts/context/ai_integrations.py +7 -21
  16. package/lib/darkarts/context/commands.py +1 -1
  17. package/lib/darkarts/context/diagram.py +8 -22
  18. package/lib/darkarts/context/models.py +7 -22
  19. package/lib/darkarts/context/module_utils.py +1 -1
  20. package/lib/darkarts/context/ui.py +1 -1
  21. package/lib/darkarts/context/validation.py +1 -1
  22. package/lib/darkarts/context/yaml_utils.py +8 -23
  23. package/lib/darkarts/core/__init__.py +12 -2
  24. package/lib/darkarts/core/interface.py +15 -1
  25. package/lib/darkarts/core/loader.py +16 -1
  26. package/lib/darkarts/core/plugin.py +15 -2
  27. package/lib/darkarts/core/registry.py +16 -1
  28. package/lib/darkarts/exceptions.py +16 -2
  29. package/lib/darkarts/plugins/voodocs/__init__.py +12 -2
  30. package/lib/darkarts/plugins/voodocs/ai_native_plugin.py +15 -4
  31. package/lib/darkarts/plugins/voodocs/annotation_validator.py +15 -2
  32. package/lib/darkarts/plugins/voodocs/api_spec_generator.py +15 -2
  33. package/lib/darkarts/plugins/voodocs/documentation_generator.py +15 -2
  34. package/lib/darkarts/plugins/voodocs/html_exporter.py +15 -2
  35. package/lib/darkarts/plugins/voodocs/instruction_generator.py +1 -1
  36. package/lib/darkarts/plugins/voodocs/pdf_exporter.py +15 -2
  37. package/lib/darkarts/plugins/voodocs/test_generator.py +15 -2
  38. package/lib/darkarts/telemetry.py +15 -2
  39. package/lib/darkarts/validation/README.md +147 -0
  40. package/lib/darkarts/validation/__init__.py +91 -0
  41. package/lib/darkarts/validation/autofix.py +297 -0
  42. package/lib/darkarts/validation/benchmark.py +426 -0
  43. package/lib/darkarts/validation/benchmark_wrapper.py +22 -0
  44. package/lib/darkarts/validation/config.py +257 -0
  45. package/lib/darkarts/validation/performance.py +412 -0
  46. package/lib/darkarts/validation/performance_wrapper.py +37 -0
  47. package/lib/darkarts/validation/semantic.py +461 -0
  48. package/lib/darkarts/validation/semantic_wrapper.py +77 -0
  49. package/lib/darkarts/validation/test_validation.py +160 -0
  50. package/lib/darkarts/validation/types.py +97 -0
  51. package/lib/darkarts/validation/watch.py +239 -0
  52. package/package.json +19 -6
  53. package/voodocs_cli.py +28 -0
  54. package/cli.py +0 -1646
  55. package/lib/darkarts/cli.py +0 -128
package/lib/darkarts/validation/benchmark.py
@@ -0,0 +1,426 @@
+#!/usr/bin/env python3
+"""
+DarkArts Benchmark Suite
+
+Comprehensive benchmarking framework for validating complexity claims
+with real performance data.
+
+Usage:
+    python3 benchmark_suite.py --module darkarts.executor.symbolic
+    python3 benchmark_suite.py --all
+    python3 benchmark_suite.py --critical-paths
+"""
+
+import time
+import sys
+import json
+import statistics
+from pathlib import Path
+from typing import Dict, List, Tuple, Any, Callable
+from dataclasses import dataclass, asdict
+import importlib
+import inspect
+
+
+@dataclass
+class BenchmarkResult:
+    """Result of a single benchmark run."""
+    module: str
+    function: str
+    input_size: int
+    execution_time: float # seconds
+    memory_usage: int # bytes
+    iterations: int
+    claimed_complexity: str
+    actual_complexity: str
+    status: str # 'pass', 'fail', 'warning'
+
+
+class ComplexityAnalyzer:
+    """Analyzes actual runtime complexity from benchmark data."""
+
+    @staticmethod
+    def fit_complexity(sizes: List[int], times: List[float]) -> str:
+        """
+        Fit benchmark data to complexity classes.
+
+        Returns best-fit complexity (O(1), O(log n), O(n), O(n log n), O(n²), O(2^n))
+        """
+        if len(sizes) < 3:
+            return "Unknown (insufficient data)"
+
+        # Normalize times
+        base_time = times[0]
+        if base_time == 0:
+            return "O(1)"
+
+        ratios = [t / base_time for t in times]
+        size_ratios = [s / sizes[0] for s in sizes]
+
+        # Check O(1) - constant time
+        if max(ratios) / min(ratios) < 1.5:
+            return "O(1)"
+
+        # Check O(log n)
+        log_ratios = [size_ratios[i] ** 0.5 for i in range(len(size_ratios))]
+        if ComplexityAnalyzer._fits(ratios, log_ratios, tolerance=0.3):
+            return "O(log n)"
+
+        # Check O(n)
+        if ComplexityAnalyzer._fits(ratios, size_ratios, tolerance=0.3):
+            return "O(n)"
+
+        # Check O(n log n)
+        nlogn_ratios = [sr * (sr ** 0.5) for sr in size_ratios]
+        if ComplexityAnalyzer._fits(ratios, nlogn_ratios, tolerance=0.4):
+            return "O(n log n)"
+
+        # Check O(n²)
+        n2_ratios = [sr ** 2 for sr in size_ratios]
+        if ComplexityAnalyzer._fits(ratios, n2_ratios, tolerance=0.5):
+            return "O(n²)"
+
+        # Check O(n³)
+        n3_ratios = [sr ** 3 for sr in size_ratios]
+        if ComplexityAnalyzer._fits(ratios, n3_ratios, tolerance=0.6):
+            return "O(n³)"
+
+        # Check O(2^n) - exponential
+        exp_ratios = [2 ** (i * 2) for i in range(len(size_ratios))]
+        if ComplexityAnalyzer._fits(ratios, exp_ratios, tolerance=0.7):
+            return "O(2^n)"
+
+        return "Unknown (doesn't fit standard complexity)"
+
+    @staticmethod
+    def _fits(actual: List[float], expected: List[float], tolerance: float) -> bool:
+        """Check if actual ratios fit expected ratios within tolerance."""
+        if len(actual) != len(expected):
+            return False
+
+        errors = [abs(a - e) / e if e != 0 else 0 for a, e in zip(actual, expected)]
+        avg_error = statistics.mean(errors)
+        return avg_error < tolerance
+
+
+class BenchmarkSuite:
+    """Main benchmarking suite."""
+
+    def __init__(self):
+        self.results: List[BenchmarkResult] = []
+        self.critical_paths = [
+            ('darkarts.executor.symbolic', 'solve'),
+            ('darkarts.executor.numerical', 'solve'),
+            ('darkarts.executor.z3_solver', 'solve'),
+            ('darkarts.parser.unified', 'parse'),
+            ('darkarts.parser.latex', 'parse'),
+            ('darkarts.annotations.parser', 'parse_darkarts'),
+            ('darkarts.cli_darkarts', 'main'),
+            ('darkarts.context.diagram', 'generate_dependency_graph'),
+        ]
+
+    def benchmark_function(
+        self,
+        func: Callable,
+        input_generator: Callable[[int], Any],
+        sizes: List[int],
+        iterations: int = 10
+    ) -> List[Tuple[int, float]]:
+        """
+        Benchmark a function with different input sizes.
+
+        Args:
+            func: Function to benchmark
+            input_generator: Function that generates input of given size
+            sizes: List of input sizes to test
+            iterations: Number of iterations per size
+
+        Returns:
+            List of (size, avg_time) tuples
+        """
+        results = []
+
+        for size in sizes:
+            times = []
+
+            for _ in range(iterations):
+                input_data = input_generator(size)
+
+                start = time.perf_counter()
+                try:
+                    func(input_data)
+                except Exception as e:
+                    print(f" ⚠️ Error with size {size}: {e}")
+                    continue
+                end = time.perf_counter()
+
+                times.append(end - start)
+
+            if times:
+                avg_time = statistics.mean(times)
+                results.append((size, avg_time))
+                print(f" Size {size:6d}: {avg_time:.6f}s (avg of {len(times)} runs)")
+
+        return results
+
+    def benchmark_module(self, module_path: str) -> List[BenchmarkResult]:
+        """Benchmark all functions in a module."""
+        print(f"\n{'='*60}")
+        print(f"Benchmarking: {module_path}")
+        print(f"{'='*60}\n")
+
+        try:
+            module = importlib.import_module(module_path)
+        except ImportError as e:
+            print(f"❌ Failed to import {module_path}: {e}")
+            return []
+
+        # Get claimed complexity from module docstring
+        claimed_complexity = self._extract_complexity_claim(module)
+
+        results = []
+
+        # Find benchmarkable functions
+        for name, obj in inspect.getmembers(module):
+            if inspect.isfunction(obj) and not name.startswith('_'):
+                print(f"\nFunction: {name}")
+
+                # Generate appropriate test sizes
+                sizes = [10, 50, 100, 500, 1000]
+
+                # Create input generator (customize per function type)
+                input_gen = self._create_input_generator(name, obj)
+
+                if input_gen is None:
+                    print(f" ⚠️ No input generator available")
+                    continue
+
+                # Run benchmark
+                bench_results = self.benchmark_function(obj, input_gen, sizes, iterations=5)
+
+                if not bench_results:
+                    continue
+
+                # Analyze complexity
+                sizes_tested, times = zip(*bench_results)
+                actual_complexity = ComplexityAnalyzer.fit_complexity(list(sizes_tested), list(times))
+
+                # Compare with claim
+                status = self._compare_complexity(claimed_complexity, actual_complexity)
+
+                result = BenchmarkResult(
+                    module=module_path,
+                    function=name,
+                    input_size=max(sizes_tested),
+                    execution_time=times[-1],
+                    memory_usage=0, # TODO: Add memory profiling
+                    iterations=5,
+                    claimed_complexity=claimed_complexity,
+                    actual_complexity=actual_complexity,
+                    status=status
+                )
+
+                results.append(result)
+                self.results.append(result)
+
+                print(f" Claimed: {claimed_complexity}")
+                print(f" Actual: {actual_complexity}")
+                print(f" Status: {status}")
+
+        return results
+
+    def benchmark_critical_paths(self) -> List[BenchmarkResult]:
+        """Benchmark all critical paths."""
+        print("\n" + "="*60)
+        print("BENCHMARKING CRITICAL PATHS")
+        print("="*60)
+
+        all_results = []
+
+        for module_path, func_name in self.critical_paths:
+            print(f"\n{'='*60}")
+            print(f"Critical Path: {module_path}.{func_name}")
+            print(f"{'='*60}\n")
+
+            try:
+                module = importlib.import_module(module_path)
+                func = getattr(module, func_name, None)
+
+                if func is None:
+                    print(f"❌ Function {func_name} not found in {module_path}")
+                    continue
+
+                # Get claimed complexity
+                claimed_complexity = self._extract_complexity_claim(module)
+
+                # Create input generator
+                input_gen = self._create_input_generator(func_name, func)
+
+                if input_gen is None:
+                    print(f" ⚠️ No input generator available")
+                    continue
+
+                # Run benchmark with larger sizes for critical paths
+                sizes = [10, 50, 100, 500, 1000, 5000]
+                bench_results = self.benchmark_function(func, input_gen, sizes, iterations=10)
+
+                if not bench_results:
+                    continue
+
+                # Analyze
+                sizes_tested, times = zip(*bench_results)
+                actual_complexity = ComplexityAnalyzer.fit_complexity(list(sizes_tested), list(times))
+
+                status = self._compare_complexity(claimed_complexity, actual_complexity)
+
+                result = BenchmarkResult(
+                    module=module_path,
+                    function=func_name,
+                    input_size=max(sizes_tested),
+                    execution_time=times[-1],
+                    memory_usage=0,
+                    iterations=10,
+                    claimed_complexity=claimed_complexity,
+                    actual_complexity=actual_complexity,
+                    status=status
+                )
+
+                all_results.append(result)
+                self.results.append(result)
+
+                print(f"\n Claimed: {claimed_complexity}")
+                print(f" Actual: {actual_complexity}")
+                print(f" Status: {status}")
+
+            except Exception as e:
+                print(f"❌ Error benchmarking {module_path}.{func_name}: {e}")
+
+        return all_results
+
+    def _extract_complexity_claim(self, module) -> str:
+        """Extract ⚡{} complexity claim from module docstring."""
+        docstring = inspect.getdoc(module)
+        if not docstring:
+            return "Unknown"
+
+        # Look for ⚡{...}
+        import re
+        match = re.search(r'⚡\{([^}]+)\}', docstring)
+        if match:
+            return match.group(1)
+
+        return "Unknown"
+
+    def _create_input_generator(self, func_name: str, func: Callable) -> Callable[[int], Any]:
+        """Create appropriate input generator for function."""
+        # Inspect function signature
+        sig = inspect.signature(func)
+        params = list(sig.parameters.values())
+
+        if not params:
+            return lambda n: None
+
+        first_param = params[0]
+
+        # Generate based on function name and parameter type
+        if 'parse' in func_name.lower():
+            # Parser functions - generate strings
+            return lambda n: "x + y = " + " + ".join([f"term{i}" for i in range(n)])
+
+        elif 'solve' in func_name.lower():
+            # Solver functions - generate equations
+            return lambda n: f"x^{n} + {n}*x + {n*2} = 0"
+
+        elif 'generate' in func_name.lower():
+            # Generator functions - generate lists
+            return lambda n: list(range(n))
+
+        elif first_param.annotation == str or 'str' in str(first_param.annotation):
+            # String input
+            return lambda n: "a" * n
+
+        elif first_param.annotation == list or 'list' in str(first_param.annotation):
+            # List input
+            return lambda n: list(range(n))
+
+        elif first_param.annotation == int or 'int' in str(first_param.annotation):
+            # Integer input
+            return lambda n: n
+
+        else:
+            # Default: list of integers
+            return lambda n: list(range(n))
+
+    def _compare_complexity(self, claimed: str, actual: str) -> str:
+        """Compare claimed vs actual complexity."""
+        # Normalize
+        claimed_norm = claimed.split(':')[0].split('|')[0].strip()
+        actual_norm = actual.strip()
+
+        # Extract complexity class
+        complexity_order = {
+            'O(1)': 0,
+            'O(log n)': 1,
+            'O(n)': 2,
+            'O(n log n)': 3,
+            'O(n²)': 4,
+            'O(n³)': 5,
+            'O(2^n)': 6,
+        }
+
+        claimed_order = complexity_order.get(claimed_norm, -1)
+        actual_order = complexity_order.get(actual_norm, -1)
+
+        if claimed_order == -1 or actual_order == -1:
+            return 'warning'
+
+        if claimed_order == actual_order:
+            return 'pass'
+        elif claimed_order > actual_order:
+            return 'pass' # Conservative claim (safe)
+        else:
+            return 'fail' # Underestimated (dangerous)
+
+    def generate_report(self, output_path: str = None):
+        """Generate benchmark report."""
+        print("\n" + "="*60)
+        print("BENCHMARK REPORT")
+        print("="*60 + "\n")
+
+        if not self.results:
+            print("No benchmark results available")
+            return
+
+        # Summary statistics
+        total = len(self.results)
+        passed = sum(1 for r in self.results if r.status == 'pass')
+        failed = sum(1 for r in self.results if r.status == 'fail')
+        warnings = sum(1 for r in self.results if r.status == 'warning')
+
+        print(f"Total Benchmarks: {total}")
+        print(f"Passed: {passed} ({passed/total*100:.1f}%)")
+        print(f"Failed: {failed} ({failed/total*100:.1f}%)")
+        print(f"Warnings: {warnings} ({warnings/total*100:.1f}%)")
+        print()
+
+        # Detailed results
+        print("Detailed Results:")
+        print("-" * 60)
+
+        for result in self.results:
+            status_icon = {'pass': '✅', 'fail': '❌', 'warning': '⚠️'}[result.status]
+            print(f"{status_icon} {result.module}.{result.function}")
+            print(f" Claimed: {result.claimed_complexity}")
+            print(f" Actual: {result.actual_complexity}")
+            print(f" Time: {result.execution_time:.6f}s (n={result.input_size})")
+            print()
+
+        # Save to file
+        if output_path:
+            with open(output_path, 'w') as f:
+                json.dump([asdict(r) for r in self.results], f, indent=2)
+            print(f"Report saved to: {output_path}")
+
+
+
+# CLI interface removed - use darkarts.validation API or voodocs CLI instead
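
The closing comment notes that the standalone CLI entry point was dropped, so the suite is driven through the library API. A minimal usage sketch follows, assuming the packaged module resolves to darkarts.validation.benchmark once installed; the import path and the sorted() workload are illustrative and not part of this release:

    # Sketch only: sorted() is a stand-in workload, not package code, and the
    # import path is an assumption about how the packaged lib/ directory installs.
    from darkarts.validation.benchmark import BenchmarkSuite, ComplexityAnalyzer

    suite = BenchmarkSuite()
    timings = suite.benchmark_function(
        func=sorted,
        input_generator=lambda n: list(range(n, 0, -1)),  # reversed list of length n
        sizes=[100, 1000, 10000, 100000],
        iterations=5,
    )
    sizes, times = zip(*timings)
    # benchmark_function returns (size, avg_time) pairs, so the curve fit is a separate step.
    print(ComplexityAnalyzer.fit_complexity(list(sizes), list(times)))
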
package/lib/darkarts/validation/benchmark_wrapper.py
@@ -0,0 +1,22 @@
+"""@darkarts
+⊢validation:benchmark.wrapper
+∂{pathlib,typing}
+⚠{python≥3.7}
+⊨{∀method→delegates,¬state}
+🔒{read-only}
+⚡{O(1):creation,O(n*m):benchmarks|n=files,m=iterations}
+"""
+
+from pathlib import Path
+from typing import List, Dict, Any
+
+from . import benchmark
+from .types import BenchmarkResult
+
+
+class BenchmarkSuite:
+    """Benchmark suite for @darkarts annotations."""
+
+    def run_benchmarks(self, config: Dict[str, Any]) -> List[BenchmarkResult]:
+        """Run benchmarks based on configuration."""
+        return benchmark.run_benchmarks(config)
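
This wrapper is a thin, stateless facade: its BenchmarkSuite forwards the config dict to benchmark.run_benchmarks and returns BenchmarkResult records. A hypothetical call is sketched below; the dict keys are invented for illustration, since only the Dict[str, Any] type hint appears in this diff:

    from darkarts.validation.benchmark_wrapper import BenchmarkSuite

    suite = BenchmarkSuite()
    # "paths" and "iterations" are assumed keys; the real schema is not shown here.
    results = suite.run_benchmarks({"paths": ["lib/darkarts"], "iterations": 5})
    for result in results:  # BenchmarkResult records from .types
        print(result)
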
package/lib/darkarts/validation/config.py
@@ -0,0 +1,257 @@
+"""@darkarts
+⊢config:validation.configuration
+∂{pathlib,typing,dataclasses,json,re}
+⚠{python≥3.7,config-file:valid-json}
+⊨{∀load→Config,∀validate→rules-applied,∀merge→precedence-respected,serializable:to-json}
+🔒{read:config-files,¬write-except-defaults,¬network,¬exec}
+⚡{O(n):load-and-parse|n=config-entries,O(n):rule-application}
+
+Configuration File Support for Validation
+
+Customizable validation rules and settings with:
+- JSON configuration files (.darkarts.json)
+- Per-project settings (project root)
+- Per-directory overrides (nested configs)
+- Rule customization (ignore patterns, custom stdlib)
+- Validation thresholds (error vs warning)
+- Auto-fix preferences (backup settings, dry-run defaults)
+"""
+
+import json
+from pathlib import Path
+from typing import Optional, Set, List, Dict, Any
+from dataclasses import dataclass, field, asdict
+
+
+@dataclass
+class ValidationConfig:
+    """Configuration for validation behavior."""
+
+    # Files and directories to ignore
+    ignore_patterns: List[str] = field(default_factory=lambda: [
+        "*/test_*.py",
+        "*/__pycache__/*",
+        "*/.*",
+        "*/venv/*",
+        "*/env/*",
+        "*/.venv/*"
+    ])
+
+    # Additional stdlib modules to keep in validation
+    stdlib_whitelist: Set[str] = field(default_factory=lambda: {
+        'typing', 'dataclasses', 'enum', 'abc', 'pathlib',
+        'datetime', 'json', 'yaml', 're', 'os', 'sys',
+        'time', 'uuid', 'collections', 'itertools', 'functools'
+    })
+
+    # Modules to always ignore (even if imported)
+    ignore_modules: Set[str] = field(default_factory=set)
+
+    # Whether to treat extra dependencies as errors or warnings
+    extra_deps_as_error: bool = True
+
+    # Whether to treat missing dependencies as errors or warnings
+    missing_deps_as_error: bool = True
+
+    # Minimum number of dependencies before validation
+    min_deps_threshold: int = 0
+
+    # Whether to validate __init__.py files
+    validate_init_files: bool = True
+
+    # Whether to validate test files
+    validate_test_files: bool = True
+
+
+@dataclass
+class AutoFixConfig:
+    """Configuration for auto-fix behavior."""
+
+    # Whether to create backups before fixing
+    create_backups: bool = True
+
+    # Backup directory
+    backup_dir: str = ".backups"
+
+    # Whether to run in dry-run mode by default
+    dry_run_default: bool = False
+
+    # Whether to auto-fix on watch mode
+    auto_fix_on_watch: bool = False
+
+    # Maximum number of files to fix in one batch
+    max_batch_size: int = 100
+
+
+@dataclass
+class WatchConfig:
+    """Configuration for watch mode."""
+
+    # Check interval in seconds
+    interval: float = 1.0
+
+    # Debounce time in seconds
+    debounce: float = 0.5
+
+    # Whether to show verbose output
+    verbose: bool = False
+
+    # Whether to beep on errors
+    beep_on_error: bool = False
+
+
+@dataclass
+class Config:
+    """Complete configuration for darkarts validation."""
+
+    validation: ValidationConfig = field(default_factory=ValidationConfig)
+    autofix: AutoFixConfig = field(default_factory=AutoFixConfig)
+    watch: WatchConfig = field(default_factory=WatchConfig)
+
+    # Project-specific settings
+    project_name: Optional[str] = None
+    project_root: Optional[str] = None
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> 'Config':
+        """Create Config from dictionary."""
+        validation_data = data.get('validation', {})
+        autofix_data = data.get('autofix', {})
+        watch_data = data.get('watch', {})
+
+        # Convert lists to sets where needed
+        if 'stdlib_whitelist' in validation_data:
+            validation_data['stdlib_whitelist'] = set(validation_data['stdlib_whitelist'])
+        if 'ignore_modules' in validation_data:
+            validation_data['ignore_modules'] = set(validation_data['ignore_modules'])
+
+        return cls(
+            validation=ValidationConfig(**validation_data),
+            autofix=AutoFixConfig(**autofix_data),
+            watch=WatchConfig(**watch_data),
+            project_name=data.get('project_name'),
+            project_root=data.get('project_root')
+        )
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert Config to dictionary."""
+        result = asdict(self)
+
+        # Convert sets to lists for JSON serialization
+        if 'stdlib_whitelist' in result['validation']:
+            result['validation']['stdlib_whitelist'] = sorted(result['validation']['stdlib_whitelist'])
+        if 'ignore_modules' in result['validation']:
+            result['validation']['ignore_modules'] = sorted(result['validation']['ignore_modules'])
+
+        return result
+
+    def save(self, path: Path):
+        """Save configuration to a JSON file."""
+        with open(path, 'w') as f:
+            json.dump(self.to_dict(), f, indent=2)
+
+    @classmethod
+    def load(cls, path: Path) -> 'Config':
+        """Load configuration from a JSON file."""
+        with open(path, 'r') as f:
+            data = json.load(f)
+        return cls.from_dict(data)
+
+    @classmethod
+    def find_config(cls, start_path: Path) -> Optional['Config']:
+        """
+        Find and load configuration file starting from a path.
+
+        Searches upward from start_path for .darkarts.json
+
+        Args:
+            start_path: Path to start searching from
+
+        Returns:
+            Config if found, None otherwise
+        """
+        current = start_path if start_path.is_dir() else start_path.parent
+
+        while current != current.parent:
+            config_file = current / ".darkarts.json"
+            if config_file.exists():
+                return cls.load(config_file)
+            current = current.parent
+
+        return None
+
+    def merge(self, other: 'Config') -> 'Config':
+        """
+        Merge another config into this one.
+
+        Other config takes precedence for non-default values.
+
+        Args:
+            other: Config to merge
+
+        Returns:
+            New merged Config
+        """
+        # This is a simple merge - you could make it more sophisticated
+        merged_dict = self.to_dict()
+        other_dict = other.to_dict()
+
+        # Deep merge
+        for key, value in other_dict.items():
+            if isinstance(value, dict) and key in merged_dict:
+                merged_dict[key].update(value)
+            else:
+                merged_dict[key] = value
+
+        return Config.from_dict(merged_dict)
+
+
+def create_default_config(path: Path):
+    """Create a default configuration file."""
+    config = Config()
+    config.save(path)
+    print(f"✅ Created default configuration: {path}")
+
+
+def validate_config(path: Path) -> bool:
+    """
+    Validate a configuration file.
+
+    Args:
+        path: Path to the config file
+
+    Returns:
+        True if valid, False otherwise
+    """
+    try:
+        Config.load(path)
+        print(f"✅ Configuration valid: {path}")
+        return True
+    except json.JSONDecodeError as e:
+        print(f"❌ Invalid JSON in {path}: {e}")
+        return False
+    except Exception as e:
+        print(f"❌ Error loading {path}: {e}")
+        return False
+
+
+def show_config(path: Optional[Path] = None):
+    """Display current configuration."""
+    if path:
+        config = Config.load(path)
+    else:
+        config = Config.find_config(Path.cwd())
+        if config is None:
+            print("No configuration file found. Using defaults.")
+            config = Config()
+
+    print("\n" + "="*60)
+    print("Current Configuration")
+    print("="*60 + "\n")
+
+    print(json.dumps(config.to_dict(), indent=2))
+    print()
+
+
+
+# CLI interface removed - use darkarts.validation API or voodocs CLI instead
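
A short sketch of the configuration API above, assuming the module resolves to darkarts.validation.config at runtime; the override values are illustrative only:

    # Sketch only: the import path is an assumption about the installed layout,
    # and the override is an example value, not a recommended setting.
    from pathlib import Path
    from darkarts.validation.config import Config, create_default_config

    # Write a default .darkarts.json into the current project.
    create_default_config(Path(".darkarts.json"))

    # Discover the nearest config by walking up from the working directory.
    config = Config.find_config(Path.cwd()) or Config()

    # Layer a project-level override on top of the loaded settings.
    override = Config.from_dict({"validation": {"extra_deps_as_error": False}})
    effective = config.merge(override)
    print(effective.validation.extra_deps_as_error)  # False
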