fast-clean-architecture 1.0.0__py3-none-any.whl → 1.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. fast_clean_architecture/__init__.py +5 -6
  2. fast_clean_architecture/analytics.py +260 -0
  3. fast_clean_architecture/cli.py +563 -46
  4. fast_clean_architecture/config.py +47 -23
  5. fast_clean_architecture/error_tracking.py +201 -0
  6. fast_clean_architecture/exceptions.py +432 -12
  7. fast_clean_architecture/generators/__init__.py +11 -1
  8. fast_clean_architecture/generators/component_generator.py +407 -103
  9. fast_clean_architecture/generators/config_updater.py +186 -38
  10. fast_clean_architecture/generators/generator_factory.py +223 -0
  11. fast_clean_architecture/generators/package_generator.py +9 -7
  12. fast_clean_architecture/generators/template_validator.py +109 -9
  13. fast_clean_architecture/generators/validation_config.py +5 -3
  14. fast_clean_architecture/generators/validation_metrics.py +10 -6
  15. fast_clean_architecture/health.py +169 -0
  16. fast_clean_architecture/logging_config.py +52 -0
  17. fast_clean_architecture/metrics.py +108 -0
  18. fast_clean_architecture/protocols.py +406 -0
  19. fast_clean_architecture/templates/external.py.j2 +109 -32
  20. fast_clean_architecture/utils.py +50 -31
  21. fast_clean_architecture/validation.py +302 -0
  22. {fast_clean_architecture-1.0.0.dist-info → fast_clean_architecture-1.1.2.dist-info}/METADATA +131 -64
  23. fast_clean_architecture-1.1.2.dist-info/RECORD +38 -0
  24. fast_clean_architecture-1.0.0.dist-info/RECORD +0 -30
  25. {fast_clean_architecture-1.0.0.dist-info → fast_clean_architecture-1.1.2.dist-info}/WHEEL +0 -0
  26. {fast_clean_architecture-1.0.0.dist-info → fast_clean_architecture-1.1.2.dist-info}/entry_points.txt +0 -0
  27. {fast_clean_architecture-1.0.0.dist-info → fast_clean_architecture-1.1.2.dist-info}/licenses/LICENSE +0 -0
fast_clean_architecture/generators/template_validator.py

@@ -3,24 +3,24 @@
 import logging
 import time
 from abc import ABC, abstractmethod
-from typing import Dict, Any, Set, Optional, List, Protocol, TypeVar
 from pathlib import Path
+from typing import Any, Dict, List, Optional, Protocol, Set, TypeVar

 import jinja2
-from jinja2 import meta, Environment, TemplateSyntaxError, UndefinedError
+from jinja2 import Environment, TemplateSyntaxError, UndefinedError, meta
 from jinja2.sandbox import SandboxedEnvironment

 from ..exceptions import (
     TemplateError,
-    TemplateValidationError,
     TemplateMissingVariablesError,
     TemplateUndefinedVariableError,
+    TemplateValidationError,
 )
 from .validation_config import ValidationConfig
 from .validation_metrics import (
-    timed_validation,
-    get_metrics_collector,
     ValidationMetrics,
+    get_metrics_collector,
+    timed_validation,
     validation_timeout,
 )

@@ -46,18 +46,35 @@ class TemplateValidationStrategy(ABC):
     """Abstract base class for template validation strategies."""

     @abstractmethod
-    def validate(self, template_source: str, template_vars: Dict[str, Any]) -> None:
+    def validate(
+        self, template_source: str, template_vars: Dict[str, Any]
+    ) -> "ValidationMetrics":
         """Validate template with given variables.

         Args:
             template_source: The template source code
             template_vars: Variables to validate against

+        Returns:
+            ValidationMetrics: Metrics about the validation process
+
         Raises:
             TemplateError: If validation fails
         """
         pass

+    @abstractmethod
+    def sanitize_variables(self, variables: Dict[str, Any]) -> Dict[str, Any]:
+        """Sanitize template variables for safe processing.
+
+        Args:
+            variables: Raw template variables
+
+        Returns:
+            Dict[str, Any]: Sanitized variables safe for template processing
+        """
+        pass
+

 class SimpleTemplateValidator:
     """Simplified template validator with essential security features.
@@ -74,7 +91,9 @@ class SimpleTemplateValidator:
             get_metrics_collector() if config and config.enable_metrics else None
         )

-    def validate(self, template_source: str, template_vars: Dict[str, Any]) -> None:
+    def validate(
+        self, template_source: str, template_vars: Dict[str, Any]
+    ) -> "ValidationMetrics":
         """Validate template with essential security checks.

         Args:
@@ -129,6 +148,16 @@ class SimpleTemplateValidator:
             if self._metrics_collector:
                 self._metrics_collector.record_validation(True)

+            # Return success metrics
+            return ValidationMetrics(
+                strategy_used="simple",
+                validation_time_ms=0,  # Not tracked in simple validator
+                template_size_bytes=template_size,
+                variables_count=len(template_vars),
+                undefined_variables_found=0,
+                fallback_used=False,
+            )
+
         except (
             TemplateSyntaxError,
             TemplateValidationError,
@@ -142,7 +171,9 @@ class SimpleTemplateValidator:
                 self._metrics_collector.record_validation(False, type(e).__name__)
             raise TemplateValidationError(f"Template validation error: {e}")

-    def _find_optional_variables(self, template_source: str, missing_vars: set) -> set:
+    def _find_optional_variables(
+        self, template_source: str, missing_vars: Set[str]
+    ) -> Set[str]:
         """Find variables that are optional (have default filters or are in conditionals)."""
         import re

@@ -173,6 +204,27 @@ class SimpleTemplateValidator:

         return optional_vars

+    def sanitize_variables(self, variables: Dict[str, Any]) -> Dict[str, Any]:
+        """Sanitize template variables for security.
+
+        Args:
+            variables: Raw template variables
+
+        Returns:
+            Sanitized variables
+        """
+        # Basic sanitization - remove potentially dangerous values
+        sanitized = {}
+        for key, value in variables.items():
+            if isinstance(value, str):
+                # Remove null bytes and control characters
+                sanitized[key] = "".join(
+                    char for char in value if ord(char) >= 32 or char in "\t\n\r"
+                )
+            else:
+                sanitized[key] = value
+        return sanitized
+

 class RuntimeValidator(TemplateValidationStrategy):
     """Validates templates by attempting to render them.
@@ -192,13 +244,18 @@ class RuntimeValidator(TemplateValidationStrategy):
         self.config = config or ValidationConfig()
         self._metrics_collector = get_metrics_collector()

-    def validate(self, template_source: str, template_vars: Dict[str, Any]) -> None:
+    def validate(
+        self, template_source: str, template_vars: Dict[str, Any]
+    ) -> ValidationMetrics:
         """Validate by attempting to render the template.

         Args:
             template_source: The template source code
             template_vars: Variables to use for rendering

+        Returns:
+            ValidationMetrics: Metrics from the validation process
+
         Raises:
             TemplateSyntaxError: If template has syntax errors
             TemplateUndefinedVariableError: If undefined variables are encountered
@@ -254,6 +311,29 @@ class RuntimeValidator(TemplateValidationStrategy):
                 self._metrics_collector.record_validation(False, type(e).__name__)
             raise TemplateValidationError(f"Template rendering error: {e}")

+        return metrics
+
+    def sanitize_variables(self, variables: Dict[str, Any]) -> Dict[str, Any]:
+        """Sanitize template variables for safe processing.
+
+        Args:
+            variables: Raw template variables
+
+        Returns:
+            Dict[str, Any]: Sanitized variables safe for template processing
+        """
+        # Basic sanitization - remove potentially dangerous values
+        sanitized = {}
+        for key, value in variables.items():
+            if isinstance(value, str):
+                # Remove null bytes and control characters
+                sanitized[key] = "".join(
+                    char for char in value if ord(char) >= 32 or char in "\t\n\r"
+                )
+            else:
+                sanitized[key] = value
+        return sanitized
+

 class TemplateSourceResolver:
     """Resolves template source from various inputs."""
@@ -430,6 +510,26 @@ class TemplateValidator:
         """Reset validation metrics."""
         self._metrics_collector.reset()

+    def sanitize_variables(self, variables: Dict[str, Any]) -> Dict[str, Any]:
+        """Sanitize template variables for safe processing.
+
+        Args:
+            variables: Raw template variables
+
+        Returns:
+            Dict[str, Any]: Sanitized variables safe for template processing
+        """
+        # Use the static validator's sanitize method if available
+        if hasattr(self.static_validator, "sanitize_variables"):
+            return self.static_validator.sanitize_variables(variables)
+
+        # Basic sanitization fallback
+        sanitized = {}
+        for key, value in variables.items():
+            if isinstance(key, str) and key.isidentifier():
+                sanitized[key] = value
+        return sanitized
+

 class TemplateValidatorFactory:
     """Factory for creating template validators with different configurations.
fast_clean_architecture/generators/validation_config.py

@@ -1,8 +1,8 @@
 """Configuration classes for template validation."""

 from dataclasses import dataclass
-from typing import Optional, Dict, Any
 from enum import Enum
+from typing import Any, Dict, Literal, Optional, Union


 class ValidationStrategy(Enum):
@@ -39,9 +39,11 @@ class ValidationConfig:

     # Basic monitoring
     enable_metrics: bool = False  # Disabled by default to reduce complexity
-    log_level: str = "WARNING"  # Reduced logging
+    log_level: Literal["DEBUG", "INFO", "WARNING", "ERROR"] = (
+        "WARNING"  # Reduced logging
+    )

-    def __post_init__(self):
+    def __post_init__(self) -> None:
         """Validate configuration after initialization."""
         if self.max_variable_nesting_depth < 1:
             raise ValueError("max_variable_nesting_depth must be at least 1")
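For context, the `log_level` field is now constrained to a `Literal` of the four accepted level names while keeping the same `"WARNING"` default. The snippet below is an illustrative sketch, not part of the diff; it assumes the remaining `ValidationConfig` fields have defaults, and it uses `max_variable_nesting_depth`, which `__post_init__` validates above.

```python
# Illustrative sketch only -- not part of this diff.
from fast_clean_architecture.generators.validation_config import ValidationConfig

# log_level is now typed as Literal["DEBUG", "INFO", "WARNING", "ERROR"],
# so type checkers reject any other string.
config = ValidationConfig(enable_metrics=True, log_level="DEBUG")

# __post_init__ still enforces numeric limits at runtime.
try:
    ValidationConfig(max_variable_nesting_depth=0)
except ValueError as exc:
    print(exc)  # max_variable_nesting_depth must be at least 1
```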
fast_clean_architecture/generators/validation_metrics.py

@@ -1,15 +1,17 @@
 """Performance monitoring and metrics collection for template validation."""

-import time
 import logging
 import threading
+import time
 from contextlib import contextmanager
-from typing import Dict, Any, Optional, Generator
 from dataclasses import dataclass, field
 from threading import Lock
+from typing import Any, Dict, Generator, Optional

 from .validation_config import ValidationMetrics

+__all__ = ["ValidationMetrics", "ValidationStats", "ValidationMetricsCollector"]
+
 logger = logging.getLogger(__name__)


@@ -33,7 +35,7 @@ class ValidationStats:
         metrics: ValidationMetrics,
         success: bool,
         error_type: Optional[str] = None,
-    ):
+    ) -> None:
         """Update statistics with new validation metrics."""
         with self._lock:
             self.total_validations += 1
@@ -107,10 +109,12 @@ class ValidationStats:
 class ValidationMetricsCollector:
     """Simplified collector for validation metrics."""

-    def __init__(self):
+    def __init__(self) -> None:
         self.stats = ValidationStats()

-    def record_validation(self, success: bool, error_type: Optional[str] = None):
+    def record_validation(
+        self, success: bool, error_type: Optional[str] = None
+    ) -> None:
         """Record a validation operation.

         Args:
@@ -178,7 +182,7 @@ def timed_validation(


 @contextmanager
-def validation_timeout(timeout_seconds: float):
+def validation_timeout(timeout_seconds: float) -> Generator[None, None, None]:
     """Context manager for validation timeout (placeholder for future implementation)."""
     # Note: This is a placeholder. Full timeout implementation would require
     # threading or async support, which depends on the application architecture.
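These hunks mostly add return-type annotations and an explicit `__all__`. As a quick orientation, the sketch below shows how the collector is used; it is not part of the diff, and it assumes `get_metrics_collector()` returns a shared `ValidationMetricsCollector` instance (it is imported that way in template_validator.py above).

```python
# Illustrative sketch only -- not part of this diff.
from fast_clean_architecture.generators.validation_metrics import get_metrics_collector

collector = get_metrics_collector()  # assumed module-level shared collector
collector.record_validation(True)
collector.record_validation(False, error_type="TemplateSyntaxError")

# ValidationStats updates its counters under a lock, so this is thread-safe.
print(collector.stats.total_validations)  # 2
```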
fast_clean_architecture/health.py (new file)

@@ -0,0 +1,169 @@
+"""Health monitoring for fast-clean-architecture."""
+
+import os
+import sys
+import time
+from pathlib import Path
+from typing import Any, Dict, Optional
+
+import psutil  # type: ignore
+
+from .logging_config import get_logger
+
+# Set up logger
+logger = get_logger(__name__)
+
+
+class HealthMonitor:
+    """Monitor system health and resource usage."""
+
+    def __init__(self) -> None:
+        """Initialize health monitor."""
+        self.start_time = time.time()
+
+    def get_system_health(self) -> Dict[str, Any]:
+        """Get current system health metrics.
+
+        Returns:
+            Dictionary containing system health information
+        """
+        try:
+            # Get process information
+            process = psutil.Process()
+
+            # Memory usage
+            memory_info = process.memory_info()
+            memory_percent = process.memory_percent()
+
+            # CPU usage
+            cpu_percent = process.cpu_percent()
+
+            # System memory
+            system_memory = psutil.virtual_memory()
+
+            # Disk usage for current directory
+            disk_usage = psutil.disk_usage(".")
+
+            health_data = {
+                "timestamp": time.time(),
+                "uptime_seconds": time.time() - self.start_time,
+                "process": {
+                    "pid": os.getpid(),
+                    "memory_rss_mb": round(memory_info.rss / 1024 / 1024, 2),
+                    "memory_vms_mb": round(memory_info.vms / 1024 / 1024, 2),
+                    "memory_percent": round(memory_percent, 2),
+                    "cpu_percent": round(cpu_percent, 2),
+                },
+                "system": {
+                    "memory_total_gb": round(
+                        system_memory.total / 1024 / 1024 / 1024, 2
+                    ),
+                    "memory_available_gb": round(
+                        system_memory.available / 1024 / 1024 / 1024, 2
+                    ),
+                    "memory_used_percent": round(system_memory.percent, 2),
+                    "disk_total_gb": round(disk_usage.total / 1024 / 1024 / 1024, 2),
+                    "disk_free_gb": round(disk_usage.free / 1024 / 1024 / 1024, 2),
+                    "disk_used_percent": round(
+                        (disk_usage.used / disk_usage.total) * 100, 2
+                    ),
+                },
+                "python": {
+                    "version": sys.version,
+                    "executable": sys.executable,
+                },
+            }
+
+            return health_data
+
+        except Exception as e:
+            logger.error(
+                "Failed to get system health metrics",
+                operation="get_system_health",
+                error=str(e),
+                error_type=type(e).__name__,
+            )
+            return {"timestamp": time.time(), "error": str(e), "status": "unhealthy"}
+
+    def log_health_status(self) -> None:
+        """Log current health status."""
+        health_data = self.get_system_health()
+
+        if "error" in health_data:
+            logger.error(
+                "System health check failed", operation="health_check", **health_data
+            )
+        else:
+            logger.info("System health check", operation="health_check", **health_data)
+
+    def check_resource_limits(
+        self,
+        max_memory_mb: Optional[int] = None,
+        max_cpu_percent: Optional[float] = None,
+    ) -> bool:
+        """Check if resource usage is within limits.
+
+        Args:
+            max_memory_mb: Maximum memory usage in MB
+            max_cpu_percent: Maximum CPU usage percentage
+
+        Returns:
+            True if within limits, False otherwise
+        """
+        health_data = self.get_system_health()
+
+        if "error" in health_data:
+            return False
+
+        process_data = health_data.get("process", {})
+
+        # Check memory limit
+        if max_memory_mb and process_data.get("memory_rss_mb", 0) > max_memory_mb:
+            logger.warning(
+                "Memory usage exceeds limit",
+                operation="resource_check",
+                current_memory_mb=process_data.get("memory_rss_mb"),
+                limit_memory_mb=max_memory_mb,
+            )
+            return False
+
+        # Check CPU limit
+        if max_cpu_percent and process_data.get("cpu_percent", 0) > max_cpu_percent:
+            logger.warning(
+                "CPU usage exceeds limit",
+                operation="resource_check",
+                current_cpu_percent=process_data.get("cpu_percent"),
+                limit_cpu_percent=max_cpu_percent,
+            )
+            return False
+
+        return True
+
+
+# Global health monitor instance
+_health_monitor: Optional[HealthMonitor] = None
+
+
+def get_health_monitor() -> HealthMonitor:
+    """Get the global health monitor instance.
+
+    Returns:
+        Global HealthMonitor instance
+    """
+    global _health_monitor
+    if _health_monitor is None:
+        _health_monitor = HealthMonitor()
+    return _health_monitor
+
+
+def log_startup_health() -> None:
+    """Log health status at application startup."""
+    monitor = get_health_monitor()
+    monitor.log_health_status()
+
+    logger.info(
+        "Application started",
+        operation="startup",
+        pid=os.getpid(),
+        python_version=sys.version.split()[0],
+    )
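The new `health.py` module is shown in full above, so its public surface (`HealthMonitor`, `get_health_monitor()`, `log_startup_health()`) can be exercised directly. A usage sketch based only on that code; `psutil` must be installed, and the field names come from the dictionary built in `get_system_health()`.

```python
# Usage sketch for the new health module (requires psutil).
from fast_clean_architecture.health import get_health_monitor, log_startup_health

log_startup_health()  # logs an "Application started" event with pid and Python version

monitor = get_health_monitor()
snapshot = monitor.get_system_health()
if "error" not in snapshot:
    print(snapshot["process"]["memory_rss_mb"], snapshot["system"]["disk_used_percent"])

# Returns False and logs a warning if either limit is exceeded.
ok = monitor.check_resource_limits(max_memory_mb=512, max_cpu_percent=80.0)
```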
fast_clean_architecture/logging_config.py (new file)

@@ -0,0 +1,52 @@
+"""Structured logging configuration for fast-clean-architecture."""
+
+import sys
+from datetime import datetime, timezone
+from typing import Any, Dict, Optional
+
+import structlog
+from structlog.typing import EventDict
+
+
+def add_timestamp(logger: Any, method_name: str, event_dict: EventDict) -> EventDict:
+    """Add ISO timestamp to log events."""
+    event_dict["timestamp"] = datetime.now(timezone.utc).isoformat()
+    return event_dict
+
+
+def add_log_level(logger: Any, method_name: str, event_dict: EventDict) -> EventDict:
+    """Add log level to event dict."""
+    event_dict["level"] = method_name.upper()
+    return event_dict
+
+
+def configure_logging() -> None:
+    """Configure structured logging for the application."""
+    structlog.configure(
+        processors=[
+            structlog.contextvars.merge_contextvars,
+            add_timestamp,
+            add_log_level,
+            structlog.processors.add_log_level,
+            structlog.processors.StackInfoRenderer(),
+            structlog.dev.set_exc_info,
+            structlog.processors.JSONRenderer(),
+        ],
+        wrapper_class=structlog.make_filtering_bound_logger(30),  # INFO level
+        logger_factory=structlog.WriteLoggerFactory(file=sys.stdout),
+        cache_logger_on_first_use=True,
+    )
+
+
+def get_logger(name: Optional[str] = None) -> Any:
+    """Get a configured structured logger.
+
+    Args:
+        name: Logger name (typically __name__)
+
+    Returns:
+        Configured structured logger
+    """
+    if name:
+        return structlog.get_logger(name)
+    return structlog.get_logger()
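The logging module above configures structlog with a JSON renderer plus custom timestamp and level processors. A short usage sketch based on that code (not part of the diff); `structlog` must be installed.

```python
# Usage sketch for the new structured logging setup (requires structlog).
from fast_clean_architecture.logging_config import configure_logging, get_logger

configure_logging()
logger = get_logger(__name__)

# Keyword arguments become JSON fields alongside the timestamp and level
# added by the custom processors.
logger.info("component_created", operation="create_component", layer="domain")
```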
fast_clean_architecture/metrics.py (new file)

@@ -0,0 +1,108 @@
+"""Performance metrics for fast-clean-architecture."""
+
+import functools
+import time
+from typing import Any, Callable, Dict, Optional, TypeVar, cast
+
+import structlog
+from structlog.typing import EventDict
+
+from .logging_config import get_logger
+
+# Type variables for generic function decorators
+F = TypeVar("F", bound=Callable[..., Any])
+R = TypeVar("R")
+
+# Set up logger
+logger = get_logger(__name__)
+
+
+def measure_execution_time(operation_name: str) -> Callable[[F], F]:
+    """Decorator to measure and log execution time of functions.
+
+    Args:
+        operation_name: Name of the operation being measured
+
+    Returns:
+        Decorator function that measures execution time
+    """
+
+    def decorator(func: F) -> F:
+        @functools.wraps(func)
+        def wrapper(*args: Any, **kwargs: Any) -> Any:
+            start_time = time.time()
+
+            try:
+                result = func(*args, **kwargs)
+                execution_time = time.time() - start_time
+
+                # Log performance metrics
+                logger.info(
+                    f"{operation_name} completed",
+                    operation=operation_name,
+                    execution_time_ms=round(execution_time * 1000, 2),
+                    success=True,
+                )
+
+                return result
+            except Exception as e:
+                execution_time = time.time() - start_time
+
+                # Log performance metrics with error
+                logger.error(
+                    f"{operation_name} failed",
+                    operation=operation_name,
+                    execution_time_ms=round(execution_time * 1000, 2),
+                    error=str(e),
+                    error_type=type(e).__name__,
+                    success=False,
+                )
+
+                # Re-raise the exception
+                raise
+
+        return cast(F, wrapper)
+
+    return decorator
+
+
+class PerformanceTracker:
+    """Context manager for tracking performance of code blocks."""
+
+    def __init__(self, operation_name: str, **context: Any):
+        """Initialize performance tracker.
+
+        Args:
+            operation_name: Name of the operation being tracked
+            context: Additional context to include in logs
+        """
+        self.operation_name = operation_name
+        self.context = context
+        self.start_time: float = 0
+
+    def __enter__(self) -> "PerformanceTracker":
+        self.start_time = time.time()
+        return self
+
+    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
+        execution_time = time.time() - self.start_time
+
+        # Prepare log context
+        log_context = {
+            "operation": self.operation_name,
+            "execution_time_ms": round(execution_time * 1000, 2),
+            "success": exc_type is None,
+            **self.context,
+        }
+
+        if exc_type is None:
+            # Log successful completion
+            logger.info(f"{self.operation_name} completed", **log_context)
+        else:
+            # Log error
+            logger.error(
+                f"{self.operation_name} failed",
+                error=str(exc_val),
+                error_type=exc_type.__name__,
+                **log_context,
+            )
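The new `metrics.py` module is shown in full above: a `measure_execution_time` decorator for functions and a `PerformanceTracker` context manager for arbitrary code blocks, both logging `execution_time_ms` and success/error fields. A usage sketch based only on that code (not part of the diff):

```python
# Usage sketch for the new metrics helpers.
from fast_clean_architecture.metrics import PerformanceTracker, measure_execution_time

@measure_execution_time("render_template")
def render_template(name: str) -> str:
    return f"rendered {name}"

render_template("user_entity")  # logs execution_time_ms and success=True

# The context-manager form logs the same fields plus any extra keyword context.
with PerformanceTracker("write_files", module="users", files=3):
    pass  # ... file generation work ...
```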