empathy-framework 4.7.0-py3-none-any.whl → 4.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. empathy_framework-4.8.0.dist-info/METADATA +753 -0
  2. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/RECORD +83 -37
  3. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/WHEEL +1 -1
  4. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/entry_points.txt +2 -1
  5. empathy_os/__init__.py +2 -0
  6. empathy_os/cache/hash_only.py +6 -3
  7. empathy_os/cache/hybrid.py +6 -3
  8. empathy_os/cli/__init__.py +128 -238
  9. empathy_os/cli/__main__.py +5 -33
  10. empathy_os/cli/commands/__init__.py +1 -8
  11. empathy_os/cli/commands/help.py +331 -0
  12. empathy_os/cli/commands/info.py +140 -0
  13. empathy_os/cli/commands/inspect.py +437 -0
  14. empathy_os/cli/commands/metrics.py +92 -0
  15. empathy_os/cli/commands/orchestrate.py +184 -0
  16. empathy_os/cli/commands/patterns.py +207 -0
  17. empathy_os/cli/commands/provider.py +93 -81
  18. empathy_os/cli/commands/setup.py +96 -0
  19. empathy_os/cli/commands/status.py +235 -0
  20. empathy_os/cli/commands/sync.py +166 -0
  21. empathy_os/cli/commands/tier.py +121 -0
  22. empathy_os/cli/commands/workflow.py +574 -0
  23. empathy_os/cli/parsers/__init__.py +62 -0
  24. empathy_os/cli/parsers/help.py +41 -0
  25. empathy_os/cli/parsers/info.py +26 -0
  26. empathy_os/cli/parsers/inspect.py +66 -0
  27. empathy_os/cli/parsers/metrics.py +42 -0
  28. empathy_os/cli/parsers/orchestrate.py +61 -0
  29. empathy_os/cli/parsers/patterns.py +54 -0
  30. empathy_os/cli/parsers/provider.py +40 -0
  31. empathy_os/cli/parsers/setup.py +42 -0
  32. empathy_os/cli/parsers/status.py +47 -0
  33. empathy_os/cli/parsers/sync.py +31 -0
  34. empathy_os/cli/parsers/tier.py +33 -0
  35. empathy_os/cli/parsers/workflow.py +77 -0
  36. empathy_os/cli/utils/__init__.py +1 -0
  37. empathy_os/cli/utils/data.py +242 -0
  38. empathy_os/cli/utils/helpers.py +68 -0
  39. empathy_os/{cli.py → cli_legacy.py} +27 -27
  40. empathy_os/cli_minimal.py +662 -0
  41. empathy_os/cli_router.py +384 -0
  42. empathy_os/cli_unified.py +38 -2
  43. empathy_os/memory/__init__.py +19 -5
  44. empathy_os/memory/short_term.py +14 -404
  45. empathy_os/memory/types.py +437 -0
  46. empathy_os/memory/unified.py +61 -48
  47. empathy_os/models/fallback.py +1 -1
  48. empathy_os/models/provider_config.py +59 -344
  49. empathy_os/models/registry.py +31 -180
  50. empathy_os/monitoring/alerts.py +14 -20
  51. empathy_os/monitoring/alerts_cli.py +24 -7
  52. empathy_os/project_index/__init__.py +2 -0
  53. empathy_os/project_index/index.py +210 -5
  54. empathy_os/project_index/scanner.py +45 -14
  55. empathy_os/project_index/scanner_parallel.py +291 -0
  56. empathy_os/socratic/ab_testing.py +1 -1
  57. empathy_os/vscode_bridge 2.py +173 -0
  58. empathy_os/workflows/__init__.py +31 -2
  59. empathy_os/workflows/base.py +349 -325
  60. empathy_os/workflows/bug_predict.py +8 -0
  61. empathy_os/workflows/builder.py +273 -0
  62. empathy_os/workflows/caching.py +253 -0
  63. empathy_os/workflows/code_review_pipeline.py +1 -0
  64. empathy_os/workflows/history.py +510 -0
  65. empathy_os/workflows/output.py +410 -0
  66. empathy_os/workflows/perf_audit.py +125 -19
  67. empathy_os/workflows/progress.py +324 -22
  68. empathy_os/workflows/progressive/README 2.md +454 -0
  69. empathy_os/workflows/progressive/__init__ 2.py +92 -0
  70. empathy_os/workflows/progressive/cli 2.py +242 -0
  71. empathy_os/workflows/progressive/core 2.py +488 -0
  72. empathy_os/workflows/progressive/orchestrator 2.py +701 -0
  73. empathy_os/workflows/progressive/reports 2.py +528 -0
  74. empathy_os/workflows/progressive/telemetry 2.py +280 -0
  75. empathy_os/workflows/progressive/test_gen 2.py +514 -0
  76. empathy_os/workflows/progressive/workflow 2.py +628 -0
  77. empathy_os/workflows/routing.py +168 -0
  78. empathy_os/workflows/secure_release.py +1 -0
  79. empathy_os/workflows/security_audit.py +190 -0
  80. empathy_os/workflows/security_audit_phase3.py +328 -0
  81. empathy_os/workflows/telemetry_mixin.py +269 -0
  82. empathy_framework-4.7.0.dist-info/METADATA +0 -1598
  83. empathy_os/dashboard/__init__.py +0 -15
  84. empathy_os/dashboard/server.py +0 -941
  85. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/licenses/LICENSE +0 -0
  86. {empathy_framework-4.7.0.dist-info → empathy_framework-4.8.0.dist-info}/top_level.txt +0 -0
empathy_os/workflows/bug_predict.py
@@ -463,6 +463,14 @@ class BugPredictionWorkflow(BaseWorkflow):
         """
         super().__init__(**kwargs)

+        # Create instance-level tier_map to prevent class-level mutation
+        self.tier_map = {
+            "scan": ModelTier.CHEAP,
+            "correlate": ModelTier.CAPABLE,
+            "predict": ModelTier.CAPABLE,
+            "recommend": ModelTier.PREMIUM,
+        }
+
         # Load bug_predict config from empathy.config.yml
         self._bug_predict_config = _load_bug_predict_config()

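The instance-level assignment above matters because a mutable mapping declared at class level is shared by every instance: one workflow mutating its tier_map would silently change the tiers of every other instance. A minimal standalone sketch of the pitfall (illustrative names only, not the package's classes):

class SharedTiers:
    tier_map = {"scan": "cheap"}  # class attribute: one dict shared by all instances

a = SharedTiers()
b = SharedTiers()
a.tier_map["scan"] = "premium"          # mutates the shared dict
assert b.tier_map["scan"] == "premium"  # b sees a's change

class PerInstanceTiers:
    def __init__(self) -> None:
        self.tier_map = {"scan": "cheap"}  # fresh dict per instance, as in the change above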
empathy_os/workflows/builder.py (new file)
@@ -0,0 +1,273 @@
+"""Builder pattern for BaseWorkflow construction.
+
+Simplifies complex workflow configuration by providing a fluent API
+for setting optional parameters.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+from __future__ import annotations
+
+from collections.abc import Callable
+from typing import TYPE_CHECKING, Any, Generic, TypeVar
+
+if TYPE_CHECKING:
+    from empathy_os.cache import BaseCache
+    from empathy_os.models import LLMExecutor, TelemetryBackend, UnifiedModelProvider
+    from empathy_os.workflows.base import BaseWorkflow
+    from empathy_os.workflows.config import WorkflowConfig
+    from empathy_os.workflows.progress import ProgressCallback
+    from empathy_os.workflows.routing import TierRoutingStrategy
+    from empathy_os.workflows.tier_tracking import WorkflowTierTracker
+
+T = TypeVar("T", bound="BaseWorkflow")
+
+
+class WorkflowBuilder(Generic[T]):
+    """Builder for complex workflow configuration.
+
+    Provides a fluent API for constructing workflows with many optional parameters.
+    Eliminates the need to pass 12+ constructor arguments.
+
+    Example:
+        >>> from empathy_os.workflows.test_gen import TestGenerationWorkflow
+        >>> from empathy_os.workflows.builder import WorkflowBuilder
+        >>> from empathy_os.workflows.routing import BalancedRouting
+        >>>
+        >>> workflow = (
+        ...     WorkflowBuilder(TestGenerationWorkflow)
+        ...     .with_config(my_config)
+        ...     .with_routing(BalancedRouting(budget=10.0))
+        ...     .with_cache_enabled(True)
+        ...     .with_telemetry_enabled(True)
+        ...     .build()
+        ... )
+
+    Chaining methods:
+        - with_config() - Set workflow configuration
+        - with_executor() - Set custom LLM executor
+        - with_provider() - Set model provider
+        - with_cache() - Set custom cache instance
+        - with_cache_enabled() - Enable/disable caching
+        - with_telemetry() - Set custom telemetry backend
+        - with_telemetry_enabled() - Enable/disable telemetry
+        - with_progress_callback() - Set progress callback
+        - with_tier_tracker() - Set tier tracker
+        - with_routing() - Set routing strategy
+        - build() - Construct the workflow
+    """
+
+    def __init__(self, workflow_class: type[T]):
+        """Initialize builder for a specific workflow class.
+
+        Args:
+            workflow_class: The workflow class to build (e.g., TestGenerationWorkflow)
+        """
+        self.workflow_class = workflow_class
+
+        # Optional configuration
+        self._config: WorkflowConfig | None = None
+        self._executor: LLMExecutor | None = None
+        self._provider: UnifiedModelProvider | None = None
+        self._cache: BaseCache | None = None
+        self._enable_cache: bool = True
+        self._telemetry_backend: TelemetryBackend | None = None
+        self._enable_telemetry: bool = True
+        self._progress_callback: ProgressCallback | None = None
+        self._tier_tracker: WorkflowTierTracker | None = None
+        self._routing_strategy: TierRoutingStrategy | None = None
+
+    def with_config(self, config: WorkflowConfig) -> WorkflowBuilder[T]:
+        """Set workflow configuration.
+
+        Args:
+            config: WorkflowConfig instance with provider, models, etc.
+
+        Returns:
+            Self for method chaining
+        """
+        self._config = config
+        return self
+
+    def with_executor(self, executor: LLMExecutor) -> WorkflowBuilder[T]:
+        """Set custom LLM executor.
+
+        Args:
+            executor: LLMExecutor instance for making LLM calls
+
+        Returns:
+            Self for method chaining
+        """
+        self._executor = executor
+        return self
+
+    def with_provider(self, provider: UnifiedModelProvider) -> WorkflowBuilder[T]:
+        """Set model provider.
+
+        Args:
+            provider: ModelProvider enum (ANTHROPIC, OPENAI, GOOGLE)
+
+        Returns:
+            Self for method chaining
+        """
+        self._provider = provider
+        return self
+
+    def with_cache(self, cache: BaseCache) -> WorkflowBuilder[T]:
+        """Set custom cache instance.
+
+        Args:
+            cache: BaseCache instance for caching LLM responses
+
+        Returns:
+            Self for method chaining
+        """
+        self._cache = cache
+        return self
+
+    def with_cache_enabled(self, enabled: bool) -> WorkflowBuilder[T]:
+        """Enable or disable caching.
+
+        Args:
+            enabled: Whether to enable caching (default: True)
+
+        Returns:
+            Self for method chaining
+        """
+        self._enable_cache = enabled
+        return self
+
+    def with_telemetry(self, backend: TelemetryBackend) -> WorkflowBuilder[T]:
+        """Set custom telemetry backend.
+
+        Args:
+            backend: TelemetryBackend instance for tracking workflow runs
+
+        Returns:
+            Self for method chaining
+        """
+        self._telemetry_backend = backend
+        return self
+
+    def with_telemetry_enabled(self, enabled: bool) -> WorkflowBuilder[T]:
+        """Enable or disable telemetry.
+
+        Args:
+            enabled: Whether to enable telemetry (default: True)
+
+        Returns:
+            Self for method chaining
+        """
+        self._enable_telemetry = enabled
+        return self
+
+    def with_progress_callback(
+        self, callback: ProgressCallback | Callable[[str, int, int], None]
+    ) -> WorkflowBuilder[T]:
+        """Set progress callback for workflow execution.
+
+        Args:
+            callback: ProgressCallback instance or callable(stage, current, total)
+
+        Returns:
+            Self for method chaining
+        """
+        self._progress_callback = callback  # type: ignore
+        return self
+
+    def with_tier_tracker(self, tracker: WorkflowTierTracker) -> WorkflowBuilder[T]:
+        """Set tier tracker for learning tier progression.
+
+        Args:
+            tracker: WorkflowTierTracker instance
+
+        Returns:
+            Self for method chaining
+        """
+        self._tier_tracker = tracker
+        return self
+
+    def with_routing(self, strategy: TierRoutingStrategy) -> WorkflowBuilder[T]:
+        """Set tier routing strategy.
+
+        Args:
+            strategy: TierRoutingStrategy (CostOptimized, PerformanceOptimized, Balanced)
+
+        Returns:
+            Self for method chaining
+
+        Example:
+            >>> from empathy_os.workflows.routing import BalancedRouting
+            >>> builder.with_routing(BalancedRouting(budget=50.0))
+        """
+        self._routing_strategy = strategy
+        return self
+
+    def build(self) -> T:
+        """Build the configured workflow.
+
+        Returns:
+            Configured workflow instance ready for execution
+
+        Raises:
+            TypeError: If workflow_class constructor doesn't accept the provided parameters
+        """
+        # Build kwargs for constructor
+        kwargs: dict[str, Any] = {}
+
+        if self._config is not None:
+            kwargs["config"] = self._config
+
+        if self._executor is not None:
+            kwargs["executor"] = self._executor
+
+        if self._provider is not None:
+            kwargs["provider"] = self._provider
+
+        if self._cache is not None:
+            kwargs["cache"] = self._cache
+
+        kwargs["enable_cache"] = self._enable_cache
+
+        if self._telemetry_backend is not None:
+            kwargs["telemetry_backend"] = self._telemetry_backend
+
+        kwargs["enable_telemetry"] = self._enable_telemetry
+
+        if self._progress_callback is not None:
+            kwargs["progress_callback"] = self._progress_callback
+
+        if self._tier_tracker is not None:
+            kwargs["tier_tracker"] = self._tier_tracker
+
+        if self._routing_strategy is not None:
+            kwargs["routing_strategy"] = self._routing_strategy
+
+        # Construct workflow
+        return self.workflow_class(**kwargs)
+
+
+def workflow_builder(workflow_class: type[T]) -> WorkflowBuilder[T]:
+    """Factory function for creating workflow builders.
+
+    Convenience function for creating builders with cleaner syntax.
+
+    Args:
+        workflow_class: The workflow class to build
+
+    Returns:
+        WorkflowBuilder instance
+
+    Example:
+        >>> from empathy_os.workflows.builder import workflow_builder
+        >>> from empathy_os.workflows.test_gen import TestGenerationWorkflow
+        >>>
+        >>> workflow = (
+        ...     workflow_builder(TestGenerationWorkflow)
+        ...     .with_cache_enabled(True)
+        ...     .with_telemetry_enabled(False)
+        ...     .build()
+        ... )
+    """
+    return WorkflowBuilder(workflow_class)
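Because build() forwards only the explicitly set optionals (plus the two boolean flags), a builder chain reduces to a plain constructor call. A short sketch of that equivalence, assuming TestGenerationWorkflow accepts the enable_cache and enable_telemetry keywords that build() emits:

from empathy_os.workflows.builder import WorkflowBuilder
from empathy_os.workflows.test_gen import TestGenerationWorkflow

# Builder form: unset optionals are simply omitted from the kwargs.
wf_a = (
    WorkflowBuilder(TestGenerationWorkflow)
    .with_cache_enabled(False)
    .with_telemetry_enabled(False)
    .build()
)

# Direct form that build() produces under the hood:
wf_b = TestGenerationWorkflow(enable_cache=False, enable_telemetry=False)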
empathy_os/workflows/caching.py (new file)
@@ -0,0 +1,253 @@
+"""Caching Mixin for Workflow LLM Calls
+
+Extracted from BaseWorkflow to improve maintainability and reusability.
+Provides caching behavior for LLM calls with automatic cache setup.
+
+Copyright 2025 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable
+
+if TYPE_CHECKING:
+    from empathy_os.cache import BaseCache
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class CachedResponse:
+    """Cached LLM response data."""
+
+    content: str
+    input_tokens: int
+    output_tokens: int
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert to dictionary for cache storage."""
+        return {
+            "content": self.content,
+            "input_tokens": self.input_tokens,
+            "output_tokens": self.output_tokens,
+        }
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> CachedResponse:
+        """Create from dictionary (cache retrieval)."""
+        return cls(
+            content=data["content"],
+            input_tokens=data["input_tokens"],
+            output_tokens=data["output_tokens"],
+        )
+
+
+@runtime_checkable
+class CacheAwareWorkflow(Protocol):
+    """Protocol for workflows that support caching."""
+
+    name: str
+    _cache: BaseCache | None
+    _enable_cache: bool
+
+    def get_model_for_tier(self, tier: Any) -> str:
+        """Get model ID for a given tier."""
+        ...
+
+
+class CachingMixin:
+    """Mixin that provides caching behavior for LLM calls.
+
+    This mixin extracts caching logic from BaseWorkflow to improve
+    maintainability and enable reuse in other contexts.
+
+    Attributes:
+        _cache: Optional cache instance
+        _enable_cache: Whether caching is enabled
+        _cache_setup_attempted: Whether cache setup has been tried
+
+    Usage:
+        class MyWorkflow(CachingMixin, BaseWorkflow):
+            pass
+
+        # CachingMixin methods are now available
+        workflow._maybe_setup_cache()
+        cached = workflow._try_cache_lookup(...)
+        workflow._store_in_cache(...)
+    """
+
+    # Instance variables (set by __init__ or subclass)
+    _cache: BaseCache | None = None
+    _enable_cache: bool = True
+    _cache_setup_attempted: bool = False
+
+    # These must be provided by the class using this mixin
+    name: str = "unknown"
+
+    def _maybe_setup_cache(self) -> None:
+        """Set up cache with one-time user prompt if needed.
+
+        This is called lazily on first workflow execution to avoid
+        blocking workflow initialization.
+        """
+        if not self._enable_cache:
+            return
+
+        if self._cache_setup_attempted:
+            return
+
+        self._cache_setup_attempted = True
+
+        # If cache already provided, use it
+        if self._cache is not None:
+            return
+
+        # Import here to avoid circular imports
+        from empathy_os.cache import auto_setup_cache, create_cache
+
+        # Otherwise, trigger auto-setup (which may prompt user)
+        try:
+            auto_setup_cache()
+            self._cache = create_cache()
+            logger.info(f"Cache initialized for workflow: {self.name}")
+        except ImportError as e:
+            # Hybrid cache dependencies not available, fall back to hash-only
+            logger.info(
+                f"Using hash-only cache (install empathy-framework[cache] for semantic caching): {e}"
+            )
+            self._cache = create_cache(cache_type="hash")
+        except (OSError, PermissionError) as e:
+            # File system errors - disable cache
+            logger.warning(f"Cache setup failed (file system error): {e}, continuing without cache")
+            self._enable_cache = False
+        except (ValueError, TypeError, AttributeError) as e:
+            # Configuration errors - disable cache
+            logger.warning(f"Cache setup failed (config error): {e}, continuing without cache")
+            self._enable_cache = False
+
+    def _make_cache_key(self, system: str, user_message: str) -> str:
+        """Create cache key from system and user prompts.
+
+        Args:
+            system: System prompt
+            user_message: User message
+
+        Returns:
+            Combined prompt string for cache key
+        """
+        return f"{system}\n\n{user_message}" if system else user_message
+
+    def _try_cache_lookup(
+        self,
+        stage: str,
+        system: str,
+        user_message: str,
+        model: str,
+    ) -> CachedResponse | None:
+        """Try to retrieve a cached response.
+
+        Args:
+            stage: Stage name for cache key
+            system: System prompt
+            user_message: User message
+            model: Model ID
+
+        Returns:
+            CachedResponse if found, None otherwise
+        """
+        if not self._enable_cache or self._cache is None:
+            return None
+
+        try:
+            full_prompt = self._make_cache_key(system, user_message)
+            cached_data = self._cache.get(self.name, stage, full_prompt, model)
+
+            if cached_data is not None:
+                logger.debug(f"Cache hit for {self.name}:{stage}")
+                return CachedResponse.from_dict(cached_data)
+
+        except (KeyError, TypeError, ValueError) as e:
+            # Malformed cache data - continue with LLM call
+            logger.debug(f"Cache lookup failed (malformed data): {e}, continuing with LLM call")
+        except (OSError, PermissionError) as e:
+            # File system errors - continue with LLM call
+            logger.debug(f"Cache lookup failed (file system error): {e}, continuing with LLM call")
+
+        return None
+
+    def _store_in_cache(
+        self,
+        stage: str,
+        system: str,
+        user_message: str,
+        model: str,
+        response: CachedResponse,
+    ) -> bool:
+        """Store a response in the cache.
+
+        Args:
+            stage: Stage name for cache key
+            system: System prompt
+            user_message: User message
+            model: Model ID
+            response: Response to cache
+
+        Returns:
+            True if stored successfully, False otherwise
+        """
+        if not self._enable_cache or self._cache is None:
+            return False
+
+        try:
+            full_prompt = self._make_cache_key(system, user_message)
+            self._cache.put(self.name, stage, full_prompt, model, response.to_dict())
+            logger.debug(f"Cached response for {self.name}:{stage}")
+            return True
+        except (OSError, PermissionError) as e:
+            # File system errors - log but continue
+            logger.debug(f"Failed to cache response (file system error): {e}")
+        except (ValueError, TypeError, KeyError) as e:
+            # Data serialization errors - log but continue
+            logger.debug(f"Failed to cache response (serialization error): {e}")
+
+        return False
+
+    def _get_cache_type(self) -> str:
+        """Get the cache type for telemetry tracking.
+
+        Returns:
+            Cache type string (e.g., "hash", "semantic")
+        """
+        if self._cache is None:
+            return "none"
+
+        if hasattr(self._cache, "cache_type"):
+            ct = self._cache.cache_type
+            # Ensure it's a string (not a Mock object)
+            return str(ct) if ct and isinstance(ct, str) else "hash"
+
+        return "hash"  # Default assumption
+
+    def _get_cache_stats(self) -> dict[str, Any]:
+        """Get cache statistics for cost reporting.
+
+        Returns:
+            Dictionary with cache stats (hits, misses, hit_rate)
+        """
+        if self._cache is None:
+            return {"hits": 0, "misses": 0, "hit_rate": 0.0}
+
+        try:
+            stats = self._cache.get_stats()
+            return {
+                "hits": stats.hits,
+                "misses": stats.misses,
+                "hit_rate": stats.hit_rate,
+            }
+        except (AttributeError, TypeError) as e:
+            # Cache doesn't support stats
+            logger.debug(f"Cache stats not available: {e}")
+            return {"hits": 0, "misses": 0, "hit_rate": 0.0}
empathy_os/workflows/code_review_pipeline.py
@@ -283,6 +283,7 @@ class CodeReviewPipeline:
                _get_crew_review,
                crew_report_to_workflow_format,
            )
+
            crew_available = _check_crew_available()
        except ImportError:
            # Crew adapters removed - fall back to workflow only