empathy-framework 4.7.1__py3-none-any.whl → 4.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/METADATA +65 -2
  2. {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/RECORD +73 -52
  3. {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/WHEEL +1 -1
  4. {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/entry_points.txt +2 -1
  5. {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/top_level.txt +0 -1
  6. empathy_os/__init__.py +2 -0
  7. empathy_os/cache/hash_only.py +6 -3
  8. empathy_os/cache/hybrid.py +6 -3
  9. empathy_os/cli/__init__.py +128 -238
  10. empathy_os/cli/__main__.py +5 -33
  11. empathy_os/cli/commands/__init__.py +1 -8
  12. empathy_os/cli/commands/help.py +331 -0
  13. empathy_os/cli/commands/info.py +140 -0
  14. empathy_os/cli/commands/inspect.py +437 -0
  15. empathy_os/cli/commands/metrics.py +92 -0
  16. empathy_os/cli/commands/orchestrate.py +184 -0
  17. empathy_os/cli/commands/patterns.py +207 -0
  18. empathy_os/cli/commands/provider.py +93 -81
  19. empathy_os/cli/commands/setup.py +96 -0
  20. empathy_os/cli/commands/status.py +235 -0
  21. empathy_os/cli/commands/sync.py +166 -0
  22. empathy_os/cli/commands/tier.py +121 -0
  23. empathy_os/cli/commands/workflow.py +574 -0
  24. empathy_os/cli/parsers/__init__.py +62 -0
  25. empathy_os/cli/parsers/help.py +41 -0
  26. empathy_os/cli/parsers/info.py +26 -0
  27. empathy_os/cli/parsers/inspect.py +66 -0
  28. empathy_os/cli/parsers/metrics.py +42 -0
  29. empathy_os/cli/parsers/orchestrate.py +61 -0
  30. empathy_os/cli/parsers/patterns.py +54 -0
  31. empathy_os/cli/parsers/provider.py +40 -0
  32. empathy_os/cli/parsers/setup.py +42 -0
  33. empathy_os/cli/parsers/status.py +47 -0
  34. empathy_os/cli/parsers/sync.py +31 -0
  35. empathy_os/cli/parsers/tier.py +33 -0
  36. empathy_os/cli/parsers/workflow.py +77 -0
  37. empathy_os/cli/utils/__init__.py +1 -0
  38. empathy_os/cli/utils/data.py +242 -0
  39. empathy_os/cli/utils/helpers.py +68 -0
  40. empathy_os/{cli.py → cli_legacy.py} +27 -27
  41. empathy_os/cli_minimal.py +662 -0
  42. empathy_os/cli_router.py +384 -0
  43. empathy_os/cli_unified.py +38 -2
  44. empathy_os/memory/__init__.py +19 -5
  45. empathy_os/memory/short_term.py +14 -404
  46. empathy_os/memory/types.py +437 -0
  47. empathy_os/memory/unified.py +61 -48
  48. empathy_os/models/fallback.py +1 -1
  49. empathy_os/models/provider_config.py +59 -344
  50. empathy_os/models/registry.py +31 -180
  51. empathy_os/monitoring/alerts.py +14 -20
  52. empathy_os/monitoring/alerts_cli.py +24 -7
  53. empathy_os/project_index/__init__.py +2 -0
  54. empathy_os/project_index/index.py +210 -5
  55. empathy_os/project_index/scanner.py +45 -14
  56. empathy_os/project_index/scanner_parallel.py +291 -0
  57. empathy_os/socratic/ab_testing.py +1 -1
  58. empathy_os/workflows/__init__.py +31 -2
  59. empathy_os/workflows/base.py +349 -325
  60. empathy_os/workflows/bug_predict.py +8 -0
  61. empathy_os/workflows/builder.py +273 -0
  62. empathy_os/workflows/caching.py +253 -0
  63. empathy_os/workflows/code_review_pipeline.py +1 -0
  64. empathy_os/workflows/history.py +510 -0
  65. empathy_os/workflows/output.py +410 -0
  66. empathy_os/workflows/perf_audit.py +125 -19
  67. empathy_os/workflows/progress.py +324 -22
  68. empathy_os/workflows/routing.py +168 -0
  69. empathy_os/workflows/secure_release.py +1 -0
  70. empathy_os/workflows/security_audit.py +190 -0
  71. empathy_os/workflows/security_audit_phase3.py +328 -0
  72. empathy_os/workflows/telemetry_mixin.py +269 -0
  73. empathy_os/dashboard/__init__.py +0 -15
  74. empathy_os/dashboard/server.py +0 -941
  75. patterns/README.md +0 -119
  76. patterns/__init__.py +0 -95
  77. patterns/behavior.py +0 -298
  78. patterns/code_review_memory.json +0 -441
  79. patterns/core.py +0 -97
  80. patterns/debugging.json +0 -3763
  81. patterns/empathy.py +0 -268
  82. patterns/health_check_memory.json +0 -505
  83. patterns/input.py +0 -161
  84. patterns/memory_graph.json +0 -8
  85. patterns/refactoring_memory.json +0 -1113
  86. patterns/registry.py +0 -663
  87. patterns/security_memory.json +0 -8
  88. patterns/structural.py +0 -415
  89. patterns/validation.py +0 -194
  90. {empathy_framework-4.7.1.dist-info → empathy_framework-4.8.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,269 @@
1
"""Telemetry Mixin for Workflow LLM Call Tracking

Extracted from BaseWorkflow to improve maintainability and reusability.
Provides telemetry tracking for LLM calls and workflow executions.

Copyright 2025 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

from __future__ import annotations

import logging
import uuid
from datetime import datetime
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    from empathy_os.models import TelemetryBackend

logger = logging.getLogger(__name__)

# UsageTracker is an optional dependency: when the telemetry package is not
# installed, fall back to a disabled-telemetry mode instead of failing import.
try:
    from empathy_os.telemetry import UsageTracker

    TELEMETRY_AVAILABLE = True
except ImportError:
    TELEMETRY_AVAILABLE = False
    UsageTracker = None  # type: ignore
34
class TelemetryMixin:
    """Mixin that provides telemetry tracking for workflow LLM calls.

    This mixin extracts telemetry logic from BaseWorkflow to improve
    maintainability and enable reuse in other contexts. Telemetry is
    best-effort diagnostics: every tracking/emit path is guarded so that
    a telemetry failure can never crash the host workflow.

    Attributes:
        _telemetry_backend: Backend for storing telemetry records
        _telemetry_tracker: UsageTracker singleton for tracking
        _enable_telemetry: Whether telemetry is enabled
        _run_id: Current workflow run ID for correlation

    Usage:
        class MyWorkflow(TelemetryMixin, BaseWorkflow):
            pass

        # TelemetryMixin methods are now available
        workflow._track_telemetry(...)
        workflow._emit_call_telemetry(...)
        workflow._emit_workflow_telemetry(...)
    """

    # Instance variables (set by __init__ or subclass)
    _telemetry_backend: TelemetryBackend | None = None
    _telemetry_tracker: UsageTracker | None = None
    _enable_telemetry: bool = True
    _run_id: str | None = None

    # These must be provided by the class using this mixin
    name: str = "unknown"
    _provider_str: str = "unknown"

    def _init_telemetry(self, telemetry_backend: TelemetryBackend | None = None) -> None:
        """Initialize telemetry tracking.

        Call this from __init__ to set up telemetry.

        Args:
            telemetry_backend: Optional backend for storing telemetry records.
                Defaults to TelemetryStore (JSONL file backend).
        """
        from empathy_os.models import get_telemetry_store

        self._telemetry_backend = telemetry_backend or get_telemetry_store()
        self._telemetry_tracker = None
        self._enable_telemetry = True

        if TELEMETRY_AVAILABLE and UsageTracker is not None:
            try:
                self._telemetry_tracker = UsageTracker.get_instance()
            except (OSError, PermissionError) as e:
                # File system errors - log but disable telemetry
                logger.debug(f"Failed to initialize telemetry tracker (file system error): {e}")
                self._enable_telemetry = False
            except (AttributeError, TypeError, ValueError) as e:
                # Configuration or initialization errors
                logger.debug(f"Failed to initialize telemetry tracker (config error): {e}")
                self._enable_telemetry = False

    def _track_telemetry(
        self,
        stage: str,
        tier: Any,  # ModelTier
        model: str,
        cost: float,
        tokens: dict[str, int],
        cache_hit: bool,
        cache_type: str | None,
        duration_ms: int,
    ) -> None:
        """Track telemetry for an LLM call via the UsageTracker singleton.

        Args:
            stage: Stage name
            tier: Model tier used (ModelTier enum, or anything str()-able)
            model: Model ID used
            cost: Cost in USD
            tokens: Dictionary with "input" and "output" token counts
            cache_hit: Whether this was a cache hit
            cache_type: Cache type if cache hit
            duration_ms: Duration in milliseconds
        """
        if not self._enable_telemetry or self._telemetry_tracker is None:
            return

        try:
            provider_str = getattr(self, "_provider_str", "unknown")
            self._telemetry_tracker.track_llm_call(
                workflow=self.name,
                stage=stage,
                # Accept either a ModelTier enum (has .value) or a plain string.
                tier=tier.value.upper() if hasattr(tier, "value") else str(tier).upper(),
                model=model,
                provider=provider_str,
                cost=cost,
                tokens=tokens,
                cache_hit=cache_hit,
                cache_type=cache_type,
                duration_ms=duration_ms,
            )
        except (AttributeError, TypeError, ValueError) as e:
            # INTENTIONAL: Telemetry tracking failures should never crash workflows
            logger.debug(f"Failed to track telemetry (config/data error): {e}")
        except (OSError, PermissionError) as e:
            # File system errors - log but never crash workflow
            logger.debug(f"Failed to track telemetry (file system error): {e}")
        except Exception:  # noqa: BLE001
            # INTENTIONAL: broad fallback for consistency with the _emit_* methods -
            # telemetry is optional diagnostics and must never crash the workflow.
            logger.debug("Unexpected error tracking telemetry", exc_info=True)

    def _emit_call_telemetry(
        self,
        step_name: str,
        task_type: str,
        tier: str,
        model_id: str,
        input_tokens: int,
        output_tokens: int,
        cost: float,
        latency_ms: int,
        success: bool = True,
        error_message: str | None = None,
        fallback_used: bool = False,
    ) -> None:
        """Emit an LLMCallRecord to the telemetry backend.

        Args:
            step_name: Name of the workflow step
            task_type: Task type used for routing
            tier: Model tier used
            model_id: Model ID used
            input_tokens: Input token count
            output_tokens: Output token count
            cost: Estimated cost
            latency_ms: Latency in milliseconds
            success: Whether the call succeeded
            error_message: Error message if failed
            fallback_used: Whether fallback was used
        """
        try:
            # FIX: the import and record construction previously sat outside the
            # try block, so an ImportError or a record validation error would
            # crash the workflow - contradicting the "never crash" contract below.
            from empathy_os.models import LLMCallRecord

            record = LLMCallRecord(
                call_id=str(uuid.uuid4()),
                # NOTE(review): naive local-time timestamp - confirm whether
                # consumers expect UTC here.
                timestamp=datetime.now().isoformat(),
                workflow_name=self.name,
                step_name=step_name,
                task_type=task_type,
                provider=getattr(self, "_provider_str", "unknown"),
                tier=tier,
                model_id=model_id,
                input_tokens=input_tokens,
                output_tokens=output_tokens,
                estimated_cost=cost,
                latency_ms=latency_ms,
                success=success,
                error_message=error_message,
                fallback_used=fallback_used,
                metadata={"run_id": self._run_id},
            )
            if self._telemetry_backend is not None:
                self._telemetry_backend.log_call(record)
        except (AttributeError, ValueError, TypeError):
            # Telemetry backend errors - log but don't crash workflow
            logger.debug("Failed to log call telemetry (backend error)")
        except OSError:
            # File system errors - log but don't crash workflow
            logger.debug("Failed to log call telemetry (file system error)")
        except Exception:  # noqa: BLE001
            # INTENTIONAL: Telemetry is optional diagnostics - never crash workflow
            logger.debug("Unexpected error logging call telemetry")

    def _emit_workflow_telemetry(self, result: Any) -> None:
        """Emit a WorkflowRunRecord to the telemetry backend.

        Args:
            result: The WorkflowResult to record
        """
        try:
            # FIX: stage/record construction previously sat outside the try
            # block, so an AttributeError (e.g. result.started_at is None) or a
            # validation error would crash the workflow despite the "never
            # crash" contract below.
            from empathy_os.models import WorkflowRunRecord, WorkflowStageRecord

            # Build one stage record per executed (or skipped) stage.
            stages = [
                WorkflowStageRecord(
                    stage_name=s.name,
                    tier=s.tier.value if hasattr(s.tier, "value") else str(s.tier),
                    model_id=(
                        self.get_model_for_tier(s.tier)
                        if hasattr(self, "get_model_for_tier")
                        else "unknown"
                    ),
                    input_tokens=s.input_tokens,
                    output_tokens=s.output_tokens,
                    cost=s.cost,
                    latency_ms=s.duration_ms,
                    success=not s.skipped and result.error is None,
                    skipped=s.skipped,
                    skip_reason=s.skip_reason,
                )
                for s in result.stages
            ]

            record = WorkflowRunRecord(
                run_id=self._run_id or str(uuid.uuid4()),
                workflow_name=self.name,
                started_at=result.started_at.isoformat(),
                completed_at=result.completed_at.isoformat(),
                stages=stages,
                total_input_tokens=sum(s.input_tokens for s in result.stages if not s.skipped),
                total_output_tokens=sum(s.output_tokens for s in result.stages if not s.skipped),
                total_cost=result.cost_report.total_cost,
                baseline_cost=result.cost_report.baseline_cost,
                savings=result.cost_report.savings,
                savings_percent=result.cost_report.savings_percent,
                total_duration_ms=result.total_duration_ms,
                success=result.success,
                error=result.error,
                providers_used=[getattr(self, "_provider_str", "unknown")],
                tiers_used=list(result.cost_report.by_tier.keys()),
            )
            if self._telemetry_backend is not None:
                self._telemetry_backend.log_workflow(record)
        except (AttributeError, ValueError, TypeError):
            # Telemetry backend errors - log but don't crash workflow
            logger.debug("Failed to log workflow telemetry (backend error)")
        except OSError:
            # File system errors - log but don't crash workflow
            logger.debug("Failed to log workflow telemetry (file system error)")
        except Exception:  # noqa: BLE001
            # INTENTIONAL: Telemetry is optional diagnostics - never crash workflow
            logger.debug("Unexpected error logging workflow telemetry")

    def _generate_run_id(self) -> str:
        """Generate a new run ID for telemetry correlation.

        Returns:
            A new UUID string for the run
        """
        self._run_id = str(uuid.uuid4())
        return self._run_id
@@ -1,15 +0,0 @@
1
"""Visual Dashboard for Empathy Framework.

Serves a web-based view of patterns, costs, and health trends.

Usage:
    empathy dashboard
    # Opens browser to http://localhost:8765

Copyright 2025 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

from empathy_os.dashboard.server import cmd_dashboard, run_dashboard

__all__ = [
    "cmd_dashboard",
    "run_dashboard",
]