empathy-framework 5.0.1-py3-none-any.whl → 5.0.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/METADATA +53 -9
  2. {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/RECORD +28 -31
  3. empathy_llm_toolkit/providers.py +175 -35
  4. empathy_llm_toolkit/utils/tokens.py +150 -30
  5. empathy_os/__init__.py +1 -1
  6. empathy_os/cli/commands/batch.py +256 -0
  7. empathy_os/cli/commands/cache.py +248 -0
  8. empathy_os/cli/commands/inspect.py +1 -2
  9. empathy_os/cli/commands/metrics.py +1 -1
  10. empathy_os/cli/commands/routing.py +285 -0
  11. empathy_os/cli/commands/workflow.py +2 -2
  12. empathy_os/cli/parsers/__init__.py +6 -0
  13. empathy_os/cli/parsers/batch.py +118 -0
  14. empathy_os/cli/parsers/cache.py +65 -0
  15. empathy_os/cli/parsers/routing.py +110 -0
  16. empathy_os/dashboard/standalone_server.py +22 -11
  17. empathy_os/metrics/collector.py +31 -0
  18. empathy_os/models/token_estimator.py +21 -13
  19. empathy_os/telemetry/agent_coordination.py +12 -14
  20. empathy_os/telemetry/agent_tracking.py +18 -19
  21. empathy_os/telemetry/approval_gates.py +27 -39
  22. empathy_os/telemetry/event_streaming.py +19 -19
  23. empathy_os/telemetry/feedback_loop.py +13 -16
  24. empathy_os/workflows/batch_processing.py +56 -10
  25. empathy_os/vscode_bridge 2.py +0 -173
  26. empathy_os/workflows/progressive/README 2.md +0 -454
  27. empathy_os/workflows/progressive/__init__ 2.py +0 -92
  28. empathy_os/workflows/progressive/cli 2.py +0 -242
  29. empathy_os/workflows/progressive/core 2.py +0 -488
  30. empathy_os/workflows/progressive/orchestrator 2.py +0 -701
  31. empathy_os/workflows/progressive/reports 2.py +0 -528
  32. empathy_os/workflows/progressive/telemetry 2.py +0 -280
  33. empathy_os/workflows/progressive/test_gen 2.py +0 -514
  34. empathy_os/workflows/progressive/workflow 2.py +0 -628
  35. {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/WHEEL +0 -0
  36. {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/entry_points.txt +0 -0
  37. {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/licenses/LICENSE +0 -0
  38. {empathy_framework-5.0.1.dist-info → empathy_framework-5.0.3.dist-info}/top_level.txt +0 -0
empathy_os/cli/commands/cache.py (new file)
@@ -0,0 +1,248 @@
+ """Cache monitoring and statistics commands for the CLI.
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+ import json
+ import re
+ from datetime import datetime, timedelta
+ from pathlib import Path
+ from typing import Any
+
+
+ def cmd_cache_stats(args):
+     """Display prompt caching statistics and savings.
+
+     Analyzes logs and telemetry to show cache performance metrics:
+     - Cache hit rate
+     - Total cost savings
+     - Cache read/write tokens
+     - Recommendations for optimization
+
+     Args:
+         args: Namespace object from argparse with attributes:
+             - days (int): Number of days to analyze (default: 7)
+             - format (str): Output format ('table' or 'json')
+             - verbose (bool): Show detailed breakdown
+
+     Returns:
+         None: Prints cache statistics report or JSON output
+     """
+     print(f"\n🔍 Analyzing cache performance (last {args.days} days)...\n")
+
+     # Collect cache metrics from logs
+     stats = _collect_cache_stats(days=args.days)
+
+     if args.format == "json":
+         print(json.dumps(stats, indent=2))
+         return
+
+     # Display formatted report
+     _display_cache_report(stats, verbose=args.verbose)
+
+
+ def _collect_cache_stats(days: int = 7) -> dict[str, Any]:
+     """Collect cache statistics from logs and telemetry.
+
+     Args:
+         days: Number of days to analyze
+
+     Returns:
+         Dictionary with cache statistics
+     """
+     # Try to find log files
+     log_paths = [
+         Path.cwd() / "empathy_os.log",
+         Path.home() / ".empathy" / "logs" / "empathy_os.log",
+         Path("/tmp/empathy_os.log"),
+     ]
+
+     log_file = None
+     for path in log_paths:
+         if path.exists():
+             log_file = path
+             break
+
+     if not log_file:
+         return {
+             "error": "No log file found",
+             "message": "Enable logging to track cache performance",
+             "total_requests": 0,
+             "cache_hits": 0,
+             "cache_writes": 0,
+             "total_savings": 0.0,
+         }
+
+     # Parse log file for cache metrics
+     cutoff_date = datetime.now() - timedelta(days=days)
+
+     cache_hits = 0
+     cache_writes = 0
+     total_cache_read_tokens = 0
+     total_cache_write_tokens = 0
+     total_savings = 0.0
+     total_requests = 0
+
+     # Regex patterns for log parsing
+     cache_hit_pattern = re.compile(r"Cache HIT: ([\d,]+) tokens read.*saved \$([\d.]+)")
+     cache_write_pattern = re.compile(r"Cache WRITE: ([\d,]+) tokens written.*cost \$([\d.]+)")
+
+     try:
+         with open(log_file) as f:
+             for line in f:
+                 # Try to extract timestamp
+                 # Common format: 2026-01-27 21:30:45,123
+                 timestamp_match = re.match(
+                     r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})", line
+                 )
+                 if timestamp_match:
+                     try:
+                         log_time = datetime.strptime(
+                             timestamp_match.group(1), "%Y-%m-%d %H:%M:%S"
+                         )
+                         if log_time < cutoff_date:
+                             continue  # Skip old entries
+                     except ValueError:
+                         pass  # Continue if timestamp parsing fails
+
+                 # Count API requests (approximate)
+                 if "anthropic.AsyncAnthropic" in line or "messages.create" in line:
+                     total_requests += 1
+
+                 # Parse cache hit
+                 hit_match = cache_hit_pattern.search(line)
+                 if hit_match:
+                     tokens_str = hit_match.group(1).replace(",", "")
+                     tokens = int(tokens_str)
+                     savings = float(hit_match.group(2))
+
+                     cache_hits += 1
+                     total_cache_read_tokens += tokens
+                     total_savings += savings
+
+                 # Parse cache write
+                 write_match = cache_write_pattern.search(line)
+                 if write_match:
+                     tokens_str = write_match.group(1).replace(",", "")
+                     tokens = int(tokens_str)
+
+                     cache_writes += 1
+                     total_cache_write_tokens += tokens
+
+     except Exception as e:
+         return {
+             "error": f"Failed to parse log file: {e}",
+             "total_requests": 0,
+             "cache_hits": 0,
+             "cache_writes": 0,
+             "total_savings": 0.0,
+         }
+
+     # Calculate metrics
+     cache_hit_rate = (
+         (cache_hits / total_requests * 100) if total_requests > 0 else 0.0
+     )
+
+     return {
+         "days_analyzed": days,
+         "log_file": str(log_file),
+         "total_requests": total_requests,
+         "cache_hits": cache_hits,
+         "cache_writes": cache_writes,
+         "cache_hit_rate": round(cache_hit_rate, 1),
+         "total_cache_read_tokens": total_cache_read_tokens,
+         "total_cache_write_tokens": total_cache_write_tokens,
+         "total_savings": round(total_savings, 4),
+         "avg_savings_per_hit": (
+             round(total_savings / cache_hits, 4) if cache_hits > 0 else 0.0
+         ),
+     }
+
+
+ def _display_cache_report(stats: dict[str, Any], verbose: bool = False):
+     """Display formatted cache statistics report.
+
+     Args:
+         stats: Cache statistics dictionary
+         verbose: Show detailed breakdown
+     """
+     # Handle error cases
+     if "error" in stats:
+         print(f"⚠️ {stats['error']}")
+         if "message" in stats:
+             print(f"   {stats['message']}")
+         return
+
+     # Summary section
+     print("=" * 60)
+     print("PROMPT CACHING PERFORMANCE SUMMARY")
+     print("=" * 60)
+     print(f"Analysis Period: Last {stats['days_analyzed']} days")
+     print(f"Log File: {stats['log_file']}")
+     print()
+
+     # Key metrics
+     print("📊 Key Metrics:")
+     print(f"  Total API Requests: {stats['total_requests']:,}")
+     print(f"  Cache Hits: {stats['cache_hits']:,}")
+     print(f"  Cache Writes: {stats['cache_writes']:,}")
+     print(f"  Cache Hit Rate: {stats['cache_hit_rate']}%")
+     print()
+
+     # Cost savings
+     print("💰 Cost Savings:")
+     print(f"  Total Saved: ${stats['total_savings']:.4f}")
+     if stats['cache_hits'] > 0:
+         print(f"  Avg Savings per Hit: ${stats['avg_savings_per_hit']:.4f}")
+     print()
+
+     # Token metrics (verbose mode)
+     if verbose:
+         print("🔢 Token Metrics:")
+         print(f"  Cache Read Tokens: {stats['total_cache_read_tokens']:,}")
+         print(f"  Cache Write Tokens: {stats['total_cache_write_tokens']:,}")
+         print()
+
+     # Performance assessment
+     hit_rate = stats['cache_hit_rate']
+     print("📈 Performance Assessment:")
+     if hit_rate >= 50:
+         print("  ✅ EXCELLENT - Cache is working effectively")
+         print("  Your workflows are benefiting from prompt caching")
+     elif hit_rate >= 30:
+         print("  ✓ GOOD - Cache is providing moderate benefits")
+         print("  Consider structuring prompts for better cache reuse")
+     elif hit_rate >= 10:
+         print("  ⚠️ LOW - Cache hit rate could be improved")
+         print("  Review your workflow patterns for optimization")
+     else:
+         print("  ❌ VERY LOW - Cache is not being utilized effectively")
+         print("  Consider enabling prompt caching or restructuring prompts")
+     print()
+
+     # Recommendations
+     if stats['total_requests'] < 10:
+         print("ℹ️ Note: Limited data available. Run more workflows for accurate stats.")
+     elif hit_rate < 30:
+         print("💡 Recommendations:")
+         print("  1. Reuse system prompts across workflow steps")
+         print("  2. Structure large context (docs, code) for caching")
+         print("  3. Cache TTL is 5 minutes - batch related requests")
+         print("  4. Enable use_prompt_caching=True in AnthropicProvider")
+
+     print("=" * 60)
+
+
+ def cmd_cache_clear(args):
+     """Clear cached data (placeholder for future implementation).
+
+     Args:
+         args: Namespace object from argparse
+
+     Returns:
+         None: Prints status message
+     """
+     print("\n⚠️ Cache clearing not implemented.")
+     print("Anthropic's cache has a 5-minute TTL and is server-side.")
+     print("Wait 5 minutes for cache to expire naturally.\n")
empathy_os/cli/commands/inspect.py
@@ -13,8 +13,7 @@ from empathy_os.core import EmpathyOS
  from empathy_os.logging_config import get_logger
  from empathy_os.metrics.collector import MetricsCollector
  from empathy_os.pattern_library import PatternLibrary
- from empathy_os.persistence import PatternPersistence
- from empathy_os.state_manager import StateManager
+ from empathy_os.persistence import PatternPersistence, StateManager

  logger = get_logger(__name__)

empathy_os/cli/commands/metrics.py
@@ -8,7 +8,7 @@ import sys

  from empathy_os.logging_config import get_logger
  from empathy_os.metrics.collector import MetricsCollector
- from empathy_os.state_manager import StateManager
+ from empathy_os.persistence import StateManager

  logger = get_logger(__name__)

empathy_os/cli/commands/routing.py (new file)
@@ -0,0 +1,285 @@
+ """CLI commands for adaptive model routing statistics.
+
+ Provides commands to analyze model routing performance and get tier upgrade
+ recommendations based on historical telemetry data.
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+ import logging
+ from typing import Any
+
+ from empathy_os.models import AdaptiveModelRouter
+ from empathy_os.telemetry import UsageTracker
+
+ logger = logging.getLogger(__name__)
+
+
+ def cmd_routing_stats(args: Any) -> int:
+     """Show routing statistics for a workflow.
+
+     Args:
+         args: Arguments with workflow, stage (optional), days
+
+     Returns:
+         0 on success, 1 on error
+     """
+     try:
+         # Get telemetry and router
+         tracker = UsageTracker.get_instance()
+         router = AdaptiveModelRouter(telemetry=tracker)
+
+         # Get routing stats
+         stats = router.get_routing_stats(
+             workflow=args.workflow,
+             stage=args.stage if hasattr(args, "stage") and args.stage else None,
+             days=args.days,
+         )
+
+         if stats["total_calls"] == 0:
+             print(f"❌ No data found for workflow '{args.workflow}'")
+             print(f"   (searched last {args.days} days)")
+             return 1
+
+         # Display stats
+         print("\n" + "=" * 70)
+         print(f"ADAPTIVE ROUTING STATISTICS - {stats['workflow']}")
+         if stats["stage"] != "all":
+             print(f"Stage: {stats['stage']}")
+         print("=" * 70)
+
+         print(f"\n📊 Overview (Last {stats['days_analyzed']} days)")
+         print(f"  Total calls: {stats['total_calls']:,}")
+         print(f"  Average cost: ${stats['avg_cost']:.4f}")
+         print(f"  Average success rate: {stats['avg_success_rate']:.1%}")
+         print(f"  Models used: {len(stats['models_used'])}")
+
+         # Per-model performance
+         print("\n📈 Per-Model Performance")
+         print("-" * 70)
+
+         for model in stats["models_used"]:
+             perf = stats["performance_by_model"][model]
+             print(f"\n  {model}:")
+             print(f"    Calls: {perf['calls']:,}")
+             print(f"    Success rate: {perf['success_rate']:.1%}")
+             print(f"    Avg cost: ${perf['avg_cost']:.4f}")
+             print(f"    Avg latency: {perf['avg_latency_ms']:.0f}ms")
+
+             # Quality score calculation (from AdaptiveModelRouter)
+             quality_score = (perf["success_rate"] * 100) - (perf["avg_cost"] * 10)
+             print(f"    Quality score: {quality_score:.2f}")
+
+         # Recommendations
+         print("\n💡 Recommendations")
+         print("-" * 70)
+
+         # Find best model
+         best_model = max(
+             stats["performance_by_model"].items(),
+             key=lambda x: (x[1]["success_rate"] * 100) - (x[1]["avg_cost"] * 10),
+         )
+
+         print(f"  Best model: {best_model[0]}")
+         print(f"  ({best_model[1]['success_rate']:.1%} success, ${best_model[1]['avg_cost']:.4f}/call)")
+
+         # Cost savings potential
+         if len(stats["models_used"]) > 1:
+             cheapest = min(
+                 stats["performance_by_model"].items(),
+                 key=lambda x: x[1]["avg_cost"],
+             )
+             most_expensive = max(
+                 stats["performance_by_model"].items(),
+                 key=lambda x: x[1]["avg_cost"],
+             )
+
+             if cheapest[0] != most_expensive[0]:
+                 savings_per_call = most_expensive[1]["avg_cost"] - cheapest[1]["avg_cost"]
+                 print(f"\n  💰 Potential savings:")
+                 print(f"    Using {cheapest[0]} instead of {most_expensive[0]}")
+                 print(f"    ${savings_per_call:.4f} per call")
+                 if stats["total_calls"] > 0:
+                     weekly_calls = (stats["total_calls"] / stats["days_analyzed"]) * 7
+                     weekly_savings = savings_per_call * weekly_calls
+                     print(f"    ~${weekly_savings:.2f}/week potential")
+
+         return 0
+
+     except Exception as e:
+         logger.exception("Failed to get routing stats")
+         print(f"❌ Error: {e}")
+         return 1
+
+
+ def cmd_routing_check(args: Any) -> int:
+     """Check if tier upgrades are recommended for workflows.
+
+     Args:
+         args: Arguments with workflow (or --all), stage (optional)
+
+     Returns:
+         0 on success, 1 on error
+     """
+     try:
+         # Get telemetry and router
+         tracker = UsageTracker.get_instance()
+         router = AdaptiveModelRouter(telemetry=tracker)
+
+         print("\n" + "=" * 70)
+         print("ADAPTIVE ROUTING - TIER UPGRADE RECOMMENDATIONS")
+         print("=" * 70)
+
+         if hasattr(args, "all") and args.all:
+             # Check all workflows
+             stats = tracker.get_stats(days=args.days)
+             workflows = list(stats["by_workflow"].keys())
+
+             if not workflows:
+                 print("\n❌ No workflow data found")
+                 return 1
+
+             print(f"\nChecking {len(workflows)} workflows (last {args.days} days)...\n")
+
+             upgrades_needed = []
+             upgrades_ok = []
+
+             for workflow_name in workflows:
+                 should_upgrade, reason = router.recommend_tier_upgrade(
+                     workflow=workflow_name, stage=None
+                 )
+
+                 if should_upgrade:
+                     upgrades_needed.append((workflow_name, reason))
+                 else:
+                     upgrades_ok.append((workflow_name, reason))
+
+             # Show workflows needing upgrades
+             if upgrades_needed:
+                 print(f"⚠️ {len(upgrades_needed)} workflow(s) need tier upgrade:")
+                 print("-" * 70)
+                 for workflow_name, reason in upgrades_needed:
+                     print(f"  • {workflow_name}")
+                     print(f"    {reason}")
+                 print()
+
+             # Show workflows performing well
+             if upgrades_ok:
+                 print(f"✓ {len(upgrades_ok)} workflow(s) performing well:")
+                 print("-" * 70)
+                 for workflow_name, reason in upgrades_ok:
+                     print(f"  • {workflow_name}: {reason}")
+                 print()
+
+             # Summary
+             if upgrades_needed:
+                 print("💡 Recommendation:")
+                 print("  Enable adaptive routing to automatically upgrade tiers:")
+                 print("  workflow = MyWorkflow(enable_adaptive_routing=True)")
+                 return 0
+             else:
+                 print("✓ All workflows performing well - no upgrades needed")
+                 return 0
+
+         else:
+             # Check specific workflow
+             workflow_name = args.workflow
+
+             should_upgrade, reason = router.recommend_tier_upgrade(
+                 workflow=workflow_name,
+                 stage=args.stage if hasattr(args, "stage") and args.stage else None,
+             )
+
+             print(f"\nWorkflow: {workflow_name}")
+             if hasattr(args, "stage") and args.stage:
+                 print(f"Stage: {args.stage}")
+             print(f"Analysis period: Last {args.days} days")
+             print()
+
+             if should_upgrade:
+                 print("⚠️ TIER UPGRADE RECOMMENDED")
+                 print(f"  {reason}")
+                 print()
+                 print("💡 Action:")
+                 print("  1. Enable adaptive routing:")
+                 print("     workflow = MyWorkflow(enable_adaptive_routing=True)")
+                 print("  2. Or manually upgrade tier in workflow config")
+                 return 0
+             else:
+                 print("✓ NO UPGRADE NEEDED")
+                 print(f"  {reason}")
+                 return 0
+
+     except Exception as e:
+         logger.exception("Failed to check routing recommendations")
+         print(f"❌ Error: {e}")
+         return 1
+
+
+ def cmd_routing_models(args: Any) -> int:
+     """Show model performance comparison.
+
+     Args:
+         args: Arguments with provider, days
+
+     Returns:
+         0 on success, 1 on error
+     """
+     try:
+         # Get telemetry
+         tracker = UsageTracker.get_instance()
+
+         # Get recent entries
+         entries = tracker.get_recent_entries(limit=100000, days=args.days)
+
+         if args.provider:
+             entries = [e for e in entries if e.get("provider") == args.provider]
+
+         if not entries:
+             print(f"❌ No data found for provider '{args.provider}'")
+             return 1
+
+         # Group by model
+         by_model: dict[str, list] = {}
+         for entry in entries:
+             model = entry["model"]
+             if model not in by_model:
+                 by_model[model] = []
+             by_model[model].append(entry)
+
+         print("\n" + "=" * 70)
+         print(f"MODEL PERFORMANCE COMPARISON - {args.provider.upper()}")
+         print(f"Last {args.days} days")
+         print("=" * 70)
+
+         # Sort by total calls
+         models_sorted = sorted(by_model.items(), key=lambda x: len(x[1]), reverse=True)
+
+         print(f"\n📊 {len(models_sorted)} model(s) used\n")
+
+         for model, model_entries in models_sorted:
+             total = len(model_entries)
+             successes = sum(1 for e in model_entries if e.get("success", True))
+             success_rate = successes / total
+
+             avg_cost = sum(e.get("cost", 0.0) for e in model_entries) / total
+             avg_latency = sum(e.get("duration_ms", 0) for e in model_entries) / total
+
+             # Quality score
+             quality_score = (success_rate * 100) - (avg_cost * 10)
+
+             print(f"  {model}")
+             print(f"    Calls: {total:,}")
+             print(f"    Success rate: {success_rate:.1%}")
+             print(f"    Avg cost: ${avg_cost:.4f}")
+             print(f"    Avg latency: {avg_latency:.0f}ms")
+             print(f"    Quality score: {quality_score:.2f}")
+             print()
+
+         return 0
+
+     except Exception as e:
+         logger.exception("Failed to get model performance")
+         print(f"❌ Error: {e}")
+         return 1
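Note: cmd_routing_stats and cmd_routing_models rank models with the same heuristic, score = success_rate * 100 - avg_cost * 10. A quick worked example with illustrative numbers (not real telemetry):

    # Illustrative values only, following the quality-score formula above
    perf_a = {"success_rate": 0.98, "avg_cost": 0.0450}  # stronger, pricier model
    perf_b = {"success_rate": 0.91, "avg_cost": 0.0020}  # cheaper model

    score_a = perf_a["success_rate"] * 100 - perf_a["avg_cost"] * 10  # 97.55
    score_b = perf_b["success_rate"] * 100 - perf_b["avg_cost"] * 10  # 90.98

Because avg_cost is denominated in dollars per call, the cost term only matters at the margins; success rate dominates the score unless per-call costs approach whole dollars.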
empathy_os/cli/commands/workflow.py
@@ -11,8 +11,8 @@ from pathlib import Path

  from empathy_os.config import _validate_file_path
  from empathy_os.logging_config import get_logger
- from empathy_os.workflows import list_workflows as get_workflow_list
- from empathy_os.workflows.config import WorkflowConfig, create_example_config, get_workflow
+ from empathy_os.workflows import get_workflow, list_workflows as get_workflow_list
+ from empathy_os.workflows.config import WorkflowConfig, create_example_config

  logger = get_logger(__name__)

empathy_os/cli/parsers/__init__.py
@@ -7,6 +7,8 @@ Licensed under Fair Source License 0.9
  """

  from . import (
+     batch,
+     cache,
      help,
      info,
      inspect,
@@ -14,6 +16,7 @@ from . import (
      orchestrate,
      patterns,
      provider,
+     routing,
      setup,
      status,
      sync,
@@ -57,6 +60,9 @@ def register_all_parsers(subparsers):

      # Metrics and state
      metrics.register_parsers(subparsers)
+     cache.register_parsers(subparsers)  # Cache monitoring
+     batch.register_parsers(subparsers)  # Batch processing (50% cost savings)
+     routing.register_parsers(subparsers)  # Adaptive routing statistics
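
Note: the subcommand names wired up by cache.register_parsers() live in empathy_os/cli/parsers/cache.py, whose contents are not shown in this section. A minimal sketch of exercising the new handler directly, building a Namespace with the attributes its docstring documents (days, format, verbose):

    from argparse import Namespace

    from empathy_os.cli.commands.cache import cmd_cache_stats

    # Attribute names follow the cmd_cache_stats docstring above
    args = Namespace(days=7, format="table", verbose=False)
    cmd_cache_stats(args)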