empathy-framework 5.0.1__py3-none-any.whl → 5.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. {empathy_framework-5.0.1.dist-info → empathy_framework-5.1.0.dist-info}/METADATA +311 -150
  2. {empathy_framework-5.0.1.dist-info → empathy_framework-5.1.0.dist-info}/RECORD +60 -33
  3. empathy_framework-5.1.0.dist-info/licenses/LICENSE +201 -0
  4. empathy_framework-5.1.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
  5. empathy_llm_toolkit/providers.py +175 -35
  6. empathy_llm_toolkit/utils/tokens.py +150 -30
  7. empathy_os/__init__.py +1 -1
  8. empathy_os/cli/commands/batch.py +256 -0
  9. empathy_os/cli/commands/cache.py +248 -0
  10. empathy_os/cli/commands/inspect.py +1 -2
  11. empathy_os/cli/commands/metrics.py +1 -1
  12. empathy_os/cli/commands/routing.py +285 -0
  13. empathy_os/cli/commands/workflow.py +2 -1
  14. empathy_os/cli/parsers/__init__.py +6 -0
  15. empathy_os/cli/parsers/batch.py +118 -0
  16. empathy_os/cli/parsers/cache 2.py +65 -0
  17. empathy_os/cli/parsers/cache.py +65 -0
  18. empathy_os/cli/parsers/routing.py +110 -0
  19. empathy_os/cli_minimal.py +3 -3
  20. empathy_os/cli_router 2.py +416 -0
  21. empathy_os/dashboard/__init__.py +1 -2
  22. empathy_os/dashboard/app 2.py +512 -0
  23. empathy_os/dashboard/app.py +1 -1
  24. empathy_os/dashboard/simple_server 2.py +403 -0
  25. empathy_os/dashboard/standalone_server 2.py +536 -0
  26. empathy_os/dashboard/standalone_server.py +22 -11
  27. empathy_os/memory/types 2.py +441 -0
  28. empathy_os/metrics/collector.py +31 -0
  29. empathy_os/models/__init__.py +19 -0
  30. empathy_os/models/adaptive_routing 2.py +437 -0
  31. empathy_os/models/auth_cli.py +444 -0
  32. empathy_os/models/auth_strategy.py +450 -0
  33. empathy_os/models/token_estimator.py +21 -13
  34. empathy_os/project_index/scanner_parallel 2.py +291 -0
  35. empathy_os/telemetry/agent_coordination 2.py +478 -0
  36. empathy_os/telemetry/agent_coordination.py +14 -16
  37. empathy_os/telemetry/agent_tracking 2.py +350 -0
  38. empathy_os/telemetry/agent_tracking.py +18 -20
  39. empathy_os/telemetry/approval_gates 2.py +563 -0
  40. empathy_os/telemetry/approval_gates.py +27 -39
  41. empathy_os/telemetry/event_streaming 2.py +405 -0
  42. empathy_os/telemetry/event_streaming.py +22 -22
  43. empathy_os/telemetry/feedback_loop 2.py +557 -0
  44. empathy_os/telemetry/feedback_loop.py +14 -17
  45. empathy_os/workflows/__init__.py +8 -0
  46. empathy_os/workflows/autonomous_test_gen.py +569 -0
  47. empathy_os/workflows/batch_processing.py +56 -10
  48. empathy_os/workflows/bug_predict.py +45 -0
  49. empathy_os/workflows/code_review.py +92 -22
  50. empathy_os/workflows/document_gen.py +594 -62
  51. empathy_os/workflows/llm_base.py +363 -0
  52. empathy_os/workflows/perf_audit.py +69 -0
  53. empathy_os/workflows/release_prep.py +54 -0
  54. empathy_os/workflows/security_audit.py +154 -79
  55. empathy_os/workflows/test_gen.py +60 -0
  56. empathy_os/workflows/test_gen_behavioral.py +477 -0
  57. empathy_os/workflows/test_gen_parallel.py +341 -0
  58. empathy_framework-5.0.1.dist-info/licenses/LICENSE +0 -139
  59. {empathy_framework-5.0.1.dist-info → empathy_framework-5.1.0.dist-info}/WHEEL +0 -0
  60. {empathy_framework-5.0.1.dist-info → empathy_framework-5.1.0.dist-info}/entry_points.txt +0 -0
  61. {empathy_framework-5.0.1.dist-info → empathy_framework-5.1.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,285 @@
+ """CLI commands for adaptive model routing statistics.
+
+ Provides commands to analyze model routing performance and get tier upgrade
+ recommendations based on historical telemetry data.
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+ import logging
+ from typing import Any
+
+ from empathy_os.models import AdaptiveModelRouter
+ from empathy_os.telemetry import UsageTracker
+
+ logger = logging.getLogger(__name__)
+
+
+ def cmd_routing_stats(args: Any) -> int:
+     """Show routing statistics for a workflow.
+
+     Args:
+         args: Arguments with workflow, stage (optional), days
+
+     Returns:
+         0 on success, 1 on error
+     """
+     try:
+         # Get telemetry and router
+         tracker = UsageTracker.get_instance()
+         router = AdaptiveModelRouter(telemetry=tracker)
+
+         # Get routing stats
+         stats = router.get_routing_stats(
+             workflow=args.workflow,
+             stage=args.stage if hasattr(args, "stage") and args.stage else None,
+             days=args.days,
+         )
+
+         if stats["total_calls"] == 0:
+             print(f"❌ No data found for workflow '{args.workflow}'")
+             print(f" (searched last {args.days} days)")
+             return 1
+
+         # Display stats
+         print("\n" + "=" * 70)
+         print(f"ADAPTIVE ROUTING STATISTICS - {stats['workflow']}")
+         if stats["stage"] != "all":
+             print(f"Stage: {stats['stage']}")
+         print("=" * 70)
+
+         print(f"\n📊 Overview (Last {stats['days_analyzed']} days)")
+         print(f" Total calls: {stats['total_calls']:,}")
+         print(f" Average cost: ${stats['avg_cost']:.4f}")
+         print(f" Average success rate: {stats['avg_success_rate']:.1%}")
+         print(f" Models used: {len(stats['models_used'])}")
+
+         # Per-model performance
+         print("\n📈 Per-Model Performance")
+         print("-" * 70)
+
+         for model in stats["models_used"]:
+             perf = stats["performance_by_model"][model]
+             print(f"\n {model}:")
+             print(f" Calls: {perf['calls']:,}")
+             print(f" Success rate: {perf['success_rate']:.1%}")
+             print(f" Avg cost: ${perf['avg_cost']:.4f}")
+             print(f" Avg latency: {perf['avg_latency_ms']:.0f}ms")
+
+             # Quality score calculation (from AdaptiveModelRouter)
+             quality_score = (perf["success_rate"] * 100) - (perf["avg_cost"] * 10)
+             print(f" Quality score: {quality_score:.2f}")
+
+         # Recommendations
+         print("\n💡 Recommendations")
+         print("-" * 70)
+
+         # Find best model
+         best_model = max(
+             stats["performance_by_model"].items(),
+             key=lambda x: (x[1]["success_rate"] * 100) - (x[1]["avg_cost"] * 10),
+         )
+
+         print(f" Best model: {best_model[0]}")
+         print(f" ({best_model[1]['success_rate']:.1%} success, ${best_model[1]['avg_cost']:.4f}/call)")
+
+         # Cost savings potential
+         if len(stats["models_used"]) > 1:
+             cheapest = min(
+                 stats["performance_by_model"].items(),
+                 key=lambda x: x[1]["avg_cost"],
+             )
+             most_expensive = max(
+                 stats["performance_by_model"].items(),
+                 key=lambda x: x[1]["avg_cost"],
+             )
+
+             if cheapest[0] != most_expensive[0]:
+                 savings_per_call = most_expensive[1]["avg_cost"] - cheapest[1]["avg_cost"]
+                 print("\n 💰 Potential savings:")
+                 print(f" Using {cheapest[0]} instead of {most_expensive[0]}")
+                 print(f" ${savings_per_call:.4f} per call")
+                 if stats["total_calls"] > 0:
+                     weekly_calls = (stats["total_calls"] / stats["days_analyzed"]) * 7
+                     weekly_savings = savings_per_call * weekly_calls
+                     print(f" ~${weekly_savings:.2f}/week potential")
+
+         return 0
+
+     except Exception as e:
+         logger.exception("Failed to get routing stats")
+         print(f"❌ Error: {e}")
+         return 1
+
+
+ def cmd_routing_check(args: Any) -> int:
+     """Check if tier upgrades are recommended for workflows.
+
+     Args:
+         args: Arguments with workflow (or --all), stage (optional)
+
+     Returns:
+         0 on success, 1 on error
+     """
+     try:
+         # Get telemetry and router
+         tracker = UsageTracker.get_instance()
+         router = AdaptiveModelRouter(telemetry=tracker)
+
+         print("\n" + "=" * 70)
+         print("ADAPTIVE ROUTING - TIER UPGRADE RECOMMENDATIONS")
+         print("=" * 70)
+
+         if hasattr(args, "all") and args.all:
+             # Check all workflows
+             stats = tracker.get_stats(days=args.days)
+             workflows = list(stats["by_workflow"].keys())
+
+             if not workflows:
+                 print("\n❌ No workflow data found")
+                 return 1
+
+             print(f"\nChecking {len(workflows)} workflows (last {args.days} days)...\n")
+
+             upgrades_needed = []
+             upgrades_ok = []
+
+             for workflow_name in workflows:
+                 should_upgrade, reason = router.recommend_tier_upgrade(
+                     workflow=workflow_name, stage=None
+                 )
+
+                 if should_upgrade:
+                     upgrades_needed.append((workflow_name, reason))
+                 else:
+                     upgrades_ok.append((workflow_name, reason))
+
+             # Show workflows needing upgrades
+             if upgrades_needed:
+                 print(f"⚠️ {len(upgrades_needed)} workflow(s) need tier upgrade:")
+                 print("-" * 70)
+                 for workflow_name, reason in upgrades_needed:
+                     print(f" • {workflow_name}")
+                     print(f" {reason}")
+                     print()
+
+             # Show workflows performing well
+             if upgrades_ok:
+                 print(f"✓ {len(upgrades_ok)} workflow(s) performing well:")
+                 print("-" * 70)
+                 for workflow_name, reason in upgrades_ok:
+                     print(f" • {workflow_name}: {reason}")
+                 print()
+
+             # Summary
+             if upgrades_needed:
+                 print("💡 Recommendation:")
+                 print(" Enable adaptive routing to automatically upgrade tiers:")
+                 print(" workflow = MyWorkflow(enable_adaptive_routing=True)")
+                 return 0
+             else:
+                 print("✓ All workflows performing well - no upgrades needed")
+                 return 0
+
+         else:
+             # Check specific workflow
+             workflow_name = args.workflow
+
+             should_upgrade, reason = router.recommend_tier_upgrade(
+                 workflow=workflow_name,
+                 stage=args.stage if hasattr(args, "stage") and args.stage else None,
+             )
+
+             print(f"\nWorkflow: {workflow_name}")
+             if hasattr(args, "stage") and args.stage:
+                 print(f"Stage: {args.stage}")
+             print(f"Analysis period: Last {args.days} days")
+             print()
+
+             if should_upgrade:
+                 print("⚠️ TIER UPGRADE RECOMMENDED")
+                 print(f" {reason}")
+                 print()
+                 print("💡 Action:")
+                 print(" 1. Enable adaptive routing:")
+                 print(" workflow = MyWorkflow(enable_adaptive_routing=True)")
+                 print(" 2. Or manually upgrade tier in workflow config")
+                 return 0
+             else:
+                 print("✓ NO UPGRADE NEEDED")
+                 print(f" {reason}")
+                 return 0
+
+     except Exception as e:
+         logger.exception("Failed to check routing recommendations")
+         print(f"❌ Error: {e}")
+         return 1
+
+
+ def cmd_routing_models(args: Any) -> int:
+     """Show model performance comparison.
+
+     Args:
+         args: Arguments with provider, days
+
+     Returns:
+         0 on success, 1 on error
+     """
+     try:
+         # Get telemetry
+         tracker = UsageTracker.get_instance()
+
+         # Get recent entries
+         entries = tracker.get_recent_entries(limit=100000, days=args.days)
+
+         if args.provider:
+             entries = [e for e in entries if e.get("provider") == args.provider]
+
+         if not entries:
+             print(f"❌ No data found for provider '{args.provider}'")
+             return 1
+
+         # Group by model
+         by_model: dict[str, list] = {}
+         for entry in entries:
+             model = entry["model"]
+             if model not in by_model:
+                 by_model[model] = []
+             by_model[model].append(entry)
+
+         print("\n" + "=" * 70)
+         print(f"MODEL PERFORMANCE COMPARISON - {args.provider.upper()}")
+         print(f"Last {args.days} days")
+         print("=" * 70)
+
+         # Sort by total calls
+         models_sorted = sorted(by_model.items(), key=lambda x: len(x[1]), reverse=True)
+
+         print(f"\n📊 {len(models_sorted)} model(s) used\n")
+
+         for model, model_entries in models_sorted:
+             total = len(model_entries)
+             successes = sum(1 for e in model_entries if e.get("success", True))
+             success_rate = successes / total
+
+             avg_cost = sum(e.get("cost", 0.0) for e in model_entries) / total
+             avg_latency = sum(e.get("duration_ms", 0) for e in model_entries) / total
+
+             # Quality score
+             quality_score = (success_rate * 100) - (avg_cost * 10)
+
+             print(f" {model}")
+             print(f" Calls: {total:,}")
+             print(f" Success rate: {success_rate:.1%}")
+             print(f" Avg cost: ${avg_cost:.4f}")
+             print(f" Avg latency: {avg_latency:.0f}ms")
+             print(f" Quality score: {quality_score:.2f}")
+             print()
+
+         return 0
+
+     except Exception as e:
+         logger.exception("Failed to get model performance")
+         print(f"❌ Error: {e}")
+         return 1
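
The quality score printed above is the heuristic used throughout this module: `(success_rate * 100) - (avg_cost * 10)`. A minimal sketch of how that score ranks models, using hypothetical telemetry values shaped like the `stats["performance_by_model"]` dict (the model names and numbers below are illustrative, not taken from the package):

    # Hypothetical per-model summaries; field names mirror the stats dict above.
    performance_by_model = {
        "model-a": {"success_rate": 0.92, "avg_cost": 0.002},
        "model-b": {"success_rate": 0.97, "avg_cost": 0.015},
    }

    def quality_score(perf: dict) -> float:
        # Reward success rate heavily, penalize average cost lightly.
        return (perf["success_rate"] * 100) - (perf["avg_cost"] * 10)

    best = max(performance_by_model.items(), key=lambda kv: quality_score(kv[1]))
    print(best[0], round(quality_score(best[1]), 2))  # model-b 96.85
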
@@ -11,8 +11,9 @@ from pathlib import Path
 
  from empathy_os.config import _validate_file_path
  from empathy_os.logging_config import get_logger
+ from empathy_os.workflows import get_workflow
  from empathy_os.workflows import list_workflows as get_workflow_list
- from empathy_os.workflows.config import WorkflowConfig, create_example_config, get_workflow
+ from empathy_os.workflows.config import WorkflowConfig, create_example_config
 
  logger = get_logger(__name__)
 
@@ -7,6 +7,8 @@ Licensed under Fair Source License 0.9
  """
 
  from . import (
+     batch,
+     cache,
      help,
      info,
      inspect,
@@ -14,6 +16,7 @@ from . import (
      orchestrate,
      patterns,
      provider,
+     routing,
      setup,
      status,
      sync,
@@ -57,6 +60,9 @@ def register_all_parsers(subparsers):
 
      # Metrics and state
      metrics.register_parsers(subparsers)
+     cache.register_parsers(subparsers) # Cache monitoring
+     batch.register_parsers(subparsers) # Batch processing (50% cost savings)
+     routing.register_parsers(subparsers) # Adaptive routing statistics
 
      # Setup and initialization
      setup.register_parsers(subparsers)
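
Each command module registers its subparser and binds a handler with `set_defaults(func=...)`, so the top-level CLI can dispatch on `args.func` after parsing. A minimal sketch of that wiring, assuming a plain `argparse` entry point (the `prog` name is an assumption; the package's actual CLI router may build the parser differently):

    import argparse

    from empathy_os.cli.parsers import register_all_parsers

    parser = argparse.ArgumentParser(prog="empathy")  # program name assumed
    subparsers = parser.add_subparsers(dest="command", required=True)
    register_all_parsers(subparsers)  # registers batch, cache, routing, etc.

    args = parser.parse_args()           # e.g. ["routing", "stats", "code-review"]
    raise SystemExit(args.func(args))    # handlers return 0 on success, 1 on error
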
@@ -0,0 +1,118 @@
+ """Argument parser for batch processing commands.
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+
+ def register_parsers(subparsers):
+     """Register batch command parsers.
+
+     Args:
+         subparsers: Subparser object from main argument parser
+
+     Returns:
+         None: Adds batch subparser with submit, status, results, wait subcommands
+     """
+     from ..commands.batch import (
+         cmd_batch_results,
+         cmd_batch_status,
+         cmd_batch_submit,
+         cmd_batch_wait,
+     )
+
+     # Main batch command
+     batch_parser = subparsers.add_parser(
+         "batch",
+         help="Batch processing via Anthropic Batch API (50% cost savings)",
+         description="Submit and manage batch processing jobs for non-urgent tasks",
+     )
+
+     # Batch subcommands
+     batch_subparsers = batch_parser.add_subparsers(dest="batch_command", required=True)
+
+     # batch submit command
+     submit_parser = batch_subparsers.add_parser(
+         "submit",
+         help="Submit a batch processing job from JSON file",
+         description="Submit batch requests for asynchronous processing (50% cost savings)",
+     )
+
+     submit_parser.add_argument(
+         "input_file",
+         help='JSON file with batch requests. Format: [{"task_id": "...", "task_type": "...", "input_data": {...}}]',
+     )
+
+     submit_parser.set_defaults(func=cmd_batch_submit)
+
+     # batch status command
+     status_parser = batch_subparsers.add_parser(
+         "status",
+         help="Check status of a batch processing job",
+         description="Display current status and request counts for a batch",
+     )
+
+     status_parser.add_argument(
+         "batch_id",
+         help="Batch ID (e.g., msgbatch_abc123)",
+     )
+
+     status_parser.add_argument(
+         "--json",
+         action="store_true",
+         help="Output raw JSON status",
+     )
+
+     status_parser.set_defaults(func=cmd_batch_status)
+
+     # batch results command
+     results_parser = batch_subparsers.add_parser(
+         "results",
+         help="Retrieve results from completed batch",
+         description="Download and save batch results to JSON file",
+     )
+
+     results_parser.add_argument(
+         "batch_id",
+         help="Batch ID (e.g., msgbatch_abc123)",
+     )
+
+     results_parser.add_argument(
+         "output_file",
+         help="Path to output JSON file",
+     )
+
+     results_parser.set_defaults(func=cmd_batch_results)
+
+     # batch wait command
+     wait_parser = batch_subparsers.add_parser(
+         "wait",
+         help="Wait for batch to complete and retrieve results",
+         description="Poll batch status until completion, then download results",
+     )
+
+     wait_parser.add_argument(
+         "batch_id",
+         help="Batch ID (e.g., msgbatch_abc123)",
+     )
+
+     wait_parser.add_argument(
+         "output_file",
+         help="Path to output JSON file",
+     )
+
+     wait_parser.add_argument(
+         "--poll-interval",
+         type=int,
+         default=300,
+         help="Seconds between status checks (default: 300 = 5 minutes)",
+     )
+
+     wait_parser.add_argument(
+         "--timeout",
+         type=int,
+         default=86400,
+         help="Maximum wait time in seconds (default: 86400 = 24 hours)",
+     )
+
+     wait_parser.set_defaults(func=cmd_batch_wait)
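
The `submit` subcommand reads a JSON array in the shape given in its help text. A hypothetical input file and invocation sequence, assuming the CLI is exposed as an `empathy` entry point (the entry-point name, task_type values, and batch ID are illustrative; the flags come from the parser above):

    # batch_requests.json (hypothetical contents)
    [
      {"task_id": "review-001", "task_type": "code_review", "input_data": {"file": "src/app.py"}},
      {"task_id": "review-002", "task_type": "code_review", "input_data": {"file": "src/utils.py"}}
    ]

    # Submit, check status, or block until the batch finishes:
    empathy batch submit batch_requests.json
    empathy batch status msgbatch_abc123 --json
    empathy batch wait msgbatch_abc123 results.json --poll-interval 60 --timeout 7200
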
@@ -0,0 +1,65 @@
+ """Argument parser for cache commands.
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+
+ def register_parsers(subparsers):
+     """Register cache command parsers.
+
+     Args:
+         subparsers: Subparser object from main argument parser
+
+     Returns:
+         None: Adds cache subparser with stats and clear subcommands
+     """
+     from ..commands.cache import cmd_cache_clear, cmd_cache_stats
+     # Main cache command
+     cache_parser = subparsers.add_parser(
+         "cache",
+         help="Cache monitoring and management",
+         description="Monitor prompt caching performance and cost savings",
+     )
+
+     # Cache subcommands
+     cache_subparsers = cache_parser.add_subparsers(dest="cache_command", required=True)
+
+     # cache stats command
+     stats_parser = cache_subparsers.add_parser(
+         "stats",
+         help="Show cache performance statistics",
+         description="Display prompt caching metrics including hit rate and cost savings",
+     )
+
+     stats_parser.add_argument(
+         "--days",
+         type=int,
+         default=7,
+         help="Number of days to analyze (default: 7)",
+     )
+
+     stats_parser.add_argument(
+         "--format",
+         choices=["table", "json"],
+         default="table",
+         help="Output format (default: table)",
+     )
+
+     stats_parser.add_argument(
+         "--verbose",
+         "-v",
+         action="store_true",
+         help="Show detailed token metrics",
+     )
+
+     stats_parser.set_defaults(func=cmd_cache_stats)
+
+     # cache clear command (placeholder)
+     clear_parser = cache_subparsers.add_parser(
+         "clear",
+         help="Clear cache (note: Anthropic cache is server-side with 5min TTL)",
+         description="Information about cache clearing",
+     )
+
+     clear_parser.set_defaults(func=cmd_cache_clear)
@@ -0,0 +1,65 @@
+ """Argument parser for cache commands.
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+
+ def register_parsers(subparsers):
+     """Register cache command parsers.
+
+     Args:
+         subparsers: Subparser object from main argument parser
+
+     Returns:
+         None: Adds cache subparser with stats and clear subcommands
+     """
+     from ..commands.cache import cmd_cache_clear, cmd_cache_stats
+     # Main cache command
+     cache_parser = subparsers.add_parser(
+         "cache",
+         help="Cache monitoring and management",
+         description="Monitor prompt caching performance and cost savings",
+     )
+
+     # Cache subcommands
+     cache_subparsers = cache_parser.add_subparsers(dest="cache_command", required=True)
+
+     # cache stats command
+     stats_parser = cache_subparsers.add_parser(
+         "stats",
+         help="Show cache performance statistics",
+         description="Display prompt caching metrics including hit rate and cost savings",
+     )
+
+     stats_parser.add_argument(
+         "--days",
+         type=int,
+         default=7,
+         help="Number of days to analyze (default: 7)",
+     )
+
+     stats_parser.add_argument(
+         "--format",
+         choices=["table", "json"],
+         default="table",
+         help="Output format (default: table)",
+     )
+
+     stats_parser.add_argument(
+         "--verbose",
+         "-v",
+         action="store_true",
+         help="Show detailed token metrics",
+     )
+
+     stats_parser.set_defaults(func=cmd_cache_stats)
+
+     # cache clear command (placeholder)
+     clear_parser = cache_subparsers.add_parser(
+         "clear",
+         help="Clear cache (note: Anthropic cache is server-side with 5min TTL)",
+         description="Information about cache clearing",
+     )
+
+     clear_parser.set_defaults(func=cmd_cache_clear)
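
Assuming the same `empathy` entry point, the cache parser above (shipped twice, in `cache 2.py` and `cache.py`) accepts invocations such as the following (flags are taken from the parser; the entry-point name is an assumption):

    empathy cache stats --days 30 --format json
    empathy cache stats --verbose
    empathy cache clear   # informational only; the Anthropic prompt cache is server-side
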
@@ -0,0 +1,110 @@
+ """Argument parser for adaptive routing commands.
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+
+ def register_parsers(subparsers):
+     """Register routing command parsers.
+
+     Args:
+         subparsers: Subparser object from main argument parser
+
+     Returns:
+         None: Adds routing subparser with stats, check, models subcommands
+     """
+     from ..commands.routing import (
+         cmd_routing_check,
+         cmd_routing_models,
+         cmd_routing_stats,
+     )
+
+     # Main routing command
+     routing_parser = subparsers.add_parser(
+         "routing",
+         help="Adaptive model routing statistics and recommendations",
+         description="Analyze model routing performance based on historical telemetry",
+     )
+
+     # Routing subcommands
+     routing_subparsers = routing_parser.add_subparsers(
+         dest="routing_command", required=True
+     )
+
+     # routing stats command
+     stats_parser = routing_subparsers.add_parser(
+         "stats",
+         help="Show routing statistics for a workflow",
+         description="Display model performance metrics and recommendations",
+     )
+
+     stats_parser.add_argument("workflow", help="Workflow name (e.g., 'code-review')")
+
+     stats_parser.add_argument(
+         "--stage",
+         help="Stage name (optional, shows all stages if not specified)",
+     )
+
+     stats_parser.add_argument(
+         "--days",
+         type=int,
+         default=7,
+         help="Number of days to analyze (default: 7)",
+     )
+
+     stats_parser.set_defaults(func=cmd_routing_stats)
+
+     # routing check command
+     check_parser = routing_subparsers.add_parser(
+         "check",
+         help="Check for tier upgrade recommendations",
+         description="Analyze failure rates and recommend tier upgrades",
+     )
+
+     check_parser.add_argument(
+         "--workflow",
+         help="Workflow name (required unless --all is used)",
+     )
+
+     check_parser.add_argument(
+         "--stage",
+         help="Stage name (optional)",
+     )
+
+     check_parser.add_argument(
+         "--all",
+         action="store_true",
+         help="Check all workflows",
+     )
+
+     check_parser.add_argument(
+         "--days",
+         type=int,
+         default=7,
+         help="Number of days to analyze (default: 7)",
+     )
+
+     check_parser.set_defaults(func=cmd_routing_check)
+
+     # routing models command
+     models_parser = routing_subparsers.add_parser(
+         "models",
+         help="Compare model performance",
+         description="Show performance metrics for all models from a provider",
+     )
+
+     models_parser.add_argument(
+         "--provider",
+         default="anthropic",
+         help="Provider name (default: anthropic)",
+     )
+
+     models_parser.add_argument(
+         "--days",
+         type=int,
+         default=7,
+         help="Number of days to analyze (default: 7)",
+     )
+
+     models_parser.set_defaults(func=cmd_routing_models)
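
Typical invocations for the routing commands, under the same entry-point assumption (the workflow name 'code-review' comes from the parser's own example; the stage name is hypothetical):

    empathy routing stats code-review --stage analyze --days 14
    empathy routing check --all --days 7
    empathy routing check --workflow code-review
    empathy routing models --provider anthropic --days 30
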