empathy-framework 4.9.0-py3-none-any.whl → 5.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-4.9.0.dist-info → empathy_framework-5.0.0.dist-info}/METADATA +64 -25
- {empathy_framework-4.9.0.dist-info → empathy_framework-5.0.0.dist-info}/RECORD +47 -26
- empathy_os/__init__.py +2 -2
- empathy_os/cache/hash_only.py +6 -3
- empathy_os/cache/hybrid.py +6 -3
- empathy_os/cli_legacy.py +27 -1
- empathy_os/cli_minimal.py +512 -15
- empathy_os/cli_router.py +145 -113
- empathy_os/cli_unified.py +25 -0
- empathy_os/dashboard/__init__.py +42 -0
- empathy_os/dashboard/app.py +512 -0
- empathy_os/dashboard/simple_server.py +403 -0
- empathy_os/dashboard/standalone_server.py +536 -0
- empathy_os/memory/__init__.py +19 -5
- empathy_os/memory/short_term.py +4 -70
- empathy_os/memory/types.py +2 -2
- empathy_os/models/__init__.py +3 -0
- empathy_os/models/adaptive_routing.py +437 -0
- empathy_os/models/registry.py +4 -4
- empathy_os/socratic/ab_testing.py +1 -1
- empathy_os/telemetry/__init__.py +29 -1
- empathy_os/telemetry/agent_coordination.py +478 -0
- empathy_os/telemetry/agent_tracking.py +350 -0
- empathy_os/telemetry/approval_gates.py +563 -0
- empathy_os/telemetry/event_streaming.py +405 -0
- empathy_os/telemetry/feedback_loop.py +557 -0
- empathy_os/vscode_bridge 2.py +173 -0
- empathy_os/workflows/__init__.py +4 -4
- empathy_os/workflows/base.py +495 -43
- empathy_os/workflows/history.py +3 -5
- empathy_os/workflows/output.py +410 -0
- empathy_os/workflows/progress.py +324 -22
- empathy_os/workflows/progressive/README 2.md +454 -0
- empathy_os/workflows/progressive/__init__ 2.py +92 -0
- empathy_os/workflows/progressive/cli 2.py +242 -0
- empathy_os/workflows/progressive/core 2.py +488 -0
- empathy_os/workflows/progressive/orchestrator 2.py +701 -0
- empathy_os/workflows/progressive/reports 2.py +528 -0
- empathy_os/workflows/progressive/telemetry 2.py +280 -0
- empathy_os/workflows/progressive/test_gen 2.py +514 -0
- empathy_os/workflows/progressive/workflow 2.py +628 -0
- empathy_os/workflows/routing.py +5 -0
- empathy_os/workflows/security_audit.py +189 -0
- {empathy_framework-4.9.0.dist-info → empathy_framework-5.0.0.dist-info}/WHEEL +0 -0
- {empathy_framework-4.9.0.dist-info → empathy_framework-5.0.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-4.9.0.dist-info → empathy_framework-5.0.0.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-4.9.0.dist-info → empathy_framework-5.0.0.dist-info}/top_level.txt +0 -0
empathy_os/cli_minimal.py
CHANGED
@@ -1,17 +1,27 @@
 #!/usr/bin/env python3
-"""Empathy Framework
+"""Empathy Framework CLI.
 
-
-
+IMPORTANT: This CLI is for automation only (git hooks, scripts, CI/CD).
+For interactive use, use Claude Code skills in VSCode or Claude Desktop.
 
-
+Automation commands:
 empathy workflow list List available workflows
 empathy workflow run <name> Execute a workflow
 empathy workflow info <name> Show workflow details
 
+Monitoring commands:
+empathy dashboard start Start agent coordination dashboard
+(opens web UI at http://localhost:8000)
+
+Utility commands:
 empathy telemetry show Display usage summary
 empathy telemetry savings Show cost savings
 empathy telemetry export Export to CSV/JSON
+empathy telemetry routing-stats Show adaptive routing statistics
+empathy telemetry routing-check Check for tier upgrade recommendations
+empathy telemetry models Show model performance by provider
+empathy telemetry agents Show active agents and their status
+empathy telemetry signals Show coordination signals for an agent
 
 empathy provider show Show current provider config
 empathy provider set <name> Set provider (anthropic, openai, hybrid)

@@ -19,11 +29,13 @@ Commands:
 empathy validate Validate configuration
 empathy version Show version
 
-For interactive
-/dev Developer tools (
-/testing Run tests, coverage,
+For interactive development, use Claude Code skills:
+/dev Developer tools (commit, review, debug, refactor)
+/testing Run tests, coverage, generate tests
+/workflows AI-powered workflows (security, bug prediction)
 /docs Documentation generation
 /release Release preparation
+/learning Session evaluation and improvement
 """
 
 from __future__ import annotations
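The rewritten docstring above positions this CLI as automation-only. As a rough usage sketch, the commands it lists can also be driven from a script through the module's main() entry point, which this diff shows later in the file; the import path empathy_os.cli_minimal is the file being diffed, and the exact arguments below are just illustrative.

# Sketch only: invoking the automation commands programmatically via main(argv).
from empathy_os.cli_minimal import main

# Equivalent to `empathy telemetry routing-stats --days 7` in a git hook or CI step.
exit_code = main(["telemetry", "routing-stats", "--days", "7"])

# Equivalent to `empathy telemetry routing-check --all`.
exit_code = main(["telemetry", "routing-check", "--all"])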
@@ -339,6 +351,386 @@ def cmd_telemetry_export(args: Namespace) -> int:
         return 1
 
 
+def cmd_telemetry_routing_stats(args: Namespace) -> int:
+    """Show adaptive routing statistics."""
+    try:
+        from empathy_os.models import AdaptiveModelRouter
+        from empathy_os.telemetry import UsageTracker
+
+        tracker = UsageTracker.get_instance()
+        router = AdaptiveModelRouter(telemetry=tracker)
+
+        workflow = args.workflow if hasattr(args, "workflow") and args.workflow else None
+        stage = args.stage if hasattr(args, "stage") and args.stage else None
+        days = args.days if hasattr(args, "days") else 7
+
+        print("\n📊 Adaptive Routing Statistics\n")
+        print("-" * 70)
+
+        if workflow:
+            # Show stats for specific workflow
+            stats = router.get_routing_stats(workflow=workflow, stage=stage, days=days)
+
+            if stats["total_calls"] == 0:
+                print(f"\n No data found for workflow: {workflow}")
+                if stage:
+                    print(f" Stage: {stage}")
+                return 0
+
+            print(f"\n Workflow: {stats['workflow']}")
+            if stage:
+                print(f" Stage: {stage}")
+            print(f" Period: Last {days} days")
+            print(f" Total calls: {stats['total_calls']}")
+            print(f" Avg cost: ${stats['avg_cost']:.4f}")
+            print(f" Success rate: {stats['avg_success_rate']:.1%}")
+
+            print(f"\n Models used: {', '.join(stats['models_used'])}")
+
+            if stats["performance_by_model"]:
+                print("\n Per-Model Performance:")
+                for model, perf in sorted(
+                    stats["performance_by_model"].items(),
+                    key=lambda x: x[1]["quality_score"],
+                    reverse=True,
+                ):
+                    print(f"\n {model}:")
+                    print(f" Calls: {perf['calls']}")
+                    print(f" Success rate: {perf['success_rate']:.1%}")
+                    print(f" Avg cost: ${perf['avg_cost']:.4f}")
+                    print(f" Avg latency: {perf['avg_latency_ms']:.0f}ms")
+                    print(f" Quality score: {perf['quality_score']:.2f}")
+
+        else:
+            # Show overall statistics
+            stats = tracker.get_stats(days=days)
+
+            if stats["total_calls"] == 0:
+                print("\n No telemetry data found.")
+                return 0
+
+            print(f"\n Period: Last {days} days")
+            print(f" Total calls: {stats['total_calls']:,}")
+            print(f" Total cost: ${stats['total_cost']:.2f}")
+            print(f" Cache hit rate: {stats['cache_hit_rate']:.1f}%")
+
+            print("\n Cost by Tier:")
+            for tier, cost in sorted(stats["by_tier"].items(), key=lambda x: x[1], reverse=True):
+                pct = (cost / stats["total_cost"] * 100) if stats["total_cost"] > 0 else 0
+                print(f" {tier:8s}: ${cost:6.2f} ({pct:5.1f}%)")
+
+            print("\n Top Workflows:")
+            for workflow_name, cost in list(stats["by_workflow"].items())[:5]:
+                pct = (cost / stats["total_cost"] * 100) if stats["total_cost"] > 0 else 0
+                print(f" {workflow_name:30s}: ${cost:6.2f} ({pct:5.1f}%)")
+
+        print("\n" + "-" * 70)
+        return 0
+
+    except ImportError as e:
+        print(f"❌ Adaptive routing not available: {e}")
+        print(" Ensure empathy-framework is installed with telemetry support")
+        return 1
+    except Exception as e:  # noqa: BLE001
+        # INTENTIONAL: CLI commands should catch all errors and report gracefully
+        logger.exception(f"Routing stats error: {e}")
+        print(f"❌ Error: {e}")
+        return 1
+
+
+def cmd_telemetry_routing_check(args: Namespace) -> int:
+    """Check for tier upgrade recommendations."""
+    try:
+        from empathy_os.models import AdaptiveModelRouter
+        from empathy_os.telemetry import UsageTracker
+
+        tracker = UsageTracker.get_instance()
+        router = AdaptiveModelRouter(telemetry=tracker)
+
+        workflow = args.workflow if hasattr(args, "workflow") and args.workflow else None
+        check_all = args.all if hasattr(args, "all") else False
+
+        print("\n🔍 Adaptive Routing Tier Upgrade Checks\n")
+        print("-" * 70)
+
+        if check_all:
+            # Check all workflows
+            stats = tracker.get_stats(days=7)
+            workflows = list(stats["by_workflow"].keys())
+
+            if not workflows:
+                print("\n No workflow data found.")
+                return 0
+
+            recommendations = []
+
+            for wf_name in workflows:
+                try:
+                    should_upgrade, reason = router.recommend_tier_upgrade(
+                        workflow=wf_name, stage=None
+                    )
+
+                    if should_upgrade:
+                        recommendations.append(
+                            {
+                                "workflow": wf_name,
+                                "reason": reason,
+                            }
+                        )
+                except Exception:  # noqa: BLE001
+                    # INTENTIONAL: Skip workflows without enough data
+                    continue
+
+            if recommendations:
+                print("\n ⚠️ Tier Upgrade Recommendations:\n")
+                for rec in recommendations:
+                    print(f" Workflow: {rec['workflow']}")
+                    print(f" Reason: {rec['reason']}")
+                    print()
+            else:
+                print("\n ✅ All workflows performing well - no upgrades needed.\n")
+
+        elif workflow:
+            # Check specific workflow
+            should_upgrade, reason = router.recommend_tier_upgrade(workflow=workflow, stage=None)
+
+            print(f"\n Workflow: {workflow}")
+
+            if should_upgrade:
+                print(f" Status: ⚠️ UPGRADE RECOMMENDED")
+                print(f" Reason: {reason}")
+                print("\n Action: Consider upgrading from CHEAP → CAPABLE or CAPABLE → PREMIUM")
+            else:
+                print(f" Status: ✅ Performing well")
+                print(f" Reason: {reason}")
+
+        else:
+            print("\n Error: Specify --workflow <name> or --all")
+            return 1
+
+        print("\n" + "-" * 70)
+        return 0
+
+    except ImportError as e:
+        print(f"❌ Adaptive routing not available: {e}")
+        return 1
+    except Exception as e:  # noqa: BLE001
+        # INTENTIONAL: CLI commands should catch all errors and report gracefully
+        logger.exception(f"Routing check error: {e}")
+        print(f"❌ Error: {e}")
+        return 1
+
+
+def cmd_telemetry_models(args: Namespace) -> int:
+    """Show model performance by provider."""
+    try:
+        from empathy_os.telemetry import UsageTracker
+
+        tracker = UsageTracker.get_instance()
+        provider = args.provider if hasattr(args, "provider") else None
+        days = args.days if hasattr(args, "days") else 7
+
+        stats = tracker.get_stats(days=days)
+
+        if stats["total_calls"] == 0:
+            print("\n No telemetry data found.")
+            return 0
+
+        print("\n📊 Model Performance\n")
+        print("-" * 70)
+        print(f"\n Period: Last {days} days")
+
+        # Get entries for analysis
+        entries = tracker.get_recent_entries(limit=10000, days=days)
+
+        # Group by provider and model
+        model_stats: dict[str, dict[str, dict]] = {}
+
+        for entry in entries:
+            entry_provider = entry.get("provider", "unknown")
+            if provider and entry_provider != provider:
+                continue
+
+            model = entry.get("model", "unknown")
+            cost = entry.get("cost", 0.0)
+            success = entry.get("success", True)
+            duration = entry.get("duration_ms", 0)
+
+            if entry_provider not in model_stats:
+                model_stats[entry_provider] = {}
+
+            if model not in model_stats[entry_provider]:
+                model_stats[entry_provider][model] = {
+                    "calls": 0,
+                    "total_cost": 0.0,
+                    "successes": 0,
+                    "total_duration": 0,
+                }
+
+            model_stats[entry_provider][model]["calls"] += 1
+            model_stats[entry_provider][model]["total_cost"] += cost
+            if success:
+                model_stats[entry_provider][model]["successes"] += 1
+            model_stats[entry_provider][model]["total_duration"] += duration
+
+        # Display by provider
+        for prov, models in sorted(model_stats.items()):
+            print(f"\n Provider: {prov.upper()}")
+
+            for model_name, mstats in sorted(
+                models.items(), key=lambda x: x[1]["total_cost"], reverse=True
+            ):
+                calls = mstats["calls"]
+                avg_cost = mstats["total_cost"] / calls if calls > 0 else 0
+                success_rate = (mstats["successes"] / calls * 100) if calls > 0 else 0
+                avg_duration = mstats["total_duration"] / calls if calls > 0 else 0
+
+                print(f"\n {model_name}:")
+                print(f" Calls: {calls:,}")
+                print(f" Total cost: ${mstats['total_cost']:.2f}")
+                print(f" Avg cost: ${avg_cost:.4f}")
+                print(f" Success rate: {success_rate:.1f}%")
+                print(f" Avg duration: {avg_duration:.0f}ms")
+
+        print("\n" + "-" * 70)
+        return 0
+
+    except ImportError as e:
+        print(f"❌ Telemetry not available: {e}")
+        return 1
+    except Exception as e:  # noqa: BLE001
+        # INTENTIONAL: CLI commands should catch all errors and report gracefully
+        logger.exception(f"Models error: {e}")
+        print(f"❌ Error: {e}")
+        return 1
+
+
+def cmd_telemetry_agents(args: Namespace) -> int:
+    """Show active agents and their status."""
+    try:
+        from empathy_os.telemetry import HeartbeatCoordinator
+
+        coordinator = HeartbeatCoordinator()
+        active_agents = coordinator.get_active_agents()
+
+        print("\n🤖 Active Agents\n")
+        print("-" * 70)
+
+        if not active_agents:
+            print("\n No active agents found.")
+            print(" (Agents must use HeartbeatCoordinator to be tracked)")
+            return 0
+
+        print(f"\n Found {len(active_agents)} active agent(s):\n")
+
+        for agent in sorted(active_agents, key=lambda a: a.last_beat, reverse=True):
+            # Calculate time since last beat
+            from datetime import datetime
+
+            now = datetime.utcnow()
+            time_since = (now - agent.last_beat).total_seconds()
+
+            # Status indicator
+            if agent.status in ("completed", "failed", "cancelled"):
+                status_icon = "✅" if agent.status == "completed" else "❌"
+            elif time_since > 30:
+                status_icon = "⚠️"  # Stale
+            else:
+                status_icon = "🟢"  # Active
+
+            print(f" {status_icon} {agent.agent_id}")
+            print(f" Status: {agent.status}")
+            print(f" Progress: {agent.progress*100:.1f}%")
+            print(f" Task: {agent.current_task}")
+            print(f" Last beat: {time_since:.1f}s ago")
+
+            # Show metadata if present
+            if agent.metadata:
+                workflow = agent.metadata.get("workflow", "")
+                if workflow:
+                    print(f" Workflow: {workflow}")
+            print()
+
+        print("-" * 70)
+        return 0
+
+    except ImportError as e:
+        print(f"❌ Agent tracking not available: {e}")
+        return 1
+    except Exception as e:  # noqa: BLE001
+        # INTENTIONAL: CLI commands should catch all errors and report gracefully
+        logger.exception(f"Agents error: {e}")
+        print(f"❌ Error: {e}")
+        return 1
+
+
+def cmd_telemetry_signals(args: Namespace) -> int:
+    """Show coordination signals."""
+    try:
+        from empathy_os.telemetry import CoordinationSignals
+
+        agent_id = args.agent if hasattr(args, "agent") else None
+
+        if not agent_id:
+            print("❌ Error: --agent <id> required to view signals")
+            return 1
+
+        coordinator = CoordinationSignals(agent_id=agent_id)
+        signals = coordinator.get_pending_signals()
+
+        print(f"\n📡 Coordination Signals for {agent_id}\n")
+        print("-" * 70)
+
+        if not signals:
+            print("\n No pending signals.")
+            return 0
+
+        print(f"\n Found {len(signals)} pending signal(s):\n")
+
+        for signal in sorted(signals, key=lambda s: s.timestamp, reverse=True):
+            # Calculate age
+            from datetime import datetime
+
+            now = datetime.utcnow()
+            age = (now - signal.timestamp).total_seconds()
+
+            # Signal type indicator
+            type_icons = {
+                "task_complete": "✅",
+                "abort": "🛑",
+                "ready": "🟢",
+                "checkpoint": "🔄",
+                "error": "❌",
+            }
+            icon = type_icons.get(signal.signal_type, "📨")
+
+            print(f" {icon} {signal.signal_type}")
+            print(f" From: {signal.source_agent}")
+            print(f" Target: {signal.target_agent or '* (broadcast)'}")
+            print(f" Age: {age:.1f}s")
+            print(f" Expires in: {signal.ttl_seconds - age:.1f}s")
+
+            # Show payload summary
+            if signal.payload:
+                payload_str = str(signal.payload)
+                if len(payload_str) > 60:
+                    payload_str = payload_str[:57] + "..."
+                print(f" Payload: {payload_str}")
+            print()
+
+        print("-" * 70)
+        return 0
+
+    except ImportError as e:
+        print(f"❌ Coordination signals not available: {e}")
+        return 1
+    except Exception as e:  # noqa: BLE001
+        # INTENTIONAL: CLI commands should catch all errors and report gracefully
+        logger.exception(f"Signals error: {e}")
+        print(f"❌ Error: {e}")
+        return 1
+
+
 # =============================================================================
 # Provider Commands
 # =============================================================================
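The new subcommands above are thin wrappers around the routing and telemetry APIs added in this release. A minimal sketch of the same calls used directly, mirroring the usage visible in cmd_telemetry_routing_stats and cmd_telemetry_routing_check; the workflow name is a placeholder:

# Sketch only: programmatic use of the APIs the CLI commands above wrap.
from empathy_os.models import AdaptiveModelRouter
from empathy_os.telemetry import UsageTracker

tracker = UsageTracker.get_instance()
router = AdaptiveModelRouter(telemetry=tracker)

# Per-workflow routing statistics ("my-workflow" is hypothetical).
stats = router.get_routing_stats(workflow="my-workflow", stage=None, days=7)
if stats["total_calls"] > 0:
    print(stats["avg_cost"], stats["avg_success_rate"], stats["models_used"])

# Tier upgrade recommendation, as used by `empathy telemetry routing-check`.
should_upgrade, reason = router.recommend_tier_upgrade(workflow="my-workflow", stage=None)
if should_upgrade:
    print(f"Upgrade suggested: {reason}")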
@@ -412,6 +804,44 @@ def cmd_provider_set(args: Namespace) -> int:
         return 1
 
 
+# =============================================================================
+# Dashboard Commands
+# =============================================================================
+
+
+def cmd_dashboard_start(args: Namespace) -> int:
+    """Start the agent coordination dashboard."""
+    try:
+        from empathy_os.dashboard import run_standalone_dashboard
+
+        # Get host and port from args
+        host = args.host
+        port = args.port
+
+        print(f"\n🚀 Starting Agent Coordination Dashboard...")
+        print(f"📊 Dashboard will be available at: http://{host}:{port}\n")
+        print("💡 Make sure Redis is populated with test data:")
+        print(" python scripts/populate_redis_direct.py\n")
+        print("Press Ctrl+C to stop\n")
+
+        # Start dashboard
+        run_standalone_dashboard(host=host, port=port)
+        return 0
+
+    except KeyboardInterrupt:
+        print("\n\n🛑 Dashboard stopped")
+        return 0
+    except ImportError as e:
+        print(f"❌ Dashboard not available: {e}")
+        print(" Install dashboard dependencies: pip install redis")
+        return 1
+    except Exception as e:  # noqa: BLE001
+        # INTENTIONAL: CLI commands should catch all errors and report gracefully
+        logger.exception(f"Dashboard error: {e}")
+        print(f"❌ Error starting dashboard: {e}")
+        return 1
+
+
 # =============================================================================
 # Utility Commands
 # =============================================================================
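cmd_dashboard_start above delegates to run_standalone_dashboard from the new empathy_os.dashboard package (see standalone_server.py in the file list). A minimal sketch of the same call without the CLI wrapper, using the defaults the CLI declares for --host and --port:

# Sketch only: starting the dashboard directly, bypassing argparse.
from empathy_os.dashboard import run_standalone_dashboard

# Blocking call; the CLI wraps it in a KeyboardInterrupt handler for Ctrl+C.
run_standalone_dashboard(host="127.0.0.1", port=8000)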
@@ -508,6 +938,8 @@ def cmd_version(args: Namespace) -> int:
     return 0
 
 
+# =============================================================================
+# Convenience Commands (Keyword Shortcuts)
 # =============================================================================
 # Main Entry Point
 # =============================================================================

@@ -517,14 +949,16 @@ def create_parser() -> argparse.ArgumentParser:
     """Create the argument parser."""
     parser = argparse.ArgumentParser(
         prog="empathy",
-        description="Empathy Framework CLI
+        description="Empathy Framework CLI (automation interface - for git hooks, scripts, CI/CD)",
         formatter_class=argparse.RawDescriptionHelpFormatter,
         epilog="""
-
-
-
-/
-/
+NOTE: This CLI is for automation only. For interactive development,
+use Claude Code skills in VSCode or Claude Desktop:
+
+/dev Developer tools (commit, review, debug, refactor)
+/testing Run tests, coverage, generate tests
+/workflows AI-powered workflows (security, bug prediction)
+/learning Session evaluation
 
 Documentation: https://smartaimemory.com/framework-docs/
 """,

@@ -584,6 +1018,44 @@ Documentation: https://smartaimemory.com/framework-docs/
         "--days", "-d", type=int, default=30, help="Number of days (default: 30)"
     )
 
+    # telemetry routing-stats
+    routing_stats_parser = telemetry_sub.add_parser(
+        "routing-stats", help="Show adaptive routing statistics"
+    )
+    routing_stats_parser.add_argument("--workflow", "-w", help="Workflow name")
+    routing_stats_parser.add_argument("--stage", "-s", help="Stage name")
+    routing_stats_parser.add_argument(
+        "--days", "-d", type=int, default=7, help="Number of days (default: 7)"
+    )
+
+    # telemetry routing-check
+    routing_check_parser = telemetry_sub.add_parser(
+        "routing-check", help="Check for tier upgrade recommendations"
+    )
+    routing_check_parser.add_argument("--workflow", "-w", help="Workflow name")
+    routing_check_parser.add_argument(
+        "--all", "-a", action="store_true", help="Check all workflows"
+    )
+
+    # telemetry models
+    models_parser = telemetry_sub.add_parser("models", help="Show model performance by provider")
+    models_parser.add_argument(
+        "--provider",
+        "-p",
+        choices=["anthropic", "openai", "google"],
+        help="Filter by provider",
+    )
+    models_parser.add_argument(
+        "--days", "-d", type=int, default=7, help="Number of days (default: 7)"
+    )
+
+    # telemetry agents
+    telemetry_sub.add_parser("agents", help="Show active agents and their status")
+
+    # telemetry signals
+    signals_parser = telemetry_sub.add_parser("signals", help="Show coordination signals")
+    signals_parser.add_argument("--agent", "-a", required=True, help="Agent ID to view signals for")
+
     # --- Provider commands ---
     provider_parser = subparsers.add_parser("provider", help="LLM provider configuration")
     provider_sub = provider_parser.add_subparsers(dest="provider_command")
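The subparser wiring above defines what the new telemetry subcommands accept. A small sketch that checks the declared defaults through create_parser(); it assumes the telemetry subparsers use dest="telemetry_command", as the dispatch in main() further down implies:

# Sketch only: inspecting the new argparse wiring.
from empathy_os.cli_minimal import create_parser

parser = create_parser()
args = parser.parse_args(["telemetry", "routing-stats", "--workflow", "my-workflow"])
assert args.telemetry_command == "routing-stats"
assert args.days == 7  # default=7 declared on the routing-stats subparser
assert args.stage is None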
@@ -595,12 +1067,20 @@ Documentation: https://smartaimemory.com/framework-docs/
     set_parser = provider_sub.add_parser("set", help="Set provider")
     set_parser.add_argument("name", choices=["anthropic", "openai", "hybrid"], help="Provider name")
 
+    # --- Dashboard commands ---
+    dashboard_parser = subparsers.add_parser("dashboard", help="Agent coordination dashboard")
+    dashboard_sub = dashboard_parser.add_subparsers(dest="dashboard_command")
+
+    # dashboard start
+    start_parser = dashboard_sub.add_parser("start", help="Start dashboard web server")
+    start_parser.add_argument("--host", default="127.0.0.1", help="Host to bind to (default: 127.0.0.1)")
+    start_parser.add_argument("--port", type=int, default=8000, help="Port to bind to (default: 8000)")
+
     # --- Utility commands ---
     subparsers.add_parser("validate", help="Validate configuration")
 
     version_parser = subparsers.add_parser("version", help="Show version")
     version_parser.add_argument("-v", "--verbose", action="store_true", help="Show detailed info")
-
     return parser
 
 

@@ -634,8 +1114,18 @@ def main(argv: list[str] | None = None) -> int:
             return cmd_telemetry_savings(args)
         elif args.telemetry_command == "export":
             return cmd_telemetry_export(args)
+        elif args.telemetry_command == "routing-stats":
+            return cmd_telemetry_routing_stats(args)
+        elif args.telemetry_command == "routing-check":
+            return cmd_telemetry_routing_check(args)
+        elif args.telemetry_command == "models":
+            return cmd_telemetry_models(args)
+        elif args.telemetry_command == "agents":
+            return cmd_telemetry_agents(args)
+        elif args.telemetry_command == "signals":
+            return cmd_telemetry_signals(args)
         else:
-            print("Usage: empathy telemetry {show|savings|export}")
+            print("Usage: empathy telemetry {show|savings|export|routing-stats|routing-check|models|agents|signals}")
             return 1
 
     elif args.command == "provider":

@@ -647,6 +1137,13 @@ def main(argv: list[str] | None = None) -> int:
             print("Usage: empathy provider {show|set}")
             return 1
 
+    elif args.command == "dashboard":
+        if args.dashboard_command == "start":
+            return cmd_dashboard_start(args)
+        else:
+            print("Usage: empathy dashboard start [--host HOST] [--port PORT]")
+            return 1
+
     elif args.command == "validate":
         return cmd_validate(args)
 
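Each cmd_* handler above returns an int exit code and main() propagates it, so the CLI composes with git hooks and CI steps. A final sketch of that pattern; the dashboard arguments shown are simply the defaults added in this release:

# Sketch only: exit-code handling for scripted use of the new dashboard command.
import sys

from empathy_os.cli_minimal import main

if __name__ == "__main__":
    # Equivalent to `empathy dashboard start --host 127.0.0.1 --port 8000`.
    sys.exit(main(["dashboard", "start", "--host", "127.0.0.1", "--port", "8000"]))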
|