empathy-framework 3.8.3__py3-none-any.whl → 3.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/METADATA +67 -7
- {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/RECORD +50 -39
- {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/top_level.txt +0 -4
- empathy_os/.empathy/costs.json +60 -0
- empathy_os/.empathy/discovery_stats.json +15 -0
- empathy_os/.empathy/workflow_runs.json +45 -0
- empathy_os/cli.py +372 -13
- empathy_os/cli_unified.py +111 -0
- empathy_os/config/xml_config.py +45 -3
- empathy_os/config.py +46 -2
- empathy_os/memory/control_panel.py +128 -8
- empathy_os/memory/long_term.py +26 -4
- empathy_os/memory/short_term.py +110 -0
- empathy_os/models/token_estimator.py +25 -0
- empathy_os/pattern_library.py +81 -8
- empathy_os/patterns/debugging/all_patterns.json +81 -0
- empathy_os/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- empathy_os/patterns/refactoring_memory.json +89 -0
- empathy_os/telemetry/__init__.py +11 -0
- empathy_os/telemetry/cli.py +451 -0
- empathy_os/telemetry/usage_tracker.py +475 -0
- {test_generator → empathy_os/test_generator}/generator.py +1 -0
- empathy_os/tier_recommender.py +422 -0
- empathy_os/workflows/base.py +223 -23
- empathy_os/workflows/config.py +50 -5
- empathy_os/workflows/tier_tracking.py +408 -0
- {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/WHEEL +0 -0
- {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/entry_points.txt +0 -0
- {empathy_framework-3.8.3.dist-info → empathy_framework-3.9.1.dist-info}/licenses/LICENSE +0 -0
- {hot_reload → empathy_os/hot_reload}/README.md +0 -0
- {hot_reload → empathy_os/hot_reload}/__init__.py +0 -0
- {hot_reload → empathy_os/hot_reload}/config.py +0 -0
- {hot_reload → empathy_os/hot_reload}/integration.py +0 -0
- {hot_reload → empathy_os/hot_reload}/reloader.py +0 -0
- {hot_reload → empathy_os/hot_reload}/watcher.py +0 -0
- {hot_reload → empathy_os/hot_reload}/websocket.py +0 -0
- {scaffolding → empathy_os/scaffolding}/README.md +0 -0
- {scaffolding → empathy_os/scaffolding}/__init__.py +0 -0
- {scaffolding → empathy_os/scaffolding}/__main__.py +0 -0
- {scaffolding → empathy_os/scaffolding}/cli.py +0 -0
- {test_generator → empathy_os/test_generator}/__init__.py +0 -0
- {test_generator → empathy_os/test_generator}/__main__.py +0 -0
- {test_generator → empathy_os/test_generator}/cli.py +0 -0
- {test_generator → empathy_os/test_generator}/risk_analyzer.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/__init__.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/behavior.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/core.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/output.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/registry.py +0 -0
- {workflow_patterns → empathy_os/workflow_patterns}/structural.py +0 -0
empathy_os/cli.py
CHANGED
@@ -16,6 +16,7 @@ import argparse
 import sys
 import time
 from importlib.metadata import version as get_version
+from pathlib import Path

 from empathy_os import EmpathyConfig, EmpathyOS, load_config
 from empathy_os.cost_tracker import cmd_costs
@@ -37,9 +38,71 @@ from empathy_os.workflows import (
 )
 from empathy_os.workflows import list_workflows as get_workflow_list

+# Import telemetry CLI commands
+try:
+    from empathy_os.telemetry.cli import (
+        cmd_telemetry_compare,
+        cmd_telemetry_export,
+        cmd_telemetry_reset,
+        cmd_telemetry_savings,
+        cmd_telemetry_show,
+    )
+    TELEMETRY_CLI_AVAILABLE = True
+except ImportError:
+    TELEMETRY_CLI_AVAILABLE = False
+
 logger = get_logger(__name__)


+# =============================================================================
+# SECURITY UTILITIES
+# =============================================================================
+
+
+def _validate_file_path(path: str, allowed_dir: str | None = None) -> Path:
+    """Validate file path to prevent path traversal and arbitrary writes.
+
+    Args:
+        path: Path to validate
+        allowed_dir: Optional directory that must contain the path
+
+    Returns:
+        Resolved absolute Path object
+
+    Raises:
+        ValueError: If path is invalid or outside allowed directory
+
+    """
+    if not path or not isinstance(path, str):
+        raise ValueError("path must be a non-empty string")
+
+    # Check for null bytes
+    if "\x00" in path:
+        raise ValueError("path contains null bytes")
+
+    try:
+        # Resolve to absolute path
+        resolved = Path(path).resolve()
+    except (OSError, RuntimeError) as e:
+        raise ValueError(f"Invalid path: {e}")
+
+    # Check if within allowed directory
+    if allowed_dir:
+        try:
+            allowed = Path(allowed_dir).resolve()
+            resolved.relative_to(allowed)
+        except ValueError:
+            raise ValueError(f"path must be within {allowed_dir}")
+
+    # Check for dangerous system paths
+    dangerous_paths = ["/etc", "/sys", "/proc", "/dev"]
+    for dangerous in dangerous_paths:
+        if str(resolved).startswith(dangerous):
+            raise ValueError(f"Cannot write to system directory: {dangerous}")
+
+    return resolved
+
+
 # =============================================================================
 # CHEATSHEET DATA - Quick reference for all commands
 # =============================================================================
@@ -603,11 +666,114 @@ def cmd_achievements(args):
     print()


+def cmd_tier_recommend(args):
+    """Get intelligent tier recommendation for a bug/task."""
+    from empathy_os.tier_recommender import TierRecommender
+
+    recommender = TierRecommender()
+
+    # Get recommendation
+    result = recommender.recommend(
+        bug_description=args.description,
+        files_affected=args.files.split(",") if args.files else None,
+        complexity_hint=args.complexity
+    )
+
+    # Display results
+    print()
+    print("=" * 60)
+    print(" TIER RECOMMENDATION")
+    print("=" * 60)
+    print()
+    print(f" Bug/Task: {args.description}")
+    print()
+    print(f" 📍 Recommended Tier: {result.tier}")
+    print(f" 🎯 Confidence: {result.confidence * 100:.1f}%")
+    print(f" 💰 Expected Cost: ${result.expected_cost:.3f}")
+    print(f" 🔄 Expected Attempts: {result.expected_attempts:.1f}")
+    print()
+    print(f" 📊 Reasoning:")
+    print(f" {result.reasoning}")
+    print()
+
+    if result.similar_patterns_count > 0:
+        print(f" ✅ Based on {result.similar_patterns_count} similar patterns")
+    else:
+        print(f" ⚠️ No historical data - using conservative default")
+
+    if result.fallback_used:
+        print()
+        print(" 💡 Tip: As more patterns are collected, recommendations")
+        print(" will become more accurate and personalized.")
+
+    print()
+    print("=" * 60)
+    print()
+
+
+def cmd_tier_stats(args):
+    """Show tier pattern learning statistics."""
+    from empathy_os.tier_recommender import TierRecommender
+
+    recommender = TierRecommender()
+    stats = recommender.get_stats()
+
+    print()
+    print("=" * 60)
+    print(" TIER PATTERN LEARNING STATS")
+    print("=" * 60)
+    print()
+
+    if stats.get("total_patterns", 0) == 0:
+        print(" No patterns collected yet.")
+        print()
+        print(" 💡 Patterns are automatically collected as you use")
+        print(" cascading workflows with enhanced tracking enabled.")
+        print()
+        print("=" * 60)
+        print()
+        return
+
+    print(f" Total Patterns: {stats['total_patterns']}")
+    print(f" Avg Savings: {stats['avg_savings_percent']}%")
+    print()
+
+    print(" TIER DISTRIBUTION")
+    print(" " + "-" * 40)
+    for tier, count in stats["patterns_by_tier"].items():
+        percent = (count / stats["total_patterns"]) * 100
+        bar = "█" * int(percent / 5)
+        print(f" {tier:10} {count:3} ({percent:5.1f}%) {bar}")
+    print()
+
+    print(" BUG TYPE DISTRIBUTION")
+    print(" " + "-" * 40)
+    sorted_types = sorted(
+        stats["bug_type_distribution"].items(),
+        key=lambda x: x[1],
+        reverse=True
+    )
+    for bug_type, count in sorted_types[:10]:
+        percent = (count / stats["total_patterns"]) * 100
+        print(f" {bug_type:20} {count:3} ({percent:5.1f}%)")
+
+    print()
+    print("=" * 60)
+    print()
+
+
 def cmd_init(args):
-    """Initialize a new Empathy Framework project
+    """Initialize a new Empathy Framework project
+
+    Raises:
+        ValueError: If output path is invalid or unsafe
+    """
     config_format = args.format
     output_path = args.output or f"empathy.config.{config_format}"

+    # Validate output path to prevent path traversal attacks
+    validated_path = _validate_file_path(output_path)
+
     logger.info(f"Initializing new Empathy Framework project with format: {config_format}")

     # Create default config
@@ -615,13 +781,13 @@ def cmd_init(args):

     # Save to file
     if config_format == "yaml":
-        config.to_yaml(
+        config.to_yaml(str(validated_path))
         logger.info(f"Created YAML configuration file: {output_path}")
         logger.info(f"✓ Created YAML configuration: {output_path}")
     elif config_format == "json":
-        config.to_json(
-        logger.info(f"Created JSON configuration file: {
-        logger.info(f"✓ Created JSON configuration: {
+        config.to_json(str(validated_path))
+        logger.info(f"Created JSON configuration file: {validated_path}")
+        logger.info(f"✓ Created JSON configuration: {validated_path}")

     logger.info("\nNext steps:")
     logger.info(f" 1. Edit {output_path} to customize settings")
@@ -727,7 +893,11 @@ def cmd_patterns_list(args):


 def cmd_patterns_export(args):
-    """Export patterns from one format to another
+    """Export patterns from one format to another
+
+    Raises:
+        ValueError: If output path is invalid
+    """
     input_file = args.input
     input_format = args.input_format
     output_file = args.output
@@ -764,12 +934,15 @@ def cmd_patterns_export(args):
         logger.error(f"✗ Failed to load patterns: {e}")
         sys.exit(1)

+    # Validate output path
+    validated_output = _validate_file_path(output_file)
+
     # Save to output format
     try:
         if output_format == "json":
-            PatternPersistence.save_to_json(library,
+            PatternPersistence.save_to_json(library, str(validated_output))
         elif output_format == "sqlite":
-            PatternPersistence.save_to_sqlite(library,
+            PatternPersistence.save_to_sqlite(library, str(validated_output))

         logger.info(f"Saved {len(library.patterns)} patterns to {output_file}")
         logger.info(f"✓ Saved {len(library.patterns)} patterns to {output_file}")
@@ -1349,7 +1522,11 @@ def cmd_inspect(args):


 def cmd_export(args):
-    """Export patterns to file for sharing/backup
+    """Export patterns to file for sharing/backup
+
+    Raises:
+        ValueError: If output path is invalid
+    """
     output_file = args.output
     user_id = args.user_id
     db_path = args.db or ".empathy/patterns.db"
@@ -1373,6 +1550,9 @@ def cmd_export(args):

     print(f" Found {len(patterns)} patterns")

+    # Validate output path
+    validated_output = _validate_file_path(output_file)
+
     if format_type == "json":
         # Create filtered library if user_id specified
         if user_id:
@@ -1383,7 +1563,7 @@ def cmd_export(args):
             filtered_library = library

         # Export as JSON
-        PatternPersistence.save_to_json(filtered_library,
+        PatternPersistence.save_to_json(filtered_library, str(validated_output))
         print(f" ✓ Exported {len(patterns)} patterns to {output_file}")
     else:
         print(f"✗ Unsupported format: {format_type}")
@@ -1682,12 +1862,18 @@ def cmd_provider_set(args):


 def cmd_sync_claude(args):
-    """Sync patterns to Claude Code rules directory.
+    """Sync patterns to Claude Code rules directory.
+
+    Raises:
+        ValueError: If output path is invalid
+    """
     import json as json_mod
     from pathlib import Path

     patterns_dir = Path(args.patterns_dir)
-
+    # Validate output directory path
+    validated_output_dir = _validate_file_path(args.output_dir)
+    output_dir = validated_output_dir

     print("=" * 60)
     print(" SYNC PATTERNS TO CLAUDE CODE")
@@ -1723,7 +1909,9 @@ def cmd_sync_claude(args):

         # Write rule file
         rule_file = output_dir / f"{category}.md"
-
+        # Validate rule file path before writing
+        validated_rule_file = _validate_file_path(str(rule_file), allowed_dir=str(output_dir))
+        with open(validated_rule_file, "w") as f:
             f.write(rule_content)

         print(f" ✓ {category}: {len(patterns)} patterns → {rule_file}")
@@ -2206,6 +2394,51 @@ def cmd_frameworks(args):
     return 0


+# =============================================================================
+# Telemetry CLI Command Wrappers
+# =============================================================================
+
+
+def _cmd_telemetry_show(args):
+    """Wrapper for telemetry show command."""
+    if not TELEMETRY_CLI_AVAILABLE:
+        print("Telemetry commands not available. Install telemetry dependencies.")
+        return 1
+    return cmd_telemetry_show(args)
+
+
+def _cmd_telemetry_savings(args):
+    """Wrapper for telemetry savings command."""
+    if not TELEMETRY_CLI_AVAILABLE:
+        print("Telemetry commands not available. Install telemetry dependencies.")
+        return 1
+    return cmd_telemetry_savings(args)
+
+
+def _cmd_telemetry_compare(args):
+    """Wrapper for telemetry compare command."""
+    if not TELEMETRY_CLI_AVAILABLE:
+        print("Telemetry commands not available. Install telemetry dependencies.")
+        return 1
+    return cmd_telemetry_compare(args)
+
+
+def _cmd_telemetry_reset(args):
+    """Wrapper for telemetry reset command."""
+    if not TELEMETRY_CLI_AVAILABLE:
+        print("Telemetry commands not available. Install telemetry dependencies.")
+        return 1
+    return cmd_telemetry_reset(args)
+
+
+def _cmd_telemetry_export(args):
+    """Wrapper for telemetry export command."""
+    if not TELEMETRY_CLI_AVAILABLE:
+        print("Telemetry commands not available. Install telemetry dependencies.")
+        return 1
+    return cmd_telemetry_export(args)
+
+
 def main():
     """Main CLI entry point"""
     # Configure Windows-compatible asyncio event loop policy
@@ -2607,6 +2840,98 @@ def main():
     parser_costs.add_argument("--json", action="store_true", help="Output as JSON")
     parser_costs.set_defaults(func=cmd_costs)

+    # Telemetry commands (usage tracking)
+    parser_telemetry = subparsers.add_parser(
+        "telemetry",
+        help="View and manage local usage telemetry",
+    )
+    telemetry_subparsers = parser_telemetry.add_subparsers(dest="telemetry_command")
+
+    # Telemetry show command
+    parser_telemetry_show = telemetry_subparsers.add_parser(
+        "show",
+        help="Show recent LLM calls",
+    )
+    parser_telemetry_show.add_argument(
+        "--limit",
+        type=int,
+        default=20,
+        help="Number of entries to show (default: 20)",
+    )
+    parser_telemetry_show.add_argument(
+        "--days",
+        type=int,
+        help="Only show entries from last N days",
+    )
+    parser_telemetry_show.set_defaults(func=lambda args: _cmd_telemetry_show(args))
+
+    # Telemetry savings command
+    parser_telemetry_savings = telemetry_subparsers.add_parser(
+        "savings",
+        help="Calculate cost savings vs baseline",
+    )
+    parser_telemetry_savings.add_argument(
+        "--days",
+        type=int,
+        default=30,
+        help="Number of days to analyze (default: 30)",
+    )
+    parser_telemetry_savings.set_defaults(func=lambda args: _cmd_telemetry_savings(args))
+
+    # Telemetry compare command
+    parser_telemetry_compare = telemetry_subparsers.add_parser(
+        "compare",
+        help="Compare two time periods",
+    )
+    parser_telemetry_compare.add_argument(
+        "--period1",
+        type=int,
+        default=7,
+        help="First period in days (default: 7)",
+    )
+    parser_telemetry_compare.add_argument(
+        "--period2",
+        type=int,
+        default=30,
+        help="Second period in days (default: 30)",
+    )
+    parser_telemetry_compare.set_defaults(func=lambda args: _cmd_telemetry_compare(args))
+
+    # Telemetry reset command
+    parser_telemetry_reset = telemetry_subparsers.add_parser(
+        "reset",
+        help="Clear all telemetry data",
+    )
+    parser_telemetry_reset.add_argument(
+        "--confirm",
+        action="store_true",
+        help="Confirm deletion",
+    )
+    parser_telemetry_reset.set_defaults(func=lambda args: _cmd_telemetry_reset(args))
+
+    # Telemetry export command
+    parser_telemetry_export = telemetry_subparsers.add_parser(
+        "export",
+        help="Export telemetry data",
+    )
+    parser_telemetry_export.add_argument(
+        "--format",
+        choices=["json", "csv"],
+        default="json",
+        help="Export format (default: json)",
+    )
+    parser_telemetry_export.add_argument(
+        "--output",
+        "-o",
+        help="Output file (default: stdout)",
+    )
+    parser_telemetry_export.add_argument(
+        "--days",
+        type=int,
+        help="Only export last N days",
+    )
+    parser_telemetry_export.set_defaults(func=lambda args: _cmd_telemetry_export(args))
+
     # New command (project scaffolding)
     parser_new = subparsers.add_parser("new", help="Create a new project from a template")
     parser_new.add_argument(
@@ -2771,6 +3096,40 @@ def main():
     )
     parser_achievements.set_defaults(func=cmd_achievements)

+    # Tier recommendation commands (cascading tier optimization)
+    parser_tier = subparsers.add_parser(
+        "tier",
+        help="Intelligent tier recommendations for cascading workflows",
+    )
+    tier_subparsers = parser_tier.add_subparsers(dest="tier_command")
+
+    # tier recommend
+    parser_tier_recommend = tier_subparsers.add_parser(
+        "recommend",
+        help="Get tier recommendation for a bug/task",
+    )
+    parser_tier_recommend.add_argument(
+        "description",
+        help="Description of the bug or task",
+    )
+    parser_tier_recommend.add_argument(
+        "--files",
+        help="Comma-separated list of affected files (optional)",
+    )
+    parser_tier_recommend.add_argument(
+        "--complexity",
+        type=int,
+        help="Manual complexity hint 1-10 (optional)",
+    )
+    parser_tier_recommend.set_defaults(func=cmd_tier_recommend)
+
+    # tier stats
+    parser_tier_stats = tier_subparsers.add_parser(
+        "stats",
+        help="Show tier pattern learning statistics",
+    )
+    parser_tier_stats.set_defaults(func=cmd_tier_stats)
+
     # Wizard Factory commands (create wizards 12x faster)
     add_wizard_factory_commands(subparsers)

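Note: the new `tier recommend` CLI command is a thin wrapper around `TierRecommender`, so the recommender can also be called directly. A minimal sketch, using only the names visible in the diff above (`recommend()`, `result.tier`, `result.confidence`, `result.reasoning`, `result.fallback_used`); the example bug description and file list are placeholders:

    # Sketch: calling the tier recommender directly instead of via the CLI
    from empathy_os.tier_recommender import TierRecommender

    recommender = TierRecommender()
    result = recommender.recommend(
        bug_description="NoneType error when exporting patterns",  # placeholder
        files_affected=["empathy_os/cli.py"],                      # placeholder
        complexity_hint=3,
    )

    print(result.tier)                 # recommended tier label
    print(f"{result.confidence:.0%}")  # confidence score
    print(result.reasoning)            # explanation of the choice
    if result.fallback_used:
        print("No historical patterns yet; conservative default used.")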
empathy_os/cli_unified.py
CHANGED
@@ -513,6 +513,117 @@ def workflow_recommend(
     )


+# =============================================================================
+# TIER RECOMMENDATION SUBCOMMAND GROUP
+# =============================================================================
+
+tier_app = typer.Typer(help="Intelligent tier recommendations for cascading workflows")
+app.add_typer(tier_app, name="tier")
+
+
+@tier_app.command("recommend")
+def tier_recommend(
+    description: str = typer.Argument(..., help="Description of the bug or task"),
+    files: str = typer.Option(None, "--files", "-f", help="Comma-separated list of affected files"),
+    complexity: int = typer.Option(None, "--complexity", "-c", help="Manual complexity hint 1-10"),
+):
+    """Get intelligent tier recommendation for a bug/task."""
+    from empathy_os.tier_recommender import TierRecommender
+
+    recommender = TierRecommender()
+
+    # Get recommendation
+    result = recommender.recommend(
+        bug_description=description,
+        files_affected=files.split(",") if files else None,
+        complexity_hint=complexity,
+    )
+
+    # Display results
+    console.print()
+    console.print("=" * 60)
+    console.print(" [bold]TIER RECOMMENDATION[/bold]")
+    console.print("=" * 60)
+    console.print()
+    console.print(f" [dim]Bug/Task:[/dim] {description}")
+    console.print()
+    console.print(f" 📍 [bold]Recommended Tier:[/bold] {result.tier}")
+    console.print(f" 🎯 [bold]Confidence:[/bold] {result.confidence * 100:.1f}%")
+    console.print(f" 💰 [bold]Expected Cost:[/bold] ${result.expected_cost:.3f}")
+    console.print(f" 🔄 [bold]Expected Attempts:[/bold] {result.expected_attempts:.1f}")
+    console.print()
+    console.print(f" 📊 [bold]Reasoning:[/bold]")
+    console.print(f" {result.reasoning}")
+    console.print()
+
+    if result.fallback_used:
+        console.print(" ⚠️ [yellow]No historical data - using conservative default[/yellow]")
+        console.print()
+        console.print(" 💡 [dim]Tip: As more patterns are collected, recommendations[/dim]")
+        console.print(" [dim]will become more accurate and personalized.[/dim]")
+    else:
+        console.print(f" ✅ Based on {result.similar_patterns_count} similar patterns")
+
+    console.print()
+    console.print("=" * 60)
+    console.print()
+
+
+@tier_app.command("stats")
+def tier_stats():
+    """Show tier pattern learning statistics."""
+    from empathy_os.tier_recommender import TierRecommender
+
+    recommender = TierRecommender()
+    stats = recommender.get_stats()
+
+    if stats.get("total_patterns") == 0:
+        console.print()
+        console.print(" [yellow]No patterns loaded yet.[/yellow]")
+        console.print()
+        console.print(" 💡 [dim]Patterns are collected automatically as you use")
+        console.print(" cascading workflows. Run a few workflows first.[/dim]")
+        console.print()
+        return
+
+    # Display statistics
+    console.print()
+    console.print("=" * 60)
+    console.print(" [bold]TIER PATTERN LEARNING STATS[/bold]")
+    console.print("=" * 60)
+    console.print()
+    console.print(f" [bold]Total Patterns:[/bold] {stats['total_patterns']}")
+    console.print(f" [bold]Avg Savings:[/bold] {stats['avg_savings_percent']}%")
+    console.print()
+    console.print(" [bold]TIER DISTRIBUTION[/bold]")
+    console.print(" " + "-" * 40)
+
+    tier_dist = stats["patterns_by_tier"]
+    total = stats["total_patterns"]
+    max_bar_width = 20
+
+    for tier in ["CHEAP", "CAPABLE", "PREMIUM"]:
+        count = tier_dist.get(tier, 0)
+        percent = (count / total * 100) if total > 0 else 0
+        bar_width = int(percent / 100 * max_bar_width)
+        bar = "█" * bar_width
+        console.print(f" {tier:<12} {count:>2} ({percent:>5.1f}%) {bar}")
+
+    console.print()
+    console.print(" [bold]BUG TYPE DISTRIBUTION[/bold]")
+    console.print(" " + "-" * 40)
+
+    for bug_type, count in sorted(
+        stats["bug_type_distribution"].items(), key=lambda x: x[1], reverse=True
+    ):
+        percent = (count / total * 100) if total > 0 else 0
+        console.print(f" {bug_type:<20} {count:>2} ({percent:>5.1f}%)")
+
+    console.print()
+    console.print("=" * 60)
+    console.print()
+
+
 # =============================================================================
 # UTILITY COMMANDS
 # =============================================================================
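For context, the hunk above uses Typer's sub-application pattern: a separate `typer.Typer()` instance is registered on the main app under the `tier` name, which makes `recommend` and `stats` available as `tier recommend` / `tier stats` subcommands. A stripped-down sketch of that pattern (command body omitted; `app` and `console` exist in the real module, here they are stand-ins):

    # Sketch of the Typer sub-app pattern used in cli_unified.py
    import typer

    app = typer.Typer()  # stand-in for the module's main app
    tier_app = typer.Typer(help="Intelligent tier recommendations for cascading workflows")
    app.add_typer(tier_app, name="tier")

    @tier_app.command("recommend")
    def tier_recommend(description: str = typer.Argument(..., help="Description of the bug or task")):
        # Real command calls TierRecommender().recommend(...); body omitted here
        typer.echo(f"recommendation for: {description}")

    if __name__ == "__main__":
        app()  # e.g. `python cli.py tier recommend "fix the export bug"`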
empathy_os/config/xml_config.py
CHANGED
@@ -18,6 +18,48 @@ from dataclasses import asdict, dataclass, field
 from pathlib import Path


+def _validate_file_path(path: str, allowed_dir: str | None = None) -> Path:
+    """Validate file path to prevent path traversal and arbitrary writes.
+
+    Args:
+        path: File path to validate
+        allowed_dir: Optional directory to restrict writes to
+
+    Returns:
+        Validated Path object
+
+    Raises:
+        ValueError: If path is invalid or unsafe
+    """
+    if not path or not isinstance(path, str):
+        raise ValueError("path must be a non-empty string")
+
+    # Check for null bytes
+    if "\x00" in path:
+        raise ValueError("path contains null bytes")
+
+    try:
+        resolved = Path(path).resolve()
+    except (OSError, RuntimeError) as e:
+        raise ValueError(f"Invalid path: {e}")
+
+    # Check if within allowed directory
+    if allowed_dir:
+        try:
+            allowed = Path(allowed_dir).resolve()
+            resolved.relative_to(allowed)
+        except ValueError:
+            raise ValueError(f"path must be within {allowed_dir}")
+
+    # Check for dangerous system paths
+    dangerous_paths = ["/etc", "/sys", "/proc", "/dev"]
+    for dangerous in dangerous_paths:
+        if str(resolved).startswith(dangerous):
+            raise ValueError(f"Cannot write to system directory: {dangerous}")
+
+    return resolved
+
+
 @dataclass
 class XMLConfig:
     """XML prompting configuration.
@@ -163,8 +205,8 @@ class EmpathyXMLConfig:
         Args:
             config_file: Path to save config (default: .empathy/config.json)
         """
-
-
+        validated_path = _validate_file_path(config_file)
+        validated_path.parent.mkdir(parents=True, exist_ok=True)

         data = {
             "xml": asdict(self.xml),
@@ -174,7 +216,7 @@ class EmpathyXMLConfig:
             "metrics": asdict(self.metrics),
         }

-        with open(
+        with open(validated_path, "w") as f:
            json.dump(data, f, indent=2)

    @classmethod