empathy-framework 5.1.1-py3-none-any.whl → 5.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/METADATA +79 -6
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/RECORD +83 -64
- empathy_os/__init__.py +1 -1
- empathy_os/cache/hybrid.py +5 -1
- empathy_os/cli/commands/batch.py +8 -0
- empathy_os/cli/commands/profiling.py +4 -0
- empathy_os/cli/commands/workflow.py +8 -4
- empathy_os/cli_router.py +9 -0
- empathy_os/config.py +15 -2
- empathy_os/core_modules/__init__.py +15 -0
- empathy_os/dashboard/simple_server.py +62 -30
- empathy_os/mcp/__init__.py +10 -0
- empathy_os/mcp/server.py +506 -0
- empathy_os/memory/control_panel.py +1 -131
- empathy_os/memory/control_panel_support.py +145 -0
- empathy_os/memory/encryption.py +159 -0
- empathy_os/memory/long_term.py +46 -631
- empathy_os/memory/long_term_types.py +99 -0
- empathy_os/memory/mixins/__init__.py +25 -0
- empathy_os/memory/mixins/backend_init_mixin.py +249 -0
- empathy_os/memory/mixins/capabilities_mixin.py +208 -0
- empathy_os/memory/mixins/handoff_mixin.py +208 -0
- empathy_os/memory/mixins/lifecycle_mixin.py +49 -0
- empathy_os/memory/mixins/long_term_mixin.py +352 -0
- empathy_os/memory/mixins/promotion_mixin.py +109 -0
- empathy_os/memory/mixins/short_term_mixin.py +182 -0
- empathy_os/memory/short_term.py +61 -12
- empathy_os/memory/simple_storage.py +302 -0
- empathy_os/memory/storage_backend.py +167 -0
- empathy_os/memory/types.py +8 -3
- empathy_os/memory/unified.py +21 -1120
- empathy_os/meta_workflows/cli_commands/__init__.py +56 -0
- empathy_os/meta_workflows/cli_commands/agent_commands.py +321 -0
- empathy_os/meta_workflows/cli_commands/analytics_commands.py +442 -0
- empathy_os/meta_workflows/cli_commands/config_commands.py +232 -0
- empathy_os/meta_workflows/cli_commands/memory_commands.py +182 -0
- empathy_os/meta_workflows/cli_commands/template_commands.py +354 -0
- empathy_os/meta_workflows/cli_commands/workflow_commands.py +382 -0
- empathy_os/meta_workflows/cli_meta_workflows.py +52 -1802
- empathy_os/models/telemetry/__init__.py +71 -0
- empathy_os/models/telemetry/analytics.py +594 -0
- empathy_os/models/telemetry/backend.py +196 -0
- empathy_os/models/telemetry/data_models.py +431 -0
- empathy_os/models/telemetry/storage.py +489 -0
- empathy_os/orchestration/__init__.py +35 -0
- empathy_os/orchestration/execution_strategies.py +481 -0
- empathy_os/orchestration/meta_orchestrator.py +488 -1
- empathy_os/routing/workflow_registry.py +36 -0
- empathy_os/telemetry/agent_coordination.py +2 -3
- empathy_os/telemetry/agent_tracking.py +26 -7
- empathy_os/telemetry/approval_gates.py +18 -24
- empathy_os/telemetry/cli.py +19 -724
- empathy_os/telemetry/commands/__init__.py +14 -0
- empathy_os/telemetry/commands/dashboard_commands.py +696 -0
- empathy_os/telemetry/event_streaming.py +7 -3
- empathy_os/telemetry/feedback_loop.py +28 -15
- empathy_os/tools.py +183 -0
- empathy_os/workflows/__init__.py +5 -0
- empathy_os/workflows/autonomous_test_gen.py +860 -161
- empathy_os/workflows/base.py +6 -2
- empathy_os/workflows/code_review.py +4 -1
- empathy_os/workflows/document_gen/__init__.py +25 -0
- empathy_os/workflows/document_gen/config.py +30 -0
- empathy_os/workflows/document_gen/report_formatter.py +162 -0
- empathy_os/workflows/{document_gen.py → document_gen/workflow.py} +5 -184
- empathy_os/workflows/output.py +4 -1
- empathy_os/workflows/progress.py +8 -2
- empathy_os/workflows/security_audit.py +2 -2
- empathy_os/workflows/security_audit_phase3.py +7 -4
- empathy_os/workflows/seo_optimization.py +633 -0
- empathy_os/workflows/test_gen/__init__.py +52 -0
- empathy_os/workflows/test_gen/ast_analyzer.py +249 -0
- empathy_os/workflows/test_gen/config.py +88 -0
- empathy_os/workflows/test_gen/data_models.py +38 -0
- empathy_os/workflows/test_gen/report_formatter.py +289 -0
- empathy_os/workflows/test_gen/test_templates.py +381 -0
- empathy_os/workflows/test_gen/workflow.py +655 -0
- empathy_os/workflows/test_gen.py +42 -1905
- empathy_os/cli/parsers/cache 2.py +0 -65
- empathy_os/cli_router 2.py +0 -416
- empathy_os/dashboard/app 2.py +0 -512
- empathy_os/dashboard/simple_server 2.py +0 -403
- empathy_os/dashboard/standalone_server 2.py +0 -536
- empathy_os/memory/types 2.py +0 -441
- empathy_os/models/adaptive_routing 2.py +0 -437
- empathy_os/models/telemetry.py +0 -1660
- empathy_os/project_index/scanner_parallel 2.py +0 -291
- empathy_os/telemetry/agent_coordination 2.py +0 -478
- empathy_os/telemetry/agent_tracking 2.py +0 -350
- empathy_os/telemetry/approval_gates 2.py +0 -563
- empathy_os/telemetry/event_streaming 2.py +0 -405
- empathy_os/telemetry/feedback_loop 2.py +0 -557
- empathy_os/vscode_bridge 2.py +0 -173
- empathy_os/workflows/progressive/__init__ 2.py +0 -92
- empathy_os/workflows/progressive/cli 2.py +0 -242
- empathy_os/workflows/progressive/core 2.py +0 -488
- empathy_os/workflows/progressive/orchestrator 2.py +0 -701
- empathy_os/workflows/progressive/reports 2.py +0 -528
- empathy_os/workflows/progressive/telemetry 2.py +0 -280
- empathy_os/workflows/progressive/test_gen 2.py +0 -514
- empathy_os/workflows/progressive/workflow 2.py +0 -628
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/WHEEL +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/top_level.txt +0 -0
empathy_os/vscode_bridge 2.py
DELETED
@@ -1,173 +0,0 @@
"""VS Code Extension Bridge

Provides functions to write data that the VS Code extension can pick up.
Enables Claude Code CLI output to appear in VS Code webview panels.

Copyright 2026 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

import json
from dataclasses import asdict, dataclass
from datetime import datetime
from pathlib import Path
from typing import Any


@dataclass
class ReviewFinding:
    """A code review finding."""

    id: str
    file: str
    line: int
    severity: str  # 'critical' | 'high' | 'medium' | 'low' | 'info'
    category: str  # 'security' | 'performance' | 'maintainability' | 'style' | 'correctness'
    message: str
    column: int = 1
    details: str | None = None
    recommendation: str | None = None


@dataclass
class CodeReviewResult:
    """Code review results for VS Code bridge."""

    findings: list[dict[str, Any]]
    summary: dict[str, Any]
    verdict: str  # 'approve' | 'approve_with_suggestions' | 'request_changes' | 'reject'
    security_score: int
    formatted_report: str
    model_tier_used: str
    timestamp: str


def get_empathy_dir() -> Path:
    """Get the .empathy directory, creating if needed."""
    empathy_dir = Path(".empathy")
    empathy_dir.mkdir(exist_ok=True)
    return empathy_dir


def write_code_review_results(
    findings: list[dict[str, Any]] | None = None,
    summary: dict[str, Any] | None = None,
    verdict: str = "approve_with_suggestions",
    security_score: int = 85,
    formatted_report: str = "",
    model_tier_used: str = "capable",
) -> Path:
    """Write code review results for VS Code extension to pick up.

    Args:
        findings: List of finding dicts with keys: id, file, line, severity, category, message
        summary: Summary dict with keys: total_findings, by_severity, by_category, files_affected
        verdict: One of 'approve', 'approve_with_suggestions', 'request_changes', 'reject'
        security_score: 0-100 score
        formatted_report: Markdown formatted report
        model_tier_used: 'cheap', 'capable', or 'premium'

    Returns:
        Path to the written file
    """
    findings = findings or []

    # Build summary if not provided
    if summary is None:
        by_severity: dict[str, int] = {}
        by_category: dict[str, int] = {}
        files_affected: set[str] = set()

        for f in findings:
            sev = f.get("severity", "info")
            cat = f.get("category", "correctness")
            by_severity[sev] = by_severity.get(sev, 0) + 1
            by_category[cat] = by_category.get(cat, 0) + 1
            if f.get("file"):
                files_affected.add(f["file"])

        summary = {
            "total_findings": len(findings),
            "by_severity": by_severity,
            "by_category": by_category,
            "files_affected": list(files_affected),
        }

    result = CodeReviewResult(
        findings=findings,
        summary=summary,
        verdict=verdict,
        security_score=security_score,
        formatted_report=formatted_report,
        model_tier_used=model_tier_used,
        timestamp=datetime.now().isoformat(),
    )

    output_path = get_empathy_dir() / "code-review-results.json"

    with open(output_path, "w") as f:
        json.dump(asdict(result), f, indent=2)

    return output_path


def write_pr_review_results(
    pr_number: int | str,
    title: str,
    findings: list[dict[str, Any]],
    verdict: str = "approve_with_suggestions",
    summary_text: str = "",
) -> Path:
    """Write PR review results for VS Code extension.

    Convenience wrapper for PR reviews from GitHub.

    Args:
        pr_number: The PR number
        title: PR title
        findings: List of review findings
        verdict: Review verdict
        summary_text: Summary of the review

    Returns:
        Path to the written file
    """
    formatted_report = f"""## PR #{pr_number}: {title}

{summary_text}

### Findings ({len(findings)})

"""
    for f in findings:
        formatted_report += f"- **{f.get('severity', 'info').upper()}** [{f.get('file', 'unknown')}:{f.get('line', 0)}]: {f.get('message', '')}\n"

    return write_code_review_results(
        findings=findings,
        verdict=verdict,
        formatted_report=formatted_report,
        model_tier_used="capable",
    )


# Quick helper for Claude Code to call
def send_to_vscode(
    message: str,
    findings: list[dict[str, Any]] | None = None,
    verdict: str = "approve_with_suggestions",
) -> str:
    """Quick helper to send review results to VS Code.

    Usage in Claude Code:
        from empathy_os.vscode_bridge import send_to_vscode
        send_to_vscode("Review complete", findings=[...])

    Returns:
        Confirmation message
    """
    path = write_code_review_results(
        findings=findings or [],
        formatted_report=message,
        verdict=verdict,
    )
    return f"Results written to {path} - VS Code will update automatically"
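The removed bridge is the write side of a file-based handshake: results are serialized to .empathy/code-review-results.json and the VS Code extension picks that file up. A minimal usage sketch against the 5.1.1 module, built only from the signatures and docstring above (the finding dict, PR number, and text values are placeholders):

from empathy_os.vscode_bridge import send_to_vscode, write_pr_review_results

# One-line helper documented for Claude Code sessions
send_to_vscode(
    "Review complete",
    findings=[{"id": "F1", "file": "app.py", "line": 10,
               "severity": "high", "category": "security",
               "message": "Unvalidated input"}],
)

# Convenience wrapper for GitHub PR reviews
write_pr_review_results(
    pr_number=42,               # placeholder PR number
    title="Example PR",
    findings=[],
    verdict="approve",
    summary_text="No findings.",
)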
empathy_os/workflows/progressive/__init__ 2.py
DELETED
@@ -1,92 +0,0 @@
"""Progressive tier escalation system for cost-efficient, quality-driven workflows.

This module implements automatic model tier escalation (cheap → capable → premium)
based on failure analysis and quality metrics. Key features:

- Multi-signal failure detection (syntax, execution, coverage, confidence)
- Composite Quality Score (CQS) for objective quality measurement
- LLM-guided retry logic with stagnation detection
- Meta-orchestration with dynamic agent team creation
- Cost management with budget controls and approval prompts
- Comprehensive observability and reporting

Usage:
    from empathy_os.workflows.progressive import (
        ProgressiveWorkflow,
        EscalationConfig,
        Tier,
        FailureAnalysis
    )

    # Configure escalation
    config = EscalationConfig(
        enabled=True,
        max_cost=10.00,
        auto_approve_under=5.00
    )

    # Create workflow
    workflow = ProgressiveTestGenWorkflow(config)
    result = workflow.execute(target_file="app.py")

    # View report
    print(result.generate_report())

Version: 4.1.0
Author: Empathy Framework Team
"""

from empathy_os.workflows.progressive.core import (
    EscalationConfig,
    FailureAnalysis,
    ProgressiveWorkflowResult,
    Tier,
    TierResult,
)
from empathy_os.workflows.progressive.orchestrator import (
    MetaOrchestrator,
)
from empathy_os.workflows.progressive.telemetry import (
    ProgressiveTelemetry,
)
from empathy_os.workflows.progressive.test_gen import (
    ProgressiveTestGenWorkflow,
    calculate_coverage,
    execute_test_file,
)
from empathy_os.workflows.progressive.workflow import (
    BudgetExceededError,
    ProgressiveWorkflow,
    UserCancelledError,
)

__all__ = [
    # Enums
    "Tier",

    # Core data structures
    "FailureAnalysis",
    "TierResult",
    "ProgressiveWorkflowResult",
    "EscalationConfig",

    # Base classes
    "ProgressiveWorkflow",
    "MetaOrchestrator",

    # Telemetry
    "ProgressiveTelemetry",

    # Exceptions
    "BudgetExceededError",
    "UserCancelledError",

    # Workflows
    "ProgressiveTestGenWorkflow",

    # Utilities
    "execute_test_file",
    "calculate_coverage",
]

__version__ = "4.1.1"
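The docstring above names the escalation policy, but the decision loop itself lived in the progressive.core and progressive.workflow modules, which are not shown here. Purely as an illustration of the cheap → capable → premium pattern it describes, a hypothetical loop could look like the following; the Tier values mirror the docstring, while run_with_escalation, run_tier, and quality_threshold are illustrative stand-ins, not the package's API:

from enum import Enum
from typing import Callable

class Tier(Enum):  # illustrative stand-in for progressive.core.Tier
    CHEAP = "cheap"
    CAPABLE = "capable"
    PREMIUM = "premium"

def run_with_escalation(
    run_tier: Callable[[Tier], tuple[float, float]],  # returns (quality score, cost)
    quality_threshold: float = 0.8,
    max_cost: float = 10.00,
) -> tuple[Tier, float, float]:
    spent = 0.0
    score = 0.0
    for tier in (Tier.CHEAP, Tier.CAPABLE, Tier.PREMIUM):
        score, cost = run_tier(tier)       # attempt the task at this tier
        spent += cost
        if score >= quality_threshold:     # good enough: stop escalating
            break
        if spent >= max_cost:              # budget control, cf. EscalationConfig.max_cost
            raise RuntimeError("Budget exceeded before reaching quality target")
    return tier, score, spent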
empathy_os/workflows/progressive/cli 2.py
DELETED
@@ -1,242 +0,0 @@
"""CLI commands for progressive workflow management.

Provides commands for:
- Listing saved results
- Viewing detailed reports
- Generating analytics
- Cleaning up old results
"""

import argparse
import sys

from empathy_os.workflows.progressive.reports import (
    cleanup_old_results,
    format_cost_analytics_report,
    generate_cost_analytics,
    list_saved_results,
    load_result_from_disk,
)


def cmd_list_results(args: argparse.Namespace) -> int:
    """List all saved progressive workflow results.

    Args:
        args: Parsed command line arguments

    Returns:
        Exit code (0 for success)
    """
    storage_path = args.storage_path or ".empathy/progressive_runs"
    results = list_saved_results(storage_path)

    if not results:
        print(f"No results found in {storage_path}")
        return 0

    print(f"\n📋 Found {len(results)} progressive workflow results:\n")
    print(f"{'Task ID':<40} {'Workflow':<15} {'Cost':<10} {'Savings':<12} {'Success'}")
    print("─" * 90)

    for result in results:
        task_id = result.get("task_id", "unknown")
        workflow = result.get("workflow", "unknown")[:14]
        cost = result.get("total_cost", 0.0)
        savings = result.get("cost_savings_percent", 0.0)
        success = "✅" if result.get("success", False) else "❌"

        print(f"{task_id:<40} {workflow:<15} ${cost:<9.2f} {savings:>6.1f}% {success}")

    print()
    return 0


def cmd_show_report(args: argparse.Namespace) -> int:
    """Show detailed report for a specific task.

    Args:
        args: Parsed command line arguments

    Returns:
        Exit code (0 for success, 1 for error)
    """
    task_id = args.task_id
    storage_path = args.storage_path or ".empathy/progressive_runs"

    try:
        result_data = load_result_from_disk(task_id, storage_path)

        if args.json:
            import json
            print(json.dumps(result_data, indent=2))
        else:
            # Show human-readable report
            report = result_data.get("report", "")
            if report:
                print(report)
            else:
                print("No report found for this task")

        return 0

    except FileNotFoundError as e:
        print(f"Error: {e}", file=sys.stderr)
        return 1


def cmd_analytics(args: argparse.Namespace) -> int:
    """Show cost optimization analytics.

    Args:
        args: Parsed command line arguments

    Returns:
        Exit code (0 for success)
    """
    storage_path = args.storage_path or ".empathy/progressive_runs"
    analytics = generate_cost_analytics(storage_path)

    if analytics["total_runs"] == 0:
        print(f"No results found in {storage_path}")
        return 0

    if args.json:
        import json
        print(json.dumps(analytics, indent=2))
    else:
        report = format_cost_analytics_report(analytics)
        print(report)

    return 0


def cmd_cleanup(args: argparse.Namespace) -> int:
    """Clean up old progressive workflow results.

    Args:
        args: Parsed command line arguments

    Returns:
        Exit code (0 for success)
    """
    storage_path = args.storage_path or ".empathy/progressive_runs"
    retention_days = args.retention_days
    dry_run = args.dry_run

    deleted, retained = cleanup_old_results(
        storage_path=storage_path,
        retention_days=retention_days,
        dry_run=dry_run
    )

    if dry_run:
        print("\n🔍 Dry run mode - no files deleted\n")
        print(f"Would delete: {deleted} results older than {retention_days} days")
        print(f"Would retain: {retained} recent results")
    else:
        print("\n🗑️ Cleanup complete\n")
        print(f"Deleted: {deleted} results older than {retention_days} days")
        print(f"Retained: {retained} recent results")

    return 0


def create_parser() -> argparse.ArgumentParser:
    """Create argument parser for progressive CLI.

    Returns:
        Configured argument parser
    """
    parser = argparse.ArgumentParser(
        prog="empathy progressive",
        description="Manage progressive tier escalation workflows"
    )

    parser.add_argument(
        "--storage-path",
        type=str,
        default=None,
        help="Custom storage path (default: .empathy/progressive_runs)"
    )

    subparsers = parser.add_subparsers(dest="command", help="Available commands")

    # List command
    list_parser = subparsers.add_parser(
        "list",
        help="List all saved progressive workflow results"
    )
    list_parser.set_defaults(func=cmd_list_results)

    # Show command
    show_parser = subparsers.add_parser(
        "show",
        help="Show detailed report for a specific task"
    )
    show_parser.add_argument(
        "task_id",
        type=str,
        help="Task ID to display"
    )
    show_parser.add_argument(
        "--json",
        action="store_true",
        help="Output in JSON format"
    )
    show_parser.set_defaults(func=cmd_show_report)

    # Analytics command
    analytics_parser = subparsers.add_parser(
        "analytics",
        help="Show cost optimization analytics"
    )
    analytics_parser.add_argument(
        "--json",
        action="store_true",
        help="Output in JSON format"
    )
    analytics_parser.set_defaults(func=cmd_analytics)

    # Cleanup command
    cleanup_parser = subparsers.add_parser(
        "cleanup",
        help="Clean up old progressive workflow results"
    )
    cleanup_parser.add_argument(
        "--retention-days",
        type=int,
        default=30,
        help="Number of days to retain results (default: 30)"
    )
    cleanup_parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Show what would be deleted without actually deleting"
    )
    cleanup_parser.set_defaults(func=cmd_cleanup)

    return parser


def main(argv: list[str] | None = None) -> int:
    """Main entry point for progressive CLI.

    Args:
        argv: Command line arguments (defaults to sys.argv[1:])

    Returns:
        Exit code
    """
    parser = create_parser()
    args = parser.parse_args(argv)

    if not hasattr(args, "func"):
        parser.print_help()
        return 1

    return args.func(args)


if __name__ == "__main__":
    sys.exit(main())
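For reference, the removed CLI exposes four subcommands alongside a shared --storage-path option, and main() accepts an explicit argv list, so it can also be driven programmatically. The calls below are valid against the 5.1.1 parser as defined above; the import path assumes the canonical (non-duplicated) progressive.cli module, and TASK-ID is a placeholder:

from empathy_os.workflows.progressive.cli import main

main(["list"])                                           # table of saved runs
main(["--storage-path", ".empathy/progressive_runs", "analytics", "--json"])
main(["show", "TASK-ID", "--json"])                      # TASK-ID is a placeholder
main(["cleanup", "--retention-days", "30", "--dry-run"])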