empathy-framework 5.0.3__py3-none-any.whl → 5.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-5.0.3.dist-info → empathy_framework-5.1.0.dist-info}/METADATA +259 -142
- {empathy_framework-5.0.3.dist-info → empathy_framework-5.1.0.dist-info}/RECORD +56 -26
- empathy_framework-5.1.0.dist-info/licenses/LICENSE +201 -0
- empathy_framework-5.1.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- empathy_os/__init__.py +1 -1
- empathy_os/cli/commands/batch.py +5 -5
- empathy_os/cli/commands/routing.py +1 -1
- empathy_os/cli/commands/workflow.py +2 -1
- empathy_os/cli/parsers/cache 2.py +65 -0
- empathy_os/cli_minimal.py +3 -3
- empathy_os/cli_router 2.py +416 -0
- empathy_os/dashboard/__init__.py +1 -2
- empathy_os/dashboard/app 2.py +512 -0
- empathy_os/dashboard/app.py +1 -1
- empathy_os/dashboard/simple_server 2.py +403 -0
- empathy_os/dashboard/standalone_server 2.py +536 -0
- empathy_os/memory/types 2.py +441 -0
- empathy_os/models/__init__.py +19 -0
- empathy_os/models/adaptive_routing 2.py +437 -0
- empathy_os/models/auth_cli.py +444 -0
- empathy_os/models/auth_strategy.py +450 -0
- empathy_os/project_index/scanner_parallel 2.py +291 -0
- empathy_os/telemetry/agent_coordination 2.py +478 -0
- empathy_os/telemetry/agent_coordination.py +3 -3
- empathy_os/telemetry/agent_tracking 2.py +350 -0
- empathy_os/telemetry/agent_tracking.py +1 -2
- empathy_os/telemetry/approval_gates 2.py +563 -0
- empathy_os/telemetry/event_streaming 2.py +405 -0
- empathy_os/telemetry/event_streaming.py +3 -3
- empathy_os/telemetry/feedback_loop 2.py +557 -0
- empathy_os/telemetry/feedback_loop.py +1 -1
- empathy_os/vscode_bridge 2.py +173 -0
- empathy_os/workflows/__init__.py +8 -0
- empathy_os/workflows/autonomous_test_gen.py +569 -0
- empathy_os/workflows/bug_predict.py +45 -0
- empathy_os/workflows/code_review.py +92 -22
- empathy_os/workflows/document_gen.py +594 -62
- empathy_os/workflows/llm_base.py +363 -0
- empathy_os/workflows/perf_audit.py +69 -0
- empathy_os/workflows/progressive/README 2.md +454 -0
- empathy_os/workflows/progressive/__init__ 2.py +92 -0
- empathy_os/workflows/progressive/cli 2.py +242 -0
- empathy_os/workflows/progressive/core 2.py +488 -0
- empathy_os/workflows/progressive/orchestrator 2.py +701 -0
- empathy_os/workflows/progressive/reports 2.py +528 -0
- empathy_os/workflows/progressive/telemetry 2.py +280 -0
- empathy_os/workflows/progressive/test_gen 2.py +514 -0
- empathy_os/workflows/progressive/workflow 2.py +628 -0
- empathy_os/workflows/release_prep.py +54 -0
- empathy_os/workflows/security_audit.py +154 -79
- empathy_os/workflows/test_gen.py +60 -0
- empathy_os/workflows/test_gen_behavioral.py +477 -0
- empathy_os/workflows/test_gen_parallel.py +341 -0
- empathy_framework-5.0.3.dist-info/licenses/LICENSE +0 -139
- {empathy_framework-5.0.3.dist-info → empathy_framework-5.1.0.dist-info}/WHEEL +0 -0
- {empathy_framework-5.0.3.dist-info → empathy_framework-5.1.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-5.0.3.dist-info → empathy_framework-5.1.0.dist-info}/top_level.txt +0 -0
empathy_os/vscode_bridge 2.py
ADDED

@@ -0,0 +1,173 @@
+"""VS Code Extension Bridge
+
+Provides functions to write data that the VS Code extension can pick up.
+Enables Claude Code CLI output to appear in VS Code webview panels.
+
+Copyright 2026 Smart-AI-Memory
+Licensed under Fair Source License 0.9
+"""
+
+import json
+from dataclasses import asdict, dataclass
+from datetime import datetime
+from pathlib import Path
+from typing import Any
+
+
+@dataclass
+class ReviewFinding:
+    """A code review finding."""
+
+    id: str
+    file: str
+    line: int
+    severity: str  # 'critical' | 'high' | 'medium' | 'low' | 'info'
+    category: str  # 'security' | 'performance' | 'maintainability' | 'style' | 'correctness'
+    message: str
+    column: int = 1
+    details: str | None = None
+    recommendation: str | None = None
+
+
+@dataclass
+class CodeReviewResult:
+    """Code review results for VS Code bridge."""
+
+    findings: list[dict[str, Any]]
+    summary: dict[str, Any]
+    verdict: str  # 'approve' | 'approve_with_suggestions' | 'request_changes' | 'reject'
+    security_score: int
+    formatted_report: str
+    model_tier_used: str
+    timestamp: str
+
+
+def get_empathy_dir() -> Path:
+    """Get the .empathy directory, creating if needed."""
+    empathy_dir = Path(".empathy")
+    empathy_dir.mkdir(exist_ok=True)
+    return empathy_dir
+
+
+def write_code_review_results(
+    findings: list[dict[str, Any]] | None = None,
+    summary: dict[str, Any] | None = None,
+    verdict: str = "approve_with_suggestions",
+    security_score: int = 85,
+    formatted_report: str = "",
+    model_tier_used: str = "capable",
+) -> Path:
+    """Write code review results for VS Code extension to pick up.
+
+    Args:
+        findings: List of finding dicts with keys: id, file, line, severity, category, message
+        summary: Summary dict with keys: total_findings, by_severity, by_category, files_affected
+        verdict: One of 'approve', 'approve_with_suggestions', 'request_changes', 'reject'
+        security_score: 0-100 score
+        formatted_report: Markdown formatted report
+        model_tier_used: 'cheap', 'capable', or 'premium'
+
+    Returns:
+        Path to the written file
+    """
+    findings = findings or []
+
+    # Build summary if not provided
+    if summary is None:
+        by_severity: dict[str, int] = {}
+        by_category: dict[str, int] = {}
+        files_affected: set[str] = set()
+
+        for f in findings:
+            sev = f.get("severity", "info")
+            cat = f.get("category", "correctness")
+            by_severity[sev] = by_severity.get(sev, 0) + 1
+            by_category[cat] = by_category.get(cat, 0) + 1
+            if f.get("file"):
+                files_affected.add(f["file"])
+
+        summary = {
+            "total_findings": len(findings),
+            "by_severity": by_severity,
+            "by_category": by_category,
+            "files_affected": list(files_affected),
+        }
+
+    result = CodeReviewResult(
+        findings=findings,
+        summary=summary,
+        verdict=verdict,
+        security_score=security_score,
+        formatted_report=formatted_report,
+        model_tier_used=model_tier_used,
+        timestamp=datetime.now().isoformat(),
+    )
+
+    output_path = get_empathy_dir() / "code-review-results.json"
+
+    with open(output_path, "w") as f:
+        json.dump(asdict(result), f, indent=2)
+
+    return output_path
+
+
+def write_pr_review_results(
+    pr_number: int | str,
+    title: str,
+    findings: list[dict[str, Any]],
+    verdict: str = "approve_with_suggestions",
+    summary_text: str = "",
+) -> Path:
+    """Write PR review results for VS Code extension.
+
+    Convenience wrapper for PR reviews from GitHub.
+
+    Args:
+        pr_number: The PR number
+        title: PR title
+        findings: List of review findings
+        verdict: Review verdict
+        summary_text: Summary of the review
+
+    Returns:
+        Path to the written file
+    """
+    formatted_report = f"""## PR #{pr_number}: {title}
+
+{summary_text}
+
+### Findings ({len(findings)})
+
+"""
+    for f in findings:
+        formatted_report += f"- **{f.get('severity', 'info').upper()}** [{f.get('file', 'unknown')}:{f.get('line', 0)}]: {f.get('message', '')}\n"
+
+    return write_code_review_results(
+        findings=findings,
+        verdict=verdict,
+        formatted_report=formatted_report,
+        model_tier_used="capable",
+    )
+
+
+# Quick helper for Claude Code to call
+def send_to_vscode(
+    message: str,
+    findings: list[dict[str, Any]] | None = None,
+    verdict: str = "approve_with_suggestions",
+) -> str:
+    """Quick helper to send review results to VS Code.
+
+    Usage in Claude Code:
+        from empathy_os.vscode_bridge import send_to_vscode
+        send_to_vscode("Review complete", findings=[...])
+
+    Returns:
+        Confirmation message
+    """
+    path = write_code_review_results(
+        findings=findings or [],
+        formatted_report=message,
+        verdict=verdict,
+    )
+    return f"Results written to {path} - VS Code will update automatically"
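For context, a minimal usage sketch of the new bridge module. The finding values below are invented for illustration; the dict keys, defaults, import path, and the `.empathy/code-review-results.json` output location come from the code above.

```python
# Illustrative sketch based on the module above. The finding content is made up;
# only the keys (id, file, line, severity, category, message), the defaults, and
# the output path (.empathy/code-review-results.json) come from the source.
from empathy_os.vscode_bridge import send_to_vscode, write_code_review_results

findings = [
    {
        "id": "SEC-001",  # hypothetical example finding
        "file": "app/auth.py",
        "line": 42,
        "severity": "high",
        "category": "security",
        "message": "User input interpolated directly into SQL query",
    }
]

# Writes .empathy/code-review-results.json; when no summary is passed, the
# module derives counts by severity/category and the affected-file list itself.
path = write_code_review_results(findings=findings, verdict="request_changes")
print(path)

# One-call helper intended for Claude Code sessions:
print(send_to_vscode("Review complete", findings=findings, verdict="request_changes"))
```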
empathy_os/workflows/__init__.py
CHANGED

@@ -66,6 +66,8 @@ if TYPE_CHECKING:
     from .test5 import Test5Workflow
     from .test_coverage_boost_crew import TestCoverageBoostCrew, TestCoverageBoostCrewResult
     from .test_gen import TestGenerationWorkflow
+    from .test_gen_behavioral import BehavioralTestGenerationWorkflow
+    from .test_gen_parallel import ParallelTestGenerationWorkflow
     from .xml_enhanced_crew import XMLAgent, XMLTask

 # Only import base module eagerly (small, needed for type checks)

@@ -136,6 +138,8 @@ _LAZY_WORKFLOW_IMPORTS: dict[str, tuple[str, str]] = {
     "TestCoverageBoostCrew": (".test_coverage_boost_crew", "TestCoverageBoostCrew"),
     "TestCoverageBoostCrewResult": (".test_coverage_boost_crew", "TestCoverageBoostCrewResult"),
     "TestGenerationWorkflow": (".test_gen", "TestGenerationWorkflow"),
+    "BehavioralTestGenerationWorkflow": (".test_gen_behavioral", "BehavioralTestGenerationWorkflow"),
+    "ParallelTestGenerationWorkflow": (".test_gen_parallel", "ParallelTestGenerationWorkflow"),
     "XMLAgent": (".xml_enhanced_crew", "XMLAgent"),
     "XMLTask": (".xml_enhanced_crew", "XMLTask"),
     "parse_xml_response": (".xml_enhanced_crew", "parse_xml_response"),

@@ -213,6 +217,8 @@ _DEFAULT_WORKFLOW_NAMES: dict[str, str] = {
     "perf-audit": "PerformanceAuditWorkflow",
     # Generation workflows
     "test-gen": "TestGenerationWorkflow",
+    "test-gen-behavioral": "BehavioralTestGenerationWorkflow",
+    "test-gen-parallel": "ParallelTestGenerationWorkflow",
     "refactor-plan": "RefactorPlanWorkflow",
     # Operational workflows
     "dependency-check": "DependencyCheckWorkflow",

@@ -484,6 +490,8 @@ __all__ = [
     "SecureReleaseResult",
     "SecurityAuditWorkflow",
     "TestGenerationWorkflow",
+    "BehavioralTestGenerationWorkflow",
+    "ParallelTestGenerationWorkflow",
     # Configuration
     "WorkflowConfig",
     "WorkflowResult",
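These hunks only extend the lazy-import and name tables; the resolver itself is not part of this diff. Assuming `empathy_os.workflows` consumes `_LAZY_WORKFLOW_IMPORTS` through a module-level `__getattr__` (PEP 562), the mechanism looks roughly like this sketch:

```python
# Rough sketch of the lazy-import pattern implied by _LAZY_WORKFLOW_IMPORTS.
# Assumption: the package resolves attributes this way; the actual resolver
# is not shown in the diff, only the new table entries are.
from importlib import import_module

_LAZY_WORKFLOW_IMPORTS: dict[str, tuple[str, str]] = {
    "BehavioralTestGenerationWorkflow": (".test_gen_behavioral", "BehavioralTestGenerationWorkflow"),
    "ParallelTestGenerationWorkflow": (".test_gen_parallel", "ParallelTestGenerationWorkflow"),
}

def __getattr__(name: str):
    # Invoked only when `name` is not already a normal module attribute.
    try:
        module_path, attr = _LAZY_WORKFLOW_IMPORTS[name]
    except KeyError:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}") from None
    # Import the submodule relative to this package and hand back the class.
    return getattr(import_module(module_path, __package__), attr)
```

With entries like these in place, `from empathy_os.workflows import BehavioralTestGenerationWorkflow` resolves on first access without importing every workflow module eagerly, and the new `test-gen-behavioral` / `test-gen-parallel` aliases in `_DEFAULT_WORKFLOW_NAMES` map those CLI-style names to the same classes.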