empathy-framework 5.1.1__py3-none-any.whl → 5.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/METADATA +79 -6
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/RECORD +83 -64
- empathy_os/__init__.py +1 -1
- empathy_os/cache/hybrid.py +5 -1
- empathy_os/cli/commands/batch.py +8 -0
- empathy_os/cli/commands/profiling.py +4 -0
- empathy_os/cli/commands/workflow.py +8 -4
- empathy_os/cli_router.py +9 -0
- empathy_os/config.py +15 -2
- empathy_os/core_modules/__init__.py +15 -0
- empathy_os/dashboard/simple_server.py +62 -30
- empathy_os/mcp/__init__.py +10 -0
- empathy_os/mcp/server.py +506 -0
- empathy_os/memory/control_panel.py +1 -131
- empathy_os/memory/control_panel_support.py +145 -0
- empathy_os/memory/encryption.py +159 -0
- empathy_os/memory/long_term.py +46 -631
- empathy_os/memory/long_term_types.py +99 -0
- empathy_os/memory/mixins/__init__.py +25 -0
- empathy_os/memory/mixins/backend_init_mixin.py +249 -0
- empathy_os/memory/mixins/capabilities_mixin.py +208 -0
- empathy_os/memory/mixins/handoff_mixin.py +208 -0
- empathy_os/memory/mixins/lifecycle_mixin.py +49 -0
- empathy_os/memory/mixins/long_term_mixin.py +352 -0
- empathy_os/memory/mixins/promotion_mixin.py +109 -0
- empathy_os/memory/mixins/short_term_mixin.py +182 -0
- empathy_os/memory/short_term.py +61 -12
- empathy_os/memory/simple_storage.py +302 -0
- empathy_os/memory/storage_backend.py +167 -0
- empathy_os/memory/types.py +8 -3
- empathy_os/memory/unified.py +21 -1120
- empathy_os/meta_workflows/cli_commands/__init__.py +56 -0
- empathy_os/meta_workflows/cli_commands/agent_commands.py +321 -0
- empathy_os/meta_workflows/cli_commands/analytics_commands.py +442 -0
- empathy_os/meta_workflows/cli_commands/config_commands.py +232 -0
- empathy_os/meta_workflows/cli_commands/memory_commands.py +182 -0
- empathy_os/meta_workflows/cli_commands/template_commands.py +354 -0
- empathy_os/meta_workflows/cli_commands/workflow_commands.py +382 -0
- empathy_os/meta_workflows/cli_meta_workflows.py +52 -1802
- empathy_os/models/telemetry/__init__.py +71 -0
- empathy_os/models/telemetry/analytics.py +594 -0
- empathy_os/models/telemetry/backend.py +196 -0
- empathy_os/models/telemetry/data_models.py +431 -0
- empathy_os/models/telemetry/storage.py +489 -0
- empathy_os/orchestration/__init__.py +35 -0
- empathy_os/orchestration/execution_strategies.py +481 -0
- empathy_os/orchestration/meta_orchestrator.py +488 -1
- empathy_os/routing/workflow_registry.py +36 -0
- empathy_os/telemetry/agent_coordination.py +2 -3
- empathy_os/telemetry/agent_tracking.py +26 -7
- empathy_os/telemetry/approval_gates.py +18 -24
- empathy_os/telemetry/cli.py +19 -724
- empathy_os/telemetry/commands/__init__.py +14 -0
- empathy_os/telemetry/commands/dashboard_commands.py +696 -0
- empathy_os/telemetry/event_streaming.py +7 -3
- empathy_os/telemetry/feedback_loop.py +28 -15
- empathy_os/tools.py +183 -0
- empathy_os/workflows/__init__.py +5 -0
- empathy_os/workflows/autonomous_test_gen.py +860 -161
- empathy_os/workflows/base.py +6 -2
- empathy_os/workflows/code_review.py +4 -1
- empathy_os/workflows/document_gen/__init__.py +25 -0
- empathy_os/workflows/document_gen/config.py +30 -0
- empathy_os/workflows/document_gen/report_formatter.py +162 -0
- empathy_os/workflows/{document_gen.py → document_gen/workflow.py} +5 -184
- empathy_os/workflows/output.py +4 -1
- empathy_os/workflows/progress.py +8 -2
- empathy_os/workflows/security_audit.py +2 -2
- empathy_os/workflows/security_audit_phase3.py +7 -4
- empathy_os/workflows/seo_optimization.py +633 -0
- empathy_os/workflows/test_gen/__init__.py +52 -0
- empathy_os/workflows/test_gen/ast_analyzer.py +249 -0
- empathy_os/workflows/test_gen/config.py +88 -0
- empathy_os/workflows/test_gen/data_models.py +38 -0
- empathy_os/workflows/test_gen/report_formatter.py +289 -0
- empathy_os/workflows/test_gen/test_templates.py +381 -0
- empathy_os/workflows/test_gen/workflow.py +655 -0
- empathy_os/workflows/test_gen.py +42 -1905
- empathy_os/cli/parsers/cache 2.py +0 -65
- empathy_os/cli_router 2.py +0 -416
- empathy_os/dashboard/app 2.py +0 -512
- empathy_os/dashboard/simple_server 2.py +0 -403
- empathy_os/dashboard/standalone_server 2.py +0 -536
- empathy_os/memory/types 2.py +0 -441
- empathy_os/models/adaptive_routing 2.py +0 -437
- empathy_os/models/telemetry.py +0 -1660
- empathy_os/project_index/scanner_parallel 2.py +0 -291
- empathy_os/telemetry/agent_coordination 2.py +0 -478
- empathy_os/telemetry/agent_tracking 2.py +0 -350
- empathy_os/telemetry/approval_gates 2.py +0 -563
- empathy_os/telemetry/event_streaming 2.py +0 -405
- empathy_os/telemetry/feedback_loop 2.py +0 -557
- empathy_os/vscode_bridge 2.py +0 -173
- empathy_os/workflows/progressive/__init__ 2.py +0 -92
- empathy_os/workflows/progressive/cli 2.py +0 -242
- empathy_os/workflows/progressive/core 2.py +0 -488
- empathy_os/workflows/progressive/orchestrator 2.py +0 -701
- empathy_os/workflows/progressive/reports 2.py +0 -528
- empathy_os/workflows/progressive/telemetry 2.py +0 -280
- empathy_os/workflows/progressive/test_gen 2.py +0 -514
- empathy_os/workflows/progressive/workflow 2.py +0 -628
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/WHEEL +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
- {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/top_level.txt +0 -0
empathy_os/mcp/server.py
ADDED
|
@@ -0,0 +1,506 @@
|
|
|
1
|
+
"""Empathy Framework MCP Server Implementation.
|
|
2
|
+
|
|
3
|
+
Exposes Empathy workflows as MCP tools for Claude Code integration.
|
|
4
|
+
"""
|
|
5
|
+
import asyncio
|
|
6
|
+
import json
|
|
7
|
+
import logging
|
|
8
|
+
import sys
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
# MCP server will be implemented using stdio transport
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class EmpathyMCPServer:
    """MCP server for Empathy Framework workflows.

    Exposes workflows, agent dashboard, and telemetry as MCP tools
    that can be invoked from Claude Code.

    Tool and resource definitions are registered eagerly at construction
    time; the heavyweight workflow modules are imported lazily inside the
    individual handlers so that merely listing tools stays cheap.
    """

    def __init__(self):
        """Initialize the MCP server by registering tools and resources."""
        self.tools = self._register_tools()
        self.resources = self._register_resources()

    def _register_tools(self) -> dict[str, dict[str, Any]]:
        """Register available MCP tools.

        Returns:
            Dictionary mapping tool name to its MCP tool definition
            (name, description, and JSON-Schema ``input_schema``).
        """
        return {
            "security_audit": {
                "name": "security_audit",
                "description": "Run security audit workflow on codebase. Detects vulnerabilities, dangerous patterns, and security issues. Returns findings with severity levels.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "path": {
                            "type": "string",
                            "description": "Path to directory or file to audit"
                        }
                    },
                    "required": ["path"]
                }
            },
            "bug_predict": {
                "name": "bug_predict",
                "description": "Run bug prediction workflow. Analyzes code patterns and predicts potential bugs before they occur.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "path": {
                            "type": "string",
                            "description": "Path to directory or file to analyze"
                        }
                    },
                    "required": ["path"]
                }
            },
            "code_review": {
                "name": "code_review",
                "description": "Run code review workflow. Provides comprehensive code quality analysis with suggestions for improvement.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "path": {
                            "type": "string",
                            "description": "Path to directory or file to review"
                        }
                    },
                    "required": ["path"]
                }
            },
            "test_generation": {
                "name": "test_generation",
                "description": "Generate tests for code. Can batch generate tests for multiple modules in parallel.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "module": {
                            "type": "string",
                            "description": "Path to Python module"
                        },
                        "batch": {
                            "type": "boolean",
                            "description": "Enable batch mode for parallel generation",
                            "default": False
                        }
                    },
                    "required": ["module"]
                }
            },
            "performance_audit": {
                "name": "performance_audit",
                "description": "Run performance audit workflow. Identifies bottlenecks, memory leaks, and optimization opportunities.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "path": {
                            "type": "string",
                            "description": "Path to directory or file to audit"
                        }
                    },
                    "required": ["path"]
                }
            },
            "release_prep": {
                "name": "release_prep",
                "description": "Run release preparation workflow. Checks health, security, changelog, and provides release recommendation.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "path": {
                            "type": "string",
                            "description": "Path to project root",
                            "default": "."
                        }
                    }
                }
            },
            "auth_status": {
                "name": "auth_status",
                "description": "Get authentication strategy status. Shows current configuration, subscription tier, and default mode.",
                "input_schema": {
                    "type": "object",
                    "properties": {}
                }
            },
            "auth_recommend": {
                "name": "auth_recommend",
                "description": "Get authentication recommendation for a file. Analyzes LOC and suggests optimal auth mode.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "file_path": {
                            "type": "string",
                            "description": "Path to file to analyze"
                        }
                    },
                    "required": ["file_path"]
                }
            },
            "telemetry_stats": {
                "name": "telemetry_stats",
                "description": "Get telemetry statistics. Shows cost savings, cache hit rates, and workflow performance.",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "days": {
                            "type": "integer",
                            "description": "Number of days to analyze",
                            "default": 30
                        }
                    }
                }
            },
            "dashboard_status": {
                "name": "dashboard_status",
                "description": "Get agent coordination dashboard status. Shows active agents, pending approvals, recent signals.",
                "input_schema": {
                    "type": "object",
                    "properties": {}
                }
            }
        }

    def _register_resources(self) -> dict[str, dict[str, Any]]:
        """Register available MCP resources.

        Returns:
            Dictionary mapping resource key to its MCP resource definition
            (uri, name, description, mime_type).
        """
        return {
            "workflows": {
                "uri": "empathy://workflows",
                "name": "Available Workflows",
                "description": "List of all available Empathy workflows",
                "mime_type": "application/json"
            },
            "auth_config": {
                "uri": "empathy://auth/config",
                "name": "Authentication Configuration",
                "description": "Current authentication strategy configuration",
                "mime_type": "application/json"
            },
            "telemetry": {
                "uri": "empathy://telemetry",
                "name": "Telemetry Data",
                "description": "Cost tracking and performance metrics",
                "mime_type": "application/json"
            }
        }

    async def call_tool(self, tool_name: str, arguments: dict[str, Any]) -> dict[str, Any]:
        """Execute a tool call.

        Args:
            tool_name: Name of the tool to execute
            arguments: Tool arguments

        Returns:
            Tool execution result dict. On failure the dict carries
            ``success=False`` and an ``error`` message; handler exceptions
            are caught and reported rather than propagated.
        """
        # Dispatch table instead of a long if/elif chain. Each value is a
        # zero-argument callable producing the handler coroutine, so no
        # handler runs until the name has been resolved.
        handlers = {
            "security_audit": lambda: self._run_security_audit(arguments),
            "bug_predict": lambda: self._run_bug_predict(arguments),
            "code_review": lambda: self._run_code_review(arguments),
            "test_generation": lambda: self._run_test_generation(arguments),
            "performance_audit": lambda: self._run_performance_audit(arguments),
            "release_prep": lambda: self._run_release_prep(arguments),
            "auth_status": lambda: self._get_auth_status(),
            "auth_recommend": lambda: self._get_auth_recommend(arguments),
            "telemetry_stats": lambda: self._get_telemetry_stats(arguments),
            "dashboard_status": lambda: self._get_dashboard_status(),
        }

        handler = handlers.get(tool_name)
        if handler is None:
            return {
                "success": False,
                "error": f"Unknown tool: {tool_name}"
            }

        try:
            return await handler()
        except Exception as e:
            # Tools must never crash the server loop; report the failure
            # as a structured result instead.
            logger.exception(f"Tool execution failed: {tool_name}")
            return {
                "success": False,
                "error": str(e)
            }

    async def _run_security_audit(self, args: dict[str, Any]) -> dict[str, Any]:
        """Run security audit workflow on ``args["path"]``."""
        # Lazy import keeps server startup light.
        from empathy_os.workflows.security_audit import SecurityAuditWorkflow

        workflow = SecurityAuditWorkflow()
        result = await workflow.execute(path=args["path"])

        return {
            "success": result.success,
            "score": result.final_output.get("health_score"),
            "findings": result.final_output.get("findings", []),
            "cost": result.cost_report.total_cost,
            "provider": result.provider
        }

    async def _run_bug_predict(self, args: dict[str, Any]) -> dict[str, Any]:
        """Run bug prediction workflow on ``args["path"]``."""
        from empathy_os.workflows.bug_predict import BugPredictWorkflow

        workflow = BugPredictWorkflow()
        result = await workflow.execute(path=args["path"])

        return {
            "success": result.success,
            "predictions": result.final_output.get("predictions", []),
            "cost": result.cost_report.total_cost
        }

    async def _run_code_review(self, args: dict[str, Any]) -> dict[str, Any]:
        """Run code review workflow on ``args["path"]``."""
        from empathy_os.workflows.code_review import CodeReviewWorkflow

        workflow = CodeReviewWorkflow()
        # Note: CodeReviewWorkflow takes ``target_path``, unlike the other
        # workflows which take ``path``.
        result = await workflow.execute(target_path=args["path"])

        return {
            "success": result.success,
            "feedback": result.final_output.get("feedback"),
            "score": result.final_output.get("quality_score"),
            "cost": result.cost_report.total_cost
        }

    async def _run_test_generation(self, args: dict[str, Any]) -> dict[str, Any]:
        """Run test generation workflow on ``args["module"]``.

        NOTE(review): the tool schema advertises a ``batch`` flag, but it
        is not consumed here — confirm whether batch mode should be wired
        through to the workflow.
        """
        from empathy_os.workflows.test_gen import TestGenerationWorkflow

        workflow = TestGenerationWorkflow()
        result = await workflow.execute(module_path=args["module"])

        return {
            "success": result.success,
            "tests_generated": result.final_output.get("tests_generated", 0),
            "output_path": result.final_output.get("output_path"),
            "cost": result.cost_report.total_cost
        }

    async def _run_performance_audit(self, args: dict[str, Any]) -> dict[str, Any]:
        """Run performance audit workflow on ``args["path"]``."""
        from empathy_os.workflows.perf_audit import PerformanceAuditWorkflow

        workflow = PerformanceAuditWorkflow()
        result = await workflow.execute(path=args["path"])

        return {
            "success": result.success,
            "findings": result.final_output.get("findings", []),
            "score": result.final_output.get("score"),
            "cost": result.cost_report.total_cost
        }

    async def _run_release_prep(self, args: dict[str, Any]) -> dict[str, Any]:
        """Run release preparation workflow (defaults to project root ".")."""
        from empathy_os.workflows.release_prep import ReleasePreparationWorkflow

        # skip_approve_if_clean=True: avoid an interactive approval gate
        # when running non-interactively over MCP.
        workflow = ReleasePreparationWorkflow(skip_approve_if_clean=True)
        result = await workflow.execute(path=args.get("path", "."))

        return {
            "success": result.success,
            "approved": result.final_output.get("approved"),
            "health_score": result.final_output.get("health_score"),
            "recommendation": result.final_output.get("recommendation"),
            "cost": result.cost_report.total_cost
        }

    async def _get_auth_status(self) -> dict[str, Any]:
        """Get authentication strategy status from the persisted config."""
        from empathy_os.models import AuthStrategy

        strategy = AuthStrategy.load()

        return {
            "success": True,
            "subscription_tier": strategy.subscription_tier.value,
            "default_mode": strategy.default_mode.value,
            "setup_completed": strategy.setup_completed
        }

    async def _get_auth_recommend(self, args: dict[str, Any]) -> dict[str, Any]:
        """Get authentication mode recommendation for ``args["file_path"]``.

        The recommendation is derived from the file's line count and the
        configured auth strategy.
        """
        from pathlib import Path

        from empathy_os.models import (
            count_lines_of_code,
            get_auth_strategy,
            get_module_size_category,
        )

        file_path = Path(args["file_path"])
        lines = count_lines_of_code(file_path)
        category = get_module_size_category(lines)

        strategy = get_auth_strategy()
        recommended = strategy.get_recommended_mode(lines)

        return {
            "success": True,
            "file_path": str(file_path),
            "lines_of_code": lines,
            "category": category,
            "recommended_mode": recommended.value
        }

    async def _get_telemetry_stats(self, args: dict[str, Any]) -> dict[str, Any]:
        """Get telemetry statistics.

        Placeholder — returns zeroed figures until wired to the real
        telemetry system.
        """
        return {
            "success": True,
            "days": args.get("days", 30),
            "total_cost": 0.0,
            "savings": 0.0,
            "cache_hit_rate": 0.0
        }

    async def _get_dashboard_status(self) -> dict[str, Any]:
        """Get dashboard status.

        Placeholder — returns zeroed counters until wired to the real
        agent coordination dashboard.
        """
        return {
            "success": True,
            "active_agents": 0,
            "pending_approvals": 0,
            "recent_signals": 0
        }

    def get_tool_list(self) -> list[dict[str, Any]]:
        """Get list of available tools.

        Returns:
            List of tool definitions (the values of ``self.tools``).
        """
        return list(self.tools.values())

    def get_resource_list(self) -> list[dict[str, Any]]:
        """Get list of available resources.

        Returns:
            List of resource definitions (the values of ``self.resources``).
        """
        return list(self.resources.values())
|
|
398
|
+
|
|
399
|
+
|
|
400
|
+
async def handle_request(server: EmpathyMCPServer, request: dict[str, Any]) -> dict[str, Any]:
|
|
401
|
+
"""Handle an MCP request.
|
|
402
|
+
|
|
403
|
+
Args:
|
|
404
|
+
server: MCP server instance
|
|
405
|
+
request: MCP request
|
|
406
|
+
|
|
407
|
+
Returns:
|
|
408
|
+
MCP response
|
|
409
|
+
"""
|
|
410
|
+
method = request.get("method")
|
|
411
|
+
params = request.get("params", {})
|
|
412
|
+
|
|
413
|
+
if method == "tools/list":
|
|
414
|
+
return {
|
|
415
|
+
"tools": server.get_tool_list()
|
|
416
|
+
}
|
|
417
|
+
elif method == "tools/call":
|
|
418
|
+
tool_name = params.get("name")
|
|
419
|
+
arguments = params.get("arguments", {})
|
|
420
|
+
result = await server.call_tool(tool_name, arguments)
|
|
421
|
+
return {
|
|
422
|
+
"content": [
|
|
423
|
+
{
|
|
424
|
+
"type": "text",
|
|
425
|
+
"text": json.dumps(result, indent=2)
|
|
426
|
+
}
|
|
427
|
+
]
|
|
428
|
+
}
|
|
429
|
+
elif method == "resources/list":
|
|
430
|
+
return {
|
|
431
|
+
"resources": server.get_resource_list()
|
|
432
|
+
}
|
|
433
|
+
else:
|
|
434
|
+
return {
|
|
435
|
+
"error": {
|
|
436
|
+
"code": -32601,
|
|
437
|
+
"message": f"Method not found: {method}"
|
|
438
|
+
}
|
|
439
|
+
}
|
|
440
|
+
|
|
441
|
+
|
|
442
|
+
async def main_loop():
    """Main MCP server loop using stdio transport.

    Reads newline-delimited JSON-RPC requests from stdin, dispatches them
    through :func:`handle_request`, and writes JSON responses to stdout.
    The loop exits on EOF; malformed JSON and handler failures are reported
    as JSON-RPC error objects rather than terminating the loop.
    """
    server = EmpathyMCPServer()

    logger.info("Empathy MCP Server started")
    logger.info(f"Registered {len(server.tools)} tools")

    # Fix: asyncio.get_event_loop() inside a coroutine is deprecated since
    # Python 3.10; get_running_loop() is the supported way to reach the
    # loop that is executing this coroutine.
    loop = asyncio.get_running_loop()

    while True:
        try:
            # stdin.readline blocks, so run it in the default executor to
            # keep the event loop responsive.
            line = await loop.run_in_executor(None, sys.stdin.readline)
            if not line:
                # EOF: client closed the pipe — shut down cleanly.
                break

            request = json.loads(line)
            response = await handle_request(server, request)

            # Write response to stdout (flush so the client sees it now).
            print(json.dumps(response), flush=True)

        except json.JSONDecodeError as e:
            logger.error(f"Invalid JSON: {e}")
            error_response = {
                "error": {
                    "code": -32700,
                    "message": "Parse error"
                }
            }
            print(json.dumps(error_response), flush=True)
        except Exception as e:
            logger.exception("Error handling request")
            error_response = {
                "error": {
                    "code": -32603,
                    "message": str(e)
                }
            }
            print(json.dumps(error_response), flush=True)
|
|
480
|
+
|
|
481
|
+
|
|
482
|
+
def create_server() -> EmpathyMCPServer:
    """Create and return an Empathy MCP server instance.

    Returns:
        Configured MCP server
    """
    instance = EmpathyMCPServer()
    return instance
|
|
489
|
+
|
|
490
|
+
|
|
491
|
+
def main():
    """Entry point for MCP server.

    Configures file-based logging (stdout must stay reserved for the
    JSON-RPC protocol stream of the stdio transport) and runs the server
    loop until EOF or Ctrl-C.
    """
    import os
    import tempfile

    # Fix: the log path was hard-coded to "/tmp/empathy-mcp.log", which
    # does not exist on Windows. tempfile.gettempdir() resolves to the
    # platform's temp directory (still /tmp on most POSIX systems).
    log_path = os.path.join(tempfile.gettempdir(), "empathy-mcp.log")
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[logging.FileHandler(log_path)]
    )

    try:
        asyncio.run(main_loop())
    except KeyboardInterrupt:
        logger.info("Empathy MCP Server stopped")
|
|
503
|
+
|
|
504
|
+
|
|
505
|
+
if __name__ == "__main__":
|
|
506
|
+
main()
|
|
@@ -29,17 +29,14 @@ Licensed under Fair Source 0.9
|
|
|
29
29
|
"""
|
|
30
30
|
|
|
31
31
|
import argparse
|
|
32
|
-
import hashlib
|
|
33
32
|
import json
|
|
34
33
|
import logging
|
|
35
|
-
import os
|
|
36
34
|
import re
|
|
37
35
|
import signal
|
|
38
36
|
import ssl
|
|
39
37
|
import sys
|
|
40
38
|
import time
|
|
41
39
|
import warnings
|
|
42
|
-
from collections import defaultdict
|
|
43
40
|
from dataclasses import asdict, dataclass
|
|
44
41
|
from datetime import datetime
|
|
45
42
|
from http.server import BaseHTTPRequestHandler, HTTPServer
|
|
@@ -49,6 +46,7 @@ from urllib.parse import parse_qs, urlparse
|
|
|
49
46
|
|
|
50
47
|
import structlog
|
|
51
48
|
|
|
49
|
+
from .control_panel_support import APIKeyAuth, MemoryStats, RateLimiter
|
|
52
50
|
from .long_term import Classification, SecureMemDocsIntegration
|
|
53
51
|
from .redis_bootstrap import (
|
|
54
52
|
RedisStartMethod,
|
|
@@ -198,134 +196,6 @@ def _validate_file_path(path: str, allowed_dir: str | None = None) -> Path:
|
|
|
198
196
|
return resolved
|
|
199
197
|
|
|
200
198
|
|
|
201
|
-
class RateLimiter:
    """Simple in-memory rate limiter by IP address."""

    def __init__(self, window_seconds: int = 60, max_requests: int = 100):
        """Initialize rate limiter.

        Args:
            window_seconds: Time window in seconds
            max_requests: Maximum requests allowed per window

        Raises:
            ValueError: If window_seconds or max_requests is invalid

        """
        if window_seconds < 1:
            raise ValueError(f"window_seconds must be positive, got {window_seconds}")
        if max_requests < 1:
            raise ValueError(f"max_requests must be positive, got {max_requests}")

        self.window_seconds = window_seconds
        self.max_requests = max_requests
        # Per-IP list of request timestamps (seconds since epoch).
        self._requests: dict[str, list[float]] = defaultdict(list)

    def is_allowed(self, client_ip: str) -> bool:
        """Check if request is allowed for this IP.

        Args:
            client_ip: The client IP address

        Returns:
            True if allowed, False if rate limited

        """
        now = time.time()
        cutoff = now - self.window_seconds

        # Drop timestamps that have aged out of the window, then decide.
        live = [stamp for stamp in self._requests[client_ip] if stamp > cutoff]
        self._requests[client_ip] = live

        if len(live) >= self.max_requests:
            logger.warning("rate_limit_exceeded", client_ip=client_ip)
            return False

        # Within budget: record this request.
        live.append(now)
        return True

    def get_remaining(self, client_ip: str) -> int:
        """Get remaining requests for this IP."""
        cutoff = time.time() - self.window_seconds
        active = sum(1 for stamp in self._requests[client_ip] if stamp > cutoff)
        return max(0, self.max_requests - active)
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
class APIKeyAuth:
|
|
259
|
-
"""Simple API key authentication."""
|
|
260
|
-
|
|
261
|
-
def __init__(self, api_key: str | None = None):
|
|
262
|
-
"""Initialize API key auth.
|
|
263
|
-
|
|
264
|
-
Args:
|
|
265
|
-
api_key: The API key to require. If None, reads from
|
|
266
|
-
EMPATHY_MEMORY_API_KEY env var. If still None, auth is disabled.
|
|
267
|
-
|
|
268
|
-
"""
|
|
269
|
-
self.api_key = api_key or os.environ.get("EMPATHY_MEMORY_API_KEY")
|
|
270
|
-
self.enabled = bool(self.api_key)
|
|
271
|
-
self._key_hash: str | None = None
|
|
272
|
-
if self.enabled and self.api_key:
|
|
273
|
-
# Store hash of API key for comparison
|
|
274
|
-
self._key_hash = hashlib.sha256(self.api_key.encode()).hexdigest()
|
|
275
|
-
logger.info("api_key_auth_enabled")
|
|
276
|
-
else:
|
|
277
|
-
logger.info("api_key_auth_disabled", reason="no_key_configured")
|
|
278
|
-
|
|
279
|
-
def is_valid(self, provided_key: str | None) -> bool:
|
|
280
|
-
"""Check if provided API key is valid.
|
|
281
|
-
|
|
282
|
-
Args:
|
|
283
|
-
provided_key: The key provided in the request
|
|
284
|
-
|
|
285
|
-
Returns:
|
|
286
|
-
True if valid or auth disabled, False otherwise
|
|
287
|
-
|
|
288
|
-
"""
|
|
289
|
-
if not self.enabled:
|
|
290
|
-
return True
|
|
291
|
-
|
|
292
|
-
if not provided_key:
|
|
293
|
-
return False
|
|
294
|
-
|
|
295
|
-
# Constant-time comparison via hash
|
|
296
|
-
provided_hash = hashlib.sha256(provided_key.encode()).hexdigest()
|
|
297
|
-
return provided_hash == self._key_hash
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
@dataclass
class MemoryStats:
    """Statistics for memory system.

    Plain snapshot container populated elsewhere; all fields default to
    "empty"/zero so a partially collected snapshot is still valid.
    """

    # Redis stats (short-term tier)
    redis_available: bool = False
    # How Redis was started — presumably a RedisStartMethod value rendered
    # as a string ("none" when Redis is unavailable); confirm against the
    # collector.
    redis_method: str = "none"
    redis_keys_total: int = 0
    redis_keys_working: int = 0
    redis_keys_staged: int = 0
    # Human-readable memory figure as reported by Redis (kept as a string,
    # not bytes) — TODO confirm source (e.g. INFO used_memory_human).
    redis_memory_used: str = "0"

    # Long-term stats (pattern store, broken down by classification level)
    long_term_available: bool = False
    patterns_total: int = 0
    patterns_public: int = 0
    patterns_internal: int = 0
    patterns_sensitive: int = 0
    patterns_encrypted: int = 0

    # Performance stats
    redis_ping_ms: float = 0.0
    storage_bytes: int = 0
    # Time spent collecting this snapshot itself, in milliseconds.
    collection_time_ms: float = 0.0

    # Timestamps
    # When the snapshot was taken; empty string until set by the collector.
    collected_at: str = ""
|
|
327
|
-
|
|
328
|
-
|
|
329
199
|
@dataclass
|
|
330
200
|
class ControlPanelConfig:
|
|
331
201
|
"""Configuration for control panel."""
|