codeshield-ai 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- codeshield/__init__.py +62 -0
- codeshield/api_server.py +438 -0
- codeshield/cli.py +48 -0
- codeshield/contextvault/__init__.py +1 -0
- codeshield/contextvault/capture.py +174 -0
- codeshield/contextvault/restore.py +115 -0
- codeshield/mcp/__init__.py +1 -0
- codeshield/mcp/hooks.py +65 -0
- codeshield/mcp/server.py +319 -0
- codeshield/styleforge/__init__.py +1 -0
- codeshield/styleforge/corrector.py +298 -0
- codeshield/trustgate/__init__.py +1 -0
- codeshield/trustgate/checker.py +384 -0
- codeshield/trustgate/sandbox.py +101 -0
- codeshield/utils/__init__.py +9 -0
- codeshield/utils/daytona.py +233 -0
- codeshield/utils/leanmcp.py +258 -0
- codeshield/utils/llm.py +423 -0
- codeshield/utils/metrics.py +543 -0
- codeshield/utils/token_optimizer.py +605 -0
- codeshield_ai-0.1.0.dist-info/METADATA +565 -0
- codeshield_ai-0.1.0.dist-info/RECORD +24 -0
- codeshield_ai-0.1.0.dist-info/WHEEL +4 -0
- codeshield_ai-0.1.0.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
"""
|
|
2
|
+
TrustGate Sandbox - Daytona integration for code execution
|
|
3
|
+
|
|
4
|
+
Executes verified code in secure sandbox to confirm it actually runs.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import Optional
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
|
|
10
|
+
from codeshield.utils.daytona import get_daytona_client, ExecutionResult
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@dataclass
class SandboxVerification:
    """Outcome of attempting to run candidate code inside the sandbox."""

    executed: bool                 # an execution attempt was actually made
    runs_successfully: bool        # the run completed without error
    output: str                    # captured stdout from the run
    error: Optional[str] = None    # error text when the run failed
    execution_time_ms: int = 0     # wall-clock runtime in milliseconds

    def to_dict(self) -> dict:
        """Serialize for reporting; stdout is capped at 500 characters."""
        trimmed = self.output[:500] if self.output else ""
        return {
            "executed": self.executed,
            "runs_successfully": self.runs_successfully,
            "output": trimmed,
            "error": self.error,
            "execution_time_ms": self.execution_time_ms,
        }
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def verify_in_sandbox(
    code: str,
    timeout_seconds: int = 10,
) -> SandboxVerification:
    """
    Run *code* in the sandbox and report whether it executed cleanly.

    Args:
        code: Python code to verify
        timeout_seconds: Maximum execution time

    Returns:
        SandboxVerification with execution details
    """
    outcome = get_daytona_client().execute_code(
        code=code,
        language="python",
        timeout_seconds=timeout_seconds,
    )

    # Surface stderr only for failed runs; successful runs report no error.
    failure_detail = None if outcome.success else outcome.stderr

    return SandboxVerification(
        executed=True,
        runs_successfully=outcome.success,
        output=outcome.stdout,
        error=failure_detail,
        execution_time_ms=outcome.execution_time_ms,
    )
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def full_verification(code: str) -> dict:
    """
    Run the complete verification pipeline: syntax + imports + execution.

    Returns a comprehensive report dict with overall validity, the static
    analysis result, the sandbox execution result (or None when skipped),
    a confidence score, and any auto-fixed code.
    """
    from codeshield.trustgate.checker import verify_code

    # Step 1: static analysis, with an auto-fix attempt.
    static = verify_code(code, auto_fix=True)

    # Prefer the auto-fixed variant when one was produced.
    candidate = static.fixed_code or code

    # Step 2: sandbox execution — only when static checks passed or a fix exists.
    sandbox = None
    if static.is_valid or static.fixed_code:
        sandbox = verify_in_sandbox(candidate)

    sandbox_ok = sandbox is None or sandbox.runs_successfully
    report = {
        "overall_valid": static.is_valid and sandbox_ok,
        "static_analysis": static.to_dict(),
        "sandbox_execution": sandbox.to_dict() if sandbox else None,
        "confidence_score": static.confidence_score,
        "fixed_code": static.fixed_code,
    }

    # Nudge confidence up/down based on the sandbox outcome (clamped to [0, 1]).
    if sandbox is not None:
        if sandbox.runs_successfully:
            report["confidence_score"] = min(1.0, report["confidence_score"] + 0.1)
        else:
            report["confidence_score"] = max(0.0, report["confidence_score"] - 0.2)

    return report
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
"""Utility modules"""
|
|
2
|
+
|
|
3
|
+
from codeshield.utils.llm import LLMClient, LLMResponse, get_llm_client, get_provider_stats
|
|
4
|
+
from codeshield.utils.leanmcp import LeanMCPClient, get_leanmcp_client, track_mcp_call
|
|
5
|
+
|
|
6
|
+
# Public API of ``codeshield.utils`` — the names re-exported from the
# .llm and .leanmcp submodules above.
__all__ = [
    "LLMClient", "LLMResponse", "get_llm_client", "get_provider_stats",
    "LeanMCPClient", "get_leanmcp_client", "track_mcp_call"
]
|
|
@@ -0,0 +1,233 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Daytona SDK Wrapper - Sandboxed code execution
|
|
3
|
+
|
|
4
|
+
Uses Daytona's secure sandbox environment to:
|
|
5
|
+
- Execute untrusted/AI-generated code safely
|
|
6
|
+
- Verify code actually runs without side effects
|
|
7
|
+
- Capture execution results and errors
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import os
|
|
11
|
+
from typing import Optional
|
|
12
|
+
from dataclasses import dataclass
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass
class ExecutionResult:
    """Result of code execution in sandbox."""
    success: bool                # True when the process exited with code 0
    stdout: str                  # captured standard output
    stderr: str                  # captured standard error
    exit_code: int               # process exit code (-1 for internal failures)
    execution_time_ms: int = 0   # wall-clock runtime in milliseconds
    error: Optional[str] = None  # internal/infrastructure error description

    def to_dict(self) -> dict:
        """Serialize the result for JSON reporting."""
        return {
            "success": self.success,
            "stdout": self.stdout,
            "stderr": self.stderr,
            "exit_code": self.exit_code,
            "execution_time_ms": self.execution_time_ms,
            "error": self.error,
        }


class DaytonaClient:
    """Client for Daytona sandbox execution.

    Prefers the Daytona SDK (configured via DAYTONA_API_KEY); when the SDK is
    missing or errors out it falls back to local subprocess execution.
    """

    def __init__(self, api_key: Optional[str] = None, api_url: Optional[str] = None):
        # Credentials/endpoint may be passed explicitly or read from the environment.
        self.api_key = api_key or os.getenv("DAYTONA_API_KEY")
        self.api_url = api_url or os.getenv("DAYTONA_API_URL", "https://app.daytona.io/api")
        self._client = None
        self._sandbox = None

    def _ensure_sdk(self):
        """Return (Daytona, DaytonaConfig) SDK classes, or (None, None) if not installed."""
        try:
            # Official SDK import (pip install daytona)
            from daytona import Daytona, DaytonaConfig
            return Daytona, DaytonaConfig
        except ImportError:
            try:
                # Alternative import (pip install daytona-sdk)
                from daytona_sdk import Daytona, DaytonaConfig
                return Daytona, DaytonaConfig
            except ImportError:
                return None, None

    def is_available(self) -> bool:
        """Check if the Daytona SDK is importable and an API key is configured."""
        Daytona, _ = self._ensure_sdk()
        return Daytona is not None and self.api_key is not None

    def execute_code(
        self,
        code: str,
        language: str = "python",
        timeout_seconds: int = 30,
    ) -> ExecutionResult:
        """
        Execute code in a Daytona sandbox.

        Args:
            code: Code to execute
            language: Programming language (python, javascript, etc.)
            timeout_seconds: Maximum execution time (enforced only on the
                local fallback path; the SDK path does not pass it through)

        Returns:
            ExecutionResult with stdout, stderr, and status
        """
        # Fetch the SDK once (the previous version called _ensure_sdk twice,
        # binding the second tuple element to a misleading unused name).
        Daytona, DaytonaConfig = self._ensure_sdk()

        if Daytona is None:
            # SDK not installed: fall back to local execution with safety measures.
            return self._local_execute(code, language, timeout_seconds)

        if not self.api_key:
            return ExecutionResult(
                success=False,
                stdout="",
                stderr="",
                exit_code=-1,
                error="DAYTONA_API_KEY not configured",
            )

        try:
            # Initialize Daytona client with config (official SDK pattern).
            config = DaytonaConfig(api_key=self.api_key)
            daytona = Daytona(config)

            # Create an ephemeral sandbox for this run.
            sandbox = daytona.create()

            try:
                response = sandbox.process.code_run(code)

                return ExecutionResult(
                    success=response.exit_code == 0,
                    stdout=response.result or "",
                    stderr="",
                    exit_code=response.exit_code,
                    execution_time_ms=0,  # SDK response does not report timing here
                )
            finally:
                # Always tear the sandbox down, even when code_run raises.
                daytona.delete(sandbox)

        except Exception as e:
            # Any SDK failure degrades gracefully to local execution.
            print(f"Daytona error: {e}, falling back to local execution")
            return self._local_execute(code, language, timeout_seconds)

    def _local_execute(
        self,
        code: str,
        language: str,
        timeout_seconds: int,
    ) -> ExecutionResult:
        """
        Fallback local execution with safety measures.

        WARNING: This is less safe than Daytona. Use only for demo/testing.
        """
        import subprocess
        import sys
        import tempfile
        import time

        if language != "python":
            return ExecutionResult(
                success=False,
                stdout="",
                stderr="",
                exit_code=-1,
                error=f"Local execution only supports Python, got: {language}",
            )

        # Write code to a temp file (delete=False so the subprocess can open it).
        with tempfile.NamedTemporaryFile(
            mode='w',
            suffix='.py',
            delete=False,
            encoding='utf-8'
        ) as f:
            f.write(code)
            temp_path = f.name

        try:
            start_time = time.time()

            # Run with the *current* interpreter: a bare "python" may be absent
            # or point at a different installation on PATH.
            result = subprocess.run(
                [sys.executable, temp_path],
                capture_output=True,
                text=True,
                timeout=timeout_seconds,
            )

            execution_time = int((time.time() - start_time) * 1000)

            return ExecutionResult(
                success=result.returncode == 0,
                stdout=result.stdout,
                stderr=result.stderr,
                exit_code=result.returncode,
                execution_time_ms=execution_time,
            )

        except subprocess.TimeoutExpired:
            return ExecutionResult(
                success=False,
                stdout="",
                stderr="",
                exit_code=-1,
                error=f"Execution timed out after {timeout_seconds}s",
            )
        except Exception as e:
            return ExecutionResult(
                success=False,
                stdout="",
                stderr=str(e),
                exit_code=-1,
                error=f"Local execution failed: {e}",
            )
        finally:
            # Best-effort temp-file cleanup; only swallow filesystem errors,
            # never KeyboardInterrupt/SystemExit (the old bare `except:` did).
            try:
                os.unlink(temp_path)
            except OSError:
                pass

    def verify_code_runs(self, code: str) -> tuple[bool, str]:
        """
        Quick verification that code runs without error.

        Returns:
            Tuple of (success, message)
        """
        result = self.execute_code(code, timeout_seconds=10)

        if result.success:
            return True, "Code executed successfully"
        else:
            error_msg = result.stderr or result.error or "Unknown error"
            return False, f"Execution failed: {error_msg}"
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
# Lazily-created module-level singleton.
_daytona_client: Optional[DaytonaClient] = None


def get_daytona_client() -> DaytonaClient:
    """Return the shared DaytonaClient, constructing it on first call."""
    global _daytona_client
    if _daytona_client is None:
        _daytona_client = DaytonaClient()
    return _daytona_client
|
|
@@ -0,0 +1,258 @@
|
|
|
1
|
+
"""
|
|
2
|
+
LeanMCP Integration - MCP Server Observability & Analytics
|
|
3
|
+
|
|
4
|
+
LeanMCP provides production-grade MCP infrastructure.
|
|
5
|
+
This module integrates CodeShield with LeanMCP's observability platform
|
|
6
|
+
to track MCP tool usage, performance metrics, and health status.
|
|
7
|
+
|
|
8
|
+
Docs: https://docs.leanmcp.com/
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import os
|
|
12
|
+
import httpx
|
|
13
|
+
from typing import Optional, Dict, Any
|
|
14
|
+
from dataclasses import dataclass, field
|
|
15
|
+
from datetime import datetime
|
|
16
|
+
import json
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass
|
|
20
|
+
class MCPEvent:
|
|
21
|
+
"""Represents an MCP tool invocation event for analytics"""
|
|
22
|
+
tool_name: str
|
|
23
|
+
timestamp: str = field(default_factory=lambda: datetime.utcnow().isoformat())
|
|
24
|
+
duration_ms: Optional[int] = None
|
|
25
|
+
success: bool = True
|
|
26
|
+
error_message: Optional[str] = None
|
|
27
|
+
metadata: Dict[str, Any] = field(default_factory=dict)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class LeanMCPClient:
|
|
31
|
+
"""
|
|
32
|
+
LeanMCP observability client for tracking MCP server metrics.
|
|
33
|
+
|
|
34
|
+
Features:
|
|
35
|
+
- Tool invocation tracking
|
|
36
|
+
- Performance metrics
|
|
37
|
+
- Health reporting
|
|
38
|
+
- Usage analytics
|
|
39
|
+
"""
|
|
40
|
+
|
|
41
|
+
def __init__(self):
|
|
42
|
+
self.api_key = os.getenv("LEANMCP_KEY")
|
|
43
|
+
self.api_url = os.getenv("LEANMCP_API_URL", "https://api.leanmcp.com")
|
|
44
|
+
self.enabled = bool(self.api_key)
|
|
45
|
+
self._client = httpx.Client(timeout=10.0) if self.enabled else None
|
|
46
|
+
self._events_buffer: list[MCPEvent] = []
|
|
47
|
+
self._buffer_size = 10 # Flush after 10 events
|
|
48
|
+
|
|
49
|
+
# Local metrics tracking (always available)
|
|
50
|
+
self._metrics = {
|
|
51
|
+
"total_calls": 0,
|
|
52
|
+
"successful_calls": 0,
|
|
53
|
+
"failed_calls": 0,
|
|
54
|
+
"tools": {},
|
|
55
|
+
"start_time": datetime.utcnow().isoformat(),
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
def is_configured(self) -> bool:
|
|
59
|
+
"""Check if LeanMCP is properly configured"""
|
|
60
|
+
return self.enabled and self.api_key is not None
|
|
61
|
+
|
|
62
|
+
def get_status(self) -> Dict[str, Any]:
|
|
63
|
+
"""Get LeanMCP integration status"""
|
|
64
|
+
return {
|
|
65
|
+
"configured": self.is_configured(),
|
|
66
|
+
"api_url": self.api_url,
|
|
67
|
+
"events_buffered": len(self._events_buffer),
|
|
68
|
+
"local_metrics": self._metrics,
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
def track_tool_call(
|
|
72
|
+
self,
|
|
73
|
+
tool_name: str,
|
|
74
|
+
duration_ms: Optional[int] = None,
|
|
75
|
+
success: bool = True,
|
|
76
|
+
error_message: Optional[str] = None,
|
|
77
|
+
metadata: Optional[Dict[str, Any]] = None
|
|
78
|
+
) -> None:
|
|
79
|
+
"""
|
|
80
|
+
Track an MCP tool invocation for analytics.
|
|
81
|
+
|
|
82
|
+
Args:
|
|
83
|
+
tool_name: Name of the MCP tool (e.g., "verify_code", "check_style")
|
|
84
|
+
duration_ms: Execution time in milliseconds
|
|
85
|
+
success: Whether the call succeeded
|
|
86
|
+
error_message: Error message if failed
|
|
87
|
+
metadata: Additional context
|
|
88
|
+
"""
|
|
89
|
+
# Update local metrics
|
|
90
|
+
self._metrics["total_calls"] += 1
|
|
91
|
+
if success:
|
|
92
|
+
self._metrics["successful_calls"] += 1
|
|
93
|
+
else:
|
|
94
|
+
self._metrics["failed_calls"] += 1
|
|
95
|
+
|
|
96
|
+
if tool_name not in self._metrics["tools"]:
|
|
97
|
+
self._metrics["tools"][tool_name] = {"calls": 0, "errors": 0, "total_duration_ms": 0}
|
|
98
|
+
|
|
99
|
+
self._metrics["tools"][tool_name]["calls"] += 1
|
|
100
|
+
if not success:
|
|
101
|
+
self._metrics["tools"][tool_name]["errors"] += 1
|
|
102
|
+
if duration_ms:
|
|
103
|
+
self._metrics["tools"][tool_name]["total_duration_ms"] += duration_ms
|
|
104
|
+
|
|
105
|
+
# Create event for LeanMCP
|
|
106
|
+
event = MCPEvent(
|
|
107
|
+
tool_name=tool_name,
|
|
108
|
+
duration_ms=duration_ms,
|
|
109
|
+
success=success,
|
|
110
|
+
error_message=error_message,
|
|
111
|
+
metadata=metadata or {}
|
|
112
|
+
)
|
|
113
|
+
|
|
114
|
+
self._events_buffer.append(event)
|
|
115
|
+
|
|
116
|
+
# Auto-flush if buffer is full
|
|
117
|
+
if len(self._events_buffer) >= self._buffer_size:
|
|
118
|
+
self.flush_events()
|
|
119
|
+
|
|
120
|
+
def flush_events(self) -> bool:
|
|
121
|
+
"""
|
|
122
|
+
Send buffered events to LeanMCP platform.
|
|
123
|
+
Returns True if successful or LeanMCP not configured.
|
|
124
|
+
"""
|
|
125
|
+
if not self._events_buffer:
|
|
126
|
+
return True
|
|
127
|
+
|
|
128
|
+
if not self.is_configured() or self._client is None:
|
|
129
|
+
# Clear buffer if not configured (events are still tracked locally)
|
|
130
|
+
self._events_buffer.clear()
|
|
131
|
+
return True
|
|
132
|
+
|
|
133
|
+
try:
|
|
134
|
+
events_data = [
|
|
135
|
+
{
|
|
136
|
+
"tool_name": e.tool_name,
|
|
137
|
+
"timestamp": e.timestamp,
|
|
138
|
+
"duration_ms": e.duration_ms,
|
|
139
|
+
"success": e.success,
|
|
140
|
+
"error_message": e.error_message,
|
|
141
|
+
"metadata": e.metadata,
|
|
142
|
+
}
|
|
143
|
+
for e in self._events_buffer
|
|
144
|
+
]
|
|
145
|
+
|
|
146
|
+
response = self._client.post(
|
|
147
|
+
f"{self.api_url}/v1/events",
|
|
148
|
+
headers={
|
|
149
|
+
"Authorization": f"Bearer {self.api_key}",
|
|
150
|
+
"Content-Type": "application/json",
|
|
151
|
+
},
|
|
152
|
+
json={
|
|
153
|
+
"server_name": "CodeShield",
|
|
154
|
+
"events": events_data,
|
|
155
|
+
}
|
|
156
|
+
)
|
|
157
|
+
|
|
158
|
+
if response.status_code == 200:
|
|
159
|
+
self._events_buffer.clear()
|
|
160
|
+
return True
|
|
161
|
+
else:
|
|
162
|
+
print(f"LeanMCP event flush failed: {response.status_code}")
|
|
163
|
+
return False
|
|
164
|
+
|
|
165
|
+
except Exception as e:
|
|
166
|
+
print(f"LeanMCP error: {e}")
|
|
167
|
+
return False
|
|
168
|
+
|
|
169
|
+
def report_health(self) -> Dict[str, Any]:
|
|
170
|
+
"""
|
|
171
|
+
Report server health to LeanMCP and return health status.
|
|
172
|
+
"""
|
|
173
|
+
health_data = {
|
|
174
|
+
"server_name": "CodeShield",
|
|
175
|
+
"status": "healthy",
|
|
176
|
+
"version": "1.0.0",
|
|
177
|
+
"metrics": self._metrics,
|
|
178
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
if self.is_configured() and self._client is not None:
|
|
182
|
+
try:
|
|
183
|
+
self._client.post(
|
|
184
|
+
f"{self.api_url}/v1/health",
|
|
185
|
+
headers={
|
|
186
|
+
"Authorization": f"Bearer {self.api_key}",
|
|
187
|
+
"Content-Type": "application/json",
|
|
188
|
+
},
|
|
189
|
+
json=health_data
|
|
190
|
+
)
|
|
191
|
+
except Exception as e:
|
|
192
|
+
print(f"LeanMCP health report error: {e}")
|
|
193
|
+
|
|
194
|
+
return health_data
|
|
195
|
+
|
|
196
|
+
def get_metrics(self) -> Dict[str, Any]:
|
|
197
|
+
"""Get current metrics summary"""
|
|
198
|
+
metrics = self._metrics.copy()
|
|
199
|
+
|
|
200
|
+
# Calculate averages
|
|
201
|
+
for tool_name, tool_data in metrics["tools"].items():
|
|
202
|
+
if tool_data["calls"] > 0 and tool_data["total_duration_ms"] > 0:
|
|
203
|
+
tool_data["avg_duration_ms"] = tool_data["total_duration_ms"] / tool_data["calls"]
|
|
204
|
+
|
|
205
|
+
return metrics
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
# Lazily-created module-level singleton.
_leanmcp_client: Optional[LeanMCPClient] = None


def get_leanmcp_client() -> LeanMCPClient:
    """Return the shared LeanMCPClient, constructing it on first call."""
    global _leanmcp_client
    if _leanmcp_client is None:
        _leanmcp_client = LeanMCPClient()
    return _leanmcp_client
|
|
218
|
+
|
|
219
|
+
|
|
220
|
+
def track_mcp_call(tool_name: str):
    """
    Decorator that records every invocation of an MCP tool with LeanMCP.

    Successful calls are tracked with their duration; failing calls are
    tracked with the error message, and the exception is re-raised.

    Usage:
        @track_mcp_call("verify_code")
        def verify_code(code: str) -> dict:
            ...
    """
    import functools
    import time

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            observer = get_leanmcp_client()
            started = time.time()

            try:
                outcome = func(*args, **kwargs)
                elapsed_ms = int((time.time() - started) * 1000)
                observer.track_tool_call(
                    tool_name=tool_name,
                    duration_ms=elapsed_ms,
                    success=True
                )
                return outcome
            except Exception as exc:
                elapsed_ms = int((time.time() - started) * 1000)
                observer.track_tool_call(
                    tool_name=tool_name,
                    duration_ms=elapsed_ms,
                    success=False,
                    error_message=str(exc)
                )
                raise

        return wrapper
    return decorator
|