evalguard-python 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
evalguard/crewai.py ADDED
@@ -0,0 +1,189 @@
1
+ """CrewAI guardrail integration for EvalGuard.
2
+
3
+ Usage::
4
+
5
+ from evalguard.crewai import EvalGuardGuardrail, guard_agent
6
+ from crewai import Crew, Agent, Task
7
+
8
+ # Option 1: Guard individual agents
9
+ agent = guard_agent(
10
+ Agent(role="researcher", goal="...", backstory="..."),
11
+ api_key="eg_...",
12
+ )
13
+
14
+ # Option 2: Use as a crew-level guardrail
15
+ guardrail = EvalGuardGuardrail(api_key="eg_...", project_id="proj_...")
16
+ crew = Crew(agents=[agent], tasks=[...])
17
+ # Call guardrail.check() before/after crew.kickoff()
18
+ """
19
+
20
+ from __future__ import annotations
21
+
22
+ import functools
23
+ import time
24
+ from typing import Any, Callable, Dict, List, Optional
25
+
26
+ from .guardrails import GuardrailClient, GuardrailViolation
27
+
28
+
29
class EvalGuardGuardrail:
    """Standalone guardrail that can be plugged into CrewAI workflows.

    Parameters
    ----------
    api_key:
        EvalGuard API key.
    project_id:
        Optional project ID for trace grouping.
    rules:
        Guardrail rules for input checking.
    block_on_violation:
        If *True*, :meth:`check` raises on violation.
    """

    def __init__(
        self,
        api_key: str,
        project_id: Optional[str] = None,
        base_url: str = "https://api.evalguard.ai",
        rules: Optional[List[str]] = None,
        block_on_violation: bool = True,
        timeout: float = 5.0,
    ) -> None:
        self._guard = GuardrailClient(
            api_key=api_key,
            base_url=base_url,
            project_id=project_id,
            timeout=timeout,
        )
        self._rules = rules
        self._block = block_on_violation

    def check(self, text: str, metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Check *text* against the configured guardrail rules.

        Returns
        -------
        dict
            ``{"allowed": bool, "violations": [...]}``

        Raises
        ------
        GuardrailViolation
            If ``block_on_violation`` is *True* and the check fails.
        """
        verdict = self._guard.check_input(text, rules=self._rules, metadata=metadata)
        if self._block and not verdict.get("allowed", True):
            raise GuardrailViolation(verdict.get("violations", []))
        return verdict

    def log(self, data: Dict[str, Any]) -> None:
        """Log a trace entry."""
        self._guard.log_trace(data)

    def wrap_function(self, fn: Callable[..., Any]) -> Callable[..., Any]:
        """Decorator that guards a function's first string argument."""

        @functools.wraps(fn)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            # Guard the first string found among positional args, falling
            # back to the first string keyword value.
            text = next((a for a in args if isinstance(a, str)), "")
            if not text:
                text = next((v for v in kwargs.values() if isinstance(v, str)), "")

            if text:
                self.check(text, metadata={"function": fn.__name__})

            start = time.monotonic()
            result = fn(*args, **kwargs)
            elapsed_ms = (time.monotonic() - start) * 1000

            self._guard.log_trace(
                {
                    "provider": "crewai",
                    "function": fn.__name__,
                    "input": text,
                    "output": str(result)[:2000] if result else "",
                    "llm_latency_ms": round(elapsed_ms, 2),
                }
            )
            return result

        return wrapper
117
+
118
+
119
def guard_agent(
    agent: Any,
    *,
    api_key: str,
    project_id: Optional[str] = None,
    base_url: str = "https://api.evalguard.ai",
    rules: Optional[List[str]] = None,
    block_on_violation: bool = True,
    timeout: float = 5.0,
) -> Any:
    """Wrap a CrewAI ``Agent`` so that every task execution is guarded.

    The agent's ``execute_task`` method is patched to run a guardrail
    check before each task execution and trace logging afterwards.

    Parameters
    ----------
    agent:
        A ``crewai.Agent`` instance.

    Returns
    -------
    The same agent instance, with guardrails applied.
    """
    client = GuardrailClient(
        api_key=api_key,
        base_url=base_url,
        project_id=project_id,
        timeout=timeout,
    )

    original_execute = getattr(agent, "execute_task", None)
    if original_execute is None:
        # No execute_task method — nothing to patch.
        return agent

    @functools.wraps(original_execute)
    def guarded_execute(task: Any, *args: Any, **kwargs: Any) -> Any:
        description = getattr(task, "description", str(task)) if task else ""

        # Pre-execution guardrail check on the task description.
        verdict = client.check_input(
            description,
            rules=rules,
            metadata={
                "agent_role": getattr(agent, "role", "unknown"),
                "framework": "crewai",
            },
        )
        if block_on_violation and not verdict.get("allowed", True):
            raise GuardrailViolation(verdict.get("violations", []))

        started = time.monotonic()
        outcome = original_execute(task, *args, **kwargs)
        duration_ms = (time.monotonic() - started) * 1000

        # Best-effort trace logging after execution.
        client.log_trace(
            {
                "provider": "crewai",
                "agent_role": getattr(agent, "role", "unknown"),
                "input": description[:2000],
                "output": str(outcome)[:2000] if outcome else "",
                "llm_latency_ms": round(duration_ms, 2),
                "violations": verdict.get("violations", []),
            }
        )
        return outcome

    agent.execute_task = guarded_execute
    return agent
evalguard/fastapi.py ADDED
@@ -0,0 +1,273 @@
1
+ """FastAPI middleware for EvalGuard.
2
+
3
+ Usage::
4
+
5
+ from evalguard.fastapi import EvalGuardMiddleware
6
+ from fastapi import FastAPI
7
+
8
+ app = FastAPI()
9
+ app.add_middleware(
10
+ EvalGuardMiddleware,
11
+ api_key="eg_...",
12
+ project_id="proj_...",
13
+ )
14
+ # All matching endpoints are now guarded
15
+
16
+ You can also use the route decorator for fine-grained control::
17
+
18
+ from evalguard.fastapi import guard_route
19
+
20
+ @app.post("/api/chat")
21
+ @guard_route(api_key="eg_...", rules=["prompt_injection"])
22
+ async def chat(request: Request):
23
+ ...
24
+ """
25
+
26
+ from __future__ import annotations
27
+
28
+ import functools
29
+ import json
30
+ import time
31
+ from typing import Any, Callable, Dict, List, Optional, Set
32
+
33
+ from .guardrails import GuardrailClient, GuardrailViolation
34
+
35
+
36
class EvalGuardMiddleware:
    """ASGI middleware that guards incoming requests.

    By default, guards POST/PUT requests to paths containing ``/chat``,
    ``/completions``, ``/generate``, ``/invoke``, or ``/messages``.
    Customize via ``guarded_paths``.

    Parameters
    ----------
    app:
        The ASGI application.
    api_key:
        EvalGuard API key.
    project_id:
        Optional project ID.
    rules:
        Guardrail rules for input checking.
    guarded_paths:
        URL path substrings that trigger guardrail checks.
    block_on_violation:
        If *True*, return 403 when input is blocked.

    NOTE(review): ``check_input`` / ``log_trace`` use a synchronous HTTP
    client and therefore block the event loop for up to ``timeout`` seconds
    per guarded request — consider offloading to a thread if that matters.
    """

    _DEFAULT_PATHS = {"/chat", "/completions", "/generate", "/invoke", "/messages"}

    def __init__(
        self,
        app: Any,
        api_key: str,
        project_id: Optional[str] = None,
        base_url: str = "https://api.evalguard.ai",
        rules: Optional[List[str]] = None,
        guarded_paths: Optional[Set[str]] = None,
        block_on_violation: bool = True,
        timeout: float = 5.0,
    ) -> None:
        self.app = app
        self._guard = GuardrailClient(
            api_key=api_key,
            base_url=base_url,
            project_id=project_id,
            timeout=timeout,
        )
        self._rules = rules
        self._paths = guarded_paths or self._DEFAULT_PATHS
        self._block = block_on_violation

    async def __call__(self, scope: Dict[str, Any], receive: Any, send: Any) -> None:
        if scope["type"] != "http":
            await self.app(scope, receive, send)
            return

        path: str = scope.get("path", "")
        method: str = scope.get("method", "GET")

        # Only guard POST/PUT requests to matching paths.
        if method not in ("POST", "PUT") or not self._should_guard(path):
            await self.app(scope, receive, send)
            return

        # Drain the full request body up front so it can be inspected.
        # Abort on http.disconnect: the previous implementation only set
        # its "complete" flag on http.request messages and would spin
        # forever if the client disconnected mid-body.
        body_chunks: List[bytes] = []
        while True:
            message = await receive()
            if message["type"] == "http.disconnect":
                return
            if message["type"] == "http.request":
                body_chunks.append(message.get("body", b""))
                if not message.get("more_body", False):
                    break

        body_bytes = b"".join(body_chunks)
        prompt_text = _extract_body_text(body_bytes)

        guard_ms = 0.0
        if prompt_text:
            start = time.monotonic()
            check = self._guard.check_input(
                prompt_text,
                rules=self._rules,
                metadata={"path": path, "method": method, "framework": "fastapi"},
            )
            guard_ms = (time.monotonic() - start) * 1000

            if not check.get("allowed", True) and self._block:
                await self._send_blocked(send, check)
                return

        # Replay the buffered body exactly once, then fall through to the
        # real receive channel so the downstream app can still observe
        # http.disconnect. (The previous wrapper replayed the cached body
        # on every call, so an app awaiting a further message would loop
        # forever on the same http.request.)
        replayed = False

        async def receive_wrapper() -> Dict[str, Any]:
            nonlocal replayed
            if not replayed:
                replayed = True
                return {"type": "http.request", "body": body_bytes, "more_body": False}
            return await receive()

        start = time.monotonic()
        await self.app(scope, receive_wrapper, send)
        llm_ms = (time.monotonic() - start) * 1000

        # Best-effort trace log.
        self._guard.log_trace({
            "provider": "fastapi",
            "path": path,
            "input": prompt_text[:2000] if prompt_text else "",
            "guard_latency_ms": round(guard_ms, 2) if prompt_text else 0,
            "request_latency_ms": round(llm_ms, 2),
        })

    async def _send_blocked(self, send: Any, check: Dict[str, Any]) -> None:
        """Send a 403 JSON response describing the guardrail violations."""
        response_body = json.dumps({
            "error": "Blocked by EvalGuard guardrail",
            "violations": check.get("violations", []),
        }).encode()
        await send({
            "type": "http.response.start",
            "status": 403,
            "headers": [
                [b"content-type", b"application/json"],
                [b"content-length", str(len(response_body)).encode()],
            ],
        })
        await send({
            "type": "http.response.body",
            "body": response_body,
        })

    def _should_guard(self, path: str) -> bool:
        """True when *path* contains any of the guarded path substrings."""
        return any(p in path for p in self._paths)
165
+
166
+
167
def guard_route(
    *,
    api_key: str,
    project_id: Optional[str] = None,
    base_url: str = "https://api.evalguard.ai",
    rules: Optional[List[str]] = None,
    block_on_violation: bool = True,
    timeout: float = 5.0,
) -> Callable:
    """Decorator for guarding individual FastAPI route handlers.

    Usage::

        @app.post("/api/chat")
        @guard_route(api_key="eg_...")
        async def chat(request: Request):
            body = await request.json()
            ...

    Guardrail infrastructure errors (network failures, unparseable
    bodies) fail open: the route handler still runs. Violations raised
    when ``block_on_violation`` is set are NOT swallowed.
    """
    guard = GuardrailClient(
        api_key=api_key,
        base_url=base_url,
        project_id=project_id,
        timeout=timeout,
    )

    def decorator(fn: Callable) -> Callable:
        @functools.wraps(fn)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            # Locate the Request object by duck-typing (has .json and .method).
            request = None
            for arg in args:
                if hasattr(arg, "json") and hasattr(arg, "method"):
                    request = arg
                    break
            if request is None:
                # Only scan kwargs when the positional scan found nothing;
                # the original unconditional scan could overwrite an
                # already-found request with a different kwarg.
                for v in kwargs.values():
                    if hasattr(v, "json") and hasattr(v, "method"):
                        request = v
                        break

            if request is not None:
                try:
                    body = await request.json()
                    prompt_text = _extract_dict_text(body)
                    if prompt_text:
                        check = guard.check_input(prompt_text, rules=rules)
                        if not check.get("allowed", True) and block_on_violation:
                            # Import here to avoid a hard FastAPI dependency.
                            try:
                                from fastapi.responses import JSONResponse
                                return JSONResponse(
                                    status_code=403,
                                    content={
                                        "error": "Blocked by EvalGuard guardrail",
                                        "violations": check.get("violations", []),
                                    },
                                )
                            except ImportError:
                                raise GuardrailViolation(check.get("violations", []))
                except GuardrailViolation:
                    # Bug fix: the broad fail-open handler below used to
                    # swallow this, silently disabling block_on_violation
                    # when FastAPI was not importable.
                    raise
                except Exception:
                    pass  # fail-open: never let guardrail errors break the route

            return await fn(*args, **kwargs)

        return wrapper

    return decorator
234
+
235
+
236
def _extract_body_text(body: bytes) -> str:
    """Extract prompt text from a JSON request body.

    Returns ``""`` when the body is not valid JSON (or not valid UTF-8).
    """
    try:
        parsed = json.loads(body)
    except (json.JSONDecodeError, UnicodeDecodeError):
        return ""
    return _extract_dict_text(parsed)
243
+
244
+
245
+ def _extract_dict_text(data: Any) -> str:
246
+ """Extract prompt/message text from a parsed JSON body."""
247
+ if not isinstance(data, dict):
248
+ return ""
249
+
250
+ # Direct prompt field
251
+ if "prompt" in data:
252
+ return data["prompt"] if isinstance(data["prompt"], str) else str(data["prompt"])
253
+
254
+ # OpenAI-style messages
255
+ messages = data.get("messages", [])
256
+ if messages and isinstance(messages, list):
257
+ parts: list[str] = []
258
+ for msg in messages:
259
+ if isinstance(msg, dict):
260
+ content = msg.get("content", "")
261
+ if isinstance(content, str):
262
+ parts.append(content)
263
+ return "\n".join(parts)
264
+
265
+ # Input field (common in many APIs)
266
+ if "input" in data:
267
+ return data["input"] if isinstance(data["input"], str) else str(data["input"])
268
+
269
+ # Query field
270
+ if "query" in data:
271
+ return data["query"] if isinstance(data["query"], str) else str(data["query"])
272
+
273
+ return ""
@@ -0,0 +1,160 @@
1
+ """Core guardrail client shared by all framework integrations.
2
+
3
+ Provides pre-LLM input checking (prompt injection, PII redaction) and
4
+ post-LLM trace logging. Every framework wrapper delegates to a single
5
+ :class:`GuardrailClient` instance so configuration is consistent.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import logging
11
+ import time
12
+ from typing import Any, Dict, List, Optional
13
+
14
+ import requests
15
+
16
+ logger = logging.getLogger("evalguard.guardrails")
17
+
18
+ _DEFAULT_RULES: List[str] = ["prompt_injection", "pii_redact"]
19
+
20
+
21
class GuardrailClient:
    """Lightweight HTTP client for the EvalGuard guardrail & trace APIs.

    Parameters
    ----------
    api_key:
        EvalGuard API key (``eg_live_...`` or ``eg_test_...``).
    base_url:
        API base URL. Override for self-hosted deployments.
    project_id:
        Optional project ID attached to every trace.
    timeout:
        HTTP request timeout in seconds. Keep low (default 5 s) so the
        guardrail check never dominates end-to-end latency.
    fail_open:
        If *True*, network / server errors allow the request through
        instead of raising. Defaults to *False*.
    """

    def __init__(
        self,
        api_key: str,
        base_url: str = "https://api.evalguard.ai",
        project_id: Optional[str] = None,
        timeout: float = 5.0,
        fail_open: bool = False,
    ) -> None:
        self._api_key = api_key
        self._base_url = base_url.rstrip("/")
        self._project_id = project_id
        self._timeout = timeout
        self._fail_open = fail_open
        self._session = requests.Session()
        self._session.headers.update(
            {
                "Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json",
                # Keep in sync with the released package version (was
                # stale at 1.0.0 while the package shipped as 1.1.0).
                "User-Agent": "evalguard-python/1.1.0",
            }
        )

    # ── Public API ────────────────────────────────────────────────────

    def check_input(
        self,
        text: str,
        rules: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        """Pre-LLM guard check.

        Returns
        -------
        dict
            ``{"allowed": bool, "violations": [...], "sanitized": str | None}``
        """
        payload: Dict[str, Any] = {
            "input": text,
            "rules": rules or _DEFAULT_RULES,
        }
        return self._post_check(
            "/api/v1/guardrails",
            payload,
            metadata,
            "Guardrail check failed; fail-open allowing request",
        )

    def check_output(
        self,
        text: str,
        rules: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        """Post-LLM output check.

        Returns
        -------
        dict
            ``{"allowed": bool, "violations": [...], "sanitized": str | None}``
        """
        payload: Dict[str, Any] = {
            "output": text,
            "rules": rules or ["toxic_content", "pii_redact"],
        }
        return self._post_check(
            "/api/v1/guardrails/output",
            payload,
            metadata,
            "Output check failed; fail-open allowing response",
        )

    def log_trace(self, data: Dict[str, Any]) -> None:
        """Fire-and-forget trace logging. Errors are silently swallowed."""
        payload = {**data}
        if self._project_id:
            payload.setdefault("project_id", self._project_id)
        try:
            self._session.post(
                f"{self._base_url}/api/v1/traces",
                json=payload,
                timeout=self._timeout,
            )
        except Exception:
            logger.debug("Trace log failed", exc_info=True)

    def close(self) -> None:
        """Close the underlying HTTP session."""
        self._session.close()

    # ── Internal helpers ──────────────────────────────────────────────

    def _post_check(
        self,
        endpoint: str,
        payload: Dict[str, Any],
        metadata: Optional[Dict[str, Any]],
        fail_message: str,
    ) -> Dict[str, Any]:
        """POST *payload* to *endpoint* and return the check verdict.

        Shared implementation for :meth:`check_input` and
        :meth:`check_output`, which were previously duplicated. Honors
        ``fail_open``: on any error a fresh "allow" verdict is returned
        instead of raising.
        """
        if self._project_id:
            payload["project_id"] = self._project_id
        if metadata:
            payload["metadata"] = metadata
        try:
            resp = self._session.post(
                f"{self._base_url}{endpoint}",
                json=payload,
                timeout=self._timeout,
            )
            resp.raise_for_status()
            data = resp.json()
            return data.get("data", {"allowed": True, "violations": [], "sanitized": None})
        except Exception:
            if self._fail_open:
                logger.debug(fail_message, exc_info=True)
                return {"allowed": True, "violations": [], "sanitized": None}
            raise
153
+
154
+
155
class GuardrailViolation(Exception):
    """Raised when a guardrail check blocks a request.

    The structured violation details reported by the API are preserved
    on the ``violations`` attribute for programmatic handling.
    """

    def __init__(self, violations: List[Dict[str, Any]], message: str = "Blocked by EvalGuard guardrail"):
        # Retain the raw violation records alongside the human-readable message.
        self.violations = violations
        super().__init__(message)