driftrail 2.0.0__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
driftrail/client.py CHANGED
@@ -1,9 +1,9 @@
1
1
  """
2
- DriftRail Client - Sync and Async implementations
2
+ DriftRail Client - Complete SDK with all enterprise features.
3
3
  """
4
4
 
5
5
  import json
6
- from typing import Optional, Dict, Any, Union
6
+ from typing import Optional, Dict, Any, Union, List
7
7
  from urllib.request import Request, urlopen
8
8
  from urllib.error import HTTPError, URLError
9
9
  from concurrent.futures import ThreadPoolExecutor
@@ -17,40 +17,10 @@ DEFAULT_BASE_URL = "https://api.driftrail.com"
17
17
 
18
18
 
19
19
  class DriftRail:
20
- """
21
- Synchronous DriftRail client.
22
-
23
- Usage:
24
- client = DriftRail(api_key="dr_live_...", app_id="my-app")
25
-
26
- response = client.ingest(
27
- model="gpt-5",
28
- provider="openai",
29
- input={"prompt": "Hello"},
30
- output={"text": "Hi there!"}
31
- )
32
- """
33
-
34
- def __init__(
35
- self,
36
- api_key: str,
37
- app_id: str,
38
- base_url: str = DEFAULT_BASE_URL,
39
- timeout: int = 30,
40
- fail_open: bool = True,
41
- guard_mode: str = "fail_open",
42
- ):
43
- """
44
- Initialize DriftRail client.
45
-
46
- Args:
47
- api_key: Your DriftRail API key (dr_live_... or dr_test_...)
48
- app_id: Your application identifier
49
- base_url: API base URL (default: https://api.driftrail.com)
50
- timeout: Request timeout in seconds
51
- fail_open: If True, errors are logged but don't raise exceptions
52
- guard_mode: "fail_open" (default) or "fail_closed" for guard() calls
53
- """
20
+ """Synchronous DriftRail client."""
21
+
22
+ def __init__(self, api_key: str, app_id: str, base_url: str = DEFAULT_BASE_URL,
23
+ timeout: int = 30, fail_open: bool = True, guard_mode: str = "fail_open"):
54
24
  self.api_key = api_key
55
25
  self.app_id = app_id
56
26
  self.base_url = base_url.rstrip("/")
@@ -59,194 +29,84 @@ class DriftRail:
59
29
  self.guard_mode = guard_mode
60
30
  self._executor = ThreadPoolExecutor(max_workers=4)
61
31
 
62
- def ingest(
63
- self,
64
- model: str,
65
- provider: Provider,
66
- input: Union[InputPayload, Dict[str, Any]],
67
- output: Union[OutputPayload, Dict[str, Any]],
68
- metadata: Optional[Union[Metadata, Dict[str, Any]]] = None,
69
- ) -> IngestResponse:
70
- """
71
- Ingest an LLM interaction for classification.
72
-
73
- Args:
74
- model: Model name (e.g., "gpt-5", "claude-3")
75
- provider: Provider name ("openai", "google", "anthropic", "other")
76
- input: Input payload with prompt and optional messages/sources
77
- output: Output payload with text and optional tool calls
78
- metadata: Optional metadata (latency, tokens, temperature)
79
-
80
- Returns:
81
- IngestResponse with event_id and job_id on success
82
- """
83
- # Build payload
32
+ def ingest(self, model: str, provider: Provider, input: Union[InputPayload, Dict[str, Any]],
33
+ output: Union[OutputPayload, Dict[str, Any]], metadata: Optional[Union[Metadata, Dict[str, Any]]] = None) -> IngestResponse:
84
34
  if isinstance(input, dict):
85
- input_payload = InputPayload(
86
- prompt=input.get("prompt", ""),
87
- messages=input.get("messages"),
88
- retrieved_sources=input.get("retrievedSources") or input.get("retrieved_sources"),
89
- )
35
+ input_payload = InputPayload(prompt=input.get("prompt", ""), messages=input.get("messages"),
36
+ retrieved_sources=input.get("retrievedSources") or input.get("retrieved_sources"))
90
37
  else:
91
38
  input_payload = input
92
-
93
39
  if isinstance(output, dict):
94
- output_payload = OutputPayload(
95
- text=output.get("text", ""),
96
- tool_calls=output.get("toolCalls") or output.get("tool_calls"),
97
- )
40
+ output_payload = OutputPayload(text=output.get("text", ""),
41
+ tool_calls=output.get("toolCalls") or output.get("tool_calls"))
98
42
  else:
99
43
  output_payload = output
100
-
101
44
  metadata_payload = None
102
45
  if metadata:
103
46
  if isinstance(metadata, dict):
104
- metadata_payload = Metadata(
105
- latency_ms=metadata.get("latencyMs") or metadata.get("latency_ms"),
47
+ metadata_payload = Metadata(latency_ms=metadata.get("latencyMs") or metadata.get("latency_ms"),
106
48
  tokens_in=metadata.get("tokensIn") or metadata.get("tokens_in"),
107
49
  tokens_out=metadata.get("tokensOut") or metadata.get("tokens_out"),
108
- temperature=metadata.get("temperature"),
109
- )
50
+ temperature=metadata.get("temperature"))
110
51
  else:
111
52
  metadata_payload = metadata
112
-
113
- payload = IngestPayload(
114
- model=model,
115
- provider=provider,
116
- input=input_payload,
117
- output=output_payload,
118
- metadata=metadata_payload,
119
- )
120
-
53
+ payload = IngestPayload(model=model, provider=provider, input=input_payload, output=output_payload, metadata=metadata_payload)
121
54
  return self._send_ingest(payload)
122
55
 
123
- def ingest_async(
124
- self,
125
- model: str,
126
- provider: Provider,
127
- input: Union[InputPayload, Dict[str, Any]],
128
- output: Union[OutputPayload, Dict[str, Any]],
129
- metadata: Optional[Union[Metadata, Dict[str, Any]]] = None,
130
- ) -> None:
131
- """
132
- Ingest asynchronously (fire-and-forget).
133
- Does not block the main thread.
134
- """
56
+ def ingest_async(self, model: str, provider: Provider, input: Union[InputPayload, Dict[str, Any]],
57
+ output: Union[OutputPayload, Dict[str, Any]], metadata: Optional[Union[Metadata, Dict[str, Any]]] = None) -> None:
135
58
  self._executor.submit(self.ingest, model, provider, input, output, metadata)
136
59
 
137
- def guard(
138
- self,
139
- output: str,
140
- input: Optional[str] = None,
141
- mode: str = "strict",
142
- timeout_ms: int = 100,
143
- ) -> GuardResult:
144
- """
145
- Inline guardrail check - blocks dangerous outputs before they reach users.
146
-
147
- Args:
148
- output: The LLM output text to check
149
- input: Optional user input/prompt for context
150
- mode: "strict" (block on medium+ risk) or "permissive" (block on high only)
151
- timeout_ms: Classification timeout in ms (default 100, max 500)
152
-
153
- Returns:
154
- GuardResult with allowed, action, output (possibly redacted), triggered
155
-
156
- Raises:
157
- GuardBlockedError: If guard_mode="fail_closed" and content is blocked
158
- """
159
- url = f"{self.base_url}/api/guard"
160
-
161
- headers = {
162
- "Content-Type": "application/json",
163
- "Authorization": f"Bearer {self.api_key}",
164
- "X-App-Id": self.app_id,
165
- }
166
-
167
- payload = {
168
- "output": output,
169
- "input": input or "",
170
- "mode": mode,
171
- "timeout_ms": min(timeout_ms, 500),
172
- "app_id": self.app_id,
173
- }
174
60
 
61
+ def guard(self, output: str, input: Optional[str] = None, mode: str = "strict", timeout_ms: int = 100) -> GuardResult:
62
+ url = f"{self.base_url}/api/guard"
63
+ headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}", "X-App-Id": self.app_id}
64
+ payload = {"output": output, "input": input or "", "mode": mode, "timeout_ms": min(timeout_ms, 500), "app_id": self.app_id}
175
65
  try:
176
66
  data = json.dumps(payload).encode("utf-8")
177
67
  req = Request(url, data=data, headers=headers, method="POST")
178
- guard_timeout = max(1, timeout_ms / 1000 + 1)
179
-
180
- with urlopen(req, timeout=guard_timeout) as response:
181
- result_data = json.loads(response.read().decode("utf-8"))
182
- result = GuardResult.from_dict(result_data)
183
-
68
+ with urlopen(req, timeout=max(1, timeout_ms / 1000 + 1)) as response:
69
+ result = GuardResult.from_dict(json.loads(response.read().decode("utf-8")))
184
70
  if self.guard_mode == "fail_closed" and not result.allowed:
185
71
  raise GuardBlockedError(result)
186
-
187
72
  return result
188
-
189
73
  except GuardBlockedError:
190
74
  raise
191
-
192
75
  except HTTPError as e:
193
76
  error_body = e.read().decode("utf-8") if e.fp else str(e)
194
77
  if self.guard_mode == "fail_closed":
195
78
  raise Exception(f"Guard API error: HTTP {e.code}: {error_body}")
196
- return GuardResult(
197
- allowed=True, action="allow", output=output, triggered=[],
198
- latency_ms=0, fallback=True, error=f"HTTP {e.code}: {error_body}",
199
- )
200
-
201
- except (URLError, Exception) as e:
79
+ return GuardResult(allowed=True, action="allow", output=output, triggered=[], latency_ms=0, fallback=True, error=f"HTTP {e.code}: {error_body}")
80
+ except Exception as e:
202
81
  if self.guard_mode == "fail_closed":
203
82
  raise Exception(f"Guard API error: {e}")
204
- return GuardResult(
205
- allowed=True, action="allow", output=output, triggered=[],
206
- latency_ms=0, fallback=True, error=str(e),
207
- )
83
+ return GuardResult(allowed=True, action="allow", output=output, triggered=[], latency_ms=0, fallback=True, error=str(e))
208
84
 
209
85
  def _send_ingest(self, payload: IngestPayload) -> IngestResponse:
210
- """Send ingest request to API."""
211
86
  url = f"{self.base_url}/ingest"
212
-
213
- headers = {
214
- "Content-Type": "application/json",
215
- "Authorization": f"Bearer {self.api_key}",
216
- "X-App-Id": self.app_id,
217
- }
218
-
87
+ headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}", "X-App-Id": self.app_id}
219
88
  try:
220
89
  data = json.dumps(payload.to_dict()).encode("utf-8")
221
90
  req = Request(url, data=data, headers=headers, method="POST")
222
-
223
91
  with urlopen(req, timeout=self.timeout) as response:
224
92
  result = json.loads(response.read().decode("utf-8"))
225
- return IngestResponse(
226
- success=result.get("success", False),
227
- event_id=result.get("event_id"),
228
- job_id=result.get("job_id"),
229
- duplicate=result.get("duplicate", False),
230
- )
231
-
93
+ return IngestResponse(success=result.get("success", False), event_id=result.get("event_id"),
94
+ job_id=result.get("job_id"), duplicate=result.get("duplicate", False))
232
95
  except HTTPError as e:
233
96
  error_body = e.read().decode("utf-8") if e.fp else str(e)
234
97
  if self.fail_open:
235
98
  return IngestResponse(success=False, error=f"HTTP {e.code}: {error_body}")
236
99
  raise
237
-
238
100
  except URLError as e:
239
101
  if self.fail_open:
240
102
  return IngestResponse(success=False, error=f"Network error: {e.reason}")
241
103
  raise
242
-
243
104
  except Exception as e:
244
105
  if self.fail_open:
245
106
  return IngestResponse(success=False, error=str(e))
246
107
  raise
247
108
 
248
109
  def close(self) -> None:
249
- """Shutdown the thread pool executor."""
250
110
  self._executor.shutdown(wait=False)
251
111
 
252
112
  def __enter__(self) -> "DriftRail":
@@ -256,24 +116,10 @@ class DriftRail:
256
116
  self.close()
257
117
 
258
118
 
259
-
260
119
  class DriftRailAsync:
261
- """
262
- Async DriftRail client using aiohttp.
263
-
264
- Usage:
265
- async with DriftRailAsync(api_key="...", app_id="my-app") as client:
266
- response = await client.ingest(...)
267
- """
268
-
269
- def __init__(
270
- self,
271
- api_key: str,
272
- app_id: str,
273
- base_url: str = DEFAULT_BASE_URL,
274
- timeout: int = 30,
275
- fail_open: bool = True,
276
- ):
120
+ """Async DriftRail client using aiohttp."""
121
+
122
+ def __init__(self, api_key: str, app_id: str, base_url: str = DEFAULT_BASE_URL, timeout: int = 30, fail_open: bool = True):
277
123
  self.api_key = api_key
278
124
  self.app_id = app_id
279
125
  self.base_url = base_url.rstrip("/")
@@ -283,92 +129,43 @@ class DriftRailAsync:
283
129
 
284
130
  async def _get_session(self) -> Any:
285
131
  if self._session is None:
286
- try:
287
- import aiohttp
288
- self._session = aiohttp.ClientSession()
289
- except ImportError:
290
- raise ImportError("aiohttp is required for async client: pip install driftrail[async]")
132
+ import aiohttp
133
+ self._session = aiohttp.ClientSession()
291
134
  return self._session
292
135
 
293
- async def ingest(
294
- self,
295
- model: str,
296
- provider: Provider,
297
- input: Union[InputPayload, Dict[str, Any]],
298
- output: Union[OutputPayload, Dict[str, Any]],
299
- metadata: Optional[Union[Metadata, Dict[str, Any]]] = None,
300
- ) -> IngestResponse:
301
- """Async ingest - see DriftRail.ingest for documentation."""
136
+ async def ingest(self, model: str, provider: Provider, input: Union[InputPayload, Dict[str, Any]],
137
+ output: Union[OutputPayload, Dict[str, Any]], metadata: Optional[Union[Metadata, Dict[str, Any]]] = None) -> IngestResponse:
302
138
  import aiohttp
303
-
304
139
  if isinstance(input, dict):
305
- input_payload = InputPayload(
306
- prompt=input.get("prompt", ""),
307
- messages=input.get("messages"),
308
- retrieved_sources=input.get("retrievedSources") or input.get("retrieved_sources"),
309
- )
140
+ input_payload = InputPayload(prompt=input.get("prompt", ""), messages=input.get("messages"),
141
+ retrieved_sources=input.get("retrievedSources") or input.get("retrieved_sources"))
310
142
  else:
311
143
  input_payload = input
312
-
313
144
  if isinstance(output, dict):
314
- output_payload = OutputPayload(
315
- text=output.get("text", ""),
316
- tool_calls=output.get("toolCalls") or output.get("tool_calls"),
317
- )
145
+ output_payload = OutputPayload(text=output.get("text", ""), tool_calls=output.get("toolCalls") or output.get("tool_calls"))
318
146
  else:
319
147
  output_payload = output
320
-
321
148
  metadata_payload = None
322
149
  if metadata:
323
150
  if isinstance(metadata, dict):
324
- metadata_payload = Metadata(
325
- latency_ms=metadata.get("latencyMs") or metadata.get("latency_ms"),
151
+ metadata_payload = Metadata(latency_ms=metadata.get("latencyMs") or metadata.get("latency_ms"),
326
152
  tokens_in=metadata.get("tokensIn") or metadata.get("tokens_in"),
327
- tokens_out=metadata.get("tokensOut") or metadata.get("tokens_out"),
328
- temperature=metadata.get("temperature"),
329
- )
153
+ tokens_out=metadata.get("tokensOut") or metadata.get("tokens_out"), temperature=metadata.get("temperature"))
330
154
  else:
331
155
  metadata_payload = metadata
332
-
333
- payload = IngestPayload(
334
- model=model,
335
- provider=provider,
336
- input=input_payload,
337
- output=output_payload,
338
- metadata=metadata_payload,
339
- )
340
-
156
+ payload = IngestPayload(model=model, provider=provider, input=input_payload, output=output_payload, metadata=metadata_payload)
341
157
  url = f"{self.base_url}/ingest"
342
- headers = {
343
- "Content-Type": "application/json",
344
- "Authorization": f"Bearer {self.api_key}",
345
- "X-App-Id": self.app_id,
346
- }
347
-
158
+ headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}", "X-App-Id": self.app_id}
348
159
  try:
349
160
  session = await self._get_session()
350
- async with session.post(
351
- url,
352
- json=payload.to_dict(),
353
- headers=headers,
354
- timeout=aiohttp.ClientTimeout(total=self.timeout),
355
- ) as response:
161
+ async with session.post(url, json=payload.to_dict(), headers=headers, timeout=aiohttp.ClientTimeout(total=self.timeout)) as response:
356
162
  result = await response.json()
357
163
  if response.status >= 400:
358
164
  if self.fail_open:
359
- return IngestResponse(
360
- success=False,
361
- error=f"HTTP {response.status}: {result.get('error', 'Unknown error')}",
362
- )
165
+ return IngestResponse(success=False, error=f"HTTP {response.status}: {result.get('error', 'Unknown')}")
363
166
  raise Exception(f"HTTP {response.status}: {result}")
364
-
365
- return IngestResponse(
366
- success=result.get("success", False),
367
- event_id=result.get("event_id"),
368
- job_id=result.get("job_id"),
369
- duplicate=result.get("duplicate", False),
370
- )
371
-
167
+ return IngestResponse(success=result.get("success", False), event_id=result.get("event_id"),
168
+ job_id=result.get("job_id"), duplicate=result.get("duplicate", False))
372
169
  except Exception as e:
373
170
  if self.fail_open:
374
171
  return IngestResponse(success=False, error=str(e))
@@ -386,93 +183,484 @@ class DriftRailAsync:
386
183
  await self.close()
387
184
 
388
185
 
186
+
389
187
  class DriftRailEnterprise(DriftRail):
390
- """
391
- Enterprise DriftRail client with monitoring features.
392
-
393
- Includes: Incidents, Compliance, Model Comparison, Exports, Brand Safety
394
- """
188
+ """Enterprise DriftRail client with full monitoring features."""
395
189
 
396
190
  def _api_request(self, endpoint: str, method: str = "GET", data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
397
- """Make an API request."""
398
191
  url = f"{self.base_url}{endpoint}"
399
- headers = {
400
- "Content-Type": "application/json",
401
- "Authorization": f"Bearer {self.api_key}",
402
- }
403
-
192
+ headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}"}
404
193
  try:
405
194
  body = json.dumps(data).encode("utf-8") if data else None
406
195
  req = Request(url, data=body, headers=headers, method=method)
407
-
408
196
  with urlopen(req, timeout=self.timeout) as response:
409
197
  return json.loads(response.read().decode("utf-8"))
410
-
411
198
  except HTTPError as e:
412
199
  error_body = e.read().decode("utf-8") if e.fp else str(e)
413
200
  raise Exception(f"HTTP {e.code}: {error_body}")
414
201
 
415
- def list_incidents(
416
- self,
417
- status: Optional[list] = None,
418
- severity: Optional[list] = None,
419
- limit: int = 50,
420
- ) -> Dict[str, Any]:
421
- """List incidents with optional filters."""
202
+ # Incidents
203
+ def list_incidents(self, status: Optional[List[str]] = None, severity: Optional[List[str]] = None, limit: int = 50) -> Dict[str, Any]:
422
204
  params = []
423
- if status:
424
- params.append(f"status={','.join(status)}")
425
- if severity:
426
- params.append(f"severity={','.join(severity)}")
205
+ if status: params.append(f"status={','.join(status)}")
206
+ if severity: params.append(f"severity={','.join(severity)}")
427
207
  params.append(f"limit={limit}")
428
- query = "&".join(params)
429
- return self._api_request(f"/api/incidents?{query}")
430
-
431
- def create_incident(
432
- self,
433
- title: str,
434
- severity: str,
435
- incident_type: str,
436
- description: Optional[str] = None,
437
- ) -> Dict[str, Any]:
438
- """Create a new incident."""
439
- return self._api_request("/api/incidents", "POST", {
440
- "title": title,
441
- "severity": severity,
442
- "incident_type": incident_type,
443
- "description": description,
444
- })
208
+ return self._api_request(f"/api/incidents?{'&'.join(params)}")
209
+
210
+ def create_incident(self, title: str, severity: str, incident_type: str, description: Optional[str] = None) -> Dict[str, Any]:
211
+ return self._api_request("/api/incidents", "POST", {"title": title, "severity": severity, "incident_type": incident_type, "description": description})
212
+
213
+ def update_incident_status(self, incident_id: str, status: str, comment: Optional[str] = None) -> Dict[str, Any]:
214
+ return self._api_request(f"/api/incidents/{incident_id}/status", "PATCH", {"status": status, "comment": comment})
445
215
 
446
216
  def get_incident_stats(self) -> Dict[str, Any]:
447
- """Get incident statistics."""
448
217
  return self._api_request("/api/incidents/stats")
449
218
 
219
+ # Compliance
450
220
  def get_compliance_status(self) -> Dict[str, Any]:
451
- """Get compliance framework status."""
452
221
  return self._api_request("/api/compliance/status")
453
222
 
223
+ def get_compliance_score(self) -> Dict[str, Any]:
224
+ return self._api_request("/api/compliance-reports/score")
225
+
226
+ def get_compliance_reports(self) -> Dict[str, Any]:
227
+ return self._api_request("/api/compliance-reports/reports")
228
+
229
+ def get_compliance_frameworks(self) -> Dict[str, Any]:
230
+ return self._api_request("/api/compliance-reports/frameworks")
231
+
232
+ def generate_compliance_report(self, framework: str, format: str = "json", include_evidence: bool = True, include_ai_analysis: bool = False) -> Dict[str, Any]:
233
+ return self._api_request("/api/compliance-reports/reports", "POST", {"framework": framework, "format": format, "include_evidence": include_evidence, "include_ai_analysis": include_ai_analysis})
234
+
235
+ def create_custom_framework(self, name: str, controls: List[Dict[str, Any]], description: Optional[str] = None) -> Dict[str, Any]:
236
+ return self._api_request("/api/compliance-reports/frameworks/custom", "POST", {"name": name, "description": description, "controls": controls})
237
+
238
+ # Model Comparison
454
239
  def get_model_leaderboard(self, metric: str = "avg_risk_score") -> Dict[str, Any]:
455
- """Get model performance leaderboard."""
456
240
  return self._api_request(f"/api/models/leaderboard?metric={metric}")
457
241
 
458
- def create_export(
459
- self,
460
- export_type: str,
461
- format: str = "json",
462
- date_from: Optional[str] = None,
463
- date_to: Optional[str] = None,
464
- ) -> Dict[str, Any]:
465
- """Create a data export job."""
466
- return self._api_request("/api/exports", "POST", {
467
- "export_type": export_type,
468
- "format": format,
469
- "date_from": date_from,
470
- "date_to": date_to,
471
- })
242
+ def create_model_comparison(self, name: str, model_a: str, model_b: str) -> Dict[str, Any]:
243
+ return self._api_request("/api/models/comparisons", "POST", {"name": name, "model_a": model_a, "model_b": model_b})
244
+
245
+ # Exports
246
+ def create_export(self, export_type: str, format: str = "json", date_from: Optional[str] = None, date_to: Optional[str] = None) -> Dict[str, Any]:
247
+ return self._api_request("/api/exports", "POST", {"export_type": export_type, "format": format, "date_from": date_from, "date_to": date_to})
472
248
 
249
+ def get_export_status(self, export_id: str) -> Dict[str, Any]:
250
+ return self._api_request(f"/api/exports/{export_id}")
251
+
252
+ # Brand Safety
473
253
  def check_brand_safety(self, text: str, location: str = "output") -> Dict[str, Any]:
474
- """Check text against brand safety rules."""
475
- return self._api_request("/api/brand-safety/check", "POST", {
476
- "text": text,
477
- "location": location,
478
- })
254
+ return self._api_request("/api/brand-safety/check", "POST", {"text": text, "location": location})
255
+
256
+ def create_brand_safety_rule(self, name: str, rule_type: str, config: Dict[str, Any], action: str = "flag", severity: str = "medium") -> Dict[str, Any]:
257
+ return self._api_request("/api/brand-safety/rules", "POST", {"name": name, "rule_type": rule_type, "config": config, "action": action, "severity": severity})
258
+
259
+ # Executive Dashboard
260
+ def get_executive_metrics(self, period: str = "7d") -> Dict[str, Any]:
261
+ return self._api_request(f"/api/executive?period={period}")
262
+
263
+ def get_kpi_targets(self) -> Dict[str, Any]:
264
+ return self._api_request("/api/executive/targets")
265
+
266
+ def update_kpi_targets(self, targets: Dict[str, Any]) -> Dict[str, Any]:
267
+ return self._api_request("/api/executive/targets", "PUT", targets)
268
+
269
+ def export_executive_metrics(self, period: str = "7d", format: str = "json") -> Dict[str, Any]:
270
+ return self._api_request("/api/executive/export", "POST", {"period": period, "format": format})
271
+
272
+
273
+ # Model Analytics
274
+ def get_model_analytics_summary(self) -> Dict[str, Any]:
275
+ return self._api_request("/api/model-analytics/summary")
276
+
277
+ def get_historical_logs(self, model: Optional[str] = None, environment: Optional[str] = None, start_time: Optional[str] = None, end_time: Optional[str] = None, min_risk_score: Optional[float] = None, limit: int = 100, offset: int = 0) -> Dict[str, Any]:
278
+ params = [f"limit={limit}", f"offset={offset}"]
279
+ if model: params.append(f"model={model}")
280
+ if environment: params.append(f"environment={environment}")
281
+ if start_time: params.append(f"start_time={start_time}")
282
+ if end_time: params.append(f"end_time={end_time}")
283
+ if min_risk_score is not None: params.append(f"min_risk_score={min_risk_score}")
284
+ return self._api_request(f"/api/model-analytics/logs?{'&'.join(params)}")
285
+
286
+ def get_model_switches(self, app_id: Optional[str] = None, limit: int = 50) -> Dict[str, Any]:
287
+ params = [f"limit={limit}"]
288
+ if app_id: params.append(f"app_id={app_id}")
289
+ return self._api_request(f"/api/model-analytics/switches?{'&'.join(params)}")
290
+
291
+ def record_model_switch(self, app_id: str, new_model: str, new_provider: str, previous_model: Optional[str] = None, previous_provider: Optional[str] = None, switch_reason: Optional[str] = None, environment: Optional[str] = None) -> Dict[str, Any]:
292
+ return self._api_request("/api/model-analytics/switches", "POST", {"app_id": app_id, "new_model": new_model, "new_provider": new_provider, "previous_model": previous_model, "previous_provider": previous_provider, "switch_reason": switch_reason, "environment": environment})
293
+
294
+ def get_environment_comparison(self, model: Optional[str] = None, app_id: Optional[str] = None, days: int = 7) -> Dict[str, Any]:
295
+ params = [f"days={days}"]
296
+ if model: params.append(f"model={model}")
297
+ if app_id: params.append(f"app_id={app_id}")
298
+ return self._api_request(f"/api/model-analytics/environments?{'&'.join(params)}")
299
+
300
+ def get_model_benchmarks(self, model: Optional[str] = None, environment: Optional[str] = None, limit: int = 50) -> Dict[str, Any]:
301
+ params = [f"limit={limit}"]
302
+ if model: params.append(f"model={model}")
303
+ if environment: params.append(f"environment={environment}")
304
+ return self._api_request(f"/api/model-analytics/benchmarks?{'&'.join(params)}")
305
+
306
+ def calculate_model_benchmark(self, model: str, environment: Optional[str] = None, days: int = 7) -> Dict[str, Any]:
307
+ return self._api_request("/api/model-analytics/benchmarks/calculate", "POST", {"model": model, "environment": environment, "days": days})
308
+
309
+ # Drift & Alerts
310
+ def get_drift_metrics(self, app_id: Optional[str] = None) -> Dict[str, Any]:
311
+ return self._api_request(f"/api/alerts/metrics{'?app_id=' + app_id if app_id else ''}")
312
+
313
+ def get_drift_alerts(self, severity: Optional[str] = None, unresolved: bool = False, app_id: Optional[str] = None, alert_type: Optional[str] = None, limit: int = 50, offset: int = 0) -> Dict[str, Any]:
314
+ params = [f"limit={limit}", f"offset={offset}"]
315
+ if severity: params.append(f"severity={severity}")
316
+ if unresolved: params.append("unresolved=true")
317
+ if app_id: params.append(f"app_id={app_id}")
318
+ if alert_type: params.append(f"alert_type={alert_type}")
319
+ return self._api_request(f"/api/alerts?{'&'.join(params)}")
320
+
321
+ def acknowledge_alert(self, alert_id: str) -> Dict[str, Any]:
322
+ return self._api_request("/api/alerts", "PATCH", {"alert_id": alert_id, "action": "acknowledge"})
323
+
324
+ def resolve_alert(self, alert_id: str, notes: Optional[str] = None) -> Dict[str, Any]:
325
+ return self._api_request("/api/alerts", "PATCH", {"alert_id": alert_id, "action": "resolve", "notes": notes})
326
+
327
+ def get_drift_trends(self, days: int = 7) -> Dict[str, Any]:
328
+ return self._api_request(f"/api/alerts/trends?days={days}")
329
+
330
+ def get_drift_baselines(self, app_id: Optional[str] = None) -> Dict[str, Any]:
331
+ return self._api_request(f"/api/alerts/baselines{'?app_id=' + app_id if app_id else ''}")
332
+
333
+ def refresh_baseline(self, baseline_id: str) -> Dict[str, Any]:
334
+ return self._api_request("/api/alerts/baselines", "POST", {"baseline_id": baseline_id, "action": "refresh"})
335
+
336
+
337
+ # Drift Detection V3
338
+ def get_model_drift_comparison(self, app_id: Optional[str] = None) -> Dict[str, Any]:
339
+ return self._api_request(f"/api/drift/models{'?app_id=' + app_id if app_id else ''}")
340
+
341
+ def get_drift_heatmap(self, days: int = 30, app_id: Optional[str] = None) -> Dict[str, Any]:
342
+ params = [f"days={days}"]
343
+ if app_id: params.append(f"app_id={app_id}")
344
+ return self._api_request(f"/api/drift/heatmap?{'&'.join(params)}")
345
+
346
+ def get_drift_thresholds(self, app_id: Optional[str] = None) -> Dict[str, Any]:
347
+ return self._api_request(f"/api/drift/thresholds{'?app_id=' + app_id if app_id else ''}")
348
+
349
+ def update_drift_thresholds(self, thresholds: Dict[str, Dict[str, Any]], app_id: Optional[str] = None) -> Dict[str, Any]:
350
+ return self._api_request("/api/drift/thresholds", "PUT", {"thresholds": thresholds, "app_id": app_id})
351
+
352
+ def get_baseline_history(self, app_id: Optional[str] = None, model: Optional[str] = None, limit: int = 50) -> Dict[str, Any]:
353
+ params = [f"limit={limit}"]
354
+ if app_id: params.append(f"app_id={app_id}")
355
+ if model: params.append(f"model={model}")
356
+ return self._api_request(f"/api/drift/baselines/history?{'&'.join(params)}")
357
+
358
+ def get_drift_deployment_correlations(self, days: int = 30, app_id: Optional[str] = None) -> Dict[str, Any]:
359
+ params = [f"days={days}"]
360
+ if app_id: params.append(f"app_id={app_id}")
361
+ return self._api_request(f"/api/drift/correlations?{'&'.join(params)}")
362
+
363
+ def record_deployment(self, app_id: str, deployment_type: Optional[str] = None, version: Optional[str] = None, description: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
364
+ return self._api_request("/api/drift/deployments", "POST", {"app_id": app_id, "deployment_type": deployment_type, "version": version, "description": description, "metadata": metadata})
365
+
366
+ def get_drift_predictions(self, app_id: Optional[str] = None) -> Dict[str, Any]:
367
+ return self._api_request(f"/api/drift/predictions{'?app_id=' + app_id if app_id else ''}")
368
+
369
# Drift analytics: scores, notification channels, segments, correlations,
# distributions, seasonality and baseline statistics.
# Query parameters are URL-encoded so values containing spaces, '&', '+'
# or other reserved characters survive the round trip intact.

def get_drift_score(self, app_id: Optional[str] = None) -> Dict[str, Any]:
    """Return the current drift score, optionally scoped to ``app_id``."""
    from urllib.parse import urlencode
    query = f"?{urlencode({'app_id': app_id})}" if app_id else ""
    return self._api_request(f"/api/drift/score{query}")

def get_notification_channels(self, app_id: Optional[str] = None) -> Dict[str, Any]:
    """List configured drift notification channels."""
    from urllib.parse import urlencode
    query = f"?{urlencode({'app_id': app_id})}" if app_id else ""
    return self._api_request(f"/api/drift/notifications/channels{query}")

def create_notification_channel(self, channel_type: str, name: str, config: Dict[str, Any], severity_filter: Optional[List[str]] = None, is_enabled: bool = True, app_id: Optional[str] = None) -> Dict[str, Any]:
    """Create a drift-alert notification channel.

    ``severity_filter`` defaults to ``["critical", "warning"]`` when omitted.
    """
    return self._api_request("/api/drift/notifications/channels", "POST", {
        "channel_type": channel_type,
        "name": name,
        "config": config,
        "severity_filter": severity_filter or ["critical", "warning"],
        "is_enabled": is_enabled,
        "app_id": app_id,
    })

def update_notification_channel(self, channel_id: str, updates: Dict[str, Any]) -> Dict[str, Any]:
    """Update an existing notification channel with the given fields."""
    return self._api_request(f"/api/drift/notifications/channels/{channel_id}", "PUT", updates)

def delete_notification_channel(self, channel_id: str) -> Dict[str, Any]:
    """Delete a notification channel by id."""
    return self._api_request(f"/api/drift/notifications/channels/{channel_id}", "DELETE")

def get_drift_segments(self, app_id: Optional[str] = None) -> Dict[str, Any]:
    """List drift segments, optionally scoped to ``app_id``."""
    from urllib.parse import urlencode
    query = f"?{urlencode({'app_id': app_id})}" if app_id else ""
    return self._api_request(f"/api/drift/segments{query}")

def create_drift_segment(self, name: str, filter_criteria: Dict[str, Any], description: Optional[str] = None, app_id: Optional[str] = None) -> Dict[str, Any]:
    """Create a named drift segment from ``filter_criteria``."""
    return self._api_request("/api/drift/segments", "POST", {
        "name": name,
        "description": description,
        "filter_criteria": filter_criteria,
        "app_id": app_id,
    })

def get_correlation_events(self, app_id: Optional[str] = None, days: int = 7) -> Dict[str, Any]:
    """Fetch drift correlation events for the trailing ``days`` window."""
    from urllib.parse import urlencode
    params: Dict[str, Any] = {"days": days}
    if app_id:
        params["app_id"] = app_id
    return self._api_request(f"/api/drift/correlations/events?{urlencode(params)}")

def get_distribution_analysis(self, app_id: Optional[str] = None, metric_type: Optional[str] = None) -> Dict[str, Any]:
    """Fetch distribution analysis, filtered by app and/or metric type."""
    from urllib.parse import urlencode
    params: Dict[str, Any] = {}
    if app_id:
        params["app_id"] = app_id
    if metric_type:
        params["metric_type"] = metric_type
    query = f"?{urlencode(params)}" if params else ""
    return self._api_request(f"/api/drift/distribution{query}")

def get_seasonality_patterns(self, app_id: Optional[str] = None, metric_type: Optional[str] = None) -> Dict[str, Any]:
    """Fetch detected seasonality patterns, filtered by app and/or metric type."""
    from urllib.parse import urlencode
    params: Dict[str, Any] = {}
    if app_id:
        params["app_id"] = app_id
    if metric_type:
        params["metric_type"] = metric_type
    query = f"?{urlencode(params)}" if params else ""
    return self._api_request(f"/api/drift/seasonality{query}")

def get_baseline_statistics(self, app_id: Optional[str] = None, model: Optional[str] = None) -> Dict[str, Any]:
    """Fetch baseline statistics, filtered by app and/or model."""
    from urllib.parse import urlencode
    params: Dict[str, Any] = {}
    if app_id:
        params["app_id"] = app_id
    if model:
        params["model"] = model
    query = f"?{urlencode(params)}" if params else ""
    return self._api_request(f"/api/drift/statistics{query}")
414
# Distributed Tracing
def start_trace(self, app_id: str, name: Optional[str] = None, user_id: Optional[str] = None, session_id: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None, tags: Optional[List[str]] = None) -> Dict[str, Any]:
    """Open a new trace; returns the server record (including the trace id)."""
    return self._api_request("/api/traces", "POST", {
        "app_id": app_id,
        "name": name,
        "user_id": user_id,
        "session_id": session_id,
        "metadata": metadata or {},
        "tags": tags or [],
    })

def end_trace(self, trace_id: str, status: str = "completed") -> Dict[str, Any]:
    """Close a trace with a terminal ``status`` (default ``"completed"``)."""
    return self._api_request(f"/api/traces/{trace_id}", "PATCH", {"status": status})

def get_trace(self, trace_id: str) -> Dict[str, Any]:
    """Fetch a single trace by id."""
    return self._api_request(f"/api/traces/{trace_id}")

def list_traces(self, app_id: Optional[str] = None, status: Optional[str] = None, user_id: Optional[str] = None, session_id: Optional[str] = None, limit: int = 50, offset: int = 0) -> Dict[str, Any]:
    """List traces with pagination; optional filters are URL-encoded."""
    from urllib.parse import urlencode
    params: Dict[str, Any] = {"limit": limit, "offset": offset}
    if app_id:
        params["app_id"] = app_id
    if status:
        params["status"] = status
    if user_id:
        params["user_id"] = user_id
    if session_id:
        params["session_id"] = session_id
    return self._api_request(f"/api/traces?{urlencode(params)}")

def start_span(self, trace_id: str, name: str, span_type: str, parent_span_id: Optional[str] = None, model: Optional[str] = None, provider: Optional[str] = None, input: Optional[Dict[str, Any]] = None, metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Open a span within ``trace_id``; nest spans via ``parent_span_id``."""
    return self._api_request("/api/traces/spans", "POST", {
        "trace_id": trace_id,
        "name": name,
        "span_type": span_type,
        "parent_span_id": parent_span_id,
        "model": model,
        "provider": provider,
        "input": input,
        "metadata": metadata,
    })

def end_span(self, span_id: str, status: str = "completed", status_message: Optional[str] = None, output: Optional[Dict[str, Any]] = None, tokens_in: Optional[int] = None, tokens_out: Optional[int] = None, cost_usd: Optional[float] = None) -> Dict[str, Any]:
    """Close a span, optionally recording output, token counts and cost."""
    return self._api_request(f"/api/traces/spans/{span_id}", "PATCH", {
        "status": status,
        "status_message": status_message,
        "output": output,
        "tokens_in": tokens_in,
        "tokens_out": tokens_out,
        "cost_usd": cost_usd,
    })
438
# Prompt Management
def create_prompt(self, name: str, description: Optional[str] = None, content: Optional[str] = None, variables: Optional[List[str]] = None, tags: Optional[List[str]] = None) -> Dict[str, Any]:
    """Create a managed prompt, optionally seeding its first content."""
    return self._api_request("/api/prompts", "POST", {
        "name": name,
        "description": description,
        "content": content,
        "variables": variables or [],
        "tags": tags or [],
    })

def get_prompt(self, prompt_id: str) -> Dict[str, Any]:
    """Fetch a prompt by id."""
    return self._api_request(f"/api/prompts/{prompt_id}")

def list_prompts(self, is_active: Optional[bool] = None, tags: Optional[List[str]] = None, limit: int = 50, offset: int = 0) -> Dict[str, Any]:
    """List prompts; filters are URL-encoded (tags joined with commas)."""
    from urllib.parse import urlencode
    params: Dict[str, Any] = {"limit": limit, "offset": offset}
    if is_active is not None:
        params["is_active"] = str(is_active).lower()
    if tags:
        params["tags"] = ",".join(tags)
    # safe="," keeps the comma-separated tag list readable server-side
    return self._api_request(f"/api/prompts?{urlencode(params, safe=',')}")

def create_prompt_version(self, prompt_id: str, content: str, variables: Optional[List[str]] = None, model_config: Optional[Dict[str, Any]] = None, commit_message: Optional[str] = None) -> Dict[str, Any]:
    """Create a new version of ``prompt_id`` with the given content."""
    return self._api_request(f"/api/prompts/{prompt_id}/versions", "POST", {
        "content": content,
        "variables": variables or [],
        "model_config": model_config or {},
        "commit_message": commit_message,
    })

def deploy_prompt_version(self, version_id: str, environment: str) -> Dict[str, Any]:
    """Deploy a prompt version to an environment (e.g. staging/production)."""
    return self._api_request("/api/prompts/deploy", "POST", {"version_id": version_id, "environment": environment})

def get_deployed_prompt(self, prompt_id: str, environment: str) -> Dict[str, Any]:
    """Fetch the version of ``prompt_id`` currently deployed to ``environment``."""
    from urllib.parse import urlencode
    return self._api_request(f"/api/prompts/{prompt_id}/deployed?{urlencode({'environment': environment})}")

def rollback_prompt(self, prompt_id: str, environment: str, version_id: str) -> Dict[str, Any]:
    """Roll an environment back to a previously deployed version."""
    return self._api_request(f"/api/prompts/{prompt_id}/rollback", "POST", {"environment": environment, "version_id": version_id})
464
# Evaluation Framework
def create_dataset(self, name: str, description: Optional[str] = None, schema_type: str = "qa", tags: Optional[List[str]] = None) -> Dict[str, Any]:
    """Create an evaluation dataset of the given ``schema_type``."""
    return self._api_request("/api/evaluations/datasets", "POST", {
        "name": name,
        "description": description,
        "schema_type": schema_type,
        "tags": tags or [],
    })

def get_dataset(self, dataset_id: str) -> Dict[str, Any]:
    """Fetch an evaluation dataset by id."""
    return self._api_request(f"/api/evaluations/datasets/{dataset_id}")

def list_datasets(self, schema_type: Optional[str] = None, is_active: Optional[bool] = None, limit: int = 50) -> Dict[str, Any]:
    """List evaluation datasets; optional filters are URL-encoded."""
    from urllib.parse import urlencode
    params: Dict[str, Any] = {"limit": limit}
    if schema_type:
        params["schema_type"] = schema_type
    if is_active is not None:
        params["is_active"] = str(is_active).lower()
    return self._api_request(f"/api/evaluations/datasets?{urlencode(params)}")

def add_dataset_items(self, dataset_id: str, items: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Append ``items`` to an existing dataset."""
    return self._api_request(f"/api/evaluations/datasets/{dataset_id}/items", "POST", {"items": items})

def create_eval_run(self, dataset_id: str, evaluators: List[Dict[str, Any]], name: Optional[str] = None, model: Optional[str] = None, prompt_version_id: Optional[str] = None, config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Start an evaluation run over ``dataset_id`` with the given evaluators."""
    return self._api_request("/api/evaluations/runs", "POST", {
        "dataset_id": dataset_id,
        "name": name,
        "model": model,
        "prompt_version_id": prompt_version_id,
        "evaluators": evaluators,
        "config": config or {},
    })

def get_eval_run(self, run_id: str) -> Dict[str, Any]:
    """Fetch an evaluation run by id."""
    return self._api_request(f"/api/evaluations/runs/{run_id}")

def list_eval_runs(self, dataset_id: Optional[str] = None, status: Optional[str] = None, limit: int = 50) -> Dict[str, Any]:
    """List evaluation runs; optional filters are URL-encoded."""
    from urllib.parse import urlencode
    params: Dict[str, Any] = {"limit": limit}
    if dataset_id:
        params["dataset_id"] = dataset_id
    if status:
        params["status"] = status
    return self._api_request(f"/api/evaluations/runs?{urlencode(params)}")

def submit_eval_result(self, run_id: str, item_id: str, output: Dict[str, Any], scores: Dict[str, Dict[str, Any]], latency_ms: Optional[int] = None) -> Dict[str, Any]:
    """Report one item's output and evaluator scores for a run."""
    return self._api_request(f"/api/evaluations/runs/{run_id}/results", "POST", {
        "item_id": item_id,
        "output": output,
        "scores": scores,
        "latency_ms": latency_ms,
    })
495
# Semantic Caching
def get_cache_settings(self) -> Dict[str, Any]:
    """Fetch the tenant's semantic-cache settings."""
    return self._api_request("/api/cache/settings")

def update_cache_settings(self, settings: Dict[str, Any]) -> Dict[str, Any]:
    """Replace the semantic-cache settings with ``settings``."""
    return self._api_request("/api/cache/settings", "PUT", settings)

def get_cache_stats(self) -> Dict[str, Any]:
    """Fetch usage statistics for the semantic cache."""
    return self._api_request("/api/cache/stats")

def cache_lookup(self, input: Union[str, Dict[str, Any]], model: Optional[str] = None) -> Dict[str, Any]:
    """Look up a cached completion for ``input`` (optionally per ``model``)."""
    payload = {"input": input, "model": model}
    return self._api_request("/api/cache/lookup", "POST", payload)

def cache_store(self, input: Union[str, Dict[str, Any]], output: str, model: str, provider: Optional[str] = None, app_id: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Store a completion in the semantic cache."""
    payload = {
        "input": input,
        "output": output,
        "model": model,
        "provider": provider,
        "app_id": app_id,
        "metadata": metadata,
    }
    return self._api_request("/api/cache/store", "POST", payload)

def invalidate_cache(self, cache_id: str) -> Dict[str, Any]:
    """Evict a single cache entry by id."""
    return self._api_request(f"/api/cache/{cache_id}", "DELETE")

def clear_cache(self, model: Optional[str] = None, app_id: Optional[str] = None) -> Dict[str, Any]:
    """Bulk-clear cache entries, optionally restricted to a model and/or app."""
    payload = {"model": model, "app_id": app_id}
    return self._api_request("/api/cache/clear", "POST", payload)
518
# Agent Simulation
def create_simulation(self, name: str, scenario: str, description: Optional[str] = None, persona: Optional[Dict[str, Any]] = None, success_criteria: Optional[List[Dict[str, str]]] = None, max_turns: int = 10, model: Optional[str] = None, tags: Optional[List[str]] = None) -> Dict[str, Any]:
    """Define an agent simulation scenario with optional persona and criteria."""
    return self._api_request("/api/simulations", "POST", {
        "name": name,
        "scenario": scenario,
        "description": description,
        "persona": persona,
        "success_criteria": success_criteria or [],
        "max_turns": max_turns,
        "model": model,
        "tags": tags or [],
    })

def get_simulation(self, simulation_id: str) -> Dict[str, Any]:
    """Fetch a simulation definition by id."""
    return self._api_request(f"/api/simulations/{simulation_id}")

def list_simulations(self, status: Optional[str] = None, tags: Optional[List[str]] = None, limit: int = 50) -> Dict[str, Any]:
    """List simulations; filters are URL-encoded (tags joined with commas)."""
    from urllib.parse import urlencode
    params: Dict[str, Any] = {"limit": limit}
    if status:
        params["status"] = status
    if tags:
        params["tags"] = ",".join(tags)
    # safe="," keeps the comma-separated tag list readable server-side
    return self._api_request(f"/api/simulations?{urlencode(params, safe=',')}")

def run_simulation(self, simulation_id: str, max_turns: Optional[int] = None, model: Optional[str] = None) -> Dict[str, Any]:
    """Kick off a run of ``simulation_id``, optionally overriding turn limit/model."""
    config: Dict[str, Any] = {}
    if max_turns:
        config["max_turns"] = max_turns
    if model:
        config["model"] = model
    return self._api_request(f"/api/simulations/{simulation_id}/run", "POST", config)

def get_simulation_run(self, run_id: str) -> Dict[str, Any]:
    """Fetch a simulation run by id."""
    return self._api_request(f"/api/simulations/runs/{run_id}")

def add_simulation_turn(self, run_id: str, turn_number: int, role: str, content: str, tool_calls: Optional[List[Dict[str, Any]]] = None, tool_results: Optional[List[Dict[str, Any]]] = None, latency_ms: Optional[int] = None, tokens_in: Optional[int] = None, tokens_out: Optional[int] = None) -> Dict[str, Any]:
    """Record one conversational turn of a simulation run."""
    return self._api_request(f"/api/simulations/runs/{run_id}/turns", "POST", {
        "turn_number": turn_number,
        "role": role,
        "content": content,
        "tool_calls": tool_calls,
        "tool_results": tool_results,
        "latency_ms": latency_ms,
        "tokens_in": tokens_in,
        "tokens_out": tokens_out,
    })

def complete_simulation_run(self, run_id: str, success: bool, criteria_results: List[Dict[str, Any]], summary: Optional[str] = None) -> Dict[str, Any]:
    """Finalize a run with its overall outcome and per-criterion results."""
    return self._api_request(f"/api/simulations/runs/{run_id}/complete", "POST", {
        "success": success,
        "criteria_results": criteria_results,
        "summary": summary,
    })

def get_simulation_stats(self) -> Dict[str, Any]:
    """Fetch aggregate simulation statistics."""
    return self._api_request("/api/simulations/stats")
549
+ # Integrations
550
+ def get_integrations(self) -> Dict[str, Any]:
551
+ return self._api_request("/api/integrations")
552
+
553
+ def create_integration(self, type: str, webhook_url: str, channel_name: Optional[str] = None, events: Optional[List[str]] = None) -> Dict[str, Any]:
554
+ return self._api_request("/api/integrations", "POST", {"type": type, "webhook_url": webhook_url, "channel_name": channel_name, "events": events or ["high_risk", "incident"]})
555
+
556
+ def test_integration(self, webhook_url: str, type: str) -> Dict[str, Any]:
557
+ return self._api_request("/api/integrations/test", "POST", {"webhook_url": webhook_url, "type": type})
558
+
559
+ def update_integration(self, integration_id: str, updates: Dict[str, Any]) -> Dict[str, Any]:
560
+ return self._api_request(f"/api/integrations/{integration_id}", "PATCH", updates)
561
+
562
+ def delete_integration(self, integration_id: str) -> Dict[str, Any]:
563
+ return self._api_request(f"/api/integrations/{integration_id}", "DELETE")
564
+
565
# Benchmarks
def get_industries(self) -> Dict[str, Any]:
    """List industries available for benchmark comparison."""
    return self._api_request("/api/benchmarks/industries")

def get_benchmark_report(self, industry: Optional[str] = None) -> Dict[str, Any]:
    """Fetch the benchmark report, optionally scoped to one ``industry``."""
    from urllib.parse import urlencode
    # URL-encode so industries containing spaces/'&' don't corrupt the query
    query = f"?{urlencode({'industry': industry})}" if industry else ""
    return self._api_request(f"/api/benchmarks{query}")

def set_tenant_industry(self, industry: str) -> Dict[str, Any]:
    """Set the tenant's industry used for benchmark comparisons."""
    return self._api_request("/api/benchmarks/industry", "PATCH", {"industry": industry})
576
# Retention Policies
def get_retention_policies(self) -> Dict[str, Any]:
    """List all configured data-retention policies."""
    return self._api_request("/api/retention")

def get_retention_summary(self) -> Dict[str, Any]:
    """Fetch an aggregate summary of retention policies."""
    return self._api_request("/api/retention/summary")

def create_retention_policy(self, name: str, data_type: str, retention_days: int, description: Optional[str] = None) -> Dict[str, Any]:
    """Create a retention policy keeping ``data_type`` for ``retention_days``."""
    payload = {
        "name": name,
        "data_type": data_type,
        "retention_days": retention_days,
        "description": description,
    }
    return self._api_request("/api/retention", "POST", payload)
586
# Guardrails
def get_guardrails(self) -> Dict[str, Any]:
    """List configured guardrail rules."""
    return self._api_request("/api/guardrails")

def get_guardrail_stats(self) -> Dict[str, Any]:
    """Fetch guardrail trigger statistics."""
    return self._api_request("/api/guardrails/stats")

def create_guardrail(self, name: str, rule_type: str, action: str, description: Optional[str] = None, config: Optional[Dict[str, Any]] = None, priority: int = 0) -> Dict[str, Any]:
    """Create a guardrail rule; higher ``priority`` values are server-defined."""
    payload = {
        "name": name,
        "description": description,
        "rule_type": rule_type,
        "action": action,
        "config": config or {},
        "priority": priority,
    }
    return self._api_request("/api/guardrails", "POST", payload)

def update_guardrail(self, guardrail_id: str, updates: Dict[str, Any]) -> Dict[str, Any]:
    """Partially update a guardrail rule."""
    return self._api_request(f"/api/guardrails/{guardrail_id}", "PATCH", updates)

def delete_guardrail(self, guardrail_id: str) -> Dict[str, Any]:
    """Delete a guardrail rule by id."""
    return self._api_request(f"/api/guardrails/{guardrail_id}", "DELETE")
602
# Audit Logs
def get_audit_logs(self, action: Optional[str] = None, resource_type: Optional[str] = None, user_id: Optional[str] = None, start_time: Optional[str] = None, end_time: Optional[str] = None, limit: int = 100, offset: int = 0) -> Dict[str, Any]:
    """List audit log entries with pagination and optional filters.

    Values are URL-encoded: ISO-8601 timestamps contain ':' and possibly '+'
    (timezone offset), which would otherwise be misread by the server
    ('+' decodes as a space in query strings).
    """
    from urllib.parse import urlencode
    params: Dict[str, Any] = {"limit": limit, "offset": offset}
    if action:
        params["action"] = action
    if resource_type:
        params["resource_type"] = resource_type
    if user_id:
        params["user_id"] = user_id
    if start_time:
        params["start_time"] = start_time
    if end_time:
        params["end_time"] = end_time
    return self._api_request(f"/api/audit?{urlencode(params)}")
612
# Events
def get_events(self, app_id: Optional[str] = None, model: Optional[str] = None, min_risk_score: Optional[float] = None, start_time: Optional[str] = None, end_time: Optional[str] = None, limit: int = 100, offset: int = 0) -> Dict[str, Any]:
    """List ingested events with pagination and optional filters.

    Values are URL-encoded so model names or ISO timestamps containing
    reserved characters (':', '+', spaces) are transmitted intact.
    """
    from urllib.parse import urlencode
    params: Dict[str, Any] = {"limit": limit, "offset": offset}
    if app_id:
        params["app_id"] = app_id
    if model:
        params["model"] = model
    if min_risk_score is not None:
        params["min_risk_score"] = min_risk_score
    if start_time:
        params["start_time"] = start_time
    if end_time:
        params["end_time"] = end_time
    return self._api_request(f"/api/events?{urlencode(params)}")

def get_event(self, event_id: str) -> Dict[str, Any]:
    """Fetch a single event by id."""
    return self._api_request(f"/api/events/{event_id}")

def get_live_events(self, limit: int = 50) -> Dict[str, Any]:
    """Fetch the most recent events (up to ``limit``)."""
    return self._api_request(f"/api/events/live?limit={limit}")
628
# Custom Detections
def get_custom_detections(self) -> Dict[str, Any]:
    """List custom detection rules."""
    return self._api_request("/api/detections")

def create_custom_detection(self, name: str, detection_type: str, config: Dict[str, Any], severity: str = "medium", description: Optional[str] = None) -> Dict[str, Any]:
    """Create a custom detection rule (default severity ``"medium"``)."""
    payload = {
        "name": name,
        "description": description,
        "detection_type": detection_type,
        "config": config,
        "severity": severity,
    }
    return self._api_request("/api/detections", "POST", payload)

def update_custom_detection(self, detection_id: str, updates: Dict[str, Any]) -> Dict[str, Any]:
    """Partially update a custom detection rule."""
    return self._api_request(f"/api/detections/{detection_id}", "PATCH", updates)

def delete_custom_detection(self, detection_id: str) -> Dict[str, Any]:
    """Delete a custom detection rule by id."""
    return self._api_request(f"/api/detections/{detection_id}", "DELETE")
641
# Webhooks
def get_webhooks(self) -> Dict[str, Any]:
    """List registered webhooks."""
    return self._api_request("/api/webhooks")

def create_webhook(self, url: str, events: List[str], secret: Optional[str] = None) -> Dict[str, Any]:
    """Register a webhook for the given ``events``, optionally signed with ``secret``."""
    payload = {"url": url, "events": events, "secret": secret}
    return self._api_request("/api/webhooks", "POST", payload)

def update_webhook(self, webhook_id: str, updates: Dict[str, Any]) -> Dict[str, Any]:
    """Partially update a webhook."""
    return self._api_request(f"/api/webhooks/{webhook_id}", "PATCH", updates)

def delete_webhook(self, webhook_id: str) -> Dict[str, Any]:
    """Delete a webhook by id."""
    return self._api_request(f"/api/webhooks/{webhook_id}", "DELETE")
654
# Classifications
def get_classifications(self, event_id: Optional[str] = None, classification_type: Optional[str] = None, min_score: Optional[float] = None, limit: int = 100) -> Dict[str, Any]:
    """List content classifications; optional filters are URL-encoded.

    Note: ``classification_type`` is sent as the ``type`` query parameter.
    """
    from urllib.parse import urlencode
    params: Dict[str, Any] = {"limit": limit}
    if event_id:
        params["event_id"] = event_id
    if classification_type:
        params["type"] = classification_type
    if min_score is not None:
        params["min_score"] = min_score
    return self._api_request(f"/api/classifications?{urlencode(params)}")
662
# Stats
def get_stats(self, period: str = "7d", app_id: Optional[str] = None) -> Dict[str, Any]:
    """Fetch aggregate statistics for ``period``, optionally scoped to ``app_id``.

    ``app_id`` is URL-encoded so reserved characters do not corrupt the query.
    """
    from urllib.parse import urlencode
    params: Dict[str, Any] = {"period": period}
    if app_id:
        params["app_id"] = app_id
    return self._api_request(f"/api/stats?{urlencode(params)}")