driftrail 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
driftrail/__init__.py ADDED
@@ -0,0 +1,19 @@
1
"""
DriftRail Python SDK
AI Safety & Observability Platform
"""

from .client import DriftRail, DriftRailAsync, DriftRailEnterprise
from .types import IngestPayload, IngestResponse, Provider, GuardResult, GuardBlockedError

# Keep in sync with the version published in the wheel metadata.
__version__ = "2.0.0"

# Explicit public API re-exported at package level.
__all__ = [
    "DriftRail",
    "DriftRailAsync",
    "DriftRailEnterprise",
    "IngestPayload",
    "IngestResponse",
    "Provider",
    "GuardResult",
    "GuardBlockedError",
]
driftrail/client.py ADDED
@@ -0,0 +1,478 @@
1
+ """
2
+ DriftRail Client - Sync and Async implementations
3
+ """
4
+
5
import json
from concurrent.futures import ThreadPoolExecutor
from typing import Optional, Dict, Any, Union
from urllib.error import HTTPError, URLError
from urllib.parse import urlencode
from urllib.request import Request, urlopen

from .types import (
    IngestPayload, IngestResponse, InputPayload, OutputPayload,
    Metadata, Provider, GuardResult, GuardBlockedError
)
15
+
16
+ DEFAULT_BASE_URL = "https://api.driftrail.com"
17
+
18
+
19
class DriftRail:
    """
    Synchronous DriftRail client.

    Usage:
        client = DriftRail(api_key="dr_live_...", app_id="my-app")

        response = client.ingest(
            model="gpt-5",
            provider="openai",
            input={"prompt": "Hello"},
            output={"text": "Hi there!"}
        )
    """

    def __init__(
        self,
        api_key: str,
        app_id: str,
        base_url: str = DEFAULT_BASE_URL,
        timeout: int = 30,
        fail_open: bool = True,
        guard_mode: str = "fail_open",
    ):
        """
        Initialize DriftRail client.

        Args:
            api_key: Your DriftRail API key (dr_live_... or dr_test_...)
            app_id: Your application identifier
            base_url: API base URL (default: https://api.driftrail.com)
            timeout: Request timeout in seconds
            fail_open: If True, errors are logged but don't raise exceptions
            guard_mode: "fail_open" (default) or "fail_closed" for guard() calls
        """
        self.api_key = api_key
        self.app_id = app_id
        self.base_url = base_url.rstrip("/")
        self.timeout = timeout
        self.fail_open = fail_open
        self.guard_mode = guard_mode
        # Small pool used exclusively by ingest_async() for fire-and-forget calls.
        self._executor = ThreadPoolExecutor(max_workers=4)

    @staticmethod
    def _get_either(data: Dict[str, Any], camel: str, snake: str) -> Any:
        """Return data[camel] if present and not None, else data[snake], else None.

        Unlike ``data.get(camel) or data.get(snake)``, this preserves legitimate
        falsy values such as 0 (e.g. ``latency_ms=0``) and empty lists.
        """
        value = data.get(camel)
        if value is None:
            value = data.get(snake)
        return value

    @classmethod
    def _coerce_input(cls, input: Union[InputPayload, Dict[str, Any]]) -> InputPayload:
        """Accept an InputPayload or a plain dict (camelCase or snake_case keys)."""
        if not isinstance(input, dict):
            return input
        return InputPayload(
            prompt=input.get("prompt", ""),
            messages=input.get("messages"),
            retrieved_sources=cls._get_either(input, "retrievedSources", "retrieved_sources"),
        )

    @classmethod
    def _coerce_output(cls, output: Union[OutputPayload, Dict[str, Any]]) -> OutputPayload:
        """Accept an OutputPayload or a plain dict (camelCase or snake_case keys)."""
        if not isinstance(output, dict):
            return output
        return OutputPayload(
            text=output.get("text", ""),
            tool_calls=cls._get_either(output, "toolCalls", "tool_calls"),
        )

    @classmethod
    def _coerce_metadata(
        cls, metadata: Optional[Union[Metadata, Dict[str, Any]]]
    ) -> Optional[Metadata]:
        """Accept a Metadata, a plain dict, or None; empty/None coerces to None."""
        if not metadata:
            return None
        if not isinstance(metadata, dict):
            return metadata
        return Metadata(
            latency_ms=cls._get_either(metadata, "latencyMs", "latency_ms"),
            tokens_in=cls._get_either(metadata, "tokensIn", "tokens_in"),
            tokens_out=cls._get_either(metadata, "tokensOut", "tokens_out"),
            temperature=metadata.get("temperature"),
        )

    def ingest(
        self,
        model: str,
        provider: Provider,
        input: Union[InputPayload, Dict[str, Any]],
        output: Union[OutputPayload, Dict[str, Any]],
        metadata: Optional[Union[Metadata, Dict[str, Any]]] = None,
    ) -> IngestResponse:
        """
        Ingest an LLM interaction for classification.

        Args:
            model: Model name (e.g., "gpt-5", "claude-3")
            provider: Provider name ("openai", "google", "anthropic", "other")
            input: Input payload with prompt and optional messages/sources
            output: Output payload with text and optional tool calls
            metadata: Optional metadata (latency, tokens, temperature)

        Returns:
            IngestResponse with event_id and job_id on success
        """
        payload = IngestPayload(
            model=model,
            provider=provider,
            input=self._coerce_input(input),
            output=self._coerce_output(output),
            metadata=self._coerce_metadata(metadata),
        )
        return self._send_ingest(payload)

    def ingest_async(
        self,
        model: str,
        provider: Provider,
        input: Union[InputPayload, Dict[str, Any]],
        output: Union[OutputPayload, Dict[str, Any]],
        metadata: Optional[Union[Metadata, Dict[str, Any]]] = None,
    ) -> None:
        """
        Ingest asynchronously (fire-and-forget).
        Does not block the main thread.
        """
        self._executor.submit(self.ingest, model, provider, input, output, metadata)

    def guard(
        self,
        output: str,
        input: Optional[str] = None,
        mode: str = "strict",
        timeout_ms: int = 100,
    ) -> GuardResult:
        """
        Inline guardrail check - blocks dangerous outputs before they reach users.

        Args:
            output: The LLM output text to check
            input: Optional user input/prompt for context
            mode: "strict" (block on medium+ risk) or "permissive" (block on high only)
            timeout_ms: Classification timeout in ms (default 100, max 500)

        Returns:
            GuardResult with allowed, action, output (possibly redacted), triggered

        Raises:
            GuardBlockedError: If guard_mode="fail_closed" and content is blocked
        """
        url = f"{self.base_url}/api/guard"

        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
            "X-App-Id": self.app_id,
        }

        payload = {
            "output": output,
            "input": input or "",
            "mode": mode,
            "timeout_ms": min(timeout_ms, 500),  # server caps classification at 500ms
            "app_id": self.app_id,
        }

        try:
            data = json.dumps(payload).encode("utf-8")
            req = Request(url, data=data, headers=headers, method="POST")
            # HTTP timeout = classification budget plus 1s of network slack,
            # never below 1 second.
            guard_timeout = max(1, timeout_ms / 1000 + 1)

            with urlopen(req, timeout=guard_timeout) as response:
                result_data = json.loads(response.read().decode("utf-8"))
            result = GuardResult.from_dict(result_data)

            if self.guard_mode == "fail_closed" and not result.allowed:
                raise GuardBlockedError(result)

            return result

        except GuardBlockedError:
            # Deliberate block decision — always propagate.
            raise

        except HTTPError as e:
            error_body = e.read().decode("utf-8") if e.fp else str(e)
            if self.guard_mode == "fail_closed":
                raise Exception(f"Guard API error: HTTP {e.code}: {error_body}") from e
            # Fail open: allow the content through, record the failure.
            return GuardResult(
                allowed=True, action="allow", output=output, triggered=[],
                latency_ms=0, fallback=True, error=f"HTTP {e.code}: {error_body}",
            )

        except Exception as e:
            # Network errors (URLError), JSON decode errors, timeouts, etc.
            if self.guard_mode == "fail_closed":
                raise Exception(f"Guard API error: {e}") from e
            return GuardResult(
                allowed=True, action="allow", output=output, triggered=[],
                latency_ms=0, fallback=True, error=str(e),
            )

    def _send_ingest(self, payload: IngestPayload) -> IngestResponse:
        """Send ingest request to API.

        Honors ``self.fail_open``: on failure either returns an unsuccessful
        IngestResponse (fail open) or re-raises the original exception.
        """
        url = f"{self.base_url}/ingest"

        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
            "X-App-Id": self.app_id,
        }

        try:
            data = json.dumps(payload.to_dict()).encode("utf-8")
            req = Request(url, data=data, headers=headers, method="POST")

            with urlopen(req, timeout=self.timeout) as response:
                result = json.loads(response.read().decode("utf-8"))
            return IngestResponse(
                success=result.get("success", False),
                event_id=result.get("event_id"),
                job_id=result.get("job_id"),
                duplicate=result.get("duplicate", False),
            )

        except HTTPError as e:
            error_body = e.read().decode("utf-8") if e.fp else str(e)
            if self.fail_open:
                return IngestResponse(success=False, error=f"HTTP {e.code}: {error_body}")
            raise

        except URLError as e:
            if self.fail_open:
                return IngestResponse(success=False, error=f"Network error: {e.reason}")
            raise

        except Exception as e:
            if self.fail_open:
                return IngestResponse(success=False, error=str(e))
            raise

    def close(self) -> None:
        """Shutdown the thread pool executor used by ingest_async()."""
        self._executor.shutdown(wait=False)

    def __enter__(self) -> "DriftRail":
        return self

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        self.close()
257
+
258
+
259
+
260
class DriftRailAsync:
    """
    Async DriftRail client using aiohttp.

    Usage:
        async with DriftRailAsync(api_key="...", app_id="my-app") as client:
            response = await client.ingest(...)
    """

    def __init__(
        self,
        api_key: str,
        app_id: str,
        base_url: str = DEFAULT_BASE_URL,
        timeout: int = 30,
        fail_open: bool = True,
    ):
        """
        Initialize the async client.

        Args:
            api_key: Your DriftRail API key
            app_id: Your application identifier
            base_url: API base URL (default: https://api.driftrail.com)
            timeout: Request timeout in seconds
            fail_open: If True, errors return an unsuccessful response instead of raising
        """
        self.api_key = api_key
        self.app_id = app_id
        self.base_url = base_url.rstrip("/")
        self.timeout = timeout
        self.fail_open = fail_open
        # Lazily-created aiohttp.ClientSession (aiohttp is an optional extra).
        self._session: Any = None

    async def _get_session(self) -> Any:
        """Create (once) and return the shared aiohttp session."""
        if self._session is None:
            try:
                import aiohttp
                self._session = aiohttp.ClientSession()
            except ImportError:
                raise ImportError("aiohttp is required for async client: pip install driftrail[async]")
        return self._session

    @staticmethod
    def _get_either(data: Dict[str, Any], camel: str, snake: str) -> Any:
        """Return data[camel] if not None, else data[snake] (else None).

        Preserves falsy-but-valid values such as 0, which a plain
        ``data.get(camel) or data.get(snake)`` would drop.
        """
        value = data.get(camel)
        if value is None:
            value = data.get(snake)
        return value

    async def ingest(
        self,
        model: str,
        provider: Provider,
        input: Union[InputPayload, Dict[str, Any]],
        output: Union[OutputPayload, Dict[str, Any]],
        metadata: Optional[Union[Metadata, Dict[str, Any]]] = None,
    ) -> IngestResponse:
        """Async ingest - see DriftRail.ingest for documentation."""
        import aiohttp

        if isinstance(input, dict):
            input_payload = InputPayload(
                prompt=input.get("prompt", ""),
                messages=input.get("messages"),
                retrieved_sources=self._get_either(input, "retrievedSources", "retrieved_sources"),
            )
        else:
            input_payload = input

        if isinstance(output, dict):
            output_payload = OutputPayload(
                text=output.get("text", ""),
                tool_calls=self._get_either(output, "toolCalls", "tool_calls"),
            )
        else:
            output_payload = output

        metadata_payload = None
        if metadata:
            if isinstance(metadata, dict):
                metadata_payload = Metadata(
                    latency_ms=self._get_either(metadata, "latencyMs", "latency_ms"),
                    tokens_in=self._get_either(metadata, "tokensIn", "tokens_in"),
                    tokens_out=self._get_either(metadata, "tokensOut", "tokens_out"),
                    temperature=metadata.get("temperature"),
                )
            else:
                metadata_payload = metadata

        payload = IngestPayload(
            model=model,
            provider=provider,
            input=input_payload,
            output=output_payload,
            metadata=metadata_payload,
        )

        url = f"{self.base_url}/ingest"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
            "X-App-Id": self.app_id,
        }

        try:
            session = await self._get_session()
            async with session.post(
                url,
                json=payload.to_dict(),
                headers=headers,
                timeout=aiohttp.ClientTimeout(total=self.timeout),
            ) as response:
                result = await response.json()
                if response.status >= 400:
                    if self.fail_open:
                        return IngestResponse(
                            success=False,
                            error=f"HTTP {response.status}: {result.get('error', 'Unknown error')}",
                        )
                    raise Exception(f"HTTP {response.status}: {result}")

                return IngestResponse(
                    success=result.get("success", False),
                    event_id=result.get("event_id"),
                    job_id=result.get("job_id"),
                    duplicate=result.get("duplicate", False),
                )

        except Exception as e:
            if self.fail_open:
                return IngestResponse(success=False, error=str(e))
            raise

    async def close(self) -> None:
        """Close the underlying aiohttp session (if one was created)."""
        if self._session:
            await self._session.close()
            self._session = None

    async def __aenter__(self) -> "DriftRailAsync":
        return self

    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        await self.close()
387
+
388
+
389
class DriftRailEnterprise(DriftRail):
    """
    Enterprise DriftRail client with monitoring features.

    Includes: Incidents, Compliance, Model Comparison, Exports, Brand Safety
    """

    def _api_request(self, endpoint: str, method: str = "GET", data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Make an authenticated API request and return the decoded JSON body.

        NOTE(review): unlike ingest()/guard(), no X-App-Id header is sent here —
        presumably these endpoints are organization-scoped; confirm against the API.
        """
        url = f"{self.base_url}{endpoint}"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.api_key}",
        }

        try:
            body = json.dumps(data).encode("utf-8") if data else None
            req = Request(url, data=body, headers=headers, method=method)

            with urlopen(req, timeout=self.timeout) as response:
                return json.loads(response.read().decode("utf-8"))

        except HTTPError as e:
            error_body = e.read().decode("utf-8") if e.fp else str(e)
            # Chain the original HTTPError so callers can still inspect it.
            raise Exception(f"HTTP {e.code}: {error_body}") from e

    def list_incidents(
        self,
        status: Optional[list] = None,
        severity: Optional[list] = None,
        limit: int = 50,
    ) -> Dict[str, Any]:
        """List incidents with optional status/severity filters.

        Filter values are URL-encoded so arbitrary strings cannot corrupt the
        query string.
        """
        params: Dict[str, Any] = {}
        if status:
            params["status"] = ",".join(status)
        if severity:
            params["severity"] = ",".join(severity)
        params["limit"] = limit
        return self._api_request(f"/api/incidents?{urlencode(params)}")

    def create_incident(
        self,
        title: str,
        severity: str,
        incident_type: str,
        description: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Create a new incident."""
        return self._api_request("/api/incidents", "POST", {
            "title": title,
            "severity": severity,
            "incident_type": incident_type,
            "description": description,
        })

    def get_incident_stats(self) -> Dict[str, Any]:
        """Get incident statistics."""
        return self._api_request("/api/incidents/stats")

    def get_compliance_status(self) -> Dict[str, Any]:
        """Get compliance framework status."""
        return self._api_request("/api/compliance/status")

    def get_model_leaderboard(self, metric: str = "avg_risk_score") -> Dict[str, Any]:
        """Get model performance leaderboard, ranked by *metric* (URL-encoded)."""
        return self._api_request(f"/api/models/leaderboard?{urlencode({'metric': metric})}")

    def create_export(
        self,
        export_type: str,
        format: str = "json",
        date_from: Optional[str] = None,
        date_to: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Create a data export job."""
        return self._api_request("/api/exports", "POST", {
            "export_type": export_type,
            "format": format,
            "date_from": date_from,
            "date_to": date_to,
        })

    def check_brand_safety(self, text: str, location: str = "output") -> Dict[str, Any]:
        """Check text against brand safety rules."""
        return self._api_request("/api/brand-safety/check", "POST", {
            "text": text,
            "location": location,
        })
driftrail/py.typed ADDED
File without changes
driftrail/types.py ADDED
@@ -0,0 +1,187 @@
1
+ """
2
+ DriftRail Type Definitions
3
+ """
4
+
5
+ from typing import Optional, List, Dict, Any, Literal
6
+ from dataclasses import dataclass, field, asdict
7
+
8
+ Provider = Literal["openai", "google", "anthropic", "other"]
9
+
10
+
11
@dataclass
class Message:
    """A single message in an LLM conversation transcript."""

    role: Literal["system", "user", "assistant", "tool"]
    content: str
    name: Optional[str] = None  # optional participant/function name
    # presumably links a tool-role message to the tool call it answers — confirm with API docs
    tool_call_id: Optional[str] = None
17
+
18
+
19
@dataclass
class SourceRef:
    """Reference to a retrieved document/source supplied as RAG context."""

    id: str
    type: Optional[str] = None  # source kind, e.g. document/url — not constrained here
    content: Optional[str] = None  # retrieved text excerpt
    url: Optional[str] = None
    metadata: Optional[Dict[str, Any]] = None  # free-form extra attributes
26
+
27
+
28
@dataclass
class ToolCall:
    """A tool/function invocation emitted by the model."""

    id: str
    type: Literal["function"] = "function"  # only function calls are modeled
    # presumably {"name": ..., "arguments": ...} per the OpenAI convention — confirm
    function: Dict[str, str] = field(default_factory=dict)
33
+
34
+
35
@dataclass
class InputPayload:
    """Input side of an LLM interaction: prompt plus optional context."""

    prompt: str
    messages: Optional[List[Message]] = None
    retrieved_sources: Optional[List[SourceRef]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to the wire format (camelCase keys; empty fields omitted)."""
        payload: Dict[str, Any] = {"prompt": self.prompt}
        if self.messages:
            payload["messages"] = [asdict(msg) for msg in self.messages]
        if self.retrieved_sources:
            payload["retrievedSources"] = [asdict(src) for src in self.retrieved_sources]
        return payload
48
+
49
+
50
@dataclass
class OutputPayload:
    """Output side of an LLM interaction: generated text plus optional tool calls."""

    text: str
    tool_calls: Optional[List[ToolCall]] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to the wire format (camelCase keys; empty fields omitted)."""
        payload: Dict[str, Any] = {"text": self.text}
        if self.tool_calls:
            payload["toolCalls"] = [asdict(call) for call in self.tool_calls]
        return payload
60
+
61
+
62
@dataclass
class Metadata:
    """Optional request metrics attached to an ingested interaction."""

    latency_ms: Optional[int] = None
    tokens_in: Optional[int] = None
    tokens_out: Optional[int] = None
    temperature: Optional[float] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to camelCase wire keys, dropping unset (None) fields.

        Zero is a valid value and is kept — only ``None`` means "unset".
        """
        pairs = (
            ("latencyMs", self.latency_ms),
            ("tokensIn", self.tokens_in),
            ("tokensOut", self.tokens_out),
            ("temperature", self.temperature),
        )
        return {key: value for key, value in pairs if value is not None}
80
+
81
+
82
@dataclass
class IngestPayload:
    """Complete ingest request body: model identity plus input/output payloads."""

    model: str
    provider: Provider
    input: InputPayload
    output: OutputPayload
    metadata: Optional[Metadata] = None

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to the wire format; metadata is included only when set."""
        body: Dict[str, Any] = {
            "model": self.model,
            "provider": self.provider,
            "input": self.input.to_dict(),
            "output": self.output.to_dict(),
        }
        if self.metadata:
            body["metadata"] = self.metadata.to_dict()
        return body
100
+
101
+
102
@dataclass
class IngestResponse:
    """Result of an ingest call; ``success=False`` carries an ``error`` string."""

    success: bool
    event_id: Optional[str] = None  # server-assigned id of the stored event
    job_id: Optional[str] = None  # id of the async classification job
    error: Optional[str] = None  # populated on failure (fail-open path)
    duplicate: bool = False  # True when the server deduplicated this event
109
+
110
+
111
+ # Guard types for inline guardrails
112
+
113
+ GuardMode = Literal["strict", "permissive"]
114
+ GuardAction = Literal["allow", "block", "redact", "warn"]
115
+
116
+
117
@dataclass
class GuardTriggered:
    """One rule or classifier that fired during a guard check."""

    type: Literal["classification", "guardrail"]
    name: str  # human-readable rule/classifier name
    reason: str  # explanation of why it fired
122
+
123
+
124
@dataclass
class GuardClassification:
    """Flattened classification details returned by the guard API."""

    risk_score: int
    pii_detected: bool
    pii_types: List[str]  # e.g. which PII categories were found
    toxicity_detected: bool
    toxicity_severity: str  # "none" when not detected
    prompt_injection_detected: bool
    prompt_injection_risk: str  # "none" when not detected
133
+
134
+
135
@dataclass
class GuardResult:
    """Result from inline guard check."""

    allowed: bool  # whether the output may be shown to the user
    action: GuardAction  # "allow" | "block" | "redact" | "warn"
    output: str  # possibly-redacted output text
    triggered: List[GuardTriggered]  # rules/classifiers that fired
    latency_ms: int
    fallback: bool  # True when the SDK failed open instead of getting a verdict
    classification: Optional[GuardClassification] = None
    error: Optional[str] = None  # error detail when something went wrong

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "GuardResult":
        """Build a GuardResult from the guard API's JSON response.

        Missing fields fall back to permissive defaults (allowed / "allow").
        """
        triggered = [
            GuardTriggered(
                type=t.get("type", "guardrail"),
                name=t.get("name", "Unknown"),
                reason=t.get("reason", ""),
            )
            for t in data.get("triggered", [])
        ]

        classification = None
        raw = data.get("classification")
        if raw:
            classification = GuardClassification(
                risk_score=raw.get("risk_score", 0),
                pii_detected=raw.get("pii", {}).get("detected", False),
                pii_types=raw.get("pii", {}).get("types", []),
                toxicity_detected=raw.get("toxicity", {}).get("detected", False),
                toxicity_severity=raw.get("toxicity", {}).get("severity", "none"),
                prompt_injection_detected=raw.get("prompt_injection", {}).get("detected", False),
                prompt_injection_risk=raw.get("prompt_injection", {}).get("risk", "none"),
            )

        return cls(
            allowed=data.get("allowed", True),
            action=data.get("action", "allow"),
            output=data.get("output", ""),
            triggered=triggered,
            latency_ms=data.get("latency_ms", 0),
            fallback=data.get("fallback", False),
            classification=classification,
            # Fix: previously a server-reported "error" field was silently dropped.
            error=data.get("error"),
        )
180
+
181
+
182
class GuardBlockedError(Exception):
    """Raised by guard() in fail_closed mode when content is blocked.

    The full GuardResult is available on ``self.result``.
    """

    def __init__(self, result: GuardResult):
        self.result = result
        summary = "; ".join(trigger.reason for trigger in result.triggered)
        super().__init__(f"Content blocked: {summary}")
@@ -0,0 +1,226 @@
1
+ Metadata-Version: 2.4
2
+ Name: driftrail
3
+ Version: 2.0.0
4
+ Summary: DriftRail SDK - AI Safety & Observability Platform for LLM monitoring, guardrails, and compliance
5
+ Author-email: DriftRail <support@driftrail.com>
6
+ Maintainer-email: DriftRail <support@driftrail.com>
7
+ License: MIT
8
+ Project-URL: Homepage, https://driftrail.com
9
+ Project-URL: Documentation, https://docs.driftrail.com
10
+ Project-URL: Repository, https://github.com/cutmob/DriftRail-Python
11
+ Project-URL: Changelog, https://github.com/cutmob/DriftRail-Python/blob/main/CHANGELOG.md
12
+ Project-URL: Bug Tracker, https://github.com/cutmob/DriftRail-Python/issues
13
+ Keywords: ai,llm,observability,safety,monitoring,audit,guardrails,compliance,openai,anthropic,gemini,machine-learning,mlops
14
+ Classifier: Development Status :: 4 - Beta
15
+ Classifier: Intended Audience :: Developers
16
+ Classifier: License :: OSI Approved :: MIT License
17
+ Classifier: Operating System :: OS Independent
18
+ Classifier: Programming Language :: Python :: 3
19
+ Classifier: Programming Language :: Python :: 3.8
20
+ Classifier: Programming Language :: Python :: 3.9
21
+ Classifier: Programming Language :: Python :: 3.10
22
+ Classifier: Programming Language :: Python :: 3.11
23
+ Classifier: Programming Language :: Python :: 3.12
24
+ Classifier: Programming Language :: Python :: 3.13
25
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
26
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
27
+ Classifier: Topic :: System :: Monitoring
28
+ Classifier: Typing :: Typed
29
+ Requires-Python: >=3.8
30
+ Description-Content-Type: text/markdown
31
+ License-File: LICENSE
32
+ Provides-Extra: async
33
+ Requires-Dist: aiohttp>=3.8.0; extra == "async"
34
+ Provides-Extra: dev
35
+ Requires-Dist: pytest>=7.0.0; extra == "dev"
36
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
37
+ Requires-Dist: mypy>=1.0.0; extra == "dev"
38
+ Requires-Dist: ruff>=0.1.0; extra == "dev"
39
+ Dynamic: license-file
40
+
41
+ # DriftRail Python SDK
42
+
43
+ [![PyPI version](https://badge.fury.io/py/driftrail.svg)](https://pypi.org/project/driftrail/)
44
+ [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
45
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
46
+
47
+ AI Safety & Observability Platform — Monitor, classify, and audit every LLM interaction.
48
+
49
+ ## Installation
50
+
51
+ ```bash
52
+ pip install driftrail
53
+
54
+ # For async support
55
+ pip install driftrail[async]
56
+ ```
57
+
58
+ ## Quick Start
59
+
60
+ ```python
61
+ from driftrail import DriftRail
62
+
63
+ client = DriftRail(
64
+ api_key="dr_live_...",
65
+ app_id="my-app"
66
+ )
67
+
68
+ # Log an LLM interaction
69
+ response = client.ingest(
70
+ model="claude-sonnet-4",
71
+ provider="anthropic",
72
+ input={"prompt": "What is the capital of France?"},
73
+ output={"text": "The capital of France is Paris."}
74
+ )
75
+
76
+ print(f"Event ID: {response.event_id}")
77
+ ```
78
+
79
+ ## Inline Guardrails
80
+
81
+ Block dangerous outputs BEFORE they reach users:
82
+
83
+ ```python
84
+ from driftrail import DriftRail
85
+
86
+ client = DriftRail(api_key="...", app_id="my-app")
87
+
88
+ # Get response from your LLM
89
+ llm_response = your_llm_call(user_prompt)
90
+
91
+ # Guard it before returning to user
92
+ result = client.guard(
93
+ output=llm_response,
94
+ input=user_prompt,
95
+ mode="strict" # or "permissive"
96
+ )
97
+
98
+ if result.allowed:
99
+ return result.output # May be redacted if PII was found
100
+ else:
101
+ print(f"Blocked: {[t.reason for t in result.triggered]}")
102
+ return "Sorry, I can't help with that."
103
+ ```
104
+
105
+ ### Guard Modes
106
+
107
+ - `strict` (default): Blocks on medium+ risk (PII, moderate toxicity, prompt injection)
108
+ - `permissive`: Only blocks on high risk (severe toxicity, high-risk injection)
109
+
110
+ ### Fail-Open vs Fail-Closed
111
+
112
+ ```python
113
+ # Fail-open (default): If DriftRail is unavailable, content is allowed through
114
+ client = DriftRail(api_key="...", app_id="...", guard_mode="fail_open")
115
+
116
+ # Fail-closed: If DriftRail is unavailable, raises exception
117
+ client = DriftRail(api_key="...", app_id="...", guard_mode="fail_closed")
118
+
119
+ try:
120
+ result = client.guard(output=llm_response)
121
+ except GuardBlockedError as e:
122
+ print(f"Blocked: {e.result.triggered}")
123
+ ```
124
+
125
+ ## Async Usage
126
+
127
+ ```python
128
+ import asyncio
129
+ from driftrail import DriftRailAsync
130
+
131
+ async def main():
132
+ async with DriftRailAsync(api_key="...", app_id="my-app") as client:
133
+ response = await client.ingest(
134
+ model="claude-3",
135
+ provider="anthropic",
136
+ input={"prompt": "Hello"},
137
+ output={"text": "Hi there!"}
138
+ )
139
+
140
+ asyncio.run(main())
141
+ ```
142
+
143
+ ## Fire-and-Forget (Non-blocking)
144
+
145
+ ```python
146
+ # Won't block your main thread
147
+ client.ingest_async(
148
+ model="gpt-4o",
149
+ provider="openai",
150
+ input={"prompt": "..."},
151
+ output={"text": "..."}
152
+ )
153
+ ```
154
+
155
+ > ⚠️ **Serverless Warning**: Do not use `ingest_async()` in AWS Lambda, Google Cloud Functions, or other serverless environments. Use the synchronous `ingest()` method instead.
156
+
157
+ ## With Metadata
158
+
159
+ ```python
160
+ import time
161
+
162
+ start = time.time()
163
+ # ... your LLM call ...
164
+ latency = int((time.time() - start) * 1000)
165
+
166
+ client.ingest(
167
+ model="gpt-4o",
168
+ provider="openai",
169
+ input={"prompt": "..."},
170
+ output={"text": "..."},
171
+ metadata={
172
+ "latency_ms": latency,
173
+ "tokens_in": 50,
174
+ "tokens_out": 150,
175
+ "temperature": 0.7
176
+ }
177
+ )
178
+ ```
179
+
180
+ ## With RAG Sources
181
+
182
+ ```python
183
+ client.ingest(
184
+ model="claude-3.5-haiku",
185
+ provider="anthropic",
186
+ input={
187
+ "prompt": "What does our refund policy say?",
188
+ "retrieved_sources": [
189
+ {"id": "doc-123", "content": "Refunds are available within 30 days..."},
190
+ {"id": "doc-456", "content": "Contact support for refund requests..."}
191
+ ]
192
+ },
193
+ output={"text": "According to our policy, refunds are available within 30 days..."}
194
+ )
195
+ ```
196
+
197
+ ## Enterprise Features
198
+
199
+ ```python
200
+ from driftrail import DriftRailEnterprise
201
+
202
+ client = DriftRailEnterprise(api_key="...", app_id="my-app")
203
+
204
+ # Incident management
205
+ stats = client.get_incident_stats()
206
+
207
+ # Compliance status
208
+ compliance = client.get_compliance_status()
209
+
210
+ # Model leaderboard
211
+ leaderboard = client.get_model_leaderboard(metric="avg_risk_score")
212
+
213
+ # Brand safety checks
214
+ violations = client.check_brand_safety("Some AI output text")
215
+ ```
216
+
217
+ ## Documentation
218
+
219
+ - [Full Documentation](https://docs.driftrail.com)
220
+ - [API Reference](https://docs.driftrail.com/api)
221
+ - [Dashboard](https://app.driftrail.com)
222
+ - [GitHub Repository](https://github.com/cutmob/DriftRail-Python)
223
+
224
+ ## License
225
+
226
+ MIT
@@ -0,0 +1,9 @@
1
+ driftrail/__init__.py,sha256=bM87N2M9Ii431s-lN2DAkMYceMauPGRSVY_Q6QB5aQ8,433
2
+ driftrail/client.py,sha256=g4XQChRpqTXhbIQo8lisH86_Tm09MLNvTRWZrSAzUas,16470
3
+ driftrail/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ driftrail/types.py,sha256=5WEpnl7e513b9ugtmFitC6blbMe2VC5oUV11ARYodj4,5232
5
+ driftrail-2.0.0.dist-info/licenses/LICENSE,sha256=c9ZcDM-aSAaN2gLxDBnXkKHd9qeTS25eXQg4i2vYQcU,1066
6
+ driftrail-2.0.0.dist-info/METADATA,sha256=y5SA5wuDhSJCDKWQgQaoPYHMvYg3IwPQL-uO3KlegWA,6334
7
+ driftrail-2.0.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
8
+ driftrail-2.0.0.dist-info/top_level.txt,sha256=Ok_mUwZ0Sktm13smRuubG2jZTSyoQEwjsbrEHoqTTnU,10
9
+ driftrail-2.0.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.10.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 DriftRail
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1 @@
1
+ driftrail