deeprails 0.2.1.tar.gz → 0.3.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release of deeprails has been flagged as potentially problematic.

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: deeprails
- Version: 0.2.1
+ Version: 0.3.1
  Summary: Python SDK for interacting with the DeepRails API
  Project-URL: Homepage, https://deeprails.com
  Project-URL: Documentation, https://docs.deeprails.com
@@ -35,7 +35,9 @@ Description-Content-Type: text/markdown

  # DeepRails Python SDK

- A lightweight, intuitive Python SDK for interacting with the DeepRails API. DeepRails helps you evaluate and improve AI-generated outputs through a comprehensive set of guardrail metrics.
+ Official Python SDK for interacting with the DeepRails API. [DeepRails](https://deeprails.com) is a powerful developer tool with a comprehensive set of adaptive guardrails that protect against LLM hallucinations. Deploy our Evaluate, Monitor, and Defend APIs in under 15 minutes for the best out-of-the-box guardrails on the market.
+
+ Supports DeepRails API v2.0

  ## Installation

@@ -55,12 +57,17 @@ client = DeepRails(token="YOUR_API_KEY")
  evaluation = client.create_evaluation(
      model_input={"user_prompt": "Prompt used to generate completion"},
      model_output="Generated output",
-     model_used="gpt-4o-mini (LLM used to generate completion)",
+     model_used="gpt-4o-mini",
      guardrail_metrics=["correctness", "completeness"]
  )
-
- # Print evaluation ID
  print(f"Evaluation created with ID: {evaluation.eval_id}")
+
+ # Create a monitor
+ monitor = client.create_monitor(
+     name="Production Assistant Monitor",
+     description="Tracking our production assistant quality"
+ )
+ print(f"Monitor created with ID: {monitor.monitor_id}")
  ```

  ## Features
@@ -69,6 +76,7 @@ print(f"Evaluation created with ID: {evaluation.eval_id}")
  - **Comprehensive Metrics**: Evaluate outputs on correctness, completeness, and more
  - **Real-time Progress**: Track evaluation progress in real-time
  - **Detailed Results**: Get detailed scores and rationales for each metric
+ - **Continuous Monitoring**: Create monitors to track AI system performance over time

  ## Authentication

@@ -81,14 +89,16 @@ token = os.environ.get("DEEPRAILS_API_KEY")
  client = DeepRails(token=token)
  ```

- ## Creating Evaluations
+ ## Evaluation Service
+
+ ### Creating Evaluations

  ```python
  try:
      evaluation = client.create_evaluation(
          model_input={"user_prompt": "Prompt used to generate completion"},
          model_output="Generated output",
-         model_used="gpt-4o-mini (LLM used to generate completion)",
+         model_used="gpt-4o-mini",
          guardrail_metrics=["correctness", "completeness"]
      )
      print(f"ID: {evaluation.eval_id}")
@@ -98,7 +108,7 @@ except Exception as e:
      print(f"Error: {e}")
  ```

- ### Parameters
+ #### Parameters

  - `model_input`: Dictionary containing the prompt and any context (must include `user_prompt`)
  - `model_output`: The generated output to evaluate
@@ -108,7 +118,7 @@ except Exception as e:
  - `nametag`: (Optional) Custom identifier for this evaluation
  - `webhook`: (Optional) URL to receive completion notifications

- ## Retrieving Evaluations
+ ### Retrieving Evaluations

  ```python
  try:
@@ -126,6 +136,76 @@ except Exception as e:
      print(f"Error: {e}")
  ```

+ ## Monitor Service
+
+ ### Creating Monitors
+
+ ```python
+ try:
+     # Create a monitor
+     monitor = client.create_monitor(
+         name="Production Chat Assistant Monitor",
+         description="Monitoring our production chatbot responses"
+     )
+
+     print(f"Monitor created with ID: {monitor.monitor_id}")
+ except Exception as e:
+     print(f"Error: {e}")
+ ```
+
+ ### Logging Monitor Events
+
+ ```python
+ try:
+     # Add an event to the monitor
+     event = client.create_monitor_event(
+         monitor_id="mon-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
+         model_input={"user_prompt": "Tell me about renewable energy"},
+         model_output="Renewable energy comes from natural sources...",
+         model_used="gpt-4o-mini",
+         guardrail_metrics=["correctness", "completeness", "comprehensive_safety"]
+     )
+
+     print(f"Monitor event created with ID: {event.event_id}")
+     print(f"Associated evaluation ID: {event.evaluation_id}")
+ except Exception as e:
+     print(f"Error: {e}")
+ ```
+
+ ### Retrieving Monitor Data
+
+ ```python
+ try:
+     # Get monitor details
+     monitor = client.get_monitor("mon-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")
+     print(f"Monitor name: {monitor.name}")
+     print(f"Status: {monitor.monitor_status}")
+
+     # Get monitor events
+     events = client.get_monitor_events(
+         monitor_id="mon-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
+         limit=10
+     )
+
+     for event in events:
+         print(f"Event ID: {event.event_id}")
+         print(f"Evaluation ID: {event.evaluation_id}")
+
+     # List all monitors with filtering
+     monitors = client.get_monitors(
+         limit=5,
+         monitor_status=["active"],
+         sort_by="created_at",
+         sort_order="desc"
+     )
+
+     print(f"Total monitors: {monitors.pagination.total_count}")
+     for m in monitors.monitors:
+         print(f"{m.name}: {m.event_count} events")
+ except Exception as e:
+     print(f"Error: {e}")
+ ```
+
  ## Available Metrics

  - `correctness`: Measures factual accuracy by evaluating whether each claim in the output is true and verifiable.
@@ -135,7 +215,6 @@ except Exception as e:
  - `ground_truth_adherence`: Measures how closely the output matches a known correct answer (gold standard).
  - `comprehensive_safety`: Detects and categorizes safety violations across areas like PII, CBRN, hate speech, self-harm, and more.

-
  ## Error Handling

  The SDK raises `DeepRailsAPIError` for API-related errors, carrying the HTTP status code and a detailed message.
@@ -1,6 +1,8 @@
  # DeepRails Python SDK

- A lightweight, intuitive Python SDK for interacting with the DeepRails API. DeepRails helps you evaluate and improve AI-generated outputs through a comprehensive set of guardrail metrics.
+ Official Python SDK for interacting with the DeepRails API. [DeepRails](https://deeprails.com) is a powerful developer tool with a comprehensive set of adaptive guardrails that protect against LLM hallucinations. Deploy our Evaluate, Monitor, and Defend APIs in under 15 minutes for the best out-of-the-box guardrails on the market.
+
+ Supports DeepRails API v2.0

  ## Installation

@@ -20,12 +22,17 @@ client = DeepRails(token="YOUR_API_KEY")
  evaluation = client.create_evaluation(
      model_input={"user_prompt": "Prompt used to generate completion"},
      model_output="Generated output",
-     model_used="gpt-4o-mini (LLM used to generate completion)",
+     model_used="gpt-4o-mini",
      guardrail_metrics=["correctness", "completeness"]
  )
-
- # Print evaluation ID
  print(f"Evaluation created with ID: {evaluation.eval_id}")
+
+ # Create a monitor
+ monitor = client.create_monitor(
+     name="Production Assistant Monitor",
+     description="Tracking our production assistant quality"
+ )
+ print(f"Monitor created with ID: {monitor.monitor_id}")
  ```

  ## Features
@@ -34,6 +41,7 @@ print(f"Evaluation created with ID: {evaluation.eval_id}")
  - **Comprehensive Metrics**: Evaluate outputs on correctness, completeness, and more
  - **Real-time Progress**: Track evaluation progress in real-time
  - **Detailed Results**: Get detailed scores and rationales for each metric
+ - **Continuous Monitoring**: Create monitors to track AI system performance over time

  ## Authentication

@@ -46,14 +54,16 @@ token = os.environ.get("DEEPRAILS_API_KEY")
  client = DeepRails(token=token)
  ```

- ## Creating Evaluations
+ ## Evaluation Service
+
+ ### Creating Evaluations

  ```python
  try:
      evaluation = client.create_evaluation(
          model_input={"user_prompt": "Prompt used to generate completion"},
          model_output="Generated output",
-         model_used="gpt-4o-mini (LLM used to generate completion)",
+         model_used="gpt-4o-mini",
          guardrail_metrics=["correctness", "completeness"]
      )
      print(f"ID: {evaluation.eval_id}")
@@ -63,7 +73,7 @@ except Exception as e:
      print(f"Error: {e}")
  ```

- ### Parameters
+ #### Parameters

  - `model_input`: Dictionary containing the prompt and any context (must include `user_prompt`)
  - `model_output`: The generated output to evaluate
@@ -73,7 +83,7 @@ except Exception as e:
  - `nametag`: (Optional) Custom identifier for this evaluation
  - `webhook`: (Optional) URL to receive completion notifications

- ## Retrieving Evaluations
+ ### Retrieving Evaluations

  ```python
  try:
@@ -91,6 +101,76 @@ except Exception as e:
      print(f"Error: {e}")
  ```

+ ## Monitor Service
+
+ ### Creating Monitors
+
+ ```python
+ try:
+     # Create a monitor
+     monitor = client.create_monitor(
+         name="Production Chat Assistant Monitor",
+         description="Monitoring our production chatbot responses"
+     )
+
+     print(f"Monitor created with ID: {monitor.monitor_id}")
+ except Exception as e:
+     print(f"Error: {e}")
+ ```
+
+ ### Logging Monitor Events
+
+ ```python
+ try:
+     # Add an event to the monitor
+     event = client.create_monitor_event(
+         monitor_id="mon-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
+         model_input={"user_prompt": "Tell me about renewable energy"},
+         model_output="Renewable energy comes from natural sources...",
+         model_used="gpt-4o-mini",
+         guardrail_metrics=["correctness", "completeness", "comprehensive_safety"]
+     )
+
+     print(f"Monitor event created with ID: {event.event_id}")
+     print(f"Associated evaluation ID: {event.evaluation_id}")
+ except Exception as e:
+     print(f"Error: {e}")
+ ```
+
+ ### Retrieving Monitor Data
+
+ ```python
+ try:
+     # Get monitor details
+     monitor = client.get_monitor("mon-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")
+     print(f"Monitor name: {monitor.name}")
+     print(f"Status: {monitor.monitor_status}")
+
+     # Get monitor events
+     events = client.get_monitor_events(
+         monitor_id="mon-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
+         limit=10
+     )
+
+     for event in events:
+         print(f"Event ID: {event.event_id}")
+         print(f"Evaluation ID: {event.evaluation_id}")
+
+     # List all monitors with filtering
+     monitors = client.get_monitors(
+         limit=5,
+         monitor_status=["active"],
+         sort_by="created_at",
+         sort_order="desc"
+     )
+
+     print(f"Total monitors: {monitors.pagination.total_count}")
+     for m in monitors.monitors:
+         print(f"{m.name}: {m.event_count} events")
+ except Exception as e:
+     print(f"Error: {e}")
+ ```
+
  ## Available Metrics

  - `correctness`: Measures factual accuracy by evaluating whether each claim in the output is true and verifiable.
@@ -100,7 +180,6 @@ except Exception as e:
  - `ground_truth_adherence`: Measures how closely the output matches a known correct answer (gold standard).
  - `comprehensive_safety`: Detects and categorizes safety violations across areas like PII, CBRN, hate speech, self-harm, and more.

-
  ## Error Handling

  The SDK raises `DeepRailsAPIError` for API-related errors, carrying the HTTP status code and a detailed message.
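
The error-handling contract described above can be exercised as follows. This is a minimal sketch, assuming the package exposes `DeepRails` at the top level (as in the quick start) and `DeepRailsAPIError` under `deeprails.exceptions` (the module the new client.py imports from, which is not part of this diff), and assuming the exception keeps the `status_code` and `error_detail` values it is constructed with:

```python
from deeprails import DeepRails
from deeprails.exceptions import DeepRailsAPIError

client = DeepRails(token="YOUR_API_KEY")

try:
    # Request an evaluation that does not exist to trigger an API error.
    evaluation = client.get_evaluation("eval-does-not-exist")
except DeepRailsAPIError as err:
    # status_code / error_detail are assumed attribute names; see the
    # exceptions.py sketch further below.
    print(f"DeepRails API error {err.status_code}: {err.error_detail}")
```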
@@ -0,0 +1,285 @@
+ import httpx
+ from typing import List, Optional, Dict, Any
+
+ from .schemas import EvaluationResponse, MonitorResponse, MonitorEventResponse, MonitorListResponse
+ from .exceptions import DeepRailsAPIError
+
+ class DeepRails:
+     """
+     Python SDK client for the DeepRails API.
+     """
+
+     def __init__(self, token: str, base_url: str = "https://api.deeprails.com"):
+         """
+         Initializes the DeepRails client.
+
+         Args:
+             token: Your DeepRails API key (starts with 'sk_').
+             base_url: The base URL of the DeepRails API.
+         """
+         if not token:
+             raise ValueError("A valid DeepRails API token is required.")
+
+         self._base_url = base_url
+         self._headers = {
+             "Authorization": f"Bearer {token}",
+             "Content-Type": "application/json",
+             "User-Agent": "deeprails-python-sdk/0.3.0"
+         }
+         self._client = httpx.Client(base_url=self._base_url, headers=self._headers, timeout=30.0)
+
+
+     def _request(self, method: str, endpoint: str, **kwargs) -> httpx.Response:
+         """Helper method to make requests and handle API errors."""
+         try:
+             response = self._client.request(method, endpoint, **kwargs)
+             response.raise_for_status()
+             return response
+         except httpx.HTTPStatusError as e:
+             error_detail = "No detail provided."
+             try:
+                 error_detail = e.response.json().get("detail", error_detail)
+             except Exception:
+                 error_detail = e.response.text
+             raise DeepRailsAPIError(status_code=e.response.status_code, error_detail=error_detail) from e
+         except httpx.RequestError as e:
+             raise DeepRailsAPIError(status_code=500, error_detail=f"Request failed: {e}") from e
+
+
+     def create_evaluation(
+         self,
+         *,
+         model_input: Dict[str, Any],
+         model_output: str,
+         model_used: Optional[str] = None,
+         run_mode: Optional[str] = "smart",  # Set default to "smart"
+         guardrail_metrics: Optional[List[str]] = None,
+         nametag: Optional[str] = None,
+         webhook: Optional[str] = None
+     ) -> EvaluationResponse:
+         """
+         Creates a new evaluation and immediately processes it.
+
+         Args:
+             model_input: A dictionary containing the inputs for the model.
+                 Must contain a "user_prompt" key.
+             model_output: The response generated by the model you are evaluating.
+             model_used: The name or identifier of the model being evaluated.
+             run_mode: The evaluation mode (e.g., "smart", "dev").
+             guardrail_metrics: A list of metrics to evaluate.
+             nametag: A user-defined name or tag for the evaluation.
+             webhook: A URL to send a POST request to upon evaluation completion.
+
+         Returns:
+             An EvaluationResponse object with the details of the created evaluation.
+         """
+         if "user_prompt" not in model_input:
+             raise ValueError("`model_input` must contain a 'user_prompt' key.")
+
+         payload = {
+             "model_input": model_input,
+             "model_output": model_output,
+             "model_used": model_used,
+             "run_mode": run_mode,
+             "guardrail_metrics": guardrail_metrics,
+             "nametag": nametag,
+             "webhook": webhook,
+         }
+         json_payload = {k: v for k, v in payload.items() if v is not None}
+
+         response = self._request("POST", "/evaluate", json=json_payload)
+         return EvaluationResponse.parse_obj(response.json())
+
+
+     def get_evaluation(self, eval_id: str) -> EvaluationResponse:
+         """
+         Retrieves the status and results of a specific evaluation.
+
+         Args:
+             eval_id: The unique identifier of the evaluation.
+
+         Returns:
+             An EvaluationResponse object with the full, up-to-date details of the evaluation.
+         """
+         response = self._request("GET", f"/evaluate/{eval_id}")
+         return EvaluationResponse.parse_obj(response.json())
+
+
+     def create_monitor(
+         self,
+         *,
+         name: str,
+         description: Optional[str] = None
+     ) -> MonitorResponse:
+         """
+         Creates a new monitor for tracking AI responses.
+
+         Args:
+             name: A name for the monitor.
+             description: Optional description of the monitor's purpose.
+
+         Returns:
+             A MonitorResponse object with the details of the created monitor.
+         """
+         payload = {
+             "name": name,
+             "description": description
+         }
+
+         # Remove None values
+         json_payload = {k: v for k, v in payload.items() if v is not None}
+
+         response = self._request("POST", "/monitor", json=json_payload)
+         response_json = response.json()
+
+         # Handle DeepRails API response structure
+         if "data" in response_json:
+             return MonitorResponse.parse_obj(response_json["data"])
+         else:
+             return MonitorResponse.parse_obj(response_json)
+
+     def get_monitor(self, monitor_id: str) -> MonitorResponse:
+         """
+         Get details of a specific monitor.
+
+         Args:
+             monitor_id: The ID of the monitor to retrieve.
+
+         Returns:
+             A MonitorResponse object with the monitor details.
+         """
+         response = self._request("GET", f"/monitor/{monitor_id}")
+         response_json = response.json()
+
+         # Handle DeepRails API response structure
+         if "data" in response_json:
+             return MonitorResponse.parse_obj(response_json["data"])
+         else:
+             return MonitorResponse.parse_obj(response_json)
+
+     def create_monitor_event(
+         self,
+         *,
+         monitor_id: str,
+         model_input: Dict[str, Any],
+         model_output: str,
+         guardrail_metrics: List[str],
+         model_used: Optional[str] = None,
+         run_mode: Optional[str] = None,
+         nametag: Optional[str] = None,
+         webhook: Optional[str] = None
+     ) -> MonitorEventResponse:
+         """
+         Creates a new event for a monitor.
+
+         Args:
+             monitor_id: The ID of the monitor to create an event for.
+             model_input: A dictionary containing the inputs for the model.
+             model_output: The response generated by the model you are evaluating.
+             guardrail_metrics: A list of metrics to evaluate.
+             model_used: The name or identifier of the model being evaluated.
+             run_mode: The evaluation mode (e.g., "smart", "dev").
+             nametag: A user-defined name or tag for the event.
+             webhook: A URL to send a POST request to upon evaluation completion.
+
+         Returns:
+             A MonitorEventResponse object with the details of the created event.
+         """
+         payload = {
+             "model_input": model_input,
+             "model_output": model_output,
+             "model_used": model_used,
+             "run_mode": run_mode,
+             "guardrail_metrics": guardrail_metrics,
+             "nametag": nametag,
+             "webhook": webhook,
+         }
+
+         # Remove None values
+         json_payload = {k: v for k, v in payload.items() if v is not None}
+
+         response = self._request("POST", f"/monitor/{monitor_id}/events", json=json_payload)
+         response_json = response.json()
+
+         # Handle DeepRails API response structure
+         if "data" in response_json:
+             return MonitorEventResponse.parse_obj(response_json["data"])
+         else:
+             return MonitorEventResponse.parse_obj(response_json)
+
+     def get_monitor_events(
+         self,
+         monitor_id: str,
+         limit: int = 10,
+         offset: int = 0
+     ) -> List[MonitorEventResponse]:
+         """
+         Retrieves events for a specific monitor.
+
+         Args:
+             monitor_id: The ID of the monitor to get events for.
+             limit: Maximum number of events to return (default: 10).
+             offset: Offset for pagination (default: 0).
+
+         Returns:
+             A list of MonitorEventResponse objects with details of the monitor events.
+         """
+         params = {
+             "limit": limit,
+             "offset": offset
+         }
+
+         response = self._request("GET", f"/monitor/{monitor_id}/events", params=params)
+         response_json = response.json()
+
+         # Handle DeepRails API response structure
+         if "data" in response_json and isinstance(response_json["data"], list):
+             return [MonitorEventResponse.parse_obj(event) for event in response_json["data"]]
+         else:
+             # Fallback if the response structure is unexpected
+             return []
+
+     def get_monitors(
+         self,
+         *,
+         page: int = 1,
+         limit: int = 20,
+         search: Optional[List[str]] = None,
+         monitor_status: Optional[List[str]] = None,
+         date_from: Optional[str] = None,
+         date_to: Optional[str] = None,
+         sort_by: str = "created_at",
+         sort_order: str = "desc"
+     ) -> MonitorListResponse:
+         """
+         Get a paginated list of monitors with optional filtering.
+
+         Args:
+             page: Page number for pagination (default: 1)
+             limit: Number of items per page (default: 20, max: 100)
+             search: Optional list of free-text search terms
+             monitor_status: Optional list of monitor statuses ("active", "inactive", "all")
+             date_from: Optional filter for monitors from this date (ISO format)
+             date_to: Optional filter for monitors to this date (ISO format)
+             sort_by: Field to sort by (default: "created_at")
+             sort_order: Sort order (default: "desc")
+
+         Returns:
+             A MonitorListResponse object containing monitors, pagination info, and applied filters.
+         """
+         params = {
+             "page": page,
+             "limit": limit,
+             "sort_by": sort_by,
+             "sort_order": sort_order,
+             "search": search,
+             "monitor_status": monitor_status,
+             "date_from": date_from,
+             "date_to": date_to
+         }
+
+         # Remove None values
+         params = {k: v for k, v in params.items() if v is not None}
+
+         response = self._request("GET", "/monitor", params=params)
+         return MonitorListResponse.parse_obj(response.json())
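
The new client imports `DeepRailsAPIError` from `deeprails/exceptions.py`, a file this diff does not include. Below is a minimal sketch of what that class needs to look like, based only on the keyword arguments `_request()` passes when raising it; the published implementation may differ:

```python
# Hypothetical sketch of deeprails/exceptions.py (not part of this diff).
class DeepRailsAPIError(Exception):
    """Raised when a DeepRails API call fails or returns an error status."""

    def __init__(self, status_code: int, error_detail: str):
        self.status_code = status_code
        self.error_detail = error_detail
        super().__init__(f"DeepRails API error {status_code}: {error_detail}")
```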
@@ -0,0 +1,92 @@
+ from typing import List, Optional, Dict, Any
+ from pydantic import BaseModel, Field
+ from datetime import datetime
+
+
+ class EvaluationResponse(BaseModel):
+     """Represents the response for an evaluation from the DeepRails API."""
+     eval_id: str
+     evaluation_status: str
+     guardrail_metrics: Optional[List[str]] = None
+     model_used: Optional[str] = None
+     run_mode: Optional[str] = None
+     model_input: Optional[Dict[str, Any]] = None
+     model_output: Optional[str] = None
+     estimated_cost: Optional[float] = None
+     input_tokens: Optional[int] = None
+     output_tokens: Optional[int] = None
+     nametag: Optional[str] = None
+     progress: Optional[int] = Field(None, ge=0, le=100)
+     start_timestamp: Optional[datetime] = None
+     completion_timestamp: Optional[datetime] = None
+     error_message: Optional[str] = None
+     error_timestamp: Optional[datetime] = None
+     evaluation_result: Optional[Dict[str, Any]] = None
+     evaluation_total_cost: Optional[float] = None
+     created_at: Optional[datetime] = None
+     modified_at: Optional[datetime] = None
+
+     class Config:
+         extra = 'ignore'
+
+ class MonitorResponse(BaseModel):
+     """Represents a monitor from the DeepRails API."""
+     monitor_id: str
+     user_id: str
+     name: str
+     description: Optional[str] = None
+     monitor_status: str
+     created_at: str
+     updated_at: str
+
+     class Config:
+         extra = 'ignore'
+
+ class MonitorEventCreate(BaseModel):
+     """Model for creating a new monitor event."""
+     model_input: Dict[str, Any]
+     model_output: str
+     model_used: Optional[str] = None
+     run_mode: Optional[str] = None
+     guardrail_metrics: List[str]
+     nametag: Optional[str] = None
+     webhook: Optional[str] = None
+
+ class MonitorEventResponse(BaseModel):
+     """Response model for a monitor event."""
+     event_id: str
+     monitor_id: str
+     evaluation_id: str
+     created_at: str
+
+     class Config:
+         extra = 'ignore'
+
+ class PaginationInfo(BaseModel):
+     """Pagination information for list responses."""
+     page: int
+     limit: int
+     total_pages: int
+     total_count: int
+     has_next: bool
+     has_previous: bool
+
+ class MonitorFiltersApplied(BaseModel):
+     """Information about which filters were applied to the monitor query."""
+     search: Optional[List[str]] = None
+     status: Optional[List[str]] = None
+     date_from: Optional[str] = None
+     date_to: Optional[str] = None
+     sort_by: Optional[str] = None
+     sort_order: Optional[str] = None
+
+ class MonitorWithEventCountResponse(MonitorResponse):
+     """Monitor response with event count information."""
+     event_count: int
+     latest_event_modified_at: Optional[str] = None
+
+ class MonitorListResponse(BaseModel):
+     """Response model for a paginated list of monitors."""
+     monitors: List[MonitorWithEventCountResponse]
+     pagination: PaginationInfo
+     filters_applied: MonitorFiltersApplied
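
The list-shaped models above compose as `MonitorListResponse` → `MonitorWithEventCountResponse`, `PaginationInfo`, and `MonitorFiltersApplied`. A quick sketch of how a monitor-list payload would parse, using the same pydantic v1-style `parse_obj` calls the client relies on; the payload values are illustrative, not real API output:

```python
from deeprails.schemas import MonitorListResponse

# Illustrative payload shaped like the models above; not real API output.
sample = {
    "monitors": [
        {
            "monitor_id": "mon-123",
            "user_id": "usr-456",
            "name": "Production Assistant Monitor",
            "monitor_status": "active",
            "created_at": "2025-01-01T00:00:00Z",
            "updated_at": "2025-01-02T00:00:00Z",
            "event_count": 42,
        }
    ],
    "pagination": {
        "page": 1,
        "limit": 20,
        "total_pages": 1,
        "total_count": 1,
        "has_next": False,
        "has_previous": False,
    },
    "filters_applied": {"status": ["active"], "sort_by": "created_at"},
}

listing = MonitorListResponse.parse_obj(sample)
print(listing.monitors[0].event_count)  # 42
print(listing.pagination.total_count)   # 1
```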
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

  [project]
  name = "deeprails"
- version = "0.2.1"
+ version = "0.3.1"
  description = "Python SDK for interacting with the DeepRails API"
  readme = {file = "README.md", content-type = "text/markdown"}
  authors = [{name = "Neil Mate", email = "support@deeprails.ai"}]
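
After upgrading, the version bump recorded above can be confirmed from Python with the standard library:

```python
# Reports which deeprails release is installed; expects "0.3.1" after upgrading.
from importlib.metadata import version

print(version("deeprails"))
```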
@@ -1,101 +0,0 @@
- import httpx
- from typing import List, Optional, Dict, Any
-
- from .schemas import EvaluationResponse
- from .exceptions import DeepRailsAPIError
-
- class DeepRails:
-     """
-     Python SDK client for the DeepRails API.
-     """
-     def __init__(self, token: str, base_url: str = "https://api.deeprails.com"):
-         """
-         Initializes the DeepRails client.
-
-         Args:
-             token: Your DeepRails API key (starts with 'sk_').
-             base_url: The base URL of the DeepRails API.
-         """
-         if not token:
-             raise ValueError("A valid DeepRails API token is required.")
-
-         self._base_url = base_url
-         self._headers = {
-             "Authorization": f"Bearer {token}",
-             "Content-Type": "application/json",
-             "User-Agent": "deeprails-python-sdk/0.2.0"
-         }
-         self._client = httpx.Client(base_url=self._base_url, headers=self._headers, timeout=30.0)
-
-     def _request(self, method: str, endpoint: str, **kwargs) -> httpx.Response:
-         """Helper method to make requests and handle API errors."""
-         try:
-             response = self._client.request(method, endpoint, **kwargs)
-             response.raise_for_status()
-             return response
-         except httpx.HTTPStatusError as e:
-             error_detail = "No detail provided."
-             try:
-                 error_detail = e.response.json().get("detail", error_detail)
-             except Exception:
-                 error_detail = e.response.text
-             raise DeepRailsAPIError(status_code=e.response.status_code, error_detail=error_detail) from e
-         except httpx.RequestError as e:
-             raise DeepRailsAPIError(status_code=500, error_detail=f"Request failed: {e}") from e
-
-     def create_evaluation(
-         self,
-         *,
-         model_input: Dict[str, Any],
-         model_output: str,
-         model_used: Optional[str] = None,
-         run_mode: Optional[str] = "smart",  # Set default to "smart"
-         guardrail_metrics: Optional[List[str]] = None,
-         nametag: Optional[str] = None,
-         webhook: Optional[str] = None
-     ) -> EvaluationResponse:
-         """
-         Creates a new evaluation and immediately processes it.
-
-         Args:
-             model_input: A dictionary containing the inputs for the model.
-                 Must contain a "user_prompt" key.
-             model_output: The response generated by the model you are evaluating.
-             model_used: The name or identifier of the model being evaluated.
-             run_mode: The evaluation mode (e.g., "smart", "dev").
-             guardrail_metrics: A list of metrics to evaluate.
-             nametag: A user-defined name or tag for the evaluation.
-             webhook: A URL to send a POST request to upon evaluation completion.
-
-         Returns:
-             An EvaluationResponse object with the details of the created evaluation.
-         """
-         if "user_prompt" not in model_input:
-             raise ValueError("`model_input` must contain a 'user_prompt' key.")
-
-         payload = {
-             "model_input": model_input,
-             "model_output": model_output,
-             "model_used": model_used,
-             "run_mode": run_mode,
-             "guardrail_metrics": guardrail_metrics,
-             "nametag": nametag,
-             "webhook": webhook,
-         }
-         json_payload = {k: v for k, v in payload.items() if v is not None}
-
-         response = self._request("POST", "/evaluate", json=json_payload)
-         return EvaluationResponse.parse_obj(response.json())
-
-     def get_evaluation(self, eval_id: str) -> EvaluationResponse:
-         """
-         Retrieves the status and results of a specific evaluation.
-
-         Args:
-             eval_id: The unique identifier of the evaluation.
-
-         Returns:
-             An EvaluationResponse object with the full, up-to-date details of the evaluation.
-         """
-         response = self._request("GET", f"/evaluate/{eval_id}")
-         return EvaluationResponse.parse_obj(response.json())
@@ -1,30 +0,0 @@
- from typing import List, Optional, Dict, Any
- from pydantic import BaseModel, Field
- from datetime import datetime
-
-
- class EvaluationResponse(BaseModel):
-     """Represents the response for an evaluation from the DeepRails API."""
-     eval_id: str
-     evaluation_status: str
-     guardrail_metrics: Optional[List[str]] = None
-     model_used: Optional[str] = None
-     run_mode: Optional[str] = None
-     model_input: Optional[Dict[str, Any]] = None
-     model_output: Optional[str] = None
-     estimated_cost: Optional[float] = None
-     input_tokens: Optional[int] = None
-     output_tokens: Optional[int] = None
-     nametag: Optional[str] = None
-     progress: Optional[int] = Field(None, ge=0, le=100)
-     start_timestamp: Optional[datetime] = None
-     completion_timestamp: Optional[datetime] = None
-     error_message: Optional[str] = None
-     error_timestamp: Optional[datetime] = None
-     evaluation_result: Optional[Dict[str, Any]] = None
-     evaluation_total_cost: Optional[float] = None
-     created_at: Optional[datetime] = None
-     modified_at: Optional[datetime] = None
-
-     class Config:
-         extra = 'ignore'