deeprails 0.3.2__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. deeprails/__init__.py +102 -1
  2. deeprails/_base_client.py +1995 -0
  3. deeprails/_client.py +419 -0
  4. deeprails/_compat.py +219 -0
  5. deeprails/_constants.py +14 -0
  6. deeprails/_exceptions.py +108 -0
  7. deeprails/_files.py +123 -0
  8. deeprails/_models.py +835 -0
  9. deeprails/_qs.py +150 -0
  10. deeprails/_resource.py +43 -0
  11. deeprails/_response.py +830 -0
  12. deeprails/_streaming.py +333 -0
  13. deeprails/_types.py +260 -0
  14. deeprails/_utils/__init__.py +64 -0
  15. deeprails/_utils/_compat.py +45 -0
  16. deeprails/_utils/_datetime_parse.py +136 -0
  17. deeprails/_utils/_logs.py +25 -0
  18. deeprails/_utils/_proxy.py +65 -0
  19. deeprails/_utils/_reflection.py +42 -0
  20. deeprails/_utils/_resources_proxy.py +24 -0
  21. deeprails/_utils/_streams.py +12 -0
  22. deeprails/_utils/_sync.py +86 -0
  23. deeprails/_utils/_transform.py +457 -0
  24. deeprails/_utils/_typing.py +156 -0
  25. deeprails/_utils/_utils.py +421 -0
  26. deeprails/_version.py +4 -0
  27. deeprails/lib/.keep +4 -0
  28. deeprails/py.typed +0 -0
  29. deeprails/resources/__init__.py +47 -0
  30. deeprails/resources/defend.py +671 -0
  31. deeprails/resources/evaluate.py +334 -0
  32. deeprails/resources/monitor.py +566 -0
  33. deeprails/types/__init__.py +18 -0
  34. deeprails/types/api_response.py +50 -0
  35. deeprails/types/defend_create_workflow_params.py +56 -0
  36. deeprails/types/defend_response.py +50 -0
  37. deeprails/types/defend_submit_event_params.py +44 -0
  38. deeprails/types/defend_update_workflow_params.py +18 -0
  39. deeprails/types/evaluate_create_params.py +60 -0
  40. deeprails/types/evaluation.py +113 -0
  41. deeprails/types/monitor_create_params.py +15 -0
  42. deeprails/types/monitor_retrieve_params.py +12 -0
  43. deeprails/types/monitor_retrieve_response.py +81 -0
  44. deeprails/types/monitor_submit_event_params.py +63 -0
  45. deeprails/types/monitor_submit_event_response.py +36 -0
  46. deeprails/types/monitor_update_params.py +22 -0
  47. deeprails/types/workflow_event_response.py +33 -0
  48. deeprails-1.2.0.dist-info/METADATA +377 -0
  49. deeprails-1.2.0.dist-info/RECORD +51 -0
  50. {deeprails-0.3.2.dist-info → deeprails-1.2.0.dist-info}/WHEEL +1 -1
  51. deeprails-1.2.0.dist-info/licenses/LICENSE +201 -0
  52. deeprails/client.py +0 -285
  53. deeprails/exceptions.py +0 -10
  54. deeprails/schemas.py +0 -92
  55. deeprails-0.3.2.dist-info/METADATA +0 -235
  56. deeprails-0.3.2.dist-info/RECORD +0 -8
  57. deeprails-0.3.2.dist-info/licenses/LICENSE +0 -11
@@ -0,0 +1,334 @@
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import List
6
+ from typing_extensions import Literal
7
+
8
+ import httpx
9
+
10
+ from ..types import evaluate_create_params
11
+ from .._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
12
+ from .._utils import maybe_transform, async_maybe_transform
13
+ from .._compat import cached_property
14
+ from .._resource import SyncAPIResource, AsyncAPIResource
15
+ from .._response import (
16
+ to_raw_response_wrapper,
17
+ to_streamed_response_wrapper,
18
+ async_to_raw_response_wrapper,
19
+ async_to_streamed_response_wrapper,
20
+ )
21
+ from .._base_client import make_request_options
22
+ from ..types.evaluation import Evaluation
23
+
24
+ __all__ = ["EvaluateResource", "AsyncEvaluateResource"]
25
+
26
+
27
class EvaluateResource(SyncAPIResource):
    """Synchronous resource for the `/evaluate` API endpoints.

    Provides `create` (submit an input/output pair for guardrail evaluation)
    and `retrieve` (fetch an evaluation record by ID). Access via
    `client.evaluate`; use `.with_raw_response` / `.with_streaming_response`
    for alternate response handling.
    """

    @cached_property
    def with_raw_response(self) -> EvaluateResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/deeprails/deeprails-sdk-python#accessing-raw-response-data-eg-headers
        """
        return EvaluateResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> EvaluateResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/deeprails/deeprails-sdk-python#with_streaming_response
        """
        return EvaluateResourceWithStreamingResponse(self)

    def create(
        self,
        *,
        model_input: evaluate_create_params.ModelInput,
        model_output: str,
        run_mode: Literal["precision_plus", "precision", "smart", "economy"],
        guardrail_metrics: List[
            Literal[
                "correctness",
                "completeness",
                "instruction_adherence",
                "context_adherence",
                "ground_truth_adherence",
                "comprehensive_safety",
            ]
        ]
        | Omit = omit,
        model_used: str | Omit = omit,
        nametag: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Evaluation:
        """
        Use this endpoint to evaluate a model's input and output pair against selected
        guardrail metrics

        Args:
          model_input: A dictionary of inputs sent to the LLM to generate output. This must contain a
              `user_prompt` field and an optional `context` field. Additional properties are
              allowed.

          model_output: Output generated by the LLM to be evaluated.

          run_mode: Run mode for the evaluation. The run mode allows the user to optimize for speed,
              accuracy, and cost by determining which models are used to evaluate the event.
              Available run modes include `precision_plus`, `precision`, `smart`, and
              `economy`. Defaults to `smart`.

          guardrail_metrics: An array of guardrail metrics that the model input and output pair will be
              evaluated on. For non-enterprise users, these will be limited to the allowed
              guardrail metrics.

          model_used: Model ID used to generate the output, like `gpt-4o` or `o3`.

          nametag: An optional, user-defined tag for the evaluation.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # `maybe_transform` drops omitted (`omit`) values and converts the dict
        # to the wire format declared by EvaluateCreateParams before posting.
        return self._post(
            "/evaluate",
            body=maybe_transform(
                {
                    "model_input": model_input,
                    "model_output": model_output,
                    "run_mode": run_mode,
                    "guardrail_metrics": guardrail_metrics,
                    "model_used": model_used,
                    "nametag": nametag,
                },
                evaluate_create_params.EvaluateCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Evaluation,
        )

    def retrieve(
        self,
        eval_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Evaluation:
        """
        Retrieve the evaluation record for a given evaluation ID.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty ID, which would otherwise produce a malformed URL path.
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        return self._get(
            f"/evaluate/{eval_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Evaluation,
        )
156
+
157
+
158
class AsyncEvaluateResource(AsyncAPIResource):
    """Asynchronous counterpart of `EvaluateResource` for the `/evaluate` endpoints.

    Same interface as the sync resource, but `create` and `retrieve` are
    coroutines and must be awaited.
    """

    @cached_property
    def with_raw_response(self) -> AsyncEvaluateResourceWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/deeprails/deeprails-sdk-python#accessing-raw-response-data-eg-headers
        """
        return AsyncEvaluateResourceWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncEvaluateResourceWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/deeprails/deeprails-sdk-python#with_streaming_response
        """
        return AsyncEvaluateResourceWithStreamingResponse(self)

    async def create(
        self,
        *,
        model_input: evaluate_create_params.ModelInput,
        model_output: str,
        run_mode: Literal["precision_plus", "precision", "smart", "economy"],
        guardrail_metrics: List[
            Literal[
                "correctness",
                "completeness",
                "instruction_adherence",
                "context_adherence",
                "ground_truth_adherence",
                "comprehensive_safety",
            ]
        ]
        | Omit = omit,
        model_used: str | Omit = omit,
        nametag: str | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Evaluation:
        """
        Use this endpoint to evaluate a model's input and output pair against selected
        guardrail metrics

        Args:
          model_input: A dictionary of inputs sent to the LLM to generate output. This must contain a
              `user_prompt` field and an optional `context` field. Additional properties are
              allowed.

          model_output: Output generated by the LLM to be evaluated.

          run_mode: Run mode for the evaluation. The run mode allows the user to optimize for speed,
              accuracy, and cost by determining which models are used to evaluate the event.
              Available run modes include `precision_plus`, `precision`, `smart`, and
              `economy`. Defaults to `smart`.

          guardrail_metrics: An array of guardrail metrics that the model input and output pair will be
              evaluated on. For non-enterprise users, these will be limited to the allowed
              guardrail metrics.

          model_used: Model ID used to generate the output, like `gpt-4o` or `o3`.

          nametag: An optional, user-defined tag for the evaluation.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # `async_maybe_transform` drops omitted (`omit`) values and converts the
        # dict to the wire format declared by EvaluateCreateParams before posting.
        return await self._post(
            "/evaluate",
            body=await async_maybe_transform(
                {
                    "model_input": model_input,
                    "model_output": model_output,
                    "run_mode": run_mode,
                    "guardrail_metrics": guardrail_metrics,
                    "model_used": model_used,
                    "nametag": nametag,
                },
                evaluate_create_params.EvaluateCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Evaluation,
        )

    async def retrieve(
        self,
        eval_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Evaluation:
        """
        Retrieve the evaluation record for a given evaluation ID.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        # Guard against an empty ID, which would otherwise produce a malformed URL path.
        if not eval_id:
            raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
        return await self._get(
            f"/evaluate/{eval_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Evaluation,
        )
287
+
288
+
289
class EvaluateResourceWithRawResponse:
    """View over `EvaluateResource` whose methods return the raw HTTP response."""

    def __init__(self, evaluate: EvaluateResource) -> None:
        self._evaluate = evaluate
        # Re-expose each endpoint method wrapped to yield the raw response object.
        self.create = to_raw_response_wrapper(evaluate.create)
        self.retrieve = to_raw_response_wrapper(evaluate.retrieve)
299
+
300
+
301
class AsyncEvaluateResourceWithRawResponse:
    """View over `AsyncEvaluateResource` whose methods return the raw HTTP response."""

    def __init__(self, evaluate: AsyncEvaluateResource) -> None:
        self._evaluate = evaluate
        # Re-expose each endpoint coroutine wrapped to yield the raw response object.
        self.create = async_to_raw_response_wrapper(evaluate.create)
        self.retrieve = async_to_raw_response_wrapper(evaluate.retrieve)
311
+
312
+
313
class EvaluateResourceWithStreamingResponse:
    """View over `EvaluateResource` whose methods stream the response body lazily."""

    def __init__(self, evaluate: EvaluateResource) -> None:
        self._evaluate = evaluate
        # Re-expose each endpoint method wrapped for streamed (non-eager) reads.
        self.create = to_streamed_response_wrapper(evaluate.create)
        self.retrieve = to_streamed_response_wrapper(evaluate.retrieve)
323
+
324
+
325
class AsyncEvaluateResourceWithStreamingResponse:
    """View over `AsyncEvaluateResource` whose methods stream the response body lazily."""

    def __init__(self, evaluate: AsyncEvaluateResource) -> None:
        self._evaluate = evaluate
        # Re-expose each endpoint coroutine wrapped for streamed (non-eager) reads.
        self.create = async_to_streamed_response_wrapper(evaluate.create)
        self.retrieve = async_to_streamed_response_wrapper(evaluate.retrieve)