deeprails 1.7.0.tar.gz → 1.9.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deeprails-1.9.0/.release-please-manifest.json +3 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/CHANGELOG.md +16 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/PKG-INFO +33 -1
- {deeprails-1.7.0 → deeprails-1.9.0}/README.md +32 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/api.md +5 -5
- {deeprails-1.7.0 → deeprails-1.9.0}/pyproject.toml +1 -1
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_version.py +1 -1
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/resources/defend.py +10 -10
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/resources/evaluate.py +4 -4
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/resources/monitor.py +23 -23
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/types/__init__.py +3 -3
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/types/defend_create_workflow_params.py +1 -1
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/types/defend_response.py +1 -1
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/types/defend_submit_event_params.py +4 -3
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/types/evaluate_create_params.py +3 -2
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/types/evaluation.py +3 -2
- deeprails-1.7.0/src/deeprails/types/monitor_retrieve_response.py → deeprails-1.9.0/src/deeprails/types/monitor_detail_response.py +4 -17
- deeprails-1.7.0/src/deeprails/types/monitor_submit_event_response.py → deeprails-1.9.0/src/deeprails/types/monitor_event_response.py +2 -15
- deeprails-1.7.0/src/deeprails/types/api_response.py → deeprails-1.9.0/src/deeprails/types/monitor_response.py +2 -15
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/types/monitor_submit_event_params.py +3 -2
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/api_resources/test_defend.py +10 -6
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/api_resources/test_monitor.py +35 -35
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/test_client.py +10 -10
- deeprails-1.7.0/.release-please-manifest.json +0 -3
- {deeprails-1.7.0 → deeprails-1.9.0}/.gitignore +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/CONTRIBUTING.md +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/LICENSE +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/bin/check-release-environment +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/bin/publish-pypi +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/examples/.keep +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/noxfile.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/release-please-config.json +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/requirements-dev.lock +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/requirements.lock +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/__init__.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_base_client.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_client.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_compat.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_constants.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_exceptions.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_files.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_models.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_qs.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_resource.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_response.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_streaming.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_types.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_utils/__init__.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_utils/_compat.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_utils/_datetime_parse.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_utils/_logs.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_utils/_proxy.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_utils/_reflection.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_utils/_resources_proxy.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_utils/_streams.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_utils/_sync.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_utils/_transform.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_utils/_typing.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/_utils/_utils.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/lib/.keep +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/py.typed +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/resources/__init__.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/types/defend_update_workflow_params.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/types/monitor_create_params.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/types/monitor_retrieve_params.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/types/monitor_update_params.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/src/deeprails/types/workflow_event_response.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/__init__.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/api_resources/__init__.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/api_resources/test_evaluate.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/conftest.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/sample_file.txt +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/test_deepcopy.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/test_extract_files.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/test_files.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/test_models.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/test_qs.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/test_required_args.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/test_response.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/test_streaming.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/test_transform.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/test_utils/test_datetime_parse.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/test_utils/test_proxy.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/test_utils/test_typing.py +0 -0
- {deeprails-1.7.0 → deeprails-1.9.0}/tests/utils.py +0 -0
--- deeprails-1.7.0/CHANGELOG.md
+++ deeprails-1.9.0/CHANGELOG.md
@@ -1,5 +1,21 @@
 # Changelog
 
+## 1.9.0 (2025-10-24)
+
+Full Changelog: [v1.8.0...v1.9.0](https://github.com/deeprails/deeprails-sdk-python/compare/v1.8.0...v1.9.0)
+
+### Features
+
+* **api:** remove apiresponse from monitor ([4971a99](https://github.com/deeprails/deeprails-sdk-python/commit/4971a99c7357bebbc5e86a2d76d2be55bb34f5ae))
+
+## 1.8.0 (2025-10-22)
+
+Full Changelog: [v1.7.0...v1.8.0](https://github.com/deeprails/deeprails-sdk-python/compare/v1.7.0...v1.8.0)
+
+### Features
+
+* **api:** manual updates ([4b46121](https://github.com/deeprails/deeprails-sdk-python/commit/4b461213615578ca0382f044201c8343d4e9f167))
+
 ## 1.7.0 (2025-10-22)
 
 Full Changelog: [v1.6.1...v1.7.0](https://github.com/deeprails/deeprails-sdk-python/compare/v1.6.1...v1.7.0)
--- deeprails-1.7.0/PKG-INFO
+++ deeprails-1.9.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: deeprails
-Version: 1.7.0
+Version: 1.9.0
 Summary: The official Python library for the deeprails API
 Project-URL: Homepage, https://docs.deeprails.com/
 Project-URL: Repository, https://github.com/deeprails/deeprails-sdk-python
@@ -69,6 +69,10 @@ defend_response = client.defend.create_workflow(
     improvement_action="fixit",
     name="Push Alert Workflow",
     type="custom",
+    custom_hallucination_threshold_values={
+        "completeness": 0.7,
+        "instruction_adherence": 0.75,
+    },
 )
 print(defend_response.workflow_id)
 ```
@@ -97,6 +101,10 @@ async def main() -> None:
         improvement_action="fixit",
         name="Push Alert Workflow",
         type="custom",
+        custom_hallucination_threshold_values={
+            "completeness": 0.7,
+            "instruction_adherence": 0.75,
+        },
     )
     print(defend_response.workflow_id)
 
@@ -134,6 +142,10 @@ async def main() -> None:
         improvement_action="fixit",
         name="Push Alert Workflow",
         type="custom",
+        custom_hallucination_threshold_values={
+            "completeness": 0.7,
+            "instruction_adherence": 0.75,
+        },
     )
     print(defend_response.workflow_id)
 
@@ -189,6 +201,10 @@ try:
         improvement_action="fixit",
         name="Push Alert Workflow",
         type="custom",
+        custom_hallucination_threshold_values={
+            "completeness": 0.7,
+            "instruction_adherence": 0.75,
+        },
     )
 except deeprails.APIConnectionError as e:
     print("The server could not be reached")
@@ -236,6 +252,10 @@ client.with_options(max_retries=5).defend.create_workflow(
     improvement_action="fixit",
     name="Push Alert Workflow",
     type="custom",
+    custom_hallucination_threshold_values={
+        "completeness": 0.7,
+        "instruction_adherence": 0.75,
+    },
 )
 ```
 
@@ -263,6 +283,10 @@ client.with_options(timeout=5.0).defend.create_workflow(
     improvement_action="fixit",
     name="Push Alert Workflow",
     type="custom",
+    custom_hallucination_threshold_values={
+        "completeness": 0.7,
+        "instruction_adherence": 0.75,
+    },
 )
 ```
 
@@ -308,6 +332,10 @@ response = client.defend.with_raw_response.create_workflow(
     improvement_action="fixit",
     name="Push Alert Workflow",
     type="custom",
+    custom_hallucination_threshold_values={
+        "completeness": 0.7,
+        "instruction_adherence": 0.75,
+    },
 )
 print(response.headers.get('X-My-Header'))
 
@@ -330,6 +358,10 @@ with client.defend.with_streaming_response.create_workflow(
     improvement_action="fixit",
     name="Push Alert Workflow",
     type="custom",
+    custom_hallucination_threshold_values={
+        "completeness": 0.7,
+        "instruction_adherence": 0.75,
+    },
 ) as response:
     print(response.headers.get("X-My-Header"))
 
--- deeprails-1.7.0/README.md
+++ deeprails-1.9.0/README.md
@@ -34,6 +34,10 @@ defend_response = client.defend.create_workflow(
     improvement_action="fixit",
     name="Push Alert Workflow",
     type="custom",
+    custom_hallucination_threshold_values={
+        "completeness": 0.7,
+        "instruction_adherence": 0.75,
+    },
 )
 print(defend_response.workflow_id)
 ```
@@ -62,6 +66,10 @@ async def main() -> None:
         improvement_action="fixit",
         name="Push Alert Workflow",
         type="custom",
+        custom_hallucination_threshold_values={
+            "completeness": 0.7,
+            "instruction_adherence": 0.75,
+        },
     )
     print(defend_response.workflow_id)
 
@@ -99,6 +107,10 @@ async def main() -> None:
         improvement_action="fixit",
         name="Push Alert Workflow",
         type="custom",
+        custom_hallucination_threshold_values={
+            "completeness": 0.7,
+            "instruction_adherence": 0.75,
+        },
     )
     print(defend_response.workflow_id)
 
@@ -154,6 +166,10 @@ try:
         improvement_action="fixit",
         name="Push Alert Workflow",
         type="custom",
+        custom_hallucination_threshold_values={
+            "completeness": 0.7,
+            "instruction_adherence": 0.75,
+        },
     )
 except deeprails.APIConnectionError as e:
     print("The server could not be reached")
@@ -201,6 +217,10 @@ client.with_options(max_retries=5).defend.create_workflow(
     improvement_action="fixit",
     name="Push Alert Workflow",
     type="custom",
+    custom_hallucination_threshold_values={
+        "completeness": 0.7,
+        "instruction_adherence": 0.75,
+    },
 )
 ```
 
@@ -228,6 +248,10 @@ client.with_options(timeout=5.0).defend.create_workflow(
     improvement_action="fixit",
     name="Push Alert Workflow",
     type="custom",
+    custom_hallucination_threshold_values={
+        "completeness": 0.7,
+        "instruction_adherence": 0.75,
+    },
 )
 ```
 
@@ -273,6 +297,10 @@ response = client.defend.with_raw_response.create_workflow(
     improvement_action="fixit",
     name="Push Alert Workflow",
     type="custom",
+    custom_hallucination_threshold_values={
+        "completeness": 0.7,
+        "instruction_adherence": 0.75,
+    },
 )
 print(response.headers.get('X-My-Header'))
 
@@ -295,6 +323,10 @@ with client.defend.with_streaming_response.create_workflow(
     improvement_action="fixit",
     name="Push Alert Workflow",
     type="custom",
+    custom_hallucination_threshold_values={
+        "completeness": 0.7,
+        "instruction_adherence": 0.75,
+    },
 ) as response:
     print(response.headers.get("X-My-Header"))
 
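All eight README hunks above make the same addition: a `custom_hallucination_threshold_values` argument in each `create_workflow` example. For context, here is a minimal sketch of the call as it looks in 1.9.0, combined with the `max_improvement_attempts` parameter this release range also touches. It assumes `client` is an already-constructed deeprails client (set up as in the README's earlier sections, which are not part of this diff); the threshold values come from the diff, while `max_improvement_attempts=5` and the description string are illustrative.

```python
# Sketch only: `client` is assumed to be a configured deeprails client.
defend_response = client.defend.create_workflow(
    improvement_action="fixit",
    name="Push Alert Workflow",
    type="custom",
    description="Guards push-alert copy before it ships.",  # illustrative
    # Per-metric pass thresholds, as added throughout the README examples.
    custom_hallucination_threshold_values={
        "completeness": 0.7,
        "instruction_adherence": 0.75,
    },
    # Cap on improvement-action retries; the docstring says it defaults to 10.
    max_improvement_attempts=5,
)
print(defend_response.workflow_id)
print(defend_response.max_improvement_attempts)  # echoed back on DefendResponse
```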
--- deeprails-1.7.0/api.md
+++ deeprails-1.9.0/api.md
@@ -19,15 +19,15 @@ Methods:
 Types:
 
 ```python
-from deeprails.types import APIResponse, MonitorRetrieveResponse, MonitorSubmitEventResponse
+from deeprails.types import MonitorDetailResponse, MonitorEventResponse, MonitorResponse
 ```
 
 Methods:
 
-- <code title="post /monitor">client.monitor.<a href="./src/deeprails/resources/monitor.py">create</a>(\*\*<a href="src/deeprails/types/monitor_create_params.py">params</a>) -> <a href="./src/deeprails/types/api_response.py">APIResponse</a></code>
-- <code title="get /monitor/{monitor_id}">client.monitor.<a href="./src/deeprails/resources/monitor.py">retrieve</a>(monitor_id, \*\*<a href="src/deeprails/types/monitor_retrieve_params.py">params</a>) -> <a href="./src/deeprails/types/monitor_retrieve_response.py">MonitorRetrieveResponse</a></code>
-- <code title="put /monitor/{monitor_id}">client.monitor.<a href="./src/deeprails/resources/monitor.py">update</a>(monitor_id, \*\*<a href="src/deeprails/types/monitor_update_params.py">params</a>) -> <a href="./src/deeprails/types/api_response.py">APIResponse</a></code>
-- <code title="post /monitor/{monitor_id}/events">client.monitor.<a href="./src/deeprails/resources/monitor.py">submit_event</a>(monitor_id, \*\*<a href="src/deeprails/types/monitor_submit_event_params.py">params</a>) -> <a href="./src/deeprails/types/monitor_submit_event_response.py">MonitorSubmitEventResponse</a></code>
+- <code title="post /monitor">client.monitor.<a href="./src/deeprails/resources/monitor.py">create</a>(\*\*<a href="src/deeprails/types/monitor_create_params.py">params</a>) -> <a href="./src/deeprails/types/monitor_response.py">MonitorResponse</a></code>
+- <code title="get /monitor/{monitor_id}">client.monitor.<a href="./src/deeprails/resources/monitor.py">retrieve</a>(monitor_id, \*\*<a href="src/deeprails/types/monitor_retrieve_params.py">params</a>) -> <a href="./src/deeprails/types/monitor_detail_response.py">MonitorDetailResponse</a></code>
+- <code title="put /monitor/{monitor_id}">client.monitor.<a href="./src/deeprails/resources/monitor.py">update</a>(monitor_id, \*\*<a href="src/deeprails/types/monitor_update_params.py">params</a>) -> <a href="./src/deeprails/types/monitor_response.py">MonitorResponse</a></code>
+- <code title="post /monitor/{monitor_id}/events">client.monitor.<a href="./src/deeprails/resources/monitor.py">submit_event</a>(monitor_id, \*\*<a href="src/deeprails/types/monitor_submit_event_params.py">params</a>) -> <a href="./src/deeprails/types/monitor_event_response.py">MonitorEventResponse</a></code>
 
 # Evaluate
 
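The monitor resource now returns dedicated models rather than the old shared wrapper types. A hedged sketch of how the new return types surface in user code; `client` is again an assumed, pre-constructed deeprails client, the monitor ID and strings are made up, and any further `submit_event` parameters (for example, selecting which guardrail metrics to run) are omitted because they are not visible in this diff.

```python
from deeprails.types import MonitorDetailResponse, MonitorEventResponse

def submit_and_inspect(client, monitor_id: str) -> None:
    # POST /monitor/{monitor_id}/events now returns MonitorEventResponse
    # (formerly MonitorSubmitEventResponse). model_input must contain at
    # least a `user_prompt` field or a `system_prompt` field.
    event: MonitorEventResponse = client.monitor.submit_event(
        monitor_id,
        model_input={"user_prompt": "Summarize today's alerts."},
        model_output="There were three alerts today, all resolved.",
    )
    print(event)

    # GET /monitor/{monitor_id} now returns MonitorDetailResponse
    # (formerly MonitorRetrieveResponse); `limit` caps returned evaluations.
    detail: MonitorDetailResponse = client.monitor.retrieve(monitor_id, limit=10)
    print(detail.monitor_id, detail.user_id)
    if detail.stats is not None:
        print(detail.stats.completed_evaluations)
```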
--- deeprails-1.7.0/src/deeprails/resources/defend.py
+++ deeprails-1.9.0/src/deeprails/resources/defend.py
@@ -54,7 +54,7 @@ class DefendResource(SyncAPIResource):
         automatic_hallucination_tolerance_levels: Dict[str, Literal["low", "medium", "high"]] | Omit = omit,
         custom_hallucination_threshold_values: Dict[str, float] | Omit = omit,
         description: str | Omit = omit,
-        …
+        max_improvement_attempts: int | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -92,7 +92,7 @@ class DefendResource(SyncAPIResource):
 
           description: Description for the workflow.
 
-          …
+          max_improvement_attempts: Max. number of improvement action retries until a given event passes the
               guardrails. Defaults to 10.
 
           extra_headers: Send extra headers
@@ -113,7 +113,7 @@ class DefendResource(SyncAPIResource):
                     "automatic_hallucination_tolerance_levels": automatic_hallucination_tolerance_levels,
                     "custom_hallucination_threshold_values": custom_hallucination_threshold_values,
                     "description": description,
-                    "…
+                    "max_improvement_attempts": max_improvement_attempts,
                 },
                 defend_create_workflow_params.DefendCreateWorkflowParams,
             ),
@@ -214,8 +214,8 @@ class DefendResource(SyncAPIResource):
 
         Args:
           model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
-              contain at least `user_prompt` or `system_prompt` field. For
-              …
+              contain at least a `user_prompt` field or a `system_prompt` field. For the
+              ground_truth_adherence guardrail metric, `ground_truth` should be provided.
 
           model_output: Output generated by the LLM to be evaluated.
 
@@ -332,7 +332,7 @@ class AsyncDefendResource(AsyncAPIResource):
         automatic_hallucination_tolerance_levels: Dict[str, Literal["low", "medium", "high"]] | Omit = omit,
         custom_hallucination_threshold_values: Dict[str, float] | Omit = omit,
         description: str | Omit = omit,
-        …
+        max_improvement_attempts: int | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -370,7 +370,7 @@ class AsyncDefendResource(AsyncAPIResource):
 
           description: Description for the workflow.
 
-          …
+          max_improvement_attempts: Max. number of improvement action retries until a given event passes the
               guardrails. Defaults to 10.
 
           extra_headers: Send extra headers
@@ -391,7 +391,7 @@ class AsyncDefendResource(AsyncAPIResource):
                     "automatic_hallucination_tolerance_levels": automatic_hallucination_tolerance_levels,
                     "custom_hallucination_threshold_values": custom_hallucination_threshold_values,
                     "description": description,
-                    "…
+                    "max_improvement_attempts": max_improvement_attempts,
                 },
                 defend_create_workflow_params.DefendCreateWorkflowParams,
             ),
@@ -492,8 +492,8 @@ class AsyncDefendResource(AsyncAPIResource):
 
         Args:
           model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
-              contain at least `user_prompt` or `system_prompt` field. For
-              …
+              contain at least a `user_prompt` field or a `system_prompt` field. For the
+              ground_truth_adherence guardrail metric, `ground_truth` should be provided.
 
           model_output: Output generated by the LLM to be evaluated.
 
--- deeprails-1.7.0/src/deeprails/resources/evaluate.py
+++ deeprails-1.9.0/src/deeprails/resources/evaluate.py
@@ -76,8 +76,8 @@ class EvaluateResource(SyncAPIResource):
 
         Args:
           model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
-              contain at least `user_prompt` or `system_prompt` field. For
-              …
+              contain at least a `user_prompt` field or a `system_prompt` field. For
+              ground_truth_adherence guardrail metric, `ground_truth` should be provided.
 
           model_output: Output generated by the LLM to be evaluated.
 
@@ -207,8 +207,8 @@ class AsyncEvaluateResource(AsyncAPIResource):
 
         Args:
           model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
-              contain at least `user_prompt` or `system_prompt` field. For
-              …
+              contain at least a `user_prompt` field or a `system_prompt` field. For
+              ground_truth_adherence guardrail metric, `ground_truth` should be provided.
 
           model_output: Output generated by the LLM to be evaluated.
 
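The reworded docstrings in `defend.py`, `evaluate.py`, and `monitor.py` all describe the same `model_input` contract. A small sketch of a dictionary that satisfies it, using the `ModelInput` keys visible elsewhere in this diff (`user_prompt`, `system_prompt`, `ground_truth`); the string values are illustrative.

```python
# Satisfies the documented contract: at least a `user_prompt` field or a
# `system_prompt` field must be present.
model_input = {
    "user_prompt": "List the three steps of the deployment runbook.",
    "system_prompt": "You answer strictly from the provided runbook.",
    # Only needed when the ground_truth_adherence guardrail metric is used.
    "ground_truth": "1. Freeze merges. 2. Tag the release. 3. Roll out by region.",
}
```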
--- deeprails-1.7.0/src/deeprails/resources/monitor.py
+++ deeprails-1.9.0/src/deeprails/resources/monitor.py
@@ -19,9 +19,9 @@ from .._response import (
     async_to_streamed_response_wrapper,
 )
 from .._base_client import make_request_options
-from ..types.api_response import APIResponse
-from ..types.monitor_retrieve_response import MonitorRetrieveResponse
-from ..types.monitor_submit_event_response import MonitorSubmitEventResponse
+from ..types.monitor_response import MonitorResponse
+from ..types.monitor_event_response import MonitorEventResponse
+from ..types.monitor_detail_response import MonitorDetailResponse
 
 __all__ = ["MonitorResource", "AsyncMonitorResource"]
 
@@ -57,7 +57,7 @@ class MonitorResource(SyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
-    ) -> APIResponse:
+    ) -> MonitorResponse:
         """
         Use this endpoint to create a new monitor to evaluate model inputs and outputs
         using guardrails
@@ -87,7 +87,7 @@ class MonitorResource(SyncAPIResource):
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=APIResponse,
+            cast_to=MonitorResponse,
         )
 
     def retrieve(
@@ -101,7 +101,7 @@ class MonitorResource(SyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
-    ) -> MonitorRetrieveResponse:
+    ) -> MonitorDetailResponse:
         """
         Use this endpoint to retrieve the details and evaluations associated with a
         specific monitor
@@ -128,7 +128,7 @@ class MonitorResource(SyncAPIResource):
                 timeout=timeout,
                 query=maybe_transform({"limit": limit}, monitor_retrieve_params.MonitorRetrieveParams),
             ),
-            cast_to=MonitorRetrieveResponse,
+            cast_to=MonitorDetailResponse,
         )
 
     def update(
@@ -144,7 +144,7 @@ class MonitorResource(SyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
-    ) -> APIResponse:
+    ) -> MonitorResponse:
         """
         Use this endpoint to update the name, description, or status of an existing
         monitor
@@ -180,7 +180,7 @@ class MonitorResource(SyncAPIResource):
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=APIResponse,
+            cast_to=MonitorResponse,
         )
 
     def submit_event(
@@ -208,7 +208,7 @@ class MonitorResource(SyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
-    ) -> MonitorSubmitEventResponse:
+    ) -> MonitorEventResponse:
         """
         Use this endpoint to submit a model input and output pair to a monitor for
         evaluation
@@ -220,8 +220,8 @@ class MonitorResource(SyncAPIResource):
           `ground_truth_adherence`, and/or `comprehensive_safety`.
 
           model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
-              contain at least a `user_prompt` or `system_prompt` field. For
-              …
+              contain at least a `user_prompt` field or a `system_prompt` field. For
+              ground_truth_adherence guardrail metric, `ground_truth` should be provided.
 
           model_output: Output generated by the LLM to be evaluated.
 
@@ -260,7 +260,7 @@ class MonitorResource(SyncAPIResource):
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=MonitorSubmitEventResponse,
+            cast_to=MonitorEventResponse,
         )
 
 
@@ -295,7 +295,7 @@ class AsyncMonitorResource(AsyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
-    ) -> APIResponse:
+    ) -> MonitorResponse:
         """
         Use this endpoint to create a new monitor to evaluate model inputs and outputs
         using guardrails
@@ -325,7 +325,7 @@ class AsyncMonitorResource(AsyncAPIResource):
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=APIResponse,
+            cast_to=MonitorResponse,
         )
 
     async def retrieve(
@@ -339,7 +339,7 @@ class AsyncMonitorResource(AsyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
-    ) -> MonitorRetrieveResponse:
+    ) -> MonitorDetailResponse:
         """
         Use this endpoint to retrieve the details and evaluations associated with a
         specific monitor
@@ -366,7 +366,7 @@ class AsyncMonitorResource(AsyncAPIResource):
                 timeout=timeout,
                 query=await async_maybe_transform({"limit": limit}, monitor_retrieve_params.MonitorRetrieveParams),
             ),
-            cast_to=MonitorRetrieveResponse,
+            cast_to=MonitorDetailResponse,
         )
 
     async def update(
@@ -382,7 +382,7 @@ class AsyncMonitorResource(AsyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
-    ) -> APIResponse:
+    ) -> MonitorResponse:
         """
         Use this endpoint to update the name, description, or status of an existing
         monitor
@@ -418,7 +418,7 @@ class AsyncMonitorResource(AsyncAPIResource):
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=APIResponse,
+            cast_to=MonitorResponse,
         )
 
     async def submit_event(
@@ -446,7 +446,7 @@ class AsyncMonitorResource(AsyncAPIResource):
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = not_given,
-    ) -> MonitorSubmitEventResponse:
+    ) -> MonitorEventResponse:
         """
         Use this endpoint to submit a model input and output pair to a monitor for
         evaluation
@@ -458,8 +458,8 @@ class AsyncMonitorResource(AsyncAPIResource):
           `ground_truth_adherence`, and/or `comprehensive_safety`.
 
           model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
-              contain at least a `user_prompt` or `system_prompt` field. For
-              …
+              contain at least a `user_prompt` field or a `system_prompt` field. For
+              ground_truth_adherence guardrail metric, `ground_truth` should be provided.
 
           model_output: Output generated by the LLM to be evaluated.
 
@@ -498,7 +498,7 @@ class AsyncMonitorResource(AsyncAPIResource):
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
-            cast_to=MonitorSubmitEventResponse,
+            cast_to=MonitorEventResponse,
         )
 
 
--- deeprails-1.7.0/src/deeprails/types/__init__.py
+++ deeprails-1.9.0/src/deeprails/types/__init__.py
@@ -3,16 +3,16 @@
 from __future__ import annotations
 
 from .evaluation import Evaluation as Evaluation
-from .api_response import APIResponse as APIResponse
 from .defend_response import DefendResponse as DefendResponse
+from .monitor_response import MonitorResponse as MonitorResponse
 from .monitor_create_params import MonitorCreateParams as MonitorCreateParams
 from .monitor_update_params import MonitorUpdateParams as MonitorUpdateParams
 from .evaluate_create_params import EvaluateCreateParams as EvaluateCreateParams
+from .monitor_event_response import MonitorEventResponse as MonitorEventResponse
+from .monitor_detail_response import MonitorDetailResponse as MonitorDetailResponse
 from .monitor_retrieve_params import MonitorRetrieveParams as MonitorRetrieveParams
 from .workflow_event_response import WorkflowEventResponse as WorkflowEventResponse
-from .monitor_retrieve_response import MonitorRetrieveResponse as MonitorRetrieveResponse
 from .defend_submit_event_params import DefendSubmitEventParams as DefendSubmitEventParams
 from .monitor_submit_event_params import MonitorSubmitEventParams as MonitorSubmitEventParams
 from .defend_create_workflow_params import DefendCreateWorkflowParams as DefendCreateWorkflowParams
 from .defend_update_workflow_params import DefendUpdateWorkflowParams as DefendUpdateWorkflowParams
-from .monitor_submit_event_response import MonitorSubmitEventResponse as MonitorSubmitEventResponse
--- deeprails-1.7.0/src/deeprails/types/defend_create_workflow_params.py
+++ deeprails-1.9.0/src/deeprails/types/defend_create_workflow_params.py
@@ -48,7 +48,7 @@ class DefendCreateWorkflowParams(TypedDict, total=False):
     description: str
     """Description for the workflow."""
 
-    …
+    max_improvement_attempts: int
     """Max.
 
     number of improvement action retries until a given event passes the guardrails.
--- deeprails-1.7.0/src/deeprails/types/defend_response.py
+++ deeprails-1.9.0/src/deeprails/types/defend_response.py
@@ -31,7 +31,7 @@ class DefendResponse(BaseModel):
     Nothing does not attempt any improvement.
     """
 
-    …
+    max_improvement_attempts: Optional[int] = None
     """Max.
 
     number of improvement action retries until a given event passes the guardrails.
--- deeprails-1.7.0/src/deeprails/types/defend_submit_event_params.py
+++ deeprails-1.9.0/src/deeprails/types/defend_submit_event_params.py
@@ -11,8 +11,9 @@ class DefendSubmitEventParams(TypedDict, total=False):
     model_input: Required[ModelInput]
     """A dictionary of inputs sent to the LLM to generate output.
 
-    The dictionary must contain at least `user_prompt` or `system_prompt`
-    …
+    The dictionary must contain at least a `user_prompt` field or a `system_prompt`
+    field. For the ground_truth_adherence guardrail metric, `ground_truth` should be
+    provided.
     """
 
     model_output: Required[str]
@@ -36,7 +37,7 @@ class DefendSubmitEventParams(TypedDict, total=False):
 
 class ModelInput(TypedDict, total=False):
     ground_truth: str
-    """The ground truth for evaluating Ground Truth Adherence guardrail."""
+    """The ground truth for evaluating the Ground Truth Adherence guardrail."""
 
     system_prompt: str
     """The system prompt used to generate the output."""
--- deeprails-1.7.0/src/deeprails/types/evaluate_create_params.py
+++ deeprails-1.9.0/src/deeprails/types/evaluate_create_params.py
@@ -12,8 +12,9 @@ class EvaluateCreateParams(TypedDict, total=False):
     model_input: Required[ModelInput]
    """A dictionary of inputs sent to the LLM to generate output.
 
-    The dictionary must contain at least `user_prompt` or `system_prompt`
-    …
+    The dictionary must contain at least a `user_prompt` field or a `system_prompt`
+    field. For ground_truth_adherence guardrail metric, `ground_truth` should be
+    provided.
     """
 
     model_output: Required[str]
--- deeprails-1.7.0/src/deeprails/types/evaluation.py
+++ deeprails-1.9.0/src/deeprails/types/evaluation.py
@@ -32,8 +32,9 @@ class Evaluation(BaseModel):
     api_model_input: ModelInput = FieldInfo(alias="model_input")
     """A dictionary of inputs sent to the LLM to generate output.
 
-    The dictionary must contain at least `user_prompt` or `system_prompt`
-    …
+    The dictionary must contain at least a `user_prompt` field or a `system_prompt`
+    field. For ground_truth_adherence guardrail metric, `ground_truth` should be
+    provided.
     """
 
     api_model_output: str = FieldInfo(alias="model_output")
--- deeprails-1.7.0/src/deeprails/types/monitor_retrieve_response.py
+++ deeprails-1.9.0/src/deeprails/types/monitor_detail_response.py
@@ -7,10 +7,10 @@ from typing_extensions import Literal
 from .._models import BaseModel
 from .evaluation import Evaluation
 
-__all__ = ["…
+__all__ = ["MonitorDetailResponse", "Stats"]
 
 
-class DataStats(BaseModel):
+class Stats(BaseModel):
     completed_evaluations: Optional[int] = None
     """Number of evaluations that completed successfully."""
 
@@ -27,7 +27,7 @@ class DataStats(BaseModel):
     """Total number of evaluations performed by this monitor."""
 
 
-class Data(BaseModel):
+class MonitorDetailResponse(BaseModel):
     monitor_id: str
     """A unique monitor ID."""
 
@@ -53,7 +53,7 @@ class Data(BaseModel):
     Each one corresponds to a separate monitor event.
     """
 
-    stats: Optional[DataStats] = None
+    stats: Optional[Stats] = None
     """
     Contains five fields used for stats of this monitor: total evaluations,
     completed evaluations, failed evaluations, queued evaluations, and in progress
@@ -65,16 +65,3 @@ class Data(BaseModel):
 
     user_id: Optional[str] = None
     """User ID of the user who created the monitor."""
-
-
-class MonitorRetrieveResponse(BaseModel):
-    success: bool
-    """Represents whether the request was completed successfully."""
-
-    data: Optional[Data] = None
-
-    message: Optional[str] = None
-    """The accompanying message for the request.
-
-    Includes error details when applicable.
-    """