deeprails 1.7.0__py3-none-any.whl → 1.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of deeprails might be problematic.
- deeprails/_version.py +1 -1
- deeprails/resources/defend.py +4 -4
- deeprails/resources/evaluate.py +2 -2
- deeprails/resources/monitor.py +2 -2
- deeprails/types/defend_submit_event_params.py +1 -1
- deeprails/types/evaluate_create_params.py +1 -1
- deeprails/types/evaluation.py +1 -1
- deeprails/types/monitor_submit_event_params.py +1 -1
- {deeprails-1.7.0.dist-info → deeprails-1.8.0.dist-info}/METADATA +33 -1
- {deeprails-1.7.0.dist-info → deeprails-1.8.0.dist-info}/RECORD +12 -12
- {deeprails-1.7.0.dist-info → deeprails-1.8.0.dist-info}/WHEEL +0 -0
- {deeprails-1.7.0.dist-info → deeprails-1.8.0.dist-info}/licenses/LICENSE +0 -0
deeprails/_version.py
CHANGED
deeprails/resources/defend.py
CHANGED
@@ -214,8 +214,8 @@ class DefendResource(SyncAPIResource):
 
         Args:
           model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
-              contain at least `user_prompt` or `system_prompt` field. For
-
+              contain at least `user_prompt` or `system_prompt` field. For the
+              ground_truth_adherence guardrail metric, `ground_truth` should be provided.
 
           model_output: Output generated by the LLM to be evaluated.
 
@@ -492,8 +492,8 @@ class AsyncDefendResource(AsyncAPIResource):
 
         Args:
           model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
-              contain at least `user_prompt` or `system_prompt` field. For
-
+              contain at least `user_prompt` or `system_prompt` field. For the
+              ground_truth_adherence guardrail metric, `ground_truth` should be provided.
 
           model_output: Output generated by the LLM to be evaluated.
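The docstring change above describes the required shape of `model_input` only in prose, so a minimal sketch of a conforming `submit_event` call follows. The client class name `DeepRails`, the `workflow_id` keyword, and all of the values are assumptions for illustration; none of them appear in this diff.

```python
from deeprails import DeepRails  # client class name assumed; not shown in this diff

client = DeepRails()  # assumes the API key is picked up from the environment

# model_input must carry at least `user_prompt` or `system_prompt`; `ground_truth`
# is only needed when the workflow evaluates the ground_truth_adherence metric.
client.defend.submit_event(
    workflow_id="wf_123",  # hypothetical workflow identifier
    model_input={
        "user_prompt": "Summarize the attached contract.",
        "system_prompt": "You are a careful legal assistant.",
        "ground_truth": "The contract runs for 24 months with a 30-day notice period.",
    },
    model_output="The contract runs for 24 months and can be cancelled with 30 days' notice.",
)
```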
deeprails/resources/evaluate.py
CHANGED
@@ -77,7 +77,7 @@ class EvaluateResource(SyncAPIResource):
         Args:
           model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
               contain at least `user_prompt` or `system_prompt` field. For
-
+              ground_truth_adherence guardrail metric, `ground_truth` should be provided.
 
           model_output: Output generated by the LLM to be evaluated.
 
@@ -208,7 +208,7 @@ class AsyncEvaluateResource(AsyncAPIResource):
         Args:
           model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
               contain at least `user_prompt` or `system_prompt` field. For
-
+              ground_truth_adherence guardrail metric, `ground_truth` should be provided.
 
           model_output: Output generated by the LLM to be evaluated.
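The same rule applies to the standalone Evaluate resource; a sketch of a `create` call is below. Only `model_input` and `model_output` are confirmed by this diff (see `EvaluateCreateParams` later in this page); the client class name and any parameter for selecting guardrail metrics are assumptions and are left out or flagged.

```python
from deeprails import DeepRails  # client class name assumed; not shown in this diff

client = DeepRails()

# `ground_truth` is included because the updated docstring ties it to the
# ground_truth_adherence guardrail metric. How metrics are selected for the
# evaluation is not visible in this diff, so no such parameter is shown.
evaluation = client.evaluate.create(
    model_input={
        "user_prompt": "In what year was the Eiffel Tower completed?",
        "ground_truth": "1889",
    },
    model_output="The Eiffel Tower was completed in 1889.",
)
print(evaluation)
```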
deeprails/resources/monitor.py
CHANGED
@@ -221,7 +221,7 @@ class MonitorResource(SyncAPIResource):
 
           model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
               contain at least a `user_prompt` or `system_prompt` field. For
-
+              ground_truth_adherence guardrail metric, `ground_truth` should be provided.
 
           model_output: Output generated by the LLM to be evaluated.
 
@@ -459,7 +459,7 @@ class AsyncMonitorResource(AsyncAPIResource):
 
           model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
               contain at least a `user_prompt` or `system_prompt` field. For
-
+              ground_truth_adherence guardrail metric, `ground_truth` should be provided.
 
           model_output: Output generated by the LLM to be evaluated.
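The Monitor docstring spells out the minimum case: one of `user_prompt` or `system_prompt` is enough when ground_truth_adherence is not involved. A minimal sketch, again with an assumed client class name and an assumed `monitor_id` keyword:

```python
from deeprails import DeepRails  # client class name assumed; not shown in this diff

client = DeepRails()

# Minimal model_input: a single `user_prompt` satisfies the documented requirement;
# `ground_truth` would only be added for the ground_truth_adherence metric.
client.monitor.submit_event(
    monitor_id="mon_123",  # hypothetical monitor identifier
    model_input={"user_prompt": "Translate 'good morning' into French."},
    model_output="Bonjour.",
)
```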
deeprails/types/defend_submit_event_params.py
CHANGED
@@ -12,7 +12,7 @@ class DefendSubmitEventParams(TypedDict, total=False):
     """A dictionary of inputs sent to the LLM to generate output.
 
     The dictionary must contain at least `user_prompt` or `system_prompt` field. For
-
+    the ground_truth_adherence guardrail metric, `ground_truth` should be provided.
     """
 
     model_output: Required[str]
deeprails/types/evaluate_create_params.py
CHANGED
@@ -13,7 +13,7 @@ class EvaluateCreateParams(TypedDict, total=False):
     """A dictionary of inputs sent to the LLM to generate output.
 
     The dictionary must contain at least `user_prompt` or `system_prompt` field. For
-
+    ground_truth_adherence guardrail metric, `ground_truth` should be provided.
     """
 
     model_output: Required[str]
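Because these request params are `TypedDict`s with `model_output` marked `Required`, payloads can be type-checked before they are sent. A small sketch follows; the `deeprails.types` import path is an assumption (the diff shows only the module files, not the re-exports), and the example values are placeholders.

```python
# Assumes DefendSubmitEventParams and EvaluateCreateParams are re-exported from
# deeprails.types; this diff does not show the package's __init__ exports.
from deeprails.types import DefendSubmitEventParams, EvaluateCreateParams

defend_event: DefendSubmitEventParams = {
    "model_input": {
        "user_prompt": "Summarize the meeting notes.",
        # Needed only for the ground_truth_adherence guardrail metric.
        "ground_truth": "Three action items were agreed.",
    },
    "model_output": "The team agreed on three action items.",  # Required[str]
}

eval_request: EvaluateCreateParams = {
    "model_input": {"system_prompt": "Answer concisely."},
    "model_output": "Paris is the capital of France.",
}
```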
deeprails/types/evaluation.py
CHANGED
@@ -33,7 +33,7 @@ class Evaluation(BaseModel):
     """A dictionary of inputs sent to the LLM to generate output.
 
     The dictionary must contain at least `user_prompt` or `system_prompt` field. For
-
+    ground_truth_adherence guardrail metric, `ground_truth` should be provided.
     """
 
     api_model_output: str = FieldInfo(alias="model_output")
deeprails/types/monitor_submit_event_params.py
CHANGED
@@ -32,7 +32,7 @@ class MonitorSubmitEventParams(TypedDict, total=False):
     """A dictionary of inputs sent to the LLM to generate output.
 
     The dictionary must contain at least a `user_prompt` or `system_prompt` field.
-    For
+    For ground_truth_adherence guardrail metric, `ground_truth` should be provided.
     """
 
     model_output: Required[str]
{deeprails-1.7.0.dist-info → deeprails-1.8.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: deeprails
-Version: 1.7.0
+Version: 1.8.0
 Summary: The official Python library for the deeprails API
 Project-URL: Homepage, https://docs.deeprails.com/
 Project-URL: Repository, https://github.com/deeprails/deeprails-sdk-python
@@ -69,6 +69,10 @@ defend_response = client.defend.create_workflow(
     improvement_action="fixit",
     name="Push Alert Workflow",
     type="custom",
+    custom_hallucination_threshold_values={
+        "completeness": 0.7,
+        "instruction_adherence": 0.75,
+    },
 )
 print(defend_response.workflow_id)
 ```
The same four added lines appear in every other `create_workflow(...)` example in the README; only the hunk headers are listed here:
@@ -97,6 +101,10 @@ async def main() -> None:
@@ -134,6 +142,10 @@ async def main() -> None:
@@ -189,6 +201,10 @@ try:
@@ -236,6 +252,10 @@ client.with_options(max_retries=5).defend.create_workflow(
@@ -263,6 +283,10 @@ client.with_options(timeout=5.0).defend.create_workflow(
@@ -308,6 +332,10 @@ response = client.defend.with_raw_response.create_workflow(
@@ -330,6 +358,10 @@ with client.defend.with_streaming_response.create_workflow(
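Pulled out of the surrounding README examples, the new argument on its own looks like the sketch below. Only the keyword arguments visible in the hunks above are shown; the client class name `DeepRails` is an assumption, and the full README call may pass further parameters that this diff does not display.

```python
from deeprails import DeepRails  # client class name assumed; not shown in this diff

client = DeepRails()

# The README now passes per-metric thresholds for custom workflows; the metric
# names and values (0.7 / 0.75) are taken directly from the diff above.
defend_response = client.defend.create_workflow(
    improvement_action="fixit",
    name="Push Alert Workflow",
    type="custom",
    custom_hallucination_threshold_values={
        "completeness": 0.7,
        "instruction_adherence": 0.75,
    },
)
print(defend_response.workflow_id)
```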
{deeprails-1.7.0.dist-info → deeprails-1.8.0.dist-info}/RECORD
CHANGED
@@ -11,7 +11,7 @@ deeprails/_resource.py,sha256=7RXX5KZr4j0TIE66vnduHp7p9Yf9X0FyDDECuvRHARg,1118
 deeprails/_response.py,sha256=yj0HJDU91WPpiczwi6CBOLAl_bqf4I_I96vWMAwx6Fg,28806
 deeprails/_streaming.py,sha256=hCp5bK9dyw2TyrVL69m-6qGC-QtGYwhXmFzITCWPgAs,10112
 deeprails/_types.py,sha256=XR3mad9NsGqZsjrd1VVJ657-4O4kwyw9Qzg4M3i6Vh0,7239
-deeprails/_version.py,sha256=
+deeprails/_version.py,sha256=V8Z_1JKT_bgLyhd5iqPYjkEjv9Mii5vfV23cYHHyts8,161
 deeprails/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 deeprails/_utils/__init__.py,sha256=7fch0GT9zpNnErbciSpUNa-SjTxxjY6kxHxKMOM4AGs,2305
 deeprails/_utils/_compat.py,sha256=D8gtAvjJQrDWt9upS0XaG9Rr5l1QhiAx_I_1utT_tt0,1195
@@ -27,25 +27,25 @@ deeprails/_utils/_typing.py,sha256=N_5PPuFNsaygbtA_npZd98SVN1LQQvFTKL6bkWPBZGU,4
 deeprails/_utils/_utils.py,sha256=0dDqauUbVZEXV0NVl7Bwu904Wwo5eyFCZpQThhFNhyA,12253
 deeprails/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224
 deeprails/resources/__init__.py,sha256=ha0jL9Et8fHzPdkTa7ecihYapOC4I6O-PHf2X9igprE,1491
-deeprails/resources/defend.py,sha256=
-deeprails/resources/evaluate.py,sha256=
-deeprails/resources/monitor.py,sha256=
+deeprails/resources/defend.py,sha256=TRuF6F3YEmgGBkJRxf8Mx6JofmZjstUeLTUpHV0S4gQ,28158
+deeprails/resources/evaluate.py,sha256=uoElGfpFyH-bO8cbq-2xM2bgfKsQN3P6YF8nmEAPugg,13005
+deeprails/resources/monitor.py,sha256=bFCz3t8-wweF76KZoD3n8jHY-xtL8ZA7S-3eI1FiHik,22517
 deeprails/types/__init__.py,sha256=tLO-5DMMKt-F4qQYht3F-RFgLbthP-8a36853IhNoGI,1267
 deeprails/types/api_response.py,sha256=eHEQeL677tvm1RK0A_S03EAoprQbJzmHspiKrtjKRt4,1232
 deeprails/types/defend_create_workflow_params.py,sha256=b2wWr-DtON4XcbPyKD45oO7016F4ji5LoIqyNdM3B5k,2101
 deeprails/types/defend_response.py,sha256=sIS9nBgW2a_yBYxVWo33pgy1IKEn-m4AzamOr5Eov5g,1602
-deeprails/types/defend_submit_event_params.py,sha256=
+deeprails/types/defend_submit_event_params.py,sha256=nbrszvTg_I8WEZJe6hPzP22pCC8Fkp60nmgH3p211uU,1521
 deeprails/types/defend_update_workflow_params.py,sha256=QH2k7EDMLub3mW1lPV5SUoKDHW_T2arSo-RGHLterwo,373
-deeprails/types/evaluate_create_params.py,sha256=
-deeprails/types/evaluation.py,sha256=
+deeprails/types/evaluate_create_params.py,sha256=B_WthHkJJzH7vjvKYd_PhJcQTilOhUsMBd6mz5hxGbQ,1987
+deeprails/types/evaluation.py,sha256=GAzuZMOD51bCNVLUeROO4Vi26j3DE3kEIRFf9-UfZjQ,3314
 deeprails/types/monitor_create_params.py,sha256=kTSj-PhuzpT-HPDTQJemRWfd8w32foUMH9FQZj8symk,384
 deeprails/types/monitor_retrieve_params.py,sha256=PEsRmbd-81z4pJvhfi4JbrQWNzmeiLkoNsTUoPZ6kFY,352
 deeprails/types/monitor_retrieve_response.py,sha256=BZp7-6PFVdqYU5ZDhbr1Eao3kU132zTm9idgoaA65Gg,2245
-deeprails/types/monitor_submit_event_params.py,sha256=
+deeprails/types/monitor_submit_event_params.py,sha256=d2AzwBe-d0tBfvnknfy0fGCrCCd0GC2kJb1AXbje3cg,2152
 deeprails/types/monitor_submit_event_response.py,sha256=qlraxIJaclNSR_JOizMPj9gOiz-0x7lIChSX3DmFllM,867
 deeprails/types/monitor_update_params.py,sha256=gJyFFxT_u_iWABknuKnLpPl9r-VPfCcGtOAmh6sPwUw,550
 deeprails/types/workflow_event_response.py,sha256=mIzOCnYJg4TDSq_tG_0WfA0_Gmc9-0q-befyookfUFM,867
-deeprails-1.
-deeprails-1.
-deeprails-1.
-deeprails-1.
+deeprails-1.8.0.dist-info/METADATA,sha256=taDsSz0VVSWTfKlHscWkFvDJPNLZz-ALaMMo9MfLjsc,12080
+deeprails-1.8.0.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+deeprails-1.8.0.dist-info/licenses/LICENSE,sha256=rFTxPcYE516UQLju2SCY1r2pSDDfodL0-ZvxF_fgueg,11339
+deeprails-1.8.0.dist-info/RECORD,,
{deeprails-1.7.0.dist-info → deeprails-1.8.0.dist-info}/WHEEL
File without changes
{deeprails-1.7.0.dist-info → deeprails-1.8.0.dist-info}/licenses/LICENSE
File without changes