deeprails 1.4.1__py3-none-any.whl → 1.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



deeprails/_version.py CHANGED
@@ -1,4 +1,4 @@
  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

  __title__ = "deeprails"
- __version__ = "1.4.1" # x-release-please-version
+ __version__ = "1.6.0" # x-release-please-version
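A quick way to confirm which build is installed after upgrading is to read the module this hunk touches; both `__title__` and `__version__` are defined there:

from deeprails._version import __title__, __version__

# Prints the bumped version string from the hunk above, e.g. "deeprails 1.6.0".
print(f"{__title__} {__version__}")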
deeprails/resources/defend.py CHANGED
@@ -213,9 +213,9 @@ class DefendResource(SyncAPIResource):
  evaluation

  Args:
- model_input: A dictionary of inputs sent to the LLM to generate output. This must contain a
- `user_prompt` field and an optional `context` field. Additional properties are
- allowed.
+ model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
+ contain at least one of `user_prompt` or `system_prompt`. For
+ the `ground_truth_adherence` guardrail metric, `ground_truth` should be provided.

  model_output: Output generated by the LLM to be evaluated.

@@ -491,9 +491,9 @@ class AsyncDefendResource(AsyncAPIResource):
  evaluation

  Args:
- model_input: A dictionary of inputs sent to the LLM to generate output. This must contain a
- `user_prompt` field and an optional `context` field. Additional properties are
- allowed.
+ model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
+ contain at least one of `user_prompt` or `system_prompt`. For
+ the `ground_truth_adherence` guardrail metric, `ground_truth` should be provided.

  model_output: Output generated by the LLM to be evaluated.

deeprails/resources/evaluate.py CHANGED
@@ -75,9 +75,9 @@ class EvaluateResource(SyncAPIResource):
  guardrail metrics

  Args:
- model_input: A dictionary of inputs sent to the LLM to generate output. This must contain a
- `user_prompt` field and an optional `context` field. Additional properties are
- allowed.
+ model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
+ contain at least one of `user_prompt` or `system_prompt`. For
+ the `ground_truth_adherence` guardrail metric, `ground_truth` should be provided.

  model_output: Output generated by the LLM to be evaluated.

@@ -206,9 +206,9 @@ class AsyncEvaluateResource(AsyncAPIResource):
  guardrail metrics

  Args:
- model_input: A dictionary of inputs sent to the LLM to generate output. This must contain a
- `user_prompt` field and an optional `context` field. Additional properties are
- allowed.
+ model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
+ contain at least one of `user_prompt` or `system_prompt`. For
+ the `ground_truth_adherence` guardrail metric, `ground_truth` should be provided.

  model_output: Output generated by the LLM to be evaluated.

deeprails/resources/monitor.py CHANGED
@@ -219,9 +219,9 @@ class MonitorResource(SyncAPIResource):
  `completeness`, `instruction_adherence`, `context_adherence`,
  `ground_truth_adherence`, and/or `comprehensive_safety`.

- model_input: A dictionary of inputs sent to the LLM to generate output. This must contain a
- `user_prompt` field and an optional `context` field. Additional properties are
- allowed.
+ model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
+ contain at least one of `user_prompt` or `system_prompt`. For
+ the `ground_truth_adherence` guardrail metric, `ground_truth` should be provided.

  model_output: Output generated by the LLM to be evaluated.

@@ -457,9 +457,9 @@ class AsyncMonitorResource(AsyncAPIResource):
  `completeness`, `instruction_adherence`, `context_adherence`,
  `ground_truth_adherence`, and/or `comprehensive_safety`.

- model_input: A dictionary of inputs sent to the LLM to generate output. This must contain a
- `user_prompt` field and an optional `context` field. Additional properties are
- allowed.
+ model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
+ contain at least one of `user_prompt` or `system_prompt`. For
+ the `ground_truth_adherence` guardrail metric, `ground_truth` should be provided.

  model_output: Output generated by the LLM to be evaluated.
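The docstring change above is the substance of this release: `model_input` no longer carries a required `user_prompt` plus a free-form `context`, but instead any of `user_prompt`, `system_prompt`, and `ground_truth`. A minimal sketch of a call under the new shape, assuming the client reads its API key from the environment as in the README and that `evaluate.create` accepts the keyword arguments generated in `EvaluateCreateParams`; the prompt strings are illustrative, and other parameters (such as which guardrail metrics to run) are omitted:

from deeprails import Deeprails

client = Deeprails()  # assumes the API key is picked up from the environment

# At least one of user_prompt or system_prompt must be present; the old
# free-form `context` key is no longer part of the declared shape.
evaluation = client.evaluate.create(
    model_input={
        "user_prompt": "Summarize the incident report in two sentences.",
        "system_prompt": "You are a concise incident analyst.",
    },
    model_output="Two users were affected; service was restored within ten minutes.",
)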
 
deeprails/types/defend_submit_event_params.py CHANGED
@@ -2,8 +2,7 @@

  from __future__ import annotations

- from typing import Dict, Union
- from typing_extensions import Literal, Required, TypeAlias, TypedDict
+ from typing_extensions import Literal, Required, TypedDict

  __all__ = ["DefendSubmitEventParams", "ModelInput"]

@@ -12,8 +11,8 @@ class DefendSubmitEventParams(TypedDict, total=False):
  model_input: Required[ModelInput]
  """A dictionary of inputs sent to the LLM to generate output.

- This must contain a `user_prompt` field and an optional `context` field.
- Additional properties are allowed.
+ The dictionary must contain at least one of `user_prompt` or `system_prompt`.
+ For the `ground_truth_adherence` guardrail metric, `ground_truth` should be provided.
  """

  model_output: Required[str]
@@ -35,10 +34,12 @@ class DefendSubmitEventParams(TypedDict, total=False):
  """An optional, user-defined tag for the event."""


- class ModelInputTyped(TypedDict, total=False):
- user_prompt: Required[str]
+ class ModelInput(TypedDict, total=False):
+ ground_truth: str
+ """The ground truth for evaluating Ground Truth Adherence guardrail."""

- context: str
+ system_prompt: str
+ """The system prompt used to generate the output."""

-
- ModelInput: TypeAlias = Union[ModelInputTyped, Dict[str, object]]
+ user_prompt: str
+ """The user prompt used to generate the output."""
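Because `ModelInput` is now a plain `TypedDict` rather than a union with `Dict[str, object]`, type checkers will reject stray keys such as the removed `context` field. A small sketch of building the new shape (the strings are made up):

from deeprails.types.defend_submit_event_params import ModelInput

# All three keys are optional at the type level; per the updated docstring,
# the API expects at least one of user_prompt or system_prompt.
model_input: ModelInput = {
    "user_prompt": "Draft a push alert for the outage.",
    "system_prompt": "Keep alerts under 140 characters.",
}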
deeprails/types/evaluate_create_params.py CHANGED
@@ -2,8 +2,8 @@

  from __future__ import annotations

- from typing import Dict, List, Union
- from typing_extensions import Literal, Required, TypeAlias, TypedDict
+ from typing import List
+ from typing_extensions import Literal, Required, TypedDict

  __all__ = ["EvaluateCreateParams", "ModelInput"]

@@ -12,8 +12,8 @@ class EvaluateCreateParams(TypedDict, total=False):
  model_input: Required[ModelInput]
  """A dictionary of inputs sent to the LLM to generate output.

- This must contain a `user_prompt` field and an optional `context` field.
- Additional properties are allowed.
+ The dictionary must contain at least one of `user_prompt` or `system_prompt`.
+ For the `ground_truth_adherence` guardrail metric, `ground_truth` should be provided.
  """

  model_output: Required[str]
@@ -51,10 +51,12 @@ class EvaluateCreateParams(TypedDict, total=False):
  """An optional, user-defined tag for the evaluation."""


- class ModelInputTyped(TypedDict, total=False):
- user_prompt: Required[str]
+ class ModelInput(TypedDict, total=False):
+ ground_truth: str
+ """The ground truth for evaluating Ground Truth Adherence guardrail."""

- context: str
+ system_prompt: str
+ """The system prompt used to generate the output."""

-
- ModelInput: TypeAlias = Union[ModelInputTyped, Dict[str, object]]
+ user_prompt: str
+ """The user prompt used to generate the output."""
deeprails/types/evaluation.py CHANGED
@@ -1,6 +1,6 @@
  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

- from typing import TYPE_CHECKING, Dict, List, Optional
+ from typing import Dict, List, Optional
  from datetime import datetime
  from typing_extensions import Literal

@@ -12,23 +12,14 @@ __all__ = ["Evaluation", "ModelInput"]


  class ModelInput(BaseModel):
- user_prompt: str
- """The user prompt used to generate the output."""
-
- context: Optional[str] = None
- """Optional context supplied to the LLM when generating the output."""
+ ground_truth: Optional[str] = None
+ """The ground truth for evaluating Ground Truth Adherence guardrail."""

- if TYPE_CHECKING:
- # Some versions of Pydantic <2.8.0 have a bug and don’t allow assigning a
- # value to this field, so for compatibility we avoid doing it at runtime.
- __pydantic_extra__: Dict[str, object] = FieldInfo(init=False)  # pyright: ignore[reportIncompatibleVariableOverride]
+ system_prompt: Optional[str] = None
+ """The system prompt used to generate the output."""

- # Stub to indicate that arbitrary properties are accepted.
- # To access properties that are not valid identifiers you can use `getattr`, e.g.
- # `getattr(obj, '$type')`
- def __getattr__(self, attr: str) -> object: ...
- else:
- __pydantic_extra__: Dict[str, object]
+ user_prompt: Optional[str] = None
+ """The user prompt used to generate the output."""


  class Evaluation(BaseModel):
@@ -41,8 +32,8 @@ class Evaluation(BaseModel):
  api_model_input: ModelInput = FieldInfo(alias="model_input")
  """A dictionary of inputs sent to the LLM to generate output.

- The dictionary must contain a `user_prompt` field and an optional `context`
- field. Additional properties are allowed.
+ The dictionary must contain at least one of `user_prompt` or `system_prompt`.
+ For the `ground_truth_adherence` guardrail metric, `ground_truth` should be provided.
  """

  api_model_output: str = FieldInfo(alias="model_output")
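On the response side, `user_prompt` changes from a required `str` to `Optional[str]`, and the `__pydantic_extra__`/`__getattr__` passthrough for arbitrary keys is gone, so extra properties are no longer surfaced as attributes. Code that read the prompt off a parsed `Evaluation` should now guard for `None`; a sketch, assuming `evaluation` comes back from one of the client calls:

from deeprails.types.evaluation import Evaluation

def describe_input(evaluation: Evaluation) -> str:
    # user_prompt, system_prompt and ground_truth are all Optional now,
    # so fall back explicitly instead of assuming user_prompt is set.
    prompt = evaluation.api_model_input.user_prompt
    return prompt if prompt is not None else "<no user prompt recorded>"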
deeprails/types/monitor_submit_event_params.py CHANGED
@@ -2,8 +2,8 @@

  from __future__ import annotations

- from typing import Dict, List, Union
- from typing_extensions import Literal, Required, TypeAlias, TypedDict
+ from typing import List
+ from typing_extensions import Literal, Required, TypedDict

  __all__ = ["MonitorSubmitEventParams", "ModelInput"]

@@ -31,8 +31,8 @@ class MonitorSubmitEventParams(TypedDict, total=False):
  model_input: Required[ModelInput]
  """A dictionary of inputs sent to the LLM to generate output.

- This must contain a `user_prompt` field and an optional `context` field.
- Additional properties are allowed.
+ The dictionary must contain at least one of `user_prompt` or `system_prompt`.
+ For the `ground_truth_adherence` guardrail metric, `ground_truth` should be provided.
  """

  model_output: Required[str]
@@ -54,10 +54,12 @@ class MonitorSubmitEventParams(TypedDict, total=False):
  """


- class ModelInputTyped(TypedDict, total=False):
- user_prompt: Required[str]
+ class ModelInput(TypedDict, total=False):
+ ground_truth: str
+ """The ground truth for evaluating Ground Truth Adherence guardrail."""

- context: str
+ system_prompt: str
+ """The system prompt used to generate the output."""

-
- ModelInput: TypeAlias = Union[ModelInputTyped, Dict[str, object]]
+ user_prompt: str
+ """The user prompt used to generate the output."""
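The same reshaping applies to monitor events, with `ground_truth` expected whenever the `ground_truth_adherence` metric is requested. A hedged sketch; the `monitor_id` value is a placeholder, and the exact `submit_event` signature (including the `guardrail_metrics` parameter name) is inferred from the generated params rather than shown in this diff:

from deeprails import Deeprails

client = Deeprails()

event = client.monitor.submit_event(
    monitor_id="mon_123",  # hypothetical identifier
    model_input={
        "user_prompt": "By how much did Q3 revenue grow?",
        "ground_truth": "Q3 revenue grew 12% year over year.",  # needed for ground_truth_adherence
    },
    model_output="Q3 revenue grew 12% year over year.",
    guardrail_metrics=["ground_truth_adherence"],  # assumed parameter name
)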
deeprails-1.4.1.dist-info/METADATA → deeprails-1.6.0.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: deeprails
- Version: 1.4.1
+ Version: 1.6.0
  Summary: The official Python library for the deeprails API
  Project-URL: Homepage, https://docs.deeprails.com/
  Project-URL: Repository, https://github.com/deeprails/deeprails-sdk-python
@@ -68,7 +68,7 @@ client = Deeprails(
  defend_response = client.defend.create_workflow(
  improvement_action="fixit",
  metrics={
- "completeness": 0.8,
+ "completeness": 0.7,
  "instruction_adherence": 0.75,
  },
  name="Push Alert Workflow",
@@ -100,7 +100,7 @@ async def main() -> None:
  defend_response = await client.defend.create_workflow(
  improvement_action="fixit",
  metrics={
- "completeness": 0.8,
+ "completeness": 0.7,
  "instruction_adherence": 0.75,
  },
  name="Push Alert Workflow",
@@ -141,7 +141,7 @@ async def main() -> None:
  defend_response = await client.defend.create_workflow(
  improvement_action="fixit",
  metrics={
- "completeness": 0.8,
+ "completeness": 0.7,
  "instruction_adherence": 0.75,
  },
  name="Push Alert Workflow",
@@ -200,7 +200,7 @@ try:
  client.defend.create_workflow(
  improvement_action="fixit",
  metrics={
- "completeness": 0.8,
+ "completeness": 0.7,
  "instruction_adherence": 0.75,
  },
  name="Push Alert Workflow",
@@ -251,7 +251,7 @@ client = Deeprails(
  client.with_options(max_retries=5).defend.create_workflow(
  improvement_action="fixit",
  metrics={
- "completeness": 0.8,
+ "completeness": 0.7,
  "instruction_adherence": 0.75,
  },
  name="Push Alert Workflow",
@@ -282,7 +282,7 @@ client = Deeprails(
  client.with_options(timeout=5.0).defend.create_workflow(
  improvement_action="fixit",
  metrics={
- "completeness": 0.8,
+ "completeness": 0.7,
  "instruction_adherence": 0.75,
  },
  name="Push Alert Workflow",
@@ -331,7 +331,7 @@ client = Deeprails()
  response = client.defend.with_raw_response.create_workflow(
  improvement_action="fixit",
  metrics={
- "completeness": 0.8,
+ "completeness": 0.7,
  "instruction_adherence": 0.75,
  },
  name="Push Alert Workflow",
@@ -357,7 +357,7 @@ To stream the response body, use `.with_streaming_response` instead, which requi
  with client.defend.with_streaming_response.create_workflow(
  improvement_action="fixit",
  metrics={
- "completeness": 0.8,
+ "completeness": 0.7,
  "instruction_adherence": 0.75,
  },
  name="Push Alert Workflow",
deeprails-1.4.1.dist-info/RECORD → deeprails-1.6.0.dist-info/RECORD
@@ -11,7 +11,7 @@ deeprails/_resource.py,sha256=7RXX5KZr4j0TIE66vnduHp7p9Yf9X0FyDDECuvRHARg,1118
  deeprails/_response.py,sha256=yj0HJDU91WPpiczwi6CBOLAl_bqf4I_I96vWMAwx6Fg,28806
  deeprails/_streaming.py,sha256=hCp5bK9dyw2TyrVL69m-6qGC-QtGYwhXmFzITCWPgAs,10112
  deeprails/_types.py,sha256=XR3mad9NsGqZsjrd1VVJ657-4O4kwyw9Qzg4M3i6Vh0,7239
- deeprails/_version.py,sha256=EdZ87RxyRSIBxvDaKalajdR_Znfcmr4XvPV5HPezFS8,161
+ deeprails/_version.py,sha256=Rk1640sw5gjQbHmEb0M7ib_b1CFN45jJ9ePS1fFree8,161
  deeprails/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  deeprails/_utils/__init__.py,sha256=7fch0GT9zpNnErbciSpUNa-SjTxxjY6kxHxKMOM4AGs,2305
  deeprails/_utils/_compat.py,sha256=D8gtAvjJQrDWt9upS0XaG9Rr5l1QhiAx_I_1utT_tt0,1195
@@ -27,25 +27,25 @@ deeprails/_utils/_typing.py,sha256=N_5PPuFNsaygbtA_npZd98SVN1LQQvFTKL6bkWPBZGU,4
  deeprails/_utils/_utils.py,sha256=0dDqauUbVZEXV0NVl7Bwu904Wwo5eyFCZpQThhFNhyA,12253
  deeprails/lib/.keep,sha256=wuNrz-5SXo3jJaJOJgz4vFHM41YH_g20F5cRQo0vLes,224
  deeprails/resources/__init__.py,sha256=ha0jL9Et8fHzPdkTa7ecihYapOC4I6O-PHf2X9igprE,1491
- deeprails/resources/defend.py,sha256=_11GJ0jphqPP7RRPceLHfzyaLf-Mcmi29tinsA0F_cM,27566
- deeprails/resources/evaluate.py,sha256=_jAPyR1ULkz8yrU3nOtwJstdRIoCuwDiT1leAYPQCUY,12907
- deeprails/resources/monitor.py,sha256=hF-aLOS2whKsE-EVdNIj6hPY-YGKpo8xWoAwwCq4B34,22415
+ deeprails/resources/defend.py,sha256=oBH27YIClgm8l6jn_4No-JHNKrZl26RPmQJjsbe8vRw,27662
+ deeprails/resources/evaluate.py,sha256=eGuPv4kvQd9UPO_8GLcAOe0z6q_Q6zv1Cum2eyrRorU,13003
+ deeprails/resources/monitor.py,sha256=uSMhU23FziLy8NWaezqC7RfQ5UqyYTEqaS5FbvIK1vM,22511
  deeprails/types/__init__.py,sha256=tLO-5DMMKt-F4qQYht3F-RFgLbthP-8a36853IhNoGI,1267
  deeprails/types/api_response.py,sha256=eHEQeL677tvm1RK0A_S03EAoprQbJzmHspiKrtjKRt4,1232
  deeprails/types/defend_create_workflow_params.py,sha256=EWYlSeoojWf8pp7VawZh8IbShHWsd0iZfdKq6qlh8OA,2076
  deeprails/types/defend_response.py,sha256=u508zff61fbo7DqotbJLEoYzYg2VUDWbFScx8Ec8EGA,1617
- deeprails/types/defend_submit_event_params.py,sha256=PAmzDkpzaOZjCxGy0c_UTZDvb57N7uhG7NZm5ICRcIg,1376
+ deeprails/types/defend_submit_event_params.py,sha256=ZZwHoDz0Q2StafJq7pSLS-ivRSr39pzZJeUC6m63Aqc,1516
  deeprails/types/defend_update_workflow_params.py,sha256=QH2k7EDMLub3mW1lPV5SUoKDHW_T2arSo-RGHLterwo,373
- deeprails/types/evaluate_create_params.py,sha256=8P3jDuSgGRfGSdgq50Zq9J709ZltRZw29hI8R3MoS84,1828
- deeprails/types/evaluation.py,sha256=cLFQtazZyAEOVGkGqQ_VUmCFE8WMS2pBzUPLQ9MioOA,3787
+ deeprails/types/evaluate_create_params.py,sha256=xDR0kfjTC2s5Lajc5i8yLwyjWMXunv_JZ4EstYP6dfo,1986
+ deeprails/types/evaluation.py,sha256=NMax-Thd9qXKyoo76XfoByhdYH4Ve4yntMnUzp1okBA,3313
  deeprails/types/monitor_create_params.py,sha256=kTSj-PhuzpT-HPDTQJemRWfd8w32foUMH9FQZj8symk,384
  deeprails/types/monitor_retrieve_params.py,sha256=PEsRmbd-81z4pJvhfi4JbrQWNzmeiLkoNsTUoPZ6kFY,352
  deeprails/types/monitor_retrieve_response.py,sha256=BZp7-6PFVdqYU5ZDhbr1Eao3kU132zTm9idgoaA65Gg,2245
- deeprails/types/monitor_submit_event_params.py,sha256=q8Mq24McRr1WdXQ8L1YunKqLBb9-u91J2MLmhUCdaIw,1991
+ deeprails/types/monitor_submit_event_params.py,sha256=PQwn2wH8F0uMmVGg7KyMIBbbExtHE5onZrzIXkz3NGs,2149
  deeprails/types/monitor_submit_event_response.py,sha256=qlraxIJaclNSR_JOizMPj9gOiz-0x7lIChSX3DmFllM,867
  deeprails/types/monitor_update_params.py,sha256=gJyFFxT_u_iWABknuKnLpPl9r-VPfCcGtOAmh6sPwUw,550
  deeprails/types/workflow_event_response.py,sha256=mIzOCnYJg4TDSq_tG_0WfA0_Gmc9-0q-befyookfUFM,867
- deeprails-1.4.1.dist-info/METADATA,sha256=giQ1H0Pw-j4ONEZOZtDZofJnznTUuipurB-56zvQbRA,11840
- deeprails-1.4.1.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
- deeprails-1.4.1.dist-info/licenses/LICENSE,sha256=rFTxPcYE516UQLju2SCY1r2pSDDfodL0-ZvxF_fgueg,11339
- deeprails-1.4.1.dist-info/RECORD,,
+ deeprails-1.6.0.dist-info/METADATA,sha256=RPfU-7XSMjpun0P2zVPe68T2s1m0Z5T4r8FQVhAyk5c,11840
+ deeprails-1.6.0.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+ deeprails-1.6.0.dist-info/licenses/LICENSE,sha256=rFTxPcYE516UQLju2SCY1r2pSDDfodL0-ZvxF_fgueg,11339
+ deeprails-1.6.0.dist-info/RECORD,,