deeprails 1.8.0.tar.gz → 1.9.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of deeprails has been flagged as possibly problematic by the registry.

Files changed (85)
  1. deeprails-1.9.0/.release-please-manifest.json +3 -0
  2. {deeprails-1.8.0 → deeprails-1.9.0}/CHANGELOG.md +8 -0
  3. {deeprails-1.8.0 → deeprails-1.9.0}/PKG-INFO +1 -1
  4. {deeprails-1.8.0 → deeprails-1.9.0}/api.md +5 -5
  5. {deeprails-1.8.0 → deeprails-1.9.0}/pyproject.toml +1 -1
  6. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_version.py +1 -1
  7. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/resources/defend.py +8 -8
  8. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/resources/evaluate.py +2 -2
  9. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/resources/monitor.py +21 -21
  10. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/__init__.py +3 -3
  11. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/defend_create_workflow_params.py +1 -1
  12. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/defend_response.py +1 -1
  13. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/defend_submit_event_params.py +4 -3
  14. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/evaluate_create_params.py +3 -2
  15. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/evaluation.py +3 -2
  16. deeprails-1.8.0/src/deeprails/types/monitor_retrieve_response.py → deeprails-1.9.0/src/deeprails/types/monitor_detail_response.py +4 -17
  17. deeprails-1.8.0/src/deeprails/types/monitor_submit_event_response.py → deeprails-1.9.0/src/deeprails/types/monitor_event_response.py +2 -15
  18. deeprails-1.8.0/src/deeprails/types/api_response.py → deeprails-1.9.0/src/deeprails/types/monitor_response.py +2 -15
  19. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/monitor_submit_event_params.py +3 -2
  20. {deeprails-1.8.0 → deeprails-1.9.0}/tests/api_resources/test_defend.py +2 -2
  21. {deeprails-1.8.0 → deeprails-1.9.0}/tests/api_resources/test_monitor.py +35 -35
  22. deeprails-1.8.0/.release-please-manifest.json +0 -3
  23. {deeprails-1.8.0 → deeprails-1.9.0}/.gitignore +0 -0
  24. {deeprails-1.8.0 → deeprails-1.9.0}/CONTRIBUTING.md +0 -0
  25. {deeprails-1.8.0 → deeprails-1.9.0}/LICENSE +0 -0
  26. {deeprails-1.8.0 → deeprails-1.9.0}/README.md +0 -0
  27. {deeprails-1.8.0 → deeprails-1.9.0}/bin/check-release-environment +0 -0
  28. {deeprails-1.8.0 → deeprails-1.9.0}/bin/publish-pypi +0 -0
  29. {deeprails-1.8.0 → deeprails-1.9.0}/examples/.keep +0 -0
  30. {deeprails-1.8.0 → deeprails-1.9.0}/noxfile.py +0 -0
  31. {deeprails-1.8.0 → deeprails-1.9.0}/release-please-config.json +0 -0
  32. {deeprails-1.8.0 → deeprails-1.9.0}/requirements-dev.lock +0 -0
  33. {deeprails-1.8.0 → deeprails-1.9.0}/requirements.lock +0 -0
  34. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/__init__.py +0 -0
  35. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_base_client.py +0 -0
  36. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_client.py +0 -0
  37. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_compat.py +0 -0
  38. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_constants.py +0 -0
  39. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_exceptions.py +0 -0
  40. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_files.py +0 -0
  41. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_models.py +0 -0
  42. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_qs.py +0 -0
  43. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_resource.py +0 -0
  44. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_response.py +0 -0
  45. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_streaming.py +0 -0
  46. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_types.py +0 -0
  47. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_utils/__init__.py +0 -0
  48. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_utils/_compat.py +0 -0
  49. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_utils/_datetime_parse.py +0 -0
  50. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_utils/_logs.py +0 -0
  51. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_utils/_proxy.py +0 -0
  52. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_utils/_reflection.py +0 -0
  53. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_utils/_resources_proxy.py +0 -0
  54. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_utils/_streams.py +0 -0
  55. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_utils/_sync.py +0 -0
  56. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_utils/_transform.py +0 -0
  57. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_utils/_typing.py +0 -0
  58. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_utils/_utils.py +0 -0
  59. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/lib/.keep +0 -0
  60. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/py.typed +0 -0
  61. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/resources/__init__.py +0 -0
  62. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/defend_update_workflow_params.py +0 -0
  63. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/monitor_create_params.py +0 -0
  64. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/monitor_retrieve_params.py +0 -0
  65. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/monitor_update_params.py +0 -0
  66. {deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/workflow_event_response.py +0 -0
  67. {deeprails-1.8.0 → deeprails-1.9.0}/tests/__init__.py +0 -0
  68. {deeprails-1.8.0 → deeprails-1.9.0}/tests/api_resources/__init__.py +0 -0
  69. {deeprails-1.8.0 → deeprails-1.9.0}/tests/api_resources/test_evaluate.py +0 -0
  70. {deeprails-1.8.0 → deeprails-1.9.0}/tests/conftest.py +0 -0
  71. {deeprails-1.8.0 → deeprails-1.9.0}/tests/sample_file.txt +0 -0
  72. {deeprails-1.8.0 → deeprails-1.9.0}/tests/test_client.py +0 -0
  73. {deeprails-1.8.0 → deeprails-1.9.0}/tests/test_deepcopy.py +0 -0
  74. {deeprails-1.8.0 → deeprails-1.9.0}/tests/test_extract_files.py +0 -0
  75. {deeprails-1.8.0 → deeprails-1.9.0}/tests/test_files.py +0 -0
  76. {deeprails-1.8.0 → deeprails-1.9.0}/tests/test_models.py +0 -0
  77. {deeprails-1.8.0 → deeprails-1.9.0}/tests/test_qs.py +0 -0
  78. {deeprails-1.8.0 → deeprails-1.9.0}/tests/test_required_args.py +0 -0
  79. {deeprails-1.8.0 → deeprails-1.9.0}/tests/test_response.py +0 -0
  80. {deeprails-1.8.0 → deeprails-1.9.0}/tests/test_streaming.py +0 -0
  81. {deeprails-1.8.0 → deeprails-1.9.0}/tests/test_transform.py +0 -0
  82. {deeprails-1.8.0 → deeprails-1.9.0}/tests/test_utils/test_datetime_parse.py +0 -0
  83. {deeprails-1.8.0 → deeprails-1.9.0}/tests/test_utils/test_proxy.py +0 -0
  84. {deeprails-1.8.0 → deeprails-1.9.0}/tests/test_utils/test_typing.py +0 -0
  85. {deeprails-1.8.0 → deeprails-1.9.0}/tests/utils.py +0 -0
deeprails-1.9.0/.release-please-manifest.json
@@ -0,0 +1,3 @@
+ {
+   ".": "1.9.0"
+ }
{deeprails-1.8.0 → deeprails-1.9.0}/CHANGELOG.md
@@ -1,5 +1,13 @@
  # Changelog

+ ## 1.9.0 (2025-10-24)
+
+ Full Changelog: [v1.8.0...v1.9.0](https://github.com/deeprails/deeprails-sdk-python/compare/v1.8.0...v1.9.0)
+
+ ### Features
+
+ * **api:** remove apiresponse from monitor ([4971a99](https://github.com/deeprails/deeprails-sdk-python/commit/4971a99c7357bebbc5e86a2d76d2be55bb34f5ae))
+
  ## 1.8.0 (2025-10-22)

  Full Changelog: [v1.7.0...v1.8.0](https://github.com/deeprails/deeprails-sdk-python/compare/v1.7.0...v1.8.0)
{deeprails-1.8.0 → deeprails-1.9.0}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: deeprails
- Version: 1.8.0
+ Version: 1.9.0
  Summary: The official Python library for the deeprails API
  Project-URL: Homepage, https://docs.deeprails.com/
  Project-URL: Repository, https://github.com/deeprails/deeprails-sdk-python
{deeprails-1.8.0 → deeprails-1.9.0}/api.md
@@ -19,15 +19,15 @@ Methods:
  Types:

  ```python
- from deeprails.types import APIResponse, MonitorRetrieveResponse, MonitorSubmitEventResponse
+ from deeprails.types import MonitorDetailResponse, MonitorEventResponse, MonitorResponse
  ```

  Methods:

- - <code title="post /monitor">client.monitor.<a href="./src/deeprails/resources/monitor.py">create</a>(\*\*<a href="src/deeprails/types/monitor_create_params.py">params</a>) -> <a href="./src/deeprails/types/api_response.py">APIResponse</a></code>
- - <code title="get /monitor/{monitor_id}">client.monitor.<a href="./src/deeprails/resources/monitor.py">retrieve</a>(monitor_id, \*\*<a href="src/deeprails/types/monitor_retrieve_params.py">params</a>) -> <a href="./src/deeprails/types/monitor_retrieve_response.py">MonitorRetrieveResponse</a></code>
- - <code title="put /monitor/{monitor_id}">client.monitor.<a href="./src/deeprails/resources/monitor.py">update</a>(monitor_id, \*\*<a href="src/deeprails/types/monitor_update_params.py">params</a>) -> <a href="./src/deeprails/types/api_response.py">APIResponse</a></code>
- - <code title="post /monitor/{monitor_id}/events">client.monitor.<a href="./src/deeprails/resources/monitor.py">submit_event</a>(monitor_id, \*\*<a href="src/deeprails/types/monitor_submit_event_params.py">params</a>) -> <a href="./src/deeprails/types/monitor_submit_event_response.py">MonitorSubmitEventResponse</a></code>
+ - <code title="post /monitor">client.monitor.<a href="./src/deeprails/resources/monitor.py">create</a>(\*\*<a href="src/deeprails/types/monitor_create_params.py">params</a>) -> <a href="./src/deeprails/types/monitor_response.py">MonitorResponse</a></code>
+ - <code title="get /monitor/{monitor_id}">client.monitor.<a href="./src/deeprails/resources/monitor.py">retrieve</a>(monitor_id, \*\*<a href="src/deeprails/types/monitor_retrieve_params.py">params</a>) -> <a href="./src/deeprails/types/monitor_detail_response.py">MonitorDetailResponse</a></code>
+ - <code title="put /monitor/{monitor_id}">client.monitor.<a href="./src/deeprails/resources/monitor.py">update</a>(monitor_id, \*\*<a href="src/deeprails/types/monitor_update_params.py">params</a>) -> <a href="./src/deeprails/types/monitor_response.py">MonitorResponse</a></code>
+ - <code title="post /monitor/{monitor_id}/events">client.monitor.<a href="./src/deeprails/resources/monitor.py">submit_event</a>(monitor_id, \*\*<a href="src/deeprails/types/monitor_submit_event_params.py">params</a>) -> <a href="./src/deeprails/types/monitor_event_response.py">MonitorEventResponse</a></code>

  # Evaluate
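Taken together, the api.md changes above are a straight type rename on the monitor surface. A minimal sketch of the 1.9.0 usage (not from the diff itself): the monitor name, prompts, and guardrail-metric choice are illustrative, and the client is assumed to pick up its API key from the environment, as Stainless-generated SDKs typically do.

```python
from deeprails import Deeprails
from deeprails.types import MonitorDetailResponse, MonitorEventResponse, MonitorResponse

client = Deeprails()  # assumption: API key is read from the environment

# create and update now return MonitorResponse (previously APIResponse)
monitor: MonitorResponse = client.monitor.create(name="support-bot-monitor")

# submit_event now returns MonitorEventResponse (previously MonitorSubmitEventResponse)
event: MonitorEventResponse = client.monitor.submit_event(
    monitor_id=monitor.monitor_id,
    guardrail_metrics=["correctness"],  # hypothetical metric selection
    model_input={"user_prompt": "What is the refund policy?"},  # illustrative
    model_output="Refunds are accepted within 30 days.",  # illustrative
)

# retrieve now returns MonitorDetailResponse (previously MonitorRetrieveResponse)
detail: MonitorDetailResponse = client.monitor.retrieve(monitor_id=monitor.monitor_id)
```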
{deeprails-1.8.0 → deeprails-1.9.0}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "deeprails"
- version = "1.8.0"
+ version = "1.9.0"
  description = "The official Python library for the deeprails API"
  dynamic = ["readme"]
  license = "Apache-2.0"
{deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/_version.py
@@ -1,4 +1,4 @@
  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

  __title__ = "deeprails"
- __version__ = "1.8.0" # x-release-please-version
+ __version__ = "1.9.0" # x-release-please-version
{deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/resources/defend.py
@@ -54,7 +54,7 @@ class DefendResource(SyncAPIResource):
  automatic_hallucination_tolerance_levels: Dict[str, Literal["low", "medium", "high"]] | Omit = omit,
  custom_hallucination_threshold_values: Dict[str, float] | Omit = omit,
  description: str | Omit = omit,
- max_improvement_attempt: int | Omit = omit,
+ max_improvement_attempts: int | Omit = omit,
  # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
  # The extra values given here take precedence over values defined on the client or passed to this method.
  extra_headers: Headers | None = None,
@@ -92,7 +92,7 @@ class DefendResource(SyncAPIResource):

  description: Description for the workflow.

- max_improvement_attempt: Max. number of improvement action retries until a given event passes the
+ max_improvement_attempts: Max. number of improvement action retries until a given event passes the
  guardrails. Defaults to 10.

  extra_headers: Send extra headers
@@ -113,7 +113,7 @@ class DefendResource(SyncAPIResource):
  "automatic_hallucination_tolerance_levels": automatic_hallucination_tolerance_levels,
  "custom_hallucination_threshold_values": custom_hallucination_threshold_values,
  "description": description,
- "max_improvement_attempt": max_improvement_attempt,
+ "max_improvement_attempts": max_improvement_attempts,
  },
  defend_create_workflow_params.DefendCreateWorkflowParams,
  ),
@@ -214,7 +214,7 @@ class DefendResource(SyncAPIResource):

  Args:
  model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
- contain at least `user_prompt` or `system_prompt` field. For the
+ contain at least a `user_prompt` field or a `system_prompt` field. For the
  ground_truth_adherence guardrail metric, `ground_truth` should be provided.

  model_output: Output generated by the LLM to be evaluated.
@@ -332,7 +332,7 @@ class AsyncDefendResource(AsyncAPIResource):
  automatic_hallucination_tolerance_levels: Dict[str, Literal["low", "medium", "high"]] | Omit = omit,
  custom_hallucination_threshold_values: Dict[str, float] | Omit = omit,
  description: str | Omit = omit,
- max_improvement_attempt: int | Omit = omit,
+ max_improvement_attempts: int | Omit = omit,
  # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
  # The extra values given here take precedence over values defined on the client or passed to this method.
  extra_headers: Headers | None = None,
@@ -370,7 +370,7 @@ class AsyncDefendResource(AsyncAPIResource):

  description: Description for the workflow.

- max_improvement_attempt: Max. number of improvement action retries until a given event passes the
+ max_improvement_attempts: Max. number of improvement action retries until a given event passes the
  guardrails. Defaults to 10.

  extra_headers: Send extra headers
@@ -391,7 +391,7 @@ class AsyncDefendResource(AsyncAPIResource):
  "automatic_hallucination_tolerance_levels": automatic_hallucination_tolerance_levels,
  "custom_hallucination_threshold_values": custom_hallucination_threshold_values,
  "description": description,
- "max_improvement_attempt": max_improvement_attempt,
+ "max_improvement_attempts": max_improvement_attempts,
  },
  defend_create_workflow_params.DefendCreateWorkflowParams,
  ),
@@ -492,7 +492,7 @@ class AsyncDefendResource(AsyncAPIResource):

  Args:
  model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
- contain at least `user_prompt` or `system_prompt` field. For the
+ contain at least a `user_prompt` field or a `system_prompt` field. For the
  ground_truth_adherence guardrail metric, `ground_truth` should be provided.

  model_output: Output generated by the LLM to be evaluated.
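The defend change is purely a spelling fix on the keyword argument. A hedged sketch of a create_workflow call using the new name (the description and attempt count are illustrative, and any other required workflow arguments are elided, since this hunk starts mid-call):

```python
from deeprails import Deeprails

client = Deeprails()  # assumption: API key is read from the environment

workflow = client.defend.create_workflow(
    # ...other required workflow arguments elided...
    type="automatic",
    automatic_hallucination_tolerance_levels={"correctness": "low"},
    description="Guardrails for a support bot",  # illustrative
    max_improvement_attempts=2,  # was `max_improvement_attempt` in 1.8.0
)
print(workflow.max_improvement_attempts)  # DefendResponse field, renamed in step with the param
```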
{deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/resources/evaluate.py
@@ -76,7 +76,7 @@ class EvaluateResource(SyncAPIResource):

  Args:
  model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
- contain at least `user_prompt` or `system_prompt` field. For
+ contain at least a `user_prompt` field or a `system_prompt` field. For
  ground_truth_adherence guardrail metric, `ground_truth` should be provided.

  model_output: Output generated by the LLM to be evaluated.
@@ -207,7 +207,7 @@ class AsyncEvaluateResource(AsyncAPIResource):

  Args:
  model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
- contain at least `user_prompt` or `system_prompt` field. For
+ contain at least a `user_prompt` field or a `system_prompt` field. For
  ground_truth_adherence guardrail metric, `ground_truth` should be provided.

  model_output: Output generated by the LLM to be evaluated.
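The docstring fix clarifies the model_input contract rather than changing it: at least one of `user_prompt` or `system_prompt` must be present, and `ground_truth` is only needed for the ground_truth_adherence metric. A hedged sketch (prompt text is illustrative; other required arguments are elided):

```python
from deeprails import Deeprails

client = Deeprails()  # assumption: API key is read from the environment

evaluation = client.evaluate.create(
    # ...other required arguments (e.g. guardrail metric selection) elided...
    model_input={
        "user_prompt": "Summarise the ticket in one sentence.",  # at least one of
        "system_prompt": "You are a concise assistant.",         # these two is required
        "ground_truth": "Customer wants a refund.",  # only for ground_truth_adherence
    },
    model_output="The customer is asking for a refund.",
)
```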
{deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/resources/monitor.py
@@ -19,9 +19,9 @@ from .._response import (
  async_to_streamed_response_wrapper,
  )
  from .._base_client import make_request_options
- from ..types.api_response import APIResponse
- from ..types.monitor_retrieve_response import MonitorRetrieveResponse
- from ..types.monitor_submit_event_response import MonitorSubmitEventResponse
+ from ..types.monitor_response import MonitorResponse
+ from ..types.monitor_event_response import MonitorEventResponse
+ from ..types.monitor_detail_response import MonitorDetailResponse

  __all__ = ["MonitorResource", "AsyncMonitorResource"]

@@ -57,7 +57,7 @@ class MonitorResource(SyncAPIResource):
  extra_query: Query | None = None,
  extra_body: Body | None = None,
  timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> APIResponse:
+ ) -> MonitorResponse:
  """
  Use this endpoint to create a new monitor to evaluate model inputs and outputs
  using guardrails
@@ -87,7 +87,7 @@ class MonitorResource(SyncAPIResource):
  options=make_request_options(
  extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
  ),
- cast_to=APIResponse,
+ cast_to=MonitorResponse,
  )

  def retrieve(
@@ -101,7 +101,7 @@ class MonitorResource(SyncAPIResource):
  extra_query: Query | None = None,
  extra_body: Body | None = None,
  timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> MonitorRetrieveResponse:
+ ) -> MonitorDetailResponse:
  """
  Use this endpoint to retrieve the details and evaluations associated with a
  specific monitor
@@ -128,7 +128,7 @@ class MonitorResource(SyncAPIResource):
  timeout=timeout,
  query=maybe_transform({"limit": limit}, monitor_retrieve_params.MonitorRetrieveParams),
  ),
- cast_to=MonitorRetrieveResponse,
+ cast_to=MonitorDetailResponse,
  )

  def update(
@@ -144,7 +144,7 @@ class MonitorResource(SyncAPIResource):
  extra_query: Query | None = None,
  extra_body: Body | None = None,
  timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> APIResponse:
+ ) -> MonitorResponse:
  """
  Use this endpoint to update the name, description, or status of an existing
  monitor
@@ -180,7 +180,7 @@ class MonitorResource(SyncAPIResource):
  options=make_request_options(
  extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
  ),
- cast_to=APIResponse,
+ cast_to=MonitorResponse,
  )

  def submit_event(
@@ -208,7 +208,7 @@ class MonitorResource(SyncAPIResource):
  extra_query: Query | None = None,
  extra_body: Body | None = None,
  timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> MonitorSubmitEventResponse:
+ ) -> MonitorEventResponse:
  """
  Use this endpoint to submit a model input and output pair to a monitor for
  evaluation
@@ -220,7 +220,7 @@ class MonitorResource(SyncAPIResource):
  `ground_truth_adherence`, and/or `comprehensive_safety`.

  model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
- contain at least a `user_prompt` or `system_prompt` field. For
+ contain at least a `user_prompt` field or a `system_prompt` field. For
  ground_truth_adherence guardrail metric, `ground_truth` should be provided.

  model_output: Output generated by the LLM to be evaluated.
@@ -260,7 +260,7 @@ class MonitorResource(SyncAPIResource):
  options=make_request_options(
  extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
  ),
- cast_to=MonitorSubmitEventResponse,
+ cast_to=MonitorEventResponse,
  )


@@ -295,7 +295,7 @@ class AsyncMonitorResource(AsyncAPIResource):
  extra_query: Query | None = None,
  extra_body: Body | None = None,
  timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> APIResponse:
+ ) -> MonitorResponse:
  """
  Use this endpoint to create a new monitor to evaluate model inputs and outputs
  using guardrails
@@ -325,7 +325,7 @@ class AsyncMonitorResource(AsyncAPIResource):
  options=make_request_options(
  extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
  ),
- cast_to=APIResponse,
+ cast_to=MonitorResponse,
  )

  async def retrieve(
@@ -339,7 +339,7 @@ class AsyncMonitorResource(AsyncAPIResource):
  extra_query: Query | None = None,
  extra_body: Body | None = None,
  timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> MonitorRetrieveResponse:
+ ) -> MonitorDetailResponse:
  """
  Use this endpoint to retrieve the details and evaluations associated with a
  specific monitor
@@ -366,7 +366,7 @@ class AsyncMonitorResource(AsyncAPIResource):
  timeout=timeout,
  query=await async_maybe_transform({"limit": limit}, monitor_retrieve_params.MonitorRetrieveParams),
  ),
- cast_to=MonitorRetrieveResponse,
+ cast_to=MonitorDetailResponse,
  )

  async def update(
@@ -382,7 +382,7 @@ class AsyncMonitorResource(AsyncAPIResource):
  extra_query: Query | None = None,
  extra_body: Body | None = None,
  timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> APIResponse:
+ ) -> MonitorResponse:
  """
  Use this endpoint to update the name, description, or status of an existing
  monitor
@@ -418,7 +418,7 @@ class AsyncMonitorResource(AsyncAPIResource):
  options=make_request_options(
  extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
  ),
- cast_to=APIResponse,
+ cast_to=MonitorResponse,
  )

  async def submit_event(
@@ -446,7 +446,7 @@ class AsyncMonitorResource(AsyncAPIResource):
  extra_query: Query | None = None,
  extra_body: Body | None = None,
  timeout: float | httpx.Timeout | None | NotGiven = not_given,
- ) -> MonitorSubmitEventResponse:
+ ) -> MonitorEventResponse:
  """
  Use this endpoint to submit a model input and output pair to a monitor for
  evaluation
@@ -458,7 +458,7 @@ class AsyncMonitorResource(AsyncAPIResource):
  `ground_truth_adherence`, and/or `comprehensive_safety`.

  model_input: A dictionary of inputs sent to the LLM to generate output. The dictionary must
- contain at least a `user_prompt` or `system_prompt` field. For
+ contain at least a `user_prompt` field or a `system_prompt` field. For
  ground_truth_adherence guardrail metric, `ground_truth` should be provided.

  model_output: Output generated by the LLM to be evaluated.
@@ -498,7 +498,7 @@ class AsyncMonitorResource(AsyncAPIResource):
  options=make_request_options(
  extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
  ),
- cast_to=MonitorSubmitEventResponse,
+ cast_to=MonitorEventResponse,
  )
{deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/__init__.py
@@ -3,16 +3,16 @@
  from __future__ import annotations

  from .evaluation import Evaluation as Evaluation
- from .api_response import APIResponse as APIResponse
  from .defend_response import DefendResponse as DefendResponse
+ from .monitor_response import MonitorResponse as MonitorResponse
  from .monitor_create_params import MonitorCreateParams as MonitorCreateParams
  from .monitor_update_params import MonitorUpdateParams as MonitorUpdateParams
  from .evaluate_create_params import EvaluateCreateParams as EvaluateCreateParams
+ from .monitor_event_response import MonitorEventResponse as MonitorEventResponse
+ from .monitor_detail_response import MonitorDetailResponse as MonitorDetailResponse
  from .monitor_retrieve_params import MonitorRetrieveParams as MonitorRetrieveParams
  from .workflow_event_response import WorkflowEventResponse as WorkflowEventResponse
- from .monitor_retrieve_response import MonitorRetrieveResponse as MonitorRetrieveResponse
  from .defend_submit_event_params import DefendSubmitEventParams as DefendSubmitEventParams
  from .monitor_submit_event_params import MonitorSubmitEventParams as MonitorSubmitEventParams
  from .defend_create_workflow_params import DefendCreateWorkflowParams as DefendCreateWorkflowParams
  from .defend_update_workflow_params import DefendUpdateWorkflowParams as DefendUpdateWorkflowParams
- from .monitor_submit_event_response import MonitorSubmitEventResponse as MonitorSubmitEventResponse
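The practical upshot of dropping the APIResponse envelope, visible in the type changes below, is that fields which previously lived under `.data` are now top-level on the returned model. A hedged before/after sketch, with the 1.8.0 half commented out (monitor name is illustrative):

```python
from deeprails import Deeprails

client = Deeprails()  # assumption: API key is read from the environment

# deeprails 1.8.0: monitor methods returned an APIResponse envelope
# resp = client.monitor.create(name="my-monitor")
# monitor_id = resp.data.monitor_id if resp.success and resp.data else None

# deeprails 1.9.0: the typed model is returned directly
monitor = client.monitor.create(name="my-monitor")
monitor_id = monitor.monitor_id
```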
{deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/defend_create_workflow_params.py
@@ -48,7 +48,7 @@ class DefendCreateWorkflowParams(TypedDict, total=False):
  description: str
  """Description for the workflow."""

- max_improvement_attempt: int
+ max_improvement_attempts: int
  """Max.

  number of improvement action retries until a given event passes the guardrails.
{deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/defend_response.py
@@ -31,7 +31,7 @@ class DefendResponse(BaseModel):
  Nothing does not attempt any improvement.
  """

- max_improvement_attempt: Optional[int] = None
+ max_improvement_attempts: Optional[int] = None
  """Max.

  number of improvement action retries until a given event passes the guardrails.
{deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/defend_submit_event_params.py
@@ -11,8 +11,9 @@ class DefendSubmitEventParams(TypedDict, total=False):
  model_input: Required[ModelInput]
  """A dictionary of inputs sent to the LLM to generate output.

- The dictionary must contain at least `user_prompt` or `system_prompt` field. For
- the ground_truth_adherence guardrail metric, `ground_truth` should be provided.
+ The dictionary must contain at least a `user_prompt` field or a `system_prompt`
+ field. For the ground_truth_adherence guardrail metric, `ground_truth` should be
+ provided.
  """

  model_output: Required[str]
@@ -36,7 +37,7 @@ class DefendSubmitEventParams(TypedDict, total=False):

  class ModelInput(TypedDict, total=False):
  ground_truth: str
- """The ground truth for evaluating Ground Truth Adherence guardrail."""
+ """The ground truth for evaluating the Ground Truth Adherence guardrail."""

  system_prompt: str
  """The system prompt used to generate the output."""
{deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/evaluate_create_params.py
@@ -12,8 +12,9 @@ class EvaluateCreateParams(TypedDict, total=False):
  model_input: Required[ModelInput]
  """A dictionary of inputs sent to the LLM to generate output.

- The dictionary must contain at least `user_prompt` or `system_prompt` field. For
- ground_truth_adherence guardrail metric, `ground_truth` should be provided.
+ The dictionary must contain at least a `user_prompt` field or a `system_prompt`
+ field. For ground_truth_adherence guardrail metric, `ground_truth` should be
+ provided.
  """

  model_output: Required[str]
{deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/evaluation.py
@@ -32,8 +32,9 @@ class Evaluation(BaseModel):
  api_model_input: ModelInput = FieldInfo(alias="model_input")
  """A dictionary of inputs sent to the LLM to generate output.

- The dictionary must contain at least `user_prompt` or `system_prompt` field. For
- ground_truth_adherence guardrail metric, `ground_truth` should be provided.
+ The dictionary must contain at least a `user_prompt` field or a `system_prompt`
+ field. For ground_truth_adherence guardrail metric, `ground_truth` should be
+ provided.
  """

  api_model_output: str = FieldInfo(alias="model_output")
deeprails-1.8.0/src/deeprails/types/monitor_retrieve_response.py → deeprails-1.9.0/src/deeprails/types/monitor_detail_response.py
@@ -7,10 +7,10 @@ from typing_extensions import Literal
  from .._models import BaseModel
  from .evaluation import Evaluation

- __all__ = ["MonitorRetrieveResponse", "Data", "DataStats"]
+ __all__ = ["MonitorDetailResponse", "Stats"]


- class DataStats(BaseModel):
+ class Stats(BaseModel):
  completed_evaluations: Optional[int] = None
  """Number of evaluations that completed successfully."""

@@ -27,7 +27,7 @@ class DataStats(BaseModel):
  """Total number of evaluations performed by this monitor."""


- class Data(BaseModel):
+ class MonitorDetailResponse(BaseModel):
  monitor_id: str
  """A unique monitor ID."""

@@ -53,7 +53,7 @@ class Data(BaseModel):
  Each one corresponds to a separate monitor event.
  """

- stats: Optional[DataStats] = None
+ stats: Optional[Stats] = None
  """
  Contains five fields used for stats of this monitor: total evaluations,
  completed evaluations, failed evaluations, queued evaluations, and in progress
@@ -65,16 +65,3 @@ class Data(BaseModel):

  user_id: Optional[str] = None
  """User ID of the user who created the monitor."""
-
-
- class MonitorRetrieveResponse(BaseModel):
- success: bool
- """Represents whether the request was completed successfully."""
-
- data: Optional[Data] = None
-
- message: Optional[str] = None
- """The accompanying message for the request.
-
- Includes error details when applicable.
- """
deeprails-1.8.0/src/deeprails/types/monitor_submit_event_response.py → deeprails-1.9.0/src/deeprails/types/monitor_event_response.py
@@ -5,10 +5,10 @@ from datetime import datetime

  from .._models import BaseModel

- __all__ = ["MonitorSubmitEventResponse", "Data"]
+ __all__ = ["MonitorEventResponse"]


- class Data(BaseModel):
+ class MonitorEventResponse(BaseModel):
  evaluation_id: str
  """A unique evaluation ID associated with this event."""

@@ -20,16 +20,3 @@ class Data(BaseModel):

  created_at: Optional[datetime] = None
  """The time the monitor event was created in UTC."""
-
-
- class MonitorSubmitEventResponse(BaseModel):
- success: bool
- """Represents whether the request was completed successfully."""
-
- data: Optional[Data] = None
-
- message: Optional[str] = None
- """The accompanying message for the request.
-
- Includes error details when applicable.
- """
deeprails-1.8.0/src/deeprails/types/api_response.py → deeprails-1.9.0/src/deeprails/types/monitor_response.py
@@ -6,10 +6,10 @@ from typing_extensions import Literal

  from .._models import BaseModel

- __all__ = ["APIResponse", "Data"]
+ __all__ = ["MonitorResponse"]


- class Data(BaseModel):
+ class MonitorResponse(BaseModel):
  monitor_id: str
  """A unique monitor ID."""

@@ -34,16 +34,3 @@ class Data(BaseModel):

  user_id: Optional[str] = None
  """User ID of the user who created the monitor."""
-
-
- class APIResponse(BaseModel):
- success: bool
- """Represents whether the request was completed successfully."""
-
- data: Optional[Data] = None
-
- message: Optional[str] = None
- """The accompanying message for the request.
-
- Includes error details when applicable.
- """
{deeprails-1.8.0 → deeprails-1.9.0}/src/deeprails/types/monitor_submit_event_params.py
@@ -31,8 +31,9 @@ class MonitorSubmitEventParams(TypedDict, total=False):
  model_input: Required[ModelInput]
  """A dictionary of inputs sent to the LLM to generate output.

- The dictionary must contain at least a `user_prompt` or `system_prompt` field.
- For ground_truth_adherence guardrail metric, `ground_truth` should be provided.
+ The dictionary must contain at least a `user_prompt` field or a `system_prompt`
+ field. For ground_truth_adherence guardrail metric, `ground_truth` should be
+ provided.
  """

  model_output: Required[str]
{deeprails-1.8.0 → deeprails-1.9.0}/tests/api_resources/test_defend.py
@@ -40,7 +40,7 @@ class TestDefend:
  type="automatic",
  automatic_hallucination_tolerance_levels={"correctness": "low"},
  description="description",
- max_improvement_attempt=2,
+ max_improvement_attempts=2,
  )
  assert_matches_type(DefendResponse, defend, path=["response"])

@@ -323,7 +323,7 @@ class TestAsyncDefend:
  type="automatic",
  automatic_hallucination_tolerance_levels={"correctness": "low"},
  description="description",
- max_improvement_attempt=2,
+ max_improvement_attempts=2,
  )
  assert_matches_type(DefendResponse, defend, path=["response"])
{deeprails-1.8.0 → deeprails-1.9.0}/tests/api_resources/test_monitor.py
@@ -10,9 +10,9 @@ import pytest
  from deeprails import Deeprails, AsyncDeeprails
  from tests.utils import assert_matches_type
  from deeprails.types import (
- APIResponse,
- MonitorRetrieveResponse,
- MonitorSubmitEventResponse,
+ MonitorResponse,
+ MonitorEventResponse,
+ MonitorDetailResponse,
  )

  base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -27,7 +27,7 @@ class TestMonitor:
  monitor = client.monitor.create(
  name="name",
  )
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -36,7 +36,7 @@ class TestMonitor:
  name="name",
  description="description",
  )
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -48,7 +48,7 @@ class TestMonitor:
  assert response.is_closed is True
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"
  monitor = response.parse()
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -60,7 +60,7 @@ class TestMonitor:
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"

  monitor = response.parse()
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  assert cast(Any, response.is_closed) is True

@@ -70,7 +70,7 @@ class TestMonitor:
  monitor = client.monitor.retrieve(
  monitor_id="monitor_id",
  )
- assert_matches_type(MonitorRetrieveResponse, monitor, path=["response"])
+ assert_matches_type(MonitorDetailResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -79,7 +79,7 @@ class TestMonitor:
  monitor_id="monitor_id",
  limit=0,
  )
- assert_matches_type(MonitorRetrieveResponse, monitor, path=["response"])
+ assert_matches_type(MonitorDetailResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -91,7 +91,7 @@ class TestMonitor:
  assert response.is_closed is True
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"
  monitor = response.parse()
- assert_matches_type(MonitorRetrieveResponse, monitor, path=["response"])
+ assert_matches_type(MonitorDetailResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -103,7 +103,7 @@ class TestMonitor:
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"

  monitor = response.parse()
- assert_matches_type(MonitorRetrieveResponse, monitor, path=["response"])
+ assert_matches_type(MonitorDetailResponse, monitor, path=["response"])

  assert cast(Any, response.is_closed) is True

@@ -121,7 +121,7 @@ class TestMonitor:
  monitor = client.monitor.update(
  monitor_id="monitor_id",
  )
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -132,7 +132,7 @@ class TestMonitor:
  monitor_status="active",
  name="name",
  )
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -144,7 +144,7 @@ class TestMonitor:
  assert response.is_closed is True
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"
  monitor = response.parse()
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -156,7 +156,7 @@ class TestMonitor:
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"

  monitor = response.parse()
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  assert cast(Any, response.is_closed) is True

@@ -177,7 +177,7 @@ class TestMonitor:
  model_input={},
  model_output="model_output",
  )
- assert_matches_type(MonitorSubmitEventResponse, monitor, path=["response"])
+ assert_matches_type(MonitorEventResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -195,7 +195,7 @@ class TestMonitor:
  nametag="nametag",
  run_mode="precision_plus",
  )
- assert_matches_type(MonitorSubmitEventResponse, monitor, path=["response"])
+ assert_matches_type(MonitorEventResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -210,7 +210,7 @@ class TestMonitor:
  assert response.is_closed is True
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"
  monitor = response.parse()
- assert_matches_type(MonitorSubmitEventResponse, monitor, path=["response"])
+ assert_matches_type(MonitorEventResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -225,7 +225,7 @@ class TestMonitor:
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"

  monitor = response.parse()
- assert_matches_type(MonitorSubmitEventResponse, monitor, path=["response"])
+ assert_matches_type(MonitorEventResponse, monitor, path=["response"])

  assert cast(Any, response.is_closed) is True

@@ -252,7 +252,7 @@ class TestAsyncMonitor:
  monitor = await async_client.monitor.create(
  name="name",
  )
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -261,7 +261,7 @@ class TestAsyncMonitor:
  name="name",
  description="description",
  )
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -273,7 +273,7 @@ class TestAsyncMonitor:
  assert response.is_closed is True
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"
  monitor = await response.parse()
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -285,7 +285,7 @@ class TestAsyncMonitor:
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"

  monitor = await response.parse()
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  assert cast(Any, response.is_closed) is True

@@ -295,7 +295,7 @@ class TestAsyncMonitor:
  monitor = await async_client.monitor.retrieve(
  monitor_id="monitor_id",
  )
- assert_matches_type(MonitorRetrieveResponse, monitor, path=["response"])
+ assert_matches_type(MonitorDetailResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -304,7 +304,7 @@ class TestAsyncMonitor:
  monitor_id="monitor_id",
  limit=0,
  )
- assert_matches_type(MonitorRetrieveResponse, monitor, path=["response"])
+ assert_matches_type(MonitorDetailResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -316,7 +316,7 @@ class TestAsyncMonitor:
  assert response.is_closed is True
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"
  monitor = await response.parse()
- assert_matches_type(MonitorRetrieveResponse, monitor, path=["response"])
+ assert_matches_type(MonitorDetailResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -328,7 +328,7 @@ class TestAsyncMonitor:
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"

  monitor = await response.parse()
- assert_matches_type(MonitorRetrieveResponse, monitor, path=["response"])
+ assert_matches_type(MonitorDetailResponse, monitor, path=["response"])

  assert cast(Any, response.is_closed) is True

@@ -346,7 +346,7 @@ class TestAsyncMonitor:
  monitor = await async_client.monitor.update(
  monitor_id="monitor_id",
  )
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -357,7 +357,7 @@ class TestAsyncMonitor:
  monitor_status="active",
  name="name",
  )
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -369,7 +369,7 @@ class TestAsyncMonitor:
  assert response.is_closed is True
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"
  monitor = await response.parse()
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -381,7 +381,7 @@ class TestAsyncMonitor:
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"

  monitor = await response.parse()
- assert_matches_type(APIResponse, monitor, path=["response"])
+ assert_matches_type(MonitorResponse, monitor, path=["response"])

  assert cast(Any, response.is_closed) is True

@@ -402,7 +402,7 @@ class TestAsyncMonitor:
  model_input={},
  model_output="model_output",
  )
- assert_matches_type(MonitorSubmitEventResponse, monitor, path=["response"])
+ assert_matches_type(MonitorEventResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -420,7 +420,7 @@ class TestAsyncMonitor:
  nametag="nametag",
  run_mode="precision_plus",
  )
- assert_matches_type(MonitorSubmitEventResponse, monitor, path=["response"])
+ assert_matches_type(MonitorEventResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -435,7 +435,7 @@ class TestAsyncMonitor:
  assert response.is_closed is True
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"
  monitor = await response.parse()
- assert_matches_type(MonitorSubmitEventResponse, monitor, path=["response"])
+ assert_matches_type(MonitorEventResponse, monitor, path=["response"])

  @pytest.mark.skip(reason="Prism tests are disabled")
  @parametrize
@@ -450,7 +450,7 @@ class TestAsyncMonitor:
  assert response.http_request.headers.get("X-Stainless-Lang") == "python"

  monitor = await response.parse()
- assert_matches_type(MonitorSubmitEventResponse, monitor, path=["response"])
+ assert_matches_type(MonitorEventResponse, monitor, path=["response"])

  assert cast(Any, response.is_closed) is True
deeprails-1.8.0/.release-please-manifest.json
@@ -1,3 +0,0 @@
- {
-   ".": "1.8.0"
- }