vellum-ai 0.10.6__py3-none-any.whl → 0.10.7__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (32)
  1. vellum/__init__.py +2 -0
  2. vellum/client/README.md +7 -52
  3. vellum/client/__init__.py +16 -136
  4. vellum/client/core/client_wrapper.py +1 -1
  5. vellum/client/resources/ad_hoc/client.py +14 -104
  6. vellum/client/resources/metric_definitions/client.py +113 -0
  7. vellum/client/resources/test_suites/client.py +8 -16
  8. vellum/client/resources/workflows/client.py +0 -32
  9. vellum/client/types/__init__.py +2 -0
  10. vellum/client/types/metric_definition_history_item.py +39 -0
  11. vellum/types/metric_definition_history_item.py +3 -0
  12. vellum/workflows/events/node.py +36 -3
  13. vellum/workflows/events/tests/test_event.py +89 -9
  14. vellum/workflows/nodes/displayable/conditional_node/node.py +2 -2
  15. vellum/workflows/ports/node_ports.py +2 -2
  16. vellum/workflows/ports/port.py +14 -0
  17. vellum/workflows/references/__init__.py +2 -0
  18. vellum/workflows/runner/runner.py +6 -7
  19. vellum/workflows/runner/types.py +1 -3
  20. vellum/workflows/state/encoder.py +2 -1
  21. vellum/workflows/types/tests/test_utils.py +6 -3
  22. vellum/workflows/types/utils.py +3 -0
  23. {vellum_ai-0.10.6.dist-info → vellum_ai-0.10.7.dist-info}/METADATA +1 -1
  24. {vellum_ai-0.10.6.dist-info → vellum_ai-0.10.7.dist-info}/RECORD +32 -30
  25. vellum_ee/workflows/display/nodes/vellum/final_output_node.py +4 -2
  26. vellum_ee/workflows/display/nodes/vellum/map_node.py +20 -48
  27. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +5 -16
  28. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +11 -8
  29. vellum_ee/workflows/display/utils/vellum.py +3 -2
  30. {vellum_ai-0.10.6.dist-info → vellum_ai-0.10.7.dist-info}/LICENSE +0 -0
  31. {vellum_ai-0.10.6.dist-info → vellum_ai-0.10.7.dist-info}/WHEEL +0 -0
  32. {vellum_ai-0.10.6.dist-info → vellum_ai-0.10.7.dist-info}/entry_points.txt +0 -0
vellum/__init__.py CHANGED
@@ -186,6 +186,7 @@ from .types import (
     MetadataFilterRuleRequest,
     MetadataFiltersRequest,
     MetricDefinitionExecution,
+    MetricDefinitionHistoryItem,
     MetricDefinitionInput,
     MetricNodeResult,
     MlModelRead,
@@ -708,6 +709,7 @@ __all__ = [
     "MetadataFilterRuleRequest",
     "MetadataFiltersRequest",
     "MetricDefinitionExecution",
+    "MetricDefinitionHistoryItem",
     "MetricDefinitionInput",
     "MetricNodeResult",
     "MlModelRead",
vellum/client/README.md CHANGED
@@ -91,82 +91,37 @@ The SDK supports streaming responses, as well, the response will be a generator
 
 ```python
 from vellum import (
-    AdHocExpandMeta,
-    EphemeralPromptCacheConfig,
-    FunctionDefinition,
     JinjaPromptBlock,
     PromptParameters,
     PromptRequestStringInput,
-    PromptSettings,
-    StringVellumValue,
     Vellum,
     VellumVariable,
-    VellumVariableExtensions,
 )
 
 client = Vellum(
     api_key="YOUR_API_KEY",
 )
 response = client.ad_hoc.adhoc_execute_prompt_stream(
-    ml_model="string",
+    ml_model="ml_model",
     input_values=[
         PromptRequestStringInput(
-            key="string",
-            value="string",
+            key="key",
+            value="value",
         )
     ],
     input_variables=[
         VellumVariable(
-            id="string",
-            key="string",
+            id="id",
+            key="key",
             type="STRING",
-            required=True,
-            default=StringVellumValue(
-                value="string",
-            ),
-            extensions=VellumVariableExtensions(
-                color={"key": "value"},
-            ),
         )
     ],
-    parameters=PromptParameters(
-        stop=["string"],
-        temperature=1.1,
-        max_tokens=1,
-        top_p=1.1,
-        top_k=1,
-        frequency_penalty=1.1,
-        presence_penalty=1.1,
-        logit_bias={"string": {"key": "value"}},
-        custom_parameters={"string": {"key": "value"}},
-    ),
-    settings=PromptSettings(
-        timeout=1.1,
-    ),
+    parameters=PromptParameters(),
     blocks=[
         JinjaPromptBlock(
-            state="ENABLED",
-            cache_config=EphemeralPromptCacheConfig(),
-            template="string",
+            template="template",
         )
     ],
-    functions=[
-        FunctionDefinition(
-            state="ENABLED",
-            cache_config=EphemeralPromptCacheConfig(),
-            name="string",
-            description="string",
-            parameters={"string": {"key": "value"}},
-            forced=True,
-            strict=True,
-        )
-    ],
-    expand_meta=AdHocExpandMeta(
-        cost=True,
-        model_name=True,
-        usage=True,
-        finish_reason=True,
-    ),
 )
 for chunk in response:
     yield chunk
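Applied, this hunk leaves the README with a much smaller streaming example. A runnable sketch reconstructed from the context and `+` lines above (all values are the generator's placeholders; the README's trailing `yield chunk` only works inside a generator, so this sketch prints instead):

```python
from vellum import (
    JinjaPromptBlock,
    PromptParameters,
    PromptRequestStringInput,
    Vellum,
    VellumVariable,
)

client = Vellum(
    api_key="YOUR_API_KEY",  # assumption: a valid Vellum API key
)
response = client.ad_hoc.adhoc_execute_prompt_stream(
    ml_model="ml_model",  # placeholder model name from the generated example
    input_values=[
        PromptRequestStringInput(
            key="key",
            value="value",
        )
    ],
    input_variables=[
        VellumVariable(
            id="id",
            key="key",
            type="STRING",
        )
    ],
    parameters=PromptParameters(),  # defaults; the tuned values were dropped from the example
    blocks=[
        JinjaPromptBlock(
            template="template",
        )
    ],
)
for chunk in response:
    print(chunk)  # each chunk is a streamed prompt-execution event
```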
vellum/client/__init__.py CHANGED
@@ -447,12 +447,7 @@ class Vellum:
 
 Examples
 --------
-from vellum import (
-    PromptDeploymentExpandMetaRequest,
-    RawPromptExecutionOverridesRequest,
-    StringInputRequest,
-    Vellum,
-)
+from vellum import StringInputRequest, Vellum
 
 client = Vellum(
     api_key="YOUR_API_KEY",
@@ -460,30 +455,10 @@ class Vellum:
 response = client.execute_prompt_stream(
     inputs=[
         StringInputRequest(
-            name="string",
-            value="string",
+            name="name",
+            value="value",
         )
     ],
-    prompt_deployment_id="string",
-    prompt_deployment_name="string",
-    release_tag="string",
-    external_id="string",
-    expand_meta=PromptDeploymentExpandMetaRequest(
-        model_name=True,
-        usage=True,
-        cost=True,
-        finish_reason=True,
-        latency=True,
-        deployment_release_tag=True,
-        prompt_version_id=True,
-    ),
-    raw_overrides=RawPromptExecutionOverridesRequest(
-        body={"string": {"key": "value"}},
-        headers={"string": {"key": "value"}},
-        url="string",
-    ),
-    expand_raw=["string"],
-    metadata={"string": {"key": "value"}},
 )
 for chunk in response:
     yield chunk
@@ -752,11 +727,7 @@ class Vellum:
 
 Examples
 --------
-from vellum import (
-    Vellum,
-    WorkflowExpandMetaRequest,
-    WorkflowRequestStringInputRequest,
-)
+from vellum import Vellum, WorkflowRequestStringInputRequest
 
 client = Vellum(
     api_key="YOUR_API_KEY",
@@ -764,19 +735,10 @@ class Vellum:
 response = client.execute_workflow_stream(
     inputs=[
         WorkflowRequestStringInputRequest(
-            name="string",
-            value="string",
+            name="name",
+            value="value",
         )
     ],
-    expand_meta=WorkflowExpandMetaRequest(
-        usage=True,
-    ),
-    workflow_deployment_id="string",
-    workflow_deployment_name="string",
-    release_tag="string",
-    external_id="string",
-    event_types=["NODE"],
-    metadata={"string": {"key": "value"}},
 )
 for chunk in response:
     yield chunk
@@ -1016,39 +978,17 @@ class Vellum:
 
 Examples
 --------
-from vellum import (
-    ChatMessageRequest,
-    GenerateOptionsRequest,
-    GenerateRequest,
-    StringChatMessageContentRequest,
-    Vellum,
-)
+from vellum import GenerateRequest, Vellum
 
 client = Vellum(
     api_key="YOUR_API_KEY",
 )
 response = client.generate_stream(
-    deployment_id="string",
-    deployment_name="string",
     requests=[
         GenerateRequest(
-            input_values={"string": {"key": "value"}},
-            chat_history=[
-                ChatMessageRequest(
-                    text="string",
-                    role="SYSTEM",
-                    content=StringChatMessageContentRequest(
-                        value="string",
-                    ),
-                    source="string",
-                )
-            ],
-            external_ids=["string"],
+            input_values={"key": "value"},
        )
     ],
-    options=GenerateOptionsRequest(
-        logprobs="ALL",
-    ),
 )
 for chunk in response:
     yield chunk
@@ -1785,12 +1725,7 @@ class AsyncVellum:
 --------
 import asyncio
 
-from vellum import (
-    AsyncVellum,
-    PromptDeploymentExpandMetaRequest,
-    RawPromptExecutionOverridesRequest,
-    StringInputRequest,
-)
+from vellum import AsyncVellum, StringInputRequest
 
 client = AsyncVellum(
     api_key="YOUR_API_KEY",
@@ -1801,30 +1736,10 @@ class AsyncVellum:
     response = await client.execute_prompt_stream(
         inputs=[
             StringInputRequest(
-                name="string",
-                value="string",
+                name="name",
+                value="value",
             )
         ],
-        prompt_deployment_id="string",
-        prompt_deployment_name="string",
-        release_tag="string",
-        external_id="string",
-        expand_meta=PromptDeploymentExpandMetaRequest(
-            model_name=True,
-            usage=True,
-            cost=True,
-            finish_reason=True,
-            latency=True,
-            deployment_release_tag=True,
-            prompt_version_id=True,
-        ),
-        raw_overrides=RawPromptExecutionOverridesRequest(
-            body={"string": {"key": "value"}},
-            headers={"string": {"key": "value"}},
-            url="string",
-        ),
-        expand_raw=["string"],
-        metadata={"string": {"key": "value"}},
     )
     async for chunk in response:
         yield chunk
@@ -2106,11 +2021,7 @@ class AsyncVellum:
 --------
 import asyncio
 
-from vellum import (
-    AsyncVellum,
-    WorkflowExpandMetaRequest,
-    WorkflowRequestStringInputRequest,
-)
+from vellum import AsyncVellum, WorkflowRequestStringInputRequest
 
 client = AsyncVellum(
     api_key="YOUR_API_KEY",
@@ -2121,19 +2032,10 @@ class AsyncVellum:
     response = await client.execute_workflow_stream(
         inputs=[
             WorkflowRequestStringInputRequest(
-                name="string",
-                value="string",
+                name="name",
+                value="value",
             )
         ],
-        expand_meta=WorkflowExpandMetaRequest(
-            usage=True,
-        ),
-        workflow_deployment_id="string",
-        workflow_deployment_name="string",
-        release_tag="string",
-        external_id="string",
-        event_types=["NODE"],
-        metadata={"string": {"key": "value"}},
     )
     async for chunk in response:
         yield chunk
@@ -2386,13 +2288,7 @@ class AsyncVellum:
 --------
 import asyncio
 
-from vellum import (
-    AsyncVellum,
-    ChatMessageRequest,
-    GenerateOptionsRequest,
-    GenerateRequest,
-    StringChatMessageContentRequest,
-)
+from vellum import AsyncVellum, GenerateRequest
 
 client = AsyncVellum(
     api_key="YOUR_API_KEY",
@@ -2401,27 +2297,11 @@ class AsyncVellum:
 
 async def main() -> None:
     response = await client.generate_stream(
-        deployment_id="string",
-        deployment_name="string",
         requests=[
             GenerateRequest(
-                input_values={"string": {"key": "value"}},
-                chat_history=[
-                    ChatMessageRequest(
-                        text="string",
-                        role="SYSTEM",
-                        content=StringChatMessageContentRequest(
-                            value="string",
-                        ),
-                        source="string",
-                    )
-                ],
-                external_ids=["string"],
+                input_values={"key": "value"},
             )
         ],
-        options=GenerateOptionsRequest(
-            logprobs="ALL",
-        ),
     )
     async for chunk in response:
         yield chunk
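All four trimmed docstring examples above follow the same pattern: every optional argument is dropped and only the minimal call shape remains. A sketch of the resulting `execute_prompt_stream` call, runnable outside a generator; the values are placeholders, and `prompt_deployment_name` is re-added here on the assumption that the endpoint still needs a deployment identifier, which the trimmed example no longer shows:

```python
from vellum import StringInputRequest, Vellum

client = Vellum(
    api_key="YOUR_API_KEY",  # assumption: a valid Vellum API key
)
response = client.execute_prompt_stream(
    inputs=[
        StringInputRequest(
            name="name",
            value="value",
        )
    ],
    # assumption: one of prompt_deployment_id / prompt_deployment_name
    # (both optional parameters removed from the generated example)
    # still identifies which deployed prompt to execute
    prompt_deployment_name="my-deployment",
)
for chunk in response:
    print(chunk)  # streamed prompt-execution events
```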
vellum/client/core/client_wrapper.py CHANGED
@@ -17,7 +17,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.10.6",
+            "X-Fern-SDK-Version": "0.10.7",
         }
         headers["X_API_KEY"] = self.api_key
         return headers
vellum/client/resources/ad_hoc/client.py CHANGED
@@ -74,82 +74,37 @@ class AdHocClient:
 Examples
 --------
 from vellum import (
-    AdHocExpandMeta,
-    EphemeralPromptCacheConfig,
-    FunctionDefinition,
     JinjaPromptBlock,
     PromptParameters,
     PromptRequestStringInput,
-    PromptSettings,
-    StringVellumValue,
     Vellum,
     VellumVariable,
-    VellumVariableExtensions,
 )
 
 client = Vellum(
     api_key="YOUR_API_KEY",
 )
 response = client.ad_hoc.adhoc_execute_prompt_stream(
-    ml_model="string",
+    ml_model="ml_model",
     input_values=[
         PromptRequestStringInput(
-            key="string",
-            value="string",
+            key="key",
+            value="value",
         )
     ],
     input_variables=[
         VellumVariable(
-            id="string",
-            key="string",
+            id="id",
+            key="key",
             type="STRING",
-            required=True,
-            default=StringVellumValue(
-                value="string",
-            ),
-            extensions=VellumVariableExtensions(
-                color={"key": "value"},
-            ),
         )
     ],
-    parameters=PromptParameters(
-        stop=["string"],
-        temperature=1.1,
-        max_tokens=1,
-        top_p=1.1,
-        top_k=1,
-        frequency_penalty=1.1,
-        presence_penalty=1.1,
-        logit_bias={"string": {"key": "value"}},
-        custom_parameters={"string": {"key": "value"}},
-    ),
-    settings=PromptSettings(
-        timeout=1.1,
-    ),
+    parameters=PromptParameters(),
     blocks=[
         JinjaPromptBlock(
-            state="ENABLED",
-            cache_config=EphemeralPromptCacheConfig(),
-            template="string",
+            template="template",
         )
     ],
-    functions=[
-        FunctionDefinition(
-            state="ENABLED",
-            cache_config=EphemeralPromptCacheConfig(),
-            name="string",
-            description="string",
-            parameters={"string": {"key": "value"}},
-            forced=True,
-            strict=True,
-        )
-    ],
-    expand_meta=AdHocExpandMeta(
-        cost=True,
-        model_name=True,
-        usage=True,
-        finish_reason=True,
-    ),
 )
 for chunk in response:
     yield chunk
@@ -289,17 +244,11 @@ class AsyncAdHocClient:
 import asyncio
 
 from vellum import (
-    AdHocExpandMeta,
     AsyncVellum,
-    EphemeralPromptCacheConfig,
-    FunctionDefinition,
     JinjaPromptBlock,
     PromptParameters,
     PromptRequestStringInput,
-    PromptSettings,
-    StringVellumValue,
     VellumVariable,
-    VellumVariableExtensions,
 )
 
 client = AsyncVellum(
@@ -309,65 +258,26 @@ class AsyncAdHocClient:
 
 async def main() -> None:
     response = await client.ad_hoc.adhoc_execute_prompt_stream(
-        ml_model="string",
+        ml_model="ml_model",
         input_values=[
             PromptRequestStringInput(
-                key="string",
-                value="string",
+                key="key",
+                value="value",
             )
         ],
         input_variables=[
             VellumVariable(
-                id="string",
-                key="string",
+                id="id",
+                key="key",
                 type="STRING",
-                required=True,
-                default=StringVellumValue(
-                    value="string",
-                ),
-                extensions=VellumVariableExtensions(
-                    color={"key": "value"},
-                ),
             )
         ],
-        parameters=PromptParameters(
-            stop=["string"],
-            temperature=1.1,
-            max_tokens=1,
-            top_p=1.1,
-            top_k=1,
-            frequency_penalty=1.1,
-            presence_penalty=1.1,
-            logit_bias={"string": {"key": "value"}},
-            custom_parameters={"string": {"key": "value"}},
-        ),
-        settings=PromptSettings(
-            timeout=1.1,
-        ),
+        parameters=PromptParameters(),
         blocks=[
             JinjaPromptBlock(
-                state="ENABLED",
-                cache_config=EphemeralPromptCacheConfig(),
-                template="string",
+                template="template",
             )
         ],
-        functions=[
-            FunctionDefinition(
-                state="ENABLED",
-                cache_config=EphemeralPromptCacheConfig(),
-                name="string",
-                description="string",
-                parameters={"string": {"key": "value"}},
-                forced=True,
-                strict=True,
-            )
-        ],
-        expand_meta=AdHocExpandMeta(
-            cost=True,
-            model_name=True,
-            usage=True,
-            finish_reason=True,
-        ),
     )
     async for chunk in response:
         yield chunk
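The async client's example is trimmed the same way. A compact, runnable sketch of the post-change call, condensed from the context and `+` lines above (placeholder values; `print` replaces the docstring's `yield chunk` so the snippet runs standalone):

```python
import asyncio

from vellum import (
    AsyncVellum,
    JinjaPromptBlock,
    PromptParameters,
    PromptRequestStringInput,
    VellumVariable,
)

client = AsyncVellum(
    api_key="YOUR_API_KEY",  # assumption: a valid Vellum API key
)


async def main() -> None:
    response = await client.ad_hoc.adhoc_execute_prompt_stream(
        ml_model="ml_model",
        input_values=[
            PromptRequestStringInput(key="key", value="value"),
        ],
        input_variables=[
            VellumVariable(id="id", key="key", type="STRING"),
        ],
        parameters=PromptParameters(),
        blocks=[
            JinjaPromptBlock(template="template"),
        ],
    )
    async for chunk in response:
        print(chunk)  # streamed prompt-execution events


asyncio.run(main())
```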
vellum/client/resources/metric_definitions/client.py CHANGED
@@ -10,6 +10,7 @@ from ...core.serialization import convert_and_respect_annotation_metadata
 from ...core.pydantic_utilities import parse_obj_as
 from json.decoder import JSONDecodeError
 from ...core.api_error import ApiError
+from ...types.metric_definition_history_item import MetricDefinitionHistoryItem
 from ...core.client_wrapper import AsyncClientWrapper
 
 # this is used as the default value for optional parameters
@@ -92,6 +93,58 @@ class MetricDefinitionsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def metric_definition_history_item_retrieve(
+        self, history_id_or_release_tag: str, id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> MetricDefinitionHistoryItem:
+        """
+        Parameters
+        ----------
+        history_id_or_release_tag : str
+            Either the UUID of Metric Definition History Item you'd like to retrieve, or the name of a Release Tag that's pointing to the Metric Definition History Item you'd like to retrieve.
+
+        id : str
+            A UUID string identifying this metric definition.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        MetricDefinitionHistoryItem
+
+
+        Examples
+        --------
+        from vellum import Vellum
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.metric_definitions.metric_definition_history_item_retrieve(
+            history_id_or_release_tag="history_id_or_release_tag",
+            id="id",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"v1/metric-definitions/{jsonable_encoder(id)}/history/{jsonable_encoder(history_id_or_release_tag)}",
+            base_url=self._client_wrapper.get_environment().default,
+            method="GET",
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    MetricDefinitionHistoryItem,
+                    parse_obj_as(
+                        type_=MetricDefinitionHistoryItem,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
 
 class AsyncMetricDefinitionsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -176,3 +229,63 @@ class AsyncMetricDefinitionsClient:
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def metric_definition_history_item_retrieve(
+        self, history_id_or_release_tag: str, id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> MetricDefinitionHistoryItem:
+        """
+        Parameters
+        ----------
+        history_id_or_release_tag : str
+            Either the UUID of Metric Definition History Item you'd like to retrieve, or the name of a Release Tag that's pointing to the Metric Definition History Item you'd like to retrieve.
+
+        id : str
+            A UUID string identifying this metric definition.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        MetricDefinitionHistoryItem
+
+
+        Examples
+        --------
+        import asyncio
+
+        from vellum import AsyncVellum
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.metric_definitions.metric_definition_history_item_retrieve(
+                history_id_or_release_tag="history_id_or_release_tag",
+                id="id",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"v1/metric-definitions/{jsonable_encoder(id)}/history/{jsonable_encoder(history_id_or_release_tag)}",
+            base_url=self._client_wrapper.get_environment().default,
+            method="GET",
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    MetricDefinitionHistoryItem,
+                    parse_obj_as(
+                        type_=MetricDefinitionHistoryItem,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
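
This new `metric_definition_history_item_retrieve` endpoint (sync and async) is the main new API surface in 0.10.7. A short usage sketch based on the docstrings above; the release tag name and UUID are illustrative placeholders, not documented values:

```python
from vellum import Vellum

client = Vellum(
    api_key="YOUR_API_KEY",  # assumption: a valid Vellum API key
)

# retrieve the history item that a release tag currently points to;
# history_id_or_release_tag also accepts the history item's UUID directly
history_item = client.metric_definitions.metric_definition_history_item_retrieve(
    history_id_or_release_tag="LATEST",  # illustrative tag name
    id="11111111-2222-3333-4444-555555555555",  # the metric definition's UUID
)
print(history_item)
```

Note the argument order: `history_id_or_release_tag` comes first in the signature, while the URL interpolates `id` first (`v1/metric-definitions/{id}/history/{history_id_or_release_tag}`).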