vellum-ai 0.3.12__py3-none-any.whl → 0.3.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. vellum/__init__.py +86 -0
  2. vellum/client.py +3 -0
  3. vellum/core/client_wrapper.py +1 -1
  4. vellum/resources/__init__.py +2 -0
  5. vellum/resources/test_suite_runs/__init__.py +2 -0
  6. vellum/resources/test_suite_runs/client.py +223 -0
  7. vellum/types/__init__.py +98 -0
  8. vellum/types/paginated_test_suite_run_execution_list.py +32 -0
  9. vellum/types/test_suite_run_deployment_release_tag_exec_config.py +36 -0
  10. vellum/types/test_suite_run_deployment_release_tag_exec_config_data.py +31 -0
  11. vellum/types/test_suite_run_deployment_release_tag_exec_config_data_request.py +31 -0
  12. vellum/types/test_suite_run_deployment_release_tag_exec_config_request.py +38 -0
  13. vellum/types/test_suite_run_deployment_release_tag_exec_config_type_enum.py +5 -0
  14. vellum/types/test_suite_run_exec_config.py +33 -0
  15. vellum/types/test_suite_run_exec_config_request.py +33 -0
  16. vellum/types/test_suite_run_execution.py +33 -0
  17. vellum/types/test_suite_run_execution_chat_history_output.py +30 -0
  18. vellum/types/test_suite_run_execution_error_output.py +30 -0
  19. vellum/types/test_suite_run_execution_json_output.py +29 -0
  20. vellum/types/test_suite_run_execution_metric_result.py +30 -0
  21. vellum/types/test_suite_run_execution_number_output.py +29 -0
  22. vellum/types/test_suite_run_execution_output.py +78 -0
  23. vellum/types/test_suite_run_execution_search_results_output.py +30 -0
  24. vellum/types/test_suite_run_execution_string_output.py +29 -0
  25. vellum/types/test_suite_run_metric_error_output.py +34 -0
  26. vellum/types/test_suite_run_metric_error_output_type_enum.py +5 -0
  27. vellum/types/test_suite_run_metric_number_output.py +33 -0
  28. vellum/types/test_suite_run_metric_number_output_type_enum.py +5 -0
  29. vellum/types/test_suite_run_metric_output.py +31 -0
  30. vellum/types/test_suite_run_read.py +47 -0
  31. vellum/types/test_suite_run_state.py +41 -0
  32. vellum/types/test_suite_run_test_suite.py +30 -0
  33. vellum/types/test_suite_run_workflow_release_tag_exec_config.py +36 -0
  34. vellum/types/test_suite_run_workflow_release_tag_exec_config_data.py +33 -0
  35. vellum/types/test_suite_run_workflow_release_tag_exec_config_data_request.py +33 -0
  36. vellum/types/test_suite_run_workflow_release_tag_exec_config_request.py +38 -0
  37. vellum/types/test_suite_run_workflow_release_tag_exec_config_type_enum.py +5 -0
  38. vellum/types/workflow_output_array.py +1 -1
  39. vellum/types/workflow_output_chat_history.py +1 -1
  40. vellum/types/workflow_output_error.py +1 -1
  41. vellum/types/workflow_output_function_call.py +1 -1
  42. vellum/types/workflow_output_image.py +1 -1
  43. vellum/types/workflow_output_json.py +1 -1
  44. vellum/types/workflow_output_number.py +1 -1
  45. vellum/types/workflow_output_search_results.py +1 -1
  46. vellum/types/workflow_output_string.py +1 -1
  47. {vellum_ai-0.3.12.dist-info → vellum_ai-0.3.14.dist-info}/METADATA +1 -1
  48. {vellum_ai-0.3.12.dist-info → vellum_ai-0.3.14.dist-info}/RECORD +50 -18
  49. {vellum_ai-0.3.12.dist-info → vellum_ai-0.3.14.dist-info}/LICENSE +0 -0
  50. {vellum_ai-0.3.12.dist-info → vellum_ai-0.3.14.dist-info}/WHEEL +0 -0
vellum/__init__.py CHANGED
@@ -203,6 +203,7 @@ from .types import (
203
203
  PaginatedSlimDeploymentReadList,
204
204
  PaginatedSlimDocumentList,
205
205
  PaginatedSlimWorkflowDeploymentList,
206
+ PaginatedTestSuiteRunExecutionList,
206
207
  ProcessingFailureReasonEnum,
207
208
  ProcessingStateEnum,
208
209
  PromptDeploymentExpandMetaRequestRequest,
@@ -328,6 +329,47 @@ from .types import (
328
329
  TestCaseVariableValue_Number,
329
330
  TestCaseVariableValue_SearchResults,
330
331
  TestCaseVariableValue_String,
332
+ TestSuiteRunDeploymentReleaseTagExecConfig,
333
+ TestSuiteRunDeploymentReleaseTagExecConfigData,
334
+ TestSuiteRunDeploymentReleaseTagExecConfigDataRequest,
335
+ TestSuiteRunDeploymentReleaseTagExecConfigRequest,
336
+ TestSuiteRunDeploymentReleaseTagExecConfigTypeEnum,
337
+ TestSuiteRunExecConfig,
338
+ TestSuiteRunExecConfigRequest,
339
+ TestSuiteRunExecConfigRequest_DeploymentReleaseTag,
340
+ TestSuiteRunExecConfigRequest_WorkflowReleaseTag,
341
+ TestSuiteRunExecConfig_DeploymentReleaseTag,
342
+ TestSuiteRunExecConfig_WorkflowReleaseTag,
343
+ TestSuiteRunExecution,
344
+ TestSuiteRunExecutionChatHistoryOutput,
345
+ TestSuiteRunExecutionErrorOutput,
346
+ TestSuiteRunExecutionJsonOutput,
347
+ TestSuiteRunExecutionMetricResult,
348
+ TestSuiteRunExecutionNumberOutput,
349
+ TestSuiteRunExecutionOutput,
350
+ TestSuiteRunExecutionOutput_ChatHistory,
351
+ TestSuiteRunExecutionOutput_Error,
352
+ TestSuiteRunExecutionOutput_Json,
353
+ TestSuiteRunExecutionOutput_Number,
354
+ TestSuiteRunExecutionOutput_SearchResults,
355
+ TestSuiteRunExecutionOutput_String,
356
+ TestSuiteRunExecutionSearchResultsOutput,
357
+ TestSuiteRunExecutionStringOutput,
358
+ TestSuiteRunMetricErrorOutput,
359
+ TestSuiteRunMetricErrorOutputTypeEnum,
360
+ TestSuiteRunMetricNumberOutput,
361
+ TestSuiteRunMetricNumberOutputTypeEnum,
362
+ TestSuiteRunMetricOutput,
363
+ TestSuiteRunMetricOutput_Error,
364
+ TestSuiteRunMetricOutput_Number,
365
+ TestSuiteRunRead,
366
+ TestSuiteRunState,
367
+ TestSuiteRunTestSuite,
368
+ TestSuiteRunWorkflowReleaseTagExecConfig,
369
+ TestSuiteRunWorkflowReleaseTagExecConfigData,
370
+ TestSuiteRunWorkflowReleaseTagExecConfigDataRequest,
371
+ TestSuiteRunWorkflowReleaseTagExecConfigRequest,
372
+ TestSuiteRunWorkflowReleaseTagExecConfigTypeEnum,
331
373
  TestSuiteTestCase,
332
374
  UploadDocumentErrorResponse,
333
375
  UploadDocumentResponse,
@@ -424,6 +466,7 @@ from .resources import (
424
466
  model_versions,
425
467
  registered_prompts,
426
468
  sandboxes,
469
+ test_suite_runs,
427
470
  test_suites,
428
471
  workflow_deployments,
429
472
  )
@@ -639,6 +682,7 @@ __all__ = [
639
682
  "PaginatedSlimDeploymentReadList",
640
683
  "PaginatedSlimDocumentList",
641
684
  "PaginatedSlimWorkflowDeploymentList",
685
+ "PaginatedTestSuiteRunExecutionList",
642
686
  "ProcessingFailureReasonEnum",
643
687
  "ProcessingStateEnum",
644
688
  "PromptDeploymentExpandMetaRequestRequest",
@@ -764,6 +808,47 @@ __all__ = [
764
808
  "TestCaseVariableValue_Number",
765
809
  "TestCaseVariableValue_SearchResults",
766
810
  "TestCaseVariableValue_String",
811
+ "TestSuiteRunDeploymentReleaseTagExecConfig",
812
+ "TestSuiteRunDeploymentReleaseTagExecConfigData",
813
+ "TestSuiteRunDeploymentReleaseTagExecConfigDataRequest",
814
+ "TestSuiteRunDeploymentReleaseTagExecConfigRequest",
815
+ "TestSuiteRunDeploymentReleaseTagExecConfigTypeEnum",
816
+ "TestSuiteRunExecConfig",
817
+ "TestSuiteRunExecConfigRequest",
818
+ "TestSuiteRunExecConfigRequest_DeploymentReleaseTag",
819
+ "TestSuiteRunExecConfigRequest_WorkflowReleaseTag",
820
+ "TestSuiteRunExecConfig_DeploymentReleaseTag",
821
+ "TestSuiteRunExecConfig_WorkflowReleaseTag",
822
+ "TestSuiteRunExecution",
823
+ "TestSuiteRunExecutionChatHistoryOutput",
824
+ "TestSuiteRunExecutionErrorOutput",
825
+ "TestSuiteRunExecutionJsonOutput",
826
+ "TestSuiteRunExecutionMetricResult",
827
+ "TestSuiteRunExecutionNumberOutput",
828
+ "TestSuiteRunExecutionOutput",
829
+ "TestSuiteRunExecutionOutput_ChatHistory",
830
+ "TestSuiteRunExecutionOutput_Error",
831
+ "TestSuiteRunExecutionOutput_Json",
832
+ "TestSuiteRunExecutionOutput_Number",
833
+ "TestSuiteRunExecutionOutput_SearchResults",
834
+ "TestSuiteRunExecutionOutput_String",
835
+ "TestSuiteRunExecutionSearchResultsOutput",
836
+ "TestSuiteRunExecutionStringOutput",
837
+ "TestSuiteRunMetricErrorOutput",
838
+ "TestSuiteRunMetricErrorOutputTypeEnum",
839
+ "TestSuiteRunMetricNumberOutput",
840
+ "TestSuiteRunMetricNumberOutputTypeEnum",
841
+ "TestSuiteRunMetricOutput",
842
+ "TestSuiteRunMetricOutput_Error",
843
+ "TestSuiteRunMetricOutput_Number",
844
+ "TestSuiteRunRead",
845
+ "TestSuiteRunState",
846
+ "TestSuiteRunTestSuite",
847
+ "TestSuiteRunWorkflowReleaseTagExecConfig",
848
+ "TestSuiteRunWorkflowReleaseTagExecConfigData",
849
+ "TestSuiteRunWorkflowReleaseTagExecConfigDataRequest",
850
+ "TestSuiteRunWorkflowReleaseTagExecConfigRequest",
851
+ "TestSuiteRunWorkflowReleaseTagExecConfigTypeEnum",
767
852
  "TestSuiteTestCase",
768
853
  "UploadDocumentErrorResponse",
769
854
  "UploadDocumentResponse",
@@ -856,6 +941,7 @@ __all__ = [
856
941
  "model_versions",
857
942
  "registered_prompts",
858
943
  "sandboxes",
944
+ "test_suite_runs",
859
945
  "test_suites",
860
946
  "workflow_deployments",
861
947
  ]
vellum/client.py CHANGED
@@ -22,6 +22,7 @@ from .resources.folder_entities.client import AsyncFolderEntitiesClient, FolderE
22
22
  from .resources.model_versions.client import AsyncModelVersionsClient, ModelVersionsClient
23
23
  from .resources.registered_prompts.client import AsyncRegisteredPromptsClient, RegisteredPromptsClient
24
24
  from .resources.sandboxes.client import AsyncSandboxesClient, SandboxesClient
25
+ from .resources.test_suite_runs.client import AsyncTestSuiteRunsClient, TestSuiteRunsClient
25
26
  from .resources.test_suites.client import AsyncTestSuitesClient, TestSuitesClient
26
27
  from .resources.workflow_deployments.client import AsyncWorkflowDeploymentsClient, WorkflowDeploymentsClient
27
28
  from .types.execute_prompt_event import ExecutePromptEvent
@@ -72,6 +73,7 @@ class Vellum:
72
73
  self.model_versions = ModelVersionsClient(client_wrapper=self._client_wrapper)
73
74
  self.registered_prompts = RegisteredPromptsClient(client_wrapper=self._client_wrapper)
74
75
  self.sandboxes = SandboxesClient(client_wrapper=self._client_wrapper)
76
+ self.test_suite_runs = TestSuiteRunsClient(client_wrapper=self._client_wrapper)
75
77
  self.test_suites = TestSuitesClient(client_wrapper=self._client_wrapper)
76
78
  self.workflow_deployments = WorkflowDeploymentsClient(client_wrapper=self._client_wrapper)
77
79
 
@@ -655,6 +657,7 @@ class AsyncVellum:
655
657
  self.model_versions = AsyncModelVersionsClient(client_wrapper=self._client_wrapper)
656
658
  self.registered_prompts = AsyncRegisteredPromptsClient(client_wrapper=self._client_wrapper)
657
659
  self.sandboxes = AsyncSandboxesClient(client_wrapper=self._client_wrapper)
660
+ self.test_suite_runs = AsyncTestSuiteRunsClient(client_wrapper=self._client_wrapper)
658
661
  self.test_suites = AsyncTestSuitesClient(client_wrapper=self._client_wrapper)
659
662
  self.workflow_deployments = AsyncWorkflowDeploymentsClient(client_wrapper=self._client_wrapper)
660
663
 
@@ -16,7 +16,7 @@ class BaseClientWrapper:
16
16
  headers: typing.Dict[str, str] = {
17
17
  "X-Fern-Language": "Python",
18
18
  "X-Fern-SDK-Name": "vellum-ai",
19
- "X-Fern-SDK-Version": "0.3.12",
19
+ "X-Fern-SDK-Version": "0.3.14",
20
20
  }
21
21
  headers["X_API_KEY"] = self.api_key
22
22
  return headers
@@ -8,6 +8,7 @@ from . import (
8
8
  model_versions,
9
9
  registered_prompts,
10
10
  sandboxes,
11
+ test_suite_runs,
11
12
  test_suites,
12
13
  workflow_deployments,
13
14
  )
@@ -26,6 +27,7 @@ __all__ = [
26
27
  "model_versions",
27
28
  "registered_prompts",
28
29
  "sandboxes",
30
+ "test_suite_runs",
29
31
  "test_suites",
30
32
  "workflow_deployments",
31
33
  ]
@@ -0,0 +1,2 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
@@ -0,0 +1,223 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+ import urllib.parse
5
+ from json.decoder import JSONDecodeError
6
+
7
+ from ...core.api_error import ApiError
8
+ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
9
+ from ...core.jsonable_encoder import jsonable_encoder
10
+ from ...core.remove_none_from_dict import remove_none_from_dict
11
+ from ...types.paginated_test_suite_run_execution_list import PaginatedTestSuiteRunExecutionList
12
+ from ...types.test_suite_run_exec_config_request import TestSuiteRunExecConfigRequest
13
+ from ...types.test_suite_run_read import TestSuiteRunRead
14
+
15
+ try:
16
+ import pydantic.v1 as pydantic # type: ignore
17
+ except ImportError:
18
+ import pydantic # type: ignore
19
+
20
+ # this is used as the default value for optional parameters
21
+ OMIT = typing.cast(typing.Any, ...)
22
+
23
+
24
+ class TestSuiteRunsClient:
25
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
26
+ self._client_wrapper = client_wrapper
27
+
28
+ def create(
29
+ self, *, test_suite_id: typing.Optional[str] = OMIT, exec_config: TestSuiteRunExecConfigRequest
30
+ ) -> TestSuiteRunRead:
31
+ """
32
+ Trigger a Test Suite and create a new Test Suite Run
33
+
34
+ Parameters:
35
+ - test_suite_id: typing.Optional[str]. The ID of the Test Suite to run
36
+
37
+ - exec_config: TestSuiteRunExecConfigRequest. Configuration that defines how the Test Suite should be run
38
+ """
39
+ _request: typing.Dict[str, typing.Any] = {"exec_config": exec_config}
40
+ if test_suite_id is not OMIT:
41
+ _request["test_suite_id"] = test_suite_id
42
+ _response = self._client_wrapper.httpx_client.request(
43
+ "POST",
44
+ urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", "v1/test-suite-runs"),
45
+ json=jsonable_encoder(_request),
46
+ headers=self._client_wrapper.get_headers(),
47
+ timeout=None,
48
+ )
49
+ if 200 <= _response.status_code < 300:
50
+ return pydantic.parse_obj_as(TestSuiteRunRead, _response.json()) # type: ignore
51
+ try:
52
+ _response_json = _response.json()
53
+ except JSONDecodeError:
54
+ raise ApiError(status_code=_response.status_code, body=_response.text)
55
+ raise ApiError(status_code=_response.status_code, body=_response_json)
56
+
57
+ def retrieve(self, id: str) -> TestSuiteRunRead:
58
+ """
59
+ Retrieve a specific Test Suite Run by ID
60
+
61
+ Parameters:
62
+ - id: str. A UUID string identifying this test suite run.
63
+ ---
64
+ from vellum.client import Vellum
65
+
66
+ client = Vellum(
67
+ api_key="YOUR_API_KEY",
68
+ )
69
+ client.test_suite_runs.retrieve(
70
+ id="id",
71
+ )
72
+ """
73
+ _response = self._client_wrapper.httpx_client.request(
74
+ "GET",
75
+ urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/test-suite-runs/{id}"),
76
+ headers=self._client_wrapper.get_headers(),
77
+ timeout=None,
78
+ )
79
+ if 200 <= _response.status_code < 300:
80
+ return pydantic.parse_obj_as(TestSuiteRunRead, _response.json()) # type: ignore
81
+ try:
82
+ _response_json = _response.json()
83
+ except JSONDecodeError:
84
+ raise ApiError(status_code=_response.status_code, body=_response.text)
85
+ raise ApiError(status_code=_response.status_code, body=_response_json)
86
+
87
+ def list_test_suite_run_executions(
88
+ self, id: str, *, limit: typing.Optional[int] = None, offset: typing.Optional[int] = None
89
+ ) -> PaginatedTestSuiteRunExecutionList:
90
+ """
91
+ Parameters:
92
+ - id: str. A UUID string identifying this test suite run.
93
+
94
+ - limit: typing.Optional[int]. Number of results to return per page.
95
+
96
+ - offset: typing.Optional[int]. The initial index from which to return the results.
97
+ ---
98
+ from vellum.client import Vellum
99
+
100
+ client = Vellum(
101
+ api_key="YOUR_API_KEY",
102
+ )
103
+ client.test_suite_runs.list_test_suite_run_executions(
104
+ id="id",
105
+ )
106
+ """
107
+ _response = self._client_wrapper.httpx_client.request(
108
+ "GET",
109
+ urllib.parse.urljoin(
110
+ f"{self._client_wrapper.get_environment().default}/", f"v1/test-suite-runs/{id}/executions"
111
+ ),
112
+ params=remove_none_from_dict({"limit": limit, "offset": offset}),
113
+ headers=self._client_wrapper.get_headers(),
114
+ timeout=None,
115
+ )
116
+ if 200 <= _response.status_code < 300:
117
+ return pydantic.parse_obj_as(PaginatedTestSuiteRunExecutionList, _response.json()) # type: ignore
118
+ try:
119
+ _response_json = _response.json()
120
+ except JSONDecodeError:
121
+ raise ApiError(status_code=_response.status_code, body=_response.text)
122
+ raise ApiError(status_code=_response.status_code, body=_response_json)
123
+
124
+
125
+ class AsyncTestSuiteRunsClient:
126
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
127
+ self._client_wrapper = client_wrapper
128
+
129
+ async def create(
130
+ self, *, test_suite_id: typing.Optional[str] = OMIT, exec_config: TestSuiteRunExecConfigRequest
131
+ ) -> TestSuiteRunRead:
132
+ """
133
+ Trigger a Test Suite and create a new Test Suite Run
134
+
135
+ Parameters:
136
+ - test_suite_id: typing.Optional[str]. The ID of the Test Suite to run
137
+
138
+ - exec_config: TestSuiteRunExecConfigRequest. Configuration that defines how the Test Suite should be run
139
+ """
140
+ _request: typing.Dict[str, typing.Any] = {"exec_config": exec_config}
141
+ if test_suite_id is not OMIT:
142
+ _request["test_suite_id"] = test_suite_id
143
+ _response = await self._client_wrapper.httpx_client.request(
144
+ "POST",
145
+ urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", "v1/test-suite-runs"),
146
+ json=jsonable_encoder(_request),
147
+ headers=self._client_wrapper.get_headers(),
148
+ timeout=None,
149
+ )
150
+ if 200 <= _response.status_code < 300:
151
+ return pydantic.parse_obj_as(TestSuiteRunRead, _response.json()) # type: ignore
152
+ try:
153
+ _response_json = _response.json()
154
+ except JSONDecodeError:
155
+ raise ApiError(status_code=_response.status_code, body=_response.text)
156
+ raise ApiError(status_code=_response.status_code, body=_response_json)
157
+
158
+ async def retrieve(self, id: str) -> TestSuiteRunRead:
159
+ """
160
+ Retrieve a specific Test Suite Run by ID
161
+
162
+ Parameters:
163
+ - id: str. A UUID string identifying this test suite run.
164
+ ---
165
+ from vellum.client import AsyncVellum
166
+
167
+ client = AsyncVellum(
168
+ api_key="YOUR_API_KEY",
169
+ )
170
+ await client.test_suite_runs.retrieve(
171
+ id="id",
172
+ )
173
+ """
174
+ _response = await self._client_wrapper.httpx_client.request(
175
+ "GET",
176
+ urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/test-suite-runs/{id}"),
177
+ headers=self._client_wrapper.get_headers(),
178
+ timeout=None,
179
+ )
180
+ if 200 <= _response.status_code < 300:
181
+ return pydantic.parse_obj_as(TestSuiteRunRead, _response.json()) # type: ignore
182
+ try:
183
+ _response_json = _response.json()
184
+ except JSONDecodeError:
185
+ raise ApiError(status_code=_response.status_code, body=_response.text)
186
+ raise ApiError(status_code=_response.status_code, body=_response_json)
187
+
188
+ async def list_test_suite_run_executions(
189
+ self, id: str, *, limit: typing.Optional[int] = None, offset: typing.Optional[int] = None
190
+ ) -> PaginatedTestSuiteRunExecutionList:
191
+ """
192
+ Parameters:
193
+ - id: str. A UUID string identifying this test suite run.
194
+
195
+ - limit: typing.Optional[int]. Number of results to return per page.
196
+
197
+ - offset: typing.Optional[int]. The initial index from which to return the results.
198
+ ---
199
+ from vellum.client import AsyncVellum
200
+
201
+ client = AsyncVellum(
202
+ api_key="YOUR_API_KEY",
203
+ )
204
+ await client.test_suite_runs.list_test_suite_run_executions(
205
+ id="id",
206
+ )
207
+ """
208
+ _response = await self._client_wrapper.httpx_client.request(
209
+ "GET",
210
+ urllib.parse.urljoin(
211
+ f"{self._client_wrapper.get_environment().default}/", f"v1/test-suite-runs/{id}/executions"
212
+ ),
213
+ params=remove_none_from_dict({"limit": limit, "offset": offset}),
214
+ headers=self._client_wrapper.get_headers(),
215
+ timeout=None,
216
+ )
217
+ if 200 <= _response.status_code < 300:
218
+ return pydantic.parse_obj_as(PaginatedTestSuiteRunExecutionList, _response.json()) # type: ignore
219
+ try:
220
+ _response_json = _response.json()
221
+ except JSONDecodeError:
222
+ raise ApiError(status_code=_response.status_code, body=_response.text)
223
+ raise ApiError(status_code=_response.status_code, body=_response_json)
vellum/types/__init__.py CHANGED
@@ -226,6 +226,7 @@ from .paginated_document_index_read_list import PaginatedDocumentIndexReadList
226
226
  from .paginated_slim_deployment_read_list import PaginatedSlimDeploymentReadList
227
227
  from .paginated_slim_document_list import PaginatedSlimDocumentList
228
228
  from .paginated_slim_workflow_deployment_list import PaginatedSlimWorkflowDeploymentList
229
+ from .paginated_test_suite_run_execution_list import PaginatedTestSuiteRunExecutionList
229
230
  from .processing_failure_reason_enum import ProcessingFailureReasonEnum
230
231
  from .processing_state_enum import ProcessingStateEnum
231
232
  from .prompt_deployment_expand_meta_request_request import PromptDeploymentExpandMetaRequestRequest
@@ -363,6 +364,61 @@ from .test_case_variable_value import (
363
364
  TestCaseVariableValue_SearchResults,
364
365
  TestCaseVariableValue_String,
365
366
  )
367
+ from .test_suite_run_deployment_release_tag_exec_config import TestSuiteRunDeploymentReleaseTagExecConfig
368
+ from .test_suite_run_deployment_release_tag_exec_config_data import TestSuiteRunDeploymentReleaseTagExecConfigData
369
+ from .test_suite_run_deployment_release_tag_exec_config_data_request import (
370
+ TestSuiteRunDeploymentReleaseTagExecConfigDataRequest,
371
+ )
372
+ from .test_suite_run_deployment_release_tag_exec_config_request import TestSuiteRunDeploymentReleaseTagExecConfigRequest
373
+ from .test_suite_run_deployment_release_tag_exec_config_type_enum import (
374
+ TestSuiteRunDeploymentReleaseTagExecConfigTypeEnum,
375
+ )
376
+ from .test_suite_run_exec_config import (
377
+ TestSuiteRunExecConfig,
378
+ TestSuiteRunExecConfig_DeploymentReleaseTag,
379
+ TestSuiteRunExecConfig_WorkflowReleaseTag,
380
+ )
381
+ from .test_suite_run_exec_config_request import (
382
+ TestSuiteRunExecConfigRequest,
383
+ TestSuiteRunExecConfigRequest_DeploymentReleaseTag,
384
+ TestSuiteRunExecConfigRequest_WorkflowReleaseTag,
385
+ )
386
+ from .test_suite_run_execution import TestSuiteRunExecution
387
+ from .test_suite_run_execution_chat_history_output import TestSuiteRunExecutionChatHistoryOutput
388
+ from .test_suite_run_execution_error_output import TestSuiteRunExecutionErrorOutput
389
+ from .test_suite_run_execution_json_output import TestSuiteRunExecutionJsonOutput
390
+ from .test_suite_run_execution_metric_result import TestSuiteRunExecutionMetricResult
391
+ from .test_suite_run_execution_number_output import TestSuiteRunExecutionNumberOutput
392
+ from .test_suite_run_execution_output import (
393
+ TestSuiteRunExecutionOutput,
394
+ TestSuiteRunExecutionOutput_ChatHistory,
395
+ TestSuiteRunExecutionOutput_Error,
396
+ TestSuiteRunExecutionOutput_Json,
397
+ TestSuiteRunExecutionOutput_Number,
398
+ TestSuiteRunExecutionOutput_SearchResults,
399
+ TestSuiteRunExecutionOutput_String,
400
+ )
401
+ from .test_suite_run_execution_search_results_output import TestSuiteRunExecutionSearchResultsOutput
402
+ from .test_suite_run_execution_string_output import TestSuiteRunExecutionStringOutput
403
+ from .test_suite_run_metric_error_output import TestSuiteRunMetricErrorOutput
404
+ from .test_suite_run_metric_error_output_type_enum import TestSuiteRunMetricErrorOutputTypeEnum
405
+ from .test_suite_run_metric_number_output import TestSuiteRunMetricNumberOutput
406
+ from .test_suite_run_metric_number_output_type_enum import TestSuiteRunMetricNumberOutputTypeEnum
407
+ from .test_suite_run_metric_output import (
408
+ TestSuiteRunMetricOutput,
409
+ TestSuiteRunMetricOutput_Error,
410
+ TestSuiteRunMetricOutput_Number,
411
+ )
412
+ from .test_suite_run_read import TestSuiteRunRead
413
+ from .test_suite_run_state import TestSuiteRunState
414
+ from .test_suite_run_test_suite import TestSuiteRunTestSuite
415
+ from .test_suite_run_workflow_release_tag_exec_config import TestSuiteRunWorkflowReleaseTagExecConfig
416
+ from .test_suite_run_workflow_release_tag_exec_config_data import TestSuiteRunWorkflowReleaseTagExecConfigData
417
+ from .test_suite_run_workflow_release_tag_exec_config_data_request import (
418
+ TestSuiteRunWorkflowReleaseTagExecConfigDataRequest,
419
+ )
420
+ from .test_suite_run_workflow_release_tag_exec_config_request import TestSuiteRunWorkflowReleaseTagExecConfigRequest
421
+ from .test_suite_run_workflow_release_tag_exec_config_type_enum import TestSuiteRunWorkflowReleaseTagExecConfigTypeEnum
366
422
  from .test_suite_test_case import TestSuiteTestCase
367
423
  from .upload_document_error_response import UploadDocumentErrorResponse
368
424
  from .upload_document_response import UploadDocumentResponse
@@ -658,6 +714,7 @@ __all__ = [
658
714
  "PaginatedSlimDeploymentReadList",
659
715
  "PaginatedSlimDocumentList",
660
716
  "PaginatedSlimWorkflowDeploymentList",
717
+ "PaginatedTestSuiteRunExecutionList",
661
718
  "ProcessingFailureReasonEnum",
662
719
  "ProcessingStateEnum",
663
720
  "PromptDeploymentExpandMetaRequestRequest",
@@ -783,6 +840,47 @@ __all__ = [
783
840
  "TestCaseVariableValue_Number",
784
841
  "TestCaseVariableValue_SearchResults",
785
842
  "TestCaseVariableValue_String",
843
+ "TestSuiteRunDeploymentReleaseTagExecConfig",
844
+ "TestSuiteRunDeploymentReleaseTagExecConfigData",
845
+ "TestSuiteRunDeploymentReleaseTagExecConfigDataRequest",
846
+ "TestSuiteRunDeploymentReleaseTagExecConfigRequest",
847
+ "TestSuiteRunDeploymentReleaseTagExecConfigTypeEnum",
848
+ "TestSuiteRunExecConfig",
849
+ "TestSuiteRunExecConfigRequest",
850
+ "TestSuiteRunExecConfigRequest_DeploymentReleaseTag",
851
+ "TestSuiteRunExecConfigRequest_WorkflowReleaseTag",
852
+ "TestSuiteRunExecConfig_DeploymentReleaseTag",
853
+ "TestSuiteRunExecConfig_WorkflowReleaseTag",
854
+ "TestSuiteRunExecution",
855
+ "TestSuiteRunExecutionChatHistoryOutput",
856
+ "TestSuiteRunExecutionErrorOutput",
857
+ "TestSuiteRunExecutionJsonOutput",
858
+ "TestSuiteRunExecutionMetricResult",
859
+ "TestSuiteRunExecutionNumberOutput",
860
+ "TestSuiteRunExecutionOutput",
861
+ "TestSuiteRunExecutionOutput_ChatHistory",
862
+ "TestSuiteRunExecutionOutput_Error",
863
+ "TestSuiteRunExecutionOutput_Json",
864
+ "TestSuiteRunExecutionOutput_Number",
865
+ "TestSuiteRunExecutionOutput_SearchResults",
866
+ "TestSuiteRunExecutionOutput_String",
867
+ "TestSuiteRunExecutionSearchResultsOutput",
868
+ "TestSuiteRunExecutionStringOutput",
869
+ "TestSuiteRunMetricErrorOutput",
870
+ "TestSuiteRunMetricErrorOutputTypeEnum",
871
+ "TestSuiteRunMetricNumberOutput",
872
+ "TestSuiteRunMetricNumberOutputTypeEnum",
873
+ "TestSuiteRunMetricOutput",
874
+ "TestSuiteRunMetricOutput_Error",
875
+ "TestSuiteRunMetricOutput_Number",
876
+ "TestSuiteRunRead",
877
+ "TestSuiteRunState",
878
+ "TestSuiteRunTestSuite",
879
+ "TestSuiteRunWorkflowReleaseTagExecConfig",
880
+ "TestSuiteRunWorkflowReleaseTagExecConfigData",
881
+ "TestSuiteRunWorkflowReleaseTagExecConfigDataRequest",
882
+ "TestSuiteRunWorkflowReleaseTagExecConfigRequest",
883
+ "TestSuiteRunWorkflowReleaseTagExecConfigTypeEnum",
786
884
  "TestSuiteTestCase",
787
885
  "UploadDocumentErrorResponse",
788
886
  "UploadDocumentResponse",
@@ -0,0 +1,32 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ..core.datetime_utils import serialize_datetime
7
+ from .test_suite_run_execution import TestSuiteRunExecution
8
+
9
+ try:
10
+ import pydantic.v1 as pydantic # type: ignore
11
+ except ImportError:
12
+ import pydantic # type: ignore
13
+
14
+
15
+ class PaginatedTestSuiteRunExecutionList(pydantic.BaseModel):
16
+ count: typing.Optional[int]
17
+ next: typing.Optional[str]
18
+ previous: typing.Optional[str]
19
+ results: typing.Optional[typing.List[TestSuiteRunExecution]]
20
+
21
+ def json(self, **kwargs: typing.Any) -> str:
22
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
23
+ return super().json(**kwargs_with_defaults)
24
+
25
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
26
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
27
+ return super().dict(**kwargs_with_defaults)
28
+
29
+ class Config:
30
+ frozen = True
31
+ smart_union = True
32
+ json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,36 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ..core.datetime_utils import serialize_datetime
7
+ from .test_suite_run_deployment_release_tag_exec_config_data import TestSuiteRunDeploymentReleaseTagExecConfigData
8
+
9
+ try:
10
+ import pydantic.v1 as pydantic # type: ignore
11
+ except ImportError:
12
+ import pydantic # type: ignore
13
+
14
+
15
+ class TestSuiteRunDeploymentReleaseTagExecConfig(pydantic.BaseModel):
16
+ """
17
+ Execution configuration for running a Test Suite against a Prompt Deployment
18
+ """
19
+
20
+ data: TestSuiteRunDeploymentReleaseTagExecConfigData
21
+ test_case_ids: typing.Optional[typing.List[str]] = pydantic.Field(
22
+ description="Optionally specify a subset of test case ids to run. If not provided, all test cases within the test suite will be run by default."
23
+ )
24
+
25
+ def json(self, **kwargs: typing.Any) -> str:
26
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
27
+ return super().json(**kwargs_with_defaults)
28
+
29
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
30
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
31
+ return super().dict(**kwargs_with_defaults)
32
+
33
+ class Config:
34
+ frozen = True
35
+ smart_union = True
36
+ json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,31 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ..core.datetime_utils import serialize_datetime
7
+
8
+ try:
9
+ import pydantic.v1 as pydantic # type: ignore
10
+ except ImportError:
11
+ import pydantic # type: ignore
12
+
13
+
14
+ class TestSuiteRunDeploymentReleaseTagExecConfigData(pydantic.BaseModel):
15
+ deployment_id: str = pydantic.Field(description="The ID of the Prompt Deployment to run the Test Suite against.")
16
+ tag: typing.Optional[str] = pydantic.Field(
17
+ description="A tag identifying which release of the Prompt Deployment to run the Test Suite against. Useful for testing past versions of the Prompt Deployment"
18
+ )
19
+
20
+ def json(self, **kwargs: typing.Any) -> str:
21
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
22
+ return super().json(**kwargs_with_defaults)
23
+
24
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
25
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
26
+ return super().dict(**kwargs_with_defaults)
27
+
28
+ class Config:
29
+ frozen = True
30
+ smart_union = True
31
+ json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,31 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ..core.datetime_utils import serialize_datetime
7
+
8
+ try:
9
+ import pydantic.v1 as pydantic # type: ignore
10
+ except ImportError:
11
+ import pydantic # type: ignore
12
+
13
+
14
+ class TestSuiteRunDeploymentReleaseTagExecConfigDataRequest(pydantic.BaseModel):
15
+ deployment_id: str = pydantic.Field(description="The ID of the Prompt Deployment to run the Test Suite against.")
16
+ tag: typing.Optional[str] = pydantic.Field(
17
+ description="A tag identifying which release of the Prompt Deployment to run the Test Suite against. Useful for testing past versions of the Prompt Deployment"
18
+ )
19
+
20
+ def json(self, **kwargs: typing.Any) -> str:
21
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
22
+ return super().json(**kwargs_with_defaults)
23
+
24
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
25
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
26
+ return super().dict(**kwargs_with_defaults)
27
+
28
+ class Config:
29
+ frozen = True
30
+ smart_union = True
31
+ json_encoders = {dt.datetime: serialize_datetime}