vellum-ai 0.3.11__py3-none-any.whl → 0.3.13__py3-none-any.whl

Files changed (59)
  1. vellum/__init__.py +90 -0
  2. vellum/client.py +3 -0
  3. vellum/core/client_wrapper.py +1 -1
  4. vellum/resources/__init__.py +2 -0
  5. vellum/resources/document_indexes/client.py +282 -0
  6. vellum/resources/test_suite_runs/__init__.py +2 -0
  7. vellum/resources/test_suite_runs/client.py +223 -0
  8. vellum/types/__init__.py +102 -0
  9. vellum/types/array_variable_value_item.py +11 -0
  10. vellum/types/fulfilled_workflow_node_result_event.py +2 -0
  11. vellum/types/generate_request.py +1 -1
  12. vellum/types/image_variable_value.py +33 -0
  13. vellum/types/initiated_workflow_node_result_event.py +1 -0
  14. vellum/types/paginated_test_suite_run_execution_list.py +32 -0
  15. vellum/types/rejected_workflow_node_result_event.py +1 -0
  16. vellum/types/streaming_workflow_node_result_event.py +1 -0
  17. vellum/types/test_suite_run_deployment_release_tag_exec_config.py +36 -0
  18. vellum/types/test_suite_run_deployment_release_tag_exec_config_data.py +31 -0
  19. vellum/types/test_suite_run_deployment_release_tag_exec_config_data_request.py +31 -0
  20. vellum/types/test_suite_run_deployment_release_tag_exec_config_request.py +38 -0
  21. vellum/types/test_suite_run_deployment_release_tag_exec_config_type_enum.py +5 -0
  22. vellum/types/test_suite_run_exec_config.py +33 -0
  23. vellum/types/test_suite_run_exec_config_request.py +33 -0
  24. vellum/types/test_suite_run_execution.py +33 -0
  25. vellum/types/test_suite_run_execution_chat_history_output.py +30 -0
  26. vellum/types/test_suite_run_execution_error_output.py +30 -0
  27. vellum/types/test_suite_run_execution_json_output.py +29 -0
  28. vellum/types/test_suite_run_execution_metric_result.py +30 -0
  29. vellum/types/test_suite_run_execution_number_output.py +29 -0
  30. vellum/types/test_suite_run_execution_output.py +78 -0
  31. vellum/types/test_suite_run_execution_search_results_output.py +30 -0
  32. vellum/types/test_suite_run_execution_string_output.py +29 -0
  33. vellum/types/test_suite_run_metric_error_output.py +34 -0
  34. vellum/types/test_suite_run_metric_error_output_type_enum.py +5 -0
  35. vellum/types/test_suite_run_metric_number_output.py +33 -0
  36. vellum/types/test_suite_run_metric_number_output_type_enum.py +5 -0
  37. vellum/types/test_suite_run_metric_output.py +31 -0
  38. vellum/types/test_suite_run_read.py +47 -0
  39. vellum/types/test_suite_run_state.py +41 -0
  40. vellum/types/test_suite_run_test_suite.py +30 -0
  41. vellum/types/test_suite_run_workflow_release_tag_exec_config.py +36 -0
  42. vellum/types/test_suite_run_workflow_release_tag_exec_config_data.py +33 -0
  43. vellum/types/test_suite_run_workflow_release_tag_exec_config_data_request.py +33 -0
  44. vellum/types/test_suite_run_workflow_release_tag_exec_config_request.py +38 -0
  45. vellum/types/test_suite_run_workflow_release_tag_exec_config_type_enum.py +5 -0
  46. vellum/types/workflow_execution_event_error_code.py +5 -0
  47. vellum/types/workflow_output_array.py +1 -1
  48. vellum/types/workflow_output_chat_history.py +1 -1
  49. vellum/types/workflow_output_error.py +1 -1
  50. vellum/types/workflow_output_function_call.py +1 -1
  51. vellum/types/workflow_output_image.py +1 -1
  52. vellum/types/workflow_output_json.py +1 -1
  53. vellum/types/workflow_output_number.py +1 -1
  54. vellum/types/workflow_output_search_results.py +1 -1
  55. vellum/types/workflow_output_string.py +1 -1
  56. {vellum_ai-0.3.11.dist-info → vellum_ai-0.3.13.dist-info}/METADATA +1 -1
  57. {vellum_ai-0.3.11.dist-info → vellum_ai-0.3.13.dist-info}/RECORD +59 -26
  58. {vellum_ai-0.3.11.dist-info → vellum_ai-0.3.13.dist-info}/LICENSE +0 -0
  59. {vellum_ai-0.3.11.dist-info → vellum_ai-0.3.13.dist-info}/WHEEL +0 -0
vellum/resources/test_suite_runs/client.py ADDED
@@ -0,0 +1,223 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...core.remove_none_from_dict import remove_none_from_dict
+from ...types.paginated_test_suite_run_execution_list import PaginatedTestSuiteRunExecutionList
+from ...types.test_suite_run_exec_config_request import TestSuiteRunExecConfigRequest
+from ...types.test_suite_run_read import TestSuiteRunRead
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class TestSuiteRunsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def create(
+        self, *, test_suite_id: typing.Optional[str] = OMIT, exec_config: TestSuiteRunExecConfigRequest
+    ) -> TestSuiteRunRead:
+        """
+        Trigger a Test Suite and create a new Test Suite Run
+
+        Parameters:
+            - test_suite_id: typing.Optional[str]. The ID of the Test Suite to run
+
+            - exec_config: TestSuiteRunExecConfigRequest. Configuration that defines how the Test Suite should be run
+        """
+        _request: typing.Dict[str, typing.Any] = {"exec_config": exec_config}
+        if test_suite_id is not OMIT:
+            _request["test_suite_id"] = test_suite_id
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", "v1/test-suite-runs"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(TestSuiteRunRead, _response.json())  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def retrieve(self, id: str) -> TestSuiteRunRead:
+        """
+        Retrieve a specific Test Suite Run by ID
+
+        Parameters:
+            - id: str. A UUID string identifying this test suite run.
+        ---
+        from vellum.client import Vellum
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.test_suite_runs.retrieve(
+            id="id",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/test-suite-runs/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(TestSuiteRunRead, _response.json())  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def list_test_suite_run_executions(
+        self, id: str, *, limit: typing.Optional[int] = None, offset: typing.Optional[int] = None
+    ) -> PaginatedTestSuiteRunExecutionList:
+        """
+        Parameters:
+            - id: str. A UUID string identifying this test suite run.
+
+            - limit: typing.Optional[int]. Number of results to return per page.
+
+            - offset: typing.Optional[int]. The initial index from which to return the results.
+        ---
+        from vellum.client import Vellum
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.test_suite_runs.list_test_suite_run_executions(
+            id="id",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_environment().default}/", f"v1/test-suite-runs/{id}/executions"
+            ),
+            params=remove_none_from_dict({"limit": limit, "offset": offset}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PaginatedTestSuiteRunExecutionList, _response.json())  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncTestSuiteRunsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def create(
+        self, *, test_suite_id: typing.Optional[str] = OMIT, exec_config: TestSuiteRunExecConfigRequest
+    ) -> TestSuiteRunRead:
+        """
+        Trigger a Test Suite and create a new Test Suite Run
+
+        Parameters:
+            - test_suite_id: typing.Optional[str]. The ID of the Test Suite to run
+
+            - exec_config: TestSuiteRunExecConfigRequest. Configuration that defines how the Test Suite should be run
+        """
+        _request: typing.Dict[str, typing.Any] = {"exec_config": exec_config}
+        if test_suite_id is not OMIT:
+            _request["test_suite_id"] = test_suite_id
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", "v1/test-suite-runs"),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(TestSuiteRunRead, _response.json())  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def retrieve(self, id: str) -> TestSuiteRunRead:
+        """
+        Retrieve a specific Test Suite Run by ID
+
+        Parameters:
+            - id: str. A UUID string identifying this test suite run.
+        ---
+        from vellum.client import AsyncVellum
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+        await client.test_suite_runs.retrieve(
+            id="id",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", f"v1/test-suite-runs/{id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(TestSuiteRunRead, _response.json())  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def list_test_suite_run_executions(
+        self, id: str, *, limit: typing.Optional[int] = None, offset: typing.Optional[int] = None
+    ) -> PaginatedTestSuiteRunExecutionList:
+        """
+        Parameters:
+            - id: str. A UUID string identifying this test suite run.
+
+            - limit: typing.Optional[int]. Number of results to return per page.
+
+            - offset: typing.Optional[int]. The initial index from which to return the results.
+        ---
+        from vellum.client import AsyncVellum
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+        await client.test_suite_runs.list_test_suite_run_executions(
+            id="id",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_environment().default}/", f"v1/test-suite-runs/{id}/executions"
+            ),
+            params=remove_none_from_dict({"limit": limit, "offset": offset}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PaginatedTestSuiteRunExecutionList, _response.json())  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
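Usage note (not part of the diff): a minimal sketch of triggering a Test Suite Run against a Prompt Deployment release tag with the new client above. The exec-config request models appear later in this diff; the discriminated-union constructor shape (passing the `type` literal on TestSuiteRunExecConfigRequest_DeploymentReleaseTag) is assumed from the union pattern used elsewhere in this SDK, and the IDs are placeholders.

from vellum.client import Vellum
from vellum.types import (
    TestSuiteRunDeploymentReleaseTagExecConfigDataRequest,
    TestSuiteRunExecConfigRequest_DeploymentReleaseTag,
)

client = Vellum(api_key="YOUR_API_KEY")

# Trigger the run; test_suite_id is optional per the create() signature above.
run = client.test_suite_runs.create(
    test_suite_id="YOUR_TEST_SUITE_ID",
    exec_config=TestSuiteRunExecConfigRequest_DeploymentReleaseTag(
        type="DEPLOYMENT_RELEASE_TAG",  # assumed discriminator value, per the type enum added below
        data=TestSuiteRunDeploymentReleaseTagExecConfigDataRequest(
            deployment_id="YOUR_DEPLOYMENT_ID",
            tag="YOUR_RELEASE_TAG",  # optional; selects which release of the deployment to test
        ),
    ),
)
# `run` is a TestSuiteRunRead; its fields are defined in test_suite_run_read.py (not shown in this diff).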
vellum/types/__init__.py CHANGED
@@ -22,6 +22,7 @@ from .array_variable_value_item import (
     ArrayVariableValueItem_ChatHistory,
     ArrayVariableValueItem_Error,
     ArrayVariableValueItem_FunctionCall,
+    ArrayVariableValueItem_Image,
     ArrayVariableValueItem_Json,
     ArrayVariableValueItem_Number,
     ArrayVariableValueItem_SearchResults,
@@ -146,6 +147,7 @@ from .generate_stream_result_data import GenerateStreamResultData
 from .image_chat_message_content import ImageChatMessageContent
 from .image_chat_message_content_request import ImageChatMessageContentRequest
 from .image_enum import ImageEnum
+from .image_variable_value import ImageVariableValue
 from .indexing_state_enum import IndexingStateEnum
 from .initiated_enum import InitiatedEnum
 from .initiated_execute_prompt_event import InitiatedExecutePromptEvent
@@ -224,6 +226,7 @@ from .paginated_document_index_read_list import PaginatedDocumentIndexReadList
 from .paginated_slim_deployment_read_list import PaginatedSlimDeploymentReadList
 from .paginated_slim_document_list import PaginatedSlimDocumentList
 from .paginated_slim_workflow_deployment_list import PaginatedSlimWorkflowDeploymentList
+from .paginated_test_suite_run_execution_list import PaginatedTestSuiteRunExecutionList
 from .processing_failure_reason_enum import ProcessingFailureReasonEnum
 from .processing_state_enum import ProcessingStateEnum
 from .prompt_deployment_expand_meta_request_request import PromptDeploymentExpandMetaRequestRequest
@@ -361,6 +364,61 @@ from .test_case_variable_value import (
     TestCaseVariableValue_SearchResults,
     TestCaseVariableValue_String,
 )
+from .test_suite_run_deployment_release_tag_exec_config import TestSuiteRunDeploymentReleaseTagExecConfig
+from .test_suite_run_deployment_release_tag_exec_config_data import TestSuiteRunDeploymentReleaseTagExecConfigData
+from .test_suite_run_deployment_release_tag_exec_config_data_request import (
+    TestSuiteRunDeploymentReleaseTagExecConfigDataRequest,
+)
+from .test_suite_run_deployment_release_tag_exec_config_request import TestSuiteRunDeploymentReleaseTagExecConfigRequest
+from .test_suite_run_deployment_release_tag_exec_config_type_enum import (
+    TestSuiteRunDeploymentReleaseTagExecConfigTypeEnum,
+)
+from .test_suite_run_exec_config import (
+    TestSuiteRunExecConfig,
+    TestSuiteRunExecConfig_DeploymentReleaseTag,
+    TestSuiteRunExecConfig_WorkflowReleaseTag,
+)
+from .test_suite_run_exec_config_request import (
+    TestSuiteRunExecConfigRequest,
+    TestSuiteRunExecConfigRequest_DeploymentReleaseTag,
+    TestSuiteRunExecConfigRequest_WorkflowReleaseTag,
+)
+from .test_suite_run_execution import TestSuiteRunExecution
+from .test_suite_run_execution_chat_history_output import TestSuiteRunExecutionChatHistoryOutput
+from .test_suite_run_execution_error_output import TestSuiteRunExecutionErrorOutput
+from .test_suite_run_execution_json_output import TestSuiteRunExecutionJsonOutput
+from .test_suite_run_execution_metric_result import TestSuiteRunExecutionMetricResult
+from .test_suite_run_execution_number_output import TestSuiteRunExecutionNumberOutput
+from .test_suite_run_execution_output import (
+    TestSuiteRunExecutionOutput,
+    TestSuiteRunExecutionOutput_ChatHistory,
+    TestSuiteRunExecutionOutput_Error,
+    TestSuiteRunExecutionOutput_Json,
+    TestSuiteRunExecutionOutput_Number,
+    TestSuiteRunExecutionOutput_SearchResults,
+    TestSuiteRunExecutionOutput_String,
+)
+from .test_suite_run_execution_search_results_output import TestSuiteRunExecutionSearchResultsOutput
+from .test_suite_run_execution_string_output import TestSuiteRunExecutionStringOutput
+from .test_suite_run_metric_error_output import TestSuiteRunMetricErrorOutput
+from .test_suite_run_metric_error_output_type_enum import TestSuiteRunMetricErrorOutputTypeEnum
+from .test_suite_run_metric_number_output import TestSuiteRunMetricNumberOutput
+from .test_suite_run_metric_number_output_type_enum import TestSuiteRunMetricNumberOutputTypeEnum
+from .test_suite_run_metric_output import (
+    TestSuiteRunMetricOutput,
+    TestSuiteRunMetricOutput_Error,
+    TestSuiteRunMetricOutput_Number,
+)
+from .test_suite_run_read import TestSuiteRunRead
+from .test_suite_run_state import TestSuiteRunState
+from .test_suite_run_test_suite import TestSuiteRunTestSuite
+from .test_suite_run_workflow_release_tag_exec_config import TestSuiteRunWorkflowReleaseTagExecConfig
+from .test_suite_run_workflow_release_tag_exec_config_data import TestSuiteRunWorkflowReleaseTagExecConfigData
+from .test_suite_run_workflow_release_tag_exec_config_data_request import (
+    TestSuiteRunWorkflowReleaseTagExecConfigDataRequest,
+)
+from .test_suite_run_workflow_release_tag_exec_config_request import TestSuiteRunWorkflowReleaseTagExecConfigRequest
+from .test_suite_run_workflow_release_tag_exec_config_type_enum import TestSuiteRunWorkflowReleaseTagExecConfigTypeEnum
 from .test_suite_test_case import TestSuiteTestCase
 from .upload_document_error_response import UploadDocumentErrorResponse
 from .upload_document_response import UploadDocumentResponse
@@ -471,6 +529,7 @@ __all__ = [
     "ArrayVariableValueItem_ChatHistory",
     "ArrayVariableValueItem_Error",
     "ArrayVariableValueItem_FunctionCall",
+    "ArrayVariableValueItem_Image",
     "ArrayVariableValueItem_Json",
     "ArrayVariableValueItem_Number",
     "ArrayVariableValueItem_SearchResults",
@@ -582,6 +641,7 @@ __all__ = [
     "ImageChatMessageContent",
     "ImageChatMessageContentRequest",
     "ImageEnum",
+    "ImageVariableValue",
     "IndexingStateEnum",
     "InitiatedEnum",
     "InitiatedExecutePromptEvent",
@@ -654,6 +714,7 @@ __all__ = [
     "PaginatedSlimDeploymentReadList",
     "PaginatedSlimDocumentList",
     "PaginatedSlimWorkflowDeploymentList",
+    "PaginatedTestSuiteRunExecutionList",
     "ProcessingFailureReasonEnum",
     "ProcessingStateEnum",
     "PromptDeploymentExpandMetaRequestRequest",
@@ -779,6 +840,47 @@ __all__ = [
     "TestCaseVariableValue_Number",
     "TestCaseVariableValue_SearchResults",
     "TestCaseVariableValue_String",
+    "TestSuiteRunDeploymentReleaseTagExecConfig",
+    "TestSuiteRunDeploymentReleaseTagExecConfigData",
+    "TestSuiteRunDeploymentReleaseTagExecConfigDataRequest",
+    "TestSuiteRunDeploymentReleaseTagExecConfigRequest",
+    "TestSuiteRunDeploymentReleaseTagExecConfigTypeEnum",
+    "TestSuiteRunExecConfig",
+    "TestSuiteRunExecConfigRequest",
+    "TestSuiteRunExecConfigRequest_DeploymentReleaseTag",
+    "TestSuiteRunExecConfigRequest_WorkflowReleaseTag",
+    "TestSuiteRunExecConfig_DeploymentReleaseTag",
+    "TestSuiteRunExecConfig_WorkflowReleaseTag",
+    "TestSuiteRunExecution",
+    "TestSuiteRunExecutionChatHistoryOutput",
+    "TestSuiteRunExecutionErrorOutput",
+    "TestSuiteRunExecutionJsonOutput",
+    "TestSuiteRunExecutionMetricResult",
+    "TestSuiteRunExecutionNumberOutput",
+    "TestSuiteRunExecutionOutput",
+    "TestSuiteRunExecutionOutput_ChatHistory",
+    "TestSuiteRunExecutionOutput_Error",
+    "TestSuiteRunExecutionOutput_Json",
+    "TestSuiteRunExecutionOutput_Number",
+    "TestSuiteRunExecutionOutput_SearchResults",
+    "TestSuiteRunExecutionOutput_String",
+    "TestSuiteRunExecutionSearchResultsOutput",
+    "TestSuiteRunExecutionStringOutput",
+    "TestSuiteRunMetricErrorOutput",
+    "TestSuiteRunMetricErrorOutputTypeEnum",
+    "TestSuiteRunMetricNumberOutput",
+    "TestSuiteRunMetricNumberOutputTypeEnum",
+    "TestSuiteRunMetricOutput",
+    "TestSuiteRunMetricOutput_Error",
+    "TestSuiteRunMetricOutput_Number",
+    "TestSuiteRunRead",
+    "TestSuiteRunState",
+    "TestSuiteRunTestSuite",
+    "TestSuiteRunWorkflowReleaseTagExecConfig",
+    "TestSuiteRunWorkflowReleaseTagExecConfigData",
+    "TestSuiteRunWorkflowReleaseTagExecConfigDataRequest",
+    "TestSuiteRunWorkflowReleaseTagExecConfigRequest",
+    "TestSuiteRunWorkflowReleaseTagExecConfigTypeEnum",
     "TestSuiteTestCase",
     "UploadDocumentErrorResponse",
     "UploadDocumentResponse",
vellum/types/array_variable_value_item.py CHANGED
@@ -9,6 +9,7 @@ import typing_extensions
 from .chat_history_variable_value import ChatHistoryVariableValue
 from .error_variable_value import ErrorVariableValue
 from .function_call_variable_value import FunctionCallVariableValue
+from .image_variable_value import ImageVariableValue
 from .json_variable_value import JsonVariableValue
 from .number_variable_value import NumberVariableValue
 from .search_results_variable_value import SearchResultsVariableValue
@@ -78,6 +79,15 @@ class ArrayVariableValueItem_FunctionCall(FunctionCallVariableValue):
         allow_population_by_field_name = True


+class ArrayVariableValueItem_Image(ImageVariableValue):
+    type: typing_extensions.Literal["IMAGE"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
 ArrayVariableValueItem = typing.Union[
     ArrayVariableValueItem_String,
     ArrayVariableValueItem_Number,
@@ -86,4 +96,5 @@ ArrayVariableValueItem = typing.Union[
     ArrayVariableValueItem_SearchResults,
     ArrayVariableValueItem_Error,
     ArrayVariableValueItem_FunctionCall,
+    ArrayVariableValueItem_Image,
 ]
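Usage note (not part of the diff): a minimal sketch of dispatching on the new IMAGE member of the ArrayVariableValueItem union. It assumes the union alias and the new member are re-exported from vellum.types (as the __all__ changes above suggest) and relies only on the `type` discriminator and the `value: Optional[VellumImage]` field shown in this release; the shape of VellumImage itself is not part of this diff.

from vellum.types import ArrayVariableValueItem, ArrayVariableValueItem_Image

def describe_item(item: ArrayVariableValueItem) -> str:
    # Each union member carries a `type` literal; the IMAGE member wraps an ImageVariableValue.
    if isinstance(item, ArrayVariableValueItem_Image):
        return f"image item (value present: {item.value is not None})"
    return f"{item.type} item"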
vellum/types/fulfilled_workflow_node_result_event.py CHANGED
@@ -23,7 +23,9 @@ class FulfilledWorkflowNodeResultEvent(pydantic.BaseModel):
     node_result_id: str
     ts: typing.Optional[dt.datetime]
     data: typing.Optional[WorkflowNodeResultData]
+    source_execution_id: typing.Optional[str]
     output_values: typing.Optional[typing.List[NodeOutputCompiledValue]]
+    mocked: typing.Optional[bool]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
vellum/types/generate_request.py CHANGED
@@ -17,7 +17,7 @@ class GenerateRequest(pydantic.BaseModel):
         description="Key/value pairs for each template variable defined in the deployment's prompt."
     )
     chat_history: typing.Optional[typing.List[ChatMessageRequest]] = pydantic.Field(
-        description="Optionally provide a list of chat messages that'll be used in place of the special {$chat_history} variable, if included in the prompt."
+        description="Optionally provide a list of chat messages that'll be used in place of the special chat_history variable, if included in the prompt."
     )
     external_ids: typing.Optional[typing.List[str]] = pydantic.Field(
         description="Optionally include a unique identifier for each generation, as represented outside of Vellum. Note that this should generally be a list of length one."
vellum/types/image_variable_value.py ADDED
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .vellum_image import VellumImage
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ImageVariableValue(pydantic.BaseModel):
+    """
+    A base Vellum primitive value representing an image.
+    """
+
+    value: typing.Optional[VellumImage]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/initiated_workflow_node_result_event.py CHANGED
@@ -23,6 +23,7 @@ class InitiatedWorkflowNodeResultEvent(pydantic.BaseModel):
     node_result_id: str
     ts: typing.Optional[dt.datetime]
     data: typing.Optional[WorkflowNodeResultData]
+    source_execution_id: typing.Optional[str]
     input_values: typing.Optional[typing.List[NodeInputVariableCompiledValue]]

     def json(self, **kwargs: typing.Any) -> str:
vellum/types/paginated_test_suite_run_execution_list.py ADDED
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .test_suite_run_execution import TestSuiteRunExecution
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PaginatedTestSuiteRunExecutionList(pydantic.BaseModel):
+    count: typing.Optional[int]
+    next: typing.Optional[str]
+    previous: typing.Optional[str]
+    results: typing.Optional[typing.List[TestSuiteRunExecution]]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
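Usage note (not part of the diff): a minimal sketch of paging through a Test Suite Run's executions with the new list endpoint, using the limit/offset parameters of list_test_suite_run_executions and the `next`/`results` fields of PaginatedTestSuiteRunExecutionList shown above. The run ID is a placeholder.

from vellum.client import Vellum

client = Vellum(api_key="YOUR_API_KEY")

offset = 0
limit = 50
while True:
    page = client.test_suite_runs.list_test_suite_run_executions(
        id="YOUR_TEST_SUITE_RUN_ID", limit=limit, offset=offset
    )
    for execution in page.results or []:
        ...  # each item is a TestSuiteRunExecution
    if not page.next:
        break
    offset += limit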
vellum/types/rejected_workflow_node_result_event.py CHANGED
@@ -23,6 +23,7 @@ class RejectedWorkflowNodeResultEvent(pydantic.BaseModel):
     node_result_id: str
     ts: typing.Optional[dt.datetime]
     data: typing.Optional[WorkflowNodeResultData]
+    source_execution_id: typing.Optional[str]
     error: WorkflowEventError

     def json(self, **kwargs: typing.Any) -> str:
vellum/types/streaming_workflow_node_result_event.py CHANGED
@@ -23,6 +23,7 @@ class StreamingWorkflowNodeResultEvent(pydantic.BaseModel):
     node_result_id: str
     ts: typing.Optional[dt.datetime]
     data: typing.Optional[WorkflowNodeResultData]
+    source_execution_id: typing.Optional[str]
     output: typing.Optional[NodeOutputCompiledValue]
     output_index: typing.Optional[int]

vellum/types/test_suite_run_deployment_release_tag_exec_config.py ADDED
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .test_suite_run_deployment_release_tag_exec_config_data import TestSuiteRunDeploymentReleaseTagExecConfigData
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class TestSuiteRunDeploymentReleaseTagExecConfig(pydantic.BaseModel):
+    """
+    Execution configuration for running a Test Suite against a Prompt Deployment
+    """
+
+    data: TestSuiteRunDeploymentReleaseTagExecConfigData
+    test_case_ids: typing.Optional[typing.List[str]] = pydantic.Field(
+        description="Optionally specify a subset of test case ids to run. If not provided, all test cases within the test suite will be run by default."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/test_suite_run_deployment_release_tag_exec_config_data.py ADDED
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class TestSuiteRunDeploymentReleaseTagExecConfigData(pydantic.BaseModel):
+    deployment_id: str = pydantic.Field(description="The ID of the Prompt Deployment to run the Test Suite against.")
+    tag: typing.Optional[str] = pydantic.Field(
+        description="A tag identifying which release of the Prompt Deployment to run the Test Suite against. Useful for testing past versions of the Prompt Deployment"
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/test_suite_run_deployment_release_tag_exec_config_data_request.py ADDED
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class TestSuiteRunDeploymentReleaseTagExecConfigDataRequest(pydantic.BaseModel):
+    deployment_id: str = pydantic.Field(description="The ID of the Prompt Deployment to run the Test Suite against.")
+    tag: typing.Optional[str] = pydantic.Field(
+        description="A tag identifying which release of the Prompt Deployment to run the Test Suite against. Useful for testing past versions of the Prompt Deployment"
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/test_suite_run_deployment_release_tag_exec_config_request.py ADDED
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .test_suite_run_deployment_release_tag_exec_config_data_request import (
+    TestSuiteRunDeploymentReleaseTagExecConfigDataRequest,
+)
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class TestSuiteRunDeploymentReleaseTagExecConfigRequest(pydantic.BaseModel):
+    """
+    Execution configuration for running a Test Suite against a Prompt Deployment
+    """
+
+    data: TestSuiteRunDeploymentReleaseTagExecConfigDataRequest
+    test_case_ids: typing.Optional[typing.List[str]] = pydantic.Field(
+        description="Optionally specify a subset of test case ids to run. If not provided, all test cases within the test suite will be run by default."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/test_suite_run_deployment_release_tag_exec_config_type_enum.py ADDED
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+TestSuiteRunDeploymentReleaseTagExecConfigTypeEnum = typing_extensions.Literal["DEPLOYMENT_RELEASE_TAG"]