vellum-ai 0.6.0__py3-none-any.whl → 0.6.2__py3-none-any.whl

This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
@@ -0,0 +1,192 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+ import urllib.parse
+ from json.decoder import JSONDecodeError
+
+ from ...core.api_error import ApiError
+ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+ from ...core.jsonable_encoder import jsonable_encoder
+ from ...core.pydantic_utilities import pydantic_v1
+ from ...core.remove_none_from_dict import remove_none_from_dict
+ from ...core.request_options import RequestOptions
+ from ...types.deployment_read import DeploymentRead
+
+ # this is used as the default value for optional parameters
+ OMIT = typing.cast(typing.Any, ...)
+
+
+ class PromptVersionsClient:
+     def __init__(self, *, client_wrapper: SyncClientWrapper):
+         self._client_wrapper = client_wrapper
+
+     def deploy_prompt(
+         self,
+         id: str,
+         prompt_id: str,
+         *,
+         prompt_deployment_id: typing.Optional[str] = OMIT,
+         prompt_deployment_name: typing.Optional[str] = OMIT,
+         label: typing.Optional[str] = OMIT,
+         release_tags: typing.Optional[typing.Sequence[str]] = OMIT,
+         request_options: typing.Optional[RequestOptions] = None,
+     ) -> DeploymentRead:
+         """
+         Parameters:
+             - id: str. A UUID string identifying this sandbox.
+
+             - prompt_id: str. An ID identifying the Prompt you'd like to deploy.
+
+             - prompt_deployment_id: typing.Optional[str]. The Vellum-generated ID of the Prompt Deployment you'd like to update. Cannot specify both this and prompt_deployment_name. Leave null to create a new Prompt Deployment.
+
+             - prompt_deployment_name: typing.Optional[str]. The unique name of the Prompt Deployment you'd like to either create or update. Cannot specify both this and prompt_deployment_id. If provided and matches an existing Prompt Deployment, that Prompt Deployment will be updated. Otherwise, a new Prompt Deployment will be created.
+
+             - label: typing.Optional[str]. In the event that a new Prompt Deployment is created, this will be the label it's given.
+
+             - release_tags: typing.Optional[typing.Sequence[str]]. Optionally provide the release tags that you'd like to be associated with the latest release of the created/updated Prompt Deployment.
+
+             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+         ---
+         from vellum.client import Vellum
+
+         client = Vellum(
+             api_key="YOUR_API_KEY",
+         )
+         client.prompt_versions.deploy_prompt(
+             id="id",
+             prompt_id="prompt_id",
+         )
+         """
+         _request: typing.Dict[str, typing.Any] = {}
+         if prompt_deployment_id is not OMIT:
+             _request["prompt_deployment_id"] = prompt_deployment_id
+         if prompt_deployment_name is not OMIT:
+             _request["prompt_deployment_name"] = prompt_deployment_name
+         if label is not OMIT:
+             _request["label"] = label
+         if release_tags is not OMIT:
+             _request["release_tags"] = release_tags
+         _response = self._client_wrapper.httpx_client.request(
+             method="POST",
+             url=urllib.parse.urljoin(
+                 f"{self._client_wrapper.get_environment().default}/",
+                 f"v1/sandboxes/{jsonable_encoder(id)}/prompts/{jsonable_encoder(prompt_id)}/deploy",
+             ),
+             params=jsonable_encoder(
+                 request_options.get("additional_query_parameters") if request_options is not None else None
+             ),
+             json=jsonable_encoder(_request)
+             if request_options is None or request_options.get("additional_body_parameters") is None
+             else {
+                 **jsonable_encoder(_request),
+                 **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+             },
+             headers=jsonable_encoder(
+                 remove_none_from_dict(
+                     {
+                         **self._client_wrapper.get_headers(),
+                         **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                     }
+                 )
+             ),
+             timeout=request_options.get("timeout_in_seconds")
+             if request_options is not None and request_options.get("timeout_in_seconds") is not None
+             else self._client_wrapper.get_timeout(),
+             retries=0,
+             max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic_v1.parse_obj_as(DeploymentRead, _response.json())  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+ class AsyncPromptVersionsClient:
+     def __init__(self, *, client_wrapper: AsyncClientWrapper):
+         self._client_wrapper = client_wrapper
+
+     async def deploy_prompt(
+         self,
+         id: str,
+         prompt_id: str,
+         *,
+         prompt_deployment_id: typing.Optional[str] = OMIT,
+         prompt_deployment_name: typing.Optional[str] = OMIT,
+         label: typing.Optional[str] = OMIT,
+         release_tags: typing.Optional[typing.Sequence[str]] = OMIT,
+         request_options: typing.Optional[RequestOptions] = None,
+     ) -> DeploymentRead:
+         """
+         Parameters:
+             - id: str. A UUID string identifying this sandbox.
+
+             - prompt_id: str. An ID identifying the Prompt you'd like to deploy.
+
+             - prompt_deployment_id: typing.Optional[str]. The Vellum-generated ID of the Prompt Deployment you'd like to update. Cannot specify both this and prompt_deployment_name. Leave null to create a new Prompt Deployment.
+
+             - prompt_deployment_name: typing.Optional[str]. The unique name of the Prompt Deployment you'd like to either create or update. Cannot specify both this and prompt_deployment_id. If provided and matches an existing Prompt Deployment, that Prompt Deployment will be updated. Otherwise, a new Prompt Deployment will be created.
+
+             - label: typing.Optional[str]. In the event that a new Prompt Deployment is created, this will be the label it's given.
+
+             - release_tags: typing.Optional[typing.Sequence[str]]. Optionally provide the release tags that you'd like to be associated with the latest release of the created/updated Prompt Deployment.
+
+             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+         ---
+         from vellum.client import AsyncVellum
+
+         client = AsyncVellum(
+             api_key="YOUR_API_KEY",
+         )
+         await client.prompt_versions.deploy_prompt(
+             id="id",
+             prompt_id="prompt_id",
+         )
+         """
+         _request: typing.Dict[str, typing.Any] = {}
+         if prompt_deployment_id is not OMIT:
+             _request["prompt_deployment_id"] = prompt_deployment_id
+         if prompt_deployment_name is not OMIT:
+             _request["prompt_deployment_name"] = prompt_deployment_name
+         if label is not OMIT:
+             _request["label"] = label
+         if release_tags is not OMIT:
+             _request["release_tags"] = release_tags
+         _response = await self._client_wrapper.httpx_client.request(
+             method="POST",
+             url=urllib.parse.urljoin(
+                 f"{self._client_wrapper.get_environment().default}/",
+                 f"v1/sandboxes/{jsonable_encoder(id)}/prompts/{jsonable_encoder(prompt_id)}/deploy",
+             ),
+             params=jsonable_encoder(
+                 request_options.get("additional_query_parameters") if request_options is not None else None
+             ),
+             json=jsonable_encoder(_request)
+             if request_options is None or request_options.get("additional_body_parameters") is None
+             else {
+                 **jsonable_encoder(_request),
+                 **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+             },
+             headers=jsonable_encoder(
+                 remove_none_from_dict(
+                     {
+                         **self._client_wrapper.get_headers(),
+                         **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                     }
+                 )
+             ),
+             timeout=request_options.get("timeout_in_seconds")
+             if request_options is not None and request_options.get("timeout_in_seconds") is not None
+             else self._client_wrapper.get_timeout(),
+             retries=0,
+             max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic_v1.parse_obj_as(DeploymentRead, _response.json())  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
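
The new file above adds a prompt_versions resource with a single deploy_prompt endpoint. For orientation, here is a minimal usage sketch; the sandbox ID, prompt ID, deployment name, and release tag are placeholders rather than real identifiers.

    # Hypothetical usage of the new deploy_prompt method; all identifiers are placeholders.
    from vellum.client import Vellum

    client = Vellum(api_key="YOUR_API_KEY")

    deployment = client.prompt_versions.deploy_prompt(
        id="sandbox-uuid",                   # the sandbox containing the prompt
        prompt_id="prompt-uuid",             # the Prompt to deploy
        prompt_deployment_name="my-prompt",  # create or update a deployment with this name
        release_tags=["staging"],            # tag the resulting release
    )
    print(deployment)  # DeploymentRead pydantic model returned by the API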
@@ -12,8 +12,12 @@ from ...core.remove_none_from_dict import remove_none_from_dict
  from ...core.request_options import RequestOptions
  from ...types.paginated_slim_workflow_deployment_list import PaginatedSlimWorkflowDeploymentList
  from ...types.workflow_deployment_read import WorkflowDeploymentRead
+ from ...types.workflow_release_tag_read import WorkflowReleaseTagRead
  from .types.workflow_deployments_list_request_status import WorkflowDeploymentsListRequestStatus

+ # this is used as the default value for optional parameters
+ OMIT = typing.cast(typing.Any, ...)
+

  class WorkflowDeploymentsClient:
      def __init__(self, *, client_wrapper: SyncClientWrapper):
@@ -135,6 +139,130 @@ class WorkflowDeploymentsClient:
              raise ApiError(status_code=_response.status_code, body=_response.text)
          raise ApiError(status_code=_response.status_code, body=_response_json)

+     def retrieve_workflow_release_tag(
+         self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+     ) -> WorkflowReleaseTagRead:
+         """
+         Retrieve a Workflow Release Tag by tag name, associated with a specified Workflow Deployment.
+
+         Parameters:
+             - id: str. A UUID string identifying this workflow deployment.
+
+             - name: str. The name of the Release Tag associated with this Workflow Deployment that you'd like to retrieve.
+
+             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+         ---
+         from vellum.client import Vellum
+
+         client = Vellum(
+             api_key="YOUR_API_KEY",
+         )
+         client.workflow_deployments.retrieve_workflow_release_tag(
+             id="id",
+             name="name",
+         )
+         """
+         _response = self._client_wrapper.httpx_client.request(
+             method="GET",
+             url=urllib.parse.urljoin(
+                 f"{self._client_wrapper.get_environment().default}/",
+                 f"v1/workflow-deployments/{jsonable_encoder(id)}/release-tags/{jsonable_encoder(name)}",
+             ),
+             params=jsonable_encoder(
+                 request_options.get("additional_query_parameters") if request_options is not None else None
+             ),
+             headers=jsonable_encoder(
+                 remove_none_from_dict(
+                     {
+                         **self._client_wrapper.get_headers(),
+                         **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                     }
+                 )
+             ),
+             timeout=request_options.get("timeout_in_seconds")
+             if request_options is not None and request_options.get("timeout_in_seconds") is not None
+             else self._client_wrapper.get_timeout(),
+             retries=0,
+             max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic_v1.parse_obj_as(WorkflowReleaseTagRead, _response.json())  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     def update_workflow_release_tag(
+         self,
+         id: str,
+         name: str,
+         *,
+         history_item_id: typing.Optional[str] = OMIT,
+         request_options: typing.Optional[RequestOptions] = None,
+     ) -> WorkflowReleaseTagRead:
+         """
+         Updates an existing Release Tag associated with the specified Workflow Deployment.
+
+         Parameters:
+             - id: str. A UUID string identifying this workflow deployment.
+
+             - name: str. The name of the Release Tag associated with this Workflow Deployment that you'd like to update.
+
+             - history_item_id: typing.Optional[str]. The ID of the Workflow Deployment History Item to tag
+
+             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+         ---
+         from vellum.client import Vellum
+
+         client = Vellum(
+             api_key="YOUR_API_KEY",
+         )
+         client.workflow_deployments.update_workflow_release_tag(
+             id="id",
+             name="name",
+         )
+         """
+         _request: typing.Dict[str, typing.Any] = {}
+         if history_item_id is not OMIT:
+             _request["history_item_id"] = history_item_id
+         _response = self._client_wrapper.httpx_client.request(
+             method="PATCH",
+             url=urllib.parse.urljoin(
+                 f"{self._client_wrapper.get_environment().default}/",
+                 f"v1/workflow-deployments/{jsonable_encoder(id)}/release-tags/{jsonable_encoder(name)}",
+             ),
+             params=jsonable_encoder(
+                 request_options.get("additional_query_parameters") if request_options is not None else None
+             ),
+             json=jsonable_encoder(_request)
+             if request_options is None or request_options.get("additional_body_parameters") is None
+             else {
+                 **jsonable_encoder(_request),
+                 **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+             },
+             headers=jsonable_encoder(
+                 remove_none_from_dict(
+                     {
+                         **self._client_wrapper.get_headers(),
+                         **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                     }
+                 )
+             ),
+             timeout=request_options.get("timeout_in_seconds")
+             if request_options is not None and request_options.get("timeout_in_seconds") is not None
+             else self._client_wrapper.get_timeout(),
+             retries=0,
+             max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic_v1.parse_obj_as(WorkflowReleaseTagRead, _response.json())  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+

  class AsyncWorkflowDeploymentsClient:
      def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -257,3 +385,127 @@ class AsyncWorkflowDeploymentsClient:
          except JSONDecodeError:
              raise ApiError(status_code=_response.status_code, body=_response.text)
          raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     async def retrieve_workflow_release_tag(
+         self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+     ) -> WorkflowReleaseTagRead:
+         """
+         Retrieve a Workflow Release Tag by tag name, associated with a specified Workflow Deployment.
+
+         Parameters:
+             - id: str. A UUID string identifying this workflow deployment.
+
+             - name: str. The name of the Release Tag associated with this Workflow Deployment that you'd like to retrieve.
+
+             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+         ---
+         from vellum.client import AsyncVellum
+
+         client = AsyncVellum(
+             api_key="YOUR_API_KEY",
+         )
+         await client.workflow_deployments.retrieve_workflow_release_tag(
+             id="id",
+             name="name",
+         )
+         """
+         _response = await self._client_wrapper.httpx_client.request(
+             method="GET",
+             url=urllib.parse.urljoin(
+                 f"{self._client_wrapper.get_environment().default}/",
+                 f"v1/workflow-deployments/{jsonable_encoder(id)}/release-tags/{jsonable_encoder(name)}",
+             ),
+             params=jsonable_encoder(
+                 request_options.get("additional_query_parameters") if request_options is not None else None
+             ),
+             headers=jsonable_encoder(
+                 remove_none_from_dict(
+                     {
+                         **self._client_wrapper.get_headers(),
+                         **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                     }
+                 )
+             ),
+             timeout=request_options.get("timeout_in_seconds")
+             if request_options is not None and request_options.get("timeout_in_seconds") is not None
+             else self._client_wrapper.get_timeout(),
+             retries=0,
+             max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic_v1.parse_obj_as(WorkflowReleaseTagRead, _response.json())  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+     async def update_workflow_release_tag(
+         self,
+         id: str,
+         name: str,
+         *,
+         history_item_id: typing.Optional[str] = OMIT,
+         request_options: typing.Optional[RequestOptions] = None,
+     ) -> WorkflowReleaseTagRead:
+         """
+         Updates an existing Release Tag associated with the specified Workflow Deployment.
+
+         Parameters:
+             - id: str. A UUID string identifying this workflow deployment.
+
+             - name: str. The name of the Release Tag associated with this Workflow Deployment that you'd like to update.
+
+             - history_item_id: typing.Optional[str]. The ID of the Workflow Deployment History Item to tag
+
+             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+         ---
+         from vellum.client import AsyncVellum
+
+         client = AsyncVellum(
+             api_key="YOUR_API_KEY",
+         )
+         await client.workflow_deployments.update_workflow_release_tag(
+             id="id",
+             name="name",
+         )
+         """
+         _request: typing.Dict[str, typing.Any] = {}
+         if history_item_id is not OMIT:
+             _request["history_item_id"] = history_item_id
+         _response = await self._client_wrapper.httpx_client.request(
+             method="PATCH",
+             url=urllib.parse.urljoin(
+                 f"{self._client_wrapper.get_environment().default}/",
+                 f"v1/workflow-deployments/{jsonable_encoder(id)}/release-tags/{jsonable_encoder(name)}",
+             ),
+             params=jsonable_encoder(
+                 request_options.get("additional_query_parameters") if request_options is not None else None
+             ),
+             json=jsonable_encoder(_request)
+             if request_options is None or request_options.get("additional_body_parameters") is None
+             else {
+                 **jsonable_encoder(_request),
+                 **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+             },
+             headers=jsonable_encoder(
+                 remove_none_from_dict(
+                     {
+                         **self._client_wrapper.get_headers(),
+                         **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+                     }
+                 )
+             ),
+             timeout=request_options.get("timeout_in_seconds")
+             if request_options is not None and request_options.get("timeout_in_seconds") is not None
+             else self._client_wrapper.get_timeout(),
+             retries=0,
+             max_retries=request_options.get("max_retries") if request_options is not None else 0,  # type: ignore
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic_v1.parse_obj_as(WorkflowReleaseTagRead, _response.json())  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
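
The additions above give the workflow_deployments resource read and update access to Release Tags. A combined usage sketch follows, assuming WorkflowReleaseTagRead mirrors the DeploymentReleaseTagRead model shown later in this diff; the IDs and tag name are placeholders.

    # Hypothetical usage of the new release tag methods; identifiers are placeholders.
    from vellum.client import Vellum

    client = Vellum(api_key="YOUR_API_KEY")

    # Look up an existing Release Tag on a Workflow Deployment by name.
    tag = client.workflow_deployments.retrieve_workflow_release_tag(
        id="workflow-deployment-uuid",
        name="production",
    )

    # Re-point the same tag at a specific Workflow Deployment History Item.
    updated = client.workflow_deployments.update_workflow_release_tag(
        id="workflow-deployment-uuid",
        name="production",
        history_item_id="history-item-uuid",
    )
    print(tag, updated)  # WorkflowReleaseTagRead pydantic models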
vellum/types/__init__.py CHANGED
@@ -94,6 +94,8 @@ from .conditional_node_result import ConditionalNodeResult
  from .conditional_node_result_data import ConditionalNodeResultData
  from .deployment_provider_payload_response import DeploymentProviderPayloadResponse
  from .deployment_read import DeploymentRead
+ from .deployment_release_tag_deployment_history_item import DeploymentReleaseTagDeploymentHistoryItem
+ from .deployment_release_tag_read import DeploymentReleaseTagRead
  from .document_document_to_document_index import DocumentDocumentToDocumentIndex
  from .document_index_chunking import (
      DocumentIndexChunking,
@@ -228,6 +230,8 @@ from .json_variable_value import JsonVariableValue
  from .json_vellum_value import JsonVellumValue
  from .logical_operator import LogicalOperator
  from .logprobs_enum import LogprobsEnum
+ from .merge_enum import MergeEnum
+ from .merge_node_result import MergeNodeResult
  from .metadata_filter_config_request import MetadataFilterConfigRequest
  from .metadata_filter_rule_combinator import MetadataFilterRuleCombinator
  from .metadata_filter_rule_request import MetadataFilterRuleRequest
@@ -363,6 +367,7 @@ from .rejected_execute_prompt_response import RejectedExecutePromptResponse
  from .rejected_execute_workflow_workflow_result_event import RejectedExecuteWorkflowWorkflowResultEvent
  from .rejected_prompt_execution_meta import RejectedPromptExecutionMeta
  from .rejected_workflow_node_result_event import RejectedWorkflowNodeResultEvent
+ from .release_tag_source import ReleaseTagSource
  from .sandbox_scenario import SandboxScenario
  from .scenario_input import ScenarioInput, ScenarioInput_ChatHistory, ScenarioInput_String
  from .scenario_input_chat_history_variable_value import ScenarioInputChatHistoryVariableValue
@@ -410,6 +415,7 @@ from .submit_workflow_execution_actual_request import (
  )
  from .subworkflow_enum import SubworkflowEnum
  from .subworkflow_node_result import SubworkflowNodeResult
+ from .subworkflow_node_result_data import SubworkflowNodeResultData
  from .templating_node_array_result import TemplatingNodeArrayResult
  from .templating_node_chat_history_result import TemplatingNodeChatHistoryResult
  from .templating_node_error_result import TemplatingNodeErrorResult
@@ -569,6 +575,7 @@ from .workflow_node_result_data import (
      WorkflowNodeResultData_Api,
      WorkflowNodeResultData_CodeExecution,
      WorkflowNodeResultData_Conditional,
+     WorkflowNodeResultData_Merge,
      WorkflowNodeResultData_Metric,
      WorkflowNodeResultData_Prompt,
      WorkflowNodeResultData_Search,
@@ -605,6 +612,8 @@ from .workflow_output_json import WorkflowOutputJson
  from .workflow_output_number import WorkflowOutputNumber
  from .workflow_output_search_results import WorkflowOutputSearchResults
  from .workflow_output_string import WorkflowOutputString
+ from .workflow_release_tag_read import WorkflowReleaseTagRead
+ from .workflow_release_tag_workflow_deployment_history_item import WorkflowReleaseTagWorkflowDeploymentHistoryItem
  from .workflow_request_chat_history_input_request import WorkflowRequestChatHistoryInputRequest
  from .workflow_request_input_request import (
      WorkflowRequestInputRequest,
@@ -711,6 +720,8 @@ __all__ = [
      "ConditionalNodeResultData",
      "DeploymentProviderPayloadResponse",
      "DeploymentRead",
+     "DeploymentReleaseTagDeploymentHistoryItem",
+     "DeploymentReleaseTagRead",
      "DocumentDocumentToDocumentIndex",
      "DocumentIndexChunking",
      "DocumentIndexChunkingRequest",
@@ -829,6 +840,8 @@ __all__ = [
      "JsonVellumValue",
      "LogicalOperator",
      "LogprobsEnum",
+     "MergeEnum",
+     "MergeNodeResult",
      "MetadataFilterConfigRequest",
      "MetadataFilterRuleCombinator",
      "MetadataFilterRuleRequest",
@@ -950,6 +963,7 @@ __all__ = [
      "RejectedExecuteWorkflowWorkflowResultEvent",
      "RejectedPromptExecutionMeta",
      "RejectedWorkflowNodeResultEvent",
+     "ReleaseTagSource",
      "SandboxScenario",
      "ScenarioInput",
      "ScenarioInputChatHistoryVariableValue",
@@ -997,6 +1011,7 @@ __all__ = [
      "SubmitWorkflowExecutionActualRequest_String",
      "SubworkflowEnum",
      "SubworkflowNodeResult",
+     "SubworkflowNodeResultData",
      "TemplatingNodeArrayResult",
      "TemplatingNodeChatHistoryResult",
      "TemplatingNodeErrorResult",
@@ -1135,6 +1150,7 @@ __all__ = [
      "WorkflowNodeResultData_Api",
      "WorkflowNodeResultData_CodeExecution",
      "WorkflowNodeResultData_Conditional",
+     "WorkflowNodeResultData_Merge",
      "WorkflowNodeResultData_Metric",
      "WorkflowNodeResultData_Prompt",
      "WorkflowNodeResultData_Search",
@@ -1166,6 +1182,8 @@ __all__ = [
      "WorkflowOutput_Number",
      "WorkflowOutput_SearchResults",
      "WorkflowOutput_String",
+     "WorkflowReleaseTagRead",
+     "WorkflowReleaseTagWorkflowDeploymentHistoryItem",
      "WorkflowRequestChatHistoryInputRequest",
      "WorkflowRequestInputRequest",
      "WorkflowRequestInputRequest_ChatHistory",
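
The import and __all__ changes above re-export the new models from vellum.types, so they can be imported directly alongside the existing types:

    # Sketch: the new 0.6.x types are importable from vellum.types.
    from vellum.types import (
        DeploymentReleaseTagRead,
        MergeNodeResult,
        ReleaseTagSource,
        SubworkflowNodeResultData,
        WorkflowReleaseTagRead,
    )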
@@ -0,0 +1,26 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from ..core.pydantic_utilities import pydantic_v1
+
+
+ class DeploymentReleaseTagDeploymentHistoryItem(pydantic_v1.BaseModel):
+     id: str
+     timestamp: dt.datetime
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         extra = pydantic_v1.Extra.allow
+         json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,43 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from ..core.pydantic_utilities import pydantic_v1
+ from .deployment_release_tag_deployment_history_item import DeploymentReleaseTagDeploymentHistoryItem
+ from .release_tag_source import ReleaseTagSource
+
+
+ class DeploymentReleaseTagRead(pydantic_v1.BaseModel):
+     name: str = pydantic_v1.Field()
+     """
+     The name of the Release Tag
+     """
+
+     source: ReleaseTagSource = pydantic_v1.Field()
+     """
+     The source of how the Release Tag was originally created
+
+     - `SYSTEM` - System
+     - `USER` - User
+     """
+
+     history_item: DeploymentReleaseTagDeploymentHistoryItem = pydantic_v1.Field()
+     """
+     The Deployment History Item that this Release Tag is associated with
+     """
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         extra = pydantic_v1.Extra.allow
+         json_encoders = {dt.datetime: serialize_datetime}
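
A short sketch of constructing and serializing the model above; the field values are placeholders. Because json() and dict() are overridden, serialization defaults to by_alias=True and exclude_unset=True, and datetimes go through the SDK's serialize_datetime encoder.

    # Hypothetical construction of the new model; values are placeholders.
    import datetime as dt

    from vellum.types import (
        DeploymentReleaseTagDeploymentHistoryItem,
        DeploymentReleaseTagRead,
    )

    tag = DeploymentReleaseTagRead(
        name="production",
        source="USER",  # one of the ReleaseTagSource literals
        history_item=DeploymentReleaseTagDeploymentHistoryItem(
            id="history-item-uuid",
            timestamp=dt.datetime(2024, 5, 1, 12, 0, 0),
        ),
    )
    print(tag.json())  # serialized with the by_alias/exclude_unset defaults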
@@ -0,0 +1,5 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ MergeEnum = typing.Literal["MERGE"]
@@ -0,0 +1,27 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from ..core.pydantic_utilities import pydantic_v1
+
+
+ class MergeNodeResult(pydantic_v1.BaseModel):
+     """
+     A Node Result Event emitted from a Merge Node.
+     """
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         extra = pydantic_v1.Extra.allow
+         json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,5 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+
+ ReleaseTagSource = typing.Union[typing.Literal["SYSTEM", "USER"], typing.Any]
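
ReleaseTagSource follows the SDK's usual open-enum pattern: the Literal arm gives type checkers the documented values, while the typing.Any arm lets values introduced by a newer API version pass validation instead of failing. A minimal sketch of handling both cases:

    # Sketch: handle the documented values and fall through for unknown ones.
    from vellum.types import ReleaseTagSource

    def describe_source(source: ReleaseTagSource) -> str:
        if source == "SYSTEM":
            return "created automatically by Vellum"
        if source == "USER":
            return "created by a user"
        return f"unrecognized release tag source: {source}"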