vellum-ai 0.6.1__py3-none-any.whl → 0.6.3__py3-none-any.whl

vellum/__init__.py CHANGED
@@ -73,6 +73,8 @@ from .types import (
  ConditionalNodeResultData,
  DeploymentProviderPayloadResponse,
  DeploymentRead,
+ DeploymentReleaseTagDeploymentHistoryItem,
+ DeploymentReleaseTagRead,
  DocumentDocumentToDocumentIndex,
  DocumentIndexChunking,
  DocumentIndexChunkingRequest,
@@ -314,6 +316,7 @@ from .types import (
  RejectedExecuteWorkflowWorkflowResultEvent,
  RejectedPromptExecutionMeta,
  RejectedWorkflowNodeResultEvent,
+ ReleaseTagSource,
  SandboxScenario,
  ScenarioInput,
  ScenarioInputChatHistoryVariableValue,
@@ -361,6 +364,7 @@ from .types import (
  SubmitWorkflowExecutionActualRequest_String,
  SubworkflowEnum,
  SubworkflowNodeResult,
+ SubworkflowNodeResultData,
  TemplatingNodeArrayResult,
  TemplatingNodeChatHistoryResult,
  TemplatingNodeErrorResult,
@@ -531,6 +535,8 @@ from .types import (
  WorkflowOutput_Number,
  WorkflowOutput_SearchResults,
  WorkflowOutput_String,
+ WorkflowReleaseTagRead,
+ WorkflowReleaseTagWorkflowDeploymentHistoryItem,
  WorkflowRequestChatHistoryInputRequest,
  WorkflowRequestInputRequest,
  WorkflowRequestInputRequest_ChatHistory,
@@ -575,6 +581,7 @@ from .resources import (
  test_suite_runs,
  test_suites,
  workflow_deployments,
+ workflow_sandboxes,
  )
  from .environment import VellumEnvironment
  from .version import __version__
@@ -653,6 +660,8 @@ __all__ = [
  "ConditionalNodeResultData",
  "DeploymentProviderPayloadResponse",
  "DeploymentRead",
+ "DeploymentReleaseTagDeploymentHistoryItem",
+ "DeploymentReleaseTagRead",
  "DeploymentsListRequestStatus",
  "DocumentDocumentToDocumentIndex",
  "DocumentIndexChunking",
@@ -899,6 +908,7 @@ __all__ = [
  "RejectedExecuteWorkflowWorkflowResultEvent",
  "RejectedPromptExecutionMeta",
  "RejectedWorkflowNodeResultEvent",
+ "ReleaseTagSource",
  "SandboxScenario",
  "ScenarioInput",
  "ScenarioInputChatHistoryVariableValue",
@@ -946,6 +956,7 @@ __all__ = [
  "SubmitWorkflowExecutionActualRequest_String",
  "SubworkflowEnum",
  "SubworkflowNodeResult",
+ "SubworkflowNodeResultData",
  "TemplatingNodeArrayResult",
  "TemplatingNodeChatHistoryResult",
  "TemplatingNodeErrorResult",
@@ -1118,6 +1129,8 @@ __all__ = [
  "WorkflowOutput_Number",
  "WorkflowOutput_SearchResults",
  "WorkflowOutput_String",
+ "WorkflowReleaseTagRead",
+ "WorkflowReleaseTagWorkflowDeploymentHistoryItem",
  "WorkflowRequestChatHistoryInputRequest",
  "WorkflowRequestInputRequest",
  "WorkflowRequestInputRequest_ChatHistory",
@@ -1157,4 +1170,5 @@ __all__ = [
  "test_suite_runs",
  "test_suites",
  "workflow_deployments",
+ "workflow_sandboxes",
  ]
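
The new types and the workflow_sandboxes resource module are re-exported from the package root, so they are importable directly from vellum. A minimal sketch based only on the __all__ entries above (no other exports are assumed):

# Sketch: importing the symbols newly exported in 0.6.3 from the package root.
from vellum import (
    DeploymentReleaseTagDeploymentHistoryItem,
    DeploymentReleaseTagRead,
    ReleaseTagSource,
    SubworkflowNodeResultData,
    WorkflowReleaseTagRead,
    WorkflowReleaseTagWorkflowDeploymentHistoryItem,
)
from vellum.resources import workflow_sandboxes  # newly exported resource module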
vellum/client.py CHANGED
@@ -26,6 +26,7 @@ from .resources.sandboxes.client import AsyncSandboxesClient, SandboxesClient
  from .resources.test_suite_runs.client import AsyncTestSuiteRunsClient, TestSuiteRunsClient
  from .resources.test_suites.client import AsyncTestSuitesClient, TestSuitesClient
  from .resources.workflow_deployments.client import AsyncWorkflowDeploymentsClient, WorkflowDeploymentsClient
+ from .resources.workflow_sandboxes.client import AsyncWorkflowSandboxesClient, WorkflowSandboxesClient
  from .types.execute_prompt_event import ExecutePromptEvent
  from .types.execute_prompt_response import ExecutePromptResponse
  from .types.execute_workflow_response import ExecuteWorkflowResponse
@@ -100,6 +101,7 @@ class Vellum:
  self.test_suite_runs = TestSuiteRunsClient(client_wrapper=self._client_wrapper)
  self.test_suites = TestSuitesClient(client_wrapper=self._client_wrapper)
  self.workflow_deployments = WorkflowDeploymentsClient(client_wrapper=self._client_wrapper)
+ self.workflow_sandboxes = WorkflowSandboxesClient(client_wrapper=self._client_wrapper)

  def execute_prompt(
  self,
@@ -1062,6 +1064,7 @@ class AsyncVellum:
  self.test_suite_runs = AsyncTestSuiteRunsClient(client_wrapper=self._client_wrapper)
  self.test_suites = AsyncTestSuitesClient(client_wrapper=self._client_wrapper)
  self.workflow_deployments = AsyncWorkflowDeploymentsClient(client_wrapper=self._client_wrapper)
+ self.workflow_sandboxes = AsyncWorkflowSandboxesClient(client_wrapper=self._client_wrapper)

  async def execute_prompt(
  self,
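
A minimal sketch of how the newly registered resource is reached from an instantiated client. Only the constructor assignments above are relied on; the methods defined on WorkflowSandboxesClient live in vellum/resources/workflow_sandboxes/client.py, which is not shown in this diff, so none are assumed here:

# Sketch, based only on the constructor changes above.
from vellum.client import Vellum

client = Vellum(api_key="YOUR_API_KEY")
workflow_sandboxes_client = client.workflow_sandboxes  # new resource client attached in 0.6.3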
@@ -18,7 +18,7 @@ class BaseClientWrapper:
  headers: typing.Dict[str, str] = {
  "X-Fern-Language": "Python",
  "X-Fern-SDK-Name": "vellum-ai",
- "X-Fern-SDK-Version": "0.6.1",
+ "X-Fern-SDK-Version": "0.6.3",
  }
  headers["X_API_KEY"] = self.api_key
  return headers
@@ -9,6 +9,7 @@ from . import (
  test_suite_runs,
  test_suites,
  workflow_deployments,
+ workflow_sandboxes,
  )
  from .deployments import DeploymentsListRequestStatus
  from .document_indexes import DocumentIndexesListRequestStatus
@@ -26,4 +27,5 @@ __all__ = [
  "test_suite_runs",
  "test_suites",
  "workflow_deployments",
+ "workflow_sandboxes",
  ]
@@ -16,6 +16,7 @@ from ...errors.internal_server_error import InternalServerError
  from ...errors.not_found_error import NotFoundError
  from ...types.deployment_provider_payload_response import DeploymentProviderPayloadResponse
  from ...types.deployment_read import DeploymentRead
+ from ...types.deployment_release_tag_read import DeploymentReleaseTagRead
  from ...types.paginated_slim_deployment_read_list import PaginatedSlimDeploymentReadList
  from ...types.prompt_deployment_input_request import PromptDeploymentInputRequest
  from .types.deployments_list_request_status import DeploymentsListRequestStatus
@@ -144,6 +145,130 @@ class DeploymentsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def retrieve_deployment_release_tag(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> DeploymentReleaseTagRead:
+ """
+ Retrieve a Deployment Release Tag by tag name, associated with a specified Deployment.
+
+ Parameters:
+ - id: str. A UUID string identifying this deployment.
+
+ - name: str. The name of the Release Tag associated with this Deployment that you'd like to retrieve.
+
+ - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+ ---
+ from vellum.client import Vellum
+
+ client = Vellum(
+ api_key="YOUR_API_KEY",
+ )
+ client.deployments.retrieve_deployment_release_tag(
+ id="id",
+ name="name",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ method="GET",
+ url=urllib.parse.urljoin(
+ f"{self._client_wrapper.get_environment().default}/",
+ f"v1/deployments/{jsonable_encoder(id)}/release-tags/{jsonable_encoder(name)}",
+ ),
+ params=jsonable_encoder(
+ request_options.get("additional_query_parameters") if request_options is not None else None
+ ),
+ headers=jsonable_encoder(
+ remove_none_from_dict(
+ {
+ **self._client_wrapper.get_headers(),
+ **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+ }
+ )
+ ),
+ timeout=request_options.get("timeout_in_seconds")
+ if request_options is not None and request_options.get("timeout_in_seconds") is not None
+ else self._client_wrapper.get_timeout(),
+ retries=0,
+ max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(DeploymentReleaseTagRead, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_deployment_release_tag(
+ self,
+ id: str,
+ name: str,
+ *,
+ history_item_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> DeploymentReleaseTagRead:
+ """
+ Updates an existing Release Tag associated with the specified Deployment.
+
+ Parameters:
+ - id: str. A UUID string identifying this deployment.
+
+ - name: str. The name of the Release Tag associated with this Deployment that you'd like to update.
+
+ - history_item_id: typing.Optional[str]. The ID of the Deployment History Item to tag
+
+ - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+ ---
+ from vellum.client import Vellum
+
+ client = Vellum(
+ api_key="YOUR_API_KEY",
+ )
+ client.deployments.update_deployment_release_tag(
+ id="id",
+ name="name",
+ )
+ """
+ _request: typing.Dict[str, typing.Any] = {}
+ if history_item_id is not OMIT:
+ _request["history_item_id"] = history_item_id
+ _response = self._client_wrapper.httpx_client.request(
+ method="PATCH",
+ url=urllib.parse.urljoin(
+ f"{self._client_wrapper.get_environment().default}/",
+ f"v1/deployments/{jsonable_encoder(id)}/release-tags/{jsonable_encoder(name)}",
+ ),
+ params=jsonable_encoder(
+ request_options.get("additional_query_parameters") if request_options is not None else None
+ ),
+ json=jsonable_encoder(_request)
+ if request_options is None or request_options.get("additional_body_parameters") is None
+ else {
+ **jsonable_encoder(_request),
+ **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+ },
+ headers=jsonable_encoder(
+ remove_none_from_dict(
+ {
+ **self._client_wrapper.get_headers(),
+ **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+ }
+ )
+ ),
+ timeout=request_options.get("timeout_in_seconds")
+ if request_options is not None and request_options.get("timeout_in_seconds") is not None
+ else self._client_wrapper.get_timeout(),
+ retries=0,
+ max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(DeploymentReleaseTagRead, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  def retrieve_provider_payload(
  self,
  *,
@@ -346,6 +471,130 @@ class AsyncDeploymentsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ async def retrieve_deployment_release_tag(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> DeploymentReleaseTagRead:
+ """
+ Retrieve a Deployment Release Tag by tag name, associated with a specified Deployment.
+
+ Parameters:
+ - id: str. A UUID string identifying this deployment.
+
+ - name: str. The name of the Release Tag associated with this Deployment that you'd like to retrieve.
+
+ - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+ ---
+ from vellum.client import AsyncVellum
+
+ client = AsyncVellum(
+ api_key="YOUR_API_KEY",
+ )
+ await client.deployments.retrieve_deployment_release_tag(
+ id="id",
+ name="name",
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ method="GET",
+ url=urllib.parse.urljoin(
+ f"{self._client_wrapper.get_environment().default}/",
+ f"v1/deployments/{jsonable_encoder(id)}/release-tags/{jsonable_encoder(name)}",
+ ),
+ params=jsonable_encoder(
+ request_options.get("additional_query_parameters") if request_options is not None else None
+ ),
+ headers=jsonable_encoder(
+ remove_none_from_dict(
+ {
+ **self._client_wrapper.get_headers(),
+ **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+ }
+ )
+ ),
+ timeout=request_options.get("timeout_in_seconds")
+ if request_options is not None and request_options.get("timeout_in_seconds") is not None
+ else self._client_wrapper.get_timeout(),
+ retries=0,
+ max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(DeploymentReleaseTagRead, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_deployment_release_tag(
+ self,
+ id: str,
+ name: str,
+ *,
+ history_item_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> DeploymentReleaseTagRead:
+ """
+ Updates an existing Release Tag associated with the specified Deployment.
+
+ Parameters:
+ - id: str. A UUID string identifying this deployment.
+
+ - name: str. The name of the Release Tag associated with this Deployment that you'd like to update.
+
+ - history_item_id: typing.Optional[str]. The ID of the Deployment History Item to tag
+
+ - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+ ---
+ from vellum.client import AsyncVellum
+
+ client = AsyncVellum(
+ api_key="YOUR_API_KEY",
+ )
+ await client.deployments.update_deployment_release_tag(
+ id="id",
+ name="name",
+ )
+ """
+ _request: typing.Dict[str, typing.Any] = {}
+ if history_item_id is not OMIT:
+ _request["history_item_id"] = history_item_id
+ _response = await self._client_wrapper.httpx_client.request(
+ method="PATCH",
+ url=urllib.parse.urljoin(
+ f"{self._client_wrapper.get_environment().default}/",
+ f"v1/deployments/{jsonable_encoder(id)}/release-tags/{jsonable_encoder(name)}",
+ ),
+ params=jsonable_encoder(
+ request_options.get("additional_query_parameters") if request_options is not None else None
+ ),
+ json=jsonable_encoder(_request)
+ if request_options is None or request_options.get("additional_body_parameters") is None
+ else {
+ **jsonable_encoder(_request),
+ **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+ },
+ headers=jsonable_encoder(
+ remove_none_from_dict(
+ {
+ **self._client_wrapper.get_headers(),
+ **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+ }
+ )
+ ),
+ timeout=request_options.get("timeout_in_seconds")
+ if request_options is not None and request_options.get("timeout_in_seconds") is not None
+ else self._client_wrapper.get_timeout(),
+ retries=0,
+ max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(DeploymentReleaseTagRead, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  async def retrieve_provider_payload(
  self,
  *,
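
Taken together, the two new methods let callers read a release tag and repoint it at a different Deployment History Item. A minimal usage sketch, assuming real IDs and tag names in place of the placeholders; the request_options keys shown are the ones consulted by the request-building code above:

from vellum.client import Vellum

client = Vellum(api_key="YOUR_API_KEY")

# GET v1/deployments/{id}/release-tags/{name}
tag = client.deployments.retrieve_deployment_release_tag(
    id="YOUR_DEPLOYMENT_ID",  # placeholder deployment UUID
    name="production",        # placeholder tag name
)

# PATCH the same endpoint to move the tag to another Deployment History Item.
updated_tag = client.deployments.update_deployment_release_tag(
    id="YOUR_DEPLOYMENT_ID",
    name="production",
    history_item_id="YOUR_HISTORY_ITEM_ID",      # placeholder history item ID
    request_options={"timeout_in_seconds": 30},  # optional per-request override
)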
@@ -10,6 +10,7 @@ from ...core.jsonable_encoder import jsonable_encoder
  from ...core.pydantic_utilities import pydantic_v1
  from ...core.remove_none_from_dict import remove_none_from_dict
  from ...core.request_options import RequestOptions
+ from ...types.deployment_read import DeploymentRead
  from ...types.named_scenario_input_request import NamedScenarioInputRequest
  from ...types.sandbox_scenario import SandboxScenario

@@ -21,6 +22,89 @@ class SandboxesClient:
  def __init__(self, *, client_wrapper: SyncClientWrapper):
  self._client_wrapper = client_wrapper

+ def deploy_prompt(
+ self,
+ id: str,
+ prompt_id: str,
+ *,
+ prompt_deployment_id: typing.Optional[str] = OMIT,
+ prompt_deployment_name: typing.Optional[str] = OMIT,
+ label: typing.Optional[str] = OMIT,
+ release_tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> DeploymentRead:
+ """
+ Parameters:
+ - id: str. A UUID string identifying this sandbox.
+
+ - prompt_id: str. An ID identifying the Prompt you'd like to deploy.
+
+ - prompt_deployment_id: typing.Optional[str]. The Vellum-generated ID of the Prompt Deployment you'd like to update. Cannot specify both this and prompt_deployment_name. Leave null to create a new Prompt Deployment.
+
+ - prompt_deployment_name: typing.Optional[str]. The unique name of the Prompt Deployment you'd like to either create or update. Cannot specify both this and prompt_deployment_id. If provided and matches an existing Prompt Deployment, that Prompt Deployment will be updated. Otherwise, a new Prompt Deployment will be created.
+
+ - label: typing.Optional[str]. In the event that a new Prompt Deployment is created, this will be the label it's given.
+
+ - release_tags: typing.Optional[typing.Sequence[str]]. Optionally provide the release tags that you'd like to be associated with the latest release of the created/updated Prompt Deployment.
+
+ - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+ ---
+ from vellum.client import Vellum
+
+ client = Vellum(
+ api_key="YOUR_API_KEY",
+ )
+ client.sandboxes.deploy_prompt(
+ id="id",
+ prompt_id="prompt_id",
+ )
+ """
+ _request: typing.Dict[str, typing.Any] = {}
+ if prompt_deployment_id is not OMIT:
+ _request["prompt_deployment_id"] = prompt_deployment_id
+ if prompt_deployment_name is not OMIT:
+ _request["prompt_deployment_name"] = prompt_deployment_name
+ if label is not OMIT:
+ _request["label"] = label
+ if release_tags is not OMIT:
+ _request["release_tags"] = release_tags
+ _response = self._client_wrapper.httpx_client.request(
+ method="POST",
+ url=urllib.parse.urljoin(
+ f"{self._client_wrapper.get_environment().default}/",
+ f"v1/sandboxes/{jsonable_encoder(id)}/prompts/{jsonable_encoder(prompt_id)}/deploy",
+ ),
+ params=jsonable_encoder(
+ request_options.get("additional_query_parameters") if request_options is not None else None
+ ),
+ json=jsonable_encoder(_request)
+ if request_options is None or request_options.get("additional_body_parameters") is None
+ else {
+ **jsonable_encoder(_request),
+ **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+ },
+ headers=jsonable_encoder(
+ remove_none_from_dict(
+ {
+ **self._client_wrapper.get_headers(),
+ **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+ }
+ )
+ ),
+ timeout=request_options.get("timeout_in_seconds")
+ if request_options is not None and request_options.get("timeout_in_seconds") is not None
+ else self._client_wrapper.get_timeout(),
+ retries=0,
+ max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(DeploymentRead, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  def upsert_sandbox_scenario(
  self,
  id: str,
@@ -161,6 +245,89 @@ class AsyncSandboxesClient:
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
  self._client_wrapper = client_wrapper

+ async def deploy_prompt(
+ self,
+ id: str,
+ prompt_id: str,
+ *,
+ prompt_deployment_id: typing.Optional[str] = OMIT,
+ prompt_deployment_name: typing.Optional[str] = OMIT,
+ label: typing.Optional[str] = OMIT,
+ release_tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> DeploymentRead:
+ """
+ Parameters:
+ - id: str. A UUID string identifying this sandbox.
+
+ - prompt_id: str. An ID identifying the Prompt you'd like to deploy.
+
+ - prompt_deployment_id: typing.Optional[str]. The Vellum-generated ID of the Prompt Deployment you'd like to update. Cannot specify both this and prompt_deployment_name. Leave null to create a new Prompt Deployment.
+
+ - prompt_deployment_name: typing.Optional[str]. The unique name of the Prompt Deployment you'd like to either create or update. Cannot specify both this and prompt_deployment_id. If provided and matches an existing Prompt Deployment, that Prompt Deployment will be updated. Otherwise, a new Prompt Deployment will be created.
+
+ - label: typing.Optional[str]. In the event that a new Prompt Deployment is created, this will be the label it's given.
+
+ - release_tags: typing.Optional[typing.Sequence[str]]. Optionally provide the release tags that you'd like to be associated with the latest release of the created/updated Prompt Deployment.
+
+ - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+ ---
+ from vellum.client import AsyncVellum
+
+ client = AsyncVellum(
+ api_key="YOUR_API_KEY",
+ )
+ await client.sandboxes.deploy_prompt(
+ id="id",
+ prompt_id="prompt_id",
+ )
+ """
+ _request: typing.Dict[str, typing.Any] = {}
+ if prompt_deployment_id is not OMIT:
+ _request["prompt_deployment_id"] = prompt_deployment_id
+ if prompt_deployment_name is not OMIT:
+ _request["prompt_deployment_name"] = prompt_deployment_name
+ if label is not OMIT:
+ _request["label"] = label
+ if release_tags is not OMIT:
+ _request["release_tags"] = release_tags
+ _response = await self._client_wrapper.httpx_client.request(
+ method="POST",
+ url=urllib.parse.urljoin(
+ f"{self._client_wrapper.get_environment().default}/",
+ f"v1/sandboxes/{jsonable_encoder(id)}/prompts/{jsonable_encoder(prompt_id)}/deploy",
+ ),
+ params=jsonable_encoder(
+ request_options.get("additional_query_parameters") if request_options is not None else None
+ ),
+ json=jsonable_encoder(_request)
+ if request_options is None or request_options.get("additional_body_parameters") is None
+ else {
+ **jsonable_encoder(_request),
+ **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
+ },
+ headers=jsonable_encoder(
+ remove_none_from_dict(
+ {
+ **self._client_wrapper.get_headers(),
+ **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+ }
+ )
+ ),
+ timeout=request_options.get("timeout_in_seconds")
+ if request_options is not None and request_options.get("timeout_in_seconds") is not None
+ else self._client_wrapper.get_timeout(),
+ retries=0,
+ max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(DeploymentRead, _response.json()) # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  async def upsert_sandbox_scenario(
  self,
  id: str,
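
As a closing note on the sandboxes additions above: a minimal sketch of deploying a sandbox Prompt with the optional deployment-name and release-tag fields from the signature shown in the diff. The IDs, names, and tags are placeholders; deploy_prompt returns a DeploymentRead parsed from the response:

from vellum.client import Vellum

client = Vellum(api_key="YOUR_API_KEY")

# POST v1/sandboxes/{id}/prompts/{prompt_id}/deploy
deployment = client.sandboxes.deploy_prompt(
    id="YOUR_SANDBOX_ID",                # placeholder sandbox UUID
    prompt_id="YOUR_PROMPT_ID",          # placeholder prompt ID
    prompt_deployment_name="my-prompt",  # placeholder; creates or updates the deployment by name
    label="My Prompt",                   # used only if a new Prompt Deployment is created
    release_tags=["staging"],            # placeholder tags for the latest release
)
print(deployment)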