vellum-ai 0.6.2__py3-none-any.whl → 0.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. vellum/__init__.py +58 -2
  2. vellum/client.py +11 -15
  3. vellum/core/client_wrapper.py +1 -1
  4. vellum/resources/__init__.py +2 -2
  5. vellum/resources/deployments/client.py +0 -167
  6. vellum/resources/sandboxes/client.py +167 -0
  7. vellum/resources/test_suites/client.py +175 -6
  8. vellum/resources/{prompt_versions → workflow_sandboxes}/client.py +39 -39
  9. vellum/types/__init__.py +60 -0
  10. vellum/types/bulk_create_test_suite_test_case_data_request.py +39 -0
  11. vellum/types/bulk_replace_test_suite_test_case_data_request.py +44 -0
  12. vellum/types/create_enum.py +5 -0
  13. vellum/types/created_enum.py +5 -0
  14. vellum/types/delete_enum.py +5 -0
  15. vellum/types/deleted_enum.py +5 -0
  16. vellum/types/replace_enum.py +5 -0
  17. vellum/types/replaced_enum.py +5 -0
  18. vellum/types/test_suite_test_case_bulk_operation_request.py +46 -0
  19. vellum/types/test_suite_test_case_bulk_result.py +58 -0
  20. vellum/types/test_suite_test_case_create_bulk_operation_request.py +35 -0
  21. vellum/types/test_suite_test_case_created_bulk_result.py +31 -0
  22. vellum/types/test_suite_test_case_created_bulk_result_data.py +29 -0
  23. vellum/types/test_suite_test_case_delete_bulk_operation_data_request.py +25 -0
  24. vellum/types/test_suite_test_case_delete_bulk_operation_request.py +38 -0
  25. vellum/types/test_suite_test_case_deleted_bulk_result.py +35 -0
  26. vellum/types/test_suite_test_case_deleted_bulk_result_data.py +29 -0
  27. vellum/types/test_suite_test_case_rejected_bulk_result.py +37 -0
  28. vellum/types/test_suite_test_case_replace_bulk_operation_request.py +35 -0
  29. vellum/types/test_suite_test_case_replaced_bulk_result.py +35 -0
  30. vellum/types/test_suite_test_case_replaced_bulk_result_data.py +29 -0
  31. {vellum_ai-0.6.2.dist-info → vellum_ai-0.6.4.dist-info}/METADATA +1 -1
  32. {vellum_ai-0.6.2.dist-info → vellum_ai-0.6.4.dist-info}/RECORD +35 -14
  33. /vellum/resources/{prompt_versions → workflow_sandboxes}/__init__.py +0 -0
  34. {vellum_ai-0.6.2.dist-info → vellum_ai-0.6.4.dist-info}/LICENSE +0 -0
  35. {vellum_ai-0.6.2.dist-info → vellum_ai-0.6.4.dist-info}/WHEEL +0 -0
vellum/__init__.py CHANGED
@@ -35,6 +35,8 @@ from .types import (
35
35
  BasicVectorizerSentenceTransformersMultiQaMpnetBaseCosV1Request,
36
36
  BasicVectorizerSentenceTransformersMultiQaMpnetBaseDotV1,
37
37
  BasicVectorizerSentenceTransformersMultiQaMpnetBaseDotV1Request,
38
+ BulkCreateTestSuiteTestCaseDataRequest,
39
+ BulkReplaceTestSuiteTestCaseDataRequest,
38
40
  ChatHistoryEnum,
39
41
  ChatHistoryInputRequest,
40
42
  ChatMessage,
@@ -71,6 +73,10 @@ from .types import (
71
73
  CodeExecutionNodeStringResult,
72
74
  ConditionalNodeResult,
73
75
  ConditionalNodeResultData,
76
+ CreateEnum,
77
+ CreatedEnum,
78
+ DeleteEnum,
79
+ DeletedEnum,
74
80
  DeploymentProviderPayloadResponse,
75
81
  DeploymentRead,
76
82
  DeploymentReleaseTagDeploymentHistoryItem,
@@ -317,6 +323,8 @@ from .types import (
317
323
  RejectedPromptExecutionMeta,
318
324
  RejectedWorkflowNodeResultEvent,
319
325
  ReleaseTagSource,
326
+ ReplaceEnum,
327
+ ReplacedEnum,
320
328
  SandboxScenario,
321
329
  ScenarioInput,
322
330
  ScenarioInputChatHistoryVariableValue,
@@ -473,6 +481,26 @@ from .types import (
473
481
  TestSuiteRunWorkflowReleaseTagExecConfigRequest,
474
482
  TestSuiteRunWorkflowReleaseTagExecConfigTypeEnum,
475
483
  TestSuiteTestCase,
484
+ TestSuiteTestCaseBulkOperationRequest,
485
+ TestSuiteTestCaseBulkOperationRequest_Create,
486
+ TestSuiteTestCaseBulkOperationRequest_Delete,
487
+ TestSuiteTestCaseBulkOperationRequest_Replace,
488
+ TestSuiteTestCaseBulkResult,
489
+ TestSuiteTestCaseBulkResult_Created,
490
+ TestSuiteTestCaseBulkResult_Deleted,
491
+ TestSuiteTestCaseBulkResult_Rejected,
492
+ TestSuiteTestCaseBulkResult_Replaced,
493
+ TestSuiteTestCaseCreateBulkOperationRequest,
494
+ TestSuiteTestCaseCreatedBulkResult,
495
+ TestSuiteTestCaseCreatedBulkResultData,
496
+ TestSuiteTestCaseDeleteBulkOperationDataRequest,
497
+ TestSuiteTestCaseDeleteBulkOperationRequest,
498
+ TestSuiteTestCaseDeletedBulkResult,
499
+ TestSuiteTestCaseDeletedBulkResultData,
500
+ TestSuiteTestCaseRejectedBulkResult,
501
+ TestSuiteTestCaseReplaceBulkOperationRequest,
502
+ TestSuiteTestCaseReplacedBulkResult,
503
+ TestSuiteTestCaseReplacedBulkResultData,
476
504
  TextEmbedding3LargeEnum,
477
505
  TextEmbedding3SmallEnum,
478
506
  TextEmbeddingAda002Enum,
@@ -577,11 +605,11 @@ from .resources import (
577
605
  document_indexes,
578
606
  documents,
579
607
  folder_entities,
580
- prompt_versions,
581
608
  sandboxes,
582
609
  test_suite_runs,
583
610
  test_suites,
584
611
  workflow_deployments,
612
+ workflow_sandboxes,
585
613
  )
586
614
  from .environment import VellumEnvironment
587
615
  from .version import __version__
@@ -622,6 +650,8 @@ __all__ = [
622
650
  "BasicVectorizerSentenceTransformersMultiQaMpnetBaseCosV1Request",
623
651
  "BasicVectorizerSentenceTransformersMultiQaMpnetBaseDotV1",
624
652
  "BasicVectorizerSentenceTransformersMultiQaMpnetBaseDotV1Request",
653
+ "BulkCreateTestSuiteTestCaseDataRequest",
654
+ "BulkReplaceTestSuiteTestCaseDataRequest",
625
655
  "ChatHistoryEnum",
626
656
  "ChatHistoryInputRequest",
627
657
  "ChatMessage",
@@ -658,6 +688,10 @@ __all__ = [
658
688
  "CodeExecutionNodeStringResult",
659
689
  "ConditionalNodeResult",
660
690
  "ConditionalNodeResultData",
691
+ "CreateEnum",
692
+ "CreatedEnum",
693
+ "DeleteEnum",
694
+ "DeletedEnum",
661
695
  "DeploymentProviderPayloadResponse",
662
696
  "DeploymentRead",
663
697
  "DeploymentReleaseTagDeploymentHistoryItem",
@@ -909,6 +943,8 @@ __all__ = [
909
943
  "RejectedPromptExecutionMeta",
910
944
  "RejectedWorkflowNodeResultEvent",
911
945
  "ReleaseTagSource",
946
+ "ReplaceEnum",
947
+ "ReplacedEnum",
912
948
  "SandboxScenario",
913
949
  "ScenarioInput",
914
950
  "ScenarioInputChatHistoryVariableValue",
@@ -1065,6 +1101,26 @@ __all__ = [
1065
1101
  "TestSuiteRunWorkflowReleaseTagExecConfigRequest",
1066
1102
  "TestSuiteRunWorkflowReleaseTagExecConfigTypeEnum",
1067
1103
  "TestSuiteTestCase",
1104
+ "TestSuiteTestCaseBulkOperationRequest",
1105
+ "TestSuiteTestCaseBulkOperationRequest_Create",
1106
+ "TestSuiteTestCaseBulkOperationRequest_Delete",
1107
+ "TestSuiteTestCaseBulkOperationRequest_Replace",
1108
+ "TestSuiteTestCaseBulkResult",
1109
+ "TestSuiteTestCaseBulkResult_Created",
1110
+ "TestSuiteTestCaseBulkResult_Deleted",
1111
+ "TestSuiteTestCaseBulkResult_Rejected",
1112
+ "TestSuiteTestCaseBulkResult_Replaced",
1113
+ "TestSuiteTestCaseCreateBulkOperationRequest",
1114
+ "TestSuiteTestCaseCreatedBulkResult",
1115
+ "TestSuiteTestCaseCreatedBulkResultData",
1116
+ "TestSuiteTestCaseDeleteBulkOperationDataRequest",
1117
+ "TestSuiteTestCaseDeleteBulkOperationRequest",
1118
+ "TestSuiteTestCaseDeletedBulkResult",
1119
+ "TestSuiteTestCaseDeletedBulkResultData",
1120
+ "TestSuiteTestCaseRejectedBulkResult",
1121
+ "TestSuiteTestCaseReplaceBulkOperationRequest",
1122
+ "TestSuiteTestCaseReplacedBulkResult",
1123
+ "TestSuiteTestCaseReplacedBulkResultData",
1068
1124
  "TextEmbedding3LargeEnum",
1069
1125
  "TextEmbedding3SmallEnum",
1070
1126
  "TextEmbeddingAda002Enum",
@@ -1166,9 +1222,9 @@ __all__ = [
1166
1222
  "document_indexes",
1167
1223
  "documents",
1168
1224
  "folder_entities",
1169
- "prompt_versions",
1170
1225
  "sandboxes",
1171
1226
  "test_suite_runs",
1172
1227
  "test_suites",
1173
1228
  "workflow_deployments",
1229
+ "workflow_sandboxes",
1174
1230
  ]
vellum/client.py CHANGED
@@ -22,11 +22,11 @@ from .resources.deployments.client import AsyncDeploymentsClient, DeploymentsCli
22
22
  from .resources.document_indexes.client import AsyncDocumentIndexesClient, DocumentIndexesClient
23
23
  from .resources.documents.client import AsyncDocumentsClient, DocumentsClient
24
24
  from .resources.folder_entities.client import AsyncFolderEntitiesClient, FolderEntitiesClient
25
- from .resources.prompt_versions.client import AsyncPromptVersionsClient, PromptVersionsClient
26
25
  from .resources.sandboxes.client import AsyncSandboxesClient, SandboxesClient
27
26
  from .resources.test_suite_runs.client import AsyncTestSuiteRunsClient, TestSuiteRunsClient
28
27
  from .resources.test_suites.client import AsyncTestSuitesClient, TestSuitesClient
29
28
  from .resources.workflow_deployments.client import AsyncWorkflowDeploymentsClient, WorkflowDeploymentsClient
29
+ from .resources.workflow_sandboxes.client import AsyncWorkflowSandboxesClient, WorkflowSandboxesClient
30
30
  from .types.execute_prompt_event import ExecutePromptEvent
31
31
  from .types.execute_prompt_response import ExecutePromptResponse
32
32
  from .types.execute_workflow_response import ExecuteWorkflowResponse
@@ -97,11 +97,11 @@ class Vellum:
97
97
  self.document_indexes = DocumentIndexesClient(client_wrapper=self._client_wrapper)
98
98
  self.documents = DocumentsClient(client_wrapper=self._client_wrapper)
99
99
  self.folder_entities = FolderEntitiesClient(client_wrapper=self._client_wrapper)
100
- self.prompt_versions = PromptVersionsClient(client_wrapper=self._client_wrapper)
101
100
  self.sandboxes = SandboxesClient(client_wrapper=self._client_wrapper)
102
101
  self.test_suite_runs = TestSuiteRunsClient(client_wrapper=self._client_wrapper)
103
102
  self.test_suites = TestSuitesClient(client_wrapper=self._client_wrapper)
104
103
  self.workflow_deployments = WorkflowDeploymentsClient(client_wrapper=self._client_wrapper)
104
+ self.workflow_sandboxes = WorkflowSandboxesClient(client_wrapper=self._client_wrapper)
105
105
 
106
106
  def execute_prompt(
107
107
  self,
@@ -591,7 +591,8 @@ class Vellum:
591
591
  """
592
592
  Generate a completion using a previously defined deployment.
593
593
 
594
- **Note:** Uses a base url of `https://predict.vellum.ai`.
594
+ Important: This endpoint is DEPRECATED and has been superseded by
595
+ [execute-prompt](/api-reference/api-reference/execute-prompt).
595
596
 
596
597
  Parameters:
597
598
  - deployment_id: typing.Optional[str]. The ID of the deployment. Must provide either this or deployment_name.
@@ -679,7 +680,8 @@ class Vellum:
679
680
  """
680
681
  Generate a stream of completions using a previously defined deployment.
681
682
 
682
- **Note:** Uses a base url of `https://predict.vellum.ai`.
683
+ Important: This endpoint is DEPRECATED and has been superseded by
684
+ [execute-prompt-stream](/api-reference/api-reference/execute-prompt-stream).
683
685
 
684
686
  Parameters:
685
687
  - deployment_id: typing.Optional[str]. The ID of the deployment. Must provide either this or deployment_name.
@@ -791,8 +793,6 @@ class Vellum:
791
793
  """
792
794
  Perform a search against a document index.
793
795
 
794
- **Note:** Uses a base url of `https://predict.vellum.ai`.
795
-
796
796
  Parameters:
797
797
  - index_id: typing.Optional[str]. The ID of the index to search against. Must provide either this or index_name.
798
798
 
@@ -871,8 +871,6 @@ class Vellum:
871
871
  """
872
872
  Used to submit feedback regarding the quality of previously generated completions.
873
873
 
874
- **Note:** Uses a base url of `https://predict.vellum.ai`.
875
-
876
874
  Parameters:
877
875
  - deployment_id: typing.Optional[str]. The ID of the deployment. Must provide either this or deployment_name.
878
876
 
@@ -1060,11 +1058,11 @@ class AsyncVellum:
1060
1058
  self.document_indexes = AsyncDocumentIndexesClient(client_wrapper=self._client_wrapper)
1061
1059
  self.documents = AsyncDocumentsClient(client_wrapper=self._client_wrapper)
1062
1060
  self.folder_entities = AsyncFolderEntitiesClient(client_wrapper=self._client_wrapper)
1063
- self.prompt_versions = AsyncPromptVersionsClient(client_wrapper=self._client_wrapper)
1064
1061
  self.sandboxes = AsyncSandboxesClient(client_wrapper=self._client_wrapper)
1065
1062
  self.test_suite_runs = AsyncTestSuiteRunsClient(client_wrapper=self._client_wrapper)
1066
1063
  self.test_suites = AsyncTestSuitesClient(client_wrapper=self._client_wrapper)
1067
1064
  self.workflow_deployments = AsyncWorkflowDeploymentsClient(client_wrapper=self._client_wrapper)
1065
+ self.workflow_sandboxes = AsyncWorkflowSandboxesClient(client_wrapper=self._client_wrapper)
1068
1066
 
1069
1067
  async def execute_prompt(
1070
1068
  self,
@@ -1554,7 +1552,8 @@ class AsyncVellum:
1554
1552
  """
1555
1553
  Generate a completion using a previously defined deployment.
1556
1554
 
1557
- **Note:** Uses a base url of `https://predict.vellum.ai`.
1555
+ Important: This endpoint is DEPRECATED and has been superseded by
1556
+ [execute-prompt](/api-reference/api-reference/execute-prompt).
1558
1557
 
1559
1558
  Parameters:
1560
1559
  - deployment_id: typing.Optional[str]. The ID of the deployment. Must provide either this or deployment_name.
@@ -1642,7 +1641,8 @@ class AsyncVellum:
1642
1641
  """
1643
1642
  Generate a stream of completions using a previously defined deployment.
1644
1643
 
1645
- **Note:** Uses a base url of `https://predict.vellum.ai`.
1644
+ Important: This endpoint is DEPRECATED and has been superseded by
1645
+ [execute-prompt-stream](/api-reference/api-reference/execute-prompt-stream).
1646
1646
 
1647
1647
  Parameters:
1648
1648
  - deployment_id: typing.Optional[str]. The ID of the deployment. Must provide either this or deployment_name.
@@ -1754,8 +1754,6 @@ class AsyncVellum:
1754
1754
  """
1755
1755
  Perform a search against a document index.
1756
1756
 
1757
- **Note:** Uses a base url of `https://predict.vellum.ai`.
1758
-
1759
1757
  Parameters:
1760
1758
  - index_id: typing.Optional[str]. The ID of the index to search against. Must provide either this or index_name.
1761
1759
 
@@ -1834,8 +1832,6 @@ class AsyncVellum:
1834
1832
  """
1835
1833
  Used to submit feedback regarding the quality of previously generated completions.
1836
1834
 
1837
- **Note:** Uses a base url of `https://predict.vellum.ai`.
1838
-
1839
1835
  Parameters:
1840
1836
  - deployment_id: typing.Optional[str]. The ID of the deployment. Must provide either this or deployment_name.
1841
1837
 
@@ -18,7 +18,7 @@ class BaseClientWrapper:
18
18
  headers: typing.Dict[str, str] = {
19
19
  "X-Fern-Language": "Python",
20
20
  "X-Fern-SDK-Name": "vellum-ai",
21
- "X-Fern-SDK-Version": "0.6.2",
21
+ "X-Fern-SDK-Version": "0.6.4",
22
22
  }
23
23
  headers["X_API_KEY"] = self.api_key
24
24
  return headers
@@ -5,11 +5,11 @@ from . import (
5
5
  document_indexes,
6
6
  documents,
7
7
  folder_entities,
8
- prompt_versions,
9
8
  sandboxes,
10
9
  test_suite_runs,
11
10
  test_suites,
12
11
  workflow_deployments,
12
+ workflow_sandboxes,
13
13
  )
14
14
  from .deployments import DeploymentsListRequestStatus
15
15
  from .document_indexes import DocumentIndexesListRequestStatus
@@ -23,9 +23,9 @@ __all__ = [
23
23
  "document_indexes",
24
24
  "documents",
25
25
  "folder_entities",
26
- "prompt_versions",
27
26
  "sandboxes",
28
27
  "test_suite_runs",
29
28
  "test_suites",
30
29
  "workflow_deployments",
30
+ "workflow_sandboxes",
31
31
  ]
@@ -19,7 +19,6 @@ from ...types.deployment_read import DeploymentRead
19
19
  from ...types.deployment_release_tag_read import DeploymentReleaseTagRead
20
20
  from ...types.paginated_slim_deployment_read_list import PaginatedSlimDeploymentReadList
21
21
  from ...types.prompt_deployment_input_request import PromptDeploymentInputRequest
22
- from ...types.workflow_deployment_read import WorkflowDeploymentRead
23
22
  from .types.deployments_list_request_status import DeploymentsListRequestStatus
24
23
 
25
24
  # this is used as the default value for optional parameters
@@ -351,89 +350,6 @@ class DeploymentsClient:
351
350
  raise ApiError(status_code=_response.status_code, body=_response.text)
352
351
  raise ApiError(status_code=_response.status_code, body=_response_json)
353
352
 
354
- def deploy_workflow(
355
- self,
356
- id: str,
357
- workflow_id: str,
358
- *,
359
- workflow_deployment_id: typing.Optional[str] = OMIT,
360
- workflow_deployment_name: typing.Optional[str] = OMIT,
361
- label: typing.Optional[str] = OMIT,
362
- release_tags: typing.Optional[typing.Sequence[str]] = OMIT,
363
- request_options: typing.Optional[RequestOptions] = None,
364
- ) -> WorkflowDeploymentRead:
365
- """
366
- Parameters:
367
- - id: str. A UUID string identifying this workflow sandbox.
368
-
369
- - workflow_id: str. An ID identifying the Workflow you'd like to deploy.
370
-
371
- - workflow_deployment_id: typing.Optional[str]. The Vellum-generated ID of the Workflow Deployment you'd like to update. Cannot specify both this and workflow_deployment_name. Leave null to create a new Workflow Deployment.
372
-
373
- - workflow_deployment_name: typing.Optional[str]. The unique name of the Workflow Deployment you'd like to either create or update. Cannot specify both this and workflow_deployment_id. If provided and matches an existing Workflow Deployment, that Workflow Deployment will be updated. Otherwise, a new Prompt Deployment will be created.
374
-
375
- - label: typing.Optional[str]. In the event that a new Workflow Deployment is created, this will be the label it's given.
376
-
377
- - release_tags: typing.Optional[typing.Sequence[str]]. Optionally provide the release tags that you'd like to be associated with the latest release of the created/updated Prompt Deployment.
378
-
379
- - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
380
- ---
381
- from vellum.client import Vellum
382
-
383
- client = Vellum(
384
- api_key="YOUR_API_KEY",
385
- )
386
- client.deployments.deploy_workflow(
387
- id="id",
388
- workflow_id="workflow_id",
389
- )
390
- """
391
- _request: typing.Dict[str, typing.Any] = {}
392
- if workflow_deployment_id is not OMIT:
393
- _request["workflow_deployment_id"] = workflow_deployment_id
394
- if workflow_deployment_name is not OMIT:
395
- _request["workflow_deployment_name"] = workflow_deployment_name
396
- if label is not OMIT:
397
- _request["label"] = label
398
- if release_tags is not OMIT:
399
- _request["release_tags"] = release_tags
400
- _response = self._client_wrapper.httpx_client.request(
401
- method="POST",
402
- url=urllib.parse.urljoin(
403
- f"{self._client_wrapper.get_environment().default}/",
404
- f"v1/workflow-sandboxes/{jsonable_encoder(id)}/workflows/{jsonable_encoder(workflow_id)}/deploy",
405
- ),
406
- params=jsonable_encoder(
407
- request_options.get("additional_query_parameters") if request_options is not None else None
408
- ),
409
- json=jsonable_encoder(_request)
410
- if request_options is None or request_options.get("additional_body_parameters") is None
411
- else {
412
- **jsonable_encoder(_request),
413
- **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
414
- },
415
- headers=jsonable_encoder(
416
- remove_none_from_dict(
417
- {
418
- **self._client_wrapper.get_headers(),
419
- **(request_options.get("additional_headers", {}) if request_options is not None else {}),
420
- }
421
- )
422
- ),
423
- timeout=request_options.get("timeout_in_seconds")
424
- if request_options is not None and request_options.get("timeout_in_seconds") is not None
425
- else self._client_wrapper.get_timeout(),
426
- retries=0,
427
- max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
428
- )
429
- if 200 <= _response.status_code < 300:
430
- return pydantic_v1.parse_obj_as(WorkflowDeploymentRead, _response.json()) # type: ignore
431
- try:
432
- _response_json = _response.json()
433
- except JSONDecodeError:
434
- raise ApiError(status_code=_response.status_code, body=_response.text)
435
- raise ApiError(status_code=_response.status_code, body=_response_json)
436
-
437
353
 
438
354
  class AsyncDeploymentsClient:
439
355
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -759,86 +675,3 @@ class AsyncDeploymentsClient:
759
675
  except JSONDecodeError:
760
676
  raise ApiError(status_code=_response.status_code, body=_response.text)
761
677
  raise ApiError(status_code=_response.status_code, body=_response_json)
762
-
763
- async def deploy_workflow(
764
- self,
765
- id: str,
766
- workflow_id: str,
767
- *,
768
- workflow_deployment_id: typing.Optional[str] = OMIT,
769
- workflow_deployment_name: typing.Optional[str] = OMIT,
770
- label: typing.Optional[str] = OMIT,
771
- release_tags: typing.Optional[typing.Sequence[str]] = OMIT,
772
- request_options: typing.Optional[RequestOptions] = None,
773
- ) -> WorkflowDeploymentRead:
774
- """
775
- Parameters:
776
- - id: str. A UUID string identifying this workflow sandbox.
777
-
778
- - workflow_id: str. An ID identifying the Workflow you'd like to deploy.
779
-
780
- - workflow_deployment_id: typing.Optional[str]. The Vellum-generated ID of the Workflow Deployment you'd like to update. Cannot specify both this and workflow_deployment_name. Leave null to create a new Workflow Deployment.
781
-
782
- - workflow_deployment_name: typing.Optional[str]. The unique name of the Workflow Deployment you'd like to either create or update. Cannot specify both this and workflow_deployment_id. If provided and matches an existing Workflow Deployment, that Workflow Deployment will be updated. Otherwise, a new Prompt Deployment will be created.
783
-
784
- - label: typing.Optional[str]. In the event that a new Workflow Deployment is created, this will be the label it's given.
785
-
786
- - release_tags: typing.Optional[typing.Sequence[str]]. Optionally provide the release tags that you'd like to be associated with the latest release of the created/updated Prompt Deployment.
787
-
788
- - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
789
- ---
790
- from vellum.client import AsyncVellum
791
-
792
- client = AsyncVellum(
793
- api_key="YOUR_API_KEY",
794
- )
795
- await client.deployments.deploy_workflow(
796
- id="id",
797
- workflow_id="workflow_id",
798
- )
799
- """
800
- _request: typing.Dict[str, typing.Any] = {}
801
- if workflow_deployment_id is not OMIT:
802
- _request["workflow_deployment_id"] = workflow_deployment_id
803
- if workflow_deployment_name is not OMIT:
804
- _request["workflow_deployment_name"] = workflow_deployment_name
805
- if label is not OMIT:
806
- _request["label"] = label
807
- if release_tags is not OMIT:
808
- _request["release_tags"] = release_tags
809
- _response = await self._client_wrapper.httpx_client.request(
810
- method="POST",
811
- url=urllib.parse.urljoin(
812
- f"{self._client_wrapper.get_environment().default}/",
813
- f"v1/workflow-sandboxes/{jsonable_encoder(id)}/workflows/{jsonable_encoder(workflow_id)}/deploy",
814
- ),
815
- params=jsonable_encoder(
816
- request_options.get("additional_query_parameters") if request_options is not None else None
817
- ),
818
- json=jsonable_encoder(_request)
819
- if request_options is None or request_options.get("additional_body_parameters") is None
820
- else {
821
- **jsonable_encoder(_request),
822
- **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
823
- },
824
- headers=jsonable_encoder(
825
- remove_none_from_dict(
826
- {
827
- **self._client_wrapper.get_headers(),
828
- **(request_options.get("additional_headers", {}) if request_options is not None else {}),
829
- }
830
- )
831
- ),
832
- timeout=request_options.get("timeout_in_seconds")
833
- if request_options is not None and request_options.get("timeout_in_seconds") is not None
834
- else self._client_wrapper.get_timeout(),
835
- retries=0,
836
- max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
837
- )
838
- if 200 <= _response.status_code < 300:
839
- return pydantic_v1.parse_obj_as(WorkflowDeploymentRead, _response.json()) # type: ignore
840
- try:
841
- _response_json = _response.json()
842
- except JSONDecodeError:
843
- raise ApiError(status_code=_response.status_code, body=_response.text)
844
- raise ApiError(status_code=_response.status_code, body=_response_json)
@@ -10,6 +10,7 @@ from ...core.jsonable_encoder import jsonable_encoder
10
10
  from ...core.pydantic_utilities import pydantic_v1
11
11
  from ...core.remove_none_from_dict import remove_none_from_dict
12
12
  from ...core.request_options import RequestOptions
13
+ from ...types.deployment_read import DeploymentRead
13
14
  from ...types.named_scenario_input_request import NamedScenarioInputRequest
14
15
  from ...types.sandbox_scenario import SandboxScenario
15
16
 
@@ -21,6 +22,89 @@ class SandboxesClient:
21
22
  def __init__(self, *, client_wrapper: SyncClientWrapper):
22
23
  self._client_wrapper = client_wrapper
23
24
 
25
+ def deploy_prompt(
26
+ self,
27
+ id: str,
28
+ prompt_id: str,
29
+ *,
30
+ prompt_deployment_id: typing.Optional[str] = OMIT,
31
+ prompt_deployment_name: typing.Optional[str] = OMIT,
32
+ label: typing.Optional[str] = OMIT,
33
+ release_tags: typing.Optional[typing.Sequence[str]] = OMIT,
34
+ request_options: typing.Optional[RequestOptions] = None,
35
+ ) -> DeploymentRead:
36
+ """
37
+ Parameters:
38
+ - id: str. A UUID string identifying this sandbox.
39
+
40
+ - prompt_id: str. An ID identifying the Prompt you'd like to deploy.
41
+
42
+ - prompt_deployment_id: typing.Optional[str]. The Vellum-generated ID of the Prompt Deployment you'd like to update. Cannot specify both this and prompt_deployment_name. Leave null to create a new Prompt Deployment.
43
+
44
+ - prompt_deployment_name: typing.Optional[str]. The unique name of the Prompt Deployment you'd like to either create or update. Cannot specify both this and prompt_deployment_id. If provided and matches an existing Prompt Deployment, that Prompt Deployment will be updated. Otherwise, a new Prompt Deployment will be created.
45
+
46
+ - label: typing.Optional[str]. In the event that a new Prompt Deployment is created, this will be the label it's given.
47
+
48
+ - release_tags: typing.Optional[typing.Sequence[str]]. Optionally provide the release tags that you'd like to be associated with the latest release of the created/updated Prompt Deployment.
49
+
50
+ - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
51
+ ---
52
+ from vellum.client import Vellum
53
+
54
+ client = Vellum(
55
+ api_key="YOUR_API_KEY",
56
+ )
57
+ client.sandboxes.deploy_prompt(
58
+ id="id",
59
+ prompt_id="prompt_id",
60
+ )
61
+ """
62
+ _request: typing.Dict[str, typing.Any] = {}
63
+ if prompt_deployment_id is not OMIT:
64
+ _request["prompt_deployment_id"] = prompt_deployment_id
65
+ if prompt_deployment_name is not OMIT:
66
+ _request["prompt_deployment_name"] = prompt_deployment_name
67
+ if label is not OMIT:
68
+ _request["label"] = label
69
+ if release_tags is not OMIT:
70
+ _request["release_tags"] = release_tags
71
+ _response = self._client_wrapper.httpx_client.request(
72
+ method="POST",
73
+ url=urllib.parse.urljoin(
74
+ f"{self._client_wrapper.get_environment().default}/",
75
+ f"v1/sandboxes/{jsonable_encoder(id)}/prompts/{jsonable_encoder(prompt_id)}/deploy",
76
+ ),
77
+ params=jsonable_encoder(
78
+ request_options.get("additional_query_parameters") if request_options is not None else None
79
+ ),
80
+ json=jsonable_encoder(_request)
81
+ if request_options is None or request_options.get("additional_body_parameters") is None
82
+ else {
83
+ **jsonable_encoder(_request),
84
+ **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
85
+ },
86
+ headers=jsonable_encoder(
87
+ remove_none_from_dict(
88
+ {
89
+ **self._client_wrapper.get_headers(),
90
+ **(request_options.get("additional_headers", {}) if request_options is not None else {}),
91
+ }
92
+ )
93
+ ),
94
+ timeout=request_options.get("timeout_in_seconds")
95
+ if request_options is not None and request_options.get("timeout_in_seconds") is not None
96
+ else self._client_wrapper.get_timeout(),
97
+ retries=0,
98
+ max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
99
+ )
100
+ if 200 <= _response.status_code < 300:
101
+ return pydantic_v1.parse_obj_as(DeploymentRead, _response.json()) # type: ignore
102
+ try:
103
+ _response_json = _response.json()
104
+ except JSONDecodeError:
105
+ raise ApiError(status_code=_response.status_code, body=_response.text)
106
+ raise ApiError(status_code=_response.status_code, body=_response_json)
107
+
24
108
  def upsert_sandbox_scenario(
25
109
  self,
26
110
  id: str,
@@ -161,6 +245,89 @@ class AsyncSandboxesClient:
161
245
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
162
246
  self._client_wrapper = client_wrapper
163
247
 
248
+ async def deploy_prompt(
249
+ self,
250
+ id: str,
251
+ prompt_id: str,
252
+ *,
253
+ prompt_deployment_id: typing.Optional[str] = OMIT,
254
+ prompt_deployment_name: typing.Optional[str] = OMIT,
255
+ label: typing.Optional[str] = OMIT,
256
+ release_tags: typing.Optional[typing.Sequence[str]] = OMIT,
257
+ request_options: typing.Optional[RequestOptions] = None,
258
+ ) -> DeploymentRead:
259
+ """
260
+ Parameters:
261
+ - id: str. A UUID string identifying this sandbox.
262
+
263
+ - prompt_id: str. An ID identifying the Prompt you'd like to deploy.
264
+
265
+ - prompt_deployment_id: typing.Optional[str]. The Vellum-generated ID of the Prompt Deployment you'd like to update. Cannot specify both this and prompt_deployment_name. Leave null to create a new Prompt Deployment.
266
+
267
+ - prompt_deployment_name: typing.Optional[str]. The unique name of the Prompt Deployment you'd like to either create or update. Cannot specify both this and prompt_deployment_id. If provided and matches an existing Prompt Deployment, that Prompt Deployment will be updated. Otherwise, a new Prompt Deployment will be created.
268
+
269
+ - label: typing.Optional[str]. In the event that a new Prompt Deployment is created, this will be the label it's given.
270
+
271
+ - release_tags: typing.Optional[typing.Sequence[str]]. Optionally provide the release tags that you'd like to be associated with the latest release of the created/updated Prompt Deployment.
272
+
273
+ - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
274
+ ---
275
+ from vellum.client import AsyncVellum
276
+
277
+ client = AsyncVellum(
278
+ api_key="YOUR_API_KEY",
279
+ )
280
+ await client.sandboxes.deploy_prompt(
281
+ id="id",
282
+ prompt_id="prompt_id",
283
+ )
284
+ """
285
+ _request: typing.Dict[str, typing.Any] = {}
286
+ if prompt_deployment_id is not OMIT:
287
+ _request["prompt_deployment_id"] = prompt_deployment_id
288
+ if prompt_deployment_name is not OMIT:
289
+ _request["prompt_deployment_name"] = prompt_deployment_name
290
+ if label is not OMIT:
291
+ _request["label"] = label
292
+ if release_tags is not OMIT:
293
+ _request["release_tags"] = release_tags
294
+ _response = await self._client_wrapper.httpx_client.request(
295
+ method="POST",
296
+ url=urllib.parse.urljoin(
297
+ f"{self._client_wrapper.get_environment().default}/",
298
+ f"v1/sandboxes/{jsonable_encoder(id)}/prompts/{jsonable_encoder(prompt_id)}/deploy",
299
+ ),
300
+ params=jsonable_encoder(
301
+ request_options.get("additional_query_parameters") if request_options is not None else None
302
+ ),
303
+ json=jsonable_encoder(_request)
304
+ if request_options is None or request_options.get("additional_body_parameters") is None
305
+ else {
306
+ **jsonable_encoder(_request),
307
+ **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
308
+ },
309
+ headers=jsonable_encoder(
310
+ remove_none_from_dict(
311
+ {
312
+ **self._client_wrapper.get_headers(),
313
+ **(request_options.get("additional_headers", {}) if request_options is not None else {}),
314
+ }
315
+ )
316
+ ),
317
+ timeout=request_options.get("timeout_in_seconds")
318
+ if request_options is not None and request_options.get("timeout_in_seconds") is not None
319
+ else self._client_wrapper.get_timeout(),
320
+ retries=0,
321
+ max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
322
+ )
323
+ if 200 <= _response.status_code < 300:
324
+ return pydantic_v1.parse_obj_as(DeploymentRead, _response.json()) # type: ignore
325
+ try:
326
+ _response_json = _response.json()
327
+ except JSONDecodeError:
328
+ raise ApiError(status_code=_response.status_code, body=_response.text)
329
+ raise ApiError(status_code=_response.status_code, body=_response_json)
330
+
164
331
  async def upsert_sandbox_scenario(
165
332
  self,
166
333
  id: str,