vellum-ai 0.1.13__py3-none-any.whl → 0.2.1__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
Files changed (86)
  1. vellum/__init__.py +172 -4
  2. vellum/client.py +214 -10
  3. vellum/core/client_wrapper.py +1 -1
  4. vellum/resources/__init__.py +15 -1
  5. vellum/resources/deployments/__init__.py +3 -0
  6. vellum/resources/deployments/client.py +113 -2
  7. vellum/resources/deployments/types/__init__.py +5 -0
  8. vellum/resources/deployments/types/deployments_list_request_status.py +17 -0
  9. vellum/resources/document_indexes/client.py +55 -9
  10. vellum/resources/documents/client.py +22 -2
  11. vellum/resources/model_versions/client.py +18 -0
  12. vellum/resources/registered_prompts/client.py +104 -0
  13. vellum/resources/sandboxes/client.py +66 -4
  14. vellum/resources/test_suites/client.py +6 -6
  15. vellum/resources/workflow_deployments/__init__.py +5 -0
  16. vellum/resources/workflow_deployments/client.py +116 -0
  17. vellum/resources/workflow_deployments/types/__init__.py +5 -0
  18. vellum/resources/workflow_deployments/types/workflow_deployments_list_request_status.py +17 -0
  19. vellum/types/__init__.py +180 -4
  20. vellum/types/array_chat_message_content.py +33 -0
  21. vellum/types/array_chat_message_content_item.py +43 -0
  22. vellum/types/array_chat_message_content_item_request.py +45 -0
  23. vellum/types/array_chat_message_content_request.py +33 -0
  24. vellum/types/array_enum.py +5 -0
  25. vellum/types/array_variable_value.py +35 -0
  26. vellum/types/chat_history_enum.py +5 -0
  27. vellum/types/chat_history_input_request.py +4 -0
  28. vellum/types/chat_history_variable_value.py +29 -0
  29. vellum/types/chat_message.py +3 -1
  30. vellum/types/chat_message_content.py +53 -0
  31. vellum/types/chat_message_content_request.py +56 -0
  32. vellum/types/chat_message_request.py +3 -1
  33. vellum/types/deployment_read.py +5 -11
  34. vellum/types/document_index_read.py +2 -2
  35. vellum/types/{document_index_status.py → entity_status.py} +3 -3
  36. vellum/types/error_enum.py +5 -0
  37. vellum/types/execute_workflow_error_response.py +28 -0
  38. vellum/types/execute_workflow_response.py +32 -0
  39. vellum/types/execute_workflow_workflow_result_event.py +33 -0
  40. vellum/types/fulfilled_execute_workflow_workflow_result_event.py +35 -0
  41. vellum/types/function_call_chat_message_content.py +33 -0
  42. vellum/types/function_call_chat_message_content_request.py +33 -0
  43. vellum/types/function_call_chat_message_content_value.py +34 -0
  44. vellum/types/function_call_chat_message_content_value_request.py +34 -0
  45. vellum/types/function_call_enum.py +5 -0
  46. vellum/types/image_chat_message_content.py +33 -0
  47. vellum/types/image_chat_message_content_request.py +33 -0
  48. vellum/types/image_enum.py +5 -0
  49. vellum/types/json_enum.py +5 -0
  50. vellum/types/json_input_request.py +4 -0
  51. vellum/types/model_version_exec_config_parameters.py +1 -0
  52. vellum/types/number_enum.py +5 -0
  53. vellum/types/number_variable_value.py +28 -0
  54. vellum/types/paginated_slim_deployment_read_list.py +32 -0
  55. vellum/types/paginated_slim_workflow_deployment_list.py +32 -0
  56. vellum/types/register_prompt_model_parameters_request.py +1 -0
  57. vellum/types/rejected_execute_workflow_workflow_result_event.py +35 -0
  58. vellum/types/scenario_input_type_enum.py +2 -2
  59. vellum/types/search_results_enum.py +5 -0
  60. vellum/types/search_results_variable_value.py +29 -0
  61. vellum/types/slim_deployment_read.py +48 -0
  62. vellum/types/slim_workflow_deployment.py +57 -0
  63. vellum/types/string_chat_message_content.py +32 -0
  64. vellum/types/string_chat_message_content_request.py +32 -0
  65. vellum/types/string_enum.py +5 -0
  66. vellum/types/string_input_request.py +4 -0
  67. vellum/types/variable_value.py +102 -0
  68. vellum/types/vellum_image.py +29 -0
  69. vellum/types/vellum_image_request.py +29 -0
  70. vellum/types/vellum_variable_type.py +5 -0
  71. vellum/types/workflow_output.py +111 -0
  72. vellum/types/workflow_output_array.py +35 -0
  73. vellum/types/workflow_output_chat_history.py +35 -0
  74. vellum/types/workflow_output_error.py +35 -0
  75. vellum/types/workflow_output_function_call.py +35 -0
  76. vellum/types/workflow_output_image.py +35 -0
  77. vellum/types/workflow_output_json.py +34 -0
  78. vellum/types/workflow_output_number.py +34 -0
  79. vellum/types/workflow_output_search_results.py +35 -0
  80. vellum/types/workflow_output_string.py +34 -0
  81. vellum/types/workflow_result_event.py +2 -0
  82. {vellum_ai-0.1.13.dist-info → vellum_ai-0.2.1.dist-info}/METADATA +1 -1
  83. {vellum_ai-0.1.13.dist-info → vellum_ai-0.2.1.dist-info}/RECORD +85 -31
  84. vellum/types/deployment_status.py +0 -31
  85. {vellum_ai-0.1.13.dist-info → vellum_ai-0.2.1.dist-info}/LICENSE +0 -0
  86. {vellum_ai-0.1.13.dist-info → vellum_ai-0.2.1.dist-info}/WHEEL +0 -0
vellum/client.py CHANGED
@@ -22,8 +22,10 @@ from .resources.model_versions.client import AsyncModelVersionsClient, ModelVers
22
22
  from .resources.registered_prompts.client import AsyncRegisteredPromptsClient, RegisteredPromptsClient
23
23
  from .resources.sandboxes.client import AsyncSandboxesClient, SandboxesClient
24
24
  from .resources.test_suites.client import AsyncTestSuitesClient, TestSuitesClient
25
+ from .resources.workflow_deployments.client import AsyncWorkflowDeploymentsClient, WorkflowDeploymentsClient
25
26
  from .types.execute_prompt_event import ExecutePromptEvent
26
27
  from .types.execute_prompt_response import ExecutePromptResponse
28
+ from .types.execute_workflow_response import ExecuteWorkflowResponse
27
29
  from .types.generate_options_request import GenerateOptionsRequest
28
30
  from .types.generate_request import GenerateRequest
29
31
  from .types.generate_response import GenerateResponse
@@ -69,6 +71,7 @@ class Vellum:
69
71
  self.registered_prompts = RegisteredPromptsClient(client_wrapper=self._client_wrapper)
70
72
  self.sandboxes = SandboxesClient(client_wrapper=self._client_wrapper)
71
73
  self.test_suites = TestSuitesClient(client_wrapper=self._client_wrapper)
74
+ self.workflow_deployments = WorkflowDeploymentsClient(client_wrapper=self._client_wrapper)
72
75
 
73
76
  def execute_prompt(
74
77
  self,
@@ -90,7 +93,7 @@ class Vellum:
90
93
  In the meantime, we recommend still using the `/generate` endpoint for prompts with function calling.
91
94
 
92
95
  Parameters:
93
- - inputs: typing.List[PromptDeploymentInputRequest].
96
+ - inputs: typing.List[PromptDeploymentInputRequest]. The list of inputs defined in the Prompt's deployment with their corresponding values.
94
97
 
95
98
  - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
96
99
 
@@ -168,7 +171,7 @@ class Vellum:
168
171
  In the meantime, we recommend still using the `/generate-stream` endpoint for prompts with function calling
169
172
 
170
173
  Parameters:
171
- - inputs: typing.List[PromptDeploymentInputRequest].
174
+ - inputs: typing.List[PromptDeploymentInputRequest]. The list of inputs defined in the Prompt's deployment with their corresponding values.
172
175
 
173
176
  - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
174
177
 
@@ -231,6 +234,59 @@ class Vellum:
231
234
  raise ApiError(status_code=_response.status_code, body=_response.text)
232
235
  raise ApiError(status_code=_response.status_code, body=_response_json)
233
236
 
237
+ def execute_workflow(
238
+ self,
239
+ *,
240
+ workflow_deployment_id: typing.Optional[str] = OMIT,
241
+ workflow_deployment_name: typing.Optional[str] = OMIT,
242
+ release_tag: typing.Optional[str] = OMIT,
243
+ inputs: typing.List[WorkflowRequestInputRequest],
244
+ external_id: typing.Optional[str] = OMIT,
245
+ ) -> ExecuteWorkflowResponse:
246
+ """
247
+ Executes a deployed Workflow and returns its outputs.
248
+
249
+ Parameters:
250
+ - workflow_deployment_id: typing.Optional[str]. The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
251
+
252
+ - workflow_deployment_name: typing.Optional[str]. The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
253
+
254
+ - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment
255
+
256
+ - inputs: typing.List[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
257
+
258
+ - external_id: typing.Optional[str]. Optionally include a unique identifier for monitoring purposes.
259
+ """
260
+ _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
261
+ if workflow_deployment_id is not OMIT:
262
+ _request["workflow_deployment_id"] = workflow_deployment_id
263
+ if workflow_deployment_name is not OMIT:
264
+ _request["workflow_deployment_name"] = workflow_deployment_name
265
+ if release_tag is not OMIT:
266
+ _request["release_tag"] = release_tag
267
+ if external_id is not OMIT:
268
+ _request["external_id"] = external_id
269
+ _response = self._client_wrapper.httpx_client.request(
270
+ "POST",
271
+ urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", "v1/execute-workflow"),
272
+ json=jsonable_encoder(_request),
273
+ headers=self._client_wrapper.get_headers(),
274
+ timeout=None,
275
+ )
276
+ if 200 <= _response.status_code < 300:
277
+ return pydantic.parse_obj_as(ExecuteWorkflowResponse, _response.json()) # type: ignore
278
+ if _response.status_code == 400:
279
+ raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
280
+ if _response.status_code == 404:
281
+ raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
282
+ if _response.status_code == 500:
283
+ raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
284
+ try:
285
+ _response_json = _response.json()
286
+ except JSONDecodeError:
287
+ raise ApiError(status_code=_response.status_code, body=_response.text)
288
+ raise ApiError(status_code=_response.status_code, body=_response_json)
289
+
234
290
  def execute_workflow_stream(
235
291
  self,
236
292
  *,
@@ -316,7 +372,13 @@ class Vellum:
316
372
 
317
373
  - options: typing.Optional[GenerateOptionsRequest]. Additional configuration that can be used to control what's included in the response.
318
374
  ---
319
- from vellum import GenerateOptionsRequest, GenerateRequest, LogprobsEnum
375
+ from vellum import (
376
+ ChatMessageRequest,
377
+ ChatMessageRole,
378
+ GenerateOptionsRequest,
379
+ GenerateRequest,
380
+ LogprobsEnum,
381
+ )
320
382
  from vellum.client import Vellum
321
383
 
322
384
  client = Vellum(
@@ -325,7 +387,12 @@ class Vellum:
325
387
  client.generate(
326
388
  requests=[
327
389
  GenerateRequest(
328
- input_values={},
390
+ input_values={"string": {"unknown": "string", "type": "unknown"}},
391
+ chat_history=[
392
+ ChatMessageRequest(
393
+ role=ChatMessageRole.SYSTEM,
394
+ )
395
+ ],
329
396
  )
330
397
  ],
331
398
  options=GenerateOptionsRequest(
@@ -441,6 +508,41 @@ class Vellum:
441
508
  - query: str. The query to search for.
442
509
 
443
510
  - options: typing.Optional[SearchRequestOptionsRequest]. Configuration options for the search.
511
+ ---
512
+ from vellum import (
513
+ LogicalOperator,
514
+ MetadataFilterConfigRequest,
515
+ MetadataFilterRuleCombinator,
516
+ MetadataFilterRuleRequest,
517
+ SearchFiltersRequest,
518
+ SearchRequestOptionsRequest,
519
+ SearchResultMergingRequest,
520
+ SearchWeightsRequest,
521
+ )
522
+ from vellum.client import Vellum
523
+
524
+ client = Vellum(
525
+ api_key="YOUR_API_KEY",
526
+ )
527
+ client.search(
528
+ query="string",
529
+ options=SearchRequestOptionsRequest(
530
+ weights=SearchWeightsRequest(),
531
+ result_merging=SearchResultMergingRequest(),
532
+ filters=SearchFiltersRequest(
533
+ metadata=MetadataFilterConfigRequest(
534
+ combinator=MetadataFilterRuleCombinator.AND,
535
+ rules=[
536
+ MetadataFilterRuleRequest(
537
+ combinator=MetadataFilterRuleCombinator.AND,
538
+ operator=LogicalOperator.EQUALS,
539
+ )
540
+ ],
541
+ operator=LogicalOperator.EQUALS,
542
+ ),
543
+ ),
544
+ ),
545
+ )
444
546
  """
445
547
  _request: typing.Dict[str, typing.Any] = {"query": query}
446
548
  if index_id is not OMIT:
@@ -489,13 +591,14 @@ class Vellum:
489
591
 
490
592
  - actuals: typing.List[SubmitCompletionActualRequest]. Feedback regarding the quality of previously generated completions
491
593
  ---
594
+ from vellum import SubmitCompletionActualRequest
492
595
  from vellum.client import Vellum
493
596
 
494
597
  client = Vellum(
495
598
  api_key="YOUR_API_KEY",
496
599
  )
497
600
  client.submit_completion_actuals(
498
- actuals=[],
601
+ actuals=[SubmitCompletionActualRequest()],
499
602
  )
500
603
  """
501
604
  _request: typing.Dict[str, typing.Any] = {"actuals": actuals}
@@ -596,6 +699,7 @@ class AsyncVellum:
596
699
  self.registered_prompts = AsyncRegisteredPromptsClient(client_wrapper=self._client_wrapper)
597
700
  self.sandboxes = AsyncSandboxesClient(client_wrapper=self._client_wrapper)
598
701
  self.test_suites = AsyncTestSuitesClient(client_wrapper=self._client_wrapper)
702
+ self.workflow_deployments = AsyncWorkflowDeploymentsClient(client_wrapper=self._client_wrapper)
599
703
 
600
704
  async def execute_prompt(
601
705
  self,
@@ -617,7 +721,7 @@ class AsyncVellum:
617
721
  In the meantime, we recommend still using the `/generate` endpoint for prompts with function calling.
618
722
 
619
723
  Parameters:
620
- - inputs: typing.List[PromptDeploymentInputRequest].
724
+ - inputs: typing.List[PromptDeploymentInputRequest]. The list of inputs defined in the Prompt's deployment with their corresponding values.
621
725
 
622
726
  - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
623
727
 
@@ -695,7 +799,7 @@ class AsyncVellum:
695
799
  In the meantime, we recommend still using the `/generate-stream` endpoint for prompts with function calling
696
800
 
697
801
  Parameters:
698
- - inputs: typing.List[PromptDeploymentInputRequest].
802
+ - inputs: typing.List[PromptDeploymentInputRequest]. The list of inputs defined in the Prompt's deployment with their corresponding values.
699
803
 
700
804
  - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.
701
805
 
@@ -758,6 +862,59 @@ class AsyncVellum:
758
862
  raise ApiError(status_code=_response.status_code, body=_response.text)
759
863
  raise ApiError(status_code=_response.status_code, body=_response_json)
760
864
 
865
+ async def execute_workflow(
866
+ self,
867
+ *,
868
+ workflow_deployment_id: typing.Optional[str] = OMIT,
869
+ workflow_deployment_name: typing.Optional[str] = OMIT,
870
+ release_tag: typing.Optional[str] = OMIT,
871
+ inputs: typing.List[WorkflowRequestInputRequest],
872
+ external_id: typing.Optional[str] = OMIT,
873
+ ) -> ExecuteWorkflowResponse:
874
+ """
875
+ Executes a deployed Workflow and returns its outputs.
876
+
877
+ Parameters:
878
+ - workflow_deployment_id: typing.Optional[str]. The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
879
+
880
+ - workflow_deployment_name: typing.Optional[str]. The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
881
+
882
+ - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment
883
+
884
+ - inputs: typing.List[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
885
+
886
+ - external_id: typing.Optional[str]. Optionally include a unique identifier for monitoring purposes.
887
+ """
888
+ _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
889
+ if workflow_deployment_id is not OMIT:
890
+ _request["workflow_deployment_id"] = workflow_deployment_id
891
+ if workflow_deployment_name is not OMIT:
892
+ _request["workflow_deployment_name"] = workflow_deployment_name
893
+ if release_tag is not OMIT:
894
+ _request["release_tag"] = release_tag
895
+ if external_id is not OMIT:
896
+ _request["external_id"] = external_id
897
+ _response = await self._client_wrapper.httpx_client.request(
898
+ "POST",
899
+ urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", "v1/execute-workflow"),
900
+ json=jsonable_encoder(_request),
901
+ headers=self._client_wrapper.get_headers(),
902
+ timeout=None,
903
+ )
904
+ if 200 <= _response.status_code < 300:
905
+ return pydantic.parse_obj_as(ExecuteWorkflowResponse, _response.json()) # type: ignore
906
+ if _response.status_code == 400:
907
+ raise BadRequestError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
908
+ if _response.status_code == 404:
909
+ raise NotFoundError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
910
+ if _response.status_code == 500:
911
+ raise InternalServerError(pydantic.parse_obj_as(typing.Any, _response.json())) # type: ignore
912
+ try:
913
+ _response_json = _response.json()
914
+ except JSONDecodeError:
915
+ raise ApiError(status_code=_response.status_code, body=_response.text)
916
+ raise ApiError(status_code=_response.status_code, body=_response_json)
917
+
761
918
  async def execute_workflow_stream(
762
919
  self,
763
920
  *,
@@ -843,7 +1000,13 @@ class AsyncVellum:
843
1000
 
844
1001
  - options: typing.Optional[GenerateOptionsRequest]. Additional configuration that can be used to control what's included in the response.
845
1002
  ---
846
- from vellum import GenerateOptionsRequest, GenerateRequest, LogprobsEnum
1003
+ from vellum import (
1004
+ ChatMessageRequest,
1005
+ ChatMessageRole,
1006
+ GenerateOptionsRequest,
1007
+ GenerateRequest,
1008
+ LogprobsEnum,
1009
+ )
847
1010
  from vellum.client import AsyncVellum
848
1011
 
849
1012
  client = AsyncVellum(
@@ -852,7 +1015,12 @@ class AsyncVellum:
852
1015
  await client.generate(
853
1016
  requests=[
854
1017
  GenerateRequest(
855
- input_values={},
1018
+ input_values={"string": {"unknown": "string", "type": "unknown"}},
1019
+ chat_history=[
1020
+ ChatMessageRequest(
1021
+ role=ChatMessageRole.SYSTEM,
1022
+ )
1023
+ ],
856
1024
  )
857
1025
  ],
858
1026
  options=GenerateOptionsRequest(
@@ -968,6 +1136,41 @@ class AsyncVellum:
968
1136
  - query: str. The query to search for.
969
1137
 
970
1138
  - options: typing.Optional[SearchRequestOptionsRequest]. Configuration options for the search.
1139
+ ---
1140
+ from vellum import (
1141
+ LogicalOperator,
1142
+ MetadataFilterConfigRequest,
1143
+ MetadataFilterRuleCombinator,
1144
+ MetadataFilterRuleRequest,
1145
+ SearchFiltersRequest,
1146
+ SearchRequestOptionsRequest,
1147
+ SearchResultMergingRequest,
1148
+ SearchWeightsRequest,
1149
+ )
1150
+ from vellum.client import AsyncVellum
1151
+
1152
+ client = AsyncVellum(
1153
+ api_key="YOUR_API_KEY",
1154
+ )
1155
+ await client.search(
1156
+ query="string",
1157
+ options=SearchRequestOptionsRequest(
1158
+ weights=SearchWeightsRequest(),
1159
+ result_merging=SearchResultMergingRequest(),
1160
+ filters=SearchFiltersRequest(
1161
+ metadata=MetadataFilterConfigRequest(
1162
+ combinator=MetadataFilterRuleCombinator.AND,
1163
+ rules=[
1164
+ MetadataFilterRuleRequest(
1165
+ combinator=MetadataFilterRuleCombinator.AND,
1166
+ operator=LogicalOperator.EQUALS,
1167
+ )
1168
+ ],
1169
+ operator=LogicalOperator.EQUALS,
1170
+ ),
1171
+ ),
1172
+ ),
1173
+ )
971
1174
  """
972
1175
  _request: typing.Dict[str, typing.Any] = {"query": query}
973
1176
  if index_id is not OMIT:
@@ -1016,13 +1219,14 @@ class AsyncVellum:
1016
1219
 
1017
1220
  - actuals: typing.List[SubmitCompletionActualRequest]. Feedback regarding the quality of previously generated completions
1018
1221
  ---
1222
+ from vellum import SubmitCompletionActualRequest
1019
1223
  from vellum.client import AsyncVellum
1020
1224
 
1021
1225
  client = AsyncVellum(
1022
1226
  api_key="YOUR_API_KEY",
1023
1227
  )
1024
1228
  await client.submit_completion_actuals(
1025
- actuals=[],
1229
+ actuals=[SubmitCompletionActualRequest()],
1026
1230
  )
1027
1231
  """
1028
1232
  _request: typing.Dict[str, typing.Any] = {"actuals": actuals}
@@ -16,7 +16,7 @@ class BaseClientWrapper:
16
16
  headers: typing.Dict[str, str] = {
17
17
  "X-Fern-Language": "Python",
18
18
  "X-Fern-SDK-Name": "vellum-ai",
19
- "X-Fern-SDK-Version": "v0.1.13",
19
+ "X-Fern-SDK-Version": "v0.2.1",
20
20
  }
21
21
  headers["X_API_KEY"] = self.api_key
22
22
  return headers
@@ -1,8 +1,21 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
- from . import deployments, document_indexes, documents, model_versions, registered_prompts, sandboxes, test_suites
3
+ from . import (
4
+ deployments,
5
+ document_indexes,
6
+ documents,
7
+ model_versions,
8
+ registered_prompts,
9
+ sandboxes,
10
+ test_suites,
11
+ workflow_deployments,
12
+ )
13
+ from .deployments import DeploymentsListRequestStatus
14
+ from .workflow_deployments import WorkflowDeploymentsListRequestStatus
4
15
 
5
16
  __all__ = [
17
+ "DeploymentsListRequestStatus",
18
+ "WorkflowDeploymentsListRequestStatus",
6
19
  "deployments",
7
20
  "document_indexes",
8
21
  "documents",
@@ -10,4 +23,5 @@ __all__ = [
10
23
  "registered_prompts",
11
24
  "sandboxes",
12
25
  "test_suites",
26
+ "workflow_deployments",
13
27
  ]
@@ -1,2 +1,5 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
+ from .types import DeploymentsListRequestStatus
4
+
5
+ __all__ = ["DeploymentsListRequestStatus"]
@@ -7,9 +7,12 @@ from json.decoder import JSONDecodeError
7
7
  from ...core.api_error import ApiError
8
8
  from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
9
9
  from ...core.jsonable_encoder import jsonable_encoder
10
+ from ...core.remove_none_from_dict import remove_none_from_dict
10
11
  from ...types.deployment_provider_payload_response import DeploymentProviderPayloadResponse
11
12
  from ...types.deployment_read import DeploymentRead
13
+ from ...types.paginated_slim_deployment_read_list import PaginatedSlimDeploymentReadList
12
14
  from ...types.prompt_deployment_input_request import PromptDeploymentInputRequest
15
+ from .types.deployments_list_request_status import DeploymentsListRequestStatus
13
16
 
14
17
  try:
15
18
  import pydantic.v1 as pydantic # type: ignore
@@ -24,12 +27,66 @@ class DeploymentsClient:
24
27
  def __init__(self, *, client_wrapper: SyncClientWrapper):
25
28
  self._client_wrapper = client_wrapper
26
29
 
30
+ def list(
31
+ self,
32
+ *,
33
+ limit: typing.Optional[int] = None,
34
+ offset: typing.Optional[int] = None,
35
+ ordering: typing.Optional[str] = None,
36
+ status: typing.Optional[DeploymentsListRequestStatus] = None,
37
+ ) -> PaginatedSlimDeploymentReadList:
38
+ """
39
+ Parameters:
40
+ - limit: typing.Optional[int]. Number of results to return per page.
41
+
42
+ - offset: typing.Optional[int]. The initial index from which to return the results.
43
+
44
+ - ordering: typing.Optional[str]. Which field to use when ordering the results.
45
+
46
+ - status: typing.Optional[DeploymentsListRequestStatus]. The current status of the deployment
47
+
48
+ - `ACTIVE` - Active
49
+ - `ARCHIVED` - Archived---
50
+ from vellum import DeploymentsListRequestStatus
51
+ from vellum.client import Vellum
52
+
53
+ client = Vellum(
54
+ api_key="YOUR_API_KEY",
55
+ )
56
+ client.deployments.list(
57
+ status=DeploymentsListRequestStatus.ACTIVE,
58
+ )
59
+ """
60
+ _response = self._client_wrapper.httpx_client.request(
61
+ "GET",
62
+ urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", "v1/deployments"),
63
+ params=remove_none_from_dict({"limit": limit, "offset": offset, "ordering": ordering, "status": status}),
64
+ headers=self._client_wrapper.get_headers(),
65
+ timeout=None,
66
+ )
67
+ if 200 <= _response.status_code < 300:
68
+ return pydantic.parse_obj_as(PaginatedSlimDeploymentReadList, _response.json()) # type: ignore
69
+ try:
70
+ _response_json = _response.json()
71
+ except JSONDecodeError:
72
+ raise ApiError(status_code=_response.status_code, body=_response.text)
73
+ raise ApiError(status_code=_response.status_code, body=_response_json)
74
+
27
75
  def retrieve(self, id: str) -> DeploymentRead:
28
76
  """
29
77
  Used to retrieve a deployment given its ID or name.
30
78
 
31
79
  Parameters:
32
80
  - id: str. Either the Deployment's ID or its unique name
81
+ ---
82
+ from vellum.client import Vellum
83
+
84
+ client = Vellum(
85
+ api_key="YOUR_API_KEY",
86
+ )
87
+ client.deployments.retrieve(
88
+ id="string",
89
+ )
33
90
  """
34
91
  _response = self._client_wrapper.httpx_client.request(
35
92
  "GET",
@@ -58,7 +115,7 @@ class DeploymentsClient:
58
115
 
59
116
  - deployment_name: typing.Optional[str]. The name of the deployment. Must provide either this or deployment_id.
60
117
 
61
- - inputs: typing.List[PromptDeploymentInputRequest].
118
+ - inputs: typing.List[PromptDeploymentInputRequest]. The list of inputs defined in the Prompt's deployment with their corresponding values.
62
119
  ---
63
120
  from vellum.client import Vellum
64
121
 
@@ -96,12 +153,66 @@ class AsyncDeploymentsClient:
96
153
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
97
154
  self._client_wrapper = client_wrapper
98
155
 
156
+ async def list(
157
+ self,
158
+ *,
159
+ limit: typing.Optional[int] = None,
160
+ offset: typing.Optional[int] = None,
161
+ ordering: typing.Optional[str] = None,
162
+ status: typing.Optional[DeploymentsListRequestStatus] = None,
163
+ ) -> PaginatedSlimDeploymentReadList:
164
+ """
165
+ Parameters:
166
+ - limit: typing.Optional[int]. Number of results to return per page.
167
+
168
+ - offset: typing.Optional[int]. The initial index from which to return the results.
169
+
170
+ - ordering: typing.Optional[str]. Which field to use when ordering the results.
171
+
172
+ - status: typing.Optional[DeploymentsListRequestStatus]. The current status of the deployment
173
+
174
+ - `ACTIVE` - Active
175
+ - `ARCHIVED` - Archived---
176
+ from vellum import DeploymentsListRequestStatus
177
+ from vellum.client import AsyncVellum
178
+
179
+ client = AsyncVellum(
180
+ api_key="YOUR_API_KEY",
181
+ )
182
+ await client.deployments.list(
183
+ status=DeploymentsListRequestStatus.ACTIVE,
184
+ )
185
+ """
186
+ _response = await self._client_wrapper.httpx_client.request(
187
+ "GET",
188
+ urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", "v1/deployments"),
189
+ params=remove_none_from_dict({"limit": limit, "offset": offset, "ordering": ordering, "status": status}),
190
+ headers=self._client_wrapper.get_headers(),
191
+ timeout=None,
192
+ )
193
+ if 200 <= _response.status_code < 300:
194
+ return pydantic.parse_obj_as(PaginatedSlimDeploymentReadList, _response.json()) # type: ignore
195
+ try:
196
+ _response_json = _response.json()
197
+ except JSONDecodeError:
198
+ raise ApiError(status_code=_response.status_code, body=_response.text)
199
+ raise ApiError(status_code=_response.status_code, body=_response_json)
200
+
99
201
  async def retrieve(self, id: str) -> DeploymentRead:
100
202
  """
101
203
  Used to retrieve a deployment given its ID or name.
102
204
 
103
205
  Parameters:
104
206
  - id: str. Either the Deployment's ID or its unique name
207
+ ---
208
+ from vellum.client import AsyncVellum
209
+
210
+ client = AsyncVellum(
211
+ api_key="YOUR_API_KEY",
212
+ )
213
+ await client.deployments.retrieve(
214
+ id="string",
215
+ )
105
216
  """
106
217
  _response = await self._client_wrapper.httpx_client.request(
107
218
  "GET",
@@ -130,7 +241,7 @@ class AsyncDeploymentsClient:
130
241
 
131
242
  - deployment_name: typing.Optional[str]. The name of the deployment. Must provide either this or deployment_id.
132
243
 
133
- - inputs: typing.List[PromptDeploymentInputRequest].
244
+ - inputs: typing.List[PromptDeploymentInputRequest]. The list of inputs defined in the Prompt's deployment with their corresponding values.
134
245
  ---
135
246
  from vellum.client import AsyncVellum
136
247
 
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from .deployments_list_request_status import DeploymentsListRequestStatus
4
+
5
+ __all__ = ["DeploymentsListRequestStatus"]
@@ -0,0 +1,17 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+ import typing
5
+
6
+ T_Result = typing.TypeVar("T_Result")
7
+
8
+
9
+ class DeploymentsListRequestStatus(str, enum.Enum):
10
+ ACTIVE = "ACTIVE"
11
+ ARCHIVED = "ARCHIVED"
12
+
13
+ def visit(self, active: typing.Callable[[], T_Result], archived: typing.Callable[[], T_Result]) -> T_Result:
14
+ if self is DeploymentsListRequestStatus.ACTIVE:
15
+ return active()
16
+ if self is DeploymentsListRequestStatus.ARCHIVED:
17
+ return archived()