vellum-ai 0.7.1__py3-none-any.whl → 0.7.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- vellum/__init__.py +46 -0
- vellum/client.py +45 -12
- vellum/core/client_wrapper.py +1 -1
- vellum/resources/deployments/client.py +11 -0
- vellum/types/__init__.py +48 -0
- vellum/types/array_vellum_value_item_request.py +82 -0
- vellum/types/compile_prompt_deployment_expand_meta_request.py +38 -0
- vellum/types/compile_prompt_meta.py +31 -0
- vellum/types/deployment_provider_payload_response.py +2 -0
- vellum/types/error_vellum_value_request.py +30 -0
- vellum/types/function_call_vellum_value_request.py +30 -0
- vellum/types/image_vellum_value_request.py +30 -0
- vellum/types/json_vellum_value_request.py +29 -0
- vellum/types/named_test_case_array_variable_value.py +31 -0
- vellum/types/named_test_case_array_variable_value_request.py +31 -0
- vellum/types/named_test_case_variable_value.py +12 -0
- vellum/types/named_test_case_variable_value_request.py +12 -0
- vellum/types/number_vellum_value_request.py +29 -0
- vellum/types/prompt_deployment_expand_meta_request_request.py +11 -11
- vellum/types/prompt_execution_meta.py +1 -1
- vellum/types/prompt_node_execution_meta.py +30 -0
- vellum/types/prompt_node_result_data.py +2 -0
- vellum/types/string_vellum_value_request.py +29 -0
- vellum/types/test_case_array_variable_value.py +32 -0
- vellum/types/test_case_variable_value.py +12 -0
- vellum/types/workflow_expand_meta_request.py +28 -0
- {vellum_ai-0.7.1.dist-info → vellum_ai-0.7.3.dist-info}/METADATA +1 -1
- {vellum_ai-0.7.1.dist-info → vellum_ai-0.7.3.dist-info}/RECORD +30 -16
- {vellum_ai-0.7.1.dist-info → vellum_ai-0.7.3.dist-info}/LICENSE +0 -0
- {vellum_ai-0.7.1.dist-info → vellum_ai-0.7.3.dist-info}/WHEEL +0 -0
vellum/__init__.py
CHANGED
@@ -23,6 +23,13 @@ from .types import (
     ArrayVariableValueItem_Number,
     ArrayVariableValueItem_String,
     ArrayVellumValueItem,
+    ArrayVellumValueItemRequest,
+    ArrayVellumValueItemRequest_Error,
+    ArrayVellumValueItemRequest_FunctionCall,
+    ArrayVellumValueItemRequest_Image,
+    ArrayVellumValueItemRequest_Json,
+    ArrayVellumValueItemRequest_Number,
+    ArrayVellumValueItemRequest_String,
     ArrayVellumValueItem_Error,
     ArrayVellumValueItem_FunctionCall,
     ArrayVellumValueItem_Image,
@@ -69,6 +76,8 @@ from .types import (
     CodeExecutionNodeResultOutput_String,
     CodeExecutionNodeSearchResultsResult,
     CodeExecutionNodeStringResult,
+    CompilePromptDeploymentExpandMetaRequest,
+    CompilePromptMeta,
     ConditionalNodeResult,
     ConditionalNodeResultData,
     CreateEnum,
@@ -101,6 +110,7 @@ from .types import (
     ErrorEnum,
     ErrorVariableValue,
     ErrorVellumValue,
+    ErrorVellumValueRequest,
     ExecutePromptApiErrorResponse,
     ExecutePromptEvent,
     ExecutePromptEvent_Fulfilled,
@@ -151,6 +161,7 @@ from .types import (
     FunctionCallRequest,
     FunctionCallVariableValue,
     FunctionCallVellumValue,
+    FunctionCallVellumValueRequest,
     GenerateErrorResponse,
     GenerateOptionsRequest,
     GenerateRequest,
@@ -169,6 +180,7 @@ from .types import (
     ImageEnum,
     ImageVariableValue,
     ImageVellumValue,
+    ImageVellumValueRequest,
     IndexingConfigVectorizer,
     IndexingConfigVectorizerRequest,
     IndexingConfigVectorizerRequest_HkunlpInstructorXl,
@@ -198,6 +210,7 @@ from .types import (
     JsonInputRequest,
     JsonVariableValue,
     JsonVellumValue,
+    JsonVellumValueRequest,
     LogicalOperator,
     LogprobsEnum,
     MapEnum,
@@ -217,6 +230,8 @@ from .types import (
     NamedScenarioInputRequest_ChatHistory,
     NamedScenarioInputRequest_String,
     NamedScenarioInputStringVariableValueRequest,
+    NamedTestCaseArrayVariableValue,
+    NamedTestCaseArrayVariableValueRequest,
     NamedTestCaseChatHistoryVariableValue,
     NamedTestCaseChatHistoryVariableValueRequest,
     NamedTestCaseErrorVariableValue,
@@ -233,6 +248,7 @@ from .types import (
     NamedTestCaseStringVariableValueRequest,
     NamedTestCaseVariableValue,
     NamedTestCaseVariableValueRequest,
+    NamedTestCaseVariableValueRequest_Array,
     NamedTestCaseVariableValueRequest_ChatHistory,
     NamedTestCaseVariableValueRequest_Error,
     NamedTestCaseVariableValueRequest_FunctionCall,
@@ -240,6 +256,7 @@ from .types import (
     NamedTestCaseVariableValueRequest_Number,
     NamedTestCaseVariableValueRequest_SearchResults,
     NamedTestCaseVariableValueRequest_String,
+    NamedTestCaseVariableValue_Array,
     NamedTestCaseVariableValue_ChatHistory,
     NamedTestCaseVariableValue_Error,
     NamedTestCaseVariableValue_FunctionCall,
@@ -286,6 +303,7 @@ from .types import (
     NumberEnum,
     NumberVariableValue,
     NumberVellumValue,
+    NumberVellumValueRequest,
     OpenAiVectorizerConfig,
     OpenAiVectorizerConfigRequest,
     OpenAiVectorizerTextEmbedding3Large,
@@ -311,6 +329,7 @@ from .types import (
     PromptDeploymentInputRequest_Json,
     PromptDeploymentInputRequest_String,
     PromptExecutionMeta,
+    PromptNodeExecutionMeta,
     PromptNodeResult,
     PromptNodeResultData,
     PromptOutput,
@@ -379,6 +398,7 @@ from .types import (
     StringInputRequest,
     StringVariableValue,
     StringVellumValue,
+    StringVellumValueRequest,
     SubmitCompletionActualRequest,
     SubmitCompletionActualsErrorResponse,
     SubmitWorkflowExecutionActualRequest,
@@ -426,6 +446,7 @@ from .types import (
     TerminalNodeResultOutput_String,
     TerminalNodeSearchResultsResult,
     TerminalNodeStringResult,
+    TestCaseArrayVariableValue,
     TestCaseChatHistoryVariableValue,
     TestCaseErrorVariableValue,
     TestCaseFunctionCallVariableValue,
@@ -434,6 +455,7 @@ from .types import (
     TestCaseSearchResultsVariableValue,
     TestCaseStringVariableValue,
     TestCaseVariableValue,
+    TestCaseVariableValue_Array,
     TestCaseVariableValue_ChatHistory,
     TestCaseVariableValue_Error,
     TestCaseVariableValue_FunctionCall,
@@ -548,6 +570,7 @@ from .types import (
     WorkflowExecutionEventType,
     WorkflowExecutionNodeResultEvent,
     WorkflowExecutionWorkflowResultEvent,
+    WorkflowExpandMetaRequest,
     WorkflowNodeResultData,
     WorkflowNodeResultData_Api,
     WorkflowNodeResultData_CodeExecution,
@@ -659,6 +682,13 @@ __all__ = [
     "ArrayVariableValueItem_Number",
     "ArrayVariableValueItem_String",
     "ArrayVellumValueItem",
+    "ArrayVellumValueItemRequest",
+    "ArrayVellumValueItemRequest_Error",
+    "ArrayVellumValueItemRequest_FunctionCall",
+    "ArrayVellumValueItemRequest_Image",
+    "ArrayVellumValueItemRequest_Json",
+    "ArrayVellumValueItemRequest_Number",
+    "ArrayVellumValueItemRequest_String",
     "ArrayVellumValueItem_Error",
     "ArrayVellumValueItem_FunctionCall",
     "ArrayVellumValueItem_Image",
@@ -706,6 +736,8 @@ __all__ = [
     "CodeExecutionNodeResultOutput_String",
     "CodeExecutionNodeSearchResultsResult",
     "CodeExecutionNodeStringResult",
+    "CompilePromptDeploymentExpandMetaRequest",
+    "CompilePromptMeta",
     "ConditionalNodeResult",
     "ConditionalNodeResultData",
     "CreateEnum",
@@ -740,6 +772,7 @@ __all__ = [
     "ErrorEnum",
     "ErrorVariableValue",
     "ErrorVellumValue",
+    "ErrorVellumValueRequest",
     "ExecutePromptApiErrorResponse",
     "ExecutePromptEvent",
     "ExecutePromptEvent_Fulfilled",
@@ -791,6 +824,7 @@ __all__ = [
     "FunctionCallRequest",
     "FunctionCallVariableValue",
     "FunctionCallVellumValue",
+    "FunctionCallVellumValueRequest",
     "GenerateErrorResponse",
     "GenerateOptionsRequest",
     "GenerateRequest",
@@ -809,6 +843,7 @@ __all__ = [
     "ImageEnum",
     "ImageVariableValue",
     "ImageVellumValue",
+    "ImageVellumValueRequest",
     "IndexingConfigVectorizer",
     "IndexingConfigVectorizerRequest",
     "IndexingConfigVectorizerRequest_HkunlpInstructorXl",
@@ -839,6 +874,7 @@ __all__ = [
     "JsonInputRequest",
     "JsonVariableValue",
     "JsonVellumValue",
+    "JsonVellumValueRequest",
     "LogicalOperator",
     "LogprobsEnum",
     "MapEnum",
@@ -858,6 +894,8 @@ __all__ = [
     "NamedScenarioInputRequest_ChatHistory",
     "NamedScenarioInputRequest_String",
     "NamedScenarioInputStringVariableValueRequest",
+    "NamedTestCaseArrayVariableValue",
+    "NamedTestCaseArrayVariableValueRequest",
     "NamedTestCaseChatHistoryVariableValue",
     "NamedTestCaseChatHistoryVariableValueRequest",
     "NamedTestCaseErrorVariableValue",
@@ -874,6 +912,7 @@ __all__ = [
     "NamedTestCaseStringVariableValueRequest",
     "NamedTestCaseVariableValue",
     "NamedTestCaseVariableValueRequest",
+    "NamedTestCaseVariableValueRequest_Array",
     "NamedTestCaseVariableValueRequest_ChatHistory",
     "NamedTestCaseVariableValueRequest_Error",
     "NamedTestCaseVariableValueRequest_FunctionCall",
@@ -881,6 +920,7 @@ __all__ = [
     "NamedTestCaseVariableValueRequest_Number",
     "NamedTestCaseVariableValueRequest_SearchResults",
     "NamedTestCaseVariableValueRequest_String",
+    "NamedTestCaseVariableValue_Array",
     "NamedTestCaseVariableValue_ChatHistory",
     "NamedTestCaseVariableValue_Error",
     "NamedTestCaseVariableValue_FunctionCall",
@@ -928,6 +968,7 @@ __all__ = [
     "NumberEnum",
     "NumberVariableValue",
     "NumberVellumValue",
+    "NumberVellumValueRequest",
     "OpenAiVectorizerConfig",
     "OpenAiVectorizerConfigRequest",
     "OpenAiVectorizerTextEmbedding3Large",
@@ -953,6 +994,7 @@ __all__ = [
     "PromptDeploymentInputRequest_Json",
     "PromptDeploymentInputRequest_String",
     "PromptExecutionMeta",
+    "PromptNodeExecutionMeta",
     "PromptNodeResult",
     "PromptNodeResultData",
     "PromptOutput",
@@ -1021,6 +1063,7 @@ __all__ = [
     "StringInputRequest",
     "StringVariableValue",
     "StringVellumValue",
+    "StringVellumValueRequest",
     "SubmitCompletionActualRequest",
     "SubmitCompletionActualsErrorResponse",
     "SubmitWorkflowExecutionActualRequest",
@@ -1068,6 +1111,7 @@ __all__ = [
     "TerminalNodeResultOutput_String",
     "TerminalNodeSearchResultsResult",
     "TerminalNodeStringResult",
+    "TestCaseArrayVariableValue",
     "TestCaseChatHistoryVariableValue",
     "TestCaseErrorVariableValue",
     "TestCaseFunctionCallVariableValue",
@@ -1076,6 +1120,7 @@ __all__ = [
     "TestCaseSearchResultsVariableValue",
     "TestCaseStringVariableValue",
     "TestCaseVariableValue",
+    "TestCaseVariableValue_Array",
     "TestCaseVariableValue_ChatHistory",
     "TestCaseVariableValue_Error",
     "TestCaseVariableValue_FunctionCall",
@@ -1192,6 +1237,7 @@ __all__ = [
     "WorkflowExecutionEventType",
     "WorkflowExecutionNodeResultEvent",
     "WorkflowExecutionWorkflowResultEvent",
+    "WorkflowExpandMetaRequest",
     "WorkflowNodeResultData",
     "WorkflowNodeResultData_Api",
     "WorkflowNodeResultData_CodeExecution",
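All of the new 0.7.3 types above are re-exported from the package root, so they can be imported next to the existing names. A minimal import-only sketch; the field definitions of these models live in the new files under vellum/types/ and are not shown in this diff:

# Sketch: the new request-side value variants and the workflow expand-meta model
# are importable from the top-level package, mirroring the existing exports.
from vellum import (
    ArrayVellumValueItemRequest_String,
    NamedTestCaseVariableValueRequest_Array,
    WorkflowExpandMetaRequest,
)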
vellum/client.py
CHANGED
@@ -42,6 +42,7 @@ from .types.search_response import SearchResponse
 from .types.submit_completion_actual_request import SubmitCompletionActualRequest
 from .types.submit_workflow_execution_actual_request import SubmitWorkflowExecutionActualRequest
 from .types.workflow_execution_event_type import WorkflowExecutionEventType
+from .types.workflow_expand_meta_request import WorkflowExpandMetaRequest
 from .types.workflow_request_input_request import WorkflowRequestInputRequest
 from .types.workflow_stream_event import WorkflowStreamEvent
 
@@ -164,11 +165,11 @@ class Vellum:
             external_id="string",
             expand_meta=PromptDeploymentExpandMetaRequestRequest(
                 model_name=True,
+                usage=True,
+                finish_reason=True,
                 latency=True,
                 deployment_release_tag=True,
                 prompt_version_id=True,
-                finish_reason=True,
-                usage=True,
             ),
             raw_overrides=RawPromptExecutionOverridesRequest(
                 body={"string": {"key": "value"}},
@@ -299,11 +300,11 @@ class Vellum:
             external_id="string",
             expand_meta=PromptDeploymentExpandMetaRequestRequest(
                 model_name=True,
+                usage=True,
+                finish_reason=True,
                 latency=True,
                 deployment_release_tag=True,
                 prompt_version_id=True,
-                finish_reason=True,
-                usage=True,
             ),
             raw_overrides=RawPromptExecutionOverridesRequest(
                 body={"string": {"key": "value"}},
@@ -382,6 +383,7 @@ class Vellum:
         self,
         *,
         inputs: typing.Sequence[WorkflowRequestInputRequest],
+        expand_meta: typing.Optional[WorkflowExpandMetaRequest] = OMIT,
         workflow_deployment_id: typing.Optional[str] = OMIT,
         workflow_deployment_name: typing.Optional[str] = OMIT,
         release_tag: typing.Optional[str] = OMIT,
@@ -394,6 +396,8 @@ class Vellum:
         Parameters:
             - inputs: typing.Sequence[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
 
+            - expand_meta: typing.Optional[WorkflowExpandMetaRequest]. An optionally specified configuration used to opt in to including additional metadata about this workflow execution in the API response. Corresponding values will be returned under the `execution_meta` key within NODE events in the response stream.
+
             - workflow_deployment_id: typing.Optional[str]. The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
 
             - workflow_deployment_name: typing.Optional[str]. The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
@@ -404,7 +408,7 @@ class Vellum:
 
             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from vellum import WorkflowRequestInputRequest_String
+        from vellum import WorkflowExpandMetaRequest, WorkflowRequestInputRequest_String
         from vellum.client import Vellum
 
         client = Vellum(
@@ -417,6 +421,9 @@ class Vellum:
                     value="string",
                 )
             ],
+            expand_meta=WorkflowExpandMetaRequest(
+                usage=True,
+            ),
             workflow_deployment_id="string",
             workflow_deployment_name="string",
             release_tag="string",
@@ -424,6 +431,8 @@ class Vellum:
         )
         """
         _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+        if expand_meta is not OMIT:
+            _request["expand_meta"] = expand_meta
         if workflow_deployment_id is not OMIT:
             _request["workflow_deployment_id"] = workflow_deployment_id
         if workflow_deployment_name is not OMIT:
@@ -476,6 +485,7 @@ class Vellum:
         self,
         *,
         inputs: typing.Sequence[WorkflowRequestInputRequest],
+        expand_meta: typing.Optional[WorkflowExpandMetaRequest] = OMIT,
         workflow_deployment_id: typing.Optional[str] = OMIT,
         workflow_deployment_name: typing.Optional[str] = OMIT,
         release_tag: typing.Optional[str] = OMIT,
@@ -489,6 +499,8 @@ class Vellum:
         Parameters:
             - inputs: typing.Sequence[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
 
+            - expand_meta: typing.Optional[WorkflowExpandMetaRequest]. An optionally specified configuration used to opt in to including additional metadata about this workflow execution in the API response. Corresponding values will be returned under the `execution_meta` key within NODE events in the response stream.
+
             - workflow_deployment_id: typing.Optional[str]. The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
 
             - workflow_deployment_name: typing.Optional[str]. The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
@@ -501,7 +513,7 @@ class Vellum:
 
             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from vellum import WorkflowRequestInputRequest_String
+        from vellum import WorkflowExpandMetaRequest, WorkflowRequestInputRequest_String
         from vellum.client import Vellum
 
         client = Vellum(
@@ -514,6 +526,9 @@ class Vellum:
                     value="string",
                 )
             ],
+            expand_meta=WorkflowExpandMetaRequest(
+                usage=True,
+            ),
             workflow_deployment_id="string",
             workflow_deployment_name="string",
             release_tag="string",
@@ -522,6 +537,8 @@ class Vellum:
         )
         """
         _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+        if expand_meta is not OMIT:
+            _request["expand_meta"] = expand_meta
         if workflow_deployment_id is not OMIT:
             _request["workflow_deployment_id"] = workflow_deployment_id
         if workflow_deployment_name is not OMIT:
@@ -1125,11 +1142,11 @@ class AsyncVellum:
             external_id="string",
             expand_meta=PromptDeploymentExpandMetaRequestRequest(
                 model_name=True,
+                usage=True,
+                finish_reason=True,
                 latency=True,
                 deployment_release_tag=True,
                 prompt_version_id=True,
-                finish_reason=True,
-                usage=True,
             ),
             raw_overrides=RawPromptExecutionOverridesRequest(
                 body={"string": {"key": "value"}},
@@ -1260,11 +1277,11 @@ class AsyncVellum:
             external_id="string",
             expand_meta=PromptDeploymentExpandMetaRequestRequest(
                 model_name=True,
+                usage=True,
+                finish_reason=True,
                 latency=True,
                 deployment_release_tag=True,
                 prompt_version_id=True,
-                finish_reason=True,
-                usage=True,
             ),
             raw_overrides=RawPromptExecutionOverridesRequest(
                 body={"string": {"key": "value"}},
@@ -1343,6 +1360,7 @@ class AsyncVellum:
         self,
         *,
         inputs: typing.Sequence[WorkflowRequestInputRequest],
+        expand_meta: typing.Optional[WorkflowExpandMetaRequest] = OMIT,
         workflow_deployment_id: typing.Optional[str] = OMIT,
         workflow_deployment_name: typing.Optional[str] = OMIT,
         release_tag: typing.Optional[str] = OMIT,
@@ -1355,6 +1373,8 @@ class AsyncVellum:
         Parameters:
             - inputs: typing.Sequence[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
 
+            - expand_meta: typing.Optional[WorkflowExpandMetaRequest]. An optionally specified configuration used to opt in to including additional metadata about this workflow execution in the API response. Corresponding values will be returned under the `execution_meta` key within NODE events in the response stream.
+
             - workflow_deployment_id: typing.Optional[str]. The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
 
             - workflow_deployment_name: typing.Optional[str]. The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
@@ -1365,7 +1385,7 @@ class AsyncVellum:
 
             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from vellum import WorkflowRequestInputRequest_String
+        from vellum import WorkflowExpandMetaRequest, WorkflowRequestInputRequest_String
         from vellum.client import AsyncVellum
 
         client = AsyncVellum(
@@ -1378,6 +1398,9 @@ class AsyncVellum:
                     value="string",
                 )
             ],
+            expand_meta=WorkflowExpandMetaRequest(
+                usage=True,
+            ),
             workflow_deployment_id="string",
             workflow_deployment_name="string",
             release_tag="string",
@@ -1385,6 +1408,8 @@ class AsyncVellum:
         )
         """
         _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+        if expand_meta is not OMIT:
+            _request["expand_meta"] = expand_meta
         if workflow_deployment_id is not OMIT:
             _request["workflow_deployment_id"] = workflow_deployment_id
         if workflow_deployment_name is not OMIT:
@@ -1437,6 +1462,7 @@ class AsyncVellum:
         self,
         *,
         inputs: typing.Sequence[WorkflowRequestInputRequest],
+        expand_meta: typing.Optional[WorkflowExpandMetaRequest] = OMIT,
         workflow_deployment_id: typing.Optional[str] = OMIT,
         workflow_deployment_name: typing.Optional[str] = OMIT,
         release_tag: typing.Optional[str] = OMIT,
@@ -1450,6 +1476,8 @@ class AsyncVellum:
         Parameters:
             - inputs: typing.Sequence[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
 
+            - expand_meta: typing.Optional[WorkflowExpandMetaRequest]. An optionally specified configuration used to opt in to including additional metadata about this workflow execution in the API response. Corresponding values will be returned under the `execution_meta` key within NODE events in the response stream.
+
             - workflow_deployment_id: typing.Optional[str]. The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
 
             - workflow_deployment_name: typing.Optional[str]. The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
@@ -1462,7 +1490,7 @@ class AsyncVellum:
 
             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from vellum import WorkflowRequestInputRequest_String
+        from vellum import WorkflowExpandMetaRequest, WorkflowRequestInputRequest_String
         from vellum.client import AsyncVellum
 
         client = AsyncVellum(
@@ -1475,6 +1503,9 @@ class AsyncVellum:
                     value="string",
                 )
             ],
+            expand_meta=WorkflowExpandMetaRequest(
+                usage=True,
+            ),
             workflow_deployment_id="string",
             workflow_deployment_name="string",
             release_tag="string",
@@ -1483,6 +1514,8 @@ class AsyncVellum:
        )
         """
         _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+        if expand_meta is not OMIT:
+            _request["expand_meta"] = expand_meta
         if workflow_deployment_id is not OMIT:
             _request["workflow_deployment_id"] = workflow_deployment_id
         if workflow_deployment_name is not OMIT:
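The functional change in client.py is the new optional expand_meta argument on the workflow execution methods of Vellum and AsyncVellum. A minimal usage sketch based on the docstring examples above, assuming the method and input-type names shown there (execute_workflow_stream, WorkflowRequestInputRequest_String) and using a placeholder API key, input name, and deployment name:

from vellum import WorkflowExpandMetaRequest, WorkflowRequestInputRequest_String
from vellum.client import Vellum

client = Vellum(
    api_key="YOUR_API_KEY",  # placeholder credential
)

# Opt in to execution metadata; per the docstring added in this release, it is
# returned under the `execution_meta` key of NODE events in the response stream.
for event in client.execute_workflow_stream(
    inputs=[
        WorkflowRequestInputRequest_String(
            name="my_input",        # assumed input variable name
            value="Hello, world!",
        )
    ],
    expand_meta=WorkflowExpandMetaRequest(
        usage=True,
    ),
    workflow_deployment_name="my-workflow-deployment",  # placeholder deployment
):
    print(event)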
vellum/resources/deployments/client.py
CHANGED
@@ -14,6 +14,7 @@ from ...errors.bad_request_error import BadRequestError
 from ...errors.forbidden_error import ForbiddenError
 from ...errors.internal_server_error import InternalServerError
 from ...errors.not_found_error import NotFoundError
+from ...types.compile_prompt_deployment_expand_meta_request import CompilePromptDeploymentExpandMetaRequest
 from ...types.deployment_provider_payload_response import DeploymentProviderPayloadResponse
 from ...types.deployment_read import DeploymentRead
 from ...types.deployment_release_tag_read import DeploymentReleaseTagRead
@@ -276,6 +277,7 @@ class DeploymentsClient:
         deployment_name: typing.Optional[str] = OMIT,
         inputs: typing.Sequence[PromptDeploymentInputRequest],
         release_tag: typing.Optional[str] = OMIT,
+        expand_meta: typing.Optional[CompilePromptDeploymentExpandMetaRequest] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> DeploymentProviderPayloadResponse:
         """
@@ -288,6 +290,8 @@ class DeploymentsClient:
 
             - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment
 
+            - expand_meta: typing.Optional[CompilePromptDeploymentExpandMetaRequest].
+
             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
         from vellum.client import Vellum
@@ -306,6 +310,8 @@ class DeploymentsClient:
             _request["deployment_name"] = deployment_name
         if release_tag is not OMIT:
             _request["release_tag"] = release_tag
+        if expand_meta is not OMIT:
+            _request["expand_meta"] = expand_meta
         _response = self._client_wrapper.httpx_client.request(
             method="POST",
             url=urllib.parse.urljoin(
@@ -602,6 +608,7 @@ class AsyncDeploymentsClient:
         deployment_name: typing.Optional[str] = OMIT,
         inputs: typing.Sequence[PromptDeploymentInputRequest],
         release_tag: typing.Optional[str] = OMIT,
+        expand_meta: typing.Optional[CompilePromptDeploymentExpandMetaRequest] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> DeploymentProviderPayloadResponse:
         """
@@ -614,6 +621,8 @@ class AsyncDeploymentsClient:
 
             - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Workflow Deployment
 
+            - expand_meta: typing.Optional[CompilePromptDeploymentExpandMetaRequest].
+
             - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
         from vellum.client import AsyncVellum
@@ -632,6 +641,8 @@ class AsyncDeploymentsClient:
             _request["deployment_name"] = deployment_name
         if release_tag is not OMIT:
             _request["release_tag"] = release_tag
+        if expand_meta is not OMIT:
+            _request["expand_meta"] = expand_meta
         _response = await self._client_wrapper.httpx_client.request(
             method="POST",
             url=urllib.parse.urljoin(
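On the deployments client, the same pattern adds an optional expand_meta argument (typed CompilePromptDeploymentExpandMetaRequest) to the method that compiles a deployment's provider payload. A sketch of how it might be called; the method name retrieve_provider_payload, the api_key, the deployment name, and the input field names are assumptions for illustration, and the flags of the new request model are not shown in this diff, so it is constructed empty:

from vellum import CompilePromptDeploymentExpandMetaRequest, PromptDeploymentInputRequest_String
from vellum.client import Vellum

client = Vellum(
    api_key="YOUR_API_KEY",  # placeholder credential
)

# Assumed method name for the endpoint returning DeploymentProviderPayloadResponse;
# per the diff above, expand_meta is simply forwarded in the request body when provided.
payload = client.deployments.retrieve_provider_payload(
    deployment_name="my-prompt-deployment",  # placeholder; a deployment ID is the alternative
    inputs=[
        PromptDeploymentInputRequest_String(
            name="my_input",       # assumed field name
            value="Hello, world!",
        )
    ],
    expand_meta=CompilePromptDeploymentExpandMetaRequest(),  # available flags not shown in this diff
)
print(payload)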