vellum-ai 0.7.1__py3-none-any.whl → 0.7.2__py3-none-any.whl

vellum/__init__.py CHANGED
@@ -23,6 +23,13 @@ from .types import (
     ArrayVariableValueItem_Number,
     ArrayVariableValueItem_String,
     ArrayVellumValueItem,
+    ArrayVellumValueItemRequest,
+    ArrayVellumValueItemRequest_Error,
+    ArrayVellumValueItemRequest_FunctionCall,
+    ArrayVellumValueItemRequest_Image,
+    ArrayVellumValueItemRequest_Json,
+    ArrayVellumValueItemRequest_Number,
+    ArrayVellumValueItemRequest_String,
     ArrayVellumValueItem_Error,
     ArrayVellumValueItem_FunctionCall,
     ArrayVellumValueItem_Image,
@@ -101,6 +108,7 @@ from .types import (
     ErrorEnum,
     ErrorVariableValue,
     ErrorVellumValue,
+    ErrorVellumValueRequest,
     ExecutePromptApiErrorResponse,
     ExecutePromptEvent,
     ExecutePromptEvent_Fulfilled,
@@ -151,6 +159,7 @@ from .types import (
     FunctionCallRequest,
     FunctionCallVariableValue,
     FunctionCallVellumValue,
+    FunctionCallVellumValueRequest,
     GenerateErrorResponse,
     GenerateOptionsRequest,
     GenerateRequest,
@@ -169,6 +178,7 @@ from .types import (
     ImageEnum,
     ImageVariableValue,
     ImageVellumValue,
+    ImageVellumValueRequest,
     IndexingConfigVectorizer,
     IndexingConfigVectorizerRequest,
     IndexingConfigVectorizerRequest_HkunlpInstructorXl,
@@ -198,6 +208,7 @@ from .types import (
     JsonInputRequest,
     JsonVariableValue,
     JsonVellumValue,
+    JsonVellumValueRequest,
     LogicalOperator,
     LogprobsEnum,
     MapEnum,
@@ -217,6 +228,8 @@ from .types import (
     NamedScenarioInputRequest_ChatHistory,
     NamedScenarioInputRequest_String,
     NamedScenarioInputStringVariableValueRequest,
+    NamedTestCaseArrayVariableValue,
+    NamedTestCaseArrayVariableValueRequest,
     NamedTestCaseChatHistoryVariableValue,
     NamedTestCaseChatHistoryVariableValueRequest,
     NamedTestCaseErrorVariableValue,
@@ -233,6 +246,7 @@ from .types import (
     NamedTestCaseStringVariableValueRequest,
     NamedTestCaseVariableValue,
     NamedTestCaseVariableValueRequest,
+    NamedTestCaseVariableValueRequest_Array,
     NamedTestCaseVariableValueRequest_ChatHistory,
     NamedTestCaseVariableValueRequest_Error,
     NamedTestCaseVariableValueRequest_FunctionCall,
@@ -240,6 +254,7 @@ from .types import (
     NamedTestCaseVariableValueRequest_Number,
     NamedTestCaseVariableValueRequest_SearchResults,
     NamedTestCaseVariableValueRequest_String,
+    NamedTestCaseVariableValue_Array,
     NamedTestCaseVariableValue_ChatHistory,
     NamedTestCaseVariableValue_Error,
     NamedTestCaseVariableValue_FunctionCall,
@@ -286,6 +301,7 @@ from .types import (
     NumberEnum,
     NumberVariableValue,
     NumberVellumValue,
+    NumberVellumValueRequest,
     OpenAiVectorizerConfig,
     OpenAiVectorizerConfigRequest,
     OpenAiVectorizerTextEmbedding3Large,
@@ -311,6 +327,7 @@ from .types import (
     PromptDeploymentInputRequest_Json,
     PromptDeploymentInputRequest_String,
     PromptExecutionMeta,
+    PromptNodeExecutionMeta,
     PromptNodeResult,
     PromptNodeResultData,
     PromptOutput,
@@ -379,6 +396,7 @@ from .types import (
     StringInputRequest,
     StringVariableValue,
     StringVellumValue,
+    StringVellumValueRequest,
     SubmitCompletionActualRequest,
     SubmitCompletionActualsErrorResponse,
     SubmitWorkflowExecutionActualRequest,
@@ -426,6 +444,7 @@ from .types import (
     TerminalNodeResultOutput_String,
     TerminalNodeSearchResultsResult,
     TerminalNodeStringResult,
+    TestCaseArrayVariableValue,
     TestCaseChatHistoryVariableValue,
     TestCaseErrorVariableValue,
     TestCaseFunctionCallVariableValue,
@@ -434,6 +453,7 @@ from .types import (
     TestCaseSearchResultsVariableValue,
     TestCaseStringVariableValue,
     TestCaseVariableValue,
+    TestCaseVariableValue_Array,
     TestCaseVariableValue_ChatHistory,
     TestCaseVariableValue_Error,
     TestCaseVariableValue_FunctionCall,
@@ -548,6 +568,7 @@ from .types import (
     WorkflowExecutionEventType,
     WorkflowExecutionNodeResultEvent,
     WorkflowExecutionWorkflowResultEvent,
+    WorkflowExpandMetaRequest,
     WorkflowNodeResultData,
     WorkflowNodeResultData_Api,
     WorkflowNodeResultData_CodeExecution,
@@ -659,6 +680,13 @@ __all__ = [
     "ArrayVariableValueItem_Number",
     "ArrayVariableValueItem_String",
     "ArrayVellumValueItem",
+    "ArrayVellumValueItemRequest",
+    "ArrayVellumValueItemRequest_Error",
+    "ArrayVellumValueItemRequest_FunctionCall",
+    "ArrayVellumValueItemRequest_Image",
+    "ArrayVellumValueItemRequest_Json",
+    "ArrayVellumValueItemRequest_Number",
+    "ArrayVellumValueItemRequest_String",
     "ArrayVellumValueItem_Error",
     "ArrayVellumValueItem_FunctionCall",
     "ArrayVellumValueItem_Image",
@@ -740,6 +768,7 @@ __all__ = [
     "ErrorEnum",
     "ErrorVariableValue",
     "ErrorVellumValue",
+    "ErrorVellumValueRequest",
     "ExecutePromptApiErrorResponse",
     "ExecutePromptEvent",
     "ExecutePromptEvent_Fulfilled",
@@ -791,6 +820,7 @@ __all__ = [
     "FunctionCallRequest",
     "FunctionCallVariableValue",
     "FunctionCallVellumValue",
+    "FunctionCallVellumValueRequest",
     "GenerateErrorResponse",
     "GenerateOptionsRequest",
     "GenerateRequest",
@@ -809,6 +839,7 @@ __all__ = [
     "ImageEnum",
     "ImageVariableValue",
     "ImageVellumValue",
+    "ImageVellumValueRequest",
     "IndexingConfigVectorizer",
     "IndexingConfigVectorizerRequest",
     "IndexingConfigVectorizerRequest_HkunlpInstructorXl",
@@ -839,6 +870,7 @@ __all__ = [
     "JsonInputRequest",
     "JsonVariableValue",
     "JsonVellumValue",
+    "JsonVellumValueRequest",
     "LogicalOperator",
     "LogprobsEnum",
     "MapEnum",
@@ -858,6 +890,8 @@ __all__ = [
     "NamedScenarioInputRequest_ChatHistory",
     "NamedScenarioInputRequest_String",
     "NamedScenarioInputStringVariableValueRequest",
+    "NamedTestCaseArrayVariableValue",
+    "NamedTestCaseArrayVariableValueRequest",
     "NamedTestCaseChatHistoryVariableValue",
     "NamedTestCaseChatHistoryVariableValueRequest",
     "NamedTestCaseErrorVariableValue",
@@ -874,6 +908,7 @@ __all__ = [
     "NamedTestCaseStringVariableValueRequest",
     "NamedTestCaseVariableValue",
     "NamedTestCaseVariableValueRequest",
+    "NamedTestCaseVariableValueRequest_Array",
     "NamedTestCaseVariableValueRequest_ChatHistory",
     "NamedTestCaseVariableValueRequest_Error",
     "NamedTestCaseVariableValueRequest_FunctionCall",
@@ -881,6 +916,7 @@ __all__ = [
     "NamedTestCaseVariableValueRequest_Number",
     "NamedTestCaseVariableValueRequest_SearchResults",
     "NamedTestCaseVariableValueRequest_String",
+    "NamedTestCaseVariableValue_Array",
     "NamedTestCaseVariableValue_ChatHistory",
     "NamedTestCaseVariableValue_Error",
     "NamedTestCaseVariableValue_FunctionCall",
@@ -928,6 +964,7 @@ __all__ = [
     "NumberEnum",
     "NumberVariableValue",
     "NumberVellumValue",
+    "NumberVellumValueRequest",
     "OpenAiVectorizerConfig",
     "OpenAiVectorizerConfigRequest",
     "OpenAiVectorizerTextEmbedding3Large",
@@ -953,6 +990,7 @@ __all__ = [
     "PromptDeploymentInputRequest_Json",
     "PromptDeploymentInputRequest_String",
     "PromptExecutionMeta",
+    "PromptNodeExecutionMeta",
     "PromptNodeResult",
     "PromptNodeResultData",
     "PromptOutput",
@@ -1021,6 +1059,7 @@ __all__ = [
     "StringInputRequest",
     "StringVariableValue",
     "StringVellumValue",
+    "StringVellumValueRequest",
     "SubmitCompletionActualRequest",
     "SubmitCompletionActualsErrorResponse",
     "SubmitWorkflowExecutionActualRequest",
@@ -1068,6 +1107,7 @@ __all__ = [
     "TerminalNodeResultOutput_String",
     "TerminalNodeSearchResultsResult",
     "TerminalNodeStringResult",
+    "TestCaseArrayVariableValue",
     "TestCaseChatHistoryVariableValue",
     "TestCaseErrorVariableValue",
     "TestCaseFunctionCallVariableValue",
@@ -1076,6 +1116,7 @@ __all__ = [
     "TestCaseSearchResultsVariableValue",
     "TestCaseStringVariableValue",
     "TestCaseVariableValue",
+    "TestCaseVariableValue_Array",
     "TestCaseVariableValue_ChatHistory",
     "TestCaseVariableValue_Error",
     "TestCaseVariableValue_FunctionCall",
@@ -1192,6 +1233,7 @@ __all__ = [
     "WorkflowExecutionEventType",
     "WorkflowExecutionNodeResultEvent",
     "WorkflowExecutionWorkflowResultEvent",
+    "WorkflowExpandMetaRequest",
     "WorkflowNodeResultData",
     "WorkflowNodeResultData_Api",
     "WorkflowNodeResultData_CodeExecution",
vellum/client.py CHANGED
@@ -42,6 +42,7 @@ from .types.search_response import SearchResponse
 from .types.submit_completion_actual_request import SubmitCompletionActualRequest
 from .types.submit_workflow_execution_actual_request import SubmitWorkflowExecutionActualRequest
 from .types.workflow_execution_event_type import WorkflowExecutionEventType
+from .types.workflow_expand_meta_request import WorkflowExpandMetaRequest
 from .types.workflow_request_input_request import WorkflowRequestInputRequest
 from .types.workflow_stream_event import WorkflowStreamEvent
 
@@ -164,11 +165,11 @@ class Vellum:
             external_id="string",
             expand_meta=PromptDeploymentExpandMetaRequestRequest(
                 model_name=True,
+                usage=True,
+                finish_reason=True,
                 latency=True,
                 deployment_release_tag=True,
                 prompt_version_id=True,
-                finish_reason=True,
-                usage=True,
             ),
             raw_overrides=RawPromptExecutionOverridesRequest(
                 body={"string": {"key": "value"}},
@@ -299,11 +300,11 @@ class Vellum:
             external_id="string",
             expand_meta=PromptDeploymentExpandMetaRequestRequest(
                 model_name=True,
+                usage=True,
+                finish_reason=True,
                 latency=True,
                 deployment_release_tag=True,
                 prompt_version_id=True,
-                finish_reason=True,
-                usage=True,
             ),
             raw_overrides=RawPromptExecutionOverridesRequest(
                 body={"string": {"key": "value"}},
@@ -382,6 +383,7 @@ class Vellum:
         self,
         *,
         inputs: typing.Sequence[WorkflowRequestInputRequest],
+        expand_meta: typing.Optional[WorkflowExpandMetaRequest] = OMIT,
         workflow_deployment_id: typing.Optional[str] = OMIT,
         workflow_deployment_name: typing.Optional[str] = OMIT,
         release_tag: typing.Optional[str] = OMIT,
@@ -394,6 +396,8 @@ class Vellum:
         Parameters:
         - inputs: typing.Sequence[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
 
+        - expand_meta: typing.Optional[WorkflowExpandMetaRequest]. An optionally specified configuration used to opt in to including additional metadata about this workflow execution in the API response. Corresponding values will be returned under the `execution_meta` key within NODE events in the response stream.
+
         - workflow_deployment_id: typing.Optional[str]. The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
 
         - workflow_deployment_name: typing.Optional[str]. The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
@@ -404,7 +408,7 @@ class Vellum:
 
         - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from vellum import WorkflowRequestInputRequest_String
+        from vellum import WorkflowExpandMetaRequest, WorkflowRequestInputRequest_String
         from vellum.client import Vellum
 
         client = Vellum(
@@ -417,6 +421,9 @@ class Vellum:
                     value="string",
                 )
             ],
+            expand_meta=WorkflowExpandMetaRequest(
+                usage=True,
+            ),
             workflow_deployment_id="string",
             workflow_deployment_name="string",
             release_tag="string",
@@ -424,6 +431,8 @@ class Vellum:
         )
         """
         _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+        if expand_meta is not OMIT:
+            _request["expand_meta"] = expand_meta
         if workflow_deployment_id is not OMIT:
             _request["workflow_deployment_id"] = workflow_deployment_id
         if workflow_deployment_name is not OMIT:
@@ -476,6 +485,7 @@ class Vellum:
         self,
         *,
         inputs: typing.Sequence[WorkflowRequestInputRequest],
+        expand_meta: typing.Optional[WorkflowExpandMetaRequest] = OMIT,
         workflow_deployment_id: typing.Optional[str] = OMIT,
         workflow_deployment_name: typing.Optional[str] = OMIT,
         release_tag: typing.Optional[str] = OMIT,
@@ -489,6 +499,8 @@ class Vellum:
         Parameters:
         - inputs: typing.Sequence[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
 
+        - expand_meta: typing.Optional[WorkflowExpandMetaRequest]. An optionally specified configuration used to opt in to including additional metadata about this workflow execution in the API response. Corresponding values will be returned under the `execution_meta` key within NODE events in the response stream.
+
         - workflow_deployment_id: typing.Optional[str]. The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
 
         - workflow_deployment_name: typing.Optional[str]. The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
@@ -501,7 +513,7 @@ class Vellum:
 
         - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from vellum import WorkflowRequestInputRequest_String
+        from vellum import WorkflowExpandMetaRequest, WorkflowRequestInputRequest_String
        from vellum.client import Vellum
 
         client = Vellum(
@@ -514,6 +526,9 @@ class Vellum:
                     value="string",
                 )
             ],
+            expand_meta=WorkflowExpandMetaRequest(
+                usage=True,
+            ),
             workflow_deployment_id="string",
             workflow_deployment_name="string",
             release_tag="string",
@@ -522,6 +537,8 @@ class Vellum:
         )
         """
         _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+        if expand_meta is not OMIT:
+            _request["expand_meta"] = expand_meta
         if workflow_deployment_id is not OMIT:
             _request["workflow_deployment_id"] = workflow_deployment_id
         if workflow_deployment_name is not OMIT:
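
Taken together, the `Vellum` hunks above add an optional `expand_meta` argument to `execute_workflow` and `execute_workflow_stream`, forwarding it into the request body when provided. A minimal usage sketch follows; the deployment name, release tag, and input variable name are placeholders rather than values from this diff.

```python
# Sketch: opting in to workflow execution metadata via the expand_meta
# parameter added in 0.7.2. Deployment/input names below are placeholders.
from vellum import WorkflowExpandMetaRequest, WorkflowRequestInputRequest_String
from vellum.client import Vellum

client = Vellum(api_key="YOUR_API_KEY")

result = client.execute_workflow(
    workflow_deployment_name="my-workflow-deployment",  # placeholder
    release_tag="LATEST",                               # placeholder
    inputs=[
        WorkflowRequestInputRequest_String(
            name="query",           # placeholder input variable name
            value="Hello, world",
        )
    ],
    # Per the new docstring, the requested metadata is returned under the
    # `execution_meta` key within NODE events.
    expand_meta=WorkflowExpandMetaRequest(usage=True),
)
```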
@@ -1125,11 +1142,11 @@ class AsyncVellum:
             external_id="string",
             expand_meta=PromptDeploymentExpandMetaRequestRequest(
                 model_name=True,
+                usage=True,
+                finish_reason=True,
                 latency=True,
                 deployment_release_tag=True,
                 prompt_version_id=True,
-                finish_reason=True,
-                usage=True,
             ),
             raw_overrides=RawPromptExecutionOverridesRequest(
                 body={"string": {"key": "value"}},
@@ -1260,11 +1277,11 @@ class AsyncVellum:
             external_id="string",
             expand_meta=PromptDeploymentExpandMetaRequestRequest(
                 model_name=True,
+                usage=True,
+                finish_reason=True,
                 latency=True,
                 deployment_release_tag=True,
                 prompt_version_id=True,
-                finish_reason=True,
-                usage=True,
             ),
             raw_overrides=RawPromptExecutionOverridesRequest(
                 body={"string": {"key": "value"}},
@@ -1343,6 +1360,7 @@ class AsyncVellum:
         self,
         *,
         inputs: typing.Sequence[WorkflowRequestInputRequest],
+        expand_meta: typing.Optional[WorkflowExpandMetaRequest] = OMIT,
         workflow_deployment_id: typing.Optional[str] = OMIT,
         workflow_deployment_name: typing.Optional[str] = OMIT,
         release_tag: typing.Optional[str] = OMIT,
@@ -1355,6 +1373,8 @@ class AsyncVellum:
         Parameters:
         - inputs: typing.Sequence[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
 
+        - expand_meta: typing.Optional[WorkflowExpandMetaRequest]. An optionally specified configuration used to opt in to including additional metadata about this workflow execution in the API response. Corresponding values will be returned under the `execution_meta` key within NODE events in the response stream.
+
         - workflow_deployment_id: typing.Optional[str]. The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
 
         - workflow_deployment_name: typing.Optional[str]. The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
@@ -1365,7 +1385,7 @@ class AsyncVellum:
 
         - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from vellum import WorkflowRequestInputRequest_String
+        from vellum import WorkflowExpandMetaRequest, WorkflowRequestInputRequest_String
         from vellum.client import AsyncVellum
 
         client = AsyncVellum(
@@ -1378,6 +1398,9 @@ class AsyncVellum:
                     value="string",
                 )
             ],
+            expand_meta=WorkflowExpandMetaRequest(
+                usage=True,
+            ),
             workflow_deployment_id="string",
             workflow_deployment_name="string",
             release_tag="string",
@@ -1385,6 +1408,8 @@ class AsyncVellum:
         )
         """
         _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+        if expand_meta is not OMIT:
+            _request["expand_meta"] = expand_meta
         if workflow_deployment_id is not OMIT:
             _request["workflow_deployment_id"] = workflow_deployment_id
         if workflow_deployment_name is not OMIT:
@@ -1437,6 +1462,7 @@ class AsyncVellum:
         self,
         *,
         inputs: typing.Sequence[WorkflowRequestInputRequest],
+        expand_meta: typing.Optional[WorkflowExpandMetaRequest] = OMIT,
         workflow_deployment_id: typing.Optional[str] = OMIT,
         workflow_deployment_name: typing.Optional[str] = OMIT,
         release_tag: typing.Optional[str] = OMIT,
@@ -1450,6 +1476,8 @@ class AsyncVellum:
         Parameters:
         - inputs: typing.Sequence[WorkflowRequestInputRequest]. The list of inputs defined in the Workflow's Deployment with their corresponding values.
 
+        - expand_meta: typing.Optional[WorkflowExpandMetaRequest]. An optionally specified configuration used to opt in to including additional metadata about this workflow execution in the API response. Corresponding values will be returned under the `execution_meta` key within NODE events in the response stream.
+
         - workflow_deployment_id: typing.Optional[str]. The ID of the Workflow Deployment. Must provide either this or workflow_deployment_name.
 
         - workflow_deployment_name: typing.Optional[str]. The name of the Workflow Deployment. Must provide either this or workflow_deployment_id.
@@ -1462,7 +1490,7 @@ class AsyncVellum:
 
         - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
         ---
-        from vellum import WorkflowRequestInputRequest_String
+        from vellum import WorkflowExpandMetaRequest, WorkflowRequestInputRequest_String
         from vellum.client import AsyncVellum
 
         client = AsyncVellum(
@@ -1475,6 +1503,9 @@ class AsyncVellum:
                     value="string",
                 )
             ],
+            expand_meta=WorkflowExpandMetaRequest(
+                usage=True,
+            ),
             workflow_deployment_id="string",
             workflow_deployment_name="string",
             release_tag="string",
@@ -1483,6 +1514,8 @@ class AsyncVellum:
         )
         """
         _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
+        if expand_meta is not OMIT:
+            _request["expand_meta"] = expand_meta
         if workflow_deployment_id is not OMIT:
             _request["workflow_deployment_id"] = workflow_deployment_id
         if workflow_deployment_name is not OMIT:
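
The `AsyncVellum` hunks mirror the synchronous changes for `execute_workflow` and `execute_workflow_stream`. A streaming sketch under the same assumptions (placeholder names; the exact shape of each streamed event is not shown in this diff):

```python
# Sketch: async streaming with expand_meta. Per the added docstring, the
# requested usage metadata appears under `execution_meta` on NODE events.
import asyncio

from vellum import WorkflowExpandMetaRequest, WorkflowRequestInputRequest_String
from vellum.client import AsyncVellum


async def main() -> None:
    client = AsyncVellum(api_key="YOUR_API_KEY")
    stream = client.execute_workflow_stream(
        workflow_deployment_name="my-workflow-deployment",  # placeholder
        inputs=[
            WorkflowRequestInputRequest_String(name="query", value="Hello"),  # placeholders
        ],
        expand_meta=WorkflowExpandMetaRequest(usage=True),
    )
    async for event in stream:
        # WORKFLOW and NODE events arrive as the execution progresses; NODE
        # events carry the expanded execution metadata when requested.
        print(event)


asyncio.run(main())
```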
@@ -18,7 +18,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.7.1",
+            "X-Fern-SDK-Version": "0.7.2",
         }
         headers["X_API_KEY"] = self.api_key
         return headers
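
The final hunk bumps the `X-Fern-SDK-Version` request header from 0.7.1 to 0.7.2. A quick, optional way to confirm which version is installed locally (not part of the diff):

```python
# Sketch: check that the installed distribution matches the 0.7.2 header value.
from importlib.metadata import version

print(version("vellum-ai"))  # expected: 0.7.2
```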