vellum-ai 1.3.0__py3-none-any.whl → 1.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. vellum/__init__.py +6 -0
  2. vellum/client/README.md +5 -5
  3. vellum/client/__init__.py +20 -0
  4. vellum/client/core/client_wrapper.py +2 -2
  5. vellum/client/raw_client.py +20 -0
  6. vellum/client/reference.md +61 -27
  7. vellum/client/resources/ad_hoc/client.py +29 -29
  8. vellum/client/resources/ad_hoc/raw_client.py +13 -13
  9. vellum/client/resources/events/client.py +69 -33
  10. vellum/client/resources/events/raw_client.py +13 -9
  11. vellum/client/types/__init__.py +6 -0
  12. vellum/client/types/create_workflow_event_request.py +7 -0
  13. vellum/client/types/deprecated_prompt_request_input.py +8 -0
  14. vellum/client/types/event_create_response.py +5 -0
  15. vellum/client/types/logical_operator.py +1 -0
  16. vellum/client/types/processing_failure_reason_enum.py +3 -1
  17. vellum/client/types/slim_document.py +1 -0
  18. vellum/client/types/workflow_input.py +31 -0
  19. vellum/types/create_workflow_event_request.py +3 -0
  20. vellum/types/deprecated_prompt_request_input.py +3 -0
  21. vellum/types/workflow_input.py +3 -0
  22. vellum/workflows/constants.py +3 -0
  23. vellum/workflows/events/node.py +1 -0
  24. vellum/workflows/events/tests/test_event.py +1 -0
  25. vellum/workflows/events/workflow.py +10 -2
  26. vellum/workflows/nodes/core/templating_node/tests/test_templating_node.py +16 -0
  27. vellum/workflows/nodes/displayable/code_execution_node/tests/test_node.py +3 -13
  28. vellum/workflows/nodes/displayable/tool_calling_node/utils.py +11 -0
  29. vellum/workflows/nodes/displayable/web_search_node/tests/__init__.py +0 -0
  30. vellum/workflows/nodes/displayable/web_search_node/tests/test_node.py +319 -0
  31. vellum/workflows/nodes/tests/test_utils.py +23 -0
  32. vellum/workflows/nodes/utils.py +14 -0
  33. vellum/workflows/runner/runner.py +33 -12
  34. vellum/workflows/types/code_execution_node_wrappers.py +2 -1
  35. {vellum_ai-1.3.0.dist-info → vellum_ai-1.3.2.dist-info}/METADATA +1 -1
  36. {vellum_ai-1.3.0.dist-info → vellum_ai-1.3.2.dist-info}/RECORD +41 -32
  37. vellum_ee/workflows/display/tests/workflow_serialization/test_web_search_node_serialization.py +81 -0
  38. vellum_ee/workflows/display/utils/events.py +3 -0
  39. {vellum_ai-1.3.0.dist-info → vellum_ai-1.3.2.dist-info}/LICENSE +0 -0
  40. {vellum_ai-1.3.0.dist-info → vellum_ai-1.3.2.dist-info}/WHEEL +0 -0
  41. {vellum_ai-1.3.0.dist-info → vellum_ai-1.3.2.dist-info}/entry_points.txt +0 -0
vellum/__init__.py CHANGED
@@ -78,6 +78,7 @@ from .client.types import (
  ContainerImageContainerImageTag,
  ContainerImageRead,
  CreateTestSuiteTestCaseRequest,
+ CreateWorkflowEventRequest,
  DelimiterChunkerConfig,
  DelimiterChunkerConfigRequest,
  DelimiterChunking,
@@ -88,6 +89,7 @@ from .client.types import (
  DeploymentRead,
  DeploymentReleaseTagDeploymentHistoryItem,
  DeploymentReleaseTagRead,
+ DeprecatedPromptRequestInput,
  DockerServiceToken,
  DocumentChatMessageContent,
  DocumentChatMessageContentRequest,
@@ -624,6 +626,7 @@ from .client.types import (
  WorkflowExecutionWorkflowResultEvent,
  WorkflowExpandMetaRequest,
  WorkflowInitializationError,
+ WorkflowInput,
  WorkflowNodeResultData,
  WorkflowNodeResultEvent,
  WorkflowNodeResultEventState,
@@ -784,6 +787,7 @@ __all__ = [
  "ContainerImageContainerImageTag",
  "ContainerImageRead",
  "CreateTestSuiteTestCaseRequest",
+ "CreateWorkflowEventRequest",
  "DelimiterChunkerConfig",
  "DelimiterChunkerConfigRequest",
  "DelimiterChunking",
@@ -795,6 +799,7 @@ __all__ = [
  "DeploymentReleaseTagDeploymentHistoryItem",
  "DeploymentReleaseTagRead",
  "DeploymentsListRequestStatus",
+ "DeprecatedPromptRequestInput",
  "DockerServiceToken",
  "DocumentChatMessageContent",
  "DocumentChatMessageContentRequest",
@@ -1344,6 +1349,7 @@ __all__ = [
  "WorkflowExecutionWorkflowResultEvent",
  "WorkflowExpandMetaRequest",
  "WorkflowInitializationError",
+ "WorkflowInput",
  "WorkflowNodeResultData",
  "WorkflowNodeResultEvent",
  "WorkflowNodeResultEventState",
vellum/client/README.md CHANGED
@@ -103,7 +103,7 @@ The SDK supports streaming responses, as well, the response will be a generator
  from vellum import (
  JinjaPromptBlock,
  PromptParameters,
- PromptRequestStringInput,
+ StringInput,
  Vellum,
  VellumVariable,
  )
@@ -115,12 +115,12 @@ client = Vellum(
  response = client.ad_hoc.adhoc_execute_prompt_stream(
  ml_model="x",
  input_values=[
- PromptRequestStringInput(
- key="x",
+ StringInput(
+ name="x",
  value="value",
  ),
- PromptRequestStringInput(
- key="x",
+ StringInput(
+ name="x",
  value="value",
  ),
  ],
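
As the README hunks above show, the example input type changes from PromptRequestStringInput (keyed by `key`) to StringInput (keyed by `name`). A minimal migration sketch under that assumption:

    # Sketch only, mirroring the updated README example:
    # StringInput takes name= where PromptRequestStringInput took key=.
    from vellum import StringInput

    input_values = [
        StringInput(name="x", value="value"),
    ]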
vellum/client/__init__.py CHANGED
@@ -479,6 +479,7 @@ class Vellum:
  release_tag: typing.Optional[str] = OMIT,
  external_id: typing.Optional[str] = OMIT,
  metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ previous_execution_id: typing.Optional[str] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> ExecuteWorkflowResponse:
  """
@@ -507,6 +508,9 @@ class Vellum:
  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
  Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

+ previous_execution_id : typing.Optional[str]
+ The ID of a previous Workflow Execution to reference for initial State loading.
+
  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.

@@ -544,6 +548,7 @@ class Vellum:
  release_tag=release_tag,
  external_id=external_id,
  metadata=metadata,
+ previous_execution_id=previous_execution_id,
  request_options=request_options,
  )
  return _response.data
@@ -559,6 +564,7 @@ class Vellum:
  external_id: typing.Optional[str] = OMIT,
  event_types: typing.Optional[typing.Sequence[WorkflowExecutionEventType]] = OMIT,
  metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ previous_execution_id: typing.Optional[str] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.Iterator[WorkflowStreamEvent]:
  """
@@ -590,6 +596,9 @@ class Vellum:
  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
  Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

+ previous_execution_id : typing.Optional[str]
+ The ID of a previous Workflow Execution to reference for initial State loading.
+
  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.

@@ -630,6 +639,7 @@ class Vellum:
  external_id=external_id,
  event_types=event_types,
  metadata=metadata,
+ previous_execution_id=previous_execution_id,
  request_options=request_options,
  ) as r:
  yield from r.data
@@ -1376,6 +1386,7 @@ class AsyncVellum:
  release_tag: typing.Optional[str] = OMIT,
  external_id: typing.Optional[str] = OMIT,
  metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ previous_execution_id: typing.Optional[str] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> ExecuteWorkflowResponse:
  """
@@ -1404,6 +1415,9 @@ class AsyncVellum:
  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
  Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

+ previous_execution_id : typing.Optional[str]
+ The ID of a previous Workflow Execution to reference for initial State loading.
+
  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.

@@ -1449,6 +1463,7 @@ class AsyncVellum:
  release_tag=release_tag,
  external_id=external_id,
  metadata=metadata,
+ previous_execution_id=previous_execution_id,
  request_options=request_options,
  )
  return _response.data
@@ -1464,6 +1479,7 @@ class AsyncVellum:
  external_id: typing.Optional[str] = OMIT,
  event_types: typing.Optional[typing.Sequence[WorkflowExecutionEventType]] = OMIT,
  metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ previous_execution_id: typing.Optional[str] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.AsyncIterator[WorkflowStreamEvent]:
  """
@@ -1495,6 +1511,9 @@ class AsyncVellum:
  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
  Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

+ previous_execution_id : typing.Optional[str]
+ The ID of a previous Workflow Execution to reference for initial State loading.
+
  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.

@@ -1543,6 +1562,7 @@ class AsyncVellum:
  external_id=external_id,
  event_types=event_types,
  metadata=metadata,
+ previous_execution_id=previous_execution_id,
  request_options=request_options,
  ) as r:
  async for _chunk in r.data:
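
The hunks above thread a new optional previous_execution_id parameter through execute_workflow and execute_workflow_stream on both Vellum and AsyncVellum, used to load initial State from a prior Workflow Execution. A minimal sketch follows; the deployment name, inputs, and execution ID are placeholders, and every parameter other than previous_execution_id belongs to the pre-existing signature rather than this diff:

    # Sketch only; values are placeholders.
    from vellum import Vellum

    client = Vellum(api_key="YOUR_API_KEY")
    response = client.execute_workflow(
        workflow_deployment_name="my-deployment",      # assumed existing parameter
        inputs=[],                                     # assumed existing parameter
        previous_execution_id="<prior-execution-id>",  # new in 1.3.2: seeds initial State
    )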
vellum/client/core/client_wrapper.py CHANGED
@@ -27,10 +27,10 @@ class BaseClientWrapper:

  def get_headers(self) -> typing.Dict[str, str]:
  headers: typing.Dict[str, str] = {
- "User-Agent": "vellum-ai/1.3.0",
+ "User-Agent": "vellum-ai/1.3.2",
  "X-Fern-Language": "Python",
  "X-Fern-SDK-Name": "vellum-ai",
- "X-Fern-SDK-Version": "1.3.0",
+ "X-Fern-SDK-Version": "1.3.2",
  **(self.get_custom_headers() or {}),
  }
  if self._api_version is not None:
vellum/client/raw_client.py CHANGED
@@ -515,6 +515,7 @@ class RawVellum:
  release_tag: typing.Optional[str] = OMIT,
  external_id: typing.Optional[str] = OMIT,
  metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ previous_execution_id: typing.Optional[str] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> HttpResponse[ExecuteWorkflowResponse]:
  """
@@ -543,6 +544,9 @@ class RawVellum:
  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
  Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

+ previous_execution_id : typing.Optional[str]
+ The ID of a previous Workflow Execution to reference for initial State loading.
+
  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.

@@ -567,6 +571,7 @@ class RawVellum:
  "release_tag": release_tag,
  "external_id": external_id,
  "metadata": metadata,
+ "previous_execution_id": previous_execution_id,
  },
  headers={
  "content-type": "application/json",
@@ -634,6 +639,7 @@ class RawVellum:
  external_id: typing.Optional[str] = OMIT,
  event_types: typing.Optional[typing.Sequence[WorkflowExecutionEventType]] = OMIT,
  metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ previous_execution_id: typing.Optional[str] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.Iterator[HttpResponse[typing.Iterator[WorkflowStreamEvent]]]:
  """
@@ -665,6 +671,9 @@ class RawVellum:
  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
  Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

+ previous_execution_id : typing.Optional[str]
+ The ID of a previous Workflow Execution to reference for initial State loading.
+
  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.

@@ -690,6 +699,7 @@ class RawVellum:
  "external_id": external_id,
  "event_types": event_types,
  "metadata": metadata,
+ "previous_execution_id": previous_execution_id,
  },
  headers={
  "content-type": "application/json",
@@ -1728,6 +1738,7 @@ class AsyncRawVellum:
  release_tag: typing.Optional[str] = OMIT,
  external_id: typing.Optional[str] = OMIT,
  metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ previous_execution_id: typing.Optional[str] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> AsyncHttpResponse[ExecuteWorkflowResponse]:
  """
@@ -1756,6 +1767,9 @@ class AsyncRawVellum:
  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
  Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

+ previous_execution_id : typing.Optional[str]
+ The ID of a previous Workflow Execution to reference for initial State loading.
+
  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.

@@ -1780,6 +1794,7 @@ class AsyncRawVellum:
  "release_tag": release_tag,
  "external_id": external_id,
  "metadata": metadata,
+ "previous_execution_id": previous_execution_id,
  },
  headers={
  "content-type": "application/json",
@@ -1847,6 +1862,7 @@ class AsyncRawVellum:
  external_id: typing.Optional[str] = OMIT,
  event_types: typing.Optional[typing.Sequence[WorkflowExecutionEventType]] = OMIT,
  metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ previous_execution_id: typing.Optional[str] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[WorkflowStreamEvent]]]:
  """
@@ -1878,6 +1894,9 @@ class AsyncRawVellum:
  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
  Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

+ previous_execution_id : typing.Optional[str]
+ The ID of a previous Workflow Execution to reference for initial State loading.
+
  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.

@@ -1903,6 +1922,7 @@ class AsyncRawVellum:
  "external_id": external_id,
  "event_types": event_types,
  "metadata": metadata,
+ "previous_execution_id": previous_execution_id,
  },
  headers={
  "content-type": "application/json",
vellum/client/reference.md CHANGED
@@ -605,6 +605,14 @@ client.execute_workflow(
  <dl>
  <dd>

+ **previous_execution_id:** `typing.Optional[str]` — The ID of a previous Workflow Execution to reference for initial State loading.
+
+ </dd>
+ </dl>
+
+ <dl>
+ <dd>
+
  **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.

  </dd>
@@ -743,6 +751,14 @@ for chunk in response.data:
  <dl>
  <dd>

+ **previous_execution_id:** `typing.Optional[str]` — The ID of a previous Workflow Execution to reference for initial State loading.
+
+ </dd>
+ </dl>
+
+ <dl>
+ <dd>
+
  **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.

  </dd>
@@ -1262,7 +1278,7 @@ client.submit_workflow_execution_actuals(
  <dl>
  <dd>

- Accept an event and publish it to ClickHouse for analytics processing.
+ Accept an event or list of events and publish them to ClickHouse for analytics processing.
  </dd>
  </dl>
  </dd>
@@ -1291,22 +1307,40 @@ client = Vellum(
  api_key="YOUR_API_KEY",
  )
  client.events.create(
- request=NodeExecutionInitiatedEvent(
- body=NodeExecutionInitiatedBody(
- node_definition=VellumCodeResourceDefinition(
- name="name",
- module=["module", "module"],
- id="id",
+ request=[
+ NodeExecutionInitiatedEvent(
+ body=NodeExecutionInitiatedBody(
+ node_definition=VellumCodeResourceDefinition(
+ name="name",
+ module=["module", "module"],
+ id="id",
+ ),
+ inputs={"inputs": {"key": "value"}},
  ),
- inputs={"inputs": {"key": "value"}},
+ id="id",
+ timestamp=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ trace_id="trace_id",
+ span_id="span_id",
  ),
- id="id",
- timestamp=datetime.datetime.fromisoformat(
- "2024-01-15 09:30:00+00:00",
+ NodeExecutionInitiatedEvent(
+ body=NodeExecutionInitiatedBody(
+ node_definition=VellumCodeResourceDefinition(
+ name="name",
+ module=["module", "module"],
+ id="id",
+ ),
+ inputs={"inputs": {"key": "value"}},
+ ),
+ id="id",
+ timestamp=datetime.datetime.fromisoformat(
+ "2024-01-15 09:30:00+00:00",
+ ),
+ trace_id="trace_id",
+ span_id="span_id",
  ),
- trace_id="trace_id",
- span_id="span_id",
- ),
+ ],
  )

  ```
@@ -1323,7 +1357,7 @@ client.events.create(
  <dl>
  <dd>

- **request:** `WorkflowEvent`
+ **request:** `CreateWorkflowEventRequest`

  </dd>
  </dl>
@@ -1360,7 +1394,7 @@ client.events.create(
  from vellum import (
  JinjaPromptBlock,
  PromptParameters,
- PromptRequestStringInput,
+ StringInput,
  Vellum,
  VellumVariable,
  )
@@ -1372,12 +1406,12 @@ client = Vellum(
  client.ad_hoc.adhoc_execute_prompt(
  ml_model="x",
  input_values=[
- PromptRequestStringInput(
- key="x",
+ StringInput(
+ name="x",
  value="value",
  ),
- PromptRequestStringInput(
- key="x",
+ StringInput(
+ name="x",
  value="value",
  ),
  ],
@@ -1426,7 +1460,7 @@ client.ad_hoc.adhoc_execute_prompt(
  <dl>
  <dd>

- **input_values:** `typing.Sequence[PromptRequestInput]`
+ **input_values:** `typing.Sequence[DeprecatedPromptRequestInput]`

  </dd>
  </dl>
@@ -1510,7 +1544,7 @@ client.ad_hoc.adhoc_execute_prompt(
  from vellum import (
  JinjaPromptBlock,
  PromptParameters,
- PromptRequestStringInput,
+ StringInput,
  Vellum,
  VellumVariable,
  )
@@ -1522,12 +1556,12 @@ client = Vellum(
  response = client.ad_hoc.adhoc_execute_prompt_stream(
  ml_model="x",
  input_values=[
- PromptRequestStringInput(
- key="x",
+ StringInput(
+ name="x",
  value="value",
  ),
- PromptRequestStringInput(
- key="x",
+ StringInput(
+ name="x",
  value="value",
  ),
  ],
@@ -1578,7 +1612,7 @@ for chunk in response.data:
  <dl>
  <dd>

- **input_values:** `typing.Sequence[PromptRequestInput]`
+ **input_values:** `typing.Sequence[DeprecatedPromptRequestInput]`

  </dd>
  </dl>
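
The reference change above documents that client.events.create now takes a CreateWorkflowEventRequest, i.e. a single workflow event or a list of them. A minimal single-event sketch, assuming the event types are importable from the top-level package as in the reference example (all field values are placeholders):

    # Sketch only; ids, timestamps, and inputs are placeholders.
    import datetime

    from vellum import (
        NodeExecutionInitiatedBody,
        NodeExecutionInitiatedEvent,
        Vellum,
        VellumCodeResourceDefinition,
    )

    client = Vellum(api_key="YOUR_API_KEY")
    client.events.create(
        request=[
            NodeExecutionInitiatedEvent(
                body=NodeExecutionInitiatedBody(
                    node_definition=VellumCodeResourceDefinition(
                        name="name",
                        module=["module", "module"],
                        id="id",
                    ),
                    inputs={"inputs": {"key": "value"}},
                ),
                id="id",
                timestamp=datetime.datetime.fromisoformat("2024-01-15 09:30:00+00:00"),
                trace_id="trace_id",
                span_id="span_id",
            ),
        ],
    )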
vellum/client/resources/ad_hoc/client.py CHANGED
@@ -6,10 +6,10 @@ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
  from ...core.request_options import RequestOptions
  from ...types.ad_hoc_execute_prompt_event import AdHocExecutePromptEvent
  from ...types.ad_hoc_expand_meta import AdHocExpandMeta
+ from ...types.deprecated_prompt_request_input import DeprecatedPromptRequestInput
  from ...types.function_definition import FunctionDefinition
  from ...types.prompt_block import PromptBlock
  from ...types.prompt_parameters import PromptParameters
- from ...types.prompt_request_input import PromptRequestInput
  from ...types.prompt_settings import PromptSettings
  from ...types.vellum_variable import VellumVariable
  from .raw_client import AsyncRawAdHocClient, RawAdHocClient
@@ -41,7 +41,7 @@ class AdHocClient:
  self,
  *,
  ml_model: str,
- input_values: typing.Sequence[PromptRequestInput],
+ input_values: typing.Sequence[DeprecatedPromptRequestInput],
  input_variables: typing.Sequence[VellumVariable],
  parameters: PromptParameters,
  blocks: typing.Sequence[PromptBlock],
@@ -55,7 +55,7 @@ class AdHocClient:
  ----------
  ml_model : str

- input_values : typing.Sequence[PromptRequestInput]
+ input_values : typing.Sequence[DeprecatedPromptRequestInput]

  input_variables : typing.Sequence[VellumVariable]

@@ -82,7 +82,7 @@ class AdHocClient:
  from vellum import (
  JinjaPromptBlock,
  PromptParameters,
- PromptRequestStringInput,
+ StringInput,
  Vellum,
  VellumVariable,
  )
@@ -94,12 +94,12 @@ class AdHocClient:
  client.ad_hoc.adhoc_execute_prompt(
  ml_model="x",
  input_values=[
- PromptRequestStringInput(
- key="x",
+ StringInput(
+ name="x",
  value="value",
  ),
- PromptRequestStringInput(
- key="x",
+ StringInput(
+ name="x",
  value="value",
  ),
  ],
@@ -143,7 +143,7 @@ class AdHocClient:
  self,
  *,
  ml_model: str,
- input_values: typing.Sequence[PromptRequestInput],
+ input_values: typing.Sequence[DeprecatedPromptRequestInput],
  input_variables: typing.Sequence[VellumVariable],
  parameters: PromptParameters,
  blocks: typing.Sequence[PromptBlock],
@@ -157,7 +157,7 @@ class AdHocClient:
  ----------
  ml_model : str

- input_values : typing.Sequence[PromptRequestInput]
+ input_values : typing.Sequence[DeprecatedPromptRequestInput]

  input_variables : typing.Sequence[VellumVariable]

@@ -184,7 +184,7 @@ class AdHocClient:
  from vellum import (
  JinjaPromptBlock,
  PromptParameters,
- PromptRequestStringInput,
+ StringInput,
  Vellum,
  VellumVariable,
  )
@@ -196,12 +196,12 @@ class AdHocClient:
  response = client.ad_hoc.adhoc_execute_prompt_stream(
  ml_model="x",
  input_values=[
- PromptRequestStringInput(
- key="x",
+ StringInput(
+ name="x",
  value="value",
  ),
- PromptRequestStringInput(
- key="x",
+ StringInput(
+ name="x",
  value="value",
  ),
  ],
@@ -263,7 +263,7 @@ class AsyncAdHocClient:
  self,
  *,
  ml_model: str,
- input_values: typing.Sequence[PromptRequestInput],
+ input_values: typing.Sequence[DeprecatedPromptRequestInput],
  input_variables: typing.Sequence[VellumVariable],
  parameters: PromptParameters,
  blocks: typing.Sequence[PromptBlock],
@@ -277,7 +277,7 @@ class AsyncAdHocClient:
  ----------
  ml_model : str

- input_values : typing.Sequence[PromptRequestInput]
+ input_values : typing.Sequence[DeprecatedPromptRequestInput]

  input_variables : typing.Sequence[VellumVariable]

@@ -307,7 +307,7 @@ class AsyncAdHocClient:
  AsyncVellum,
  JinjaPromptBlock,
  PromptParameters,
- PromptRequestStringInput,
+ StringInput,
  VellumVariable,
  )

@@ -321,12 +321,12 @@ class AsyncAdHocClient:
  await client.ad_hoc.adhoc_execute_prompt(
  ml_model="x",
  input_values=[
- PromptRequestStringInput(
- key="x",
+ StringInput(
+ name="x",
  value="value",
  ),
- PromptRequestStringInput(
- key="x",
+ StringInput(
+ name="x",
  value="value",
  ),
  ],
@@ -373,7 +373,7 @@ class AsyncAdHocClient:
  self,
  *,
  ml_model: str,
- input_values: typing.Sequence[PromptRequestInput],
+ input_values: typing.Sequence[DeprecatedPromptRequestInput],
  input_variables: typing.Sequence[VellumVariable],
  parameters: PromptParameters,
  blocks: typing.Sequence[PromptBlock],
@@ -387,7 +387,7 @@ class AsyncAdHocClient:
  ----------
  ml_model : str

- input_values : typing.Sequence[PromptRequestInput]
+ input_values : typing.Sequence[DeprecatedPromptRequestInput]

  input_variables : typing.Sequence[VellumVariable]

@@ -417,7 +417,7 @@ class AsyncAdHocClient:
  AsyncVellum,
  JinjaPromptBlock,
  PromptParameters,
- PromptRequestStringInput,
+ StringInput,
  VellumVariable,
  )

@@ -431,12 +431,12 @@ class AsyncAdHocClient:
  response = await client.ad_hoc.adhoc_execute_prompt_stream(
  ml_model="x",
  input_values=[
- PromptRequestStringInput(
- key="x",
+ StringInput(
+ name="x",
  value="value",
  ),
- PromptRequestStringInput(
- key="x",
+ StringInput(
+ name="x",
  value="value",
  ),
  ],