vellum-ai 0.14.25__py3-none-any.whl → 0.14.27__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (46)
  1. vellum/__init__.py +6 -4
  2. vellum/client/__init__.py +4 -0
  3. vellum/client/core/client_wrapper.py +1 -1
  4. vellum/client/core/jsonable_encoder.py +1 -1
  5. vellum/client/resources/__init__.py +2 -2
  6. vellum/client/resources/prompts/__init__.py +2 -0
  7. vellum/client/resources/prompts/client.py +197 -0
  8. vellum/client/resources/workflows/__init__.py +0 -3
  9. vellum/client/resources/workflows/client.py +0 -9
  10. vellum/client/types/__init__.py +4 -2
  11. vellum/client/types/deployment_release_tag_read.py +7 -1
  12. vellum/client/types/prompt_exec_config.py +37 -0
  13. vellum/client/types/{release.py → release_tag_release.py} +1 -1
  14. vellum/client/types/workflow_release_tag_read.py +2 -2
  15. vellum/client/types/workflow_release_tag_workflow_deployment_history_item.py +3 -10
  16. vellum/{types/release.py → resources/prompts/__init__.py} +1 -1
  17. vellum/resources/{workflows/types/__init__.py → prompts/client.py} +1 -1
  18. vellum/{resources/workflows/types/workflows_pull_request_format.py → types/prompt_exec_config.py} +1 -1
  19. vellum/types/release_tag_release.py +3 -0
  20. vellum/workflows/events/types.py +10 -7
  21. vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +2 -4
  22. vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py +2 -4
  23. vellum/workflows/nodes/displayable/conftest.py +117 -0
  24. vellum/workflows/nodes/displayable/guardrail_node/node.py +10 -11
  25. vellum/workflows/nodes/displayable/guardrail_node/test_node.py +38 -0
  26. vellum/workflows/nodes/displayable/inline_prompt_node/tests/test_node.py +49 -0
  27. vellum/workflows/nodes/displayable/prompt_deployment_node/tests/test_node.py +49 -0
  28. vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py +2 -5
  29. vellum/workflows/nodes/displayable/subworkflow_deployment_node/tests/test_node.py +63 -0
  30. vellum/workflows/references/workflow_input.py +3 -0
  31. vellum/workflows/runner/runner.py +2 -0
  32. {vellum_ai-0.14.25.dist-info → vellum_ai-0.14.27.dist-info}/METADATA +1 -1
  33. {vellum_ai-0.14.25.dist-info → vellum_ai-0.14.27.dist-info}/RECORD +44 -40
  34. vellum_ee/workflows/display/base.py +13 -7
  35. vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/conftest.py +11 -10
  36. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_default_state_serialization.py +1 -1
  37. vellum_ee/workflows/display/types.py +5 -9
  38. vellum_ee/workflows/display/vellum.py +9 -4
  39. vellum_ee/workflows/display/workflows/base_workflow_display.py +20 -21
  40. vellum_ee/workflows/display/workflows/vellum_workflow_display.py +7 -35
  41. vellum_ee/workflows/tests/test_server.py +54 -0
  42. vellum/client/resources/workflows/types/__init__.py +0 -5
  43. vellum/client/resources/workflows/types/workflows_pull_request_format.py +0 -5
  44. {vellum_ai-0.14.25.dist-info → vellum_ai-0.14.27.dist-info}/LICENSE +0 -0
  45. {vellum_ai-0.14.25.dist-info → vellum_ai-0.14.27.dist-info}/WHEEL +0 -0
  46. {vellum_ai-0.14.25.dist-info → vellum_ai-0.14.27.dist-info}/entry_points.txt +0 -0
vellum/__init__.py CHANGED
@@ -300,6 +300,7 @@ from .types import (
  PromptDeploymentExpandMetaRequest,
  PromptDeploymentInputRequest,
  PromptDeploymentParentContext,
+ PromptExecConfig,
  PromptExecutionMeta,
  PromptNodeExecutionMeta,
  PromptNodeResult,
@@ -322,7 +323,7 @@ from .types import (
  RejectedExecuteWorkflowWorkflowResultEvent,
  RejectedPromptExecutionMeta,
  RejectedWorkflowNodeResultEvent,
- Release,
+ ReleaseTagRelease,
  ReleaseTagSource,
  ReplaceTestSuiteTestCaseRequest,
  RichTextChildBlock,
@@ -580,7 +581,6 @@ from .resources import (
  ListWorkflowReleaseTagsRequestSource,
  ListWorkflowSandboxExamplesRequestTag,
  WorkflowDeploymentsListRequestStatus,
- WorkflowsPullRequestFormat,
  ad_hoc,
  container_images,
  deployments,
@@ -590,6 +590,7 @@ from .resources import (
  metric_definitions,
  ml_models,
  organizations,
+ prompts,
  sandboxes,
  test_suite_runs,
  test_suites,
@@ -911,6 +912,7 @@ __all__ = [
  "PromptDeploymentExpandMetaRequest",
  "PromptDeploymentInputRequest",
  "PromptDeploymentParentContext",
+ "PromptExecConfig",
  "PromptExecutionMeta",
  "PromptNodeExecutionMeta",
  "PromptNodeResult",
@@ -933,7 +935,7 @@ __all__ = [
  "RejectedExecuteWorkflowWorkflowResultEvent",
  "RejectedPromptExecutionMeta",
  "RejectedWorkflowNodeResultEvent",
- "Release",
+ "ReleaseTagRelease",
  "ReleaseTagSource",
  "ReplaceTestSuiteTestCaseRequest",
  "RichTextChildBlock",
@@ -1182,7 +1184,6 @@ __all__ = [
  "WorkflowSandboxExample",
  "WorkflowSandboxParentContext",
  "WorkflowStreamEvent",
- "WorkflowsPullRequestFormat",
  "WorkspaceRead",
  "WorkspaceSecretRead",
  "__version__",
@@ -1195,6 +1196,7 @@ __all__ = [
  "metric_definitions",
  "ml_models",
  "organizations",
+ "prompts",
  "sandboxes",
  "test_suite_runs",
  "test_suites",
vellum/client/__init__.py CHANGED
@@ -13,6 +13,7 @@ from .resources.folder_entities.client import FolderEntitiesClient
  from .resources.metric_definitions.client import MetricDefinitionsClient
  from .resources.ml_models.client import MlModelsClient
  from .resources.organizations.client import OrganizationsClient
+ from .resources.prompts.client import PromptsClient
  from .resources.sandboxes.client import SandboxesClient
  from .resources.test_suite_runs.client import TestSuiteRunsClient
  from .resources.test_suites.client import TestSuitesClient
@@ -69,6 +70,7 @@ from .resources.folder_entities.client import AsyncFolderEntitiesClient
  from .resources.metric_definitions.client import AsyncMetricDefinitionsClient
  from .resources.ml_models.client import AsyncMlModelsClient
  from .resources.organizations.client import AsyncOrganizationsClient
+ from .resources.prompts.client import AsyncPromptsClient
  from .resources.sandboxes.client import AsyncSandboxesClient
  from .resources.test_suite_runs.client import AsyncTestSuiteRunsClient
  from .resources.test_suites.client import AsyncTestSuitesClient
@@ -145,6 +147,7 @@ class Vellum:
  self.metric_definitions = MetricDefinitionsClient(client_wrapper=self._client_wrapper)
  self.ml_models = MlModelsClient(client_wrapper=self._client_wrapper)
  self.organizations = OrganizationsClient(client_wrapper=self._client_wrapper)
+ self.prompts = PromptsClient(client_wrapper=self._client_wrapper)
  self.sandboxes = SandboxesClient(client_wrapper=self._client_wrapper)
  self.test_suite_runs = TestSuiteRunsClient(client_wrapper=self._client_wrapper)
  self.test_suites = TestSuitesClient(client_wrapper=self._client_wrapper)
@@ -1486,6 +1489,7 @@ class AsyncVellum:
  self.metric_definitions = AsyncMetricDefinitionsClient(client_wrapper=self._client_wrapper)
  self.ml_models = AsyncMlModelsClient(client_wrapper=self._client_wrapper)
  self.organizations = AsyncOrganizationsClient(client_wrapper=self._client_wrapper)
+ self.prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper)
  self.sandboxes = AsyncSandboxesClient(client_wrapper=self._client_wrapper)
  self.test_suite_runs = AsyncTestSuiteRunsClient(client_wrapper=self._client_wrapper)
  self.test_suites = AsyncTestSuitesClient(client_wrapper=self._client_wrapper)
vellum/client/core/client_wrapper.py CHANGED
@@ -18,7 +18,7 @@ class BaseClientWrapper:
  headers: typing.Dict[str, str] = {
  "X-Fern-Language": "Python",
  "X-Fern-SDK-Name": "vellum-ai",
- "X-Fern-SDK-Version": "0.14.25",
+ "X-Fern-SDK-Version": "0.14.27",
  }
  headers["X_API_KEY"] = self.api_key
  return headers
vellum/client/core/jsonable_encoder.py CHANGED
@@ -45,7 +45,7 @@ def jsonable_encoder(obj: Any, custom_encoder: Optional[Dict[Any, Callable[[Any]
  encoder = getattr(obj.__config__, "json_encoders", {}) # type: ignore # Pydantic v1
  if custom_encoder:
  encoder.update(custom_encoder)
- obj_dict = obj.dict(by_alias=True)
+ obj_dict = obj.model_dump(mode="json")
  if "__root__" in obj_dict:
  obj_dict = obj_dict["__root__"]
  if "root" in obj_dict:
vellum/client/resources/__init__.py CHANGED
@@ -10,6 +10,7 @@ from . import (
  metric_definitions,
  ml_models,
  organizations,
+ prompts,
  sandboxes,
  test_suite_runs,
  test_suites,
@@ -24,7 +25,6 @@ from .document_indexes import DocumentIndexesListRequestStatus
  from .folder_entities import FolderEntitiesListRequestEntityStatus
  from .workflow_deployments import ListWorkflowReleaseTagsRequestSource, WorkflowDeploymentsListRequestStatus
  from .workflow_sandboxes import ListWorkflowSandboxExamplesRequestTag
- from .workflows import WorkflowsPullRequestFormat

  __all__ = [
  "DeploymentsListRequestStatus",
@@ -34,7 +34,6 @@ __all__ = [
  "ListWorkflowReleaseTagsRequestSource",
  "ListWorkflowSandboxExamplesRequestTag",
  "WorkflowDeploymentsListRequestStatus",
- "WorkflowsPullRequestFormat",
  "ad_hoc",
  "container_images",
  "deployments",
@@ -44,6 +43,7 @@ __all__ = [
  "metric_definitions",
  "ml_models",
  "organizations",
+ "prompts",
  "sandboxes",
  "test_suite_runs",
  "test_suites",
vellum/client/resources/prompts/__init__.py ADDED
@@ -0,0 +1,2 @@
+ # This file was auto-generated by Fern from our API Definition.
+
vellum/client/resources/prompts/client.py ADDED
@@ -0,0 +1,197 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ from ...core.client_wrapper import SyncClientWrapper
+ import typing
+ from ...core.request_options import RequestOptions
+ from ...types.prompt_exec_config import PromptExecConfig
+ from ...core.jsonable_encoder import jsonable_encoder
+ from ...core.pydantic_utilities import parse_obj_as
+ from ...errors.bad_request_error import BadRequestError
+ from ...errors.not_found_error import NotFoundError
+ from json.decoder import JSONDecodeError
+ from ...core.api_error import ApiError
+ from ...core.client_wrapper import AsyncClientWrapper
+
+
+ class PromptsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def pull(
+ self,
+ id: str,
+ *,
+ prompt_variant_id: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PromptExecConfig:
+ """
+ Used to pull the definition of a Prompt from Vellum.
+
+ Parameters
+ ----------
+ id : str
+ The ID of the Prompt to pull from. Prompt Sandbox IDs are currently supported.
+
+ prompt_variant_id : typing.Optional[str]
+ The ID of the Prompt Variant within a Prompt Sandbox to pull. Must be included if providing the ID of a Prompt Sandbox.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptExecConfig
+
+
+ Examples
+ --------
+ from vellum import Vellum
+
+ client = Vellum(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.pull(
+ id="id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"v1/prompts/{jsonable_encoder(id)}/pull",
+ base_url=self._client_wrapper.get_environment().default,
+ method="GET",
+ params={
+ "prompt_variant_id": prompt_variant_id,
+ },
+ headers={
+ "Accept": "application/json",
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ PromptExecConfig,
+ parse_obj_as(
+ type_=PromptExecConfig, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(
+ typing.Optional[typing.Any],
+ parse_obj_as(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ if _response.status_code == 404:
+ raise NotFoundError(
+ typing.cast(
+ typing.Optional[typing.Any],
+ parse_obj_as(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+ class AsyncPromptsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def pull(
+ self,
+ id: str,
+ *,
+ prompt_variant_id: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PromptExecConfig:
+ """
+ Used to pull the definition of a Prompt from Vellum.
+
+ Parameters
+ ----------
+ id : str
+ The ID of the Prompt to pull from. Prompt Sandbox IDs are currently supported.
+
+ prompt_variant_id : typing.Optional[str]
+ The ID of the Prompt Variant within a Prompt Sandbox to pull. Must be included if providing the ID of a Prompt Sandbox.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptExecConfig
+
+
+ Examples
+ --------
+ import asyncio
+
+ from vellum import AsyncVellum
+
+ client = AsyncVellum(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.prompts.pull(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/prompts/{jsonable_encoder(id)}/pull",
+ base_url=self._client_wrapper.get_environment().default,
+ method="GET",
+ params={
+ "prompt_variant_id": prompt_variant_id,
+ },
+ headers={
+ "Accept": "application/json",
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ PromptExecConfig,
+ parse_obj_as(
+ type_=PromptExecConfig, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 400:
+ raise BadRequestError(
+ typing.cast(
+ typing.Optional[typing.Any],
+ parse_obj_as(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ if _response.status_code == 404:
+ raise NotFoundError(
+ typing.cast(
+ typing.Optional[typing.Any],
+ parse_obj_as(
+ type_=typing.Optional[typing.Any], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
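A short usage sketch for the new endpoint, based on the docstrings above. The IDs are placeholders, and the error import path follows the module layout shown in this diff:

```python
# Usage sketch for the new prompts.pull endpoint; IDs are placeholders.
from vellum import Vellum
from vellum.client.errors.not_found_error import NotFoundError

client = Vellum(api_key="YOUR_API_KEY")

try:
    # prompt_variant_id must be provided when pulling by Prompt Sandbox ID.
    exec_config = client.prompts.pull(
        id="my-prompt-sandbox-id",
        prompt_variant_id="my-prompt-variant-id",
    )
    print(exec_config.ml_model)
except NotFoundError:
    print("No Prompt found for that ID")
```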
vellum/client/resources/workflows/__init__.py CHANGED
@@ -1,5 +1,2 @@
  # This file was auto-generated by Fern from our API Definition.

- from .types import WorkflowsPullRequestFormat
-
- __all__ = ["WorkflowsPullRequestFormat"]
vellum/client/resources/workflows/client.py CHANGED
@@ -2,7 +2,6 @@

  import typing
  from ...core.client_wrapper import SyncClientWrapper
- from .types.workflows_pull_request_format import WorkflowsPullRequestFormat
  from ...core.request_options import RequestOptions
  from ...core.jsonable_encoder import jsonable_encoder
  from ...errors.bad_request_error import BadRequestError
@@ -28,7 +27,6 @@ class WorkflowsClient:
  id: str,
  *,
  exclude_code: typing.Optional[bool] = None,
- format: typing.Optional[WorkflowsPullRequestFormat] = None,
  include_json: typing.Optional[bool] = None,
  include_sandbox: typing.Optional[bool] = None,
  strict: typing.Optional[bool] = None,
@@ -42,8 +40,6 @@ class WorkflowsClient:

  exclude_code : typing.Optional[bool]

- format : typing.Optional[WorkflowsPullRequestFormat]
-
  include_json : typing.Optional[bool]

  include_sandbox : typing.Optional[bool]
@@ -64,7 +60,6 @@
  method="GET",
  params={
  "exclude_code": exclude_code,
- "format": format,
  "include_json": include_json,
  "include_sandbox": include_sandbox,
  "strict": strict,
@@ -180,7 +175,6 @@ class AsyncWorkflowsClient:
  id: str,
  *,
  exclude_code: typing.Optional[bool] = None,
- format: typing.Optional[WorkflowsPullRequestFormat] = None,
  include_json: typing.Optional[bool] = None,
  include_sandbox: typing.Optional[bool] = None,
  strict: typing.Optional[bool] = None,
@@ -194,8 +188,6 @@ class AsyncWorkflowsClient:

  exclude_code : typing.Optional[bool]

- format : typing.Optional[WorkflowsPullRequestFormat]
-
  include_json : typing.Optional[bool]

  include_sandbox : typing.Optional[bool]
@@ -216,7 +208,6 @@
  method="GET",
  params={
  "exclude_code": exclude_code,
- "format": format,
  "include_json": include_json,
  "include_sandbox": include_sandbox,
  "strict": strict,
vellum/client/types/__init__.py CHANGED
@@ -308,6 +308,7 @@ from .prompt_block_state import PromptBlockState
  from .prompt_deployment_expand_meta_request import PromptDeploymentExpandMetaRequest
  from .prompt_deployment_input_request import PromptDeploymentInputRequest
  from .prompt_deployment_parent_context import PromptDeploymentParentContext
+ from .prompt_exec_config import PromptExecConfig
  from .prompt_execution_meta import PromptExecutionMeta
  from .prompt_node_execution_meta import PromptNodeExecutionMeta
  from .prompt_node_result import PromptNodeResult
@@ -330,7 +331,7 @@ from .rejected_execute_prompt_response import RejectedExecutePromptResponse
  from .rejected_execute_workflow_workflow_result_event import RejectedExecuteWorkflowWorkflowResultEvent
  from .rejected_prompt_execution_meta import RejectedPromptExecutionMeta
  from .rejected_workflow_node_result_event import RejectedWorkflowNodeResultEvent
- from .release import Release
+ from .release_tag_release import ReleaseTagRelease
  from .release_tag_source import ReleaseTagSource
  from .replace_test_suite_test_case_request import ReplaceTestSuiteTestCaseRequest
  from .rich_text_child_block import RichTextChildBlock
@@ -892,6 +893,7 @@ __all__ = [
  "PromptDeploymentExpandMetaRequest",
  "PromptDeploymentInputRequest",
  "PromptDeploymentParentContext",
+ "PromptExecConfig",
  "PromptExecutionMeta",
  "PromptNodeExecutionMeta",
  "PromptNodeResult",
@@ -914,7 +916,7 @@ __all__ = [
  "RejectedExecuteWorkflowWorkflowResultEvent",
  "RejectedPromptExecutionMeta",
  "RejectedWorkflowNodeResultEvent",
- "Release",
+ "ReleaseTagRelease",
  "ReleaseTagSource",
  "ReplaceTestSuiteTestCaseRequest",
  "RichTextChildBlock",
vellum/client/types/deployment_release_tag_read.py CHANGED
@@ -4,6 +4,7 @@ from ..core.pydantic_utilities import UniversalBaseModel
  import pydantic
  from .release_tag_source import ReleaseTagSource
  from .deployment_release_tag_deployment_history_item import DeploymentReleaseTagDeploymentHistoryItem
+ from .release_tag_release import ReleaseTagRelease
  from ..core.pydantic_utilities import IS_PYDANTIC_V2
  import typing

@@ -24,7 +25,12 @@ class DeploymentReleaseTagRead(UniversalBaseModel):

  history_item: DeploymentReleaseTagDeploymentHistoryItem = pydantic.Field()
  """
- The Deployment History Item that this Release Tag is associated with
+ Deprecated. Reference the `release` field instead.
+ """
+
+ release: ReleaseTagRelease = pydantic.Field()
+ """
+ The Release that this Release Tag points to.
  """

  if IS_PYDANTIC_V2:
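With `history_item` now marked deprecated, code that inspects a deployment release tag should read the new `release` field instead. A minimal sketch against the fields shown above (the helper function is hypothetical):

```python
# Minimal sketch: prefer the new `release` field over the deprecated `history_item`.
from vellum.client.types.deployment_release_tag_read import DeploymentReleaseTagRead


def release_id(tag: DeploymentReleaseTagRead) -> str:
    # Previously: tag.history_item.id (still present, but deprecated)
    return tag.release.id
```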
vellum/client/types/prompt_exec_config.py ADDED
@@ -0,0 +1,37 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ from __future__ import annotations
+ from ..core.pydantic_utilities import UniversalBaseModel
+ from .array_vellum_value import ArrayVellumValue
+ from .chat_message_prompt_block import ChatMessagePromptBlock
+ import typing
+ from .vellum_variable import VellumVariable
+ from .prompt_parameters import PromptParameters
+ from .prompt_settings import PromptSettings
+ from .prompt_block import PromptBlock
+ from .function_definition import FunctionDefinition
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2
+ import pydantic
+ from ..core.pydantic_utilities import update_forward_refs
+
+
+ class PromptExecConfig(UniversalBaseModel):
+ ml_model: str
+ input_variables: typing.List[VellumVariable]
+ parameters: PromptParameters
+ settings: typing.Optional[PromptSettings] = None
+ blocks: typing.List[PromptBlock]
+ functions: typing.Optional[typing.List[FunctionDefinition]] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+ update_forward_refs(ArrayVellumValue, PromptExecConfig=PromptExecConfig)
+ update_forward_refs(ChatMessagePromptBlock, PromptExecConfig=PromptExecConfig)
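`PromptExecConfig` is the return type of the new `prompts.pull` endpoint. A sketch of inspecting one; the field names come from the model above, while the `key` attribute on `VellumVariable` is assumed from that type's usual shape and the IDs are placeholders:

```python
# Sketch: inspecting the PromptExecConfig returned by prompts.pull.
from vellum import Vellum

client = Vellum(api_key="YOUR_API_KEY")
config = client.prompts.pull(
    id="my-prompt-sandbox-id",
    prompt_variant_id="my-prompt-variant-id",
)

print(config.ml_model)                          # model identifier string
print([v.key for v in config.input_variables])  # declared input variables (assumes VellumVariable.key)
print(len(config.blocks))                       # prompt blocks making up the template
if config.functions:
    print(f"{len(config.functions)} function definition(s) attached")
```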
vellum/client/types/{release.py → release_tag_release.py} RENAMED
@@ -7,7 +7,7 @@ import typing
  import pydantic


- class Release(UniversalBaseModel):
+ class ReleaseTagRelease(UniversalBaseModel):
  id: str
  timestamp: dt.datetime

vellum/client/types/workflow_release_tag_read.py CHANGED
@@ -4,7 +4,7 @@ from ..core.pydantic_utilities import UniversalBaseModel
  import pydantic
  from .release_tag_source import ReleaseTagSource
  from .workflow_release_tag_workflow_deployment_history_item import WorkflowReleaseTagWorkflowDeploymentHistoryItem
- from .release import Release
+ from .release_tag_release import ReleaseTagRelease
  from ..core.pydantic_utilities import IS_PYDANTIC_V2
  import typing

@@ -28,7 +28,7 @@ class WorkflowReleaseTagRead(UniversalBaseModel):
  Deprecated. Reference the `release` field instead.
  """

- release: Release = pydantic.Field()
+ release: ReleaseTagRelease = pydantic.Field()
  """
  The Release that this Release Tag points to.
  """
vellum/client/types/workflow_release_tag_workflow_deployment_history_item.py CHANGED
@@ -1,22 +1,15 @@
  # This file was auto-generated by Fern from our API Definition.

  from ..core.pydantic_utilities import UniversalBaseModel
- import pydantic
  import datetime as dt
  from ..core.pydantic_utilities import IS_PYDANTIC_V2
  import typing
+ import pydantic


  class WorkflowReleaseTagWorkflowDeploymentHistoryItem(UniversalBaseModel):
- id: str = pydantic.Field()
- """
- The ID of the Workflow Deployment History Item
- """
-
- timestamp: dt.datetime = pydantic.Field()
- """
- The timestamp representing when this History Item was created
- """
+ id: str
+ timestamp: dt.datetime

  if IS_PYDANTIC_V2:
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
vellum/{types/release.py → resources/prompts/__init__.py} RENAMED
@@ -1,3 +1,3 @@
  # WARNING: This file will be removed in a future release. Please import from "vellum.client" instead.

- from vellum.client.types.release import *
+ from vellum.client.resources.prompts import *
vellum/resources/{workflows/types/__init__.py → prompts/client.py} RENAMED
@@ -1,3 +1,3 @@
  # WARNING: This file will be removed in a future release. Please import from "vellum.client" instead.

- from vellum.client.resources.workflows.types import *
+ from vellum.client.resources.prompts.client import *
vellum/{resources/workflows/types/workflows_pull_request_format.py → types/prompt_exec_config.py} RENAMED
@@ -1,3 +1,3 @@
  # WARNING: This file will be removed in a future release. Please import from "vellum.client" instead.

- from vellum.client.resources.workflows.types.workflows_pull_request_format import *
+ from vellum.client.types.prompt_exec_config import *
vellum/types/release_tag_release.py ADDED
@@ -0,0 +1,3 @@
+ # WARNING: This file will be removed in a future release. Please import from "vellum.client" instead.
+
+ from vellum.client.types.release_tag_release import *
vellum/workflows/events/types.py CHANGED
@@ -28,13 +28,16 @@ def serialize_type_encoder(obj: type) -> Dict[str, Any]:
  }


- def serialize_type_encoder_with_id(obj: type) -> Dict[str, Any]:
- if not hasattr(obj, "__id__"):
- raise AttributeError(f"The object of type '{type(obj).__name__}' must have an '__id__' attribute.")
- return {
- "id": getattr(obj, "__id__"),
- **serialize_type_encoder(obj),
- }
+ def serialize_type_encoder_with_id(obj: Union[type, "CodeResourceDefinition"]) -> Dict[str, Any]:
+ if hasattr(obj, "__id__") and isinstance(obj, type):
+ return {
+ "id": getattr(obj, "__id__"),
+ **serialize_type_encoder(obj),
+ }
+ elif isinstance(obj, CodeResourceDefinition):
+ return obj.model_dump(mode="json")
+
+ raise AttributeError(f"The object of type '{type(obj).__name__}' must have an '__id__' attribute.")


  def default_serializer(obj: Any) -> Any:
vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py CHANGED
@@ -87,13 +87,11 @@ class BaseInlinePromptNode(BasePromptNode[StateType], Generic[StateType]):

  def _get_prompt_event_stream(self) -> Iterator[AdHocExecutePromptEvent]:
  input_variables, input_values = self._compile_prompt_inputs()
- current_context = get_execution_context()
- parent_context = current_context.parent_context
- trace_id = current_context.trace_id
+ execution_context = get_execution_context()
  request_options = self.request_options or RequestOptions()

  request_options["additional_body_parameters"] = {
- "execution_context": {"parent_context": parent_context, "trace_id": trace_id},
+ "execution_context": execution_context.model_dump(mode="json"),
  **request_options.get("additional_body_parameters", {}),
  }
  normalized_functions = (
vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py CHANGED
@@ -55,12 +55,10 @@ class BasePromptDeploymentNode(BasePromptNode, Generic[StateType]):
  merge_behavior = MergeBehavior.AWAIT_ANY

  def _get_prompt_event_stream(self) -> Iterator[ExecutePromptEvent]:
- current_context = get_execution_context()
- trace_id = current_context.trace_id
- parent_context = current_context.parent_context.model_dump() if current_context.parent_context else None
+ execution_context = get_execution_context()
  request_options = self.request_options or RequestOptions()
  request_options["additional_body_parameters"] = {
- "execution_context": {"parent_context": parent_context, "trace_id": trace_id},
+ "execution_context": execution_context.model_dump(mode="json"),
  **request_options.get("additional_body_parameters", {}),
  }
  return self._context.vellum_client.execute_prompt_stream(
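Both prompt node bases now serialize the whole execution context in a single `model_dump(mode="json")` call instead of hand-building the `parent_context`/`trace_id` dict. A toy sketch (not the SDK's `ExecutionContext` model) of why one call produces an equivalent JSON-ready payload:

```python
# Toy models standing in for the SDK's ExecutionContext/ParentContext.
import typing
import uuid

import pydantic


class ToyParentContext(pydantic.BaseModel):
    node_id: str


class ToyExecutionContext(pydantic.BaseModel):
    trace_id: uuid.UUID
    parent_context: typing.Optional[ToyParentContext] = None


ctx = ToyExecutionContext(
    trace_id=uuid.uuid4(),
    parent_context=ToyParentContext(node_id="my-node"),
)
# One call yields a JSON-ready dict containing both trace_id and parent_context.
print(ctx.model_dump(mode="json"))
# {'trace_id': '…', 'parent_context': {'node_id': 'my-node'}}
```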