vellum-ai 0.12.13__py3-none-any.whl → 0.12.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. vellum/__init__.py +9 -0
  2. vellum/client/__init__.py +2 -6
  3. vellum/client/core/client_wrapper.py +1 -1
  4. vellum/client/environment.py +3 -3
  5. vellum/client/resources/ad_hoc/client.py +2 -6
  6. vellum/client/resources/container_images/client.py +0 -8
  7. vellum/client/resources/metric_definitions/client.py +2 -6
  8. vellum/client/resources/workflows/client.py +8 -8
  9. vellum/client/types/__init__.py +6 -0
  10. vellum/client/types/audio_prompt_block.py +29 -0
  11. vellum/client/types/function_call_prompt_block.py +30 -0
  12. vellum/client/types/image_prompt_block.py +29 -0
  13. vellum/client/types/prompt_block.py +12 -1
  14. vellum/client/types/workflow_push_response.py +1 -0
  15. vellum/plugins/pydantic.py +12 -2
  16. vellum/types/audio_prompt_block.py +3 -0
  17. vellum/types/function_call_prompt_block.py +3 -0
  18. vellum/types/image_prompt_block.py +3 -0
  19. vellum/workflows/descriptors/tests/test_utils.py +3 -0
  20. vellum/workflows/nodes/bases/base.py +4 -1
  21. vellum/workflows/nodes/bases/base_adornment_node.py +75 -0
  22. vellum/workflows/nodes/bases/tests/test_base_node.py +13 -0
  23. vellum/workflows/nodes/core/inline_subworkflow_node/node.py +2 -0
  24. vellum/workflows/nodes/core/map_node/node.py +49 -45
  25. vellum/workflows/nodes/core/retry_node/node.py +10 -45
  26. vellum/workflows/nodes/core/try_node/node.py +12 -84
  27. vellum/workflows/nodes/utils.py +44 -1
  28. vellum/workflows/references/constant.py +21 -0
  29. vellum/workflows/runner/runner.py +4 -3
  30. vellum/workflows/types/cycle_map.py +34 -0
  31. vellum/workflows/workflows/base.py +4 -11
  32. {vellum_ai-0.12.13.dist-info → vellum_ai-0.12.15.dist-info}/METADATA +2 -2
  33. {vellum_ai-0.12.13.dist-info → vellum_ai-0.12.15.dist-info}/RECORD +52 -39
  34. vellum_cli/config.py +4 -0
  35. vellum_cli/pull.py +20 -5
  36. vellum_cli/push.py +7 -0
  37. vellum_cli/tests/test_pull.py +19 -1
  38. vellum_ee/workflows/display/nodes/vellum/__init__.py +2 -0
  39. vellum_ee/workflows/display/nodes/vellum/base_node.py +18 -0
  40. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +10 -41
  41. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_error_node_serialization.py +4 -14
  42. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_generic_node_serialization.py +174 -0
  43. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_subworkflow_serialization.py +2 -10
  44. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +2 -10
  45. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_merge_node_serialization.py +5 -19
  46. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_try_node_serialization.py +2 -8
  47. vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_node_serialization.py +14 -25
  48. vellum_ee/workflows/server/__init__.py +0 -0
  49. vellum_ee/workflows/server/virtual_file_loader.py +42 -0
  50. {vellum_ai-0.12.13.dist-info → vellum_ai-0.12.15.dist-info}/LICENSE +0 -0
  51. {vellum_ai-0.12.13.dist-info → vellum_ai-0.12.15.dist-info}/WHEEL +0 -0
  52. {vellum_ai-0.12.13.dist-info → vellum_ai-0.12.15.dist-info}/entry_points.txt +0 -0
vellum/__init__.py CHANGED
@@ -1,4 +1,7 @@
  # This file was auto-generated by Fern from our API Definition.
+ from .plugins.utils import load_runtime_plugins
+
+ load_runtime_plugins()

  from .types import (
      AdHocExecutePromptEvent,
@@ -21,6 +24,7 @@ from .types import (
      ArrayVellumValueRequest,
      AudioChatMessageContent,
      AudioChatMessageContentRequest,
+     AudioPromptBlock,
      AudioVariableValue,
      AudioVellumValue,
      AudioVellumValueRequest,
@@ -131,6 +135,7 @@ from .types import (
      FunctionCallChatMessageContentValue,
      FunctionCallChatMessageContentValueRequest,
      FunctionCallInput,
+     FunctionCallPromptBlock,
      FunctionCallRequest,
      FunctionCallVariableValue,
      FunctionCallVellumValue,
@@ -155,6 +160,7 @@ from .types import (
      HkunlpInstructorXlVectorizerRequest,
      ImageChatMessageContent,
      ImageChatMessageContentRequest,
+     ImagePromptBlock,
      ImageVariableValue,
      ImageVellumValue,
      ImageVellumValueRequest,
@@ -545,6 +551,7 @@ __all__ = [
      "AsyncVellum",
      "AudioChatMessageContent",
      "AudioChatMessageContentRequest",
+     "AudioPromptBlock",
      "AudioVariableValue",
      "AudioVellumValue",
      "AudioVellumValueRequest",
@@ -660,6 +667,7 @@ __all__ = [
      "FunctionCallChatMessageContentValue",
      "FunctionCallChatMessageContentValueRequest",
      "FunctionCallInput",
+     "FunctionCallPromptBlock",
      "FunctionCallRequest",
      "FunctionCallVariableValue",
      "FunctionCallVellumValue",
@@ -684,6 +692,7 @@ __all__ = [
      "HkunlpInstructorXlVectorizerRequest",
      "ImageChatMessageContent",
      "ImageChatMessageContentRequest",
+     "ImagePromptBlock",
      "ImageVariableValue",
      "ImageVellumValue",
      "ImageVellumValueRequest",
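Two user-visible effects of this file's changes: importing the package now calls load_runtime_plugins() as a side effect, and the three new prompt block types are re-exported at the top level. A minimal sketch, using only names confirmed by the __all__ additions above:

    import vellum  # importing the package now runs load_runtime_plugins() at import time

    from vellum import AudioPromptBlock, FunctionCallPromptBlock, ImagePromptBlock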
vellum/client/__init__.py CHANGED
@@ -154,8 +154,6 @@ class Vellum:
          request_options: typing.Optional[RequestOptions] = None,
      ) -> CodeExecutorResponse:
          """
-         An internal-only endpoint that's subject to breaking changes without notice. Not intended for public use.
-
          Parameters
          ----------
          code : str
@@ -203,7 +201,7 @@ class Vellum:
          """
          _response = self._client_wrapper.httpx_client.request(
              "v1/execute-code",
-             base_url=self._client_wrapper.get_environment().default,
+             base_url=self._client_wrapper.get_environment().predict,
              method="POST",
              json={
                  "code": code,
@@ -1419,8 +1417,6 @@ class AsyncVellum:
          request_options: typing.Optional[RequestOptions] = None,
      ) -> CodeExecutorResponse:
          """
-         An internal-only endpoint that's subject to breaking changes without notice. Not intended for public use.
-
          Parameters
          ----------
          code : str
@@ -1476,7 +1472,7 @@ class AsyncVellum:
          """
          _response = await self._client_wrapper.httpx_client.request(
              "v1/execute-code",
-             base_url=self._client_wrapper.get_environment().default,
+             base_url=self._client_wrapper.get_environment().predict,
              method="POST",
              json={
                  "code": code,
vellum/client/core/client_wrapper.py CHANGED
@@ -18,7 +18,7 @@ class BaseClientWrapper:
          headers: typing.Dict[str, str] = {
              "X-Fern-Language": "Python",
              "X-Fern-SDK-Name": "vellum-ai",
-             "X-Fern-SDK-Version": "0.12.13",
+             "X-Fern-SDK-Version": "0.12.15",
          }
          headers["X_API_KEY"] = self.api_key
          return headers
vellum/client/environment.py CHANGED
@@ -6,12 +6,12 @@ from __future__ import annotations
  class VellumEnvironment:
      PRODUCTION: VellumEnvironment

-     def __init__(self, *, default: str, documents: str, predict: str):
+     def __init__(self, *, default: str, predict: str, documents: str):
          self.default = default
-         self.documents = documents
          self.predict = predict
+         self.documents = documents


  VellumEnvironment.PRODUCTION = VellumEnvironment(
-     default="https://api.vellum.ai", documents="https://documents.vellum.ai", predict="https://predict.vellum.ai"
+     default="https://api.vellum.ai", predict="https://predict.vellum.ai", documents="https://documents.vellum.ai"
  )
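Because __init__ is keyword-only (note the bare *), swapping the positions of predict and documents is purely cosmetic and does not change any call site. A short sketch of constructing a custom environment with the signature shown above; the hostnames are placeholders:

    from vellum.client.environment import VellumEnvironment

    # Keyword-only arguments, so argument order does not matter.
    staging = VellumEnvironment(
        default="https://api.staging.example.com",        # placeholder host
        predict="https://predict.staging.example.com",    # placeholder host
        documents="https://documents.staging.example.com",
    )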
vellum/client/resources/ad_hoc/client.py CHANGED
@@ -43,8 +43,6 @@ class AdHocClient:
          request_options: typing.Optional[RequestOptions] = None,
      ) -> typing.Iterator[AdHocExecutePromptEvent]:
          """
-         An internal-only endpoint that's subject to breaking changes without notice. Not intended for public use.
-
          Parameters
          ----------
          ml_model : str
@@ -111,7 +109,7 @@ class AdHocClient:
          """
          with self._client_wrapper.httpx_client.stream(
              "v1/ad-hoc/execute-prompt-stream",
-             base_url=self._client_wrapper.get_environment().default,
+             base_url=self._client_wrapper.get_environment().predict,
              method="POST",
              json={
                  "ml_model": ml_model,
@@ -211,8 +209,6 @@ class AsyncAdHocClient:
          request_options: typing.Optional[RequestOptions] = None,
      ) -> typing.AsyncIterator[AdHocExecutePromptEvent]:
          """
-         An internal-only endpoint that's subject to breaking changes without notice. Not intended for public use.
-
          Parameters
          ----------
          ml_model : str
@@ -287,7 +283,7 @@ class AsyncAdHocClient:
          """
          async with self._client_wrapper.httpx_client.stream(
              "v1/ad-hoc/execute-prompt-stream",
-             base_url=self._client_wrapper.get_environment().default,
+             base_url=self._client_wrapper.get_environment().predict,
              method="POST",
              json={
                  "ml_model": ml_model,
vellum/client/resources/container_images/client.py CHANGED
@@ -134,8 +134,6 @@ class ContainerImagesClient:

      def docker_service_token(self, *, request_options: typing.Optional[RequestOptions] = None) -> DockerServiceToken:
          """
-         An internal-only endpoint that's subject to breaking changes without notice. Not intended for public use.
-
          Parameters
          ----------
          request_options : typing.Optional[RequestOptions]
@@ -184,8 +182,6 @@ class ContainerImagesClient:
          request_options: typing.Optional[RequestOptions] = None,
      ) -> ContainerImageRead:
          """
-         An internal-only endpoint that's subject to breaking changes without notice. Not intended for public use.
-
          Parameters
          ----------
          name : str
@@ -378,8 +374,6 @@ class AsyncContainerImagesClient:
          self, *, request_options: typing.Optional[RequestOptions] = None
      ) -> DockerServiceToken:
          """
-         An internal-only endpoint that's subject to breaking changes without notice. Not intended for public use.
-
          Parameters
          ----------
          request_options : typing.Optional[RequestOptions]
@@ -436,8 +430,6 @@ class AsyncContainerImagesClient:
          request_options: typing.Optional[RequestOptions] = None,
      ) -> ContainerImageRead:
          """
-         An internal-only endpoint that's subject to breaking changes without notice. Not intended for public use.
-
          Parameters
          ----------
          name : str
vellum/client/resources/metric_definitions/client.py CHANGED
@@ -30,8 +30,6 @@ class MetricDefinitionsClient:
          request_options: typing.Optional[RequestOptions] = None,
      ) -> MetricDefinitionExecution:
          """
-         An internal-only endpoint that's subject to breaking changes without notice. Not intended for public use.
-
          Parameters
          ----------
          id : str
@@ -68,7 +66,7 @@ class MetricDefinitionsClient:
          """
          _response = self._client_wrapper.httpx_client.request(
              f"v1/metric-definitions/{jsonable_encoder(id)}/execute",
-             base_url=self._client_wrapper.get_environment().default,
+             base_url=self._client_wrapper.get_environment().predict,
              method="POST",
              json={
                  "inputs": convert_and_respect_annotation_metadata(
@@ -159,8 +157,6 @@ class AsyncMetricDefinitionsClient:
          request_options: typing.Optional[RequestOptions] = None,
      ) -> MetricDefinitionExecution:
          """
-         An internal-only endpoint that's subject to breaking changes without notice. Not intended for public use.
-
          Parameters
          ----------
          id : str
@@ -205,7 +201,7 @@ class AsyncMetricDefinitionsClient:
          """
          _response = await self._client_wrapper.httpx_client.request(
              f"v1/metric-definitions/{jsonable_encoder(id)}/execute",
-             base_url=self._client_wrapper.get_environment().default,
+             base_url=self._client_wrapper.get_environment().predict,
              method="POST",
              json={
                  "inputs": convert_and_respect_annotation_metadata(
vellum/client/resources/workflows/client.py CHANGED
@@ -35,8 +35,6 @@ class WorkflowsClient:
          request_options: typing.Optional[RequestOptions] = None,
      ) -> typing.Iterator[bytes]:
          """
-         An internal-only endpoint that's subject to breaking changes without notice. Not intended for public use.
-
          Parameters
          ----------
          id : str
@@ -102,11 +100,10 @@ class WorkflowsClient:
          workflow_sandbox_id: typing.Optional[str] = OMIT,
          deployment_config: typing.Optional[WorkflowPushDeploymentConfigRequest] = OMIT,
          artifact: typing.Optional[core.File] = OMIT,
+         dry_run: typing.Optional[bool] = OMIT,
          request_options: typing.Optional[RequestOptions] = None,
      ) -> WorkflowPushResponse:
          """
-         An internal-only endpoint that's subject to breaking changes without notice. Not intended for public use.
-
          Parameters
          ----------
          exec_config : WorkflowPushExecConfig
@@ -121,6 +118,8 @@ class WorkflowsClient:
          artifact : typing.Optional[core.File]
              See core.File for more documentation

+         dry_run : typing.Optional[bool]
+
          request_options : typing.Optional[RequestOptions]
              Request-specific configuration.

@@ -150,6 +149,7 @@ class WorkflowsClient:
                  "label": label,
                  "workflow_sandbox_id": workflow_sandbox_id,
                  "deployment_config": deployment_config,
+                 "dry_run": dry_run,
              },
              files={
                  "artifact": artifact,
@@ -188,8 +188,6 @@ class AsyncWorkflowsClient:
          request_options: typing.Optional[RequestOptions] = None,
      ) -> typing.AsyncIterator[bytes]:
          """
-         An internal-only endpoint that's subject to breaking changes without notice. Not intended for public use.
-
          Parameters
          ----------
          id : str
@@ -255,11 +253,10 @@ class AsyncWorkflowsClient:
          workflow_sandbox_id: typing.Optional[str] = OMIT,
          deployment_config: typing.Optional[WorkflowPushDeploymentConfigRequest] = OMIT,
          artifact: typing.Optional[core.File] = OMIT,
+         dry_run: typing.Optional[bool] = OMIT,
          request_options: typing.Optional[RequestOptions] = None,
      ) -> WorkflowPushResponse:
          """
-         An internal-only endpoint that's subject to breaking changes without notice. Not intended for public use.
-
          Parameters
          ----------
          exec_config : WorkflowPushExecConfig
@@ -274,6 +271,8 @@ class AsyncWorkflowsClient:
          artifact : typing.Optional[core.File]
              See core.File for more documentation

+         dry_run : typing.Optional[bool]
+
          request_options : typing.Optional[RequestOptions]
              Request-specific configuration.

@@ -311,6 +310,7 @@ class AsyncWorkflowsClient:
                  "label": label,
                  "workflow_sandbox_id": workflow_sandbox_id,
                  "deployment_config": deployment_config,
+                 "dry_run": dry_run,
              },
              files={
                  "artifact": artifact,
vellum/client/types/__init__.py CHANGED
@@ -20,6 +20,7 @@ from .array_vellum_value import ArrayVellumValue
  from .array_vellum_value_request import ArrayVellumValueRequest
  from .audio_chat_message_content import AudioChatMessageContent
  from .audio_chat_message_content_request import AudioChatMessageContentRequest
+ from .audio_prompt_block import AudioPromptBlock
  from .audio_variable_value import AudioVariableValue
  from .audio_vellum_value import AudioVellumValue
  from .audio_vellum_value_request import AudioVellumValueRequest
@@ -138,6 +139,7 @@ from .function_call_chat_message_content_request import FunctionCallChatMessageC
  from .function_call_chat_message_content_value import FunctionCallChatMessageContentValue
  from .function_call_chat_message_content_value_request import FunctionCallChatMessageContentValueRequest
  from .function_call_input import FunctionCallInput
+ from .function_call_prompt_block import FunctionCallPromptBlock
  from .function_call_request import FunctionCallRequest
  from .function_call_variable_value import FunctionCallVariableValue
  from .function_call_vellum_value import FunctionCallVellumValue
@@ -166,6 +168,7 @@ from .hkunlp_instructor_xl_vectorizer import HkunlpInstructorXlVectorizer
  from .hkunlp_instructor_xl_vectorizer_request import HkunlpInstructorXlVectorizerRequest
  from .image_chat_message_content import ImageChatMessageContent
  from .image_chat_message_content_request import ImageChatMessageContentRequest
+ from .image_prompt_block import ImagePromptBlock
  from .image_variable_value import ImageVariableValue
  from .image_vellum_value import ImageVellumValue
  from .image_vellum_value_request import ImageVellumValueRequest
@@ -542,6 +545,7 @@ __all__ = [
      "ArrayVellumValueRequest",
      "AudioChatMessageContent",
      "AudioChatMessageContentRequest",
+     "AudioPromptBlock",
      "AudioVariableValue",
      "AudioVellumValue",
      "AudioVellumValueRequest",
@@ -652,6 +656,7 @@ __all__ = [
      "FunctionCallChatMessageContentValue",
      "FunctionCallChatMessageContentValueRequest",
      "FunctionCallInput",
+     "FunctionCallPromptBlock",
      "FunctionCallRequest",
      "FunctionCallVariableValue",
      "FunctionCallVellumValue",
@@ -676,6 +681,7 @@ __all__ = [
      "HkunlpInstructorXlVectorizerRequest",
      "ImageChatMessageContent",
      "ImageChatMessageContentRequest",
+     "ImagePromptBlock",
      "ImageVariableValue",
      "ImageVellumValue",
      "ImageVellumValueRequest",
vellum/client/types/audio_prompt_block.py ADDED
@@ -0,0 +1,29 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ from ..core.pydantic_utilities import UniversalBaseModel
+ import typing
+ from .prompt_block_state import PromptBlockState
+ from .ephemeral_prompt_cache_config import EphemeralPromptCacheConfig
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2
+ import pydantic
+
+
+ class AudioPromptBlock(UniversalBaseModel):
+     """
+     A block that represents an audio file in a prompt template.
+     """
+
+     block_type: typing.Literal["AUDIO"] = "AUDIO"
+     state: typing.Optional[PromptBlockState] = None
+     cache_config: typing.Optional[EphemeralPromptCacheConfig] = None
+     src: str
+     metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
+
+     if IS_PYDANTIC_V2:
+         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+     else:
+
+         class Config:
+             frozen = True
+             smart_union = True
+             extra = pydantic.Extra.allow
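A quick sketch of instantiating the new model; only src is required, and because the config sets frozen=True the instance is immutable after construction. Field values below are illustrative:

    from vellum.client.types.audio_prompt_block import AudioPromptBlock

    block = AudioPromptBlock(
        src="https://example.com/greeting.mp3",   # required
        metadata={"duration_seconds": 4},          # optional free-form dict
    )
    # block.src = "other.mp3"  # would raise: the model is frozen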
vellum/client/types/function_call_prompt_block.py ADDED
@@ -0,0 +1,30 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ from ..core.pydantic_utilities import UniversalBaseModel
+ import typing
+ from .prompt_block_state import PromptBlockState
+ from .ephemeral_prompt_cache_config import EphemeralPromptCacheConfig
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2
+ import pydantic
+
+
+ class FunctionCallPromptBlock(UniversalBaseModel):
+     """
+     A block that represents a function call in a prompt template.
+     """
+
+     block_type: typing.Literal["FUNCTION_CALL"] = "FUNCTION_CALL"
+     state: typing.Optional[PromptBlockState] = None
+     cache_config: typing.Optional[EphemeralPromptCacheConfig] = None
+     id: typing.Optional[str] = None
+     name: str
+     arguments: typing.Dict[str, typing.Optional[typing.Any]]
+
+     if IS_PYDANTIC_V2:
+         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+     else:
+
+         class Config:
+             frozen = True
+             smart_union = True
+             extra = pydantic.Extra.allow
vellum/client/types/image_prompt_block.py ADDED
@@ -0,0 +1,29 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ from ..core.pydantic_utilities import UniversalBaseModel
+ import typing
+ from .prompt_block_state import PromptBlockState
+ from .ephemeral_prompt_cache_config import EphemeralPromptCacheConfig
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2
+ import pydantic
+
+
+ class ImagePromptBlock(UniversalBaseModel):
+     """
+     A block that represents an image in a prompt template.
+     """
+
+     block_type: typing.Literal["IMAGE"] = "IMAGE"
+     state: typing.Optional[PromptBlockState] = None
+     cache_config: typing.Optional[EphemeralPromptCacheConfig] = None
+     src: str
+     metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None
+
+     if IS_PYDANTIC_V2:
+         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+     else:
+
+         class Config:
+             frozen = True
+             smart_union = True
+             extra = pydantic.Extra.allow
vellum/client/types/prompt_block.py CHANGED
@@ -5,8 +5,19 @@ import typing
  from .jinja_prompt_block import JinjaPromptBlock
  from .variable_prompt_block import VariablePromptBlock
  from .rich_text_prompt_block import RichTextPromptBlock
+ from .audio_prompt_block import AudioPromptBlock
+ from .function_call_prompt_block import FunctionCallPromptBlock
+ from .image_prompt_block import ImagePromptBlock
  import typing

  if typing.TYPE_CHECKING:
      from .chat_message_prompt_block import ChatMessagePromptBlock
- PromptBlock = typing.Union[JinjaPromptBlock, "ChatMessagePromptBlock", VariablePromptBlock, RichTextPromptBlock]
+ PromptBlock = typing.Union[
+     JinjaPromptBlock,
+     "ChatMessagePromptBlock",
+     VariablePromptBlock,
+     RichTextPromptBlock,
+     AudioPromptBlock,
+     FunctionCallPromptBlock,
+     ImagePromptBlock,
+ ]
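With the widened union, a heterogeneous list of blocks still type-checks as a list of PromptBlock. A small sketch using only the types and required fields confirmed by the new files above; the field values are illustrative:

    from typing import List

    from vellum.client.types.function_call_prompt_block import FunctionCallPromptBlock
    from vellum.client.types.image_prompt_block import ImagePromptBlock
    from vellum.client.types.prompt_block import PromptBlock

    blocks: List[PromptBlock] = [
        ImagePromptBlock(src="https://example.com/photo.png"),
        FunctionCallPromptBlock(name="lookup_user", arguments={"user_id": "123"}),
    ]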
vellum/client/types/workflow_push_response.py CHANGED
@@ -9,6 +9,7 @@ import pydantic
  class WorkflowPushResponse(UniversalBaseModel):
      workflow_sandbox_id: str
      workflow_deployment_id: typing.Optional[str] = None
+     proposed_diffs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None

      if IS_PYDANTIC_V2:
          model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
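The new proposed_diffs field is optional and defaults to None, so existing callers are unaffected; when present it is a free-form dict. A minimal sketch of reading it; the UUID and the dict shape are placeholders, not a documented schema:

    from vellum.client.types.workflow_push_response import WorkflowPushResponse

    response = WorkflowPushResponse(
        workflow_sandbox_id="00000000-0000-0000-0000-000000000000",  # placeholder id
        proposed_diffs={"nodes": {"added": [], "removed": []}},       # illustrative shape only
    )
    if response.proposed_diffs is not None:
        print(response.proposed_diffs)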
vellum/plugins/pydantic.py CHANGED
@@ -1,3 +1,4 @@
+ from functools import lru_cache
  from typing import Any, Dict, Literal, Optional, Tuple, Union

  from pydantic.plugin import (
@@ -10,12 +11,20 @@ from pydantic.plugin import (
  )
  from pydantic_core import CoreSchema

- from vellum.workflows.descriptors.base import BaseDescriptor
+
+ @lru_cache(maxsize=1)
+ def import_base_descriptor():
+     """
+     We have to avoid importing from vellum.* in this file because it will cause a circular import.
+     """
+     from vellum.workflows.descriptors.base import BaseDescriptor
+
+     return BaseDescriptor


  # https://docs.pydantic.dev/2.8/concepts/plugins/#build-a-plugin
  class OnValidatePython(ValidatePythonHandlerProtocol):
-     tracked_descriptors: Dict[str, BaseDescriptor] = {}
+     tracked_descriptors: Dict[str, Any] = {}
      def on_enter(
          self,
@@ -31,6 +40,7 @@ class OnValidatePython(ValidatePythonHandlerProtocol):
              return

          self.tracked_descriptors = {}
+         BaseDescriptor = import_base_descriptor()

          for key, value in input.items():
              if isinstance(value, BaseDescriptor):
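The change replaces a module-level import with a cached lazy import so the pydantic plugin no longer pulls in vellum.workflows at module load, breaking a circular dependency. The same pattern in isolation, as a generic sketch; the module and class names here are hypothetical:

    from functools import lru_cache

    @lru_cache(maxsize=1)
    def _import_heavy_dependency():
        # Deferring the import to call time avoids a circular import at module load;
        # lru_cache ensures the import is only resolved once.
        from some_package.some_module import SomeClass  # hypothetical module

        return SomeClass

    def handler(value):
        SomeClass = _import_heavy_dependency()
        return isinstance(value, SomeClass)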
vellum/types/audio_prompt_block.py ADDED
@@ -0,0 +1,3 @@
+ # WARNING: This file will be removed in a future release. Please import from "vellum.client" instead.
+
+ from vellum.client.types.audio_prompt_block import *
vellum/types/function_call_prompt_block.py ADDED
@@ -0,0 +1,3 @@
+ # WARNING: This file will be removed in a future release. Please import from "vellum.client" instead.
+
+ from vellum.client.types.function_call_prompt_block import *
vellum/types/image_prompt_block.py ADDED
@@ -0,0 +1,3 @@
+ # WARNING: This file will be removed in a future release. Please import from "vellum.client" instead.
+
+ from vellum.client.types.image_prompt_block import *
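These three modules are thin re-export shims kept for backwards compatibility; per their warning comment, the vellum.client path is the one to prefer. Because the shim star-imports the same module, both paths currently resolve to the same class object:

    # Deprecated path, still works for now:
    from vellum.types.image_prompt_block import ImagePromptBlock as LegacyImagePromptBlock

    # Preferred path going forward, per the warning comment:
    from vellum.client.types.image_prompt_block import ImagePromptBlock

    assert LegacyImagePromptBlock is ImagePromptBlock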
vellum/workflows/descriptors/tests/test_utils.py CHANGED
@@ -2,6 +2,7 @@ import pytest

  from vellum.workflows.descriptors.utils import resolve_value
  from vellum.workflows.nodes.bases.base import BaseNode
+ from vellum.workflows.references.constant import ConstantValueReference
  from vellum.workflows.state.base import BaseState


@@ -73,6 +74,7 @@ class DummyNode(BaseNode[FixtureState]):
              True,
          ),
          (FixtureState.zeta["foo"], "bar"),
+         (ConstantValueReference(1), 1),
      ],
      ids=[
          "or",
@@ -116,6 +118,7 @@ class DummyNode(BaseNode[FixtureState]):
          "is_not_blank",
          "or_and",
          "accessor",
+         "constants",
      ],
  )
  def test_resolve_value__happy_path(descriptor, expected_value):
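The new parametrize row exercises ConstantValueReference, which resolves to the wrapped literal regardless of state. A minimal sketch of constructing one; the exact resolve_value call signature used by the test is not shown in this diff:

    from vellum.workflows.references.constant import ConstantValueReference

    ref = ConstantValueReference(1)
    # Per the new parametrize row above, resolving this reference against any state
    # yields the literal 1.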
vellum/workflows/nodes/bases/base.py CHANGED
@@ -214,6 +214,9 @@ class _BaseNodeExecutionMeta(type):
          return self_execution_class.node_class.__name__ == other_execution_class.node_class.__name__


+ NodeRunResponse = Union[BaseOutputs, Iterator[BaseOutput]]
+
+
  class BaseNode(Generic[StateType], metaclass=BaseNodeMeta):
      __id__: UUID = uuid4_from_hash(__qualname__)
      state: StateType
@@ -350,7 +353,7 @@ class BaseNode(Generic[StateType], metaclass=BaseNodeMeta):

          self._inputs = MappingProxyType(all_inputs)

-     def run(self) -> Union[BaseOutputs, Iterator[BaseOutput]]:
+     def run(self) -> NodeRunResponse:
          return self.Outputs()

      def __repr__(self) -> str:
vellum/workflows/nodes/bases/base_adornment_node.py ADDED
@@ -0,0 +1,75 @@
+ from typing import TYPE_CHECKING, Any, Dict, Generic, Optional, Tuple, Type
+
+ from vellum.workflows.nodes.bases.base import BaseNode, BaseNodeMeta
+ from vellum.workflows.outputs.base import BaseOutputs
+ from vellum.workflows.references.output import OutputReference
+ from vellum.workflows.types.generics import StateType
+
+ if TYPE_CHECKING:
+     from vellum.workflows import BaseWorkflow
+
+
+ class _BaseAdornmentNodeMeta(BaseNodeMeta):
+     def __new__(cls, name: str, bases: Tuple[Type, ...], dct: Dict[str, Any]) -> Any:
+         node_class = super().__new__(cls, name, bases, dct)
+
+         subworkflow_attribute = dct.get("subworkflow")
+         if not subworkflow_attribute:
+             return node_class
+
+         if not issubclass(node_class, BaseAdornmentNode):
+             raise ValueError("BaseAdornableNodeMeta can only be used on subclasses of BaseAdornableNode")
+
+         subworkflow_outputs = getattr(subworkflow_attribute, "Outputs")
+         if not issubclass(subworkflow_outputs, BaseOutputs):
+             raise ValueError("subworkflow.Outputs must be a subclass of BaseOutputs")
+
+         outputs_class = dct.get("Outputs")
+         if not outputs_class:
+             raise ValueError("Outputs class not found in base classes")
+
+         if not issubclass(outputs_class, BaseNode.Outputs):
+             raise ValueError("Outputs class must be a subclass of BaseNode.Outputs")
+
+         for descriptor in subworkflow_outputs:
+             node_class.__annotate_outputs_class__(outputs_class, descriptor)
+
+         return node_class
+
+     def __getattribute__(cls, name: str) -> Any:
+         try:
+             return super().__getattribute__(name)
+         except AttributeError:
+             if name != "__wrapped_node__" and issubclass(cls, BaseAdornmentNode):
+                 return getattr(cls.__wrapped_node__, name)
+             raise
+
+     @property
+     def _localns(cls) -> Dict[str, Any]:
+         if not hasattr(cls, "SubworkflowInputs"):
+             return super()._localns
+
+         return {
+             **super()._localns,
+             "SubworkflowInputs": getattr(cls, "SubworkflowInputs"),
+         }
+
+
+ class BaseAdornmentNode(
+     BaseNode[StateType],
+     Generic[StateType],
+     metaclass=_BaseAdornmentNodeMeta,
+ ):
+     """
+     A base node that enables the node to be used as an adornment - meaning it can wrap another node. The
+     wrapped node is stored in the `__wrapped_node__` attribute and is redefined as a single-node subworkflow.
+     """
+
+     __wrapped_node__: Optional[Type["BaseNode"]] = None
+     subworkflow: Type["BaseWorkflow"]
+
+     @classmethod
+     def __annotate_outputs_class__(cls, outputs_class: Type[BaseOutputs], reference: OutputReference) -> None:
+         # Subclasses of BaseAdornableNode can override this method to provider their own
+         # approach to annotating the outputs class based on the `subworkflow.Outputs`
+         setattr(outputs_class, reference.name, reference)
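The metaclass's __getattribute__ override is what lets an adornment behave like the node it wraps: attribute lookups that miss on the adornment fall through to __wrapped_node__. A standalone sketch of that delegation pattern, independent of the vellum classes; all names here are hypothetical:

    class _DelegatingMeta(type):
        def __getattribute__(cls, name):
            try:
                return super().__getattribute__(name)
            except AttributeError:
                # Fall back to the wrapped class, mirroring the __wrapped_node__ lookup above.
                wrapped = super().__getattribute__("__wrapped__")
                if name != "__wrapped__" and wrapped is not None:
                    return getattr(wrapped, name)
                raise

    class Inner:
        greeting = "hello"

    class Wrapper(metaclass=_DelegatingMeta):
        __wrapped__ = Inner

    print(Wrapper.greeting)  # "hello", resolved via Inner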
vellum/workflows/nodes/bases/tests/test_base_node.py CHANGED
@@ -1,6 +1,7 @@
  from uuid import UUID
  from typing import Optional

+ from vellum.client.types.string_vellum_value_request import StringVellumValueRequest
  from vellum.core.pydantic_utilities import UniversalBaseModel
  from vellum.workflows.inputs.base import BaseInputs
  from vellum.workflows.nodes.bases.base import BaseNode
@@ -135,3 +136,15 @@ def test_base_node__default_id():

      # THEN it should equal the hash of `test_base_node__default_id.<locals>.MyNode`
      assert my_id == UUID("8e71bea7-ce68-492f-9abe-477c788e6273")
+
+
+ def test_base_node__node_resolution__descriptor_in_fern_pydantic():
+     class State(BaseState):
+         foo: str
+
+     class SomeNode(BaseNode):
+         model = StringVellumValueRequest(value=State.foo)
+
+     node = SomeNode(state=State(foo="bar"))
+
+     assert node.model.value == "bar"