vellum-ai 0.14.73__py3-none-any.whl → 0.14.75__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33) hide show
  1. vellum/client/core/client_wrapper.py +1 -1
  2. vellum/client/core/serialization.py +0 -1
  3. vellum/client/reference.md +9 -1
  4. vellum/client/resources/prompts/client.py +16 -4
  5. vellum/client/types/secret_type_enum.py +3 -1
  6. vellum/workflows/nodes/bases/base.py +0 -15
  7. vellum/workflows/nodes/core/map_node/tests/test_node.py +54 -0
  8. vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +2 -2
  9. vellum/workflows/nodes/displayable/bases/utils.py +2 -0
  10. vellum/workflows/nodes/experimental/tool_calling_node/node.py +5 -1
  11. vellum/workflows/nodes/experimental/tool_calling_node/tests/test_node.py +13 -0
  12. vellum/workflows/nodes/experimental/tool_calling_node/utils.py +14 -4
  13. vellum/workflows/outputs/base.py +26 -2
  14. vellum/workflows/state/encoder.py +2 -0
  15. vellum/workflows/types/definition.py +1 -1
  16. vellum/workflows/types/generics.py +5 -0
  17. vellum/workflows/utils/functions.py +3 -3
  18. vellum/workflows/utils/tests/test_functions.py +7 -7
  19. {vellum_ai-0.14.73.dist-info → vellum_ai-0.14.75.dist-info}/METADATA +1 -1
  20. {vellum_ai-0.14.73.dist-info → vellum_ai-0.14.75.dist-info}/RECORD +33 -32
  21. vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +36 -7
  22. vellum_ee/workflows/display/nodes/vellum/tests/test_prompt_node.py +102 -0
  23. vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_attributes_serialization.py +90 -0
  24. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_inline_prompt_node_serialization.py +117 -0
  25. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_inline_workflow_serialization.py +7 -0
  26. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_serialization.py +7 -0
  27. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_workflow_deployment_serialization.py +62 -0
  28. vellum_ee/workflows/display/utils/expressions.py +33 -2
  29. vellum_ee/workflows/display/workflows/base_workflow_display.py +20 -6
  30. vellum_ee/workflows/tests/test_display_meta.py +41 -0
  31. {vellum_ai-0.14.73.dist-info → vellum_ai-0.14.75.dist-info}/LICENSE +0 -0
  32. {vellum_ai-0.14.73.dist-info → vellum_ai-0.14.75.dist-info}/WHEEL +0 -0
  33. {vellum_ai-0.14.73.dist-info → vellum_ai-0.14.75.dist-info}/entry_points.txt +0 -0
@@ -18,7 +18,7 @@ class BaseClientWrapper:
18
18
  headers: typing.Dict[str, str] = {
19
19
  "X-Fern-Language": "Python",
20
20
  "X-Fern-SDK-Name": "vellum-ai",
21
- "X-Fern-SDK-Version": "0.14.73",
21
+ "X-Fern-SDK-Version": "0.14.75",
22
22
  }
23
23
  headers["X-API-KEY"] = self.api_key
24
24
  return headers
@@ -184,7 +184,6 @@ def _convert_mapping(
184
184
  converted_object[_alias_key(key, type_, direction, aliases_to_field_names)] = (
185
185
  convert_and_respect_annotation_metadata(object_=value, annotation=type_, direction=direction)
186
186
  )
187
-
188
187
  return converted_object
189
188
 
190
189
 
@@ -4270,7 +4270,15 @@ client.prompts.push(
4270
4270
  <dl>
4271
4271
  <dd>
4272
4272
 
4273
- **prompt_variant_id:** `typing.Optional[str]`
4273
+ **prompt_variant_id:** `typing.Optional[str]` — If specified, an existing Prompt Variant by the provided ID will be updated. Otherwise, a new Prompt Variant will be created and an ID generated.
4274
+
4275
+ </dd>
4276
+ </dl>
4277
+
4278
+ <dl>
4279
+ <dd>
4280
+
4281
+ **prompt_variant_label:** `typing.Optional[str]` — If provided, then the created/updated Prompt Variant will have this label.
4274
4282
 
4275
4283
  </dd>
4276
4284
  </dl>
@@ -110,6 +110,7 @@ class PromptsClient:
110
110
  *,
111
111
  exec_config: PromptExecConfig,
112
112
  prompt_variant_id: typing.Optional[str] = OMIT,
113
+ prompt_variant_label: typing.Optional[str] = OMIT,
113
114
  prompt_sandbox_id: typing.Optional[str] = OMIT,
114
115
  request_options: typing.Optional[RequestOptions] = None,
115
116
  ) -> PromptPushResponse:
@@ -121,6 +122,10 @@ class PromptsClient:
121
122
  exec_config : PromptExecConfig
122
123
 
123
124
  prompt_variant_id : typing.Optional[str]
125
+ If specified, an existing Prompt Variant by the provided ID will be updated. Otherwise, a new Prompt Variant will be created and an ID generated.
126
+
127
+ prompt_variant_label : typing.Optional[str]
128
+ If provided, then the created/updated Prompt Variant will have this label.
124
129
 
125
130
  prompt_sandbox_id : typing.Optional[str]
126
131
 
@@ -169,11 +174,12 @@ class PromptsClient:
169
174
  base_url=self._client_wrapper.get_environment().default,
170
175
  method="POST",
171
176
  json={
177
+ "prompt_variant_id": prompt_variant_id,
178
+ "prompt_variant_label": prompt_variant_label,
179
+ "prompt_sandbox_id": prompt_sandbox_id,
172
180
  "exec_config": convert_and_respect_annotation_metadata(
173
181
  object_=exec_config, annotation=PromptExecConfig, direction="write"
174
182
  ),
175
- "prompt_variant_id": prompt_variant_id,
176
- "prompt_sandbox_id": prompt_sandbox_id,
177
183
  },
178
184
  headers={
179
185
  "content-type": "application/json",
@@ -316,6 +322,7 @@ class AsyncPromptsClient:
316
322
  *,
317
323
  exec_config: PromptExecConfig,
318
324
  prompt_variant_id: typing.Optional[str] = OMIT,
325
+ prompt_variant_label: typing.Optional[str] = OMIT,
319
326
  prompt_sandbox_id: typing.Optional[str] = OMIT,
320
327
  request_options: typing.Optional[RequestOptions] = None,
321
328
  ) -> PromptPushResponse:
@@ -327,6 +334,10 @@ class AsyncPromptsClient:
327
334
  exec_config : PromptExecConfig
328
335
 
329
336
  prompt_variant_id : typing.Optional[str]
337
+ If specified, an existing Prompt Variant by the provided ID will be updated. Otherwise, a new Prompt Variant will be created and an ID generated.
338
+
339
+ prompt_variant_label : typing.Optional[str]
340
+ If provided, then the created/updated Prompt Variant will have this label.
330
341
 
331
342
  prompt_sandbox_id : typing.Optional[str]
332
343
 
@@ -383,11 +394,12 @@ class AsyncPromptsClient:
383
394
  base_url=self._client_wrapper.get_environment().default,
384
395
  method="POST",
385
396
  json={
397
+ "prompt_variant_id": prompt_variant_id,
398
+ "prompt_variant_label": prompt_variant_label,
399
+ "prompt_sandbox_id": prompt_sandbox_id,
386
400
  "exec_config": convert_and_respect_annotation_metadata(
387
401
  object_=exec_config, annotation=PromptExecConfig, direction="write"
388
402
  ),
389
- "prompt_variant_id": prompt_variant_id,
390
- "prompt_sandbox_id": prompt_sandbox_id,
391
403
  },
392
404
  headers={
393
405
  "content-type": "application/json",
@@ -2,4 +2,6 @@
2
2
 
3
3
  import typing
4
4
 
5
- SecretTypeEnum = typing.Union[typing.Literal["USER_DEFINED", "HMAC", "INTERNAL_API_KEY"], typing.Any]
5
+ SecretTypeEnum = typing.Union[
6
+ typing.Literal["USER_DEFINED", "HMAC", "INTERNAL_API_KEY", "EXTERNALLY_PROVISIONED"], typing.Any
7
+ ]
@@ -457,21 +457,6 @@ class BaseNode(Generic[StateType], ABC, metaclass=BaseNodeMeta):
457
457
  resolved_value = resolve_value(descriptor.instance, self.state, path=descriptor.name, memo=inputs)
458
458
  setattr(self, descriptor.name, resolved_value)
459
459
 
460
- # Resolve descriptors set as defaults to the outputs class
461
- def _outputs_post_init(outputs_self: "BaseNode.Outputs", **kwargs: Any) -> None:
462
- for node_output_descriptor in self.Outputs:
463
- if node_output_descriptor.name in kwargs:
464
- continue
465
-
466
- if isinstance(node_output_descriptor.instance, BaseDescriptor):
467
- setattr(
468
- outputs_self,
469
- node_output_descriptor.name,
470
- node_output_descriptor.instance.resolve(self.state),
471
- )
472
-
473
- setattr(self.Outputs, "_outputs_post_init", _outputs_post_init)
474
-
475
460
  # We only want to store the attributes that were actually set as inputs, not every attribute that exists.
476
461
  all_inputs = {}
477
462
  for key, value in inputs.items():
@@ -3,6 +3,7 @@ import threading
3
3
  import time
4
4
 
5
5
  from vellum.workflows.inputs.base import BaseInputs
6
+ from vellum.workflows.nodes import FinalOutputNode
6
7
  from vellum.workflows.nodes.bases import BaseNode
7
8
  from vellum.workflows.nodes.core.map_node.node import MapNode
8
9
  from vellum.workflows.nodes.core.try_node.node import TryNode
@@ -221,3 +222,56 @@ def test_map_node_parallel_execution_with_workflow():
221
222
  # AND each item should have run on a different thread
222
223
  thread_ids_list = list(thread_ids.values())
223
224
  assert len(set(thread_ids_list)) == 3
225
+
226
+
227
+ def test_map_node__shared_state_race_condition():
228
+ processed_items = []
229
+
230
+ # GIVEN a templating node that processes items
231
+ class TemplatingNode(BaseNode):
232
+ item = MapNode.SubworkflowInputs.item
233
+
234
+ class Outputs(BaseOutputs):
235
+ processed_item: str
236
+
237
+ def run(self) -> Outputs:
238
+ processed_item = f"{self.item}!"
239
+ return self.Outputs(processed_item=processed_item)
240
+
241
+ # AND a final output node
242
+ class FinalOutput(FinalOutputNode[BaseState, str]):
243
+ class Outputs(FinalOutputNode.Outputs):
244
+ value = TemplatingNode.Outputs.processed_item
245
+
246
+ def run(self) -> Outputs:
247
+ outputs = super().run()
248
+ processed_items.append(outputs.value)
249
+ return outputs # type: ignore[return-value]
250
+
251
+ # AND a workflow using those nodes
252
+ class ProcessItemWorkflow(BaseWorkflow[MapNode.SubworkflowInputs, BaseState]):
253
+ graph = TemplatingNode >> FinalOutput
254
+
255
+ class Outputs(BaseWorkflow.Outputs):
256
+ result = FinalOutput.Outputs.value
257
+
258
+ # AND a map node with high concurrency
259
+ class RaceConditionMapNode(MapNode):
260
+ items = ["a", "b", "c", "d", "e", "f"]
261
+ subworkflow = ProcessItemWorkflow
262
+ max_concurrency = 1
263
+
264
+ # WHEN we run the map node multiple times to verify it passes consistently
265
+ num_runs = 50
266
+ for index in range(num_runs):
267
+ processed_items.clear()
268
+ node = RaceConditionMapNode(state=BaseState())
269
+ outputs = list(node.run())
270
+ final_result = outputs[-1].value
271
+
272
+ # THEN the state is unique among each run
273
+ assert len(set(processed_items)) == 6
274
+
275
+ # AND all results should be in correct order
276
+ expected_result = ["a!", "b!", "c!", "d!", "e!", "f!"]
277
+ assert final_result == expected_result, f"Failed on run {index}"
@@ -33,9 +33,9 @@ from vellum.workflows.types import MergeBehavior
33
33
  from vellum.workflows.types.definition import DeploymentDefinition
34
34
  from vellum.workflows.types.generics import StateType, is_workflow_class
35
35
  from vellum.workflows.utils.functions import (
36
- compile_deployment_workflow_function_definition,
37
36
  compile_function_definition,
38
37
  compile_inline_workflow_function_definition,
38
+ compile_workflow_deployment_function_definition,
39
39
  )
40
40
  from vellum.workflows.utils.pydantic_schema import normalize_json
41
41
 
@@ -115,7 +115,7 @@ class BaseInlinePromptNode(BasePromptNode[StateType], Generic[StateType]):
115
115
  normalized_functions.append(function)
116
116
  elif isinstance(function, DeploymentDefinition):
117
117
  normalized_functions.append(
118
- compile_deployment_workflow_function_definition(
118
+ compile_workflow_deployment_function_definition(
119
119
  function.model_dump(),
120
120
  vellum_client=self._context.vellum_client,
121
121
  )
@@ -52,6 +52,8 @@ def primitive_to_vellum_value(value: Any) -> VellumValue:
52
52
  return StringVellumValue(value=value)
53
53
  elif isinstance(value, enum.Enum):
54
54
  return StringVellumValue(value=value.value)
55
+ elif isinstance(value, bool):
56
+ return JsonVellumValue(value=value)
55
57
  elif isinstance(value, (int, float)):
56
58
  return NumberVellumValue(value=value)
57
59
  elif isinstance(value, list) and (
@@ -32,6 +32,7 @@ class ToolCallingNode(BaseNode):
32
32
  functions: List[Tool] - The functions that can be called
33
33
  prompt_inputs: Optional[EntityInputsInterface] - Mapping of input variable names to values
34
34
  function_configs: Optional[Dict[str, Dict[str, Any]]] - Mapping of function names to their configuration
35
+ max_prompt_iterations: Optional[int] - Maximum number of prompt iterations before stopping
35
36
  """
36
37
 
37
38
  ml_model: ClassVar[str] = "gpt-4o-mini"
@@ -39,6 +40,7 @@ class ToolCallingNode(BaseNode):
39
40
  functions: ClassVar[List[Tool]] = []
40
41
  prompt_inputs: ClassVar[Optional[EntityInputsInterface]] = None
41
42
  function_configs: ClassVar[Optional[Dict[str, Dict[str, Any]]]] = None
43
+ max_prompt_iterations: ClassVar[Optional[int]] = 5
42
44
 
43
45
  class Outputs(BaseOutputs):
44
46
  """
@@ -65,6 +67,7 @@ class ToolCallingNode(BaseNode):
65
67
 
66
68
  class ToolCallingState(BaseState):
67
69
  chat_history: List[ChatMessage] = []
70
+ prompt_iterations: int = 0
68
71
 
69
72
  class ToolCallingWorkflow(BaseWorkflow[BaseInputs, ToolCallingState]):
70
73
  graph = self._graph
@@ -93,7 +96,7 @@ class ToolCallingNode(BaseNode):
93
96
 
94
97
  return node_outputs
95
98
  elif terminal_event.name == "workflow.execution.rejected":
96
- raise Exception(f"Workflow execution rejected: {terminal_event.error}")
99
+ raise NodeException(message=terminal_event.error.message, code=terminal_event.error.code)
97
100
 
98
101
  raise Exception(f"Unexpected workflow event: {terminal_event.name}")
99
102
 
@@ -103,6 +106,7 @@ class ToolCallingNode(BaseNode):
103
106
  blocks=self.blocks,
104
107
  functions=self.functions,
105
108
  prompt_inputs=self.prompt_inputs,
109
+ max_prompt_iterations=self.max_prompt_iterations,
106
110
  )
107
111
 
108
112
  self._function_nodes = {}
@@ -12,6 +12,7 @@ from vellum.workflows.nodes.experimental.tool_calling_node.utils import create_f
12
12
  from vellum.workflows.outputs.base import BaseOutputs
13
13
  from vellum.workflows.state.base import BaseState, StateMeta
14
14
  from vellum.workflows.state.context import WorkflowContext
15
+ from vellum.workflows.types.definition import DeploymentDefinition
15
16
 
16
17
 
17
18
  def first_function() -> str:
@@ -127,3 +128,15 @@ def test_tool_calling_node_inline_workflow_context():
127
128
  assert isinstance(function_response.content, StringChatMessageContent)
128
129
  data = json.loads(function_response.content.value)
129
130
  assert data["generated_files"] == {"script.py": "print('hello world')"}
131
+
132
+
133
+ def test_deployment_definition_release_tag_defaults_to_latest():
134
+ """
135
+ Test that when creating a DeploymentDefinition without specifying release_tag,
136
+ it defaults to "LATEST".
137
+ """
138
+ # WHEN we create a deployment definition without specifying release_tag
139
+ deployment_config = DeploymentDefinition(deployment="test-deployment")
140
+
141
+ # THEN the release_tag should default to "LATEST"
142
+ assert deployment_config.release_tag == "LATEST"
@@ -37,10 +37,16 @@ class FunctionNode(BaseNode):
37
37
 
38
38
 
39
39
  class ToolRouterNode(InlinePromptNode):
40
+ max_prompt_iterations: Optional[int] = 5
41
+
40
42
  class Trigger(InlinePromptNode.Trigger):
41
43
  merge_behavior = MergeBehavior.AWAIT_ATTRIBUTES
42
44
 
43
45
  def run(self) -> Iterator[BaseOutput]:
46
+ if self.state.prompt_iterations >= self.max_prompt_iterations:
47
+ max_iterations_message = f"Maximum number of prompt iterations `{self.max_prompt_iterations}` reached."
48
+ raise NodeException(message=max_iterations_message, code=WorkflowErrorCode.NODE_EXECUTION)
49
+
44
50
  self.prompt_inputs = {**self.prompt_inputs, "chat_history": self.state.chat_history} # type: ignore
45
51
  generator = super().run()
46
52
  for output in generator:
@@ -50,6 +56,8 @@ class ToolRouterNode(InlinePromptNode):
50
56
  if values[0].type == "STRING":
51
57
  self.state.chat_history.append(ChatMessage(role="ASSISTANT", text=values[0].value))
52
58
  elif values[0].type == "FUNCTION_CALL":
59
+ self.state.prompt_iterations += 1
60
+
53
61
  function_call = values[0].value
54
62
  if function_call is not None:
55
63
  self.state.chat_history.append(
@@ -72,6 +80,7 @@ def create_tool_router_node(
72
80
  blocks: List[PromptBlock],
73
81
  functions: List[Tool],
74
82
  prompt_inputs: Optional[EntityInputsInterface],
83
+ max_prompt_iterations: Optional[int] = None,
75
84
  ) -> Type[ToolRouterNode]:
76
85
  if functions and len(functions) > 0:
77
86
  # If we have functions, create dynamic ports for each function
@@ -120,6 +129,7 @@ def create_tool_router_node(
120
129
  "blocks": blocks,
121
130
  "functions": functions,
122
131
  "prompt_inputs": prompt_inputs,
132
+ "max_prompt_iterations": max_prompt_iterations,
123
133
  "Ports": Ports,
124
134
  "__module__": __name__,
125
135
  },
@@ -149,7 +159,7 @@ def create_function_node(
149
159
  deployment = function.deployment
150
160
  release_tag = function.release_tag
151
161
 
152
- def execute_deployment_workflow_function(self) -> BaseNode.Outputs:
162
+ def execute_workflow_deployment_function(self) -> BaseNode.Outputs:
153
163
  function_call_output = self.state.meta.node_outputs.get(tool_router_node.Outputs.results)
154
164
  if function_call_output and len(function_call_output) > 0:
155
165
  function_call = function_call_output[0]
@@ -187,10 +197,10 @@ def create_function_node(
187
197
  return self.Outputs()
188
198
 
189
199
  node = type(
190
- f"DeploymentWorkflowNode_{deployment}",
200
+ f"WorkflowDeploymentNode_{deployment}",
191
201
  (FunctionNode,),
192
202
  {
193
- "run": execute_deployment_workflow_function,
203
+ "run": execute_workflow_deployment_function,
194
204
  "__module__": __name__,
195
205
  },
196
206
  )
@@ -219,7 +229,7 @@ def create_function_node(
219
229
  elif terminal_event.name == "workflow.execution.fulfilled":
220
230
  result = terminal_event.outputs
221
231
  elif terminal_event.name == "workflow.execution.rejected":
222
- raise Exception(f"Workflow execution rejected: {terminal_event.error}")
232
+ raise NodeException(message=terminal_event.error.message, code=terminal_event.error.code)
223
233
 
224
234
  self.state.chat_history.append(
225
235
  ChatMessage(
@@ -1,3 +1,4 @@
1
+ import inspect
1
2
  from typing import TYPE_CHECKING, Any, Generic, Iterator, Set, Tuple, Type, TypeVar, Union, cast
2
3
  from typing_extensions import dataclass_transform
3
4
 
@@ -9,6 +10,7 @@ from vellum.workflows.descriptors.base import BaseDescriptor
9
10
  from vellum.workflows.errors.types import WorkflowErrorCode
10
11
  from vellum.workflows.exceptions import NodeException
11
12
  from vellum.workflows.references.output import OutputReference
13
+ from vellum.workflows.types.generics import is_node_instance
12
14
  from vellum.workflows.types.utils import get_class_attr_names, infer_types
13
15
 
14
16
  if TYPE_CHECKING:
@@ -198,8 +200,30 @@ class BaseOutputs(metaclass=_BaseOutputsMeta):
198
200
  for name, value in kwargs.items():
199
201
  setattr(self, name, value)
200
202
 
201
- if hasattr(self, "_outputs_post_init") and callable(self._outputs_post_init):
202
- self._outputs_post_init(**kwargs)
203
+ # Climb up to the caller's frame; if the caller is a BaseNode instance, it should
204
+ # have a state attribute that we can use to resolve the output descriptors.
205
+ frame = inspect.currentframe()
206
+ if not frame:
207
+ return
208
+
209
+ caller_frame = frame.f_back
210
+ if not caller_frame or "self" not in caller_frame.f_locals:
211
+ return
212
+
213
+ caller_self = caller_frame.f_locals["self"]
214
+ if not is_node_instance(caller_self):
215
+ return
216
+
217
+ for node_output_descriptor in self.__class__:
218
+ if node_output_descriptor.name in kwargs:
219
+ continue
220
+
221
+ if isinstance(node_output_descriptor.instance, BaseDescriptor):
222
+ setattr(
223
+ self,
224
+ node_output_descriptor.name,
225
+ node_output_descriptor.instance.resolve(caller_self.state),
226
+ )
203
227
 
204
228
  def __eq__(self, other: object) -> bool:
205
229
  if not isinstance(other, (dict, BaseOutputs)):
@@ -68,6 +68,8 @@ class DefaultStateEncoder(JSONEncoder):
68
68
 
69
69
  return {
70
70
  "type": "CODE_EXECUTION",
71
+ "name": function_definition.name,
72
+ "description": function_definition.description,
71
73
  "definition": function_definition,
72
74
  "src": source_code,
73
75
  }
@@ -74,4 +74,4 @@ VellumCodeResourceDefinition = Annotated[
74
74
 
75
75
  class DeploymentDefinition(UniversalBaseModel):
76
76
  deployment: str
77
- release_tag: Optional[str] = "LATEST"
77
+ release_tag: str = "LATEST"
@@ -42,6 +42,11 @@ def is_node_class(obj: Any) -> TypeGuard[Type["BaseNode"]]:
42
42
  return isinstance(obj, type) and issubclass(obj, base_node_class)
43
43
 
44
44
 
45
+ def is_node_instance(obj: Any) -> TypeGuard["BaseNode"]:
46
+ base_node_class = _import_node_class()
47
+ return isinstance(obj, base_node_class)
48
+
49
+
45
50
  def is_workflow_class(obj: Any) -> TypeGuard[Type["BaseWorkflow"]]:
46
51
  base_workflow_class = import_workflow_class()
47
52
  return isinstance(obj, type) and issubclass(obj, base_workflow_class)
@@ -88,7 +88,7 @@ def _compile_default_value(default: Any) -> Any:
88
88
  return default
89
89
 
90
90
 
91
- def _compile_deployment_workflow_input(input_var: Any) -> dict[str, Any]:
91
+ def _compile_workflow_deployment_input(input_var: Any) -> dict[str, Any]:
92
92
  """
93
93
  Converts a deployment workflow input variable to a JSON schema type definition.
94
94
  """
@@ -168,7 +168,7 @@ def compile_inline_workflow_function_definition(workflow_class: Type["BaseWorkfl
168
168
  )
169
169
 
170
170
 
171
- def compile_deployment_workflow_function_definition(
171
+ def compile_workflow_deployment_function_definition(
172
172
  deployment_config: Dict[str, str],
173
173
  vellum_client: Vellum,
174
174
  ) -> FunctionDefinition:
@@ -193,7 +193,7 @@ def compile_deployment_workflow_function_definition(
193
193
  required = []
194
194
 
195
195
  for input_var in input_variables:
196
- properties[input_var.key] = _compile_deployment_workflow_input(input_var)
196
+ properties[input_var.key] = _compile_workflow_deployment_input(input_var)
197
197
 
198
198
  if input_var.required and input_var.default is None:
199
199
  required.append(input_var.key)
@@ -12,9 +12,9 @@ from vellum.workflows.inputs.base import BaseInputs
12
12
  from vellum.workflows.nodes.bases.base import BaseNode
13
13
  from vellum.workflows.state.base import BaseState
14
14
  from vellum.workflows.utils.functions import (
15
- compile_deployment_workflow_function_definition,
16
15
  compile_function_definition,
17
16
  compile_inline_workflow_function_definition,
17
+ compile_workflow_deployment_function_definition,
18
18
  )
19
19
 
20
20
 
@@ -440,7 +440,7 @@ def test_compile_inline_workflow_function_definition__optionals():
440
440
  )
441
441
 
442
442
 
443
- def test_compile_deployment_workflow_function_definition__just_name():
443
+ def test_compile_workflow_deployment_function_definition__just_name():
444
444
  # GIVEN a mock Vellum client and deployment
445
445
  mock_client = Mock()
446
446
  mock_release = Mock()
@@ -451,7 +451,7 @@ def test_compile_deployment_workflow_function_definition__just_name():
451
451
  deployment_config = {"deployment": "my_deployment", "release_tag": "latest"}
452
452
 
453
453
  # WHEN compiling the deployment workflow function
454
- compiled_function = compile_deployment_workflow_function_definition(deployment_config, mock_client)
454
+ compiled_function = compile_workflow_deployment_function_definition(deployment_config, mock_client)
455
455
 
456
456
  # THEN it should return the compiled function definition (same structure as function test)
457
457
  assert compiled_function == FunctionDefinition(
@@ -461,7 +461,7 @@ def test_compile_deployment_workflow_function_definition__just_name():
461
461
  )
462
462
 
463
463
 
464
- def test_compile_deployment_workflow_function_definition__all_args():
464
+ def test_compile_workflow_deployment_function_definition__all_args():
465
465
  # GIVEN a mock Vellum client and deployment
466
466
  mock_client = Mock()
467
467
  mock_release = Mock()
@@ -497,7 +497,7 @@ def test_compile_deployment_workflow_function_definition__all_args():
497
497
  deployment_config = {"deployment": "my_deployment", "release_tag": "latest"}
498
498
 
499
499
  # WHEN compiling the deployment workflow function
500
- compiled_function = compile_deployment_workflow_function_definition(deployment_config, mock_client)
500
+ compiled_function = compile_workflow_deployment_function_definition(deployment_config, mock_client)
501
501
 
502
502
  # THEN it should return the compiled function definition
503
503
  assert compiled_function == FunctionDefinition(
@@ -524,7 +524,7 @@ def test_compile_deployment_workflow_function_definition__all_args():
524
524
  )
525
525
 
526
526
 
527
- def test_compile_deployment_workflow_function_definition__defaults():
527
+ def test_compile_workflow_deployment_function_definition__defaults():
528
528
  # GIVEN a mock Vellum client and deployment
529
529
  mock_client = Mock()
530
530
  mock_release = Mock()
@@ -565,7 +565,7 @@ def test_compile_deployment_workflow_function_definition__defaults():
565
565
  deployment_config = {"deployment": "my_deployment", "release_tag": "latest"}
566
566
 
567
567
  # WHEN compiling the deployment workflow function
568
- compiled_function = compile_deployment_workflow_function_definition(deployment_config, mock_client)
568
+ compiled_function = compile_workflow_deployment_function_definition(deployment_config, mock_client)
569
569
 
570
570
  # THEN it should return the compiled function definition with proper default handling
571
571
  assert compiled_function == FunctionDefinition(
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vellum-ai
3
- Version: 0.14.73
3
+ Version: 0.14.75
4
4
  Summary:
5
5
  License: MIT
6
6
  Requires-Python: >=3.9,<4.0