vellum-ai 1.4.1__py3-none-any.whl → 1.5.0__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
- vellum/__init__.py +14 -0
- vellum/client/__init__.py +3 -0
- vellum/client/core/client_wrapper.py +2 -2
- vellum/client/reference.md +160 -0
- vellum/client/resources/__init__.py +2 -0
- vellum/client/resources/integrations/__init__.py +4 -0
- vellum/client/resources/integrations/client.py +260 -0
- vellum/client/resources/integrations/raw_client.py +267 -0
- vellum/client/types/__init__.py +12 -0
- vellum/client/types/components_schemas_composio_execute_tool_request.py +5 -0
- vellum/client/types/components_schemas_composio_execute_tool_response.py +5 -0
- vellum/client/types/components_schemas_composio_tool_definition.py +5 -0
- vellum/client/types/composio_execute_tool_request.py +24 -0
- vellum/client/types/composio_execute_tool_response.py +24 -0
- vellum/client/types/composio_tool_definition.py +26 -0
- vellum/client/types/vellum_error_code_enum.py +2 -0
- vellum/client/types/vellum_sdk_error.py +1 -0
- vellum/client/types/workflow_event_error.py +1 -0
- vellum/resources/integrations/__init__.py +3 -0
- vellum/resources/integrations/client.py +3 -0
- vellum/resources/integrations/raw_client.py +3 -0
- vellum/types/components_schemas_composio_execute_tool_request.py +3 -0
- vellum/types/components_schemas_composio_execute_tool_response.py +3 -0
- vellum/types/components_schemas_composio_tool_definition.py +3 -0
- vellum/types/composio_execute_tool_request.py +3 -0
- vellum/types/composio_execute_tool_response.py +3 -0
- vellum/types/composio_tool_definition.py +3 -0
- vellum/workflows/constants.py +4 -0
- vellum/workflows/emitters/base.py +8 -0
- vellum/workflows/emitters/vellum_emitter.py +10 -0
- vellum/workflows/inputs/dataset_row.py +2 -2
- vellum/workflows/nodes/bases/base.py +12 -1
- vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py +6 -0
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +16 -2
- vellum/workflows/nodes/displayable/final_output_node/node.py +59 -0
- vellum/workflows/nodes/displayable/final_output_node/tests/test_node.py +40 -1
- vellum/workflows/nodes/displayable/tool_calling_node/node.py +3 -0
- vellum/workflows/nodes/displayable/tool_calling_node/tests/test_utils.py +64 -0
- vellum/workflows/nodes/displayable/tool_calling_node/utils.py +30 -41
- vellum/workflows/runner/runner.py +132 -110
- vellum/workflows/tests/test_dataset_row.py +29 -0
- vellum/workflows/types/core.py +13 -2
- vellum/workflows/types/definition.py +13 -1
- vellum/workflows/utils/functions.py +69 -27
- vellum/workflows/utils/tests/test_functions.py +50 -6
- vellum/workflows/vellum_client.py +7 -1
- vellum/workflows/workflows/base.py +26 -4
- vellum/workflows/workflows/tests/test_base_workflow.py +54 -0
- {vellum_ai-1.4.1.dist-info → vellum_ai-1.5.0.dist-info}/METADATA +1 -1
- {vellum_ai-1.4.1.dist-info → vellum_ai-1.5.0.dist-info}/RECORD +63 -42
- vellum_ai-1.5.0.dist-info/entry_points.txt +4 -0
- vellum_cli/tests/test_pull.py +1 -0
- vellum_cli/tests/test_push.py +2 -0
- vellum_ee/assets/node-definitions.json +483 -0
- vellum_ee/scripts/generate_node_definitions.py +89 -0
- vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +1 -3
- vellum_ee/workflows/display/nodes/vellum/tests/test_final_output_node.py +78 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_inline_workflow_serialization.py +5 -0
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_serialization.py +5 -0
- vellum_ee/workflows/display/types.py +3 -0
- vellum_ee/workflows/display/workflows/base_workflow_display.py +6 -0
- vellum_ai-1.4.1.dist-info/entry_points.txt +0 -3
- {vellum_ai-1.4.1.dist-info → vellum_ai-1.5.0.dist-info}/LICENSE +0 -0
- {vellum_ai-1.4.1.dist-info → vellum_ai-1.5.0.dist-info}/WHEEL +0 -0

vellum/workflows/nodes/displayable/final_output_node/node.py

@@ -5,6 +5,7 @@ from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.nodes.bases.base import BaseNodeMeta
 from vellum.workflows.nodes.utils import cast_to_output_type
 from vellum.workflows.ports import NodePorts
+from vellum.workflows.references.output import OutputReference
 from vellum.workflows.types import MergeBehavior
 from vellum.workflows.types.generics import StateType
 from vellum.workflows.types.utils import get_original_base
@@ -27,6 +28,7 @@ class _FinalOutputNodeMeta(BaseNodeMeta):
             **annotations,
             "value": parent.get_output_type(),
         }
+
         return parent

     def get_output_type(cls) -> Type:
@@ -38,6 +40,63 @@ class _FinalOutputNodeMeta(BaseNodeMeta):
         else:
             return all_args[1]

+    def __validate__(cls) -> None:
+        cls._validate_output_type_consistency(cls)
+
+    @classmethod
+    def _validate_output_type_consistency(mcs, cls: Type) -> None:
+        """
+        Validates that the declared output type of FinalOutputNode matches
+        the type of the descriptor assigned to the 'value' attribute in its Outputs class.
+
+        Raises ValueError if there's a type mismatch.
+        """
+        if not hasattr(cls, "Outputs"):
+            return
+
+        outputs_class = cls.Outputs
+        if not hasattr(outputs_class, "value"):
+            return
+
+        declared_output_type = cls.get_output_type()
+        value_descriptor = None
+
+        if "value" in outputs_class.__dict__:
+            value_descriptor = outputs_class.__dict__["value"]
+        else:
+            value_descriptor = getattr(outputs_class, "value")
+
+        if isinstance(value_descriptor, OutputReference):
+            descriptor_types = value_descriptor.types
+
+            type_mismatch = True
+            for descriptor_type in descriptor_types:
+                if descriptor_type == declared_output_type:
+                    type_mismatch = False
+                    break
+                try:
+                    if issubclass(descriptor_type, declared_output_type) or issubclass(
+                        declared_output_type, descriptor_type
+                    ):
+                        type_mismatch = False
+                        break
+                except TypeError:
+                    # Handle cases where types aren't classes (e.g., Union)
+                    if str(descriptor_type) == str(declared_output_type):
+                        type_mismatch = False
+                        break
+
+            if type_mismatch:
+                declared_type_name = getattr(declared_output_type, "__name__", str(declared_output_type))
+                descriptor_type_names = [getattr(t, "__name__", str(t)) for t in descriptor_types]
+
+                raise ValueError(
+                    f"Output type mismatch in {cls.__name__}: "
+                    f"FinalOutputNode is declared with output type '{declared_type_name}' "
+                    f"but the 'value' descriptor has type(s) {descriptor_type_names}. "
+                    f"The output descriptor type must match the declared FinalOutputNode output type."
+                )
+

 class FinalOutputNode(BaseNode[StateType], Generic[StateType, _OutputType], metaclass=_FinalOutputNodeMeta):
     """
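
In practical terms, the new __validate__ hook rejects a FinalOutputNode subclass whose declared generic output type disagrees with the type of its Outputs.value descriptor. A minimal sketch of the behavior, reusing only names that appear in this diff (the subclass names are illustrative, and the import paths are assumed from the file list):

from vellum.workflows.nodes.displayable.final_output_node import FinalOutputNode
from vellum.workflows.nodes.displayable.inline_prompt_node import InlinePromptNode
from vellum.workflows.state.base import BaseState


class SummaryOutput(FinalOutputNode[BaseState, str]):
    """Declared output type (str) matches the str-typed prompt text descriptor."""

    class Outputs(FinalOutputNode.Outputs):
        value = InlinePromptNode.Outputs.text


SummaryOutput.__validate__()  # passes: descriptor type 'str' matches the declared 'str'


class LineItemsOutput(FinalOutputNode[BaseState, list]):
    """Declared output type (list) does not match the str-typed descriptor."""

    class Outputs(FinalOutputNode.Outputs):
        value = InlinePromptNode.Outputs.text


# LineItemsOutput.__validate__() would raise:
#   ValueError: Output type mismatch in LineItemsOutput: FinalOutputNode is declared with
#   output type 'list' but the 'value' descriptor has type(s) ['str']. ...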

vellum/workflows/nodes/displayable/final_output_node/tests/test_node.py

@@ -2,10 +2,11 @@ import pytest

 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.displayable.final_output_node import FinalOutputNode
+from vellum.workflows.nodes.displayable.inline_prompt_node import InlinePromptNode
 from vellum.workflows.state.base import BaseState


-def test_final_output_node__mismatched_output_type():
+def test_final_output_node__mismatched_output_type_should_raise_exception_when_ran():
     # GIVEN a FinalOutputNode with a mismatched output type
     class StringOutputNode(FinalOutputNode[BaseState, str]):
         class Outputs(FinalOutputNode.Outputs):
@@ -18,3 +19,41 @@ def test_final_output_node__mismatched_output_type():

     # THEN an error is raised
     assert str(exc_info.value) == "Expected an output of type 'str', but received 'dict'"
+
+
+def test_final_output_node__mismatched_output_type_should_raise_exception():
+    # GIVEN a FinalOutputNode declared with list output type but has a string value type
+    class Output(FinalOutputNode[BaseState, list]):
+        """Output the extracted invoice line items as an array of objects."""
+
+        class Outputs(FinalOutputNode.Outputs):
+            value = InlinePromptNode.Outputs.text
+
+    # WHEN attempting to validate the node class
+    # THEN a ValueError should be raised during validation
+    with pytest.raises(ValueError) as exc_info:
+        Output.__validate__()
+
+    # AND the error message should indicate the type mismatch
+    assert (
+        str(exc_info.value)
+        == "Output type mismatch in Output: FinalOutputNode is declared with output type 'list' but "
+        "the 'value' descriptor has type(s) ['str']. The output descriptor type must match the "
+        "declared FinalOutputNode output type."
+    )
+
+
+def test_final_output_node__matching_output_type_should_pass_validation():
+    # GIVEN a FinalOutputNode declared with correct matching types
+    class CorrectOutput(FinalOutputNode[BaseState, str]):
+        """Output with correct type matching."""
+
+        class Outputs(FinalOutputNode.Outputs):
+            value = InlinePromptNode.Outputs.text
+
+    # WHEN attempting to validate the node class
+    # THEN validation should pass without raising an exception
+    try:
+        CorrectOutput.__validate__()
+    except ValueError:
+        pytest.fail("Validation should not raise an exception for correct type matching")

vellum/workflows/nodes/displayable/tool_calling_node/node.py

@@ -2,6 +2,7 @@ from typing import Any, ClassVar, Dict, Generic, Iterator, List, Optional, Set,

 from vellum import ChatMessage, PromptBlock
 from vellum.client.types.prompt_parameters import PromptParameters
+from vellum.client.types.prompt_settings import PromptSettings
 from vellum.prompts.constants import DEFAULT_PROMPT_PARAMETERS
 from vellum.workflows.context import execution_context, get_parent_context
 from vellum.workflows.errors.types import WorkflowErrorCode
@@ -47,6 +48,7 @@ class ToolCallingNode(BaseNode[StateType], Generic[StateType]):
     prompt_inputs: ClassVar[Optional[EntityInputsInterface]] = None
     parameters: PromptParameters = DEFAULT_PROMPT_PARAMETERS
     max_prompt_iterations: ClassVar[Optional[int]] = 5
+    settings: ClassVar[Optional[Union[PromptSettings, Dict[str, Any]]]] = None

     class Outputs(BaseOutputs):
         """
@@ -150,6 +152,7 @@ class ToolCallingNode(BaseNode[StateType], Generic[StateType]):
             max_prompt_iterations=self.max_prompt_iterations,
             process_parameters_method=process_parameters_method,
             process_blocks_method=process_blocks_method,
+            settings=self.settings,
         )

         # Create the router node (handles routing logic only)
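
The three hunks above add a settings attribute to ToolCallingNode and pass it through to the generated prompt node. A hedged usage sketch follows; the subclass, model name, and empty tool list are illustrative, and the import path for ToolCallingNode is assumed from the file layout rather than confirmed by this diff:

from vellum.client.types.prompt_settings import PromptSettings
from vellum.workflows.nodes.displayable.tool_calling_node import ToolCallingNode


class MyToolCallingNode(ToolCallingNode):
    ml_model = "gpt-4o-mini"  # illustrative; the same model name appears in the new tests
    blocks = []               # prompt blocks would normally go here
    functions = []            # tools (functions, deployments, MCP servers, ...) go here
    max_prompt_iterations = 3

    # New in 1.5.0: accepts either a PromptSettings instance or a plain dict;
    # dicts are normalized via PromptSettings.model_validate in create_tool_prompt_node.
    settings = PromptSettings(stream_enabled=False)  # or: {"stream_enabled": False}

With stream_enabled=False, the underlying prompt node calls adhoc_execute_prompt rather than adhoc_execute_prompt_stream, which is what the new tests in tests/test_utils.py assert.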

vellum/workflows/nodes/displayable/tool_calling_node/tests/test_utils.py

@@ -5,6 +5,7 @@ from vellum.client.types.chat_message_prompt_block import ChatMessagePromptBlock
 from vellum.client.types.fulfilled_execute_prompt_event import FulfilledExecutePromptEvent
 from vellum.client.types.initiated_execute_prompt_event import InitiatedExecutePromptEvent
 from vellum.client.types.plain_text_prompt_block import PlainTextPromptBlock
+from vellum.client.types.prompt_settings import PromptSettings
 from vellum.client.types.rich_text_prompt_block import RichTextPromptBlock
 from vellum.client.types.string_vellum_value import StringVellumValue
 from vellum.client.types.variable_prompt_block import VariablePromptBlock
@@ -250,3 +251,66 @@ def test_get_mcp_tool_name_snake_case():

     result = get_mcp_tool_name(mcp_tool)
     assert result == "github_server__create_repository"
+
+
+def test_create_tool_prompt_node_settings_dict_stream_disabled(vellum_adhoc_prompt_client):
+    # GIVEN settings provided as dict with stream disabled
+    tool_prompt_node = create_tool_prompt_node(
+        ml_model="gpt-4o-mini",
+        blocks=[],
+        functions=[],
+        prompt_inputs=None,
+        parameters=DEFAULT_PROMPT_PARAMETERS,
+        max_prompt_iterations=1,
+        settings={"stream_enabled": False},
+    )
+
+    # AND the API mocks
+    def generate_non_stream_response(*args, **kwargs):
+        return FulfilledExecutePromptEvent(execution_id=str(uuid4()), outputs=[StringVellumValue(value="ok")])
+
+    vellum_adhoc_prompt_client.adhoc_execute_prompt.side_effect = generate_non_stream_response
+
+    # WHEN we run the node
+    node_instance = tool_prompt_node()
+    list(node_instance.run())
+
+    # THEN the node should have called the API correctly
+    assert node_instance.settings is not None
+    assert node_instance.settings.stream_enabled is False
+    assert vellum_adhoc_prompt_client.adhoc_execute_prompt.call_count == 1
+    assert vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.call_count == 0
+
+
+def test_create_tool_prompt_node_settings_model_stream_enabled(vellum_adhoc_prompt_client):
+    # GIVEN settings provided as PromptSettings with stream enabled
+    tool_prompt_node = create_tool_prompt_node(
+        ml_model="gpt-4o-mini",
+        blocks=[],
+        functions=[],
+        prompt_inputs=None,
+        parameters=DEFAULT_PROMPT_PARAMETERS,
+        max_prompt_iterations=1,
+        settings=PromptSettings(stream_enabled=True),
+    )
+
+    # AND the API mocks
+    def generate_stream_events(*args, **kwargs):
+        execution_id = str(uuid4())
+        events = [
+            InitiatedExecutePromptEvent(execution_id=execution_id),
+            FulfilledExecutePromptEvent(execution_id=execution_id, outputs=[StringVellumValue(value="ok")]),
+        ]
+        yield from events
+
+    vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.side_effect = generate_stream_events
+
+    # WHEN we run the node
+    node_instance = tool_prompt_node()
+    list(node_instance.run())
+
+    # THEN the node should have called the API correctly
+    assert node_instance.settings is not None
+    assert node_instance.settings.stream_enabled is True
+    assert vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.call_count == 1
+    assert vellum_adhoc_prompt_client.adhoc_execute_prompt.call_count == 0

vellum/workflows/nodes/displayable/tool_calling_node/utils.py

@@ -9,9 +9,9 @@ from vellum.client.types.array_chat_message_content import ArrayChatMessageContent
 from vellum.client.types.array_chat_message_content_item import ArrayChatMessageContentItem
 from vellum.client.types.function_call_chat_message_content import FunctionCallChatMessageContent
 from vellum.client.types.function_call_chat_message_content_value import FunctionCallChatMessageContentValue
-from vellum.client.types.function_definition import FunctionDefinition
 from vellum.client.types.prompt_output import PromptOutput
 from vellum.client.types.prompt_parameters import PromptParameters
+from vellum.client.types.prompt_settings import PromptSettings
 from vellum.client.types.string_chat_message_content import StringChatMessageContent
 from vellum.client.types.variable_prompt_block import VariablePromptBlock
 from vellum.workflows.descriptors.base import BaseDescriptor
@@ -31,7 +31,13 @@ from vellum.workflows.ports.port import Port
 from vellum.workflows.state import BaseState
 from vellum.workflows.state.encoder import DefaultStateEncoder
 from vellum.workflows.types.core import EntityInputsInterface, MergeBehavior, Tool, ToolBase
-from vellum.workflows.types.definition import ComposioToolDefinition, DeploymentDefinition, MCPServer, MCPToolDefinition
+from vellum.workflows.types.definition import (
+    ComposioToolDefinition,
+    DeploymentDefinition,
+    MCPServer,
+    MCPToolDefinition,
+    VellumIntegrationToolDefinition,
+)
 from vellum.workflows.types.generics import is_workflow_class
 from vellum.workflows.utils.functions import compile_mcp_tool_definition, get_mcp_tool_name

@@ -274,36 +280,6 @@ class ElseNode(BaseNode[ToolCallingState]):
         return self.Outputs()


-def _hydrate_composio_tool_definition(tool_def: ComposioToolDefinition) -> FunctionDefinition:
-    """Hydrate a ComposioToolDefinition with detailed information from the Composio API.
-
-    Args:
-        tool_def: The basic ComposioToolDefinition to enhance
-
-    Returns:
-        FunctionDefinition with detailed parameters and description
-    """
-    try:
-        composio_service = ComposioService()
-        tool_details = composio_service.get_tool_by_slug(tool_def.action)
-
-        # Create a FunctionDefinition directly with proper field extraction
-        return FunctionDefinition(
-            name=tool_def.name,
-            description=tool_details.get("description", tool_def.description),
-            parameters=tool_details.get("input_parameters", {}),
-        )
-
-    except Exception as e:
-        # If hydration fails (including no API key), log and return basic function definition
-        logger.warning(f"Failed to enhance Composio tool '{tool_def.action}': {e}")
-        return FunctionDefinition(
-            name=tool_def.name,
-            description=tool_def.description,
-            parameters={},
-        )
-
-
 def create_tool_prompt_node(
     ml_model: str,
     blocks: List[Union[PromptBlock, Dict[str, Any]]],
@@ -313,17 +289,10 @@ def create_tool_prompt_node(
     max_prompt_iterations: Optional[int] = None,
     process_parameters_method: Optional[Callable] = None,
     process_blocks_method: Optional[Callable] = None,
+    settings: Optional[Union[PromptSettings, Dict[str, Any]]] = None,
 ) -> Type[ToolPromptNode]:
     if functions and len(functions) > 0:
-        prompt_functions: List[
-
-        for function in functions:
-            if isinstance(function, ComposioToolDefinition):
-                # Get Composio tool details and hydrate the function definition
-                enhanced_function = _hydrate_composio_tool_definition(function)
-                prompt_functions.append(enhanced_function)
-            else:
-                prompt_functions.append(function)
+        prompt_functions: List[Tool] = functions
     else:
         prompt_functions = []

@@ -359,6 +328,13 @@ def create_tool_prompt_node(
         ),
     }

+    # Normalize settings to PromptSettings if provided as a dict
+    normalized_settings: Optional[PromptSettings]
+    if isinstance(settings, dict):
+        normalized_settings = PromptSettings.model_validate(settings)
+    else:
+        normalized_settings = settings
+
     node = cast(
         Type[ToolPromptNode],
         type(
@@ -371,6 +347,7 @@
                 "prompt_inputs": node_prompt_inputs,
                 "parameters": parameters,
                 "max_prompt_iterations": max_prompt_iterations,
+                "settings": normalized_settings,
                 **({"process_parameters": process_parameters_method} if process_parameters_method is not None else {}),
                 **({"process_blocks": process_blocks_method} if process_blocks_method is not None else {}),
                 "__module__": __name__,
@@ -409,6 +386,10 @@ def create_router_node(
             function_name = get_function_name(function)
             port = create_port_condition(function_name)
             setattr(Ports, function_name, port)
+        elif isinstance(function, VellumIntegrationToolDefinition):
+            function_name = get_function_name(function)
+            port = create_port_condition(function_name)
+            setattr(Ports, function_name, port)
         elif isinstance(function, MCPServer):
             tool_functions: List[MCPToolDefinition] = compile_mcp_tool_definition(function)
             for tool_function in tool_functions:
@@ -483,6 +464,12 @@ def create_function_node(
             },
         )
         return node
+    elif isinstance(function, VellumIntegrationToolDefinition):
+        # TODO: Implement VellumIntegrationNode
+        raise NotImplementedError(
+            "VellumIntegrationToolDefinition support coming soon. "
+            "This will be implemented when the VellumIntegrationService is created."
+        )
     elif is_workflow_class(function):
         function.is_dynamic = True
         node = type(
@@ -572,5 +559,7 @@ def get_function_name(function: ToolBase) -> str:
     elif isinstance(function, ComposioToolDefinition):
         # model post init sets the name to the action if it's not set
         return function.name  # type: ignore[return-value]
+    elif isinstance(function, VellumIntegrationToolDefinition):
+        return function.name
     else:
         return snake_case(function.__name__)
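
Finally, the create_tool_prompt_node changes above mean callers can pass settings either as a PromptSettings model or as a plain dict. A small sketch of that normalization step in isolation; the helper name normalize_settings is hypothetical, and the only API calls used (PromptSettings, model_validate, stream_enabled) are the ones that appear in this diff:

from typing import Any, Dict, Optional, Union

from vellum.client.types.prompt_settings import PromptSettings


def normalize_settings(settings: Optional[Union[PromptSettings, Dict[str, Any]]]) -> Optional[PromptSettings]:
    # Mirrors the normalization added in create_tool_prompt_node: dicts are validated
    # into PromptSettings; models and None pass through unchanged.
    if isinstance(settings, dict):
        return PromptSettings.model_validate(settings)
    return settings


assert normalize_settings({"stream_enabled": False}).stream_enabled is False
assert normalize_settings(None) is None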