vellum-ai 1.0.3__py3-none-any.whl → 1.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vellum/__init__.py +2 -0
- vellum/client/core/client_wrapper.py +2 -2
- vellum/client/types/__init__.py +2 -0
- vellum/client/types/name_enum.py +7 -0
- vellum/client/types/organization_limit_config.py +1 -0
- vellum/client/types/quota.py +2 -1
- vellum/prompts/blocks/compilation.py +5 -1
- vellum/prompts/blocks/tests/test_compilation.py +64 -0
- vellum/types/name_enum.py +3 -0
- vellum/workflows/descriptors/base.py +12 -0
- vellum/workflows/expressions/concat.py +32 -0
- vellum/workflows/expressions/tests/test_concat.py +53 -0
- vellum/workflows/nodes/displayable/inline_prompt_node/node.py +1 -2
- vellum/workflows/nodes/displayable/prompt_deployment_node/node.py +1 -2
- vellum/workflows/nodes/displayable/tool_calling_node/composio_service.py +83 -0
- vellum/workflows/nodes/displayable/tool_calling_node/tests/test_composio_service.py +122 -0
- vellum/workflows/nodes/displayable/tool_calling_node/tests/test_utils.py +21 -1
- vellum/workflows/nodes/displayable/tool_calling_node/utils.py +133 -57
- vellum/workflows/types/core.py +2 -2
- vellum/workflows/types/definition.py +20 -1
- vellum/workflows/types/tests/test_definition.py +14 -1
- vellum/workflows/utils/functions.py +13 -1
- vellum/workflows/utils/tests/test_functions.py +32 -1
- {vellum_ai-1.0.3.dist-info → vellum_ai-1.0.5.dist-info}/METADATA +3 -1
- {vellum_ai-1.0.3.dist-info → vellum_ai-1.0.5.dist-info}/RECORD +29 -22
- vellum_ee/workflows/display/tests/workflow_serialization/test_basic_tool_calling_node_composio_serialization.py +86 -0
- {vellum_ai-1.0.3.dist-info → vellum_ai-1.0.5.dist-info}/LICENSE +0 -0
- {vellum_ai-1.0.3.dist-info → vellum_ai-1.0.5.dist-info}/WHEEL +0 -0
- {vellum_ai-1.0.3.dist-info → vellum_ai-1.0.5.dist-info}/entry_points.txt +0 -0
vellum/workflows/nodes/displayable/tool_calling_node/utils.py
CHANGED
@@ -1,4 +1,5 @@
 import json
+import os
 from typing import Any, Callable, Iterator, List, Optional, Type, cast

 from pydash import snake_case
@@ -12,11 +13,13 @@ from vellum.client.types.string_chat_message_content import StringChatMessageContent
 from vellum.client.types.variable_prompt_block import VariablePromptBlock
 from vellum.workflows.errors.types import WorkflowErrorCode
 from vellum.workflows.exceptions import NodeException
+from vellum.workflows.expressions.concat import ConcatExpression
 from vellum.workflows.inputs import BaseInputs
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.nodes.core.inline_subworkflow_node.node import InlineSubworkflowNode
 from vellum.workflows.nodes.displayable.inline_prompt_node.node import InlinePromptNode
 from vellum.workflows.nodes.displayable.subworkflow_deployment_node.node import SubworkflowDeploymentNode
+from vellum.workflows.nodes.displayable.tool_calling_node.composio_service import ComposioService
 from vellum.workflows.nodes.displayable.tool_calling_node.state import ToolCallingState
 from vellum.workflows.outputs.base import BaseOutput
 from vellum.workflows.ports.port import Port
@@ -24,12 +27,35 @@ from vellum.workflows.references.lazy import LazyReference
 from vellum.workflows.state import BaseState
 from vellum.workflows.state.encoder import DefaultStateEncoder
 from vellum.workflows.types.core import EntityInputsInterface, MergeBehavior, Tool
-from vellum.workflows.types.definition import DeploymentDefinition
+from vellum.workflows.types.definition import ComposioToolDefinition, DeploymentDefinition
 from vellum.workflows.types.generics import is_workflow_class

 CHAT_HISTORY_VARIABLE = "chat_history"


+class FunctionCallNodeMixin:
+    """Mixin providing common functionality for nodes that handle function calls."""
+
+    function_call_output: List[PromptOutput]
+
+    def _extract_function_arguments(self) -> dict:
+        """Extract arguments from function call output."""
+        if self.function_call_output and len(self.function_call_output) > 0:
+            function_call = self.function_call_output[0]
+            if function_call.type == "FUNCTION_CALL" and function_call.value is not None:
+                return function_call.value.arguments or {}
+        return {}
+
+    def _add_function_result_to_chat_history(self, result: Any, state: ToolCallingState) -> None:
+        """Add function execution result to chat history."""
+        state.chat_history.append(
+            ChatMessage(
+                role="FUNCTION",
+                content=StringChatMessageContent(value=json.dumps(result, cls=DefaultStateEncoder)),
+            )
+        )
+
+
 class ToolRouterNode(InlinePromptNode[ToolCallingState]):
     max_prompt_iterations: Optional[int] = 5

@@ -41,10 +67,6 @@ class ToolRouterNode(InlinePromptNode[ToolCallingState]):
             max_iterations_message = f"Maximum number of prompt iterations `{self.max_prompt_iterations}` reached."
             raise NodeException(message=max_iterations_message, code=WorkflowErrorCode.NODE_EXECUTION)

-        # Merge user-provided chat history with node's chat history
-        user_chat_history = self.prompt_inputs.get(CHAT_HISTORY_VARIABLE, []) if self.prompt_inputs else []
-        merged_chat_history = user_chat_history + self.state.chat_history
-        self.prompt_inputs = {**self.prompt_inputs, CHAT_HISTORY_VARIABLE: merged_chat_history}  # type: ignore
         generator = super().run()
         for output in generator:
             if output.name == "results" and output.value:
@@ -72,20 +94,11 @@ class ToolRouterNode(InlinePromptNode[ToolCallingState]):
             yield output


-class DynamicSubworkflowDeploymentNode(SubworkflowDeploymentNode[ToolCallingState]):
+class DynamicSubworkflowDeploymentNode(SubworkflowDeploymentNode[ToolCallingState], FunctionCallNodeMixin):
     """Node that executes a deployment definition with function call output."""

-    function_call_output: List[PromptOutput]
-
     def run(self) -> Iterator[BaseOutput]:
-        if self.function_call_output and len(self.function_call_output) > 0:
-            function_call = self.function_call_output[0]
-            if function_call.type == "FUNCTION_CALL" and function_call.value is not None:
-                arguments = function_call.value.arguments
-            else:
-                arguments = {}
-        else:
-            arguments = {}
+        arguments = self._extract_function_arguments()

         # Mypy doesn't like instance assignments of class attributes. It's safe in our case tho bc it's what
         # we do in the `__init__` method. Long term, instead of the function_call_output attribute above, we
@@ -106,28 +119,16 @@ class DynamicSubworkflowDeploymentNode(SubworkflowDeploymentNode[ToolCallingState]):
             yield output

         # Add the result to the chat history
-        self.state.chat_history.append(
-            ChatMessage(
-                role="FUNCTION",
-                content=StringChatMessageContent(value=json.dumps(outputs, cls=DefaultStateEncoder)),
-            )
-        )
+        self._add_function_result_to_chat_history(outputs, self.state)


-class DynamicInlineSubworkflowNode(InlineSubworkflowNode[ToolCallingState, BaseInputs, BaseState]):
+class DynamicInlineSubworkflowNode(
+    InlineSubworkflowNode[ToolCallingState, BaseInputs, BaseState], FunctionCallNodeMixin
+):
     """Node that executes an inline subworkflow with function call output."""

-    function_call_output: List[PromptOutput]
-
     def run(self) -> Iterator[BaseOutput]:
-        if self.function_call_output and len(self.function_call_output) > 0:
-            function_call = self.function_call_output[0]
-            if function_call.type == "FUNCTION_CALL" and function_call.value is not None:
-                arguments = function_call.value.arguments
-            else:
-                arguments = {}
-        else:
-            arguments = {}
+        arguments = self._extract_function_arguments()

         self.subworkflow_inputs = arguments  # type: ignore[misc]

@@ -140,29 +141,16 @@ class DynamicInlineSubworkflowNode(InlineSubworkflowNode[ToolCallingState, BaseInputs, BaseState]):
             yield output

         # Add the result to the chat history
-        self.state.chat_history.append(
-            ChatMessage(
-                role="FUNCTION",
-                content=StringChatMessageContent(value=json.dumps(outputs, cls=DefaultStateEncoder)),
-            )
-        )
+        self._add_function_result_to_chat_history(outputs, self.state)


-class FunctionNode(BaseNode[ToolCallingState]):
+class FunctionNode(BaseNode[ToolCallingState], FunctionCallNodeMixin):
     """Node that executes a regular Python function with function call output."""

-    function_call_output: List[PromptOutput]
     function_definition: Callable[..., Any]

     def run(self) -> Iterator[BaseOutput]:
-        if self.function_call_output and len(self.function_call_output) > 0:
-            function_call = self.function_call_output[0]
-            if function_call.type == "FUNCTION_CALL" and function_call.value is not None:
-                arguments = function_call.value.arguments
-            else:
-                arguments = {}
-        else:
-            arguments = {}
+        arguments = self._extract_function_arguments()

         try:
             result = self.function_definition(**arguments)
@@ -174,16 +162,73 @@ class FunctionNode(BaseNode[ToolCallingState]):
             )

         # Add the result to the chat history
-        self.state.chat_history.append(
-            ChatMessage(
-                role="FUNCTION",
-                content=StringChatMessageContent(value=json.dumps(result, cls=DefaultStateEncoder)),
+        self._add_function_result_to_chat_history(result, self.state)
+
+        yield from []
+
+
+class ComposioNode(BaseNode[ToolCallingState], FunctionCallNodeMixin):
+    """Node that executes a Composio tool with function call output."""
+
+    composio_tool: ComposioToolDefinition
+
+    def run(self) -> Iterator[BaseOutput]:
+        # Extract arguments from function call
+        arguments = self._extract_function_arguments()
+
+        # HACK: Use first Composio API key found in environment variables
+        composio_api_key = None
+        common_env_var_names = ["COMPOSIO_API_KEY", "COMPOSIO_KEY"]
+
+        for env_var_name in common_env_var_names:
+            value = os.environ.get(env_var_name)
+            if value:
+                composio_api_key = value
+                break
+
+        if not composio_api_key:
+            raise NodeException(
+                message=(
+                    "No Composio API key found in environment variables. "
+                    "Please ensure one of these environment variables is set: "
+                )
+                + ", ".join(common_env_var_names),
+                code=WorkflowErrorCode.NODE_EXECUTION,
             )
-        )
+
+        try:
+            # Execute using ComposioService
+            composio_service = ComposioService(api_key=composio_api_key)
+            result = composio_service.execute_tool(tool_name=self.composio_tool.action, arguments=arguments)
+        except Exception as e:
+            raise NodeException(
+                message=f"Error executing Composio tool '{self.composio_tool.action}': {str(e)}",
+                code=WorkflowErrorCode.NODE_EXECUTION,
+            )
+
+        # Add result to chat history
+        self._add_function_result_to_chat_history(result, self.state)

         yield from []


+def create_composio_wrapper_function(tool_def: ComposioToolDefinition):
+    """Create a real Python function that wraps the Composio tool for prompt layer compatibility."""
+
+    def wrapper_function(**kwargs):
+        # This should never be called due to routing, but satisfies introspection
+        raise RuntimeError(
+            f"ComposioToolDefinition wrapper for '{tool_def.action}' should not be called directly. "
+            f"Execution should go through ComposioNode. This suggests a routing issue."
+        )
+
+    # Set proper function attributes for prompt layer introspection
+    wrapper_function.__name__ = tool_def.name
+    wrapper_function.__doc__ = tool_def.description
+
+    return wrapper_function
+
+
 def create_tool_router_node(
     ml_model: str,
     blocks: List[PromptBlock],
@@ -193,9 +238,18 @@ def create_tool_router_node(
     max_prompt_iterations: Optional[int] = None,
 ) -> Type[ToolRouterNode]:
     if functions and len(functions) > 0:
-        #
+        # Create dynamic ports and convert functions in a single loop
         Ports = type("Ports", (), {})
+        prompt_functions = []
+
         for function in functions:
+            # Convert ComposioToolDefinition to wrapper function for prompt layer
+            if isinstance(function, ComposioToolDefinition):
+                prompt_functions.append(create_composio_wrapper_function(function))
+            else:
+                prompt_functions.append(function)
+
+            # Create port for this function (using original function for get_function_name)
             function_name = get_function_name(function)

             # Avoid using lambda to capture function_name
@@ -218,6 +272,7 @@ def create_tool_router_node(
     else:
         # If no functions exist, create a simple Ports class with just a default port
         Ports = type("Ports", (), {"default": Port(default=True)})
+        prompt_functions = []

     # Add a chat history block to blocks only if one doesn't already exist
     has_chat_history_block = any(
@@ -234,6 +289,14 @@ def create_tool_router_node(
             )
         )

+    node_prompt_inputs = {
+        **(prompt_inputs or {}),
+        CHAT_HISTORY_VARIABLE: ConcatExpression[List[ChatMessage], List[ChatMessage]](
+            lhs=(prompt_inputs or {}).get(CHAT_HISTORY_VARIABLE, []),
+            rhs=ToolCallingState.chat_history,
+        ),
+    }
+
     node = cast(
         Type[ToolRouterNode],
         type(
@@ -242,8 +305,8 @@ def create_tool_router_node(
             {
                 "ml_model": ml_model,
                 "blocks": blocks,
-                "functions": functions,
-                "prompt_inputs": prompt_inputs,
+                "functions": prompt_functions,  # Use converted functions for prompt layer
+                "prompt_inputs": node_prompt_inputs,
                 "parameters": parameters,
                 "max_prompt_iterations": max_prompt_iterations,
                 "Ports": Ports,
@@ -285,6 +348,17 @@ def create_function_node(

         return node

+    elif isinstance(function, ComposioToolDefinition):
+        node = type(
+            f"ComposioNode_{function.name}",
+            (ComposioNode,),
+            {
+                "composio_tool": function,
+                "function_call_output": tool_router_node.Outputs.results,
+                "__module__": __name__,
+            },
+        )
+        return node
     elif is_workflow_class(function):
         node = type(
             f"DynamicInlineSubworkflowNode_{function.__name__}",
@@ -314,5 +388,7 @@ def get_function_name(function: Tool) -> str:
     if isinstance(function, DeploymentDefinition):
         name = str(function.deployment_id or function.deployment_name)
         return name.replace("-", "")
+    elif isinstance(function, ComposioToolDefinition):
+        return function.name
     else:
         return snake_case(function.__name__)
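Taken together, the `utils.py` changes let a `ComposioToolDefinition` act as a tool: `get_function_name` derives its name from the action, `create_composio_wrapper_function` builds a placeholder callable for the prompt layer, and actual execution is routed through `ComposioNode`, which reads `COMPOSIO_API_KEY` or `COMPOSIO_KEY` from the environment and calls `ComposioService`. A minimal sketch of that behavior, assuming the symbols are importable from the module paths shown in this diff (illustrative only, not part of the package's own test suite):

```python
import pytest

from vellum.workflows.nodes.displayable.tool_calling_node.utils import (
    create_composio_wrapper_function,
    get_function_name,
)
from vellum.workflows.types.definition import ComposioToolDefinition

# Example tool definition; the field values mirror the test fixtures in this release.
tool = ComposioToolDefinition(
    toolkit="GITHUB",
    action="GITHUB_CREATE_AN_ISSUE",
    description="Create a new issue in a GitHub repository",
)

# The tool's function name is derived from its action.
assert get_function_name(tool) == "github_create_an_issue"

# The wrapper exposes the tool's name and description to the prompt layer for introspection...
wrapper = create_composio_wrapper_function(tool)
assert wrapper.__name__ == tool.name
assert wrapper.__doc__ == tool.description

# ...but is never meant to be invoked directly; ComposioNode performs the real execution
# and raises a NodeException if no Composio API key is set in the environment.
with pytest.raises(RuntimeError):
    wrapper()
```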
vellum/workflows/types/core.py
CHANGED
@@ -13,7 +13,7 @@ from typing import ( # type: ignore[attr-defined]
 )

 from vellum.client.core.pydantic_utilities import UniversalBaseModel
-from vellum.workflows.types.definition import DeploymentDefinition
+from vellum.workflows.types.definition import ComposioToolDefinition, DeploymentDefinition

 if TYPE_CHECKING:
     from vellum.workflows.workflows.base import BaseWorkflow
@@ -50,4 +50,4 @@ class ConditionType(Enum):


 # Type alias for functions that can be called in tool calling nodes
-Tool = Union[Callable[..., Any], DeploymentDefinition, Type["BaseWorkflow"]]
+Tool = Union[Callable[..., Any], DeploymentDefinition, Type["BaseWorkflow"], ComposioToolDefinition]
vellum/workflows/types/definition.py
CHANGED
@@ -2,7 +2,7 @@ import importlib
 import inspect
 from types import FrameType
 from uuid import UUID
-from typing import Annotated, Any, Dict, Optional, Union
+from typing import Annotated, Any, Dict, Literal, Optional, Union

 from pydantic import BeforeValidator

@@ -97,3 +97,22 @@ class DeploymentDefinition(UniversalBaseModel):
         if not self._is_uuid():
             return self.deployment
         return None
+
+
+class ComposioToolDefinition(UniversalBaseModel):
+    """Represents a specific Composio action that can be used in Tool Calling Node"""
+
+    type: Literal["COMPOSIO"] = "COMPOSIO"
+
+    # Core identification
+    toolkit: str  # "GITHUB", "SLACK", etc.
+    action: str  # Specific action like "GITHUB_CREATE_AN_ISSUE"
+    description: str
+
+    # Optional cached metadata
+    display_name: Optional[str] = None
+
+    @property
+    def name(self) -> str:
+        """Generate a function name for this tool"""
+        return self.action.lower()
vellum/workflows/types/tests/test_definition.py
CHANGED
@@ -1,7 +1,7 @@
 import pytest
 from uuid import UUID

-from vellum.workflows.types.definition import DeploymentDefinition
+from vellum.workflows.types.definition import ComposioToolDefinition, DeploymentDefinition


 @pytest.mark.parametrize(
@@ -31,3 +31,16 @@ def test_deployment_definition(deployment_value, expected_deployment_id, expected_deployment_name):

     assert deployment.deployment_id == expected_deployment_id
     assert deployment.deployment_name == expected_deployment_name
+
+
+def test_composio_tool_definition_creation():
+    """Test that ComposioToolDefinition can be created with required fields."""
+    composio_tool = ComposioToolDefinition(
+        toolkit="GITHUB", action="GITHUB_CREATE_AN_ISSUE", description="Create a new issue in a GitHub repository"
+    )
+
+    assert composio_tool.toolkit == "GITHUB"
+    assert composio_tool.action == "GITHUB_CREATE_AN_ISSUE"
+    assert composio_tool.description == "Create a new issue in a GitHub repository"
+    assert composio_tool.display_name is None
+    assert composio_tool.name == "github_create_an_issue"
vellum/workflows/utils/functions.py
CHANGED
@@ -1,6 +1,6 @@
 import dataclasses
 import inspect
-from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type, Union, get_args, get_origin
+from typing import TYPE_CHECKING, Any, Callable, Dict, Literal, Optional, Type, Union, get_args, get_origin

 from pydantic import BaseModel
 from pydantic_core import PydanticUndefined
@@ -33,6 +33,18 @@ def compile_annotation(annotation: Optional[Any], defs: dict[str, Any]) -> dict:
     if get_origin(annotation) is Union:
         return {"anyOf": [compile_annotation(a, defs) for a in get_args(annotation)]}

+    if get_origin(annotation) is Literal:
+        values = list(get_args(annotation))
+        types = {type(value) for value in values}
+        if len(types) == 1:
+            value_type = types.pop()
+            if value_type in type_map:
+                return {"type": type_map[value_type], "enum": values}
+            else:
+                return {"enum": values}
+        else:
+            return {"enum": values}
+
     if get_origin(annotation) is dict:
         _, value_type = get_args(annotation)
         return {"type": "object", "additionalProperties": compile_annotation(value_type, defs)}
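The new `Literal` branch in `compile_annotation` means a parameter annotated with homogeneous string literals compiles to a JSON-schema `enum` with a concrete `type`, while mixed or unmapped literal values fall back to a bare `enum`, as the tests in the next file exercise. A small illustration, assuming `compile_function_definition` is the public entry point in `vellum.workflows.utils.functions` (the same helper those tests call):

```python
from typing import Literal

from vellum.workflows.utils.functions import compile_function_definition


# Example tool function; the Literal annotation now becomes an enum in the compiled schema.
def set_priority(level: Literal["low", "medium", "high"]) -> None:
    """Set the priority level."""


compiled = compile_function_definition(set_priority)

# All literal values share one Python type (str), so the schema gets both "type" and "enum".
assert compiled.parameters["properties"]["level"] == {
    "type": "string",
    "enum": ["low", "medium", "high"],
}
```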
vellum/workflows/utils/tests/test_functions.py
CHANGED
@@ -1,6 +1,8 @@
+import pytest
 from dataclasses import dataclass
+from enum import Enum
 from unittest.mock import Mock
-from typing import Dict, List, Optional, Union
+from typing import Dict, List, Literal, Optional, Union

 from pydantic import BaseModel

@@ -581,3 +583,32 @@ def test_compile_workflow_deployment_function_definition__defaults():
            "required": ["no_default"],
        },
    )
+
+
+@pytest.mark.parametrize(
+    "annotation,expected_schema",
+    [
+        (Literal["a", "b"], {"type": "string", "enum": ["a", "b"]}),
+        (Literal["a", 1], {"enum": ["a", 1]}),
+    ],
+)
+def test_compile_function_definition__literal(annotation, expected_schema):
+    def my_function(a: annotation):  # type: ignore
+        pass
+
+    compiled_function = compile_function_definition(my_function)
+    assert isinstance(compiled_function.parameters, dict)
+    assert compiled_function.parameters["properties"]["a"] == expected_schema
+
+
+def test_compile_function_definition__literal_type_not_in_map():
+    class MyEnum(Enum):
+        FOO = "foo"
+        BAR = "bar"
+
+    def my_function(a: Literal[MyEnum.FOO, MyEnum.BAR]):
+        pass
+
+    compiled_function = compile_function_definition(my_function)
+    assert isinstance(compiled_function.parameters, dict)
+    assert compiled_function.parameters["properties"]["a"] == {"enum": [MyEnum.FOO, MyEnum.BAR]}
{vellum_ai-1.0.3.dist-info → vellum_ai-1.0.5.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-ai
-Version: 1.0.3
+Version: 1.0.5
 Summary:
 License: MIT
 Requires-Python: >=3.9,<4.0
@@ -22,6 +22,8 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Typing :: Typed
 Requires-Dist: Jinja2 (>=3.1.0,<4.0.0)
 Requires-Dist: click (>=8.1.7,<9.0.0)
+Requires-Dist: composio-client (>=1.5.0,<2.0.0)
+Requires-Dist: composio-core (>=0.7.20,<1.0.0)
 Requires-Dist: docker (>=7.1.0,<8.0.0)
 Requires-Dist: httpx (>=0.21.2)
 Requires-Dist: openai (>=1.0.0,<2.0.0)
|