vellum-ai 1.4.2__py3-none-any.whl → 1.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vellum/__init__.py +14 -0
- vellum/client/__init__.py +3 -0
- vellum/client/core/client_wrapper.py +2 -2
- vellum/client/reference.md +160 -0
- vellum/client/resources/__init__.py +2 -0
- vellum/client/resources/integrations/__init__.py +4 -0
- vellum/client/resources/integrations/client.py +260 -0
- vellum/client/resources/integrations/raw_client.py +267 -0
- vellum/client/types/__init__.py +12 -0
- vellum/client/types/components_schemas_composio_execute_tool_request.py +5 -0
- vellum/client/types/components_schemas_composio_execute_tool_response.py +5 -0
- vellum/client/types/components_schemas_composio_tool_definition.py +5 -0
- vellum/client/types/composio_execute_tool_request.py +24 -0
- vellum/client/types/composio_execute_tool_response.py +24 -0
- vellum/client/types/composio_tool_definition.py +26 -0
- vellum/client/types/vellum_error_code_enum.py +2 -0
- vellum/client/types/vellum_sdk_error.py +1 -0
- vellum/client/types/workflow_event_error.py +1 -0
- vellum/resources/integrations/__init__.py +3 -0
- vellum/resources/integrations/client.py +3 -0
- vellum/resources/integrations/raw_client.py +3 -0
- vellum/types/components_schemas_composio_execute_tool_request.py +3 -0
- vellum/types/components_schemas_composio_execute_tool_response.py +3 -0
- vellum/types/components_schemas_composio_tool_definition.py +3 -0
- vellum/types/composio_execute_tool_request.py +3 -0
- vellum/types/composio_execute_tool_response.py +3 -0
- vellum/types/composio_tool_definition.py +3 -0
- vellum/workflows/descriptors/utils.py +3 -0
- vellum/workflows/emitters/vellum_emitter.py +4 -1
- vellum/workflows/integrations/__init__.py +5 -0
- vellum/workflows/integrations/tests/__init__.py +0 -0
- vellum/workflows/integrations/tests/test_vellum_integration_service.py +225 -0
- vellum/workflows/integrations/vellum_integration_service.py +96 -0
- vellum/workflows/nodes/bases/base.py +24 -3
- vellum/workflows/nodes/core/inline_subworkflow_node/node.py +5 -0
- vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +2 -5
- vellum/workflows/nodes/displayable/tool_calling_node/utils.py +38 -4
- vellum/workflows/runner/runner.py +132 -110
- vellum/workflows/utils/functions.py +29 -18
- vellum/workflows/utils/tests/test_functions.py +40 -0
- vellum/workflows/workflows/base.py +23 -5
- vellum/workflows/workflows/tests/test_base_workflow.py +99 -0
- {vellum_ai-1.4.2.dist-info → vellum_ai-1.5.1.dist-info}/METADATA +1 -1
- {vellum_ai-1.4.2.dist-info → vellum_ai-1.5.1.dist-info}/RECORD +64 -41
- vellum_ai-1.5.1.dist-info/entry_points.txt +4 -0
- vellum_ee/assets/node-definitions.json +833 -0
- vellum_ee/scripts/generate_node_definitions.py +89 -0
- vellum_ee/workflows/display/nodes/base_node_display.py +6 -3
- vellum_ee/workflows/display/nodes/vellum/api_node.py +4 -7
- vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py +19 -5
- vellum_ee/workflows/display/nodes/vellum/retry_node.py +2 -3
- vellum_ee/workflows/display/nodes/vellum/search_node.py +3 -6
- vellum_ee/workflows/display/nodes/vellum/templating_node.py +1 -1
- vellum_ee/workflows/display/nodes/vellum/tests/test_utils.py +2 -3
- vellum_ee/workflows/display/nodes/vellum/try_node.py +3 -4
- vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_attributes_serialization.py +5 -11
- vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_outputs_serialization.py +1 -1
- vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_ports_serialization.py +1 -1
- vellum_ee/workflows/display/types.py +3 -3
- vellum_ee/workflows/display/utils/expressions.py +10 -3
- vellum_ee/workflows/display/utils/vellum.py +9 -2
- vellum_ee/workflows/display/workflows/base_workflow_display.py +2 -2
- vellum_ai-1.4.2.dist-info/entry_points.txt +0 -3
- {vellum_ai-1.4.2.dist-info → vellum_ai-1.5.1.dist-info}/LICENSE +0 -0
- {vellum_ai-1.4.2.dist-info → vellum_ai-1.5.1.dist-info}/WHEEL +0 -0

vellum/workflows/runner/runner.py

@@ -42,6 +42,7 @@ from vellum.workflows.events import (
     WorkflowExecutionStreamingEvent,
 )
 from vellum.workflows.events.node import (
+    NodeEvent,
     NodeExecutionFulfilledBody,
     NodeExecutionInitiatedBody,
     NodeExecutionRejectedBody,
@@ -212,6 +213,10 @@ class WorkflowRunner(Generic[StateType]):
             descriptor for descriptor in self.workflow.Outputs if isinstance(descriptor.instance, StateValueReference)
         ]
 
+        self._background_thread: Optional[Thread] = None
+        self._cancel_thread: Optional[Thread] = None
+        self._stream_thread: Optional[Thread] = None
+
     def _snapshot_state(self, state: StateType, deltas: List[StateDelta]) -> StateType:
         self._workflow_event_inner_queue.put(
             WorkflowExecutionSnapshottedEvent(
@@ -259,17 +264,36 @@ class WorkflowRunner(Generic[StateType]):
         return event
 
     def _run_work_item(self, node: BaseNode[StateType], span_id: UUID) -> None:
+        for event in self.run_node(node, span_id):
+            self._workflow_event_inner_queue.put(event)
+
+    def run_node(
+        self,
+        node: "BaseNode[StateType]",
+        span_id: UUID,
+    ) -> Generator[NodeEvent, None, None]:
+        """
+        Execute a single node and yield workflow events.
+
+        Args:
+            node: The node instance to execute
+            span_id: Unique identifier for this node execution
+
+        Yields:
+            NodeExecutionEvent: Events emitted during node execution (initiated, streaming, fulfilled, rejected)
+        """
         execution = get_execution_context()
-
-
-
-
-
-
-
-
-
-        )
+
+        node_output_mocks_map = self.workflow.context.node_output_mocks_map
+
+        yield NodeExecutionInitiatedEvent(
+            trace_id=execution.trace_id,
+            span_id=span_id,
+            body=NodeExecutionInitiatedBody(
+                node_definition=node.__class__,
+                inputs=node._inputs,
+            ),
+            parent=execution.parent_context,
         )
 
         logger.debug(f"Started running node: {node.__class__.__name__}")
@@ -282,7 +306,7 @@ class WorkflowRunner(Generic[StateType]):
         )
         node_run_response: NodeRunResponse
         was_mocked: Optional[bool] = None
-        mock_candidates =
+        mock_candidates = node_output_mocks_map.get(node.Outputs) or []
         for mock_candidate in mock_candidates:
             if mock_candidate.when_condition.resolve(node.state):
                 node_run_response = mock_candidate.then_outputs
@@ -312,8 +336,9 @@ class WorkflowRunner(Generic[StateType]):
         streaming_output_queues: Dict[str, Queue] = {}
         outputs = node.Outputs()
 
-        def initiate_node_streaming_output(
-
+        def initiate_node_streaming_output(
+            output: BaseOutput,
+        ) -> Generator[NodeExecutionStreamingEvent, None, None]:
             streaming_output_queues[output.name] = Queue()
             output_descriptor = OutputReference(
                 name=output.name,
@@ -325,57 +350,51 @@ class WorkflowRunner(Generic[StateType]):
             node.state.meta.node_outputs[output_descriptor] = streaming_output_queues[output.name]
             initiated_output: BaseOutput = BaseOutput(name=output.name)
             initiated_ports = initiated_output > ports
-
-
-
-
-
-
-
-                    invoked_ports=initiated_ports,
-                ),
-                parent=execution.parent_context,
+            yield NodeExecutionStreamingEvent(
+                trace_id=execution.trace_id,
+                span_id=span_id,
+                body=NodeExecutionStreamingBody(
+                    node_definition=node.__class__,
+                    output=initiated_output,
+                    invoked_ports=initiated_ports,
                 ),
+                parent=execution.parent_context,
             )
 
         with execution_context(parent_context=updated_parent_context, trace_id=execution.trace_id):
             for output in node_run_response:
                 invoked_ports = output > ports
                 if output.is_initiated:
-                    initiate_node_streaming_output(output)
+                    yield from initiate_node_streaming_output(output)
                 elif output.is_streaming:
                     if output.name not in streaming_output_queues:
-                        initiate_node_streaming_output(output)
+                        yield from initiate_node_streaming_output(output)
 
                     streaming_output_queues[output.name].put(output.delta)
-
-
-
-
-
-
-
-                        invoked_ports=invoked_ports,
-                    ),
-                    parent=execution.parent_context,
+                    yield NodeExecutionStreamingEvent(
+                        trace_id=execution.trace_id,
+                        span_id=span_id,
+                        body=NodeExecutionStreamingBody(
+                            node_definition=node.__class__,
+                            output=output,
+                            invoked_ports=invoked_ports,
                         ),
+                        parent=execution.parent_context,
                     )
                 elif output.is_fulfilled:
                     if output.name in streaming_output_queues:
                         streaming_output_queues[output.name].put(undefined)
 
                     setattr(outputs, output.name, output.value)
-
-
-
-
-
-
-
-
-
-                        parent=execution.parent_context,
-                    )
+                    yield NodeExecutionStreamingEvent(
+                        trace_id=execution.trace_id,
+                        span_id=span_id,
+                        body=NodeExecutionStreamingBody(
+                            node_definition=node.__class__,
+                            output=output,
+                            invoked_ports=invoked_ports,
+                        ),
+                        parent=execution.parent_context,
                     )
 
         node.state.meta.node_execution_cache.fulfill_node_execution(node.__class__, span_id)
@@ -390,66 +409,57 @@ class WorkflowRunner(Generic[StateType]):
             node.state.meta.node_outputs[descriptor] = output_value
 
             invoked_ports = ports(outputs, node.state)
-
-
-
-
-
-
-
-
-
-
-                parent=execution.parent_context,
-            )
+            yield NodeExecutionFulfilledEvent(
+                trace_id=execution.trace_id,
+                span_id=span_id,
+                body=NodeExecutionFulfilledBody(
+                    node_definition=node.__class__,
+                    outputs=outputs,
+                    invoked_ports=invoked_ports,
+                    mocked=was_mocked,
+                ),
+                parent=execution.parent_context,
             )
         except NodeException as e:
             logger.info(e)
            captured_stacktrace = traceback.format_exc()
 
-
-
-
-
-
-
-
-
-
-                parent=execution.parent_context,
-            )
+            yield NodeExecutionRejectedEvent(
+                trace_id=execution.trace_id,
+                span_id=span_id,
+                body=NodeExecutionRejectedBody(
+                    node_definition=node.__class__,
+                    error=e.error,
+                    stacktrace=captured_stacktrace,
+                ),
+                parent=execution.parent_context,
             )
         except WorkflowInitializationException as e:
             logger.info(e)
             captured_stacktrace = traceback.format_exc()
-
-
-
-
-
-
-
-
-
-                parent=execution.parent_context,
-            )
+            yield NodeExecutionRejectedEvent(
+                trace_id=execution.trace_id,
+                span_id=span_id,
+                body=NodeExecutionRejectedBody(
+                    node_definition=node.__class__,
+                    error=e.error,
+                    stacktrace=captured_stacktrace,
+                ),
+                parent=execution.parent_context,
             )
         except InvalidExpressionException as e:
             logger.info(e)
             captured_stacktrace = traceback.format_exc()
-
-
-
-
-
-
-
-
-
-                parent=execution.parent_context,
-            )
+            yield NodeExecutionRejectedEvent(
+                trace_id=execution.trace_id,
+                span_id=span_id,
+                body=NodeExecutionRejectedBody(
+                    node_definition=node.__class__,
+                    error=e.error,
+                    stacktrace=captured_stacktrace,
+                ),
+                parent=execution.parent_context,
             )
-
         except Exception as e:
             error_message = self._parse_error_message(e)
             if error_message is None:
@@ -459,19 +469,17 @@ class WorkflowRunner(Generic[StateType]):
             else:
                 error_code = WorkflowErrorCode.NODE_EXECUTION
 
-
-
-
-
-
-
-
-
-                        code=error_code,
-                    ),
+            yield NodeExecutionRejectedEvent(
+                trace_id=execution.trace_id,
+                span_id=span_id,
+                body=NodeExecutionRejectedBody(
+                    node_definition=node.__class__,
+                    error=WorkflowError(
+                        message=error_message,
+                        code=error_code,
                     ),
-                parent=execution.parent_context,
                 ),
+                parent=execution.parent_context,
             )
 
         logger.debug(f"Finished running node: {node.__class__.__name__}")
@@ -911,20 +919,20 @@ class WorkflowRunner(Generic[StateType]):
         return False
 
     def _generate_events(self) -> Generator[WorkflowEvent, None, None]:
-
+        self._background_thread = Thread(
             target=self._run_background_thread,
             name=f"{self.workflow.__class__.__name__}.background_thread",
         )
-
+        self._background_thread.start()
 
         cancel_thread_kill_switch = ThreadingEvent()
         if self._cancel_signal:
-
+            self._cancel_thread = Thread(
                 target=self._run_cancel_thread,
                 name=f"{self.workflow.__class__.__name__}.cancel_thread",
                 kwargs={"kill_switch": cancel_thread_kill_switch},
             )
-
+            self._cancel_thread.start()
 
         event: WorkflowEvent
         if self._is_resuming:
@@ -935,13 +943,13 @@ class WorkflowRunner(Generic[StateType]):
                 yield self._emit_event(event)
 
         # The extra level of indirection prevents the runner from waiting on the caller to consume the event stream
-
+        self._stream_thread = Thread(
             target=self._stream,
             name=f"{self.workflow.__class__.__name__}.stream_thread",
         )
-
+        self._stream_thread.start()
 
-        while
+        while self._stream_thread.is_alive():
            try:
                event = self._workflow_event_outer_queue.get(timeout=0.1)
            except Empty:
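The polling loop above (`get(timeout=0.1)` guarded by `is_alive()`) is what lets the runner notice the stream thread exiting instead of blocking forever on an empty queue. A minimal, self-contained sketch of the same pattern using only the standard library (all names here are illustrative, not from the package):

```python
from queue import Empty, Queue
from threading import Thread
import time

q: Queue = Queue()

def produce() -> None:
    for i in range(3):
        time.sleep(0.05)
        q.put(i)

producer = Thread(target=produce)
producer.start()

while producer.is_alive():
    try:
        # The short timeout keeps the loop responsive: each pass re-checks
        # whether the producer thread is still running.
        item = q.get(timeout=0.1)
    except Empty:
        continue
    print(item)

producer.join()
while not q.empty():
    # Drain anything enqueued between the last get() and thread exit.
    print(q.get())
```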
@@ -971,3 +979,17 @@ class WorkflowRunner(Generic[StateType]):
 
     def stream(self) -> WorkflowEventStream:
         return WorkflowEventGenerator(self._generate_events(), self._initial_state.meta.span_id)
+
+    def join(self) -> None:
+        """
+        Wait for all background threads to complete.
+        This ensures all pending work is finished before the runner terminates.
+        """
+        if self._stream_thread and self._stream_thread.is_alive():
+            self._stream_thread.join()
+
+        if self._background_thread and self._background_thread.is_alive():
+            self._background_thread.join()
+
+        if self._cancel_thread and self._cancel_thread.is_alive():
+            self._cancel_thread.join()
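The new `join()` checks each handle because the threads are created lazily in `_generate_events()` and stay `None` until a stream is consumed. A small sketch of this guarded-join idiom (the `Runner` class here is hypothetical, not the SDK's):

```python
from threading import Thread
from typing import Optional

class Runner:
    def __init__(self) -> None:
        # Thread handles start as None and are only created on demand.
        self._worker: Optional[Thread] = None

    def start(self) -> None:
        self._worker = Thread(target=lambda: None)
        self._worker.start()

    def join(self) -> None:
        # Safe to call whether or not start() ever ran, and whether or not
        # the thread has already finished.
        if self._worker and self._worker.is_alive():
            self._worker.join()

r = Runner()
r.join()   # no-op: nothing started yet
r.start()
r.join()   # waits for the worker to finish
```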

vellum/workflows/utils/functions.py

@@ -10,6 +10,7 @@ from vellum import Vellum
 from vellum.client.types.function_definition import FunctionDefinition
 from vellum.workflows.integrations.composio_service import ComposioService
 from vellum.workflows.integrations.mcp_service import MCPService
+from vellum.workflows.integrations.vellum_integration_service import VellumIntegrationService
 from vellum.workflows.types.definition import (
     ComposioToolDefinition,
     DeploymentDefinition,
@@ -22,7 +23,7 @@ from vellum.workflows.utils.vellum_variables import vellum_variable_type_to_open
 if TYPE_CHECKING:
     from vellum.workflows.workflows.base import BaseWorkflow
 
-type_map = {
+type_map: dict[Any, str] = {
     str: "string",
     int: "integer",
     float: "number",
@@ -32,8 +33,13 @@ type_map = {
     None: "null",
     type(None): "null",
     inspect._empty: "null",
+    "None": "null",
 }
 
+for k, v in list(type_map.items()):
+    if isinstance(k, type):
+        type_map[k.__name__] = v
+
 
 def compile_annotation(annotation: Optional[Any], defs: dict[str, Any]) -> dict:
     if annotation is None:
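The alias loop means every class key in `type_map` is also reachable by its `__name__`, which is what lets string annotations (e.g. under `from __future__ import annotations`) resolve to the same JSON Schema types. A quick sketch of the effect, using a reduced copy of the mapping:

```python
# Reduced copy of the mapping above, before alias expansion.
type_map: dict = {str: "string", int: "integer", float: "number", None: "null"}

# Same alias pass as in the diff: every class key also gets its __name__.
for k, v in list(type_map.items()):
    if isinstance(k, type):
        type_map[k.__name__] = v

assert type_map["str"] == "string"    # the string annotation "str" now resolves
assert type_map[int] == "integer"     # class keys keep working
assert type_map["float"] == "number"
```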
@@ -90,22 +96,21 @@ def compile_annotation(annotation: Optional[Any], defs: dict[str, Any]) -> dict
         defs[annotation.__name__] = {"type": "object", "properties": properties, "required": required}
         return {"$ref": f"#/$defs/{annotation.__name__}"}
 
-    if issubclass(annotation, BaseModel):
+    if inspect.isclass(annotation) and issubclass(annotation, BaseModel):
         if annotation.__name__ not in defs:
             properties = {}
             required = []
-            for field_name,
-            #
-
-                properties[field_name] = compile_annotation(field_annotation, defs)
+            for field_name, field_info in annotation.model_fields.items():
+                # field_info is a FieldInfo object which has an annotation attribute
+                properties[field_name] = compile_annotation(field_info.annotation, defs)
 
-                if
-                    properties[field_name]["description"] =
+                if field_info.description is not None:
+                    properties[field_name]["description"] = field_info.description
 
-                if
+                if field_info.default is PydanticUndefined:
                     required.append(field_name)
                 else:
-                    properties[field_name]["default"] = _compile_default_value(
+                    properties[field_name]["default"] = _compile_default_value(field_info.default)
             defs[annotation.__name__] = {"type": "object", "properties": properties, "required": required}
 
         return {"$ref": f"#/$defs/{annotation.__name__}"}
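For reference, the `BaseModel` branch above can be read as the following standalone sketch, assuming pydantic v2 (`model_fields`, `FieldInfo`, `PydanticUndefined`); `compile_model` is a simplified stand-in for the recursive `compile_annotation` and hard-codes a tiny type lookup:

```python
from typing import Any
from pydantic import BaseModel, Field
from pydantic_core import PydanticUndefined

def compile_model(model: type[BaseModel], defs: dict[str, Any]) -> dict:
    properties: dict[str, Any] = {}
    required: list[str] = []
    for field_name, field_info in model.model_fields.items():
        # field_info is a FieldInfo; .annotation holds the declared type.
        json_type = {str: "string", int: "integer", float: "number"}.get(field_info.annotation, "object")
        properties[field_name] = {"type": json_type}
        if field_info.description is not None:
            properties[field_name]["description"] = field_info.description
        if field_info.default is PydanticUndefined:
            required.append(field_name)  # no default means the field is required
        else:
            properties[field_name]["default"] = field_info.default
    defs[model.__name__] = {"type": "object", "properties": properties, "required": required}
    return {"$ref": f"#/$defs/{model.__name__}"}

class Item(BaseModel):
    name: str = Field(description="Display name")
    quantity: int = 1

defs: dict[str, Any] = {}
print(compile_model(Item, defs))   # {'$ref': '#/$defs/Item'}
print(defs["Item"]["required"])    # ['name']
```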
@@ -321,20 +326,26 @@ def compile_composio_tool_definition(tool_def: ComposioToolDefinition) -> Functi
 def compile_vellum_integration_tool_definition(tool_def: VellumIntegrationToolDefinition) -> FunctionDefinition:
     """Compile a VellumIntegrationToolDefinition into a FunctionDefinition.
 
-    TODO: Implement when VellumIntegrationService is created.
-
     Args:
         tool_def: The VellumIntegrationToolDefinition to compile
 
     Returns:
         FunctionDefinition with tool parameters and description
     """
-
-
-
-
-
-
+    try:
+        service = VellumIntegrationService()
+        tool_details = service.get_tool_definition(
+            integration=tool_def.integration, provider=tool_def.provider.value, tool_name=tool_def.name
+        )
+
+        return FunctionDefinition(
+            name=tool_def.name,
+            description=tool_details.get("description", tool_def.description),
+            parameters=tool_details.get("parameters", {}),
+        )
+    except Exception:
+        # Fallback for service failures
+        return FunctionDefinition(name=tool_def.name, description=tool_def.description, parameters={})
 
 
 def use_tool_inputs(**inputs):

vellum/workflows/utils/tests/test_functions.py

@@ -733,3 +733,43 @@ def test_compile_function_definition__tuples(annotation, expected_schema):
     compiled_function = compile_function_definition(my_function)
     assert isinstance(compiled_function.parameters, dict)
     assert compiled_function.parameters["properties"]["a"] == expected_schema
+
+
+def test_compile_function_definition__string_annotations_with_future_imports():
+    """Test that string annotations work with __future__ import annotations."""
+    # This simulates what happens when using `from __future__ import annotations`
+    # where type annotations become string literals at runtime
+
+    def my_function_with_string_annotations(
+        a: "str",
+        b: "int",
+        c: "float",
+        d: "bool",
+        e: "list",
+        f: "dict",
+        g: "None",
+    ):
+        """Function with string type annotations."""
+        pass
+
+    # WHEN compiling the function
+    compiled_function = compile_function_definition(my_function_with_string_annotations)
+
+    # THEN it should return the compiled function definition with proper types
+    assert compiled_function == FunctionDefinition(
+        name="my_function_with_string_annotations",
+        description="Function with string type annotations.",
+        parameters={
+            "type": "object",
+            "properties": {
+                "a": {"type": "string"},
+                "b": {"type": "integer"},
+                "c": {"type": "number"},
+                "d": {"type": "boolean"},
+                "e": {"type": "array"},
+                "f": {"type": "object"},
+                "g": {"type": "null"},
+            },
+            "required": ["a", "b", "c", "d", "e", "f", "g"],
+        },
+    )

vellum/workflows/workflows/base.py

@@ -31,6 +31,7 @@ from vellum.workflows.edges import Edge
 from vellum.workflows.emitters.base import BaseWorkflowEmitter
 from vellum.workflows.errors import WorkflowError, WorkflowErrorCode
 from vellum.workflows.events.node import (
+    NodeEvent,
     NodeExecutionFulfilledBody,
     NodeExecutionFulfilledEvent,
     NodeExecutionInitiatedBody,
@@ -252,6 +253,7 @@ class BaseWorkflow(Generic[InputsType, StateType], BaseExecutable, metaclass=_Ba
         self.resolvers = resolvers or (self.resolvers if hasattr(self, "resolvers") else [])
         self._store = store or Store()
         self._execution_context = self._context.execution_context
+        self._current_runner: Optional[WorkflowRunner] = None
 
         # Register context with all emitters
         for emitter in self.emitters:
@@ -412,7 +414,7 @@ class BaseWorkflow(Generic[InputsType, StateType], BaseExecutable, metaclass=_Ba
         subworkflows or nodes that utilizes threads.
         """
 
-
+        runner = WorkflowRunner(
             self,
             inputs=inputs,
             state=state,
@@ -423,7 +425,9 @@ class BaseWorkflow(Generic[InputsType, StateType], BaseExecutable, metaclass=_Ba
             node_output_mocks=node_output_mocks,
             max_concurrency=max_concurrency,
             init_execution_context=self._execution_context,
-        )
+        )
+        self._current_runner = runner
+        events = runner.stream()
         first_event: Optional[Union[WorkflowExecutionInitiatedEvent, WorkflowExecutionResumedEvent]] = None
         last_event = None
         for event in events:
@@ -531,7 +535,7 @@ class BaseWorkflow(Generic[InputsType, StateType], BaseExecutable, metaclass=_Ba
         """
 
         should_yield = event_filter or workflow_event_filter
-
+        runner = WorkflowRunner(
             self,
             inputs=inputs,
             state=state,
@@ -542,7 +546,9 @@ class BaseWorkflow(Generic[InputsType, StateType], BaseExecutable, metaclass=_Ba
             node_output_mocks=node_output_mocks,
             max_concurrency=max_concurrency,
             init_execution_context=self._execution_context,
-        )
+        )
+        self._current_runner = runner
+        runner_stream = runner.stream()
 
         def _generate_filtered_events() -> Generator[BaseWorkflow.WorkflowEvent, None, None]:
             for event in runner_stream:
@@ -559,6 +565,15 @@ class BaseWorkflow(Generic[InputsType, StateType], BaseExecutable, metaclass=_Ba
             # https://app.shortcut.com/vellum/story/4327
             pass
 
+    def run_node(
+        self, node: Type[BaseNode], *, inputs: Optional[Dict[str, Any]] = None
+    ) -> Generator[NodeEvent, None, None]:
+        runner = WorkflowRunner(self)
+        span_id = uuid4()
+        node_instance = node(state=self.get_default_state(), context=self._context, inputs=inputs)
+
+        return runner.run_node(node=node_instance, span_id=span_id)
+
     @classmethod
     @lru_cache
     def _get_parameterized_classes(
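The new `BaseWorkflow.run_node` makes single-node execution available without running the whole graph. A hedged usage sketch under the assumption that the standard SDK imports below are available; `MyNode` and `MyWorkflow` are illustrative, not from the package:

```python
from vellum.workflows import BaseWorkflow
from vellum.workflows.nodes.bases import BaseNode

class MyNode(BaseNode):
    class Outputs(BaseNode.Outputs):
        greeting: str

    def run(self) -> BaseNode.Outputs:
        return self.Outputs(greeting="hello")

class MyWorkflow(BaseWorkflow):
    graph = MyNode

workflow = MyWorkflow()

# Execute only MyNode against the workflow's default state; per the diff,
# events arrive as initiated, zero or more streaming, then fulfilled/rejected.
for event in workflow.run_node(MyNode):
    print(type(event).__name__)
```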
@@ -689,9 +704,12 @@ class BaseWorkflow(Generic[InputsType, StateType], BaseExecutable, metaclass=_Ba
 
     def join(self) -> None:
         """
-        Wait for all emitters to complete their background work.
+        Wait for all emitters and runner to complete their background work.
         This ensures all pending events are processed before the workflow terminates.
         """
+        if self._current_runner:
+            self._current_runner.join()
+
         for emitter in self.emitters:
             emitter.join()
 
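Taken together with `WorkflowRunner.join` above, a caller can consume a stream and then deterministically wait for background work to settle. A sketch, reusing the hypothetical `MyWorkflow` from the previous example:

```python
workflow = MyWorkflow()

for event in workflow.stream():
    # Events are produced by the runner's stream thread and drained here.
    print(type(event).__name__)

# join() now first joins the current runner's stream/background/cancel
# threads, then joins each emitter, so no pending work outlives this call.
workflow.join()
```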