vellum-workflow-server 1.4.2__py3-none-any.whl → 1.4.2.post2__py3-none-any.whl

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
vellum_workflow_server-1.4.2.dist-info/METADATA → vellum_workflow_server-1.4.2.post2.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: vellum-workflow-server
- Version: 1.4.2
+ Version: 1.4.2.post2
  Summary:
  License: AGPL
  Requires-Python: >=3.9.0,<4
vellum_workflow_server-1.4.2.dist-info/RECORD → vellum_workflow_server-1.4.2.post2.dist-info/RECORD

@@ -4,17 +4,17 @@ workflow_server/api/auth_middleware.py,sha256=IlZaCiwZ5nwQqk5sYQorvOFj7lt0p1ZSSE
  workflow_server/api/healthz_view.py,sha256=itiRvBDBXncrw8Kbbc73UZLwqMAhgHOR3uSre_dAfgY,404
  workflow_server/api/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  workflow_server/api/tests/test_input_display_mapping.py,sha256=drBZqMudFyB5wgiUOcMgRXz7E7ge-Qgxbstw4E4f0zE,2211
- workflow_server/api/tests/test_workflow_view.py,sha256=RlAw1tHeIlnOXGrFQN-w3EOLPZkhp6Dfy6d1r7kU5oc,22573
+ workflow_server/api/tests/test_workflow_view.py,sha256=d46UNMZUJbIKLiTJkjIsVpgqCJMwCe4LL6RjFkfx_Y4,29178
  workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=FaEIgGsbq8M7ZF2opVjBdGCYeRPF_vqsUhFTruLInxA,31358
- workflow_server/api/workflow_view.py,sha256=UZTxWImM9kmof819SCa3ljJKiYEsCrBFD8vp2_f-zAg,21719
+ workflow_server/api/workflow_view.py,sha256=qayjwq18DSbmuMa6ROksc68v4XLcQOr7hAH3yRV5mlQ,21443
  workflow_server/code_exec_runner.py,sha256=E-HsjAL53L-znSMPg7lDiQNzyCjL6W076ZoWWbrSRrU,2217
  workflow_server/config.py,sha256=qmmTr6ty3ZN5LDOFs3TfUxYshYe6Mmn_LanplHHeE9Q,1796
  workflow_server/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  workflow_server/core/cancel_workflow.py,sha256=Ffkc3mzmrdMEUcD-sHfEhX4IwVrka-E--SxKA1dUfIU,2185
  workflow_server/core/events.py,sha256=24MA66DVQuaLJJcZrS8IL1Zq4Ohi9CoouKZ5VgoH3Cs,1402
- workflow_server/core/executor.py,sha256=5cji5KQSukLrABmihJp9cgKjXS145TocOz2cOcnof04,18962
- workflow_server/core/utils.py,sha256=cmwHbKCfXqtUutBD3akGus0Ga7a1xG3zlOw-jEMx6mI,1795
- workflow_server/core/workflow_executor_context.py,sha256=VafZg74t_GQ3_2DEWVroy38rSy_spcAw4c3NrOOWOKY,3198
+ workflow_server/core/executor.py,sha256=zr5iruCsAMI5yHoW_yWzAhCo1U4Si9Kr9tjOmBKQf4A,20102
+ workflow_server/core/utils.py,sha256=si0NB4Suurc-mn8NYdn59xM9CkPrfOP1aWEVrZvifDI,1929
+ workflow_server/core/workflow_executor_context.py,sha256=w0nFhtu54GX36ZzmaqLTD3-ssPtoFd9QX02dPgeIbBM,3615
  workflow_server/server.py,sha256=QBU12AaAfAgLqfCDBd24qIJl_mbheiq0-hfcWV7rZM4,1234
  workflow_server/start.py,sha256=pkwRcms6I4tkVHP06LdrZY6rG_DFHfBx4ioY5X91W5k,2264
  workflow_server/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -28,7 +28,7 @@ workflow_server/utils/tests/test_sentry_integration.py,sha256=LGmWiaLhFrx-jslrRj
  workflow_server/utils/tests/test_system_utils.py,sha256=_4GwXvVvU5BrATxUEWwQIPg0bzQXMWBtiBmjP8MTxJM,4314
  workflow_server/utils/tests/test_utils.py,sha256=0Nq6du8o-iBtTrip9_wgHES53JSiJbVdSXaBnPobw3s,6930
  workflow_server/utils/utils.py,sha256=ZPoM1Suhid22dpB8oEFLux8wx-9iyzmSfWuYxSCrgWk,4774
- vellum_workflow_server-1.4.2.dist-info/METADATA,sha256=ZevXg46YiyUvi73O9rmNa3tYSRfKL1JPRM5wKH-ypxs,2267
- vellum_workflow_server-1.4.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- vellum_workflow_server-1.4.2.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
- vellum_workflow_server-1.4.2.dist-info/RECORD,,
+ vellum_workflow_server-1.4.2.post2.dist-info/METADATA,sha256=mfiAFH3GV0WI9qDRFRCIwIHE3e8DcxZfbhvOIlJWpSM,2273
+ vellum_workflow_server-1.4.2.post2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ vellum_workflow_server-1.4.2.post2.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
+ vellum_workflow_server-1.4.2.post2.dist-info/RECORD,,
workflow_server/api/tests/test_workflow_view.py

@@ -1,7 +1,8 @@
+ import json
  import logging
  import re
  from unittest.mock import patch
- from uuid import UUID
+ from uuid import UUID, uuid4

  from deepdiff import DeepDiff

@@ -534,3 +535,197 @@ def test_serialize_route__with_invalid_workspace_api_key():

      # AND the response should contain the serialization result
      assert "exec_config" in response.json
+
+
+ def test_stream_node_route__with_node_id():
+     """
+     Tests that the stream-node endpoint works with node_id.
+     """
+     # GIVEN a valid request body with node_id
+     node_id = uuid4()
+     span_id = uuid4()
+     request_body = {
+         "timeout": 360,
+         "execution_id": str(span_id),
+         "node_id": str(node_id),
+         "inputs": [],
+         "environment_api_key": "test",
+         "module": "workflow",
+         "files": {
+             "__init__.py": "from .display import *",
+             "workflow.py": """\
+ from vellum.workflows import BaseWorkflow
+ from .nodes.test_node import TestNode
+
+ class Workflow(BaseWorkflow):
+     graph = TestNode
+
+     class Outputs(BaseWorkflow.Outputs):
+         result = TestNode.Outputs.value
+ """,
+             "nodes/__init__.py": "from .test_node import TestNode\n__all__ = ['TestNode']",
+             "nodes/test_node.py": """\
+ from vellum.workflows.nodes import BaseNode
+
+ class TestNode(BaseNode):
+     class Outputs(BaseNode.Outputs):
+         value = "test_result"
+ """,
+             "display/__init__.py": "from .nodes import *\nfrom .workflow import *",
+             "display/workflow.py": """\
+ from vellum_ee.workflows.display.workflows import BaseWorkflowDisplay
+ """,
+             "display/nodes/__init__.py": "from .test_node import TestNodeDisplay\n__all__ = ['TestNodeDisplay']",
+             "display/nodes/test_node.py": f"""\
+ from uuid import UUID
+ from vellum_ee.workflows.display.nodes import BaseNodeDisplay
+ from ...nodes.test_node import TestNode
+
+ class TestNodeDisplay(BaseNodeDisplay[TestNode]):
+     node_id = UUID("{node_id}")
+ """,
+         },
+     }
+
+     flask_app = create_app()
+
+     # WHEN we call the stream-node route
+     with flask_app.test_client() as test_client:
+         response = test_client.post("/workflow/stream-node", json=request_body)
+
+     # THEN we get a 200 response
+     assert response.status_code == 200
+
+     events = [json.loads(line) for line in response.data.decode().strip().split("\n") if line.strip()]
+     assert len(events) == 4
+     assert events[0]["name"] == "vembda.execution.initiated"
+     assert events[1]["name"] == "node.execution.initiated"
+     assert events[2]["name"] == "node.execution.fulfilled"
+     assert events[3]["name"] == "vembda.execution.fulfilled"
+
+
+ def test_stream_node_route__with_node_module_and_name_backward_compatibility():
+     """
+     Tests that the stream-node endpoint still works with node_module and node_name for backward compatibility.
+     """
+     # GIVEN a valid request body with node_module and node_name (old format)
+     span_id = uuid4()
+     request_body = {
+         "timeout": 360,
+         "execution_id": str(span_id),
+         "node_module": "nodes.test_node",
+         "node_name": "TestNode",
+         "inputs": [],
+         "environment_api_key": "test",
+         "module": "workflow",
+         "files": {
+             "__init__.py": "",
+             "workflow.py": """\
+ from vellum.workflows import BaseWorkflow
+ from .nodes.test_node import TestNode
+
+ class Workflow(BaseWorkflow):
+     graph = TestNode
+ """,
+             "nodes/__init__.py": "from .test_node import TestNode",
+             "nodes/test_node.py": """\
+ from vellum.workflows.nodes import BaseNode
+
+ class TestNode(BaseNode):
+     class Outputs(BaseNode.Outputs):
+         value = "test_result"
+ """,
+         },
+     }
+
+     flask_app = create_app()
+
+     # WHEN we call the stream-node route
+     with flask_app.test_client() as test_client:
+         response = test_client.post("/workflow/stream-node", json=request_body)
+
+     # THEN we get a 200 response
+     assert response.status_code == 200
+
+     events = [json.loads(line) for line in response.data.decode().strip().split("\n") if line.strip()]
+     assert events[0]["name"] == "vembda.execution.initiated"
+     assert events[1]["name"] == "node.execution.initiated", json.dumps(events[1]["body"])
+     assert events[2]["name"] == "node.execution.fulfilled"
+     assert events[3]["name"] == "vembda.execution.fulfilled"
+     assert len(events) == 4
+
+
+ def test_stream_node_route__missing_node_info_validation():
+     """
+     Tests that the stream-node endpoint returns a validation error when neither
+     node_id nor node_module/node_name is provided.
+     """
+     # GIVEN a request body missing node identification
+     span_id = uuid4()
+     request_body = {
+         "timeout": 360,
+         "execution_id": str(span_id),
+         "inputs": [],
+         "environment_api_key": "test",
+         "module": "workflow",
+         "files": {"__init__.py": "", "workflow.py": ""},
+     }
+
+     flask_app = create_app()
+
+     # WHEN we call the stream-node route
+     with flask_app.test_client() as test_client:
+         response = test_client.post("/workflow/stream-node", json=request_body)
+
+     # THEN we get a 400 response
+     assert response.status_code == 400
+
+     # AND we get a validation error message
+     assert "Either node_id or both node_module and node_name must be provided" in response.get_json()["detail"]
+
+
+ def test_stream_node_route__invalid_node_id():
+     """
+     Tests that the stream-node endpoint currently returns a 200 response with a
+     rejected vembda event for an unknown node_id (see the TODOs below about 4xx).
+     """
+     # GIVEN a request body with an invalid node_id
+     invalid_node_id = uuid4()
+     span_id = uuid4()
+     request_body = {
+         "timeout": 360,
+         "execution_id": str(span_id),
+         "node_id": str(invalid_node_id),
+         "inputs": [],
+         "environment_api_key": "test",
+         "module": "workflow",
+         "files": {
+             "__init__.py": "",
+             "workflow.py": """\
+ from vellum.workflows import BaseWorkflow
+
+ class Workflow(BaseWorkflow):
+     pass
+ """,
+         },
+     }
+
+     flask_app = create_app()
+
+     # WHEN we call the stream-node route
+     with flask_app.test_client() as test_client:
+         response = test_client.post("/workflow/stream-node", json=request_body)
+
+     # THEN we get a 200 response
+     # TODO: In the future, we would want this to return a 4xx response by returning the workflow
+     # instance and the node definition that we want to run as part of request deserialization.
+     assert response.status_code == 200, response.text
+
+     # AND we get an appropriate error message
+     # TODO: In a future where we are returning 4xx responses, we assert the following data:
+     # response_data = response.get_json()
+     # assert "Node with ID" in response_data["detail"]
+     # assert "not found" in response_data["detail"]
+     events = [json.loads(line) for line in response.data.decode().strip().split("\n") if line.strip()]
+     assert events[0]["name"] == "vembda.execution.initiated"
+     assert events[1]["name"] == "vembda.execution.fulfilled"
+     assert len(events) == 2
workflow_server/api/workflow_view.py

@@ -36,7 +36,7 @@ from workflow_server.core.events import (
      VembdaExecutionInitiatedBody,
      VembdaExecutionInitiatedEvent,
  )
- from workflow_server.core.executor import stream_node_pebble_timeout, stream_workflow, stream_workflow_process_timeout
+ from workflow_server.core.executor import stream_node_process_timeout, stream_workflow, stream_workflow_process_timeout
  from workflow_server.core.utils import (
      create_vembda_rejected_event,
      is_events_emitting_enabled,
@@ -374,10 +374,10 @@ def stream_node_route() -> Response:
          parent=None,
      )

-     app.logger.debug(f"Node stream received {data.get('execution_id')}")
+     app.logger.debug(f"Node stream started. Trace ID: {context.trace_id}")

      pebble_queue: Queue[dict] = Queue()
-     stream_future = stream_node_pebble_timeout(
+     process = stream_node_process_timeout(
          executor_context=context,
          queue=pebble_queue,
      )
@@ -388,14 +388,9 @@
              event = pebble_queue.get(timeout=context.timeout)

          except Empty:
-             if stream_future.exception() is not None:
-                 # This happens when theres a problem with the stream function call
-                 # itself not the workflow runner
+             if not process.is_alive():
                  yield create_vembda_rejected_event(context, "Internal Server Error")
-                 app.logger.exception(stream_future.exception())
                  break
-             else:
-                 continue

          yield event
          if event.get("name") == VEMBDA_EXECUTION_FULFILLED_EVENT_NAME:
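
The consumer side of this hunk is a generic multiprocessing pattern: poll a queue with a timeout, and on a miss use the child's liveness to decide between "keep waiting" and "the worker died without a terminal event". A minimal self-contained sketch of that pattern (the worker function, event names, and 1-second poll interval are illustrative stand-ins, not the package's actual values):

import time
from multiprocessing import Process, Queue
from queue import Empty


def worker(queue: Queue) -> None:
    # Stand-in for the stream wrapper: emit a few events, then a terminal one.
    for i in range(3):
        queue.put({"name": f"event-{i}"})
        time.sleep(0.1)
    queue.put({"name": "vembda.execution.fulfilled"})


if __name__ == "__main__":
    queue: Queue = Queue()
    process = Process(target=worker, args=(queue,))
    process.start()

    while True:
        try:
            event = queue.get(timeout=1)
        except Empty:
            if not process.is_alive():
                # The child exited without producing a terminal event, so
                # synthesize a rejection instead of polling forever.
                print({"name": "vembda.execution.fulfilled", "stderr": "Internal Server Error"})
                break
            continue  # child is alive but quiet; keep waiting

        print(event)
        if event.get("name") == "vembda.execution.fulfilled":
            break

    process.join()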
workflow_server/core/executor.py

@@ -15,7 +15,6 @@ from traceback import format_exc
  from uuid import UUID, uuid4
  from typing import Any, Callable, Generator, Iterator, Optional, Tuple, Type

- from pebble import concurrent
  from vellum_ee.workflows.display.utils.events import event_enricher
  from vellum_ee.workflows.display.workflows import BaseWorkflowDisplay
  from vellum_ee.workflows.server.virtual_file_loader import VirtualFileFinder
@@ -24,6 +23,14 @@ from vellum.workflows import BaseWorkflow
  from vellum.workflows.emitters.base import BaseWorkflowEmitter
  from vellum.workflows.emitters.vellum_emitter import VellumEmitter
  from vellum.workflows.events.exception_handling import stream_initialization_exception
+ from vellum.workflows.events.node import (
+     NodeExecutionFulfilledBody,
+     NodeExecutionFulfilledEvent,
+     NodeExecutionInitiatedBody,
+     NodeExecutionInitiatedEvent,
+     NodeExecutionStreamingBody,
+     NodeExecutionStreamingEvent,
+ )
  from vellum.workflows.events.types import BaseEvent
  from vellum.workflows.events.workflow import WorkflowEventDisplayContext
  from vellum.workflows.exceptions import WorkflowInitializationException
@@ -44,9 +51,12 @@ from workflow_server.core.events import (
      VembdaExecutionFulfilledBody,
      VembdaExecutionFulfilledEvent,
  )
- from workflow_server.core.utils import is_events_emitting_enabled, serialize_vembda_rejected_event
+ from workflow_server.core.utils import (
+     create_vembda_rejected_event,
+     is_events_emitting_enabled,
+     serialize_vembda_rejected_event,
+ )
  from workflow_server.core.workflow_executor_context import (
-     DEFAULT_TIMEOUT_SECONDS,
      BaseExecutorContext,
      NodeExecutorContext,
      WorkflowExecutorContext,
@@ -56,38 +66,31 @@ from workflow_server.utils.log_proxy import redirect_log
  logger = logging.getLogger(__name__)


- @concurrent.process(timeout=DEFAULT_TIMEOUT_SECONDS)
- # type ignore since pebble annotation changes return type
- def stream_node_pebble_timeout(
+ def stream_node_process_timeout(
      executor_context: NodeExecutorContext,
      queue: Queue,
- ) -> None:
-     _stream_node_wrapper(
-         executor_context=executor_context,
-         queue=queue,
+ ) -> Process:
+     node_process = Process(
+         target=_stream_node_wrapper,
+         args=(executor_context, queue),
      )
+     node_process.start()
+
+     if node_process.exitcode is not None:
+         queue.put(create_vembda_rejected_event(executor_context, "Internal Server Error", timed_out=True))
+
+     return node_process


  def _stream_node_wrapper(executor_context: NodeExecutorContext, queue: Queue) -> None:
      try:
          for event in stream_node(executor_context=executor_context):
              queue.put(event)
+     except WorkflowInitializationException as e:
+         queue.put(create_vembda_rejected_event(executor_context, e.message))
      except Exception as e:
          logger.exception(e)
-         queue.put(
-             VembdaExecutionFulfilledEvent(
-                 id=uuid4(),
-                 timestamp=datetime.now(),
-                 trace_id=executor_context.trace_id,
-                 span_id=executor_context.execution_id,
-                 body=VembdaExecutionFulfilledBody(
-                     exit_code=-1,
-                     stderr="Internal Server Error",
-                     container_overhead_latency=executor_context.container_overhead_latency,
-                 ),
-                 parent=None,
-             ).model_dump(mode="json")
-         )
+         queue.put(create_vembda_rejected_event(executor_context, "Internal Server Error"))


  def _stream_workflow_wrapper(
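
Two details of this producer-side setup carry the error handling: the wrapper turns any exception raised in the child into a terminal event on the queue, and the starter checks exitcode immediately after start() to catch a child that died before streaming anything. A hedged sketch of those two pieces (make_rejected_event is a hypothetical stand-in for create_vembda_rejected_event):

from multiprocessing import Process, Queue


def make_rejected_event(message: str, timed_out: bool = False) -> dict:
    # Hypothetical stand-in: the real helper builds a VembdaExecutionFulfilledEvent
    # with exit_code=-1, stderr, and the timed_out flag.
    return {"name": "vembda.execution.fulfilled", "stderr": message, "timed_out": timed_out}


def _wrapper(queue: Queue) -> None:
    try:
        raise RuntimeError("boom")  # stand-in for iterating the node stream
    except Exception:
        # Funnel failures into the queue so the consumer always sees a terminal event.
        queue.put(make_rejected_event("Internal Server Error"))


def start_stream_process(queue: Queue) -> Process:
    process = Process(target=_wrapper, args=(queue,))
    process.start()
    # exitcode is None while the child runs; a non-None value this early means
    # the child already died, so enqueue a terminal event right away.
    if process.exitcode is not None:
        queue.put(make_rejected_event("Internal Server Error", timed_out=True))
    return process


if __name__ == "__main__":
    q: Queue = Queue()
    p = start_stream_process(q)
    print(q.get(timeout=5))  # the rejected event put by the wrapper
    p.join()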
@@ -125,7 +128,7 @@ def _stream_workflow_wrapper(
  def stream_workflow_process_timeout(
      executor_context: WorkflowExecutorContext,
      queue: Queue,
-     cancel_signal: Optional[ThreadingEvent],
+     cancel_signal: ThreadingEvent,
      timeout_signal: ThreadingEvent,
  ) -> Process:
      workflow_process = Process(
@@ -140,19 +143,7 @@
      workflow_process.start()

      if workflow_process.exitcode is not None:
-         vembda_fulfilled_event = VembdaExecutionFulfilledEvent(
-             id=uuid4(),
-             timestamp=datetime.now(),
-             trace_id=executor_context.trace_id,
-             span_id=executor_context.execution_id,
-             body=VembdaExecutionFulfilledBody(
-                 exit_code=-1,
-                 timed_out=True,
-                 container_overhead_latency=executor_context.container_overhead_latency,
-             ),
-             parent=None,
-         )
-         queue.put(vembda_fulfilled_event.model_dump(mode="json"))
+         queue.put(create_vembda_rejected_event(executor_context, "Internal Server Error", timed_out=True))

      return workflow_process

@@ -259,9 +250,7 @@
      if emitter_thread:
          emitter_thread.join()

-     timer_thread = next((t for t in threading.enumerate() if t.name.startswith("Thread-")), None)
-     if timer_thread:
-         timer_thread.join()
+     workflow.join()

      return (
          _call_stream(
@@ -278,28 +267,74 @@ def stream_node(
      executor_context: NodeExecutorContext,
      disable_redirect: bool = True,
  ) -> Iterator[dict]:
-     def call_node() -> Generator[dict[str, Any], Any, None]:
-         workflow, namespace = _gather_workflow(executor_context)
-         node_module = importlib.import_module(f"{namespace}.{executor_context.node_module}")
-
-         Node = getattr(node_module, executor_context.node_name)
-
-         workflow_inputs = _get_workflow_inputs(executor_context, workflow.__class__)
-         workflow_state = _get_workflow_state(executor_context, workflow_inputs=workflow_inputs)
-
-         node = Node(
-             state=workflow_state,
-             context=workflow._context,
+     workflow, namespace = _gather_workflow(executor_context)
+     Node: Optional[Type[BaseNode]] = None
+
+     for workflow_node in workflow.get_nodes():
+         if executor_context.node_id and workflow_node.__id__ == executor_context.node_id:
+             Node = workflow_node
+             break
+         elif (
+             executor_context.node_module
+             and executor_context.node_name
+             and workflow_node.__name__ == executor_context.node_name
+             and workflow_node.__module__ == f"{namespace}.{executor_context.node_module}"
+         ):
+             Node = workflow_node
+             break
+
+     if not Node:
+         identifier = executor_context.node_id or f"{executor_context.node_module}.{executor_context.node_name}"
+         raise WorkflowInitializationException(
+             message=f"Node '{identifier}' not found in workflow",
+             workflow_definition=workflow.__class__,
          )

+     workflow_inputs = _get_workflow_inputs(executor_context, workflow.__class__)
+     workflow_state = _get_workflow_state(executor_context, workflow_inputs=workflow_inputs)
+     node = Node(
+         state=workflow_state,
+         context=workflow._context,
+     )
+
+     def call_node() -> Generator[dict[str, Any], Any, None]:
          executor_context.stream_start_time = time.time_ns()
+         span_id = uuid4()
+         yield NodeExecutionInitiatedEvent(
+             id=uuid4(),
+             timestamp=datetime.now(),
+             trace_id=executor_context.trace_id,
+             span_id=span_id,
+             body=NodeExecutionInitiatedBody(
+                 node_definition=Node,
+                 inputs=node._inputs,
+             ),
+         ).model_dump(mode="json")
          node_outputs = node.run()

          if isinstance(node_outputs, (Iterator)):
              for node_output in node_outputs:
-                 yield json.loads(json.dumps(node_output, default=vars))
+                 yield NodeExecutionStreamingEvent(
+                     id=uuid4(),
+                     timestamp=datetime.now(),
+                     trace_id=executor_context.trace_id,
+                     span_id=span_id,
+                     body=NodeExecutionStreamingBody(
+                         node_definition=Node,
+                         output=node_output,
+                     ),
+                 ).model_dump(mode="json")
          else:
-             yield json.loads(json.dumps(node_outputs, default=vars))
+             yield NodeExecutionFulfilledEvent(
+                 id=uuid4(),
+                 timestamp=datetime.now(),
+                 trace_id=executor_context.trace_id,
+                 span_id=span_id,
+                 body=NodeExecutionFulfilledBody(
+                     node_definition=Node,
+                     outputs=node_outputs,
+                 ),
+             ).model_dump(mode="json")

      return _call_stream(
          executor_context=executor_context,
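
The node lookup in this hunk accepts either identifier form. The shape of that dual resolution, reduced to plain classes (BaseNode, __id__, and the registry list here are simplified stand-ins for vellum's node classes and workflow.get_nodes()):

from typing import List, Optional, Type
from uuid import UUID, uuid4


class BaseNode:
    __id__: UUID


class GreetNode(BaseNode):
    __id__ = uuid4()


def resolve_node(
    nodes: List[Type[BaseNode]],
    namespace: str,
    node_id: Optional[UUID] = None,
    node_module: Optional[str] = None,
    node_name: Optional[str] = None,
) -> Type[BaseNode]:
    for node in nodes:
        # Preferred path: match on the node's stable UUID.
        if node_id and node.__id__ == node_id:
            return node
        # Legacy path: match on class name plus virtual module path.
        if (
            node_module
            and node_name
            and node.__name__ == node_name
            and node.__module__ == f"{namespace}.{node_module}"
        ):
            return node
    identifier = node_id or f"{node_module}.{node_name}"
    raise LookupError(f"Node '{identifier}' not found in workflow")


print(resolve_node([GreetNode], "ns", node_id=GreetNode.__id__).__name__)  # GreetNode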
workflow_server/core/utils.py

@@ -7,7 +7,7 @@ from workflow_server.core.workflow_executor_context import BaseExecutorContext


  def _create_vembda_rejected_event_base(
-     executor_context: Optional[BaseExecutorContext], error_message: str
+     executor_context: Optional[BaseExecutorContext], error_message: str, timed_out: bool
  ) -> VembdaExecutionFulfilledEvent:
      if executor_context:
          trace_id = executor_context.trace_id
@@ -25,6 +25,7 @@ def _create_vembda_rejected_event_base(
          span_id=span_id,
          body=VembdaExecutionFulfilledBody(
              exit_code=-1,
+             timed_out=timed_out,
              stderr=error_message,
              container_overhead_latency=container_overhead_latency,
          ),
@@ -32,12 +33,16 @@
      )


- def create_vembda_rejected_event(executor_context: Optional[BaseExecutorContext], error_message: str) -> dict:
-     return _create_vembda_rejected_event_base(executor_context, error_message).model_dump(mode="json")
+ def create_vembda_rejected_event(
+     executor_context: Optional[BaseExecutorContext], error_message: str, timed_out: bool = False
+ ) -> dict:
+     return _create_vembda_rejected_event_base(executor_context, error_message, timed_out).model_dump(mode="json")


- def serialize_vembda_rejected_event(executor_context: Optional[BaseExecutorContext], error_message: str) -> str:
-     return _create_vembda_rejected_event_base(executor_context, error_message).model_dump_json()
+ def serialize_vembda_rejected_event(
+     executor_context: Optional[BaseExecutorContext], error_message: str, timed_out: bool = False
+ ) -> str:
+     return _create_vembda_rejected_event_base(executor_context, error_message, timed_out).model_dump_json()


  def is_events_emitting_enabled(executor_context: Optional[BaseExecutorContext]) -> bool:
workflow_server/core/workflow_executor_context.py

@@ -1,9 +1,11 @@
  from dataclasses import field
+ from functools import cached_property
  import os
  from uuid import UUID
  from typing import Any, Optional
+ from typing_extensions import Self

- from _pytest.compat import cached_property
+ from pydantic import model_validator

  from vellum import ApiVersionEnum, Vellum, VellumEnvironment
  from vellum.client.core import UniversalBaseModel
@@ -95,5 +97,12 @@ class WorkflowExecutorContext(BaseExecutorContext):


  class NodeExecutorContext(BaseExecutorContext):
-     node_module: str
-     node_name: str
+     node_id: Optional[UUID] = None
+     node_module: Optional[str] = None
+     node_name: Optional[str] = None
+
+     @model_validator(mode="after")
+     def validate_node_identification(self) -> Self:
+         if not self.node_id and not (self.node_module and self.node_name):
+             raise ValueError("Either node_id or both node_module and node_name must be provided")
+         return self
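
The validator added here is a standard pydantic v2 "after" model validator: it runs once the fields are populated and rejects payloads that carry neither identifier form. A small runnable illustration of the same pattern (NodeRef is a hypothetical miniature of NodeExecutorContext, not the package's class):

from typing import Optional
from uuid import UUID, uuid4

from pydantic import BaseModel, ValidationError, model_validator
from typing_extensions import Self


class NodeRef(BaseModel):
    node_id: Optional[UUID] = None
    node_module: Optional[str] = None
    node_name: Optional[str] = None

    @model_validator(mode="after")
    def validate_node_identification(self) -> Self:
        # Accept a node_id, or the legacy (node_module, node_name) pair.
        if not self.node_id and not (self.node_module and self.node_name):
            raise ValueError("Either node_id or both node_module and node_name must be provided")
        return self


NodeRef(node_id=uuid4())  # ok
NodeRef(node_module="nodes.test_node", node_name="TestNode")  # ok, legacy form
try:
    NodeRef()
except ValidationError as exc:
    # This is the message the route surfaces as a 400 "detail" in the tests above.
    print(exc.errors()[0]["msg"])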