vellum-workflow-server 1.4.2.post1__tar.gz → 1.5.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/PKG-INFO +2 -2
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/pyproject.toml +2 -2
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/api/tests/test_workflow_view.py +196 -1
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/api/workflow_view.py +4 -9
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/core/executor.py +89 -52
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/core/utils.py +10 -5
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/core/workflow_executor_context.py +12 -2
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/README.md +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/__init__.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/api/__init__.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/api/auth_middleware.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/api/healthz_view.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/api/tests/__init__.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/api/tests/test_input_display_mapping.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/code_exec_runner.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/config.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/core/__init__.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/core/cancel_workflow.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/core/events.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/server.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/start.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/utils/__init__.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/utils/exit_handler.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/utils/log_proxy.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/utils/oom_killer.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/utils/sentry.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/utils/system_utils.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/utils/tests/__init__.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/utils/tests/test_sentry_integration.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/utils/tests/test_system_utils.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/utils/tests/test_utils.py +0 -0
- {vellum_workflow_server-1.4.2.post1 → vellum_workflow_server-1.5.0}/src/workflow_server/utils/utils.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-workflow-server
-Version: 1.4.2.post1
+Version: 1.5.0
 Summary: 
 License: AGPL
 Requires-Python: >=3.9.0,<4
@@ -29,7 +29,7 @@ Requires-Dist: pyjwt (==2.10.0)
 Requires-Dist: python-dotenv (==1.0.1)
 Requires-Dist: retrying (==1.3.4)
 Requires-Dist: sentry-sdk[flask] (==2.20.0)
-Requires-Dist: vellum-ai (==1.
+Requires-Dist: vellum-ai (==1.5.0)
 Description-Content-Type: text/markdown
 
 # Vellum Workflow Runner Server

pyproject.toml
@@ -3,7 +3,7 @@ name = "vellum-workflow-server"
 
 [tool.poetry]
 name = "vellum-workflow-server"
-version = "1.4.2.post1"
+version = "1.5.0"
 description = ""
 readme = "README.md"
 authors = []
@@ -45,7 +45,7 @@ flask = "2.3.3"
 orderly-set = "5.2.2"
 pebble = "5.0.7"
 gunicorn = "23.0.0"
-vellum-ai = "1.
+vellum-ai = "1.5.0"
 python-dotenv = "1.0.1"
 retrying = "1.3.4"
 sentry-sdk = {extras = ["flask"], version = "2.20.0"}

src/workflow_server/api/tests/test_workflow_view.py
@@ -1,7 +1,8 @@
+import json
 import logging
 import re
 from unittest.mock import patch
-from uuid import UUID
+from uuid import UUID, uuid4
 
 from deepdiff import DeepDiff
 
@@ -534,3 +535,197 @@ def test_serialize_route__with_invalid_workspace_api_key():
 
     # AND the response should contain the serialization result
     assert "exec_config" in response.json
+
+
+def test_stream_node_route__with_node_id():
+    """
+    Tests that the stream-node endpoint works with node_id.
+    """
+    # GIVEN a valid request body with node_id
+    node_id = uuid4()
+    span_id = uuid4()
+    request_body = {
+        "timeout": 360,
+        "execution_id": str(span_id),
+        "node_id": str(node_id),
+        "inputs": [],
+        "environment_api_key": "test",
+        "module": "workflow",
+        "files": {
+            "__init__.py": "from .display import *",
+            "workflow.py": """\
+from vellum.workflows import BaseWorkflow
+from .nodes.test_node import TestNode
+
+class Workflow(BaseWorkflow):
+    graph = TestNode
+
+    class Outputs(BaseWorkflow.Outputs):
+        result = TestNode.Outputs.value
+""",
+            "nodes/__init__.py": "from .test_node import TestNode\n__all__ = ['TestNode']",
+            "nodes/test_node.py": """\
+from vellum.workflows.nodes import BaseNode
+
+class TestNode(BaseNode):
+    class Outputs(BaseNode.Outputs):
+        value = "test_result"
+""",
+            "display/__init__.py": "from .nodes import *\nfrom .workflow import *",
+            "display/workflow.py": """\
+from vellum_ee.workflows.display.workflows import BaseWorkflowDisplay
+""",
+            "display/nodes/__init__.py": "from .test_node import TestNodeDisplay\n__all__ = ['TestNodeDisplay']",
+            "display/nodes/test_node.py": f"""\
+from uuid import UUID
+from vellum_ee.workflows.display.nodes import BaseNodeDisplay
+from ...nodes.test_node import TestNode
+
+class TestNodeDisplay(BaseNodeDisplay[TestNode]):
+    node_id = UUID("{node_id}")
+""",
+        },
+    }
+
+    flask_app = create_app()
+
+    # WHEN we call the stream-node route
+    with flask_app.test_client() as test_client:
+        response = test_client.post("/workflow/stream-node", json=request_body)
+
+    # THEN we get a 200 response
+    assert response.status_code == 200
+
+    events = [json.loads(line) for line in response.data.decode().strip().split("\n") if line.strip()]
+    assert len(events) == 4
+    assert events[0]["name"] == "vembda.execution.initiated"
+    assert events[1]["name"] == "node.execution.initiated"
+    assert events[2]["name"] == "node.execution.fulfilled"
+    assert events[3]["name"] == "vembda.execution.fulfilled"
+
+
+def test_stream_node_route__with_node_module_and_name_backward_compatibility():
+    """
+    Tests that the stream-node endpoint still works with node_module and node_name for backward compatibility.
+    """
+    # GIVEN a valid request body with node_module and node_name (old format)
+    span_id = uuid4()
+    request_body = {
+        "timeout": 360,
+        "execution_id": str(span_id),
+        "node_module": "nodes.test_node",
+        "node_name": "TestNode",
+        "inputs": [],
+        "environment_api_key": "test",
+        "module": "workflow",
+        "files": {
+            "__init__.py": "",
+            "workflow.py": """\
+from vellum.workflows import BaseWorkflow
+from .nodes.test_node import TestNode
+
+class Workflow(BaseWorkflow):
+    graph = TestNode
+""",
+            "nodes/__init__.py": "from .test_node import TestNode",
+            "nodes/test_node.py": """\
+from vellum.workflows.nodes import BaseNode
+
+class TestNode(BaseNode):
+    class Outputs(BaseNode.Outputs):
+        value = "test_result"
+""",
+        },
+    }
+
+    flask_app = create_app()
+
+    # WHEN we call the stream-node route
+    with flask_app.test_client() as test_client:
+        response = test_client.post("/workflow/stream-node", json=request_body)
+
+    # THEN we get a 200 response
+    assert response.status_code == 200
+
+    events = [json.loads(line) for line in response.data.decode().strip().split("\n") if line.strip()]
+    assert events[0]["name"] == "vembda.execution.initiated"
+    assert events[1]["name"] == "node.execution.initiated", json.dumps(events[1]["body"])
+    assert events[2]["name"] == "node.execution.fulfilled"
+    assert events[3]["name"] == "vembda.execution.fulfilled"
+    assert len(events) == 4
+
+
+def test_stream_node_route__missing_node_info_validation():
+    """
+    Tests that the stream-node endpoint returns a validation error when neither
+    node_id nor node_module/node_name are provided.
+    """
+    # GIVEN a request body missing node identification
+    span_id = uuid4()
+    request_body = {
+        "timeout": 360,
+        "execution_id": str(span_id),
+        "inputs": [],
+        "environment_api_key": "test",
+        "module": "workflow",
+        "files": {"__init__.py": "", "workflow.py": ""},
+    }
+
+    flask_app = create_app()
+
+    # WHEN we call the stream-node route
+    with flask_app.test_client() as test_client:
+        response = test_client.post("/workflow/stream-node", json=request_body)
+
+    # THEN we get a 400 response
+    assert response.status_code == 400
+
+    # AND we get a validation error message
+    assert "Either node_id or both node_module and node_name must be provided" in response.get_json()["detail"]
+
+
+def test_stream_node_route__invalid_node_id():
+    """
+    Tests that the stream-node endpoint returns 404 for an invalid node_id.
+    """
+    # GIVEN a request body with an invalid node_id
+    invalid_node_id = uuid4()
+    span_id = uuid4()
+    request_body = {
+        "timeout": 360,
+        "execution_id": str(span_id),
+        "node_id": str(invalid_node_id),
+        "inputs": [],
+        "environment_api_key": "test",
+        "module": "workflow",
+        "files": {
+            "__init__.py": "",
+            "workflow.py": """\
+from vellum.workflows import BaseWorkflow
+
+class Workflow(BaseWorkflow):
+    pass
+""",
+        },
+    }
+
+    flask_app = create_app()
+
+    # WHEN we call the stream-node route
+    with flask_app.test_client() as test_client:
+        response = test_client.post("/workflow/stream-node", json=request_body)
+
+    # THEN we get a 200 response
+    # TODO: In the future, we would want this to return a 4xx response by returning the workflow
+    # instance and the node definition that we want to run as part of request deserialization.
+    assert response.status_code == 200, response.text
+
+    # AND we get an appropriate error message
+    # TODO: In a future where we are returning 4xx responses, we assert the following data:
+    # response_data = response.get_json()
+    # assert "Node with ID" in response_data["detail"]
+    # assert "not found" in response_data["detail"]
+    events = [json.loads(line) for line in response.data.decode().strip().split("\n") if line.strip()]
+    assert events[0]["name"] == "vembda.execution.initiated"
+    assert events[1]["name"] == "vembda.execution.fulfilled"
+    assert len(events) == 2

src/workflow_server/api/workflow_view.py
@@ -36,7 +36,7 @@ from workflow_server.core.events import (
     VembdaExecutionInitiatedBody,
     VembdaExecutionInitiatedEvent,
 )
-from workflow_server.core.executor import
+from workflow_server.core.executor import stream_node_process_timeout, stream_workflow, stream_workflow_process_timeout
 from workflow_server.core.utils import (
     create_vembda_rejected_event,
     is_events_emitting_enabled,
@@ -374,10 +374,10 @@ def stream_node_route() -> Response:
         parent=None,
     )
 
-    app.logger.debug(f"Node stream
+    app.logger.debug(f"Node stream started. Trace ID: {context.trace_id}")
 
     pebble_queue: Queue[dict] = Queue()
-
+    process = stream_node_process_timeout(
         executor_context=context,
         queue=pebble_queue,
     )
@@ -388,14 +388,9 @@ def stream_node_route() -> Response:
                 event = pebble_queue.get(timeout=context.timeout)
 
             except Empty:
-                if
-                # This happens when theres a problem with the stream function call
-                # itself not the workflow runner
+                if not process.is_alive():
                     yield create_vembda_rejected_event(context, "Internal Server Error")
-                    app.logger.exception(stream_future.exception())
                     break
-                else:
-                    continue
 
             yield event
             if event.get("name") == VEMBDA_EXECUTION_FULFILLED_EVENT_NAME:

src/workflow_server/core/executor.py
@@ -15,7 +15,6 @@ from traceback import format_exc
 from uuid import UUID, uuid4
 from typing import Any, Callable, Generator, Iterator, Optional, Tuple, Type
 
-from pebble import concurrent
 from vellum_ee.workflows.display.utils.events import event_enricher
 from vellum_ee.workflows.display.workflows import BaseWorkflowDisplay
 from vellum_ee.workflows.server.virtual_file_loader import VirtualFileFinder
@@ -24,6 +23,14 @@ from vellum.workflows import BaseWorkflow
 from vellum.workflows.emitters.base import BaseWorkflowEmitter
 from vellum.workflows.emitters.vellum_emitter import VellumEmitter
 from vellum.workflows.events.exception_handling import stream_initialization_exception
+from vellum.workflows.events.node import (
+    NodeExecutionFulfilledBody,
+    NodeExecutionFulfilledEvent,
+    NodeExecutionInitiatedBody,
+    NodeExecutionInitiatedEvent,
+    NodeExecutionStreamingBody,
+    NodeExecutionStreamingEvent,
+)
 from vellum.workflows.events.types import BaseEvent
 from vellum.workflows.events.workflow import WorkflowEventDisplayContext
 from vellum.workflows.exceptions import WorkflowInitializationException
@@ -44,9 +51,12 @@ from workflow_server.core.events import (
     VembdaExecutionFulfilledBody,
     VembdaExecutionFulfilledEvent,
 )
-from workflow_server.core.utils import
+from workflow_server.core.utils import (
+    create_vembda_rejected_event,
+    is_events_emitting_enabled,
+    serialize_vembda_rejected_event,
+)
 from workflow_server.core.workflow_executor_context import (
-    DEFAULT_TIMEOUT_SECONDS,
     BaseExecutorContext,
     NodeExecutorContext,
     WorkflowExecutorContext,
@@ -56,38 +66,31 @@ from workflow_server.utils.log_proxy import redirect_log
 logger = logging.getLogger(__name__)
 
 
-
-# type ignore since pebble annotation changes return type
-def stream_node_pebble_timeout(
+def stream_node_process_timeout(
     executor_context: NodeExecutorContext,
     queue: Queue,
-) ->
-
-
-
+) -> Process:
+    node_process = Process(
+        target=_stream_node_wrapper,
+        args=(executor_context, queue),
     )
+    node_process.start()
+
+    if node_process.exitcode is not None:
+        queue.put(create_vembda_rejected_event(executor_context, "Internal Server Error", timed_out=True))
+
+    return node_process
 
 
 def _stream_node_wrapper(executor_context: NodeExecutorContext, queue: Queue) -> None:
     try:
         for event in stream_node(executor_context=executor_context):
             queue.put(event)
+    except WorkflowInitializationException as e:
+        queue.put(create_vembda_rejected_event(executor_context, e.message))
     except Exception as e:
         logger.exception(e)
-        queue.put(
-            VembdaExecutionFulfilledEvent(
-                id=uuid4(),
-                timestamp=datetime.now(),
-                trace_id=executor_context.trace_id,
-                span_id=executor_context.execution_id,
-                body=VembdaExecutionFulfilledBody(
-                    exit_code=-1,
-                    stderr="Internal Server Error",
-                    container_overhead_latency=executor_context.container_overhead_latency,
-                ),
-                parent=None,
-            ).model_dump(mode="json")
-        )
+        queue.put(create_vembda_rejected_event(executor_context, "Internal Server Error"))
@@ -125,7 +128,7 @@ def _stream_workflow_wrapper(
 def stream_workflow_process_timeout(
     executor_context: WorkflowExecutorContext,
     queue: Queue,
-    cancel_signal:
+    cancel_signal: ThreadingEvent,
     timeout_signal: ThreadingEvent,
 ) -> Process:
     workflow_process = Process(
@@ -140,19 +143,7 @@ def stream_workflow_process_timeout(
     workflow_process.start()
 
     if workflow_process.exitcode is not None:
-
-            id=uuid4(),
-            timestamp=datetime.now(),
-            trace_id=executor_context.trace_id,
-            span_id=executor_context.execution_id,
-            body=VembdaExecutionFulfilledBody(
-                exit_code=-1,
-                timed_out=True,
-                container_overhead_latency=executor_context.container_overhead_latency,
-            ),
-            parent=None,
-        )
-        queue.put(vembda_fulfilled_event.model_dump(mode="json"))
+        queue.put(create_vembda_rejected_event(executor_context, "Internal Server Error", timed_out=True))
 
     return workflow_process
@@ -276,28 +267,74 @@ def stream_node(
     executor_context: NodeExecutorContext,
     disable_redirect: bool = True,
 ) -> Iterator[dict]:
-
-
-
-
-
-
-
-
-
-
-
-
+    workflow, namespace = _gather_workflow(executor_context)
+    Node: Optional[Type[BaseNode]] = None
+
+    for workflow_node in workflow.get_nodes():
+        if executor_context.node_id and workflow_node.__id__ == executor_context.node_id:
+            Node = workflow_node
+            break
+        elif (
+            executor_context.node_module
+            and executor_context.node_name
+            and workflow_node.__name__ == executor_context.node_name
+            and workflow_node.__module__ == f"{namespace}.{executor_context.node_module}"
+        ):
+            Node = workflow_node
+            break
+
+    if not Node:
+        identifier = executor_context.node_id or f"{executor_context.node_module}.{executor_context.node_name}"
+        raise WorkflowInitializationException(
+            message=f"Node '{identifier}' not found in workflow",
+            workflow_definition=workflow.__class__,
         )
 
+    workflow_inputs = _get_workflow_inputs(executor_context, workflow.__class__)
+    workflow_state = _get_workflow_state(executor_context, workflow_inputs=workflow_inputs)
+    node = Node(
+        state=workflow_state,
+        context=workflow._context,
+    )
+
+    def call_node() -> Generator[dict[str, Any], Any, None]:
         executor_context.stream_start_time = time.time_ns()
+        span_id = uuid4()
+        yield NodeExecutionInitiatedEvent(
+            id=uuid4(),
+            timestamp=datetime.now(),
+            trace_id=executor_context.trace_id,
+            span_id=span_id,
+            body=NodeExecutionInitiatedBody(
+                node_definition=Node,
+                inputs=node._inputs,
+            ),
+        ).model_dump(mode="json")
         node_outputs = node.run()
 
         if isinstance(node_outputs, (Iterator)):
            for node_output in node_outputs:
-                yield
+                yield NodeExecutionStreamingEvent(
+                    id=uuid4(),
+                    timestamp=datetime.now(),
+                    trace_id=executor_context.trace_id,
+                    span_id=span_id,
+                    body=NodeExecutionStreamingBody(
+                        node_definition=Node,
+                        output=node_output,
+                    ),
+                ).model_dump(mode="json")
         else:
-            yield
+            yield NodeExecutionFulfilledEvent(
+                id=uuid4(),
+                timestamp=datetime.now(),
+                trace_id=executor_context.trace_id,
+                span_id=span_id,
+                body=NodeExecutionFulfilledBody(
+                    node_definition=Node,
+                    outputs=node_outputs,
+                ),
+            ).model_dump(mode="json")
 
     return _call_stream(
         executor_context=executor_context,

src/workflow_server/core/utils.py
@@ -7,7 +7,7 @@ from workflow_server.core.workflow_executor_context import BaseExecutorContext
 
 
 def _create_vembda_rejected_event_base(
-    executor_context: Optional[BaseExecutorContext], error_message: str
+    executor_context: Optional[BaseExecutorContext], error_message: str, timed_out: bool
 ) -> VembdaExecutionFulfilledEvent:
     if executor_context:
         trace_id = executor_context.trace_id
@@ -25,6 +25,7 @@ def _create_vembda_rejected_event_base(
         span_id=span_id,
         body=VembdaExecutionFulfilledBody(
             exit_code=-1,
+            timed_out=timed_out,
             stderr=error_message,
             container_overhead_latency=container_overhead_latency,
         ),
@@ -32,12 +33,16 @@ def _create_vembda_rejected_event_base(
     )
 
 
-def create_vembda_rejected_event(
-
+def create_vembda_rejected_event(
+    executor_context: Optional[BaseExecutorContext], error_message: str, timed_out: bool = False
+) -> dict:
+    return _create_vembda_rejected_event_base(executor_context, error_message, timed_out).model_dump(mode="json")
 
 
-def serialize_vembda_rejected_event(
-
+def serialize_vembda_rejected_event(
+    executor_context: Optional[BaseExecutorContext], error_message: str, timed_out: bool = False
+) -> str:
+    return _create_vembda_rejected_event_base(executor_context, error_message, timed_out).model_dump_json()
 
 
 def is_events_emitting_enabled(executor_context: Optional[BaseExecutorContext]) -> bool:

src/workflow_server/core/workflow_executor_context.py
@@ -3,6 +3,9 @@ from functools import cached_property
 import os
 from uuid import UUID
 from typing import Any, Optional
+from typing_extensions import Self
+
+from pydantic import model_validator
 
 from vellum import ApiVersionEnum, Vellum, VellumEnvironment
 from vellum.client.core import UniversalBaseModel
@@ -94,5 +97,12 @@ class WorkflowExecutorContext(BaseExecutorContext):
 
 
 class NodeExecutorContext(BaseExecutorContext):
-
-
+    node_id: Optional[UUID] = None
+    node_module: Optional[str] = None
+    node_name: Optional[str] = None
+
+    @model_validator(mode="after")
+    def validate_node_identification(self) -> Self:
+        if not self.node_id and not (self.node_module and self.node_name):
+            raise ValueError("Either node_id or both node_module and node_name must be provided")
+        return self
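
The new tests pin down the request contract for POST /workflow/stream-node: a request must carry either a node_id or the legacy node_module/node_name pair. Below is a minimal client-side sketch of both shapes; the base URL and the use of the `requests` library are assumptions for illustration, not part of this package.

    # Illustration only: the endpoint path and field names come from the
    # tests above; the server address and `requests` client are assumptions.
    import json

    import requests

    common = {
        "timeout": 360,
        "execution_id": "00000000-0000-0000-0000-000000000001",
        "inputs": [],
        "environment_api_key": "test",
        "module": "workflow",
        "files": {"__init__.py": "", "workflow.py": "..."},
    }

    # New in 1.5.0: identify the node to run by its display UUID.
    by_id = {**common, "node_id": "00000000-0000-0000-0000-0000000000aa"}

    # Still accepted for backward compatibility: module path plus class name.
    by_name = {**common, "node_module": "nodes.test_node", "node_name": "TestNode"}

    response = requests.post("http://localhost:8000/workflow/stream-node", json=by_id, stream=True)
    for line in response.iter_lines():
        if line:
            print(json.loads(line)["name"])  # e.g. "node.execution.initiated"

Omitting all three fields fails model validation and produces the 400 exercised by test_stream_node_route__missing_node_info_validation.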
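In executor.py, pebble's decorated futures give way to a bare multiprocessing.Process feeding a Queue, and workflow_view.py pairs the queue's Empty timeout with a process.is_alive() check. A standalone sketch of that pattern, with names simplified rather than copied from the package:

    # Standalone sketch of the Process-plus-Queue streaming pattern;
    # identifiers are simplified, not the package's actual names.
    from multiprocessing import Process, Queue
    from queue import Empty


    def producer(queue: Queue) -> None:
        # Stand-in for the wrapper that runs the node and emits events.
        for name in ("node.execution.initiated", "node.execution.fulfilled"):
            queue.put({"name": name})


    if __name__ == "__main__":
        queue: Queue = Queue()
        process = Process(target=producer, args=(queue,))
        process.start()
        while True:
            try:
                event = queue.get(timeout=5)
            except Empty:
                # Mirrors the new liveness check: a dead child plus an empty
                # queue means the stream failed, so reject instead of waiting.
                if not process.is_alive():
                    print("rejected: Internal Server Error")
                    break
                continue
            print(event["name"])
            if event["name"] == "node.execution.fulfilled":
                break
        process.join()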
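The NodeExecutorContext fields rely on pydantic's after-mode model validator to enforce the either/or contract at deserialization time. A self-contained sketch of the same pattern; only the field names and error message are taken from the diff, and the surrounding model is simplified to a plain BaseModel:

    # Self-contained sketch of the pydantic v2 validation pattern used above.
    from typing import Optional
    from uuid import UUID, uuid4

    from pydantic import BaseModel, ValidationError, model_validator
    from typing_extensions import Self


    class NodeIdentification(BaseModel):
        node_id: Optional[UUID] = None
        node_module: Optional[str] = None
        node_name: Optional[str] = None

        @model_validator(mode="after")
        def validate_node_identification(self) -> Self:
            # Accept the new node_id, or the legacy module/name pair.
            if not self.node_id and not (self.node_module and self.node_name):
                raise ValueError("Either node_id or both node_module and node_name must be provided")
            return self


    NodeIdentification(node_id=uuid4())  # ok
    NodeIdentification(node_module="nodes.test_node", node_name="TestNode")  # ok
    try:
        NodeIdentification()  # neither form provided
    except ValidationError as exc:
        print(exc.errors()[0]["msg"])  # surfaces the ValueError message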