vellum-workflow-server 1.6.2__py3-none-any.whl → 1.6.2.post1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of vellum-workflow-server might be problematic.
- {vellum_workflow_server-1.6.2.dist-info → vellum_workflow_server-1.6.2.post1.dist-info}/METADATA +1 -1
- {vellum_workflow_server-1.6.2.dist-info → vellum_workflow_server-1.6.2.post1.dist-info}/RECORD +7 -7
- workflow_server/api/workflow_view.py +4 -34
- workflow_server/code_exec_runner.py +1 -2
- workflow_server/core/workflow_executor_context.py +26 -3
- {vellum_workflow_server-1.6.2.dist-info → vellum_workflow_server-1.6.2.post1.dist-info}/WHEEL +0 -0
- {vellum_workflow_server-1.6.2.dist-info → vellum_workflow_server-1.6.2.post1.dist-info}/entry_points.txt +0 -0
{vellum_workflow_server-1.6.2.dist-info → vellum_workflow_server-1.6.2.post1.dist-info}/RECORD
RENAMED
@@ -6,15 +6,15 @@ workflow_server/api/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 workflow_server/api/tests/test_input_display_mapping.py,sha256=drBZqMudFyB5wgiUOcMgRXz7E7ge-Qgxbstw4E4f0zE,2211
 workflow_server/api/tests/test_workflow_view.py,sha256=d46UNMZUJbIKLiTJkjIsVpgqCJMwCe4LL6RjFkfx_Y4,29178
 workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=FaEIgGsbq8M7ZF2opVjBdGCYeRPF_vqsUhFTruLInxA,31358
-workflow_server/api/workflow_view.py,sha256=
-workflow_server/code_exec_runner.py,sha256=
+workflow_server/api/workflow_view.py,sha256=pqqgeQMVU3YjozjP26Q_f9qSsL_pjkpOQctsq_xwHfE,20447
+workflow_server/code_exec_runner.py,sha256=5uTjHJ231aX6vMwPKRvLcQdbsIxhjTsGMk8G0MEnasU,2152
 workflow_server/config.py,sha256=qmmTr6ty3ZN5LDOFs3TfUxYshYe6Mmn_LanplHHeE9Q,1796
 workflow_server/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 workflow_server/core/cancel_workflow.py,sha256=Ffkc3mzmrdMEUcD-sHfEhX4IwVrka-E--SxKA1dUfIU,2185
 workflow_server/core/events.py,sha256=24MA66DVQuaLJJcZrS8IL1Zq4Ohi9CoouKZ5VgoH3Cs,1402
 workflow_server/core/executor.py,sha256=Bw6luelJs84-_fBIG9VvxYFxeha4DmwxDpslVeqr2I8,18411
 workflow_server/core/utils.py,sha256=si0NB4Suurc-mn8NYdn59xM9CkPrfOP1aWEVrZvifDI,1929
-workflow_server/core/workflow_executor_context.py,sha256=
+workflow_server/core/workflow_executor_context.py,sha256=3HCcT8Xp3iZmgfBjQ4mGurETa9p2D9w4o8GJW61i6xc,4476
 workflow_server/server.py,sha256=QBU12AaAfAgLqfCDBd24qIJl_mbheiq0-hfcWV7rZM4,1234
 workflow_server/start.py,sha256=pkwRcms6I4tkVHP06LdrZY6rG_DFHfBx4ioY5X91W5k,2264
 workflow_server/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -28,7 +28,7 @@ workflow_server/utils/tests/test_sentry_integration.py,sha256=LGmWiaLhFrx-jslrRj
 workflow_server/utils/tests/test_system_utils.py,sha256=_4GwXvVvU5BrATxUEWwQIPg0bzQXMWBtiBmjP8MTxJM,4314
 workflow_server/utils/tests/test_utils.py,sha256=0Nq6du8o-iBtTrip9_wgHES53JSiJbVdSXaBnPobw3s,6930
 workflow_server/utils/utils.py,sha256=ZPoM1Suhid22dpB8oEFLux8wx-9iyzmSfWuYxSCrgWk,4774
-vellum_workflow_server-1.6.2.dist-info/METADATA,sha256=
-vellum_workflow_server-1.6.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-vellum_workflow_server-1.6.2.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
-vellum_workflow_server-1.6.2.dist-info/RECORD,,
+vellum_workflow_server-1.6.2.post1.dist-info/METADATA,sha256=fqu5YV3jRuxGWRFdnak8sx2VcQmjmYBd-w0zCk1ZcNY,2273
+vellum_workflow_server-1.6.2.post1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+vellum_workflow_server-1.6.2.post1.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
+vellum_workflow_server-1.6.2.post1.dist-info/RECORD,,
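For readers unfamiliar with the RECORD format: each entry is `path,sha256=<urlsafe-base64 digest without padding>,<size in bytes>`, so the churn above simply reflects the edited source files and the renamed dist-info directory. A minimal sketch for recomputing such an entry locally (the path is only an example against an unpacked wheel):

```python
import base64
import hashlib
from pathlib import Path


def record_entry(path: str) -> str:
    """Build a wheel RECORD line: path,sha256=<urlsafe-b64 digest, no padding>,size."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"


# Hypothetical usage: compare the output with the +/- RECORD lines above.
print(record_entry("workflow_server/code_exec_runner.py"))
```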
workflow_server/api/workflow_view.py
CHANGED
@@ -16,7 +16,7 @@ import traceback
 from uuid import uuid4
 from typing import Any, Dict, Generator, Iterator, Optional, Union, cast
 
-from flask import Blueprint, Response, current_app as app,
+from flask import Blueprint, Response, current_app as app, request, stream_with_context
 from pydantic import ValidationError
 from vellum_ee.workflows.display.nodes.get_node_display_class import get_node_display_class
 from vellum_ee.workflows.display.types import WorkflowDisplayContext
@@ -56,7 +56,7 @@ from workflow_server.utils.system_utils import (
     remove_active_span_id,
     wait_for_available_process,
 )
-from workflow_server.utils.utils import
+from workflow_server.utils.utils import get_version
 
 bp = Blueprint("exec", __name__)
 logger = logging.getLogger(__name__)
@@ -72,7 +72,7 @@ def stream_workflow_route() -> Response:
     data = request.get_json()
 
     try:
-        context =
+        context = WorkflowExecutorContext.model_validate(data)
     except ValidationError as e:
         error_message = e.errors()[0]["msg"]
         error_location = e.errors()[0]["loc"]
@@ -354,7 +354,7 @@ def stream_node_route() -> Response:
     data = request.get_json()
 
     try:
-        context =
+        context = NodeExecutorContext.model_validate(data)
     except ValidationError as e:
         error_message = e.errors()[0]["msg"]
         error_location = e.errors()[0]["loc"]
@@ -533,36 +533,6 @@ def get_version_route() -> tuple[dict, int]:
     return resp, 200
 
 
-def get_workflow_request_context(data: dict) -> WorkflowExecutorContext:
-    context_data = {
-        **data,
-        "inputs": convert_json_inputs_to_vellum(data.get("inputs") or []),
-        "request_start_time": time.time_ns(),
-    }
-
-    if has_request_context():
-        api_version_header = request.headers.get("x-api-version")
-        if api_version_header:
-            context_data["api_version"] = api_version_header
-
-    return WorkflowExecutorContext.model_validate(context_data)
-
-
-def get_node_request_context(data: dict) -> NodeExecutorContext:
-    context_data = {
-        **data,
-        "inputs": convert_json_inputs_to_vellum(data["inputs"]),
-        "request_start_time": time.time_ns(),
-    }
-
-    if has_request_context():
-        api_version_header = request.headers.get("x-api-version")
-        if api_version_header:
-            context_data["api_version"] = api_version_header
-
-    return NodeExecutorContext.model_validate(context_data)
-
-
 def startup_error_generator(
     vembda_initiated_event: VembdaExecutionInitiatedEvent, message: str, context: WorkflowExecutorContext
 ) -> Generator[str, None, None]:
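The +4/-34 in this file comes from deleting the `get_workflow_request_context` and `get_node_request_context` helpers and having each route validate the raw request body directly; the work those helpers did (input conversion, `request_start_time`, the `x-api-version` header) appears to move into the context models themselves (see the `workflow_executor_context.py` diff below). A minimal sketch of the new route shape, using a simplified stand-in model rather than the real `WorkflowExecutorContext`:

```python
from flask import Flask, jsonify, request
from pydantic import BaseModel, Field, ValidationError

app = Flask(__name__)


class WorkflowExecutorContextStub(BaseModel):
    """Stand-in for WorkflowExecutorContext; the real model lives in workflow_executor_context.py."""

    execution_id: str
    module: str
    inputs: dict = Field(default_factory=dict)


@app.post("/stream")
def stream_workflow_route():
    data = request.get_json()
    try:
        # 1.6.2 routed this through get_workflow_request_context(data);
        # 1.6.2.post1 validates the request body directly.
        context = WorkflowExecutorContextStub.model_validate(data)
    except ValidationError as e:
        return jsonify({"detail": e.errors()[0]["msg"]}), 400
    return jsonify({"execution_id": context.execution_id, "module": context.module})
```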
workflow_server/code_exec_runner.py
CHANGED
@@ -6,7 +6,6 @@ from threading import Event as ThreadingEvent
 from uuid import uuid4
 from typing import Optional
 
-from workflow_server.api.workflow_view import get_workflow_request_context
 from workflow_server.core.events import VembdaExecutionInitiatedBody, VembdaExecutionInitiatedEvent
 from workflow_server.core.executor import stream_workflow
 from workflow_server.core.utils import serialize_vembda_rejected_event
@@ -31,7 +30,7 @@ def run_code_exec_stream() -> None:
     input_json = split_input[0]
 
     input_data = json.loads(input_json)
-    context =
+    context = WorkflowExecutorContext.model_validate(input_data)
 
     print("--vellum-output-start--") # noqa: T201
 
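Same pattern here, plus a layering fix: the code-exec runner no longer imports a helper from the Flask view module and instead validates the parsed stdin payload itself. A tiny self-contained sketch of that call shape, with a hypothetical stub model and made-up payload values:

```python
import json

from pydantic import BaseModel, Field


class ContextStub(BaseModel):
    """Stand-in for WorkflowExecutorContext, just to show the call shape."""

    execution_id: str
    module: str
    inputs: dict = Field(default_factory=dict)


# input_json corresponds to split_input[0] in run_code_exec_stream above.
input_json = '{"execution_id": "123", "module": "examples.workflow"}'
context = ContextStub.model_validate(json.loads(input_json))
print(context.inputs)  # {} -- the default now comes from the model, not a view-layer helper
```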
workflow_server/core/workflow_executor_context.py
CHANGED
@@ -1,16 +1,19 @@
 from dataclasses import field
 from functools import cached_property
 import os
+import time
 from uuid import UUID
 from typing import Any, Optional
 from typing_extensions import Self
 
-from
+from flask import has_request_context, request
+from pydantic import Field, field_validator, model_validator
 
 from vellum import ApiVersionEnum, Vellum, VellumEnvironment
 from vellum.client.core import UniversalBaseModel
 from vellum.workflows.context import ExecutionContext
 from workflow_server.config import IS_VPC, VELLUM_API_URL_HOST, VELLUM_API_URL_PORT
+from workflow_server.utils.utils import convert_json_inputs_to_vellum
 
 DEFAULT_TIMEOUT_SECONDS = 60 * 30
 
@@ -55,7 +58,7 @@ def create_vellum_client(
 
 
 class BaseExecutorContext(UniversalBaseModel):
-    inputs: dict
+    inputs: dict = Field(default_factory=dict)
     state: Optional[dict] = None
     timeout: int = DEFAULT_TIMEOUT_SECONDS
     files: dict[str, str]
@@ -64,7 +67,7 @@ class BaseExecutorContext(UniversalBaseModel):
     execution_id: UUID
     module: str
     execution_context: ExecutionContext = field(default_factory=ExecutionContext)
-    request_start_time: int
+    request_start_time: int = Field(default_factory=lambda: time.time_ns())
     stream_start_time: int = 0
     vembda_public_url: Optional[str] = None
     node_output_mocks: Optional[list[Any]] = None
@@ -72,6 +75,26 @@ class BaseExecutorContext(UniversalBaseModel):
     previous_execution_id: Optional[UUID] = None
     feature_flags: Optional[dict[str, bool]] = None
 
+    @field_validator("inputs", mode="before")
+    @classmethod
+    def convert_inputs(cls, v: Any) -> dict:
+        if v is None:
+            return {}
+        if isinstance(v, list):
+            return convert_json_inputs_to_vellum(v)
+        return v
+
+    @field_validator("api_version", mode="before")
+    @classmethod
+    def extract_api_version_from_headers(cls, v: Any) -> Any:
+        if v is not None:
+            return v
+        if has_request_context():
+            api_version_header = request.headers.get("x-api-version")
+            if api_version_header:
+                return api_version_header
+        return v
+
     @property
     def container_overhead_latency(self) -> int:
         return self.stream_start_time - self.request_start_time if self.stream_start_time else -1
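Taken together, the new defaults and before-mode validators are what let the callers above pass raw payloads straight to `model_validate`: list-style inputs get converted, `request_start_time` defaults to the current time, and `api_version` falls back to the `x-api-version` header when a Flask request context is active. A self-contained sketch of the same pattern, with a dummy converter standing in for `convert_json_inputs_to_vellum` and the header fallback omitted for brevity:

```python
import time
from typing import Any

from pydantic import BaseModel, Field, field_validator


def convert_json_inputs_to_vellum_stub(raw: list) -> dict:
    """Dummy stand-in: turn [{"name": ..., "value": ...}, ...] into {name: value}."""
    return {item["name"]: item["value"] for item in raw}


class ExecutorContextSketch(BaseModel):
    inputs: dict = Field(default_factory=dict)
    request_start_time: int = Field(default_factory=lambda: time.time_ns())

    @field_validator("inputs", mode="before")
    @classmethod
    def convert_inputs(cls, v: Any) -> Any:
        if v is None:
            return {}
        if isinstance(v, list):
            return convert_json_inputs_to_vellum_stub(v)
        return v


# A list-style payload is converted before validation; the start time is filled in automatically.
ctx = ExecutorContextSketch.model_validate({"inputs": [{"name": "city", "value": "Berlin"}]})
print(ctx.inputs)              # {'city': 'Berlin'}
print(ctx.request_start_time)  # nanosecond timestamp from the default factory
```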
{vellum_workflow_server-1.6.2.dist-info → vellum_workflow_server-1.6.2.post1.dist-info}/WHEEL
RENAMED
File without changes