vellum-workflow-server 1.4.0__py3-none-any.whl → 1.4.1.post1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of vellum-workflow-server might be problematic.
- {vellum_workflow_server-1.4.0.dist-info → vellum_workflow_server-1.4.1.post1.dist-info}/METADATA +2 -2
- {vellum_workflow_server-1.4.0.dist-info → vellum_workflow_server-1.4.1.post1.dist-info}/RECORD +8 -8
- workflow_server/api/workflow_view.py +16 -5
- workflow_server/core/executor.py +22 -15
- workflow_server/utils/oom_killer.py +4 -1
- workflow_server/utils/system_utils.py +34 -0
- {vellum_workflow_server-1.4.0.dist-info → vellum_workflow_server-1.4.1.post1.dist-info}/WHEEL +0 -0
- {vellum_workflow_server-1.4.0.dist-info → vellum_workflow_server-1.4.1.post1.dist-info}/entry_points.txt +0 -0
{vellum_workflow_server-1.4.0.dist-info → vellum_workflow_server-1.4.1.post1.dist-info}/METADATA
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-workflow-server
-Version: 1.4.0
+Version: 1.4.1.post1
 Summary:
 License: AGPL
 Requires-Python: >=3.9.0,<4
@@ -29,7 +29,7 @@ Requires-Dist: pyjwt (==2.10.0)
 Requires-Dist: python-dotenv (==1.0.1)
 Requires-Dist: retrying (==1.3.4)
 Requires-Dist: sentry-sdk[flask] (==2.20.0)
-Requires-Dist: vellum-ai (==1.4.
+Requires-Dist: vellum-ai (==1.4.1)
 Description-Content-Type: text/markdown

 # Vellum Workflow Runner Server
{vellum_workflow_server-1.4.0.dist-info → vellum_workflow_server-1.4.1.post1.dist-info}/RECORD
RENAMED

@@ -6,13 +6,13 @@ workflow_server/api/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 workflow_server/api/tests/test_input_display_mapping.py,sha256=drBZqMudFyB5wgiUOcMgRXz7E7ge-Qgxbstw4E4f0zE,2211
 workflow_server/api/tests/test_workflow_view.py,sha256=RlAw1tHeIlnOXGrFQN-w3EOLPZkhp6Dfy6d1r7kU5oc,22573
 workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=Qo8u6mPyRCmE2jamY1yIh8l44hgo4-Nwlq03z61ND5g,27031
-workflow_server/api/workflow_view.py,sha256=
+workflow_server/api/workflow_view.py,sha256=_WhjNgimTPoS10C-npRWDfJixzg4eHTJ5xIKACStZf4,21943
 workflow_server/code_exec_runner.py,sha256=lBnMIorPZL8zZBye6TjeCIs06WTJM7P2HR07B1fjJJI,2533
 workflow_server/config.py,sha256=qmmTr6ty3ZN5LDOFs3TfUxYshYe6Mmn_LanplHHeE9Q,1796
 workflow_server/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 workflow_server/core/cancel_workflow.py,sha256=Ffkc3mzmrdMEUcD-sHfEhX4IwVrka-E--SxKA1dUfIU,2185
 workflow_server/core/events.py,sha256=24MA66DVQuaLJJcZrS8IL1Zq4Ohi9CoouKZ5VgoH3Cs,1402
-workflow_server/core/executor.py,sha256=
+workflow_server/core/executor.py,sha256=bNryNvXBbg7IHW3BFzzFaGoCO1rGufeEE4FrY9TDA90,18294
 workflow_server/core/utils.py,sha256=aIpSINstLGslP2PIoDLM82_1GlJ1uC_0AIrP-V7Yobo,3230
 workflow_server/core/workflow_executor_context.py,sha256=w3OhV_AXpgh7AxpjEsc0vo-IJypgJcgr5DXJCqGptOU,1587
 workflow_server/server.py,sha256=QBU12AaAfAgLqfCDBd24qIJl_mbheiq0-hfcWV7rZM4,1234
@@ -20,15 +20,15 @@ workflow_server/start.py,sha256=pkwRcms6I4tkVHP06LdrZY6rG_DFHfBx4ioY5X91W5k,2264
 workflow_server/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 workflow_server/utils/exit_handler.py,sha256=_FacDVi4zc3bfTA3D2mJsISePlJ8jpLrnGVo5-xZQFs,743
 workflow_server/utils/log_proxy.py,sha256=nugi6fOgAYKX2X9DIc39TG366rsmmDUPoEtG3gzma_Y,3088
-workflow_server/utils/oom_killer.py,sha256=
+workflow_server/utils/oom_killer.py,sha256=AprKFXC_wT3lQZcKDxU5O6dtJwi6meRxjo7nhQtQ8T0,2955
 workflow_server/utils/sentry.py,sha256=pqx3X_4W3yOzmz8QMJYUEi39skIKWtrTN5nyFhaPkbk,1597
-workflow_server/utils/system_utils.py,sha256=
+workflow_server/utils/system_utils.py,sha256=3jNv113zRkKJ0928i2Vm6TqFHrDulteQu1kjseP2B0Y,3271
 workflow_server/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 workflow_server/utils/tests/test_sentry_integration.py,sha256=LGmWiaLhFrx-jslrRjRq9JY6Z5ShLZyx_N_L0-FU6OI,2100
 workflow_server/utils/tests/test_system_utils.py,sha256=_4GwXvVvU5BrATxUEWwQIPg0bzQXMWBtiBmjP8MTxJM,4314
 workflow_server/utils/tests/test_utils.py,sha256=0Nq6du8o-iBtTrip9_wgHES53JSiJbVdSXaBnPobw3s,6930
 workflow_server/utils/utils.py,sha256=ZPoM1Suhid22dpB8oEFLux8wx-9iyzmSfWuYxSCrgWk,4774
-vellum_workflow_server-1.4.
-vellum_workflow_server-1.4.
-vellum_workflow_server-1.4.
-vellum_workflow_server-1.4.
+vellum_workflow_server-1.4.1.post1.dist-info/METADATA,sha256=sXR0B1bNZZvRQswy2hK4IqACrHG67fw6XLErZfqRoRE,2273
+vellum_workflow_server-1.4.1.post1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+vellum_workflow_server-1.4.1.post1.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
+vellum_workflow_server-1.4.1.post1.dist-info/RECORD,,
workflow_server/api/workflow_view.py
CHANGED

@@ -23,6 +23,7 @@ from vellum_ee.workflows.display.types import WorkflowDisplayContext
 from vellum_ee.workflows.display.workflows import BaseWorkflowDisplay
 from vellum_ee.workflows.server.virtual_file_loader import VirtualFileFinder

+from vellum.workflows import BaseWorkflow
 from vellum.workflows.exceptions import WorkflowInitializationException
 from vellum.workflows.nodes import BaseNode
 from workflow_server.config import ENABLE_PROCESS_WRAPPER, MEMORY_LIMIT_MB
@@ -49,8 +50,10 @@ from workflow_server.core.workflow_executor_context import (
 )
 from workflow_server.utils.oom_killer import get_is_oom_killed
 from workflow_server.utils.system_utils import (
+    add_active_span_id,
     get_active_process_count,
     increment_process_count,
+    remove_active_span_id,
     wait_for_available_process,
 )
 from workflow_server.utils.utils import convert_json_inputs_to_vellum, get_version
@@ -182,10 +185,7 @@ def stream_workflow_route() -> Response:
     first_item = next(stream_iterator)
     increment_process_count(1)

-    if isinstance(first_item, str)
-        span_id = first_item.split(":")[1]
-        headers["X-Vellum-Workflow-Span-Id"] = span_id
-    else:
+    if not isinstance(first_item, str) or not first_item.startswith(SPAN_ID_EVENT):
         logger.error("Workflow stream did not start with span id event")
         return Response(
             json.dumps({"detail": "Internal Server Error"}),
@@ -194,6 +194,10 @@
             headers=headers,
         )

+    span_id = first_item.split(":")[1]
+    headers["X-Vellum-Workflow-Span-Id"] = span_id
+    add_active_span_id(span_id)
+
     logger.info(f"Starting Workflow Stream, execution ID: {span_id}, ")

     def process_events(queue: Queue) -> Iterator[Union[str, dict]]:
@@ -227,6 +231,7 @@ def stream_workflow_route() -> Response:

         if not ENABLE_PROCESS_WRAPPER or process:
             increment_process_count(-1)
+            remove_active_span_id(span_id)

         yield VembdaExecutionFulfilledEvent(
             id=uuid4(),
@@ -254,6 +259,7 @@
                 process.kill()
             if process:
                 increment_process_count(-1)
+                remove_active_span_id(span_id)

             break

@@ -332,10 +338,12 @@
                 process.kill()
             if process:
                 increment_process_count(-1)
+                remove_active_span_id(span_id)
         except Exception as e:
             logger.error("Failed to kill process", e)
     else:
         increment_process_count(-1)
+        remove_active_span_id(span_id)

     resp = Response(
         stream_with_context(generator()),
@@ -451,7 +459,10 @@ def serialize_route() -> Response:
     try:
         result = BaseWorkflowDisplay.serialize_module(namespace, client=client)
     except Exception as e:
-        raise WorkflowInitializationException(
+        raise WorkflowInitializationException(
+            message=str(e),
+            workflow_definition=BaseWorkflow,
+        ) from e

     return Response(
         json.dumps(result.model_dump()),
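Note on the workflow_view.py changes above: the route now validates that the first item from the workflow stream is a span-id event before using it (the old branch only checked isinstance), records the span ID in the X-Vellum-Workflow-Span-Id response header, and registers it with the new active-span tracking so the OOM killer can report which executions were in flight. Every exit path that decrements the process count also deregisters the span ID. The pairing can be pictured with the sketch below; it is an illustration only, not code from the package (the route performs the add/remove inline on each branch rather than through a context manager):

    # Hypothetical helper showing the add/remove pairing that
    # stream_workflow_route now performs manually on every exit path.
    from contextlib import contextmanager

    from workflow_server.utils.system_utils import add_active_span_id, remove_active_span_id

    @contextmanager
    def track_span(span_id: str):
        add_active_span_id(span_id)        # visible to the OOM killer while streaming
        try:
            yield
        finally:
            remove_active_span_id(span_id)  # always cleaned up, even on errors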
workflow_server/core/executor.py
CHANGED

@@ -174,7 +174,7 @@ def stream_workflow(
     cancel_signal: Optional[ThreadingEvent] = None,
 ) -> tuple[Iterator[dict], UUID]:
     workflow, namespace = _gather_workflow(executor_context)
-    workflow_inputs = _get_workflow_inputs(executor_context)
+    workflow_inputs = _get_workflow_inputs(executor_context, workflow.__class__)
     display_context = _gather_display_context(workflow, namespace)
     workflow_state = (
         workflow.deserialize_state(
@@ -258,21 +258,18 @@ def stream_node(
     executor_context: NodeExecutorContext,
     disable_redirect: bool = True,
 ) -> Iterator[dict]:
-    namespace = _get_file_namespace(executor_context)
-
     def call_node() -> Generator[dict[str, Any], Any, None]:
-
-        workflow_context = _create_workflow_context(executor_context)
+        workflow, namespace = _gather_workflow(executor_context)
         node_module = importlib.import_module(f"{namespace}.{executor_context.node_module}")

         Node = getattr(node_module, executor_context.node_name)

-        workflow_inputs = _get_workflow_inputs(executor_context)
+        workflow_inputs = _get_workflow_inputs(executor_context, workflow.__class__)
         workflow_state = _get_workflow_state(executor_context, workflow_inputs=workflow_inputs)

         node = Node(
             state=workflow_state,
-            context=
+            context=workflow._context,
         )

         executor_context.stream_start_time = time.time_ns()
@@ -339,7 +336,7 @@ def _call_stream(
     yield vembda_fulfilled_event.model_dump(mode="json")


-def _create_workflow(executor_context:
+def _create_workflow(executor_context: BaseExecutorContext, namespace: str) -> BaseWorkflow:
     workflow_context = _create_workflow_context(executor_context)
     Workflow = BaseWorkflow.load_from_module(namespace)
     VembdaExecutionFulfilledEvent.model_rebuild(
@@ -431,7 +428,9 @@ def _dump_event(event: BaseEvent, executor_context: BaseExecutorContext, client:
     return dump


-def _get_workflow_inputs(
+def _get_workflow_inputs(
+    executor_context: BaseExecutorContext, workflow_class: Type[BaseWorkflow]
+) -> Optional[BaseInputs]:
     if not executor_context.inputs:
         return None

@@ -443,17 +442,22 @@ def _get_workflow_inputs(executor_context: BaseExecutorContext) -> Optional[Base
     try:
         inputs_module = importlib.import_module(inputs_module_path)
     except Exception as e:
-        raise WorkflowInitializationException(
+        raise WorkflowInitializationException(
+            message=f"Failed to initialize workflow inputs: {e}",
+            workflow_definition=workflow_class,
+        ) from e

     if not hasattr(inputs_module, "Inputs"):
         raise WorkflowInitializationException(
-            f"Inputs module {inputs_module_path} does not have a required Inputs class"
+            message=f"Inputs module {inputs_module_path} does not have a required Inputs class",
+            workflow_definition=workflow_class,
         )

     if not issubclass(inputs_module.Inputs, BaseInputs):
         raise WorkflowInitializationException(
-            f"""The class {inputs_module_path}.Inputs was expected to be a subclass of BaseInputs, \
-but found {inputs_module.Inputs.__class__.__name__}"""
+            message=f"""The class {inputs_module_path}.Inputs was expected to be a subclass of BaseInputs, \
+but found {inputs_module.Inputs.__class__.__name__}""",
+            workflow_definition=workflow_class,
         )

     return inputs_module.Inputs(**executor_context.inputs)
@@ -493,7 +497,7 @@ def _get_run_from_node(executor_context: WorkflowExecutorContext, workflow: Base
     return None


-def _gather_workflow(context:
+def _gather_workflow(context: BaseExecutorContext) -> Tuple[BaseWorkflow, str]:
     try:
         namespace = _get_file_namespace(context)
         if namespace != LOCAL_WORKFLOW_MODULE:
@@ -505,7 +509,10 @@ def _gather_workflow(context: WorkflowExecutorContext) -> Tuple[BaseWorkflow, st
         return workflow, namespace
     except Exception as e:
         logger.exception("Failed to initialize Workflow")
-        raise WorkflowInitializationException(
+        raise WorkflowInitializationException(
+            message=f"Failed to initialize workflow: {e}",
+            workflow_definition=BaseWorkflow,
+        ) from e


 def _gather_display_context(workflow: BaseWorkflow, namespace: str) -> Optional["WorkflowEventDisplayContext"]:
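Note on the executor.py changes above: _get_workflow_inputs now receives the workflow class alongside the executor context, and every WorkflowInitializationException in this module is raised with explicit message= and workflow_definition= keywords and chained with `from e` so the original traceback is preserved. A minimal sketch of that pattern, assuming only the keyword signature visible in this diff (workflow_class stands for whatever BaseWorkflow subclass the caller resolved):

    # Sketch of the error-raising pattern used throughout executor.py.
    try:
        inputs_module = importlib.import_module(inputs_module_path)
    except Exception as e:
        # Attaching the workflow class lets downstream error events attribute
        # the failure to a specific workflow definition.
        raise WorkflowInitializationException(
            message=f"Failed to initialize workflow inputs: {e}",
            workflow_definition=workflow_class,
        ) from e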
workflow_server/utils/oom_killer.py
CHANGED

@@ -15,6 +15,7 @@ from workflow_server.utils.system_utils import (
     FORCE_GC_MEMORY_PERCENT,
     WARN_MEMORY_PERCENT,
     get_active_process_count,
+    get_active_span_ids,
     get_memory_in_use_mb,
 )

@@ -71,8 +72,10 @@ class OomKillerThread(Thread):

         if memory_mb > (MEMORY_LIMIT_MB * _MAX_MEMORY_PERCENT):
             self._kill_switch.set()
+            active_span_ids = get_active_span_ids()
             logger.error(
-                f"Workflow server OOM killed, memory: {memory_mb}MB, Process Count: {get_active_process_count()}"
+                f"Workflow server OOM killed, memory: {memory_mb}MB, Process Count: {get_active_process_count()}",
+                extra={"active_span_ids": active_span_ids},
             )
             # Give time for the threads to get our kill switch
             sleep(_KILL_GRACE_PERIOD)
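Note on the oom_killer.py change above: the OOM log line now passes the active span IDs through the standard-library `extra=` mechanism, which attaches each key as an attribute on the LogRecord so a structured formatter or log shipper can emit it alongside the message. A generic example of that mechanism (values are made up):

    import logging

    logger = logging.getLogger(__name__)
    # Keys passed via extra= become LogRecord attributes (record.active_span_ids here),
    # available to formatters, filters, and handlers.
    logger.error(
        "Workflow server OOM killed, memory: 2048MB, Process Count: 3",
        extra={"active_span_ids": ["span-id-1", "span-id-2"]},
    )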
workflow_server/utils/system_utils.py
CHANGED

@@ -17,6 +17,7 @@ _MEMORY_CHECK_INTERVAL_SECONDS = 2
 _MAX_MEMORY_CHECK_ATTEMPTS = 3
 _ACTIVE_PROCESS_COUNT = multiprocessing.Value("i", 0)
 _ACTIVE_PROCESS_LOCK = multiprocessing.Lock()
+_ACTIVE_SPAN_IDS = multiprocessing.Manager().list()


 def increment_process_count(change: int) -> None:
@@ -36,6 +37,39 @@ def get_active_process_count() -> int:
     return _ACTIVE_PROCESS_COUNT.value  # type: ignore


+def get_active_span_ids() -> list[str]:
+    """Get a copy of currently active span IDs"""
+    with _ACTIVE_PROCESS_LOCK:
+        return list(_ACTIVE_SPAN_IDS)
+
+
+def add_active_span_id(span_id: str) -> None:
+    """Add a span ID to the active tracking list"""
+    result = _ACTIVE_PROCESS_LOCK.acquire(timeout=5)
+    try:
+        if result:
+            _ACTIVE_SPAN_IDS.append(span_id)
+        else:
+            logger.error("Failed to lock workflow server span ID tracking.")
+    finally:
+        if result:
+            _ACTIVE_PROCESS_LOCK.release()
+
+
+def remove_active_span_id(span_id: str) -> None:
+    """Remove a span ID from the active tracking list"""
+    result = _ACTIVE_PROCESS_LOCK.acquire(timeout=5)
+    try:
+        if result and span_id in _ACTIVE_SPAN_IDS:
+            _ACTIVE_SPAN_IDS.remove(span_id)
+        else:
+            if not result:
+                logger.error("Failed to lock workflow server span ID tracking.")
+    finally:
+        if result:
+            _ACTIVE_PROCESS_LOCK.release()
+
+
 def get_memory_in_use_mb() -> Optional[float]:
     try:
         with open("/sys/fs/cgroup/memory/memory.usage_in_bytes", "r") as file:
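Note on the system_utils.py additions above: the span IDs live in a multiprocessing.Manager().list(), a proxy object shared across the server's worker processes, and writers acquire the existing _ACTIVE_PROCESS_LOCK with a 5-second timeout so a contended lock degrades to a lost tracking update (plus an error log) rather than a blocked request. A minimal usage sketch of the three helpers (span IDs are made up):

    from workflow_server.utils.system_utils import (
        add_active_span_id,
        get_active_span_ids,
        remove_active_span_id,
    )

    add_active_span_id("span-id-1")
    add_active_span_id("span-id-2")
    print(get_active_span_ids())     # ["span-id-1", "span-id-2"]
    remove_active_span_id("span-id-1")
    print(get_active_span_ids())     # ["span-id-2"]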
{vellum_workflow_server-1.4.0.dist-info → vellum_workflow_server-1.4.1.post1.dist-info}/WHEEL
RENAMED
File without changes

{vellum_workflow_server-1.4.0.dist-info → vellum_workflow_server-1.4.1.post1.dist-info}/entry_points.txt
RENAMED
File without changes