vellum-workflow-server 0.14.73.post1__py3-none-any.whl → 0.14.73.post3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of vellum-workflow-server might be problematic.
- {vellum_workflow_server-0.14.73.post1.dist-info → vellum_workflow_server-0.14.73.post3.dist-info}/METADATA +1 -1
- {vellum_workflow_server-0.14.73.post1.dist-info → vellum_workflow_server-0.14.73.post3.dist-info}/RECORD +10 -9
- workflow_server/api/tests/test_workflow_view_stream_workflow_route.py +6 -34
- workflow_server/api/workflow_view.py +7 -14
- workflow_server/code_exec_runner.py +0 -3
- workflow_server/core/executor.py +0 -5
- workflow_server/utils/sentry.py +34 -0
- workflow_server/utils/tests/test_sentry_integration.py +69 -0
- {vellum_workflow_server-0.14.73.post1.dist-info → vellum_workflow_server-0.14.73.post3.dist-info}/WHEEL +0 -0
- {vellum_workflow_server-0.14.73.post1.dist-info → vellum_workflow_server-0.14.73.post3.dist-info}/entry_points.txt +0 -0
{vellum_workflow_server-0.14.73.post1.dist-info → vellum_workflow_server-0.14.73.post3.dist-info}/RECORD CHANGED

@@ -5,14 +5,14 @@ workflow_server/api/healthz_view.py,sha256=itiRvBDBXncrw8Kbbc73UZLwqMAhgHOR3uSre
 workflow_server/api/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 workflow_server/api/tests/test_input_display_mapping.py,sha256=drBZqMudFyB5wgiUOcMgRXz7E7ge-Qgxbstw4E4f0zE,2211
 workflow_server/api/tests/test_workflow_view.py,sha256=2nscM_QsYPHkkTG8_JhNbE2LmGL5FQKXEtaLLjXouw0,14591
-workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=
-workflow_server/api/workflow_view.py,sha256=
-workflow_server/code_exec_runner.py,sha256=
+workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=cUVF3tqV8Ay91YfFr2lT2oXONWRN823Nk5M3EOC8wE8,24133
+workflow_server/api/workflow_view.py,sha256=9_ZI7TGlTRmeNiO47juTRx5TBpAjVmUEGx5C_VqTYMI,14993
+workflow_server/code_exec_runner.py,sha256=njBK48zVUwhAjvap_KY1so-D0UjKgR4UihDuKow3JnM,2274
 workflow_server/config.py,sha256=K5Tavm7wiqCZt0RWWue7zzb8N6e8aWnFOTNlBqEJPcI,1330
 workflow_server/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 workflow_server/core/cancel_workflow.py,sha256=Ffkc3mzmrdMEUcD-sHfEhX4IwVrka-E--SxKA1dUfIU,2185
 workflow_server/core/events.py,sha256=24MA66DVQuaLJJcZrS8IL1Zq4Ohi9CoouKZ5VgoH3Cs,1402
-workflow_server/core/executor.py,sha256=
+workflow_server/core/executor.py,sha256=AjY4qlTxv8T6gq73KXGlWb8gU97xr-p1RMxgbPSg1O0,16288
 workflow_server/core/utils.py,sha256=lgzxkAEjEXPxGXXQlUYTYuCdHht-eDJJmHj5AhEb3_o,1500
 workflow_server/core/workflow_executor_context.py,sha256=a-v48GJbOWUh4JIf_bNwDX-BvfKkg4xwRSPEyRVQmp4,1373
 workflow_server/server.py,sha256=QBU12AaAfAgLqfCDBd24qIJl_mbheiq0-hfcWV7rZM4,1234
@@ -21,13 +21,14 @@ workflow_server/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
 workflow_server/utils/exit_handler.py,sha256=_FacDVi4zc3bfTA3D2mJsISePlJ8jpLrnGVo5-xZQFs,743
 workflow_server/utils/log_proxy.py,sha256=nugi6fOgAYKX2X9DIc39TG366rsmmDUPoEtG3gzma_Y,3088
 workflow_server/utils/oom_killer.py,sha256=4Sag_iRQWqbp62iIBn6nKP-pxUHguOF93DdVXZTtJDk,2809
-workflow_server/utils/sentry.py,sha256=
+workflow_server/utils/sentry.py,sha256=pqx3X_4W3yOzmz8QMJYUEi39skIKWtrTN5nyFhaPkbk,1597
 workflow_server/utils/system_utils.py,sha256=fTzbdpmZ-0bXiNBLYYQdNJWtFAItZgIH8cLJdoXDuQQ,2114
 workflow_server/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+workflow_server/utils/tests/test_sentry_integration.py,sha256=LGmWiaLhFrx-jslrRjRq9JY6Z5ShLZyx_N_L0-FU6OI,2100
 workflow_server/utils/tests/test_system_utils.py,sha256=MdBxI9gxUOpR_JBAHpEz6dGFY6JjxhMSM2oExpqFvNA,4314
 workflow_server/utils/tests/test_utils.py,sha256=qwK5Rmy3RQyjtlUrYAuGuDlBeRzZKsf1yS-y2IpUizQ,6452
 workflow_server/utils/utils.py,sha256=Wqqn-1l2ugkGgy5paWWdt0AVxAyPMQCYcnRSSOMjXlA,4355
-vellum_workflow_server-0.14.73.
-vellum_workflow_server-0.14.73.
-vellum_workflow_server-0.14.73.
-vellum_workflow_server-0.14.73.
+vellum_workflow_server-0.14.73.post3.dist-info/METADATA,sha256=WI6P39NDrxKvYn99EqvupmJvsghS7m2bvR-InCMll1U,2243
+vellum_workflow_server-0.14.73.post3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+vellum_workflow_server-0.14.73.post3.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
+vellum_workflow_server-0.14.73.post3.dist-info/RECORD,,
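For readers comparing the sha256 values above, each RECORD row pairs an installed path with an urlsafe-base64 SHA-256 digest (padding stripped) and a byte size. Below is a minimal sketch, not part of the package, of how such a row is computed; the helper name record_entry is ours.

import base64
import hashlib
from pathlib import Path


def record_entry(path: str) -> str:
    # Wheel RECORD rows look like: path,sha256=<urlsafe base64 digest without '='>,<size in bytes>
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode("ascii")
    return f"{path},sha256={digest},{len(data)}"

Running record_entry against an installed 0.14.73.post3 file should reproduce the corresponding row shown above.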
workflow_server/api/tests/test_workflow_view_stream_workflow_route.py CHANGED

@@ -10,6 +10,7 @@ import requests_mock
 
 from workflow_server.code_exec_runner import run_code_exec_stream
 from workflow_server.server import create_app
+from workflow_server.utils.system_utils import get_active_process_count
 
 
 def flask_stream(request_body: dict) -> tuple[int, list]:
@@ -775,44 +776,15 @@ class Workflow(BaseWorkflow):
     # WHEN we call the stream route
     status_code, events = flask_stream(request_body)
 
-    # THEN we get a
-    assert status_code ==
+    # THEN we get a 429 response
+    assert status_code == 429, events
 
-    #
+    # AND we get a simple JSON error response
+    assert len(events) == 1
     assert events[0] == {
-        "
-        "trace_id": mock.ANY,
-        "span_id": str(span_id),
-        "timestamp": mock.ANY,
-        "api_version": "2024-10-25",
-        "parent": None,
-        "name": "vembda.execution.initiated",
-        "body": {
-            "sdk_version": version("vellum-ai"),
-            "server_version": "local",
-        },
+        "detail": f"Workflow server concurrent request rate exceeded. Process count: {get_active_process_count()}"
     }
 
-    # AND we get a vembda.execution.fulfilled event with error
-    assert events[1] == {
-        "id": mock.ANY,
-        "trace_id": events[0]["trace_id"],
-        "span_id": str(span_id),
-        "timestamp": mock.ANY,
-        "api_version": "2024-10-25",
-        "parent": None,
-        "name": "vembda.execution.fulfilled",
-        "body": {
-            "log": "",
-            "exit_code": -1,
-            "stderr": "Workflow server concurrent request rate exceeded. Process count: 0",
-            "container_overhead_latency": mock.ANY,
-            "timed_out": False,
-        },
-    }
-
-    assert len(events) == 2
-
 
 def test_stream_workflow_route__with_environment_variables(both_stream_types):
     # GIVEN a valid request body with environment variables
workflow_server/api/workflow_view.py CHANGED

@@ -15,7 +15,6 @@ from typing import Any, Dict, Generator, Iterator, Union, cast
 
 from flask import Blueprint, Response, current_app as app, request, stream_with_context
 from pydantic import ValidationError
-import sentry_sdk
 from vellum_ee.workflows.display.nodes.get_node_display_class import get_node_display_class
 from vellum_ee.workflows.display.types import WorkflowDisplayContext
 
@@ -97,16 +96,14 @@ def stream_workflow_route() -> Response:
     # if we detect a memory problem just exit us early
     if not wait_for_available_process():
         return Response(
-
-
-
-
-
-            vembda_initiated_event=vembda_initiated_event,
-            )
+            json.dumps(
+                {
+                    "detail": f"Workflow server concurrent request rate exceeded. "
+                    f"Process count: {get_active_process_count()}"
+                }
             ),
-            status=
-            content_type="application/
+            status=429,
+            content_type="application/json",
             headers=headers,
         )
 
@@ -117,7 +114,6 @@ def stream_workflow_route() -> Response:
         )
         increment_process_count(1)
     except Exception as e:
-        sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
         logger.exception(e)
 
         process_output_queue.put(create_vembda_rejected_event(context, traceback.format_exc()))
@@ -204,7 +200,6 @@ def stream_workflow_route() -> Response:
 
             continue
         except Exception as e:
-            sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
             logger.exception(e)
             break
 
@@ -302,7 +297,6 @@ def stream_node_route() -> Response:
             # This happens when theres a problem with the stream function call
             # itself not the workflow runner
            yield create_vembda_rejected_event(context, "Internal Server Error")
-            sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
            app.logger.exception(stream_future.exception())
            break
        else:
@@ -379,7 +373,6 @@ def get_version_route() -> tuple[dict, int]:
 
         resp["nodes"] = nodes
     except Exception as e:
-        sentry_sdk.set_tag("vellum_trace_id", "unknown")
         logger.exception(f"Failed to discover nodes: {str(e)}")
         resp["nodes"] = []
 
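The second hunk above changes the back-pressure path of the stream route: when no worker process is available, the server now responds with HTTP 429 and a one-line JSON body instead of emitting vembda initiated/fulfilled events. Below is a minimal client-side sketch of handling that response; the base URL, retry policy, and use of the requests library are assumptions for illustration and not part of this package.

import time

import requests


def post_stream_with_backoff(base_url: str, body: dict, attempts: int = 3) -> requests.Response:
    # Retry the stream route when the server reports it is at capacity (HTTP 429).
    response = requests.post(f"{base_url}/workflow/stream", json=body, stream=True)
    for attempt in range(attempts):
        if response.status_code != 429:
            break
        # The 429 body is a small JSON object, e.g.
        # {"detail": "Workflow server concurrent request rate exceeded. Process count: 5"}
        print(response.json().get("detail"))
        time.sleep(2 ** attempt)  # simple exponential backoff before retrying
        response = requests.post(f"{base_url}/workflow/stream", json=body, stream=True)
    return response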
workflow_server/code_exec_runner.py CHANGED

@@ -5,8 +5,6 @@ import os
 from uuid import uuid4
 from typing import Optional
 
-import sentry_sdk
-
 from vellum.workflows.exceptions import WorkflowInitializationException
 from workflow_server.api.workflow_view import get_workflow_request_context
 from workflow_server.core.events import VembdaExecutionInitiatedBody, VembdaExecutionInitiatedEvent
@@ -56,7 +54,6 @@ def run_code_exec_stream() -> None:
         fulfilled_event = serialize_vembda_rejected_event(context, str(e))
         print(f"{_EVENT_LINE}{fulfilled_event}")  # noqa: T201
     except Exception as e:
-        sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id) if context else "unknown")
         logger.exception(e)
 
         event = serialize_vembda_rejected_event(context, "Internal Server Error")
workflow_server/core/executor.py CHANGED

@@ -15,7 +15,6 @@ from uuid import UUID, uuid4
 from typing import Any, Callable, Generator, Iterator, Optional, Tuple, Type
 
 from pebble import concurrent
-import sentry_sdk
 from vellum_ee.workflows.display.workflows import BaseWorkflowDisplay
 from vellum_ee.workflows.server.virtual_file_loader import VirtualFileFinder
 
@@ -68,7 +67,6 @@ def _stream_node_wrapper(executor_context: NodeExecutorContext, queue: Queue) ->
         for event in stream_node(executor_context=executor_context):
             queue.put(event)
     except Exception as e:
-        sentry_sdk.set_tag("vellum_trace_id", str(executor_context.trace_id))
         logger.exception(e)
         queue.put(
             VembdaExecutionFulfilledEvent(
@@ -106,7 +104,6 @@ def _stream_workflow_wrapper(executor_context: WorkflowExecutorContext, queue: Q
         if not span_id_emitted:
             queue.put(f"{SPAN_ID_EVENT}:{uuid4()}")
 
-        sentry_sdk.set_tag("vellum_trace_id", str(executor_context.trace_id))
         logger.exception(e)
         queue.put(serialize_vembda_rejected_event(executor_context, "Internal Server Error"))
         queue.put(STREAM_FINISHED_EVENT)
@@ -456,7 +453,6 @@ def _gather_workflow(context: WorkflowExecutorContext) -> Tuple[BaseWorkflow, st
         )
         return workflow, namespace
     except Exception as e:
-        sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
         logger.exception("Failed to initialize Workflow")
         raise WorkflowInitializationException(f"Failed to initialize workflow: {e}") from e
 
@@ -465,6 +461,5 @@ def _gather_display_context(workflow: BaseWorkflow, namespace: str) -> Optional[
     try:
         return BaseWorkflowDisplay.gather_event_display_context(namespace, workflow.__class__)
     except Exception:
-        sentry_sdk.set_tag("vellum_trace_id", "unknown")
         logger.exception("Unable to Parse Workflow Display Context")
         return None
workflow_server/utils/sentry.py CHANGED

@@ -5,10 +5,44 @@ import sentry_sdk
 from sentry_sdk.integrations.logging import LoggingIntegration
 
 
+def _tag_trace_id(event: dict) -> None:
+    if "request" not in event:
+        return
+
+    if not isinstance(event["request"], dict):
+        return
+
+    url = event["request"].get("url")
+    if not isinstance(url, str):
+        return
+
+    if not url.endswith("/workflow/stream"):
+        return
+
+    body = event["request"].get("data")
+    if not isinstance(body, dict):
+        return
+
+    execution_context = body.get("execution_context")
+    if not isinstance(execution_context, dict):
+        return
+
+    trace_id = execution_context.get("trace_id")
+    if not isinstance(trace_id, str):
+        return
+
+    if "tags" not in event:
+        event["tags"] = {}
+
+    event["tags"]["vellum_trace_id"] = trace_id
+
+
 def before_send(event: dict, hint: dict) -> Optional[dict]:
     if "exc_info" in hint:
         _, _, _ = hint["exc_info"]
 
+    _tag_trace_id(event)
+
     return event
 
 
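The new _tag_trace_id helper replaces the per-call sentry_sdk.set_tag("vellum_trace_id", ...) lines removed from workflow_view.py, code_exec_runner.py, and executor.py: the tag is now derived once, inside the before_send hook, from the request payload of /workflow/stream events. The init call that registers the hook is not shown in this diff; the following is a minimal sketch of how a before_send hook like this is typically wired up with the Sentry SDK, with the DSN source and logging levels as assumptions.

import logging
import os

import sentry_sdk
from sentry_sdk.integrations.logging import LoggingIntegration

from workflow_server.utils.sentry import before_send


def init_sentry() -> None:
    # Register the hook so every outgoing error event passes through before_send,
    # which tags vellum_trace_id on /workflow/stream errors.
    sentry_sdk.init(
        dsn=os.environ.get("SENTRY_DSN"),
        before_send=before_send,
        integrations=[LoggingIntegration(level=logging.INFO, event_level=logging.ERROR)],
    )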
workflow_server/utils/tests/test_sentry_integration.py ADDED

@@ -0,0 +1,69 @@
+import pytest
+from uuid import uuid4
+
+from workflow_server.server import create_app
+
+
+@pytest.fixture
+def mock_sentry_capture_envelope(mocker):
+    mock_transport = mocker.patch("sentry_sdk.client.make_transport")
+    return mock_transport.return_value.capture_envelope
+
+
+def test_sentry_integration_with_workflow_endpoints(monkeypatch, mock_sentry_capture_envelope):
+    # GIVEN sentry is configured
+    monkeypatch.setenv("SENTRY_DSN", "https://test-dsn@sentry.io/1234567890")
+
+    # AND our /workflow/stream endpoint raises an exception
+    def mock_get_version():
+        raise Exception("Test exception")
+
+    monkeypatch.setattr("workflow_server.api.workflow_view.get_version", mock_get_version)
+
+    # AND we have a mock trace_id
+    trace_id = str(uuid4())
+
+    # AND we have a mock request body
+    body = {
+        "execution_id": uuid4(),
+        "inputs": [],
+        "environment_api_key": "test",
+        "module": "workflow",
+        "timeout": 360,
+        "files": {
+            "__init__.py": "",
+            "workflow.py": """\
+from vellum.workflows import BaseWorkflow
+
+class Workflow(BaseWorkflow):
+    pass
+""",
+        },
+        "execution_context": {
+            "trace_id": trace_id,
+            "parent_context": {
+                "type": "API_REQUEST",
+                "span_id": str(uuid4()),
+                "parent": None,
+            },
+        },
+    }
+
+    # WHEN we call the /workflow/version endpoint
+    flask_app = create_app()
+
+    with flask_app.test_client() as test_client:
+        response = test_client.post("/workflow/stream", json=body)
+
+    # THEN we get a 500 error
+    assert response.status_code == 500
+
+    # AND sentry captures the error with the correct data
+    assert mock_sentry_capture_envelope.call_count == 1
+    envelope = mock_sentry_capture_envelope.call_args[0][0]
+    event = envelope.get_event()
+    assert event["level"] == "error"
+    assert "Test exception" in event["exception"]["values"][0]["value"]
+
+    # AND the trace_id is tagged
+    assert event["tags"]["vellum_trace_id"] == trace_id
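The new test asserts end to end that an error raised inside the stream route reaches Sentry with the vellum_trace_id tag taken from the request body. A smaller, illustrative check of the same behavior exercises _tag_trace_id directly; the event dict below is fabricated for this example and is not part of the test suite.

from workflow_server.utils.sentry import _tag_trace_id

# A fabricated Sentry event resembling what the SDK builds for a /workflow/stream request.
event = {
    "request": {
        "url": "http://localhost:8000/workflow/stream",
        "data": {"execution_context": {"trace_id": "11111111-2222-3333-4444-555555555555"}},
    },
}

_tag_trace_id(event)
assert event["tags"]["vellum_trace_id"] == "11111111-2222-3333-4444-555555555555"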
{vellum_workflow_server-0.14.73.post1.dist-info → vellum_workflow_server-0.14.73.post3.dist-info}/WHEEL: file without changes

{vellum_workflow_server-0.14.73.post1.dist-info → vellum_workflow_server-0.14.73.post3.dist-info}/entry_points.txt: file without changes