vellum-workflow-server 0.14.73.post1__tar.gz → 0.14.73.post2__tar.gz
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of vellum-workflow-server might be problematic.
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/PKG-INFO +1 -1
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/pyproject.toml +1 -1
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py +6 -34
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/workflow_view.py +7 -9
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/README.md +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/__init__.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/__init__.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/auth_middleware.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/healthz_view.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/tests/__init__.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/tests/test_input_display_mapping.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/tests/test_workflow_view.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/code_exec_runner.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/config.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/core/__init__.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/core/cancel_workflow.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/core/events.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/core/executor.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/core/utils.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/core/workflow_executor_context.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/server.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/start.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/__init__.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/exit_handler.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/log_proxy.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/oom_killer.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/sentry.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/system_utils.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/tests/__init__.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/tests/test_system_utils.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/tests/test_utils.py +0 -0
- {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/utils.py +0 -0
src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py

@@ -10,6 +10,7 @@ import requests_mock
 
 from workflow_server.code_exec_runner import run_code_exec_stream
 from workflow_server.server import create_app
+from workflow_server.utils.system_utils import get_active_process_count
 
 
 def flask_stream(request_body: dict) -> tuple[int, list]:
src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py

@@ -775,44 +776,15 @@ class Workflow(BaseWorkflow):
     # WHEN we call the stream route
     status_code, events = flask_stream(request_body)
 
-    # THEN we get a
-    assert status_code ==
+    # THEN we get a 429 response
+    assert status_code == 429, events
 
-    #
+    # AND we get a simple JSON error response
+    assert len(events) == 1
     assert events[0] == {
-        "
-        "trace_id": mock.ANY,
-        "span_id": str(span_id),
-        "timestamp": mock.ANY,
-        "api_version": "2024-10-25",
-        "parent": None,
-        "name": "vembda.execution.initiated",
-        "body": {
-            "sdk_version": version("vellum-ai"),
-            "server_version": "local",
-        },
+        "detail": f"Workflow server concurrent request rate exceeded. Process count: {get_active_process_count()}"
     }
 
-    # AND we get a vembda.execution.fulfilled event with error
-    assert events[1] == {
-        "id": mock.ANY,
-        "trace_id": events[0]["trace_id"],
-        "span_id": str(span_id),
-        "timestamp": mock.ANY,
-        "api_version": "2024-10-25",
-        "parent": None,
-        "name": "vembda.execution.fulfilled",
-        "body": {
-            "log": "",
-            "exit_code": -1,
-            "stderr": "Workflow server concurrent request rate exceeded. Process count: 0",
-            "container_overhead_latency": mock.ANY,
-            "timed_out": False,
-        },
-    }
-
-    assert len(events) == 2
-
 
 def test_stream_workflow_route__with_environment_variables(both_stream_types):
     # GIVEN a valid request body with environment variables
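With this change, the throttled case is expected to yield a single JSON error event rather than the vembda.execution.initiated / vembda.execution.fulfilled pair. As a hedged illustration, a caller might react to the new 429 responses along these lines; the endpoint URL and retry policy here are hypothetical and use the requests library, not anything prescribed by the package.

```python
import time

import requests


def post_with_retry(url: str, body: dict, max_attempts: int = 3) -> requests.Response:
    # Hypothetical client-side handling of the new behaviour: the server now
    # responds with a {"detail": "..."} body and HTTP 429 instead of streaming
    # vembda events when its concurrent-request limit is hit, so callers can
    # back off and retry.
    for attempt in range(max_attempts):
        response = requests.post(url, json=body)
        if response.status_code != 429:
            return response
        # The detail message includes the server's active process count.
        print("throttled:", response.json().get("detail"))
        time.sleep(2 ** attempt)
    return response
```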
src/workflow_server/api/workflow_view.py

@@ -97,16 +97,14 @@ def stream_workflow_route() -> Response:
     # if we detect a memory problem just exit us early
     if not wait_for_available_process():
         return Response(
-
-
-
-
-
-                vembda_initiated_event=vembda_initiated_event,
-            )
+            json.dumps(
+                {
+                    "detail": f"Workflow server concurrent request rate exceeded. "
+                    f"Process count: {get_active_process_count()}"
+                }
             ),
-            status=
-            content_type="application/
+            status=429,
+            content_type="application/json",
             headers=headers,
         )
 
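For context, here is a minimal, self-contained Flask sketch of the pattern the updated view follows: when the process gate reports no capacity, respond with a plain JSON body and a 429 status instead of streaming vembda events. The capacity check, process counter, and "/stream" path below are stand-ins for illustration, not the package's actual implementation.

```python
import json

from flask import Flask, Response

app = Flask(__name__)

# Stand-ins for the package's real process gate; the actual
# wait_for_available_process / get_active_process_count helpers
# are not reproduced here.
MAX_PROCESSES = 4
_active_processes = 0


@app.route("/stream", methods=["POST"])
def stream_workflow_route() -> Response:
    if _active_processes >= MAX_PROCESSES:
        # Same shape as the updated view: a plain JSON error body with a
        # 429 status instead of a streamed vembda event payload.
        return Response(
            json.dumps(
                {
                    "detail": "Workflow server concurrent request rate exceeded. "
                    f"Process count: {_active_processes}"
                }
            ),
            status=429,
            content_type="application/json",
        )
    return Response(json.dumps({"detail": "ok"}), content_type="application/json")
```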