vellum-workflow-server 0.14.73.tar.gz → 0.14.73.post2.tar.gz

This diff shows the content of publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Files changed (32)
  1. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/PKG-INFO +1 -1
  2. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/pyproject.toml +1 -1
  3. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py +6 -34
  4. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/workflow_view.py +7 -9
  5. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/core/executor.py +0 -8
  6. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/README.md +0 -0
  7. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/__init__.py +0 -0
  8. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/__init__.py +0 -0
  9. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/auth_middleware.py +0 -0
  10. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/healthz_view.py +0 -0
  11. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/tests/__init__.py +0 -0
  12. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/tests/test_input_display_mapping.py +0 -0
  13. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/api/tests/test_workflow_view.py +0 -0
  14. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/code_exec_runner.py +0 -0
  15. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/config.py +0 -0
  16. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/core/__init__.py +0 -0
  17. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/core/cancel_workflow.py +0 -0
  18. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/core/events.py +0 -0
  19. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/core/utils.py +0 -0
  20. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/core/workflow_executor_context.py +0 -0
  21. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/server.py +0 -0
  22. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/start.py +0 -0
  23. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/__init__.py +0 -0
  24. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/exit_handler.py +0 -0
  25. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/log_proxy.py +0 -0
  26. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/oom_killer.py +0 -0
  27. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/sentry.py +0 -0
  28. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/system_utils.py +0 -0
  29. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/tests/__init__.py +0 -0
  30. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/tests/test_system_utils.py +0 -0
  31. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/tests/test_utils.py +0 -0
  32. {vellum_workflow_server-0.14.73 → vellum_workflow_server-0.14.73.post2}/src/workflow_server/utils/utils.py +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-workflow-server
-Version: 0.14.73
+Version: 0.14.73.post2
 Summary:
 License: AGPL
 Requires-Python: >=3.9.0,<4

pyproject.toml
@@ -3,7 +3,7 @@ name = "vellum-workflow-server"
 
 [tool.poetry]
 name = "vellum-workflow-server"
-version = "0.14.73"
+version = "0.14.73.post2"
 description = ""
 readme = "README.md"
 authors = []
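
Not part of the diff itself: the only change to PKG-INFO and pyproject.toml is the bump from 0.14.73 to the PEP 440 post-release 0.14.73.post2, which version-aware tooling orders after the base release. A minimal sketch of that ordering, assuming the third-party packaging library is available (it is not shown as a dependency in this diff):

    # Sketch only: PEP 440 post-release ordering, using the "packaging"
    # library (assumed to be installed; it is not part of this diff).
    from packaging.version import Version

    base = Version("0.14.73")
    post = Version("0.14.73.post2")

    # A post-release sorts after its base release, so installers treat
    # 0.14.73.post2 as the newer version.
    assert post > base and post.is_postrelease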

src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py
@@ -10,6 +10,7 @@ import requests_mock
 
 from workflow_server.code_exec_runner import run_code_exec_stream
 from workflow_server.server import create_app
+from workflow_server.utils.system_utils import get_active_process_count
 
 
 def flask_stream(request_body: dict) -> tuple[int, list]:
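
The test helper flask_stream appears in this hunk only by its signature. Roughly, a helper like this would post the request body to the streaming route with Flask's test client and decode each returned line as a JSON event; the sketch below is a hypothetical illustration, not the package's actual implementation, and "/stream" is a placeholder route path:

    # Hypothetical flask_stream-style helper; the real one in this package
    # may differ. "/stream" is a placeholder route path.
    import json

    from workflow_server.server import create_app

    def flask_stream_sketch(request_body: dict) -> tuple[int, list]:
        app = create_app()
        with app.test_client() as client:
            response = client.post("/stream", json=request_body)
            # Decode each newline-delimited chunk as one JSON event.
            events = [
                json.loads(line)
                for line in response.get_data(as_text=True).splitlines()
                if line.strip()
            ]
        return response.status_code, events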

src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py
@@ -775,44 +776,15 @@ class Workflow(BaseWorkflow):
     # WHEN we call the stream route
     status_code, events = flask_stream(request_body)
 
-    # THEN we get a 200 response
-    assert status_code == 200, events
+    # THEN we get a 429 response
+    assert status_code == 429, events
 
-    # THEN we get the expected events
+    # AND we get a simple JSON error response
+    assert len(events) == 1
     assert events[0] == {
-        "id": mock.ANY,
-        "trace_id": mock.ANY,
-        "span_id": str(span_id),
-        "timestamp": mock.ANY,
-        "api_version": "2024-10-25",
-        "parent": None,
-        "name": "vembda.execution.initiated",
-        "body": {
-            "sdk_version": version("vellum-ai"),
-            "server_version": "local",
-        },
+        "detail": f"Workflow server concurrent request rate exceeded. Process count: {get_active_process_count()}"
     }
 
-    # AND we get a vembda.execution.fulfilled event with error
-    assert events[1] == {
-        "id": mock.ANY,
-        "trace_id": events[0]["trace_id"],
-        "span_id": str(span_id),
-        "timestamp": mock.ANY,
-        "api_version": "2024-10-25",
-        "parent": None,
-        "name": "vembda.execution.fulfilled",
-        "body": {
-            "log": "",
-            "exit_code": -1,
-            "stderr": "Workflow server concurrent request rate exceeded. Process count: 0",
-            "container_overhead_latency": mock.ANY,
-            "timed_out": False,
-        },
-    }
-
-    assert len(events) == 2
-
 
 def test_stream_workflow_route__with_environment_variables(both_stream_types):
     # GIVEN a valid request body with environment variables
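
One observation on the rewritten assertion (not part of the diff): the expected "detail" string interpolates get_active_process_count() at assertion time, so it only matches if the count has not shifted between the request and the check. A looser alternative would compare just the message prefix:

    # Hedged alternative assertion (not from the diff): tolerate a process
    # count that may change between the request and the assertion.
    assert events[0]["detail"].startswith(
        "Workflow server concurrent request rate exceeded. Process count:"
    )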

src/workflow_server/api/workflow_view.py
@@ -97,16 +97,14 @@ def stream_workflow_route() -> Response:
     # if we detect a memory problem just exit us early
     if not wait_for_available_process():
         return Response(
-            stream_with_context(
-                startup_error_generator(
-                    context=context,
-                    message=f"Workflow server concurrent request rate exceeded. "
-                    f"Process count: {get_active_process_count()}",
-                    vembda_initiated_event=vembda_initiated_event,
-                )
+            json.dumps(
+                {
+                    "detail": f"Workflow server concurrent request rate exceeded. "
+                    f"Process count: {get_active_process_count()}"
+                }
             ),
-            status=200,
-            content_type="application/x-ndjson",
+            status=429,
+            content_type="application/json",
             headers=headers,
         )
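
With this change the over-capacity path no longer streams vembda error events as application/x-ndjson with a 200; it returns a plain 429 whose JSON body carries a single "detail" message. A hedged client-side sketch of handling that response with simple backoff, assuming the requests library and a placeholder URL:

    # Hedged client sketch (not from this package): retry on the new 429
    # instead of parsing an NDJSON error event stream. URL and payload are
    # placeholders.
    import time

    import requests

    def call_stream_workflow(url: str, body: dict, retries: int = 3) -> requests.Response:
        for attempt in range(retries):
            response = requests.post(url, json=body, stream=True)
            if response.status_code != 429:
                return response
            # The 429 body is a small JSON object with a "detail" message.
            print(response.json().get("detail"))
            time.sleep(2 ** attempt)  # simple exponential backoff
        return response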
 
src/workflow_server/core/executor.py
@@ -21,7 +21,6 @@ from vellum_ee.workflows.server.virtual_file_loader import VirtualFileFinder
 
 from vellum import Vellum, VellumEnvironment
 from vellum.workflows import BaseWorkflow
-from vellum.workflows.errors import WorkflowError, WorkflowErrorCode
 from vellum.workflows.events.types import BaseEvent
 from vellum.workflows.events.workflow import WorkflowEventDisplayContext
 from vellum.workflows.exceptions import WorkflowInitializationException
@@ -207,13 +206,6 @@ def stream_workflow(
         if event.name == "workflow.execution.initiated":
             event.body.display_context = display_context
 
-        if event.name.endswith("rejected") and event.body.error.code.name == "INTERNAL_ERROR":  # type: ignore
-            sentry_sdk.set_tag("vellum_trace_id", str(executor_context.trace_id))
-            logger.exception(event.body.error.message)  # type: ignore
-            event.body.error = WorkflowError(  # type: ignore
-                code=WorkflowErrorCode.INTERNAL_ERROR.value, message="Internal Error"  # type: ignore
-            )
-
         yield _dump_event(
             event=event,
             executor_context=executor_context,
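
After this removal, stream_workflow no longer rewrites INTERNAL_ERROR rejections to a generic "Internal Error" message before dumping them, so the original error message reaches the event stream. A hedged sketch of a downstream consumer surfacing those messages itself, assuming each dumped event keeps the name/body/error shape implied by the removed block:

    # Hedged downstream sketch (not from this package): log rejected events
    # from an NDJSON event stream, assuming each line serializes "name" and
    # a nested body.error.message as the removed block implies.
    import json
    import logging

    logger = logging.getLogger("workflow-events")

    def log_rejections(ndjson_lines) -> None:
        for line in ndjson_lines:
            if not line.strip():
                continue
            event = json.loads(line)
            if event.get("name", "").endswith("rejected"):
                error = (event.get("body") or {}).get("error") or {}
                logger.error("Workflow rejected: %s", error.get("message"))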