vellum-workflow-server 0.14.73__py3-none-any.whl → 0.14.73.post2__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the respective public registries.

Potentially problematic release.


This version of vellum-workflow-server might be problematic. See the package's registry page for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vellum-workflow-server
3
- Version: 0.14.73
3
+ Version: 0.14.73.post2
4
4
  Summary:
5
5
  License: AGPL
6
6
  Requires-Python: >=3.9.0,<4
@@ -5,14 +5,14 @@ workflow_server/api/healthz_view.py,sha256=itiRvBDBXncrw8Kbbc73UZLwqMAhgHOR3uSre
5
5
  workflow_server/api/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
6
  workflow_server/api/tests/test_input_display_mapping.py,sha256=drBZqMudFyB5wgiUOcMgRXz7E7ge-Qgxbstw4E4f0zE,2211
7
7
  workflow_server/api/tests/test_workflow_view.py,sha256=2nscM_QsYPHkkTG8_JhNbE2LmGL5FQKXEtaLLjXouw0,14591
8
- workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=wmeH8oGo0lKx3YzEJQ9nGjw7adqQD0EL7-xGLPgYWqc,24872
9
- workflow_server/api/workflow_view.py,sha256=AOHccJXkhzw-iXBo4Q_EaLRynTuNmeQZaQxB5ttgrY4,15434
8
+ workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=cUVF3tqV8Ay91YfFr2lT2oXONWRN823Nk5M3EOC8wE8,24133
9
+ workflow_server/api/workflow_view.py,sha256=bvkDQ9h267hqWsHSElplg83NGHfoLuR8VaLlMh6zJ9s,15295
10
10
  workflow_server/code_exec_runner.py,sha256=XSs4w_b0vDUt0HqSakc26Gxx9aoG2wmwOo-JGVL5QJ4,2388
11
11
  workflow_server/config.py,sha256=K5Tavm7wiqCZt0RWWue7zzb8N6e8aWnFOTNlBqEJPcI,1330
12
12
  workflow_server/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
13
13
  workflow_server/core/cancel_workflow.py,sha256=Ffkc3mzmrdMEUcD-sHfEhX4IwVrka-E--SxKA1dUfIU,2185
14
14
  workflow_server/core/events.py,sha256=24MA66DVQuaLJJcZrS8IL1Zq4Ohi9CoouKZ5VgoH3Cs,1402
15
- workflow_server/core/executor.py,sha256=TOnJ5FcmC8mWNdGvoyTwNlNlbhp6s6hk7Nc4M35jEe4,17148
15
+ workflow_server/core/executor.py,sha256=j78gYOgaeJ5Z25nCefnPg4fUPdTLW0tcwaZeWQf0DP0,16588
16
16
  workflow_server/core/utils.py,sha256=lgzxkAEjEXPxGXXQlUYTYuCdHht-eDJJmHj5AhEb3_o,1500
17
17
  workflow_server/core/workflow_executor_context.py,sha256=a-v48GJbOWUh4JIf_bNwDX-BvfKkg4xwRSPEyRVQmp4,1373
18
18
  workflow_server/server.py,sha256=QBU12AaAfAgLqfCDBd24qIJl_mbheiq0-hfcWV7rZM4,1234
@@ -27,7 +27,7 @@ workflow_server/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
27
27
  workflow_server/utils/tests/test_system_utils.py,sha256=MdBxI9gxUOpR_JBAHpEz6dGFY6JjxhMSM2oExpqFvNA,4314
28
28
  workflow_server/utils/tests/test_utils.py,sha256=qwK5Rmy3RQyjtlUrYAuGuDlBeRzZKsf1yS-y2IpUizQ,6452
29
29
  workflow_server/utils/utils.py,sha256=Wqqn-1l2ugkGgy5paWWdt0AVxAyPMQCYcnRSSOMjXlA,4355
30
- vellum_workflow_server-0.14.73.dist-info/METADATA,sha256=d7G7Lir-2rbsAhDvGMUDbX9oV-88h_ViurMycJ_PK_Q,2237
31
- vellum_workflow_server-0.14.73.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
32
- vellum_workflow_server-0.14.73.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
33
- vellum_workflow_server-0.14.73.dist-info/RECORD,,
30
+ vellum_workflow_server-0.14.73.post2.dist-info/METADATA,sha256=7J8qwzytgU5lH36SRdvkqMfN35req2uxbKYeZJhVCQI,2243
31
+ vellum_workflow_server-0.14.73.post2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
32
+ vellum_workflow_server-0.14.73.post2.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
33
+ vellum_workflow_server-0.14.73.post2.dist-info/RECORD,,
@@ -10,6 +10,7 @@ import requests_mock
10
10
 
11
11
  from workflow_server.code_exec_runner import run_code_exec_stream
12
12
  from workflow_server.server import create_app
13
+ from workflow_server.utils.system_utils import get_active_process_count
13
14
 
14
15
 
15
16
  def flask_stream(request_body: dict) -> tuple[int, list]:
@@ -775,44 +776,15 @@ class Workflow(BaseWorkflow):
775
776
  # WHEN we call the stream route
776
777
  status_code, events = flask_stream(request_body)
777
778
 
778
- # THEN we get a 200 response
779
- assert status_code == 200, events
779
+ # THEN we get a 429 response
780
+ assert status_code == 429, events
780
781
 
781
- # THEN we get the expected events
782
+ # AND we get a simple JSON error response
783
+ assert len(events) == 1
782
784
  assert events[0] == {
783
- "id": mock.ANY,
784
- "trace_id": mock.ANY,
785
- "span_id": str(span_id),
786
- "timestamp": mock.ANY,
787
- "api_version": "2024-10-25",
788
- "parent": None,
789
- "name": "vembda.execution.initiated",
790
- "body": {
791
- "sdk_version": version("vellum-ai"),
792
- "server_version": "local",
793
- },
785
+ "detail": f"Workflow server concurrent request rate exceeded. Process count: {get_active_process_count()}"
794
786
  }
795
787
 
796
- # AND we get a vembda.execution.fulfilled event with error
797
- assert events[1] == {
798
- "id": mock.ANY,
799
- "trace_id": events[0]["trace_id"],
800
- "span_id": str(span_id),
801
- "timestamp": mock.ANY,
802
- "api_version": "2024-10-25",
803
- "parent": None,
804
- "name": "vembda.execution.fulfilled",
805
- "body": {
806
- "log": "",
807
- "exit_code": -1,
808
- "stderr": "Workflow server concurrent request rate exceeded. Process count: 0",
809
- "container_overhead_latency": mock.ANY,
810
- "timed_out": False,
811
- },
812
- }
813
-
814
- assert len(events) == 2
815
-
816
788
 
817
789
  def test_stream_workflow_route__with_environment_variables(both_stream_types):
818
790
  # GIVEN a valid request body with environment variables
@@ -97,16 +97,14 @@ def stream_workflow_route() -> Response:
97
97
  # if we detect a memory problem just exit us early
98
98
  if not wait_for_available_process():
99
99
  return Response(
100
- stream_with_context(
101
- startup_error_generator(
102
- context=context,
103
- message=f"Workflow server concurrent request rate exceeded. "
104
- f"Process count: {get_active_process_count()}",
105
- vembda_initiated_event=vembda_initiated_event,
106
- )
100
+ json.dumps(
101
+ {
102
+ "detail": f"Workflow server concurrent request rate exceeded. "
103
+ f"Process count: {get_active_process_count()}"
104
+ }
107
105
  ),
108
- status=200,
109
- content_type="application/x-ndjson",
106
+ status=429,
107
+ content_type="application/json",
110
108
  headers=headers,
111
109
  )
112
110
 
@@ -21,7 +21,6 @@ from vellum_ee.workflows.server.virtual_file_loader import VirtualFileFinder
21
21
 
22
22
  from vellum import Vellum, VellumEnvironment
23
23
  from vellum.workflows import BaseWorkflow
24
- from vellum.workflows.errors import WorkflowError, WorkflowErrorCode
25
24
  from vellum.workflows.events.types import BaseEvent
26
25
  from vellum.workflows.events.workflow import WorkflowEventDisplayContext
27
26
  from vellum.workflows.exceptions import WorkflowInitializationException
@@ -207,13 +206,6 @@ def stream_workflow(
207
206
  if event.name == "workflow.execution.initiated":
208
207
  event.body.display_context = display_context
209
208
 
210
- if event.name.endswith("rejected") and event.body.error.code.name == "INTERNAL_ERROR": # type: ignore
211
- sentry_sdk.set_tag("vellum_trace_id", str(executor_context.trace_id))
212
- logger.exception(event.body.error.message) # type: ignore
213
- event.body.error = WorkflowError( # type: ignore
214
- code=WorkflowErrorCode.INTERNAL_ERROR.value, message="Internal Error" # type: ignore
215
- )
216
-
217
209
  yield _dump_event(
218
210
  event=event,
219
211
  executor_context=executor_context,