vellum-workflow-server 0.14.73.post1__tar.gz → 0.14.73.post3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (33)
  1. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/PKG-INFO +1 -1
  2. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/pyproject.toml +2 -1
  3. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py +6 -34
  4. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/api/workflow_view.py +7 -14
  5. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/code_exec_runner.py +0 -3
  6. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/core/executor.py +0 -5
  7. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/utils/sentry.py +34 -0
  8. vellum_workflow_server-0.14.73.post3/src/workflow_server/utils/tests/test_sentry_integration.py +69 -0
  9. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/README.md +0 -0
  10. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/__init__.py +0 -0
  11. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/api/__init__.py +0 -0
  12. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/api/auth_middleware.py +0 -0
  13. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/api/healthz_view.py +0 -0
  14. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/api/tests/__init__.py +0 -0
  15. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/api/tests/test_input_display_mapping.py +0 -0
  16. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/api/tests/test_workflow_view.py +0 -0
  17. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/config.py +0 -0
  18. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/core/__init__.py +0 -0
  19. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/core/cancel_workflow.py +0 -0
  20. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/core/events.py +0 -0
  21. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/core/utils.py +0 -0
  22. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/core/workflow_executor_context.py +0 -0
  23. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/server.py +0 -0
  24. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/start.py +0 -0
  25. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/utils/__init__.py +0 -0
  26. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/utils/exit_handler.py +0 -0
  27. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/utils/log_proxy.py +0 -0
  28. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/utils/oom_killer.py +0 -0
  29. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/utils/system_utils.py +0 -0
  30. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/utils/tests/__init__.py +0 -0
  31. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/utils/tests/test_system_utils.py +0 -0
  32. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/utils/tests/test_utils.py +0 -0
  33. {vellum_workflow_server-0.14.73.post1 → vellum_workflow_server-0.14.73.post3}/src/workflow_server/utils/utils.py +0 -0
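
Substantively, this release makes two changes. First, when the server's concurrent-process limit is exceeded, the /workflow/stream route now rejects the request up front with a 429 and a plain JSON error body, instead of returning a 200 NDJSON stream containing vembda initiated/fulfilled events (workflow_view.py, with the corresponding test updated). Second, the scattered sentry_sdk.set_tag("vellum_trace_id", ...) calls are removed from the request and executor code paths in favor of a centralized _tag_trace_id helper invoked from Sentry's before_send hook (sentry.py), which extracts the trace ID from the /workflow/stream request body; a new integration test covers this, backed by a new pytest-mock dev dependency.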
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-workflow-server
-Version: 0.14.73.post1
+Version: 0.14.73.post3
 Summary:
 License: AGPL
 Requires-Python: >=3.9.0,<4
pyproject.toml
@@ -3,7 +3,7 @@ name = "vellum-workflow-server"
 
 [tool.poetry]
 name = "vellum-workflow-server"
-version = "0.14.73.post1"
+version = "0.14.73.post3"
 description = ""
 readme = "README.md"
 authors = []
@@ -32,6 +32,7 @@ packages = [
 
 [tool.poetry.group.dev.dependencies]
 requests-mock = "^1.12.1"
+pytest-mock = "^3.14.1"
 
 [project.urls]
 Repository = 'https://github.com/vellum-ai/vembda-service'
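
(pytest-mock is pulled in for the mocker fixture used by the new Sentry integration test below; it wraps unittest.mock patches with automatic per-test teardown.)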
src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py
@@ -10,6 +10,7 @@ import requests_mock
 
 from workflow_server.code_exec_runner import run_code_exec_stream
 from workflow_server.server import create_app
+from workflow_server.utils.system_utils import get_active_process_count
 
 
 def flask_stream(request_body: dict) -> tuple[int, list]:
@@ -775,44 +776,15 @@ class Workflow(BaseWorkflow):
     # WHEN we call the stream route
     status_code, events = flask_stream(request_body)
 
-    # THEN we get a 200 response
-    assert status_code == 200, events
+    # THEN we get a 429 response
+    assert status_code == 429, events
 
-    # THEN we get the expected events
+    # AND we get a simple JSON error response
+    assert len(events) == 1
     assert events[0] == {
-        "id": mock.ANY,
-        "trace_id": mock.ANY,
-        "span_id": str(span_id),
-        "timestamp": mock.ANY,
-        "api_version": "2024-10-25",
-        "parent": None,
-        "name": "vembda.execution.initiated",
-        "body": {
-            "sdk_version": version("vellum-ai"),
-            "server_version": "local",
-        },
+        "detail": f"Workflow server concurrent request rate exceeded. Process count: {get_active_process_count()}"
     }
 
-    # AND we get a vembda.execution.fulfilled event with error
-    assert events[1] == {
-        "id": mock.ANY,
-        "trace_id": events[0]["trace_id"],
-        "span_id": str(span_id),
-        "timestamp": mock.ANY,
-        "api_version": "2024-10-25",
-        "parent": None,
-        "name": "vembda.execution.fulfilled",
-        "body": {
-            "log": "",
-            "exit_code": -1,
-            "stderr": "Workflow server concurrent request rate exceeded. Process count: 0",
-            "container_overhead_latency": mock.ANY,
-            "timed_out": False,
-        },
-    }
-
-    assert len(events) == 2
-
 
 def test_stream_workflow_route__with_environment_variables(both_stream_types):
     # GIVEN a valid request body with environment variables
src/workflow_server/api/workflow_view.py
@@ -15,7 +15,6 @@ from typing import Any, Dict, Generator, Iterator, Union, cast
 
 from flask import Blueprint, Response, current_app as app, request, stream_with_context
 from pydantic import ValidationError
-import sentry_sdk
 from vellum_ee.workflows.display.nodes.get_node_display_class import get_node_display_class
 from vellum_ee.workflows.display.types import WorkflowDisplayContext
 
@@ -97,16 +96,14 @@ def stream_workflow_route() -> Response:
     # if we detect a memory problem just exit us early
     if not wait_for_available_process():
         return Response(
-            stream_with_context(
-                startup_error_generator(
-                    context=context,
-                    message=f"Workflow server concurrent request rate exceeded. "
-                    f"Process count: {get_active_process_count()}",
-                    vembda_initiated_event=vembda_initiated_event,
-                )
+            json.dumps(
+                {
+                    "detail": f"Workflow server concurrent request rate exceeded. "
+                    f"Process count: {get_active_process_count()}"
+                }
             ),
-            status=200,
-            content_type="application/x-ndjson",
+            status=429,
+            content_type="application/json",
             headers=headers,
         )
 
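Because the concurrency-limit rejection is no longer delivered as a 200 NDJSON stream, callers must now branch on the status code before consuming events. A minimal client-side sketch of handling the new behavior (the base URL, the requests library, and the retry policy are illustrative assumptions, not part of this package):

    import time

    import requests  # assumed HTTP client; any client exposing status codes works


    def stream_workflow_with_retry(base_url: str, body: dict, max_attempts: int = 3) -> requests.Response:
        """POST to /workflow/stream, backing off while the server is saturated."""
        for attempt in range(max_attempts):
            response = requests.post(f"{base_url}/workflow/stream", json=body, stream=True)
            if response.status_code != 429:
                return response  # the 200 path still carries the NDJSON event stream
            # New in this release: saturation yields a single JSON error body, e.g.
            # {"detail": "Workflow server concurrent request rate exceeded. Process count: N"}
            time.sleep(2**attempt)  # simple exponential backoff: 1s, 2s, 4s
        raise RuntimeError("workflow server stayed saturated after retries")
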
@@ -117,7 +114,6 @@ def stream_workflow_route() -> Response:
         )
         increment_process_count(1)
     except Exception as e:
-        sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
         logger.exception(e)
 
         process_output_queue.put(create_vembda_rejected_event(context, traceback.format_exc()))
@@ -204,7 +200,6 @@ def stream_workflow_route() -> Response:
 
                 continue
             except Exception as e:
-                sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
                 logger.exception(e)
                 break
 
@@ -302,7 +297,6 @@ def stream_node_route() -> Response:
                 # This happens when theres a problem with the stream function call
                 # itself not the workflow runner
                 yield create_vembda_rejected_event(context, "Internal Server Error")
-                sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
                 app.logger.exception(stream_future.exception())
                 break
             else:
@@ -379,7 +373,6 @@ def get_version_route() -> tuple[dict, int]:
 
         resp["nodes"] = nodes
     except Exception as e:
-        sentry_sdk.set_tag("vellum_trace_id", "unknown")
         logger.exception(f"Failed to discover nodes: {str(e)}")
         resp["nodes"] = []
 
src/workflow_server/code_exec_runner.py
@@ -5,8 +5,6 @@ import os
 from uuid import uuid4
 from typing import Optional
 
-import sentry_sdk
-
 from vellum.workflows.exceptions import WorkflowInitializationException
 from workflow_server.api.workflow_view import get_workflow_request_context
 from workflow_server.core.events import VembdaExecutionInitiatedBody, VembdaExecutionInitiatedEvent
@@ -56,7 +54,6 @@ def run_code_exec_stream() -> None:
         fulfilled_event = serialize_vembda_rejected_event(context, str(e))
         print(f"{_EVENT_LINE}{fulfilled_event}")  # noqa: T201
     except Exception as e:
-        sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id) if context else "unknown")
         logger.exception(e)
 
         event = serialize_vembda_rejected_event(context, "Internal Server Error")
src/workflow_server/core/executor.py
@@ -15,7 +15,6 @@ from uuid import UUID, uuid4
 from typing import Any, Callable, Generator, Iterator, Optional, Tuple, Type
 
 from pebble import concurrent
-import sentry_sdk
 from vellum_ee.workflows.display.workflows import BaseWorkflowDisplay
 from vellum_ee.workflows.server.virtual_file_loader import VirtualFileFinder
 
@@ -68,7 +67,6 @@ def _stream_node_wrapper(executor_context: NodeExecutorContext, queue: Queue) ->
         for event in stream_node(executor_context=executor_context):
             queue.put(event)
     except Exception as e:
-        sentry_sdk.set_tag("vellum_trace_id", str(executor_context.trace_id))
         logger.exception(e)
         queue.put(
             VembdaExecutionFulfilledEvent(
@@ -106,7 +104,6 @@ def _stream_workflow_wrapper(executor_context: WorkflowExecutorContext, queue: Q
         if not span_id_emitted:
             queue.put(f"{SPAN_ID_EVENT}:{uuid4()}")
 
-        sentry_sdk.set_tag("vellum_trace_id", str(executor_context.trace_id))
         logger.exception(e)
         queue.put(serialize_vembda_rejected_event(executor_context, "Internal Server Error"))
         queue.put(STREAM_FINISHED_EVENT)
@@ -456,7 +453,6 @@ def _gather_workflow(context: WorkflowExecutorContext) -> Tuple[BaseWorkflow, st
         )
         return workflow, namespace
     except Exception as e:
-        sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
         logger.exception("Failed to initialize Workflow")
         raise WorkflowInitializationException(f"Failed to initialize workflow: {e}") from e
 
@@ -465,6 +461,5 @@ def _gather_display_context(workflow: BaseWorkflow, namespace: str) -> Optional[
     try:
         return BaseWorkflowDisplay.gather_event_display_context(namespace, workflow.__class__)
     except Exception:
-        sentry_sdk.set_tag("vellum_trace_id", "unknown")
         logger.exception("Unable to Parse Workflow Display Context")
         return None
src/workflow_server/utils/sentry.py
@@ -5,10 +5,44 @@ import sentry_sdk
 from sentry_sdk.integrations.logging import LoggingIntegration
 
 
+def _tag_trace_id(event: dict) -> None:
+    if "request" not in event:
+        return
+
+    if not isinstance(event["request"], dict):
+        return
+
+    url = event["request"].get("url")
+    if not isinstance(url, str):
+        return
+
+    if not url.endswith("/workflow/stream"):
+        return
+
+    body = event["request"].get("data")
+    if not isinstance(body, dict):
+        return
+
+    execution_context = body.get("execution_context")
+    if not isinstance(execution_context, dict):
+        return
+
+    trace_id = execution_context.get("trace_id")
+    if not isinstance(trace_id, str):
+        return
+
+    if "tags" not in event:
+        event["tags"] = {}
+
+    event["tags"]["vellum_trace_id"] = trace_id
+
+
 def before_send(event: dict, hint: dict) -> Optional[dict]:
     if "exc_info" in hint:
         _, _, _ = hint["exc_info"]
 
+    _tag_trace_id(event)
+
     return event
 
 
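before_send is Sentry's standard hook for mutating events just before transport, so every captured event now flows through _tag_trace_id on its way out. The diff does not show where the hook is registered; a rough sketch of the usual wiring, assuming this module's setup code passes it to sentry_sdk.init (the DSN and logging levels below are placeholders):

    import logging

    import sentry_sdk
    from sentry_sdk.integrations.logging import LoggingIntegration

    from workflow_server.utils.sentry import before_send  # the hook shown above

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
        before_send=before_send,  # runs _tag_trace_id on every outgoing event
        integrations=[
            # promote logger.exception(...) calls to Sentry error events
            LoggingIntegration(level=logging.INFO, event_level=logging.ERROR),
        ],
    )

Compared with the per-call set_tag usage removed elsewhere in this diff, tagging inside the hook derives the tag from the request attached to the event itself, presumably to avoid one request's tag leaking onto another request's events through the shared scope.
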
src/workflow_server/utils/tests/test_sentry_integration.py (new file)
@@ -0,0 +1,69 @@
+import pytest
+from uuid import uuid4
+
+from workflow_server.server import create_app
+
+
+@pytest.fixture
+def mock_sentry_capture_envelope(mocker):
+    mock_transport = mocker.patch("sentry_sdk.client.make_transport")
+    return mock_transport.return_value.capture_envelope
+
+
+def test_sentry_integration_with_workflow_endpoints(monkeypatch, mock_sentry_capture_envelope):
+    # GIVEN sentry is configured
+    monkeypatch.setenv("SENTRY_DSN", "https://test-dsn@sentry.io/1234567890")
+
+    # AND our /workflow/stream endpoint raises an exception
+    def mock_get_version():
+        raise Exception("Test exception")
+
+    monkeypatch.setattr("workflow_server.api.workflow_view.get_version", mock_get_version)
+
+    # AND we have a mock trace_id
+    trace_id = str(uuid4())
+
+    # AND we have a mock request body
+    body = {
+        "execution_id": uuid4(),
+        "inputs": [],
+        "environment_api_key": "test",
+        "module": "workflow",
+        "timeout": 360,
+        "files": {
+            "__init__.py": "",
+            "workflow.py": """\
+from vellum.workflows import BaseWorkflow
+
+class Workflow(BaseWorkflow):
+    pass
+""",
+        },
+        "execution_context": {
+            "trace_id": trace_id,
+            "parent_context": {
+                "type": "API_REQUEST",
+                "span_id": str(uuid4()),
+                "parent": None,
+            },
+        },
+    }
+
+    # WHEN we call the /workflow/stream endpoint
+    flask_app = create_app()
+
+    with flask_app.test_client() as test_client:
+        response = test_client.post("/workflow/stream", json=body)
+
+    # THEN we get a 500 error
+    assert response.status_code == 500
+
+    # AND sentry captures the error with the correct data
+    assert mock_sentry_capture_envelope.call_count == 1
+    envelope = mock_sentry_capture_envelope.call_args[0][0]
+    event = envelope.get_event()
+    assert event["level"] == "error"
+    assert "Test exception" in event["exception"]["values"][0]["value"]
+
+    # AND the trace_id is tagged
+    assert event["tags"]["vellum_trace_id"] == trace_id