vellum-workflow-server 0.14.72.post6__tar.gz → 0.14.72.post8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of vellum-workflow-server might be problematic.

Files changed (32)
  1. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/PKG-INFO +1 -1
  2. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/pyproject.toml +1 -1
  3. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/api/workflow_view.py +10 -62
  4. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/code_exec_runner.py +5 -32
  5. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/core/executor.py +3 -28
  6. vellum_workflow_server-0.14.72.post8/src/workflow_server/core/utils.py +40 -0
  7. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/README.md +0 -0
  8. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/__init__.py +0 -0
  9. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/api/__init__.py +0 -0
  10. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/api/auth_middleware.py +0 -0
  11. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/api/healthz_view.py +0 -0
  12. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/api/tests/__init__.py +0 -0
  13. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/api/tests/test_input_display_mapping.py +0 -0
  14. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/api/tests/test_workflow_view.py +0 -0
  15. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py +0 -0
  16. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/config.py +0 -0
  17. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/core/__init__.py +0 -0
  18. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/core/cancel_workflow.py +0 -0
  19. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/core/events.py +0 -0
  20. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/core/workflow_executor_context.py +0 -0
  21. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/server.py +0 -0
  22. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/start.py +0 -0
  23. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/utils/__init__.py +0 -0
  24. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/utils/exit_handler.py +0 -0
  25. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/utils/log_proxy.py +0 -0
  26. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/utils/oom_killer.py +0 -0
  27. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/utils/sentry.py +0 -0
  28. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/utils/system_utils.py +0 -0
  29. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/utils/tests/__init__.py +0 -0
  30. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/utils/tests/test_system_utils.py +0 -0
  31. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/utils/tests/test_utils.py +0 -0
  32. {vellum_workflow_server-0.14.72.post6 → vellum_workflow_server-0.14.72.post8}/src/workflow_server/utils/utils.py +0 -0
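In summary: aside from the version bump, this release consolidates the repeated inline construction of VembdaExecutionFulfilledEvent error events in workflow_view.py, code_exec_runner.py, and executor.py into two shared helpers, create_vembda_rejected_event (returns a JSON-safe dict) and serialize_vembda_rejected_event (returns a JSON string), defined in the new module src/workflow_server/core/utils.py. The reconstructed hunks follow, grouped by file.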
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-workflow-server
-Version: 0.14.72.post6
+Version: 0.14.72.post8
 Summary:
 License: AGPL
 Requires-Python: >=3.9.0,<4
pyproject.toml

@@ -3,7 +3,7 @@ name = "vellum-workflow-server"

 [tool.poetry]
 name = "vellum-workflow-server"
-version = "0.14.72.post6"
+version = "0.14.72.post8"
 description = ""
 readme = "README.md"
 authors = []
src/workflow_server/api/workflow_view.py

@@ -32,6 +32,7 @@ from workflow_server.core.events import (
     VembdaExecutionInitiatedEvent,
 )
 from workflow_server.core.executor import stream_node_pebble_timeout, stream_workflow_process_timeout
+from workflow_server.core.utils import create_vembda_rejected_event, serialize_vembda_rejected_event
 from workflow_server.core.workflow_executor_context import (
     DEFAULT_TIMEOUT_SECONDS,
     NodeExecutorContext,
@@ -119,19 +120,7 @@ def stream_workflow_route() -> Response:
         sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
         logger.exception(e)

-        vembda_fulfilled_event = VembdaExecutionFulfilledEvent(
-            id=uuid4(),
-            timestamp=datetime.now(),
-            trace_id=context.trace_id,
-            span_id=context.execution_id,
-            body=VembdaExecutionFulfilledBody(
-                exit_code=-1,
-                stderr=traceback.format_exc(),
-                container_overhead_latency=context.container_overhead_latency,
-            ),
-            parent=None,
-        )
-        process_output_queue.put(vembda_fulfilled_event.model_dump(mode="json"))
+        process_output_queue.put(create_vembda_rejected_event(context, traceback.format_exc()))

     first_item = process_output_queue.get(timeout=0.1)
     if isinstance(first_item, str) and first_item.startswith(SPAN_ID_EVENT):
@@ -184,18 +173,9 @@ def stream_workflow_route() -> Response:
         if get_is_oom_killed():
             logger.warning("Workflow stream OOM Kill event")

-            yield VembdaExecutionFulfilledEvent(
-                id=uuid4(),
-                timestamp=datetime.now(),
-                trace_id=context.trace_id,
-                span_id=context.execution_id,
-                body=VembdaExecutionFulfilledBody(
-                    exit_code=-1,
-                    container_overhead_latency=context.container_overhead_latency,
-                    stderr=f"Organization Workflow server has exceeded {MEMORY_LIMIT_MB}MB memory limit.",
-                ),
-                parent=None,
-            ).model_dump(mode="json")
+            yield create_vembda_rejected_event(
+                context, f"Organization Workflow server has exceeded {MEMORY_LIMIT_MB}MB memory limit."
+            )

             if process and process.is_alive():
                 process.kill()
@@ -216,18 +196,9 @@ def stream_workflow_route() -> Response:
         if process and not process.is_alive():
             logger.error("Workflow process exited abnormally")

-            yield VembdaExecutionFulfilledEvent(
-                id=uuid4(),
-                timestamp=datetime.now(),
-                trace_id=context.trace_id,
-                span_id=context.execution_id,
-                body=VembdaExecutionFulfilledBody(
-                    exit_code=-1,
-                    container_overhead_latency=context.container_overhead_latency,
-                    stderr="Internal Server Error, Workflow process exited abnormally",
-                ),
-                parent=None,
-            ).model_dump(mode="json")
+            yield create_vembda_rejected_event(
+                context, "Internal Server Error, Workflow process exited abnormally"
+            )

             break

@@ -330,19 +301,7 @@ def stream_node_route() -> Response:
         if stream_future.exception() is not None:
             # This happens when theres a problem with the stream function call
             # itself not the workflow runner
-            vembda_fulfilled_event = VembdaExecutionFulfilledEvent(
-                id=uuid4(),
-                timestamp=datetime.now(),
-                trace_id=context.trace_id,
-                span_id=context.execution_id,
-                body=VembdaExecutionFulfilledBody(
-                    exit_code=-1,
-                    stderr="Internal Server Error",
-                    container_overhead_latency=context.container_overhead_latency,
-                ),
-                parent=None,
-            )
-            yield vembda_fulfilled_event.model_dump(mode="json")
+            yield create_vembda_rejected_event(context, "Internal Server Error")
             sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
             app.logger.exception(stream_future.exception())
             break
@@ -454,18 +413,7 @@ def startup_error_generator(
     yield "\n"
     yield vembda_initiated_event.model_dump_json()
     yield "\n"
-    yield VembdaExecutionFulfilledEvent(
-        id=uuid4(),
-        timestamp=datetime.now(),
-        trace_id=context.trace_id,
-        span_id=context.execution_id,
-        body=VembdaExecutionFulfilledBody(
-            exit_code=-1,
-            container_overhead_latency=context.container_overhead_latency,
-            stderr=message,
-        ),
-        parent=None,
-    ).model_dump_json()
+    yield serialize_vembda_rejected_event(context, message)
     yield "\n"
     yield "END"
     yield "\n"
src/workflow_server/code_exec_runner.py

@@ -9,13 +9,9 @@ import sentry_sdk

 from vellum.workflows.exceptions import WorkflowInitializationException
 from workflow_server.api.workflow_view import get_workflow_request_context
-from workflow_server.core.events import (
-    VembdaExecutionFulfilledBody,
-    VembdaExecutionFulfilledEvent,
-    VembdaExecutionInitiatedBody,
-    VembdaExecutionInitiatedEvent,
-)
+from workflow_server.core.events import VembdaExecutionInitiatedBody, VembdaExecutionInitiatedEvent
 from workflow_server.core.executor import stream_workflow
+from workflow_server.core.utils import serialize_vembda_rejected_event
 from workflow_server.core.workflow_executor_context import WorkflowExecutorContext
 from workflow_server.utils.utils import get_version

@@ -57,34 +53,11 @@ def run_code_exec_stream() -> None:
         for line in stream_iterator:
             print(f"{_EVENT_LINE}{json.dumps(line)}")  # noqa: T201
     except WorkflowInitializationException as e:
-        fulfilled_event = VembdaExecutionFulfilledEvent(
-            id=uuid4(),
-            timestamp=datetime.now(),
-            trace_id=context.trace_id,
-            span_id=context.execution_id,
-            body=VembdaExecutionFulfilledBody(
-                exit_code=-1,
-                stderr=str(e),
-                container_overhead_latency=context.container_overhead_latency,
-            ),
-            parent=None,
-        ).model_dump(mode="json")
-
-        print(f"{_EVENT_LINE}{json.dumps(fulfilled_event)}")  # noqa: T201
+        fulfilled_event = serialize_vembda_rejected_event(context, str(e))
+        print(f"{_EVENT_LINE}{fulfilled_event}")  # noqa: T201
     except Exception as e:
         sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id) if context else "unknown")
         logger.exception(e)

-        vembda_fulfilled_event = VembdaExecutionFulfilledEvent(
-            id=uuid4(),
-            timestamp=datetime.now(),
-            trace_id=context.trace_id if context else uuid4(),
-            span_id=context.execution_id if context else uuid4(),
-            body=VembdaExecutionFulfilledBody(
-                exit_code=-1,
-                stderr="Internal Server Error",
-            ),
-            parent=None,
-        )
-        event = json.dumps(vembda_fulfilled_event.model_dump(mode="json"))
+        event = serialize_vembda_rejected_event(context, "Internal Server Error")
         print(f"{_EVENT_LINE}{event}")  # noqa: T201
src/workflow_server/core/executor.py

@@ -40,6 +40,7 @@ from workflow_server.core.events import (
     VembdaExecutionFulfilledBody,
     VembdaExecutionFulfilledEvent,
 )
+from workflow_server.core.utils import serialize_vembda_rejected_event
 from workflow_server.core.workflow_executor_context import (
     DEFAULT_TIMEOUT_SECONDS,
     BaseExecutorContext,
@@ -101,40 +102,14 @@ def _stream_workflow_wrapper(executor_context: WorkflowExecutorContext, queue: Q
         if not span_id_emitted:
             queue.put(f"{SPAN_ID_EVENT}:{uuid4()}")

-        queue.put(
-            VembdaExecutionFulfilledEvent(
-                id=uuid4(),
-                timestamp=datetime.now(),
-                trace_id=executor_context.trace_id,
-                span_id=executor_context.execution_id,
-                body=VembdaExecutionFulfilledBody(
-                    exit_code=-1,
-                    stderr=str(e),
-                    container_overhead_latency=executor_context.container_overhead_latency,
-                ),
-                parent=None,
-            ).model_dump(mode="json")
-        )
+        queue.put(serialize_vembda_rejected_event(executor_context, str(e)))
     except Exception as e:
         if not span_id_emitted:
             queue.put(f"{SPAN_ID_EVENT}:{uuid4()}")

         sentry_sdk.set_tag("vellum_trace_id", str(executor_context.trace_id))
         logger.exception(e)
-        queue.put(
-            VembdaExecutionFulfilledEvent(
-                id=uuid4(),
-                timestamp=datetime.now(),
-                trace_id=executor_context.trace_id,
-                span_id=executor_context.execution_id,
-                body=VembdaExecutionFulfilledBody(
-                    exit_code=-1,
-                    stderr="Internal Server Error",
-                    container_overhead_latency=executor_context.container_overhead_latency,
-                ),
-                parent=None,
-            ).model_dump(mode="json")
-        )
+        queue.put(serialize_vembda_rejected_event(executor_context, "Internal Server Error"))
     queue.put(STREAM_FINISHED_EVENT)

     exit(0)
src/workflow_server/core/utils.py (new file)

@@ -0,0 +1,40 @@
+from datetime import datetime
+from uuid import uuid4
+from typing import Optional
+
+from workflow_server.core.events import VembdaExecutionFulfilledBody, VembdaExecutionFulfilledEvent
+from workflow_server.core.workflow_executor_context import BaseExecutorContext
+
+
+def _create_vembda_rejected_event_base(
+    executor_context: Optional[BaseExecutorContext], error_message: str
+) -> VembdaExecutionFulfilledEvent:
+    if executor_context:
+        trace_id = executor_context.trace_id
+        span_id = executor_context.execution_id
+        container_overhead_latency = executor_context.container_overhead_latency
+    else:
+        trace_id = uuid4()
+        span_id = uuid4()
+        container_overhead_latency = None
+
+    return VembdaExecutionFulfilledEvent(
+        id=uuid4(),
+        timestamp=datetime.now(),
+        trace_id=trace_id,
+        span_id=span_id,
+        body=VembdaExecutionFulfilledBody(
+            exit_code=-1,
+            stderr=error_message,
+            container_overhead_latency=container_overhead_latency,
+        ),
+        parent=None,
+    )
+
+
+def create_vembda_rejected_event(executor_context: Optional[BaseExecutorContext], error_message: str) -> dict:
+    return _create_vembda_rejected_event_base(executor_context, error_message).model_dump(mode="json")
+
+
+def serialize_vembda_rejected_event(executor_context: Optional[BaseExecutorContext], error_message: str) -> str:
+    return _create_vembda_rejected_event_base(executor_context, error_message).model_dump_json()
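The only difference between the two public helpers is the return type: create_vembda_rejected_event yields a JSON-safe dict (as used with process_output_queue.put in workflow_view.py), while serialize_vembda_rejected_event yields a ready-to-emit JSON string (as used in code_exec_runner.py and executor.py). Below is a minimal, self-contained sketch of the same builder-plus-wrappers pattern using plain Pydantic v2; the names RejectedBody, RejectedEvent, _build, as_dict, and as_json are illustrative stand-ins, not the package's actual classes or functions.

from datetime import datetime
from typing import Optional
from uuid import UUID, uuid4

from pydantic import BaseModel


class RejectedBody(BaseModel):  # hypothetical stand-in for VembdaExecutionFulfilledBody
    exit_code: int
    stderr: str


class RejectedEvent(BaseModel):  # hypothetical stand-in for VembdaExecutionFulfilledEvent
    id: UUID
    timestamp: datetime
    trace_id: UUID
    body: RejectedBody


def _build(trace_id: Optional[UUID], message: str) -> RejectedEvent:
    # Fall back to a fresh UUID when no context is available,
    # mirroring the Optional[BaseExecutorContext] handling in core/utils.py.
    return RejectedEvent(
        id=uuid4(),
        timestamp=datetime.now(),
        trace_id=trace_id or uuid4(),
        body=RejectedBody(exit_code=-1, stderr=message),
    )


def as_dict(trace_id: Optional[UUID], message: str) -> dict:
    # JSON-safe dict, analogous to create_vembda_rejected_event(...)
    return _build(trace_id, message).model_dump(mode="json")


def as_json(trace_id: Optional[UUID], message: str) -> str:
    # Serialized string, analogous to serialize_vembda_rejected_event(...)
    return _build(trace_id, message).model_dump_json()


if __name__ == "__main__":
    print(as_dict(None, "Internal Server Error"))
    print(as_json(None, "Internal Server Error"))

Factoring the event construction into one private builder keeps the trace/span fallback logic in a single place, so callers only choose a serialization, not how the event is assembled.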