vellum-workflow-server 0.14.72.post6__py3-none-any.whl → 0.14.72.post8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of vellum-workflow-server might be problematic.
- {vellum_workflow_server-0.14.72.post6.dist-info → vellum_workflow_server-0.14.72.post8.dist-info}/METADATA +1 -1
- {vellum_workflow_server-0.14.72.post6.dist-info → vellum_workflow_server-0.14.72.post8.dist-info}/RECORD +8 -7
- workflow_server/api/workflow_view.py +10 -62
- workflow_server/code_exec_runner.py +5 -32
- workflow_server/core/executor.py +3 -28
- workflow_server/core/utils.py +40 -0
- {vellum_workflow_server-0.14.72.post6.dist-info → vellum_workflow_server-0.14.72.post8.dist-info}/WHEEL +0 -0
- {vellum_workflow_server-0.14.72.post6.dist-info → vellum_workflow_server-0.14.72.post8.dist-info}/entry_points.txt +0 -0
{vellum_workflow_server-0.14.72.post6.dist-info → vellum_workflow_server-0.14.72.post8.dist-info}/RECORD
CHANGED

@@ -6,13 +6,14 @@ workflow_server/api/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 workflow_server/api/tests/test_input_display_mapping.py,sha256=drBZqMudFyB5wgiUOcMgRXz7E7ge-Qgxbstw4E4f0zE,2211
 workflow_server/api/tests/test_workflow_view.py,sha256=2nscM_QsYPHkkTG8_JhNbE2LmGL5FQKXEtaLLjXouw0,14591
 workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=wmeH8oGo0lKx3YzEJQ9nGjw7adqQD0EL7-xGLPgYWqc,24872
-workflow_server/api/workflow_view.py,sha256=
-workflow_server/code_exec_runner.py,sha256=
+workflow_server/api/workflow_view.py,sha256=AOHccJXkhzw-iXBo4Q_EaLRynTuNmeQZaQxB5ttgrY4,15434
+workflow_server/code_exec_runner.py,sha256=XSs4w_b0vDUt0HqSakc26Gxx9aoG2wmwOo-JGVL5QJ4,2388
 workflow_server/config.py,sha256=K5Tavm7wiqCZt0RWWue7zzb8N6e8aWnFOTNlBqEJPcI,1330
 workflow_server/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 workflow_server/core/cancel_workflow.py,sha256=Ffkc3mzmrdMEUcD-sHfEhX4IwVrka-E--SxKA1dUfIU,2185
 workflow_server/core/events.py,sha256=24MA66DVQuaLJJcZrS8IL1Zq4Ohi9CoouKZ5VgoH3Cs,1402
-workflow_server/core/executor.py,sha256
+workflow_server/core/executor.py,sha256=TOnJ5FcmC8mWNdGvoyTwNlNlbhp6s6hk7Nc4M35jEe4,17148
+workflow_server/core/utils.py,sha256=lgzxkAEjEXPxGXXQlUYTYuCdHht-eDJJmHj5AhEb3_o,1500
 workflow_server/core/workflow_executor_context.py,sha256=a-v48GJbOWUh4JIf_bNwDX-BvfKkg4xwRSPEyRVQmp4,1373
 workflow_server/server.py,sha256=QBU12AaAfAgLqfCDBd24qIJl_mbheiq0-hfcWV7rZM4,1234
 workflow_server/start.py,sha256=DgtQhuCLc07BIWyJPLPZKZsQ8jwEFsvvfIo7MdwVrpw,1998
@@ -26,7 +27,7 @@ workflow_server/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
 workflow_server/utils/tests/test_system_utils.py,sha256=MdBxI9gxUOpR_JBAHpEz6dGFY6JjxhMSM2oExpqFvNA,4314
 workflow_server/utils/tests/test_utils.py,sha256=qwK5Rmy3RQyjtlUrYAuGuDlBeRzZKsf1yS-y2IpUizQ,6452
 workflow_server/utils/utils.py,sha256=Wqqn-1l2ugkGgy5paWWdt0AVxAyPMQCYcnRSSOMjXlA,4355
-vellum_workflow_server-0.14.72.
-vellum_workflow_server-0.14.72.
-vellum_workflow_server-0.14.72.
-vellum_workflow_server-0.14.72.
+vellum_workflow_server-0.14.72.post8.dist-info/METADATA,sha256=nVRAxS08mE0rdcDEdVIP_WqV-0rtExCdf-OWc_PeVvI,2243
+vellum_workflow_server-0.14.72.post8.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+vellum_workflow_server-0.14.72.post8.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
+vellum_workflow_server-0.14.72.post8.dist-info/RECORD,,
workflow_server/api/workflow_view.py
CHANGED

@@ -32,6 +32,7 @@ from workflow_server.core.events import (
     VembdaExecutionInitiatedEvent,
 )
 from workflow_server.core.executor import stream_node_pebble_timeout, stream_workflow_process_timeout
+from workflow_server.core.utils import create_vembda_rejected_event, serialize_vembda_rejected_event
 from workflow_server.core.workflow_executor_context import (
     DEFAULT_TIMEOUT_SECONDS,
     NodeExecutorContext,
@@ -119,19 +120,7 @@ def stream_workflow_route() -> Response:
         sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
         logger.exception(e)

-        vembda_fulfilled_event = VembdaExecutionFulfilledEvent(
-            id=uuid4(),
-            timestamp=datetime.now(),
-            trace_id=context.trace_id,
-            span_id=context.execution_id,
-            body=VembdaExecutionFulfilledBody(
-                exit_code=-1,
-                stderr=traceback.format_exc(),
-                container_overhead_latency=context.container_overhead_latency,
-            ),
-            parent=None,
-        )
-        process_output_queue.put(vembda_fulfilled_event.model_dump(mode="json"))
+        process_output_queue.put(create_vembda_rejected_event(context, traceback.format_exc()))

     first_item = process_output_queue.get(timeout=0.1)
     if isinstance(first_item, str) and first_item.startswith(SPAN_ID_EVENT):
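Reviewer note: the removed inline block and the new create_vembda_rejected_event helper produce the same JSON-serializable payload, so queue consumers are unchanged. Below is a hedged sketch of that payload's shape, inferred only from the constructor fields visible in this diff (the authoritative schema lives in workflow_server/core/events.py); the values are placeholders.

    # Approximate shape of the dict placed on process_output_queue.
    rejected_event = {
        "id": "<uuid4>",
        "timestamp": "<datetime.now(), serialized>",
        "trace_id": "<context.trace_id>",
        "span_id": "<context.execution_id>",
        "body": {
            "exit_code": -1,
            "stderr": "<error message, e.g. traceback.format_exc()>",
            "container_overhead_latency": "<context.container_overhead_latency>",
        },
        "parent": None,
    }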
@@ -184,18 +173,9 @@ def stream_workflow_route() -> Response:
         if get_is_oom_killed():
             logger.warning("Workflow stream OOM Kill event")

-            yield VembdaExecutionFulfilledEvent(
-                id=uuid4(),
-                timestamp=datetime.now(),
-                trace_id=context.trace_id,
-                span_id=context.execution_id,
-                body=VembdaExecutionFulfilledBody(
-                    exit_code=-1,
-                    container_overhead_latency=context.container_overhead_latency,
-                    stderr=f"Organization Workflow server has exceeded {MEMORY_LIMIT_MB}MB memory limit.",
-                ),
-                parent=None,
-            ).model_dump(mode="json")
+            yield create_vembda_rejected_event(
+                context, f"Organization Workflow server has exceeded {MEMORY_LIMIT_MB}MB memory limit."
+            )

             if process and process.is_alive():
                 process.kill()
@@ -216,18 +196,9 @@ def stream_workflow_route() -> Response:
         if process and not process.is_alive():
             logger.error("Workflow process exited abnormally")

-            yield VembdaExecutionFulfilledEvent(
-                id=uuid4(),
-                timestamp=datetime.now(),
-                trace_id=context.trace_id,
-                span_id=context.execution_id,
-                body=VembdaExecutionFulfilledBody(
-                    exit_code=-1,
-                    container_overhead_latency=context.container_overhead_latency,
-                    stderr="Internal Server Error, Workflow process exited abnormally",
-                ),
-                parent=None,
-            ).model_dump(mode="json")
+            yield create_vembda_rejected_event(
+                context, "Internal Server Error, Workflow process exited abnormally"
+            )

             break

@@ -330,19 +301,7 @@ def stream_node_route() -> Response:
         if stream_future.exception() is not None:
             # This happens when theres a problem with the stream function call
             # itself not the workflow runner
-            vembda_fulfilled_event = VembdaExecutionFulfilledEvent(
-                id=uuid4(),
-                timestamp=datetime.now(),
-                trace_id=context.trace_id,
-                span_id=context.execution_id,
-                body=VembdaExecutionFulfilledBody(
-                    exit_code=-1,
-                    stderr="Internal Server Error",
-                    container_overhead_latency=context.container_overhead_latency,
-                ),
-                parent=None,
-            )
-            yield vembda_fulfilled_event.model_dump(mode="json")
+            yield create_vembda_rejected_event(context, "Internal Server Error")
             sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
             app.logger.exception(stream_future.exception())
             break
@@ -454,18 +413,7 @@ def startup_error_generator(
     yield "\n"
     yield vembda_initiated_event.model_dump_json()
     yield "\n"
-    yield VembdaExecutionFulfilledEvent(
-        id=uuid4(),
-        timestamp=datetime.now(),
-        trace_id=context.trace_id,
-        span_id=context.execution_id,
-        body=VembdaExecutionFulfilledBody(
-            exit_code=-1,
-            container_overhead_latency=context.container_overhead_latency,
-            stderr=message,
-        ),
-        parent=None,
-    ).model_dump_json()
+    yield serialize_vembda_rejected_event(context, message)
     yield "\n"
     yield "END"
     yield "\n"
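Reviewer note: startup_error_generator emits a newline-delimited stream: the initiated event as JSON, the rejected event as JSON, then a literal "END" marker. A minimal sketch of a client-side reader for this framing (illustrative names, not part of the package):

    import json

    def read_ndjson_until_end(lines):
        # Collect JSON events from the stream, stopping at the "END" sentinel.
        events = []
        for raw in lines:
            line = raw.strip()
            if not line:
                continue  # skip the bare "\n" separator chunks
            if line == "END":
                break
            events.append(json.loads(line))
        return events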
workflow_server/code_exec_runner.py
CHANGED

@@ -9,13 +9,9 @@ import sentry_sdk

 from vellum.workflows.exceptions import WorkflowInitializationException
 from workflow_server.api.workflow_view import get_workflow_request_context
-from workflow_server.core.events import (
-    VembdaExecutionFulfilledBody,
-    VembdaExecutionFulfilledEvent,
-    VembdaExecutionInitiatedBody,
-    VembdaExecutionInitiatedEvent,
-)
+from workflow_server.core.events import VembdaExecutionInitiatedBody, VembdaExecutionInitiatedEvent
 from workflow_server.core.executor import stream_workflow
+from workflow_server.core.utils import serialize_vembda_rejected_event
 from workflow_server.core.workflow_executor_context import WorkflowExecutorContext
 from workflow_server.utils.utils import get_version

@@ -57,34 +53,11 @@ def run_code_exec_stream() -> None:
         for line in stream_iterator:
             print(f"{_EVENT_LINE}{json.dumps(line)}")  # noqa: T201
     except WorkflowInitializationException as e:
-        fulfilled_event = VembdaExecutionFulfilledEvent(
-            id=uuid4(),
-            timestamp=datetime.now(),
-            trace_id=context.trace_id,
-            span_id=context.execution_id,
-            body=VembdaExecutionFulfilledBody(
-                exit_code=-1,
-                stderr=str(e),
-                container_overhead_latency=context.container_overhead_latency,
-            ),
-            parent=None,
-        ).model_dump(mode="json")
-
-        print(f"{_EVENT_LINE}{json.dumps(fulfilled_event)}")  # noqa: T201
+        fulfilled_event = serialize_vembda_rejected_event(context, str(e))
+        print(f"{_EVENT_LINE}{fulfilled_event}")  # noqa: T201
     except Exception as e:
         sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id) if context else "unknown")
         logger.exception(e)

-        vembda_fulfilled_event = VembdaExecutionFulfilledEvent(
-            id=uuid4(),
-            timestamp=datetime.now(),
-            trace_id=context.trace_id if context else uuid4(),
-            span_id=context.execution_id if context else uuid4(),
-            body=VembdaExecutionFulfilledBody(
-                exit_code=-1,
-                stderr="Internal Server Error",
-            ),
-            parent=None,
-        )
-        event = json.dumps(vembda_fulfilled_event.model_dump(mode="json"))
+        event = serialize_vembda_rejected_event(context, "Internal Server Error")
         print(f"{_EVENT_LINE}{event}")  # noqa: T201
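Reviewer note: the runner prints every event to stdout behind the _EVENT_LINE prefix (its value is defined elsewhere in the package and treated as opaque here); the new code prints the already-serialized JSON string directly, where the old code re-ran json.dumps over an already-dumped dict. A hedged sketch of how a parent process could separate event lines from ordinary output:

    import json

    def demux_stdout(lines, event_prefix):
        # Lines carrying the event prefix parse as JSON events; everything
        # else is ordinary program output and passes through untouched.
        events, passthrough = [], []
        for line in lines:
            if line.startswith(event_prefix):
                events.append(json.loads(line[len(event_prefix):]))
            else:
                passthrough.append(line)
        return events, passthrough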
workflow_server/core/executor.py
CHANGED

@@ -40,6 +40,7 @@ from workflow_server.core.events import (
     VembdaExecutionFulfilledBody,
     VembdaExecutionFulfilledEvent,
 )
+from workflow_server.core.utils import serialize_vembda_rejected_event
 from workflow_server.core.workflow_executor_context import (
     DEFAULT_TIMEOUT_SECONDS,
     BaseExecutorContext,
@@ -101,40 +102,14 @@ def _stream_workflow_wrapper(executor_context: WorkflowExecutorContext, queue: Q
         if not span_id_emitted:
             queue.put(f"{SPAN_ID_EVENT}:{uuid4()}")

-        queue.put(
-            VembdaExecutionFulfilledEvent(
-                id=uuid4(),
-                timestamp=datetime.now(),
-                trace_id=executor_context.trace_id,
-                span_id=executor_context.execution_id,
-                body=VembdaExecutionFulfilledBody(
-                    exit_code=-1,
-                    stderr=str(e),
-                    container_overhead_latency=executor_context.container_overhead_latency,
-                ),
-                parent=None,
-            ).model_dump(mode="json")
-        )
+        queue.put(serialize_vembda_rejected_event(executor_context, str(e)))
     except Exception as e:
         if not span_id_emitted:
             queue.put(f"{SPAN_ID_EVENT}:{uuid4()}")

         sentry_sdk.set_tag("vellum_trace_id", str(executor_context.trace_id))
         logger.exception(e)
-        queue.put(
-            VembdaExecutionFulfilledEvent(
-                id=uuid4(),
-                timestamp=datetime.now(),
-                trace_id=executor_context.trace_id,
-                span_id=executor_context.execution_id,
-                body=VembdaExecutionFulfilledBody(
-                    exit_code=-1,
-                    stderr="Internal Server Error",
-                    container_overhead_latency=executor_context.container_overhead_latency,
-                ),
-                parent=None,
-            ).model_dump(mode="json")
-        )
+        queue.put(serialize_vembda_rejected_event(executor_context, "Internal Server Error"))
         queue.put(STREAM_FINISHED_EVENT)

         exit(0)
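Reviewer note on payload conventions: this wrapper now enqueues the JSON-string form (serialize_vembda_rejected_event) where it previously enqueued a dict from model_dump(mode="json"), while workflow_view.py enqueues dicts via create_vembda_rejected_event, and span IDs travel as marker strings of the form f"{SPAN_ID_EVENT}:{uuid4()}". A hedged sketch of a consumer that normalizes all three (illustrative only; the real consumer logic lives in workflow_view.py):

    import json

    def normalize_queue_item(item, span_id_prefix):
        # Marker strings carry the span id; other strings are assumed to be
        # serialized events; dict payloads pass through unchanged.
        if isinstance(item, str):
            if item.startswith(span_id_prefix):
                return ("span_id", item.split(":", 1)[1])
            return ("event", json.loads(item))
        return ("event", item)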
workflow_server/core/utils.py
ADDED

@@ -0,0 +1,40 @@
+from datetime import datetime
+from uuid import uuid4
+from typing import Optional
+
+from workflow_server.core.events import VembdaExecutionFulfilledBody, VembdaExecutionFulfilledEvent
+from workflow_server.core.workflow_executor_context import BaseExecutorContext
+
+
+def _create_vembda_rejected_event_base(
+    executor_context: Optional[BaseExecutorContext], error_message: str
+) -> VembdaExecutionFulfilledEvent:
+    if executor_context:
+        trace_id = executor_context.trace_id
+        span_id = executor_context.execution_id
+        container_overhead_latency = executor_context.container_overhead_latency
+    else:
+        trace_id = uuid4()
+        span_id = uuid4()
+        container_overhead_latency = None
+
+    return VembdaExecutionFulfilledEvent(
+        id=uuid4(),
+        timestamp=datetime.now(),
+        trace_id=trace_id,
+        span_id=span_id,
+        body=VembdaExecutionFulfilledBody(
+            exit_code=-1,
+            stderr=error_message,
+            container_overhead_latency=container_overhead_latency,
+        ),
+        parent=None,
+    )
+
+
+def create_vembda_rejected_event(executor_context: Optional[BaseExecutorContext], error_message: str) -> dict:
+    return _create_vembda_rejected_event_base(executor_context, error_message).model_dump(mode="json")
+
+
+def serialize_vembda_rejected_event(executor_context: Optional[BaseExecutorContext], error_message: str) -> str:
+    return _create_vembda_rejected_event_base(executor_context, error_message).model_dump_json()
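A short usage sketch for the two public helpers (hedged: it assumes the VembdaExecutionFulfilledEvent pydantic model dumps its body with the field names shown above). Passing None for the context exercises the fallback branch that mints fresh UUIDs:

    from workflow_server.core.utils import (
        create_vembda_rejected_event,
        serialize_vembda_rejected_event,
    )

    # Dict form, as used with queue.put(...) and yield in workflow_view.py:
    as_dict = create_vembda_rejected_event(None, "Internal Server Error")
    assert as_dict["body"]["exit_code"] == -1

    # JSON-string form, as printed behind _EVENT_LINE in code_exec_runner.py:
    as_json = serialize_vembda_rejected_event(None, "Internal Server Error")
    assert isinstance(as_json, str)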
{vellum_workflow_server-0.14.72.post6.dist-info → vellum_workflow_server-0.14.72.post8.dist-info}/WHEEL
File without changes

{vellum_workflow_server-0.14.72.post6.dist-info → vellum_workflow_server-0.14.72.post8.dist-info}/entry_points.txt
File without changes