vellum-workflow-server 1.9.0.post2__tar.gz → 1.9.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/PKG-INFO +2 -2
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/pyproject.toml +2 -2
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/workflow_view.py +192 -91
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/core/workflow_executor_context.py +3 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/start.py +6 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/README.md +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/__init__.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/__init__.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/auth_middleware.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/healthz_view.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/status_view.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/tests/__init__.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/tests/test_input_display_mapping.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/tests/test_workflow_view.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/code_exec_runner.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/config.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/core/__init__.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/core/cancel_workflow.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/core/events.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/core/executor.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/core/utils.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/logging_config.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/server.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/__init__.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/exit_handler.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/log_proxy.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/oom_killer.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/sentry.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/system_utils.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/tests/__init__.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/tests/test_sentry_integration.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/tests/test_system_utils.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/tests/test_utils.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/utils.py +0 -0

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/PKG-INFO
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-workflow-server
-Version: 1.9.0.post2
+Version: 1.9.2
 Summary:
 License: AGPL
 Requires-Python: >=3.9.0,<4
@@ -29,7 +29,7 @@ Requires-Dist: pyjwt (==2.10.0)
 Requires-Dist: python-dotenv (==1.0.1)
 Requires-Dist: retrying (==1.3.4)
 Requires-Dist: sentry-sdk[flask] (==2.20.0)
-Requires-Dist: vellum-ai (==1.9.
+Requires-Dist: vellum-ai (==1.9.2)
 Description-Content-Type: text/markdown
 
 # Vellum Workflow Runner Server

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/pyproject.toml
RENAMED

@@ -3,7 +3,7 @@ name = "vellum-workflow-server"
 
 [tool.poetry]
 name = "vellum-workflow-server"
-version = "1.9.0.post2"
+version = "1.9.2"
 description = ""
 readme = "README.md"
 authors = []
@@ -45,7 +45,7 @@ flask = "2.3.3"
 orderly-set = "5.2.2"
 pebble = "5.0.7"
 gunicorn = "23.0.0"
-vellum-ai = "1.9.
+vellum-ai = "1.9.2"
 python-dotenv = "1.0.1"
 retrying = "1.3.4"
 sentry-sdk = {extras = ["flask"], version = "2.20.0"}

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/workflow_view.py
RENAMED

@@ -8,6 +8,7 @@ import os
 import pkgutil
 from queue import Empty
 import sys
+import threading
 import time
 import traceback
 from uuid import uuid4
@@ -71,19 +72,195 @@ WORKFLOW_INITIATION_TIMEOUT_SECONDS = 60
 @bp.route("/stream", methods=["POST"])
 def stream_workflow_route() -> Response:
     data = request.get_json()
+    try:
+        context = WorkflowExecutorContext.model_validate(data)
+    except ValidationError as e:
+        error_message = e.errors()[0]["msg"]
+        error_location = e.errors()[0]["loc"]
+
+        return Response(
+            json.dumps({"detail": f"Invalid context: {error_message} at {error_location}"}),
+            status=400,
+            content_type="application/json",
+        )
+
+    headers = _get_headers(context)
+
+    # We can exceed the concurrency count currently with long running workflows due to a knative issue. So here
+    # if we detect a memory problem just exit us early
+    if not wait_for_available_process():
+        return Response(
+            json.dumps(
+                {
+                    "detail": f"Workflow server concurrent request rate exceeded. "
+                    f"Process count: {get_active_process_count()}"
+                }
+            ),
+            status=429,
+            content_type="application/json",
+            headers=headers,
+        )
+
+    start_workflow_state = _start_workflow(context)
+    if isinstance(start_workflow_state, Response):
+        return start_workflow_state
+
+    workflow_events, vembda_initiated_event, process, span_id, headers = start_workflow_state
+
+    def generator() -> Generator[str, None, None]:
+        try:
+            yield "\n"
+            yield vembda_initiated_event.model_dump_json()
+            yield "\n"
+            for row in workflow_events:
+                yield "\n"
+                if isinstance(row, dict):
+                    dump = json.dumps(row)
+                    yield dump
+                else:
+                    yield row
+            yield "\n"
+            # Sometimes the connections get hung after they finish with the vembda fulfilled event
+            # if it happens during a knative scale down event. So we emit an END string so that
+            # we don't have to do string compares on all the events for performance.
+            yield "\n"
+            yield "END"
+            yield "\n"
+
+            logger.info(
+                f"Workflow stream completed, execution ID: {span_id}, process count: {get_active_process_count()}"
+            )
+        except GeneratorExit:
+            # These can happen either from Vembda disconnects (possibily from predict disconnects) or
+            # from knative activator gateway timeouts which are caused by idleTimeout or responseStartSeconds
+            # being exceeded.
+            app.logger.error(
+                "Client disconnected in the middle of the Workflow Stream",
+                extra={
+                    "sentry_tags": {
+                        "server_version": vembda_initiated_event.body.server_version,
+                        "sdk_version": vembda_initiated_event.body.sdk_version,
+                    }
+                },
+            )
+            return
+        except Exception as e:
+            logger.exception("Error during workflow response stream generator", extra={"error": e})
+            yield "\n"
+            yield "END"
+            yield "\n"
+            return
+        finally:
+            if ENABLE_PROCESS_WRAPPER:
+                try:
+                    if process and process.is_alive():
+                        process.kill()
+                    if process:
+                        increment_process_count(-1)
+                    remove_active_span_id(span_id)
+                except Exception as e:
+                    logger.error("Failed to kill process", e)
+            else:
+                increment_process_count(-1)
+                remove_active_span_id(span_id)
+
+    resp = Response(
+        stream_with_context(generator()),
+        status=200,
+        content_type="application/x-ndjson",
+        headers=headers,
+    )
+    return resp
 
+
+@bp.route("/async-exec", methods=["POST"])
+def async_exec_workflow() -> Response:
+    data = request.get_json()
     try:
         context = WorkflowExecutorContext.model_validate(data)
     except ValidationError as e:
         error_message = e.errors()[0]["msg"]
         error_location = e.errors()[0]["loc"]
 
+        # TODO need to convert this to a vembda event so that trigger'd execs can me notified
+        # can either do it here in the workflow server or
         return Response(
             json.dumps({"detail": f"Invalid context: {error_message} at {error_location}"}),
             status=400,
             content_type="application/json",
         )
 
+    # Reject back to the queue handler if were low on memory here, though maybe we should update the is_available
+    # route to look at memory too. Don't send this response as an event. Though we might want some logic to catch
+    # if they have a workflow server that can never start a workflow because the base image uses so much memory.
+    if not wait_for_available_process():
+        return Response(
+            json.dumps({"detail": f"Server resources low." f"Process count: {get_active_process_count()}"}),
+            status=429,
+            content_type="application/json",
+        )
+
+    def run_workflow_background() -> None:
+        process: Optional[Process] = None
+        span_id: Optional[str] = None
+
+        try:
+            start_workflow_result = _start_workflow(context)
+            if isinstance(start_workflow_result, Response):
+                # TODO same here, should return this response as en event or it will get yeeted to the nether
+                # return start_workflow_result
+                return
+
+            workflow_events, vembda_initiated_event, process, span_id, headers = start_workflow_result
+
+            for _ in workflow_events:
+                # This is way inefficient in process mode since were just having the main proc stream the events
+                # to nowhere wasting memory I/O and cpu.
+                continue
+            logger.info(
+                f"Workflow async exec completed, execution ID: {span_id}, process count: {get_active_process_count()}"
+            )
+        except Exception as e:
+            logger.exception("Error during workflow async background worker", e)
+        finally:
+            if ENABLE_PROCESS_WRAPPER:
+                try:
+                    if process and process.is_alive():
+                        process.kill()
+                    if process:
+                        increment_process_count(-1)
+                    if span_id:
+                        remove_active_span_id(span_id)
+                except Exception as e:
+                    logger.error("Failed to kill process", e)
+            else:
+                increment_process_count(-1)
+                if span_id:
+                    remove_active_span_id(span_id)
+
+    thread = threading.Thread(target=run_workflow_background)
+    thread.start()
+
+    return Response(
+        json.dumps({"success": True}),
+        status=200,
+        content_type="application/json",
+    )
+
+
+def _start_workflow(
+    context: WorkflowExecutorContext,
+) -> Union[
+    Response,
+    tuple[
+        Iterator[Union[str, dict]],
+        VembdaExecutionInitiatedEvent,
+        Optional[Process],
+        str,
+        dict[str, str],
+    ],
+]:
+    headers = _get_headers(context)
     logger.info(
         f"Starting Workflow Server Request, trace ID: {context.trace_id}, "
         f"process count: {get_active_process_count()}, process wrapper: {ENABLE_PROCESS_WRAPPER}"
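
The hunk above turns `/stream` into a thin wrapper: request validation, the concurrency guard, and process startup move into the new `_start_workflow` helper added at the bottom of the hunk, while the route keeps only the NDJSON generator that relays events and finishes with an explicit `END` sentinel so clients can stop on a cheap string comparison. The new `/async-exec` route reuses `_start_workflow` but drains the event stream on a background thread and returns immediately. A minimal sketch of a client consuming the stream follows; the host, route prefix, and payload shape are illustrative assumptions, not part of the package:

```python
import json

import requests  # any HTTP client that can stream a response works here

# Hypothetical base URL and payload for illustration only; the real request
# body must validate against WorkflowExecutorContext on the server side.
BASE_URL = "http://localhost:8000"
payload = {"inputs": [], "trace_id": "00000000-0000-0000-0000-000000000000"}

with requests.post(f"{BASE_URL}/stream", json=payload, stream=True) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if not line:
            continue  # the server pads events with blank lines
        if line == "END":
            break  # sentinel emitted after the final event (see the diff above)
        print(json.loads(line))  # one JSON-encoded workflow event per line
```

The remaining hunks below rewire the old route internals into `_start_workflow`.
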
@@ -100,29 +277,7 @@ def stream_workflow_route() -> Response:
         parent=None,
     )
 
-
-
-    headers = {
-        "X-Vellum-SDK-Version": vembda_initiated_event.body.sdk_version,
-        "X-Vellum-Server-Version": vembda_initiated_event.body.server_version,
-        "X-Vellum-Events-Emitted": str(is_events_emitting_enabled(context)),
-    }
-
-    # We can exceed the concurrency count currently with long running workflows due to a knative issue. So here
-    # if we detect a memory problem just exit us early
-    if not wait_for_available_process():
-        return Response(
-            json.dumps(
-                {
-                    "detail": f"Workflow server concurrent request rate exceeded. "
-                    f"Process count: {get_active_process_count()}"
-                }
-            ),
-            status=429,
-            content_type="application/json",
-            headers=headers,
-        )
-
+    output_queue: Queue[Union[str, dict]] = Queue()
     cancel_signal = MultiprocessingEvent()
     timeout_signal = MultiprocessingEvent()
 
@@ -131,7 +286,7 @@ def stream_workflow_route() -> Response:
     try:
         process = stream_workflow_process_timeout(
            executor_context=context,
-            queue=
+            queue=output_queue,
            cancel_signal=cancel_signal,
            timeout_signal=timeout_signal,
        )
@@ -139,10 +294,10 @@ def stream_workflow_route() -> Response:
     except Exception as e:
         logger.exception(e)
 
-
+        output_queue.put(create_vembda_rejected_event(context, traceback.format_exc()))
 
     try:
-        first_item =
+        first_item = output_queue.get(timeout=WORKFLOW_INITIATION_TIMEOUT_SECONDS)
     except Empty:
         logger.error("Request timed out trying to initiate the Workflow")
 
@@ -291,72 +446,9 @@ def stream_workflow_route() -> Response:
             break
         yield event
 
-    workflow_events = process_events(
+    workflow_events = process_events(output_queue)
 
-
-        try:
-            yield "\n"
-            yield vembda_initiated_event.model_dump_json()
-            yield "\n"
-            for row in workflow_events:
-                yield "\n"
-                if isinstance(row, dict):
-                    dump = json.dumps(row)
-                    yield dump
-                else:
-                    yield row
-            yield "\n"
-            # Sometimes the connections get hung after they finish with the vembda fulfilled event
-            # if it happens during a knative scale down event. So we emit an END string so that
-            # we don't have to do string compares on all the events for performance.
-            yield "\n"
-            yield "END"
-            yield "\n"
-
-            logger.info(
-                f"Workflow stream completed, execution ID: {span_id}, process count: {get_active_process_count()}"
-            )
-        except GeneratorExit:
-            # These can happen either from Vembda disconnects (possibily from predict disconnects) or
-            # from knative activator gateway timeouts which are caused by idleTimeout or responseStartSeconds
-            # being exceeded.
-            app.logger.error(
-                "Client disconnected in the middle of the Workflow Stream",
-                extra={
-                    "sentry_tags": {
-                        "server_version": vembda_initiated_event.body.server_version,
-                        "sdk_version": vembda_initiated_event.body.sdk_version,
-                    }
-                },
-            )
-            return
-        except Exception as e:
-            logger.exception("Error during workflow response stream generator", extra={"error": e})
-            yield "\n"
-            yield "END"
-            yield "\n"
-            return
-        finally:
-            if ENABLE_PROCESS_WRAPPER:
-                try:
-                    if process and process.is_alive():
-                        process.kill()
-                    if process:
-                        increment_process_count(-1)
-                    remove_active_span_id(span_id)
-                except Exception as e:
-                    logger.error("Failed to kill process", e)
-            else:
-                increment_process_count(-1)
-                remove_active_span_id(span_id)
-
-    resp = Response(
-        stream_with_context(generator()),
-        status=200,
-        content_type="application/x-ndjson",
-        headers=headers,
-    )
-    return resp
+    return workflow_events, vembda_initiated_event, process, span_id, headers
 
 
 @bp.route("/stream-node", methods=["POST"])
@@ -564,3 +656,12 @@ def startup_error_generator(
         },
     )
     return
+
+
+def _get_headers(context: WorkflowExecutorContext) -> dict[str, Union[str, Any]]:
+    headers = {
+        "X-Vellum-SDK-Version": get_version()["sdk_version"],
+        "X-Vellum-Server-Version": get_version()["server_version"],
+        "X-Vellum-Events-Emitted": str(is_events_emitting_enabled(context)),
+    }
+    return headers
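
With `_get_headers` factored out above, the workflow_view.py changes are complete. The `/async-exec` route answers before the workflow finishes, so the caller only learns whether the execution was accepted, not how it ended. A hedged sketch of the calling convention (URL and payload again assumed, not taken from the package):

```python
import requests  # assumed HTTP client

# Hypothetical URL and payload; the server validates the body against
# WorkflowExecutorContext, starts a background thread, and returns at once.
resp = requests.post("http://localhost:8000/async-exec", json={"inputs": []})

if resp.status_code == 200:
    print(resp.json())  # {"success": True}
elif resp.status_code == 429:
    print("server resources low; retry later")  # see wait_for_available_process
```
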

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/core/workflow_executor_context.py
RENAMED

@@ -37,6 +37,9 @@ class BaseExecutorContext(UniversalBaseModel):
     feature_flags: Optional[dict[str, bool]] = None
     is_new_server: bool = False
     trigger_id: Optional[UUID] = None
+    # The actual 'execution id' of the workflow that we pass into the workflow
+    # when running in async mode.
+    workflow_span_id: Optional[UUID] = None
 
     @field_validator("inputs", mode="before")
     @classmethod
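
Because `workflow_span_id` is an optional pydantic field on `BaseExecutorContext`, callers of async executions can pin the workflow's execution ID by including it in the request payload. A simplified stand-in model illustrates the parsing behavior; the real class is `WorkflowExecutorContext` on `UniversalBaseModel`, and this sketch only mirrors the new field:

```python
from typing import Optional
from uuid import UUID, uuid4

from pydantic import BaseModel  # stand-in for the package's UniversalBaseModel


class ContextSketch(BaseModel):
    # Mirrors the new field: the execution id handed to the workflow in async mode.
    workflow_span_id: Optional[UUID] = None


ctx = ContextSketch.model_validate({"workflow_span_id": str(uuid4())})
print(ctx.workflow_span_id)  # parsed into a UUID; when omitted it stays None
```
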

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/start.py
RENAMED

@@ -33,6 +33,7 @@ class CustomGunicornLogger(glogging.Logger):
         logger = logging.getLogger("gunicorn.access")
         logger.addFilter(HealthCheckFilter())
         logger.addFilter(SignalFilter())
+        logger.addFilter(StatusIsAvailableFilter())
 
 
 class HealthCheckFilter(logging.Filter):
@@ -45,6 +46,11 @@ class SignalFilter(logging.Filter):
         return "SIGTERM" not in record.getMessage()
 
 
+class StatusIsAvailableFilter(logging.Filter):
+    def filter(self, record: Any) -> bool:
+        return "/status/is_available" not in record.getMessage()
+
+
 def start() -> None:
     if not is_development():
         start_oom_killer_worker()
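
`StatusIsAvailableFilter` follows the same pattern as the existing `HealthCheckFilter` and `SignalFilter`: a `logging.Filter` whose `filter()` returns `False` drops the record, so gunicorn's access log stays free of `/status/is_available` probe noise. A self-contained demonstration of the mechanism:

```python
import logging


class StatusIsAvailableFilter(logging.Filter):
    def filter(self, record: logging.LogRecord) -> bool:
        # Returning False suppresses the record entirely.
        return "/status/is_available" not in record.getMessage()


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("demo.access")
logger.addFilter(StatusIsAvailableFilter())

logger.info("GET /status/is_available HTTP/1.1")  # dropped by the filter
logger.info("GET /stream HTTP/1.1")               # logged normally
```

Attaching the filter to the `gunicorn.access` logger, as the diff does, suppresses only access-log lines; application logs are unaffected.
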

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/README.md
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/__init__.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/__init__.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/auth_middleware.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/healthz_view.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/status_view.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/tests/__init__.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/tests/test_input_display_mapping.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/tests/test_workflow_view.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/code_exec_runner.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/config.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/core/__init__.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/core/cancel_workflow.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/core/events.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/core/executor.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/core/utils.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/logging_config.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/server.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/__init__.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/exit_handler.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/log_proxy.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/oom_killer.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/sentry.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/system_utils.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/tests/__init__.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/tests/test_sentry_integration.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/tests/test_system_utils.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/tests/test_utils.py
RENAMED
File without changes

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.2}/src/workflow_server/utils/utils.py
RENAMED
File without changes