vellum-workflow-server 1.9.0.post2__tar.gz → 1.9.8.post2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/PKG-INFO +2 -2
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/pyproject.toml +2 -2
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/api/tests/test_workflow_view.py +24 -24
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py +12 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/api/workflow_view.py +194 -92
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/core/executor.py +15 -25
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/core/utils.py +4 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/core/workflow_executor_context.py +17 -1
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/start.py +6 -0
- vellum_workflow_server-1.9.8.post2/src/workflow_server/utils/exit_handler.py +56 -0
- vellum_workflow_server-1.9.0.post2/src/workflow_server/utils/exit_handler.py +0 -27
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/README.md +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/__init__.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/api/__init__.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/api/auth_middleware.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/api/healthz_view.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/api/status_view.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/api/tests/__init__.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/api/tests/test_input_display_mapping.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/code_exec_runner.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/config.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/core/__init__.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/core/cancel_workflow.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/core/events.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/logging_config.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/server.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/utils/__init__.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/utils/log_proxy.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/utils/oom_killer.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/utils/sentry.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/utils/system_utils.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/utils/tests/__init__.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/utils/tests/test_sentry_integration.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/utils/tests/test_system_utils.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/utils/tests/test_utils.py +0 -0
- {vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/utils/utils.py +0 -0
{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-workflow-server
-Version: 1.9.0.post2
+Version: 1.9.8.post2
 Summary:
 License: AGPL
 Requires-Python: >=3.9.0,<4
@@ -29,7 +29,7 @@ Requires-Dist: pyjwt (==2.10.0)
 Requires-Dist: python-dotenv (==1.0.1)
 Requires-Dist: retrying (==1.3.4)
 Requires-Dist: sentry-sdk[flask] (==2.20.0)
-Requires-Dist: vellum-ai (==1.9.0)
+Requires-Dist: vellum-ai (==1.9.8)
 Description-Content-Type: text/markdown

 # Vellum Workflow Runner Server
{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/pyproject.toml

@@ -3,7 +3,7 @@ name = "vellum-workflow-server"

 [tool.poetry]
 name = "vellum-workflow-server"
-version = "1.9.0.post2"
+version = "1.9.8.post2"
 description = ""
 readme = "README.md"
 authors = []
@@ -45,7 +45,7 @@ flask = "2.3.3"
 orderly-set = "5.2.2"
 pebble = "5.0.7"
 gunicorn = "23.0.0"
-vellum-ai = "1.9.0"
+vellum-ai = "1.9.8"
 python-dotenv = "1.0.1"
 retrying = "1.3.4"
 sentry-sdk = {extras = ["flask"], version = "2.20.0"}
{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/api/tests/test_workflow_view.py

@@ -63,11 +63,11 @@ class TestNode(BaseNode):
             "comment": {"expanded": True, "value": "A test node for processing data."},
             "position": {"x": 0.0, "y": 0.0},
         },
-        "id": "
+        "id": "6f4c9178-9f46-4723-bcb7-0bd59db54eca",
         "label": "Test Node",
         "outputs": [],
-        "ports": [{"id": "
-        "trigger": {"id": "
+        "ports": [{"id": "4394823f-79a8-4dbc-99ae-06a1df6c7408", "name": "default", "type": "DEFAULT"}],
+        "trigger": {"id": "07240af1-67c6-4460-b53d-53f0b0f1b90e", "merge_behavior": "AWAIT_ATTRIBUTES"},
         "type": "GENERIC",
     }

@@ -127,11 +127,11 @@ class SomeOtherNode(BaseNode):
             "comment": {"expanded": True, "value": "This is Some Node."},
             "position": {"x": 0.0, "y": 0.0},
         },
-        "id": "
+        "id": "89e84bac-5a5f-4f64-8083-7d3ebec98be1",
         "label": "Some Node",
         "outputs": [],
-        "ports": [{"id": "
-        "trigger": {"id": "
+        "ports": [{"id": "2983ea5c-1d29-483a-b896-53098f5de4f1", "name": "default", "type": "DEFAULT"}],
+        "trigger": {"id": "6996efb0-5a20-4719-8835-34fe6552764a", "merge_behavior": "AWAIT_ATTRIBUTES"},
         "type": "GENERIC",
     }

@@ -150,11 +150,11 @@ class SomeOtherNode(BaseNode):
             "comment": {"expanded": True, "value": "This is Some Other Node."},
             "position": {"x": 0.0, "y": 0.0},
         },
-        "id": "
+        "id": "3cdbba02-8a34-4e0f-8b94-770a944dcaa3",
         "label": "Some Other Node",
         "outputs": [],
-        "ports": [{"id": "
-        "trigger": {"id": "
+        "ports": [{"id": "1839bde5-2ad4-4723-b21b-2c55fa833a7a", "name": "default", "type": "DEFAULT"}],
+        "trigger": {"id": "c36df8a8-5624-45be-99c9-826cf511a951", "merge_behavior": "AWAIT_ATTRIBUTES"},
         "type": "GENERIC",
     }

@@ -222,11 +222,11 @@ class HelperClass:
             "comment": {"expanded": True, "value": "Processes input data."},
             "position": {"x": 0.0, "y": 0.0},
         },
-        "id": "
+        "id": "7121bcb9-98a1-4907-bf9b-9734d773fd15",
         "label": "Processing Node",
         "outputs": [],
-        "ports": [{"id": "
-        "trigger": {"id": "
+        "ports": [{"id": "de27da74-30e9-4e7b-95c2-92bdfc5bf042", "name": "default", "type": "DEFAULT"}],
+        "trigger": {"id": "e02bd85e-8b03-4b21-8b3e-f411042334ce", "merge_behavior": "AWAIT_ATTRIBUTES"},
         "type": "GENERIC",
     }

@@ -240,11 +240,11 @@ class HelperClass:
             "comment": {"expanded": True, "value": "Transforms data format."},
             "position": {"x": 0.0, "y": 0.0},
         },
-        "id": "
+        "id": "6a785cb0-f631-4f03-94c6-e82331c14c1a",
         "label": "Transformation Node",
         "outputs": [],
-        "ports": [{"id": "
-        "trigger": {"id": "
+        "ports": [{"id": "67a13ea0-fd6b-44dc-af46-c72da06aa11f", "name": "default", "type": "DEFAULT"}],
+        "trigger": {"id": "08d4e317-baa8-478f-b278-99362e50e6b4", "merge_behavior": "AWAIT_ATTRIBUTES"},
         "type": "GENERIC",
     }

@@ -306,11 +306,11 @@ class BrokenNode(BaseNode)
             "comment": {"expanded": True, "value": "This is Some Node."},
             "position": {"x": 0.0, "y": 0.0},
         },
-        "id": "
+        "id": "a2706730-074b-4ea3-968a-25e68af1caed",
         "label": "Some Node",
         "outputs": [],
-        "ports": [{"id": "
-        "trigger": {"id": "
+        "ports": [{"id": "e0ee3653-e071-4b91-9dfc-5e1dca9c665b", "name": "default", "type": "DEFAULT"}],
+        "trigger": {"id": "8d931b01-30ca-4c0d-b1b7-7c18379c83e6", "merge_behavior": "AWAIT_ATTRIBUTES"},
         "type": "GENERIC",
     }

@@ -371,12 +371,12 @@ class MyAdditionNode(BaseNode):
         "adornments": None,
         "attributes": [
             {
-                "id": "
+                "id": "4223b340-447f-46c2-b35d-30ef16c5ae17",
                 "name": "arg1",
                 "value": None,
             },
             {
-                "id": "
+                "id": "1de0f46a-95f6-4cd0-bb0f-e2414054d507",
                 "name": "arg2",
                 "value": None,
             },

@@ -387,11 +387,11 @@ class MyAdditionNode(BaseNode):
             "comment": {"expanded": True, "value": "Custom node that performs simple addition."},
             "position": {"x": 0.0, "y": 0.0},
         },
-        "id": "
+        "id": "2464b610-fb6d-495b-b17c-933ee147f19f",
         "label": "My Addition Node",
-        "outputs": [{"id": "
-        "ports": [{"id": "
-        "trigger": {"id": "
+        "outputs": [{"id": "f39d85c9-e7bf-45e1-bb67-f16225db0118", "name": "result", "type": "NUMBER", "value": None}],
+        "ports": [{"id": "bc489295-cd8a-4aa2-88bb-34446374100d", "name": "default", "type": "DEFAULT"}],
+        "trigger": {"id": "ff580cad-73d6-44fe-8f2c-4b8dc990ee70", "merge_behavior": "AWAIT_ATTRIBUTES"},
         "type": "GENERIC",
         "should_file_merge": True,
     }
{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py

@@ -5,6 +5,7 @@ import io
 import json
 from queue import Empty
 import re
+import time
 from unittest import mock
 from uuid import uuid4

@@ -133,6 +134,8 @@ class Workflow(BaseWorkflow):

     with mock.patch("builtins.open", mock.mock_open(read_data="104857600")):
         # WHEN we call the stream route
+        ts_ns = time.time_ns()
+        request_body["vembda_service_initiated_timestamp"] = ts_ns
         status_code, events = both_stream_types(request_body)

     # THEN we get a 200 response
@@ -177,6 +180,15 @@ class Workflow(BaseWorkflow):
     assert "is_new_server" in server_metadata
     assert server_metadata["is_new_server"] is False

+    # AND the initiated event should have initiated_latency within a reasonable range
+    assert "initiated_latency" in server_metadata, "initiated_latency should be present in server_metadata"
+    initiated_latency = server_metadata["initiated_latency"]
+    assert isinstance(initiated_latency, int), "initiated_latency should be an integer (nanoseconds)"
+    # Latency should be positive and less than 60 seconds (60_000_000_000 nanoseconds) for CI
+    assert (
+        0 < initiated_latency < 60_000_000_000
+    ), f"initiated_latency should be between 0 and 60 seconds, got {initiated_latency} ns"
+
     assert events[2]["name"] == "workflow.execution.fulfilled", events[2]
     assert events[2]["body"]["workflow_definition"]["module"] == ["test", "workflow"]
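The client-side half of what this test exercises is just a nanosecond wall-clock stamp on the request body. A minimal sketch of that step, with the surrounding payload fields elided because they are not recoverable from the diff:

    import time

    # Hypothetical request body; the real test builds a full workflow payload.
    request_body: dict = {}
    # Stamp the request with nanoseconds-since-epoch so the server can report
    # how long workflow initiation took relative to this moment.
    request_body["vembda_service_initiated_timestamp"] = time.time_ns()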
{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/api/workflow_view.py

@@ -8,6 +8,7 @@ import os
 import pkgutil
 from queue import Empty
 import sys
+import threading
 import time
 import traceback
 from uuid import uuid4
@@ -71,19 +72,195 @@ WORKFLOW_INITIATION_TIMEOUT_SECONDS = 60
 @bp.route("/stream", methods=["POST"])
 def stream_workflow_route() -> Response:
     data = request.get_json()
+    try:
+        context = WorkflowExecutorContext.model_validate(data)
+    except ValidationError as e:
+        error_message = e.errors()[0]["msg"]
+        error_location = e.errors()[0]["loc"]
+
+        return Response(
+            json.dumps({"detail": f"Invalid context: {error_message} at {error_location}"}),
+            status=400,
+            content_type="application/json",
+        )
+
+    headers = _get_headers(context)
+
+    # We can exceed the concurrency count currently with long running workflows due to a knative issue. So here
+    # if we detect a memory problem just exit us early
+    if not wait_for_available_process():
+        return Response(
+            json.dumps(
+                {
+                    "detail": f"Workflow server concurrent request rate exceeded. "
+                    f"Process count: {get_active_process_count()}"
+                }
+            ),
+            status=429,
+            content_type="application/json",
+            headers=headers,
+        )
+
+    start_workflow_state = _start_workflow(context)
+    if isinstance(start_workflow_state, Response):
+        return start_workflow_state
+
+    workflow_events, vembda_initiated_event, process, span_id, headers = start_workflow_state
+
+    def generator() -> Generator[str, None, None]:
+        try:
+            yield "\n"
+            yield vembda_initiated_event.model_dump_json()
+            yield "\n"
+            for row in workflow_events:
+                yield "\n"
+                if isinstance(row, dict):
+                    dump = json.dumps(row)
+                    yield dump
+                else:
+                    yield row
+                yield "\n"
+            # Sometimes the connections get hung after they finish with the vembda fulfilled event
+            # if it happens during a knative scale down event. So we emit an END string so that
+            # we don't have to do string compares on all the events for performance.
+            yield "\n"
+            yield "END"
+            yield "\n"
+
+            logger.info(
+                f"Workflow stream completed, execution ID: {span_id}, process count: {get_active_process_count()}"
+            )
+        except GeneratorExit:
+            # These can happen either from Vembda disconnects (possibily from predict disconnects) or
+            # from knative activator gateway timeouts which are caused by idleTimeout or responseStartSeconds
+            # being exceeded.
+            app.logger.error(
+                "Client disconnected in the middle of the Workflow Stream",
+                extra={
+                    "sentry_tags": {
+                        "server_version": vembda_initiated_event.body.server_version,
+                        "sdk_version": vembda_initiated_event.body.sdk_version,
+                    }
+                },
+            )
+            return
+        except Exception as e:
+            logger.exception("Error during workflow response stream generator", extra={"error": e})
+            yield "\n"
+            yield "END"
+            yield "\n"
+            return
+        finally:
+            if ENABLE_PROCESS_WRAPPER:
+                try:
+                    if process and process.is_alive():
+                        process.kill()
+                    if process:
+                        increment_process_count(-1)
+                        remove_active_span_id(span_id)
+                except Exception as e:
+                    logger.error("Failed to kill process", e)
+            else:
+                increment_process_count(-1)
+                remove_active_span_id(span_id)
+
+    resp = Response(
+        stream_with_context(generator()),
+        status=200,
+        content_type="application/x-ndjson",
+        headers=headers,
+    )
+    return resp

+
+@bp.route("/async-exec", methods=["POST"])
+def async_exec_workflow() -> Response:
+    data = request.get_json()
     try:
         context = WorkflowExecutorContext.model_validate(data)
     except ValidationError as e:
         error_message = e.errors()[0]["msg"]
         error_location = e.errors()[0]["loc"]

+        # TODO need to convert this to a vembda event so that trigger'd execs can me notified
+        # can either do it here in the workflow server or
         return Response(
             json.dumps({"detail": f"Invalid context: {error_message} at {error_location}"}),
             status=400,
             content_type="application/json",
         )

+    # Reject back to the queue handler if were low on memory here, though maybe we should update the is_available
+    # route to look at memory too. Don't send this response as an event. Though we might want some logic to catch
+    # if they have a workflow server that can never start a workflow because the base image uses so much memory.
+    if not wait_for_available_process():
+        return Response(
+            json.dumps({"detail": f"Server resources low." f"Process count: {get_active_process_count()}"}),
+            status=429,
+            content_type="application/json",
+        )
+
+    def run_workflow_background() -> None:
+        process: Optional[Process] = None
+        span_id: Optional[str] = None
+
+        try:
+            start_workflow_result = _start_workflow(context)
+            if isinstance(start_workflow_result, Response):
+                # TODO same here, should return this response as en event or it will get yeeted to the nether
+                # return start_workflow_result
+                return
+
+            workflow_events, vembda_initiated_event, process, span_id, headers = start_workflow_result
+
+            for _ in workflow_events:
+                # This is way inefficient in process mode since were just having the main proc stream the events
+                # to nowhere wasting memory I/O and cpu.
+                continue
+            logger.info(
+                f"Workflow async exec completed, execution ID: {span_id}, process count: {get_active_process_count()}"
+            )
+        except Exception as e:
+            logger.exception("Error during workflow async background worker", e)
+        finally:
+            if ENABLE_PROCESS_WRAPPER:
+                try:
+                    if process and process.is_alive():
+                        process.kill()
+                    if process:
+                        increment_process_count(-1)
+                    if span_id:
+                        remove_active_span_id(span_id)
+                except Exception as e:
+                    logger.error("Failed to kill process", e)
+            else:
+                increment_process_count(-1)
+                if span_id:
+                    remove_active_span_id(span_id)
+
+    thread = threading.Thread(target=run_workflow_background)
+    thread.start()
+
+    return Response(
+        json.dumps({"success": True}),
+        status=200,
+        content_type="application/json",
+    )
+
+
+def _start_workflow(
+    context: WorkflowExecutorContext,
+) -> Union[
+    Response,
+    tuple[
+        Iterator[Union[str, dict]],
+        VembdaExecutionInitiatedEvent,
+        Optional[Process],
+        str,
+        dict[str, str],
+    ],
+]:
+    headers = _get_headers(context)
     logger.info(
         f"Starting Workflow Server Request, trace ID: {context.trace_id}, "
         f"process count: {get_active_process_count()}, process wrapper: {ENABLE_PROCESS_WRAPPER}"
@@ -100,29 +277,7 @@ def stream_workflow_route() -> Response:
         parent=None,
     )

-
-
-    headers = {
-        "X-Vellum-SDK-Version": vembda_initiated_event.body.sdk_version,
-        "X-Vellum-Server-Version": vembda_initiated_event.body.server_version,
-        "X-Vellum-Events-Emitted": str(is_events_emitting_enabled(context)),
-    }
-
-    # We can exceed the concurrency count currently with long running workflows due to a knative issue. So here
-    # if we detect a memory problem just exit us early
-    if not wait_for_available_process():
-        return Response(
-            json.dumps(
-                {
-                    "detail": f"Workflow server concurrent request rate exceeded. "
-                    f"Process count: {get_active_process_count()}"
-                }
-            ),
-            status=429,
-            content_type="application/json",
-            headers=headers,
-        )
-
+    output_queue: Queue[Union[str, dict]] = Queue()
     cancel_signal = MultiprocessingEvent()
     timeout_signal = MultiprocessingEvent()

@@ -131,7 +286,7 @@ def stream_workflow_route() -> Response:
     try:
         process = stream_workflow_process_timeout(
             executor_context=context,
-            queue=
+            queue=output_queue,
             cancel_signal=cancel_signal,
             timeout_signal=timeout_signal,
         )
@@ -139,10 +294,10 @@ def stream_workflow_route() -> Response:
     except Exception as e:
         logger.exception(e)

-
+        output_queue.put(create_vembda_rejected_event(context, traceback.format_exc()))

     try:
-        first_item =
+        first_item = output_queue.get(timeout=WORKFLOW_INITIATION_TIMEOUT_SECONDS)
     except Empty:
         logger.error("Request timed out trying to initiate the Workflow")

@@ -291,72 +446,9 @@ def stream_workflow_route() -> Response:
                 break
             yield event

-    workflow_events = process_events(
+    workflow_events = process_events(output_queue)

-
-    try:
-        yield "\n"
-        yield vembda_initiated_event.model_dump_json()
-        yield "\n"
-        for row in workflow_events:
-            yield "\n"
-            if isinstance(row, dict):
-                dump = json.dumps(row)
-                yield dump
-            else:
-                yield row
-            yield "\n"
-        # Sometimes the connections get hung after they finish with the vembda fulfilled event
-        # if it happens during a knative scale down event. So we emit an END string so that
-        # we don't have to do string compares on all the events for performance.
-        yield "\n"
-        yield "END"
-        yield "\n"
-
-        logger.info(
-            f"Workflow stream completed, execution ID: {span_id}, process count: {get_active_process_count()}"
-        )
-    except GeneratorExit:
-        # These can happen either from Vembda disconnects (possibily from predict disconnects) or
-        # from knative activator gateway timeouts which are caused by idleTimeout or responseStartSeconds
-        # being exceeded.
-        app.logger.error(
-            "Client disconnected in the middle of the Workflow Stream",
-            extra={
-                "sentry_tags": {
-                    "server_version": vembda_initiated_event.body.server_version,
-                    "sdk_version": vembda_initiated_event.body.sdk_version,
-                }
-            },
-        )
-        return
-    except Exception as e:
-        logger.exception("Error during workflow response stream generator", extra={"error": e})
-        yield "\n"
-        yield "END"
-        yield "\n"
-        return
-    finally:
-        if ENABLE_PROCESS_WRAPPER:
-            try:
-                if process and process.is_alive():
-                    process.kill()
-                if process:
-                    increment_process_count(-1)
-                    remove_active_span_id(span_id)
-            except Exception as e:
-                logger.error("Failed to kill process", e)
-        else:
-            increment_process_count(-1)
-            remove_active_span_id(span_id)
-
-    resp = Response(
-        stream_with_context(generator()),
-        status=200,
-        content_type="application/x-ndjson",
-        headers=headers,
-    )
-    return resp
+    return workflow_events, vembda_initiated_event, process, span_id, headers


 @bp.route("/stream-node", methods=["POST"])
@@ -436,6 +528,7 @@ def serialize_route() -> Response:
     files = data.get("files", {})
     workspace_api_key = data.get("workspace_api_key")
     is_new_server = data.get("is_new_server", False)
+    module = data.get("module")

     if not files:
         return Response(
@@ -448,7 +541,7 @@ def serialize_route() -> Response:

     # Generate a unique namespace for this serialization request
     namespace = get_random_namespace()
-    virtual_finder = VirtualFileFinder(files, namespace)
+    virtual_finder = VirtualFileFinder(files, namespace, source_module=module)

     headers = {
         "X-Vellum-Is-New-Server": str(is_new_server).lower(),
@@ -564,3 +657,12 @@ def startup_error_generator(
         },
     )
     return
+
+
+def _get_headers(context: WorkflowExecutorContext) -> dict[str, Union[str, Any]]:
+    headers = {
+        "X-Vellum-SDK-Version": get_version()["sdk_version"],
+        "X-Vellum-Server-Version": get_version()["server_version"],
+        "X-Vellum-Events-Emitted": str(is_events_emitting_enabled(context)),
+    }
+    return headers
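The /stream route above emits newline-delimited JSON events and terminates the stream with a bare END sentinel so clients can stop reading without string-comparing every event. A minimal client-side sketch of that contract follows; the host, port, and payload fields are illustrative assumptions, and requests is not a dependency of this package:

    import json

    import requests  # assumed available in the client environment

    # Hypothetical payload; a real request carries the full executor context.
    payload = {"files": {}, "inputs": []}

    with requests.post("http://localhost:8000/stream", json=payload, stream=True) as resp:
        for line in resp.iter_lines():
            if not line:
                continue  # the server interleaves blank lines between events
            if line == b"END":
                break  # sentinel marking the end of the event stream
            event = json.loads(line)
            print(event.get("name"))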
{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/core/executor.py

@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import datetime, timezone
 from io import StringIO
 import json
 import logging
@@ -11,7 +11,7 @@ from threading import Event as ThreadingEvent
 import time
 from traceback import format_exc
 from uuid import UUID, uuid4
-from typing import Any, Callable, Generator, Iterator, Optional, Tuple
+from typing import Any, Callable, Generator, Iterator, Optional, Tuple

 from vellum_ee.workflows.display.utils.events import event_enricher
 from vellum_ee.workflows.server.virtual_file_loader import VirtualFileFinder
@@ -191,6 +191,7 @@ def stream_workflow(
             previous_execution_id=executor_context.previous_execution_id,
             timeout=executor_context.timeout,
             trigger=trigger,
+            execution_id=executor_context.workflow_span_id,
         )
     except WorkflowInitializationException as e:
         cancel_watcher_kill_switch.set()
@@ -272,32 +273,11 @@ def stream_node(
     disable_redirect: bool = True,
 ) -> Iterator[dict]:
     workflow, namespace = _create_workflow(executor_context)
-    Node: Optional[Type[BaseNode]] = None
-
-    for workflow_node in workflow.get_nodes():
-        if executor_context.node_id and workflow_node.__id__ == executor_context.node_id:
-            Node = workflow_node
-            break
-        elif (
-            executor_context.node_module
-            and executor_context.node_name
-            and workflow_node.__name__ == executor_context.node_name
-            and workflow_node.__module__ == f"{namespace}.{executor_context.node_module}"
-        ):
-            Node = workflow_node
-            break
-
-    if not Node:
-        identifier = executor_context.node_id or f"{executor_context.node_module}.{executor_context.node_name}"
-        raise WorkflowInitializationException(
-            message=f"Node '{identifier}' not found in workflow",
-            workflow_definition=workflow.__class__,
-        )

     def call_node() -> Generator[dict[str, Any], Any, None]:
         executor_context.stream_start_time = time.time_ns()

-        for event in workflow.run_node(
+        for event in workflow.run_node(executor_context.node_ref, inputs=executor_context.inputs):
             yield event.model_dump(mode="json")

     return _call_stream(
@@ -358,7 +338,9 @@ def _call_stream(
 def _create_workflow(executor_context: BaseExecutorContext) -> Tuple[BaseWorkflow, str]:
     namespace = _get_file_namespace(executor_context)
     if namespace != LOCAL_WORKFLOW_MODULE:
-        sys.meta_path.append(
+        sys.meta_path.append(
+            VirtualFileFinder(executor_context.files, namespace, source_module=executor_context.module)
+        )

     workflow_context = _create_workflow_context(executor_context)
     Workflow = BaseWorkflow.load_from_module(namespace)
@@ -449,6 +431,14 @@ def _enrich_event(event: WorkflowEvent, executor_context: Optional[BaseExecutorC

     if executor_context is not None:
         metadata["is_new_server"] = executor_context.is_new_server
+
+        if executor_context.vembda_service_initiated_timestamp is not None and event.timestamp is not None:
+            event_ts = event.timestamp
+            if event_ts.tzinfo is None:
+                event_ts = event_ts.replace(tzinfo=timezone.utc)
+            event_ts_ns = int(event_ts.timestamp() * 1_000_000_000)
+            initiated_latency = event_ts_ns - executor_context.vembda_service_initiated_timestamp
+            metadata["initiated_latency"] = initiated_latency
     elif event.name == "workflow.execution.fulfilled" and is_deployment:
         metadata = {}
         memory_mb = get_memory_in_use_mb()
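For reference, the nanosecond arithmetic the hunk adds to _enrich_event reduces to a few lines. Below is a standalone sketch of that computation; the helper name initiated_latency_ns is invented here for illustration and is not part of the package:

    from datetime import datetime, timezone

    def initiated_latency_ns(event_ts: datetime, initiated_ts_ns: int) -> int:
        # Naive timestamps are assumed to be UTC, matching the diff above.
        if event_ts.tzinfo is None:
            event_ts = event_ts.replace(tzinfo=timezone.utc)
        # Convert seconds (float) to integer nanoseconds before subtracting.
        event_ts_ns = int(event_ts.timestamp() * 1_000_000_000)
        return event_ts_ns - initiated_ts_ns

    # An event stamped now, against a request initiated now, yields a latency near zero.
    now = datetime.now(timezone.utc)
    print(initiated_latency_ns(now, int(now.timestamp() * 1_000_000_000)))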
{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/core/utils.py

@@ -2,6 +2,7 @@ from datetime import datetime
 from uuid import uuid4
 from typing import Optional

+from workflow_server.config import IS_ASYNC_MODE
 from workflow_server.core.events import VembdaExecutionFulfilledBody, VembdaExecutionFulfilledEvent
 from workflow_server.core.workflow_executor_context import BaseExecutorContext

@@ -46,6 +47,9 @@ def serialize_vembda_rejected_event(


 def is_events_emitting_enabled(executor_context: Optional[BaseExecutorContext]) -> bool:
+    if IS_ASYNC_MODE:
+        return True
+
     if not executor_context:
         return False

{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/core/workflow_executor_context.py

@@ -3,7 +3,7 @@ from functools import cached_property
 import os
 import time
 from uuid import UUID
-from typing import Any, Optional
+from typing import Any, Optional, Union
 from typing_extensions import Self

 from flask import has_request_context, request
@@ -37,6 +37,10 @@ class BaseExecutorContext(UniversalBaseModel):
     feature_flags: Optional[dict[str, bool]] = None
     is_new_server: bool = False
     trigger_id: Optional[UUID] = None
+    # The actual 'execution id' of the workflow that we pass into the workflow
+    # when running in async mode.
+    workflow_span_id: Optional[UUID] = None
+    vembda_service_initiated_timestamp: Optional[int] = None

     @field_validator("inputs", mode="before")
     @classmethod
@@ -87,6 +91,18 @@ class NodeExecutorContext(BaseExecutorContext):
     node_module: Optional[str] = None
     node_name: Optional[str] = None

+    @property
+    def node_ref(self) -> Union[UUID, str]:
+        """
+        Returns the node reference for use with workflow.run_node().
+
+        Returns node_id if it exists, otherwise returns the combination
+        of node_module and node_name as a fully qualified string.
+        """
+        if self.node_id:
+            return self.node_id
+        return f"{self.node_module}.{self.node_name}"
+
     @model_validator(mode="after")
     def validate_node_identification(self) -> Self:
         if not self.node_id and not (self.node_module and self.node_name):
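The node_ref resolution rule is small enough to show in isolation. This sketch mirrors the property above as a free function so it runs without the surrounding pydantic model; the example values are made up:

    from typing import Optional, Union
    from uuid import UUID, uuid4

    def node_ref(node_id: Optional[UUID], node_module: Optional[str], node_name: Optional[str]) -> Union[UUID, str]:
        # A concrete node_id wins; otherwise fall back to "module.ClassName".
        if node_id:
            return node_id
        return f"{node_module}.{node_name}"

    print(node_ref(uuid4(), None, None))               # the UUID itself
    print(node_ref(None, "nodes.my_node", "MyNode"))   # "nodes.my_node.MyNode"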
{vellum_workflow_server-1.9.0.post2 → vellum_workflow_server-1.9.8.post2}/src/workflow_server/start.py

@@ -33,6 +33,7 @@ class CustomGunicornLogger(glogging.Logger):
         logger = logging.getLogger("gunicorn.access")
         logger.addFilter(HealthCheckFilter())
         logger.addFilter(SignalFilter())
+        logger.addFilter(StatusIsAvailableFilter())


 class HealthCheckFilter(logging.Filter):
@@ -45,6 +46,11 @@ class SignalFilter(logging.Filter):
         return "SIGTERM" not in record.getMessage()


+class StatusIsAvailableFilter(logging.Filter):
+    def filter(self, record: Any) -> bool:
+        return "/status/is_available" not in record.getMessage()
+
+
 def start() -> None:
     if not is_development():
         start_oom_killer_worker()
vellum_workflow_server-1.9.8.post2/src/workflow_server/utils/exit_handler.py

@@ -0,0 +1,56 @@
+from datetime import datetime
+import logging
+import multiprocessing
+import signal
+from time import sleep
+from typing import Any
+
+from workflow_server.config import IS_ASYNC_MODE, is_development
+from workflow_server.utils.system_utils import get_active_process_count
+
+logger = logging.getLogger(__name__)
+process_killed_switch = multiprocessing.Event()
+
+
+def _wait_for_workers() -> None:
+    # Would be annoying to have this on for dev since would prevent reload restarts. Also disabling this
+    # for non async mode for now since it shouldn't be needed anyway cus we keep the requests open.
+    if is_development() and not IS_ASYNC_MODE:
+        return
+
+    start_time = datetime.now()
+    loops = 0
+
+    while get_active_process_count() > 0:
+        if loops % 30 == 0:
+            logger.info("Waiting for workflow processes to finish...")
+
+        # TODO needa pass in max workflow time here for VPC
+        if (datetime.now() - start_time).total_seconds() > 1800:
+            logger.warning("Max elapsed time waiting for workflow processes to complete exceeded, shutting down")
+            exit(1)
+
+        sleep(1)
+        loops += 1
+
+
+def gunicorn_exit_handler(_worker: Any) -> None:
+    logger.info("Received gunicorn kill signal")
+    process_killed_switch.set()
+    _wait_for_workers()
+
+
+def exit_handler(_signal: int, _frame: Any) -> None:
+    """
+    Gunicorn overrides this signal handler but theres periods where the gunicorn server
+    hasn't initialized or for local dev where this will get called.
+    """
+    process_killed_switch.set()
+    logger.warning("Received kill signal")
+    _wait_for_workers()
+    exit(1)
+
+
+def init_signal_handlers() -> None:
+    signal.signal(signal.SIGTERM, exit_handler)
+    signal.signal(signal.SIGINT, exit_handler)
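The graceful-drain behavior above only takes effect once the handlers are installed. A hedged sketch of the typical wiring in a gunicorn config module follows; worker_int is a standard gunicorn server hook, but the exact hook this package registers is not shown in the diff:

    # gunicorn.conf.py (illustrative, not part of the package)
    from workflow_server.utils.exit_handler import gunicorn_exit_handler, init_signal_handlers

    def worker_int(worker):
        # gunicorn invokes this hook when a worker receives SIGINT/SIGQUIT;
        # delegate so in-flight workflow processes are drained before exit.
        gunicorn_exit_handler(worker)

    # Outside gunicorn (local dev), install the handlers directly.
    init_signal_handlers()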
vellum_workflow_server-1.9.0.post2/src/workflow_server/utils/exit_handler.py

@@ -1,27 +0,0 @@
-import logging
-import multiprocessing
-import signal
-from typing import Any
-
-logger = logging.getLogger(__name__)
-process_killed_switch = multiprocessing.Event()
-
-
-def gunicorn_exit_handler(_worker: Any) -> None:
-    process_killed_switch.set()
-    logger.warning("Received gunicorn kill signal")
-
-
-def exit_handler(_signal: int, _frame: Any) -> None:
-    """
-    Gunicorn overrides this signal handler but theres periods where the gunicorn server
-    hasn't initialized or for local dev where this will get called.
-    """
-    process_killed_switch.set()
-    logger.warning("Received kill signal")
-    exit(1)
-
-
-def init_signal_handlers() -> None:
-    signal.signal(signal.SIGTERM, exit_handler)
-    signal.signal(signal.SIGINT, exit_handler)