vellum-workflow-server 1.9.1.post2.tar.gz → 1.9.7.post1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/PKG-INFO +2 -2
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/pyproject.toml +2 -2
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/api/tests/test_workflow_view.py +24 -24
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py +12 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/api/workflow_view.py +2 -1
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/core/executor.py +13 -2
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/core/workflow_executor_context.py +1 -0
- vellum_workflow_server-1.9.7.post1/src/workflow_server/utils/exit_handler.py +56 -0
- vellum_workflow_server-1.9.1.post2/src/workflow_server/utils/exit_handler.py +0 -27
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/README.md +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/__init__.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/api/__init__.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/api/auth_middleware.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/api/healthz_view.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/api/status_view.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/api/tests/__init__.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/api/tests/test_input_display_mapping.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/code_exec_runner.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/config.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/core/__init__.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/core/cancel_workflow.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/core/events.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/core/utils.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/logging_config.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/server.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/start.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/utils/__init__.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/utils/log_proxy.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/utils/oom_killer.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/utils/sentry.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/utils/system_utils.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/utils/tests/__init__.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/utils/tests/test_sentry_integration.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/utils/tests/test_system_utils.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/utils/tests/test_utils.py +0 -0
- {vellum_workflow_server-1.9.1.post2 → vellum_workflow_server-1.9.7.post1}/src/workflow_server/utils/utils.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-workflow-server
-Version: 1.9.1.post2
+Version: 1.9.7.post1
 Summary:
 License: AGPL
 Requires-Python: >=3.9.0,<4
@@ -29,7 +29,7 @@ Requires-Dist: pyjwt (==2.10.0)
 Requires-Dist: python-dotenv (==1.0.1)
 Requires-Dist: retrying (==1.3.4)
 Requires-Dist: sentry-sdk[flask] (==2.20.0)
-Requires-Dist: vellum-ai (==1.9.
+Requires-Dist: vellum-ai (==1.9.7)
 Description-Content-Type: text/markdown
 
 # Vellum Workflow Runner Server
pyproject.toml
@@ -3,7 +3,7 @@ name = "vellum-workflow-server"
 
 [tool.poetry]
 name = "vellum-workflow-server"
-version = "1.9.1.post2"
+version = "1.9.7.post1"
 description = ""
 readme = "README.md"
 authors = []
@@ -45,7 +45,7 @@ flask = "2.3.3"
 orderly-set = "5.2.2"
 pebble = "5.0.7"
 gunicorn = "23.0.0"
-vellum-ai = "1.9.
+vellum-ai = "1.9.7"
 python-dotenv = "1.0.1"
 retrying = "1.3.4"
 sentry-sdk = {extras = ["flask"], version = "2.20.0"}
src/workflow_server/api/tests/test_workflow_view.py
@@ -63,11 +63,11 @@ class TestNode(BaseNode):
 "comment": {"expanded": True, "value": "A test node for processing data."},
 "position": {"x": 0.0, "y": 0.0},
 },
-"id": "
+"id": "6f4c9178-9f46-4723-bcb7-0bd59db54eca",
 "label": "Test Node",
 "outputs": [],
-"ports": [{"id": "
-"trigger": {"id": "
+"ports": [{"id": "4394823f-79a8-4dbc-99ae-06a1df6c7408", "name": "default", "type": "DEFAULT"}],
+"trigger": {"id": "07240af1-67c6-4460-b53d-53f0b0f1b90e", "merge_behavior": "AWAIT_ATTRIBUTES"},
 "type": "GENERIC",
 }
 
@@ -127,11 +127,11 @@ class SomeOtherNode(BaseNode):
 "comment": {"expanded": True, "value": "This is Some Node."},
 "position": {"x": 0.0, "y": 0.0},
 },
-"id": "
+"id": "89e84bac-5a5f-4f64-8083-7d3ebec98be1",
 "label": "Some Node",
 "outputs": [],
-"ports": [{"id": "
-"trigger": {"id": "
+"ports": [{"id": "2983ea5c-1d29-483a-b896-53098f5de4f1", "name": "default", "type": "DEFAULT"}],
+"trigger": {"id": "6996efb0-5a20-4719-8835-34fe6552764a", "merge_behavior": "AWAIT_ATTRIBUTES"},
 "type": "GENERIC",
 }
 
@@ -150,11 +150,11 @@ class SomeOtherNode(BaseNode):
 "comment": {"expanded": True, "value": "This is Some Other Node."},
 "position": {"x": 0.0, "y": 0.0},
 },
-"id": "
+"id": "3cdbba02-8a34-4e0f-8b94-770a944dcaa3",
 "label": "Some Other Node",
 "outputs": [],
-"ports": [{"id": "
-"trigger": {"id": "
+"ports": [{"id": "1839bde5-2ad4-4723-b21b-2c55fa833a7a", "name": "default", "type": "DEFAULT"}],
+"trigger": {"id": "c36df8a8-5624-45be-99c9-826cf511a951", "merge_behavior": "AWAIT_ATTRIBUTES"},
 "type": "GENERIC",
 }
 
@@ -222,11 +222,11 @@ class HelperClass:
 "comment": {"expanded": True, "value": "Processes input data."},
 "position": {"x": 0.0, "y": 0.0},
 },
-"id": "
+"id": "7121bcb9-98a1-4907-bf9b-9734d773fd15",
 "label": "Processing Node",
 "outputs": [],
-"ports": [{"id": "
-"trigger": {"id": "
+"ports": [{"id": "de27da74-30e9-4e7b-95c2-92bdfc5bf042", "name": "default", "type": "DEFAULT"}],
+"trigger": {"id": "e02bd85e-8b03-4b21-8b3e-f411042334ce", "merge_behavior": "AWAIT_ATTRIBUTES"},
 "type": "GENERIC",
 }
 
@@ -240,11 +240,11 @@ class HelperClass:
 "comment": {"expanded": True, "value": "Transforms data format."},
 "position": {"x": 0.0, "y": 0.0},
 },
-"id": "
+"id": "6a785cb0-f631-4f03-94c6-e82331c14c1a",
 "label": "Transformation Node",
 "outputs": [],
-"ports": [{"id": "
-"trigger": {"id": "
+"ports": [{"id": "67a13ea0-fd6b-44dc-af46-c72da06aa11f", "name": "default", "type": "DEFAULT"}],
+"trigger": {"id": "08d4e317-baa8-478f-b278-99362e50e6b4", "merge_behavior": "AWAIT_ATTRIBUTES"},
 "type": "GENERIC",
 }
 
@@ -306,11 +306,11 @@ class BrokenNode(BaseNode)
 "comment": {"expanded": True, "value": "This is Some Node."},
 "position": {"x": 0.0, "y": 0.0},
 },
-"id": "
+"id": "a2706730-074b-4ea3-968a-25e68af1caed",
 "label": "Some Node",
 "outputs": [],
-"ports": [{"id": "
-"trigger": {"id": "
+"ports": [{"id": "e0ee3653-e071-4b91-9dfc-5e1dca9c665b", "name": "default", "type": "DEFAULT"}],
+"trigger": {"id": "8d931b01-30ca-4c0d-b1b7-7c18379c83e6", "merge_behavior": "AWAIT_ATTRIBUTES"},
 "type": "GENERIC",
 }
 
@@ -371,12 +371,12 @@ class MyAdditionNode(BaseNode):
 "adornments": None,
 "attributes": [
 {
-"id": "
+"id": "4223b340-447f-46c2-b35d-30ef16c5ae17",
 "name": "arg1",
 "value": None,
 },
 {
-"id": "
+"id": "1de0f46a-95f6-4cd0-bb0f-e2414054d507",
 "name": "arg2",
 "value": None,
 },
@@ -387,11 +387,11 @@ class MyAdditionNode(BaseNode):
 "comment": {"expanded": True, "value": "Custom node that performs simple addition."},
 "position": {"x": 0.0, "y": 0.0},
 },
-"id": "
+"id": "2464b610-fb6d-495b-b17c-933ee147f19f",
 "label": "My Addition Node",
-"outputs": [{"id": "
-"ports": [{"id": "
-"trigger": {"id": "
+"outputs": [{"id": "f39d85c9-e7bf-45e1-bb67-f16225db0118", "name": "result", "type": "NUMBER", "value": None}],
+"ports": [{"id": "bc489295-cd8a-4aa2-88bb-34446374100d", "name": "default", "type": "DEFAULT"}],
+"trigger": {"id": "ff580cad-73d6-44fe-8f2c-4b8dc990ee70", "merge_behavior": "AWAIT_ATTRIBUTES"},
 "type": "GENERIC",
 "should_file_merge": True,
 }
src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py
@@ -5,6 +5,7 @@ import io
 import json
 from queue import Empty
 import re
+import time
 from unittest import mock
 from uuid import uuid4
 
@@ -133,6 +134,8 @@ class Workflow(BaseWorkflow):
 
 with mock.patch("builtins.open", mock.mock_open(read_data="104857600")):
 # WHEN we call the stream route
+ts_ns = time.time_ns()
+request_body["vembda_service_initiated_timestamp"] = ts_ns
 status_code, events = both_stream_types(request_body)
 
 # THEN we get a 200 response
@@ -177,6 +180,15 @@ class Workflow(BaseWorkflow):
 assert "is_new_server" in server_metadata
 assert server_metadata["is_new_server"] is False
 
+# AND the initiated event should have initiated_latency within a reasonable range
+assert "initiated_latency" in server_metadata, "initiated_latency should be present in server_metadata"
+initiated_latency = server_metadata["initiated_latency"]
+assert isinstance(initiated_latency, int), "initiated_latency should be an integer (nanoseconds)"
+# Latency should be positive and less than 60 seconds (60_000_000_000 nanoseconds) for CI
+assert (
+    0 < initiated_latency < 60_000_000_000
+), f"initiated_latency should be between 0 and 60 seconds, got {initiated_latency} ns"
+
 assert events[2]["name"] == "workflow.execution.fulfilled", events[2]
 assert events[2]["body"]["workflow_definition"]["module"] == ["test", "workflow"]
 
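The updated test stamps the request with a nanosecond wall-clock time before calling the stream route, then checks that the initiated event's server_metadata reports an initiated_latency in a plausible range. As a rough sketch of what a caller supplies (not code from this package; every key except vembda_service_initiated_timestamp is a placeholder):

```python
import time

# Hypothetical caller-side payload for the stream route. Only
# "vembda_service_initiated_timestamp" is taken from the diff above;
# the remaining keys are illustrative placeholders.
request_body = {
    "execution_id": "00000000-0000-0000-0000-000000000000",  # placeholder
    "inputs": [],                                             # placeholder
    # Nanosecond epoch timestamp captured when the service dispatched the request;
    # the server subtracts it from the initiated event's timestamp.
    "vembda_service_initiated_timestamp": time.time_ns(),
}
```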
src/workflow_server/api/workflow_view.py
@@ -528,6 +528,7 @@ def serialize_route() -> Response:
 files = data.get("files", {})
 workspace_api_key = data.get("workspace_api_key")
 is_new_server = data.get("is_new_server", False)
+module = data.get("module")
 
 if not files:
 return Response(
@@ -540,7 +541,7 @@ def serialize_route() -> Response:
 
 # Generate a unique namespace for this serialization request
 namespace = get_random_namespace()
-virtual_finder = VirtualFileFinder(files, namespace)
+virtual_finder = VirtualFileFinder(files, namespace, source_module=module)
 
 headers = {
 "X-Vellum-Is-New-Server": str(is_new_server).lower(),
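serialize_route now also reads an optional module key from the request body and forwards it to VirtualFileFinder as source_module. A minimal sketch of the request shape, assuming a JSON body; only the keys read in the hunks above (files, workspace_api_key, is_new_server, module) come from the diff, and the values are placeholders:

```python
# Hypothetical body for the serialize route; all values are placeholders.
serialize_request = {
    "files": {
        "workflow.py": "class Workflow(BaseWorkflow): ...",  # placeholder file contents
    },
    "workspace_api_key": "<workspace api key>",  # placeholder
    "is_new_server": False,
    # New optional key, forwarded as
    # VirtualFileFinder(files, namespace, source_module=module).
    "module": "examples.workflow",  # placeholder; exact format not shown in this diff
}
```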
src/workflow_server/core/executor.py
@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import datetime, timezone
 from io import StringIO
 import json
 import logging
@@ -191,6 +191,7 @@ def stream_workflow(
 previous_execution_id=executor_context.previous_execution_id,
 timeout=executor_context.timeout,
 trigger=trigger,
+execution_id=executor_context.workflow_span_id,
 )
 except WorkflowInitializationException as e:
 cancel_watcher_kill_switch.set()
@@ -358,7 +359,9 @@ def _call_stream(
 def _create_workflow(executor_context: BaseExecutorContext) -> Tuple[BaseWorkflow, str]:
 namespace = _get_file_namespace(executor_context)
 if namespace != LOCAL_WORKFLOW_MODULE:
-sys.meta_path.append(
+sys.meta_path.append(
+    VirtualFileFinder(executor_context.files, namespace, source_module=executor_context.module)
+)
 
 workflow_context = _create_workflow_context(executor_context)
 Workflow = BaseWorkflow.load_from_module(namespace)
@@ -449,6 +452,14 @@ def _enrich_event(event: WorkflowEvent, executor_context: Optional[BaseExecutorC
 
 if executor_context is not None:
 metadata["is_new_server"] = executor_context.is_new_server
+
+if executor_context.vembda_service_initiated_timestamp is not None and event.timestamp is not None:
+    event_ts = event.timestamp
+    if event_ts.tzinfo is None:
+        event_ts = event_ts.replace(tzinfo=timezone.utc)
+    event_ts_ns = int(event_ts.timestamp() * 1_000_000_000)
+    initiated_latency = event_ts_ns - executor_context.vembda_service_initiated_timestamp
+    metadata["initiated_latency"] = initiated_latency
 elif event.name == "workflow.execution.fulfilled" and is_deployment:
 metadata = {}
 memory_mb = get_memory_in_use_mb()
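The enrichment hunk converts the event's datetime to nanoseconds (treating a naive timestamp as UTC) and subtracts the caller-supplied nanosecond timestamp. The same arithmetic in isolation, as a sketch (the helper name is ours, not the package's):

```python
from datetime import datetime, timezone

def initiated_latency_ns(event_timestamp: datetime, vembda_initiated_ts_ns: int) -> int:
    """Nanoseconds from the service-initiated timestamp to the event timestamp.

    Mirrors the logic added to _enrich_event: naive datetimes are assumed to be UTC.
    """
    if event_timestamp.tzinfo is None:
        event_timestamp = event_timestamp.replace(tzinfo=timezone.utc)
    event_ts_ns = int(event_timestamp.timestamp() * 1_000_000_000)
    return event_ts_ns - vembda_initiated_ts_ns

# Example: an event emitted 250 ms after dispatch yields roughly 250_000_000 ns,
# well inside the 0..60_000_000_000 ns window the new test asserts.
```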
src/workflow_server/core/workflow_executor_context.py
@@ -40,6 +40,7 @@ class BaseExecutorContext(UniversalBaseModel):
 # The actual 'execution id' of the workflow that we pass into the workflow
 # when running in async mode.
 workflow_span_id: Optional[UUID] = None
+vembda_service_initiated_timestamp: Optional[int] = None
 
 @field_validator("inputs", mode="before")
 @classmethod
vellum_workflow_server-1.9.7.post1/src/workflow_server/utils/exit_handler.py (new file)
@@ -0,0 +1,56 @@
+from datetime import datetime
+import logging
+import multiprocessing
+import signal
+from time import sleep
+from typing import Any
+
+from workflow_server.config import IS_ASYNC_MODE, is_development
+from workflow_server.utils.system_utils import get_active_process_count
+
+logger = logging.getLogger(__name__)
+process_killed_switch = multiprocessing.Event()
+
+
+def _wait_for_workers() -> None:
+    # Would be annoying to have this on for dev since would prevent reload restarts. Also disabling this
+    # for non async mode for now since it shouldn't be needed anyway cus we keep the requests open.
+    if is_development() and not IS_ASYNC_MODE:
+        return
+
+    start_time = datetime.now()
+    loops = 0
+
+    while get_active_process_count() > 0:
+        if loops % 30 == 0:
+            logger.info("Waiting for workflow processes to finish...")
+
+        # TODO needa pass in max workflow time here for VPC
+        if (datetime.now() - start_time).total_seconds() > 1800:
+            logger.warning("Max elapsed time waiting for workflow processes to complete exceeded, shutting down")
+            exit(1)
+
+        sleep(1)
+        loops += 1
+
+
+def gunicorn_exit_handler(_worker: Any) -> None:
+    logger.info("Received gunicorn kill signal")
+    process_killed_switch.set()
+    _wait_for_workers()
+
+
+def exit_handler(_signal: int, _frame: Any) -> None:
+    """
+    Gunicorn overrides this signal handler but theres periods where the gunicorn server
+    hasn't initialized or for local dev where this will get called.
+    """
+    process_killed_switch.set()
+    logger.warning("Received kill signal")
+    _wait_for_workers()
+    exit(1)
+
+
+def init_signal_handlers() -> None:
+    signal.signal(signal.SIGTERM, exit_handler)
+    signal.signal(signal.SIGINT, exit_handler)
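The rewritten handler no longer exits immediately on a kill signal; it sets process_killed_switch and then blocks in _wait_for_workers until active workflow processes drain, capped at 30 minutes. How gunicorn_exit_handler is registered is not part of this diff; one plausible wiring, purely an assumption on our part, is a gunicorn config that points the worker_int server hook at it:

```python
# gunicorn.conf.py -- hypothetical wiring, not taken from this package's sources.
from workflow_server.utils.exit_handler import gunicorn_exit_handler

def worker_int(worker):
    # gunicorn server hook called when a worker receives SIGINT/SIGQUIT:
    # flag shutdown and wait for in-flight workflow processes to finish
    # (bounded by the 30-minute cap inside _wait_for_workers).
    gunicorn_exit_handler(worker)
```

Outside gunicorn (local development, or the window before gunicorn installs its own handlers), init_signal_handlers() registers exit_handler for SIGTERM and SIGINT directly, as the function's docstring notes.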
vellum_workflow_server-1.9.1.post2/src/workflow_server/utils/exit_handler.py (deleted)
@@ -1,27 +0,0 @@
-import logging
-import multiprocessing
-import signal
-from typing import Any
-
-logger = logging.getLogger(__name__)
-process_killed_switch = multiprocessing.Event()
-
-
-def gunicorn_exit_handler(_worker: Any) -> None:
-    process_killed_switch.set()
-    logger.warning("Received gunicorn kill signal")
-
-
-def exit_handler(_signal: int, _frame: Any) -> None:
-    """
-    Gunicorn overrides this signal handler but theres periods where the gunicorn server
-    hasn't initialized or for local dev where this will get called.
-    """
-    process_killed_switch.set()
-    logger.warning("Received kill signal")
-    exit(1)
-
-
-def init_signal_handlers() -> None:
-    signal.signal(signal.SIGTERM, exit_handler)
-    signal.signal(signal.SIGINT, exit_handler)