vellum-workflow-server 1.9.2__tar.gz → 1.10.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/PKG-INFO +2 -2
  2. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/pyproject.toml +2 -2
  3. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/api/tests/test_workflow_view.py +24 -24
  4. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py +12 -0
  5. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/api/workflow_view.py +2 -1
  6. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/core/executor.py +15 -25
  7. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/core/utils.py +4 -0
  8. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/core/workflow_executor_context.py +14 -1
  9. vellum_workflow_server-1.10.0/src/workflow_server/utils/exit_handler.py +56 -0
  10. vellum_workflow_server-1.9.2/src/workflow_server/utils/exit_handler.py +0 -27
  11. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/README.md +0 -0
  12. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/__init__.py +0 -0
  13. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/api/__init__.py +0 -0
  14. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/api/auth_middleware.py +0 -0
  15. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/api/healthz_view.py +0 -0
  16. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/api/status_view.py +0 -0
  17. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/api/tests/__init__.py +0 -0
  18. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/api/tests/test_input_display_mapping.py +0 -0
  19. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/code_exec_runner.py +0 -0
  20. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/config.py +0 -0
  21. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/core/__init__.py +0 -0
  22. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/core/cancel_workflow.py +0 -0
  23. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/core/events.py +0 -0
  24. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/logging_config.py +0 -0
  25. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/server.py +0 -0
  26. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/start.py +0 -0
  27. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/utils/__init__.py +0 -0
  28. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/utils/log_proxy.py +0 -0
  29. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/utils/oom_killer.py +0 -0
  30. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/utils/sentry.py +0 -0
  31. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/utils/system_utils.py +0 -0
  32. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/utils/tests/__init__.py +0 -0
  33. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/utils/tests/test_sentry_integration.py +0 -0
  34. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/utils/tests/test_system_utils.py +0 -0
  35. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/utils/tests/test_utils.py +0 -0
  36. {vellum_workflow_server-1.9.2 → vellum_workflow_server-1.10.0}/src/workflow_server/utils/utils.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: vellum-workflow-server
- Version: 1.9.2
+ Version: 1.10.0
  Summary:
  License: AGPL
  Requires-Python: >=3.9.0,<4
@@ -29,7 +29,7 @@ Requires-Dist: pyjwt (==2.10.0)
  Requires-Dist: python-dotenv (==1.0.1)
  Requires-Dist: retrying (==1.3.4)
  Requires-Dist: sentry-sdk[flask] (==2.20.0)
- Requires-Dist: vellum-ai (==1.9.2)
+ Requires-Dist: vellum-ai (==1.10.0)
  Description-Content-Type: text/markdown

  # Vellum Workflow Runner Server
pyproject.toml
@@ -3,7 +3,7 @@ name = "vellum-workflow-server"

  [tool.poetry]
  name = "vellum-workflow-server"
- version = "1.9.2"
+ version = "1.10.0"
  description = ""
  readme = "README.md"
  authors = []
@@ -45,7 +45,7 @@ flask = "2.3.3"
  orderly-set = "5.2.2"
  pebble = "5.0.7"
  gunicorn = "23.0.0"
- vellum-ai = "1.9.2"
+ vellum-ai = "1.10.0"
  python-dotenv = "1.0.1"
  retrying = "1.3.4"
  sentry-sdk = {extras = ["flask"], version = "2.20.0"}
src/workflow_server/api/tests/test_workflow_view.py
@@ -63,11 +63,11 @@ class TestNode(BaseNode):
  "comment": {"expanded": True, "value": "A test node for processing data."},
  "position": {"x": 0.0, "y": 0.0},
  },
- "id": "7a8b251d-f5ca-462a-b293-071d219460fb",
+ "id": "6f4c9178-9f46-4723-bcb7-0bd59db54eca",
  "label": "Test Node",
  "outputs": [],
- "ports": [{"id": "a3a0eefd-45d0-4f13-8c58-a836a9f7f9ed", "name": "default", "type": "DEFAULT"}],
- "trigger": {"id": "a022e36c-9852-4772-9be3-3c6c147fd811", "merge_behavior": "AWAIT_ATTRIBUTES"},
+ "ports": [{"id": "4394823f-79a8-4dbc-99ae-06a1df6c7408", "name": "default", "type": "DEFAULT"}],
+ "trigger": {"id": "07240af1-67c6-4460-b53d-53f0b0f1b90e", "merge_behavior": "AWAIT_ATTRIBUTES"},
  "type": "GENERIC",
  }

@@ -127,11 +127,11 @@ class SomeOtherNode(BaseNode):
  "comment": {"expanded": True, "value": "This is Some Node."},
  "position": {"x": 0.0, "y": 0.0},
  },
- "id": "1e559c2e-db82-41f0-9ceb-5e89b0c5a0a3",
+ "id": "89e84bac-5a5f-4f64-8083-7d3ebec98be1",
  "label": "Some Node",
  "outputs": [],
- "ports": [{"id": "48e39e97-5fd4-471e-b4f2-51d3baf06456", "name": "default", "type": "DEFAULT"}],
- "trigger": {"id": "e3381fb7-61fc-4c46-ae8e-51fc463b6a59", "merge_behavior": "AWAIT_ATTRIBUTES"},
+ "ports": [{"id": "2983ea5c-1d29-483a-b896-53098f5de4f1", "name": "default", "type": "DEFAULT"}],
+ "trigger": {"id": "6996efb0-5a20-4719-8835-34fe6552764a", "merge_behavior": "AWAIT_ATTRIBUTES"},
  "type": "GENERIC",
  }

@@ -150,11 +150,11 @@ class SomeOtherNode(BaseNode):
  "comment": {"expanded": True, "value": "This is Some Other Node."},
  "position": {"x": 0.0, "y": 0.0},
  },
- "id": "7aee541b-b245-4c8a-9137-3e4631d5100c",
+ "id": "3cdbba02-8a34-4e0f-8b94-770a944dcaa3",
  "label": "Some Other Node",
  "outputs": [],
- "ports": [{"id": "fb66b46a-d970-4bc9-83ea-70c154c57ddd", "name": "default", "type": "DEFAULT"}],
- "trigger": {"id": "13fa2714-20b3-4bc3-ab79-621a188e3bfa", "merge_behavior": "AWAIT_ATTRIBUTES"},
+ "ports": [{"id": "1839bde5-2ad4-4723-b21b-2c55fa833a7a", "name": "default", "type": "DEFAULT"}],
+ "trigger": {"id": "c36df8a8-5624-45be-99c9-826cf511a951", "merge_behavior": "AWAIT_ATTRIBUTES"},
  "type": "GENERIC",
  }

@@ -222,11 +222,11 @@ class HelperClass:
  "comment": {"expanded": True, "value": "Processes input data."},
  "position": {"x": 0.0, "y": 0.0},
  },
- "id": "f92c09f0-0434-46cb-829d-a73f801d6343",
+ "id": "7121bcb9-98a1-4907-bf9b-9734d773fd15",
  "label": "Processing Node",
  "outputs": [],
- "ports": [{"id": "abaa2984-b312-4491-b069-e689759f72c8", "name": "default", "type": "DEFAULT"}],
- "trigger": {"id": "35378c2b-f089-44af-ac37-efe4ea42c817", "merge_behavior": "AWAIT_ATTRIBUTES"},
+ "ports": [{"id": "de27da74-30e9-4e7b-95c2-92bdfc5bf042", "name": "default", "type": "DEFAULT"}],
+ "trigger": {"id": "e02bd85e-8b03-4b21-8b3e-f411042334ce", "merge_behavior": "AWAIT_ATTRIBUTES"},
  "type": "GENERIC",
  }

@@ -240,11 +240,11 @@ class HelperClass:
  "comment": {"expanded": True, "value": "Transforms data format."},
  "position": {"x": 0.0, "y": 0.0},
  },
- "id": "09ca32f7-c8f2-4469-97e5-1f288f85127a",
+ "id": "6a785cb0-f631-4f03-94c6-e82331c14c1a",
  "label": "Transformation Node",
  "outputs": [],
- "ports": [{"id": "88778117-fbfc-4b44-964b-5a4994aa2f24", "name": "default", "type": "DEFAULT"}],
- "trigger": {"id": "5d096263-7fbf-490a-83b7-e441852b5fb6", "merge_behavior": "AWAIT_ATTRIBUTES"},
+ "ports": [{"id": "67a13ea0-fd6b-44dc-af46-c72da06aa11f", "name": "default", "type": "DEFAULT"}],
+ "trigger": {"id": "08d4e317-baa8-478f-b278-99362e50e6b4", "merge_behavior": "AWAIT_ATTRIBUTES"},
  "type": "GENERIC",
  }

@@ -306,11 +306,11 @@ class BrokenNode(BaseNode)
  "comment": {"expanded": True, "value": "This is Some Node."},
  "position": {"x": 0.0, "y": 0.0},
  },
- "id": "1e559c2e-db82-41f0-9ceb-5e89b0c5a0a3",
+ "id": "a2706730-074b-4ea3-968a-25e68af1caed",
  "label": "Some Node",
  "outputs": [],
- "ports": [{"id": "48e39e97-5fd4-471e-b4f2-51d3baf06456", "name": "default", "type": "DEFAULT"}],
- "trigger": {"id": "e3381fb7-61fc-4c46-ae8e-51fc463b6a59", "merge_behavior": "AWAIT_ATTRIBUTES"},
+ "ports": [{"id": "e0ee3653-e071-4b91-9dfc-5e1dca9c665b", "name": "default", "type": "DEFAULT"}],
+ "trigger": {"id": "8d931b01-30ca-4c0d-b1b7-7c18379c83e6", "merge_behavior": "AWAIT_ATTRIBUTES"},
  "type": "GENERIC",
  }

@@ -371,12 +371,12 @@ class MyAdditionNode(BaseNode):
  "adornments": None,
  "attributes": [
  {
- "id": "aed3bcbb-d243-4a77-bb5e-409e9a28e868",
+ "id": "4223b340-447f-46c2-b35d-30ef16c5ae17",
  "name": "arg1",
  "value": None,
  },
  {
- "id": "9225d225-a41b-4642-8964-f28f58dcf4bf",
+ "id": "1de0f46a-95f6-4cd0-bb0f-e2414054d507",
  "name": "arg2",
  "value": None,
  },
@@ -387,11 +387,11 @@ class MyAdditionNode(BaseNode):
  "comment": {"expanded": True, "value": "Custom node that performs simple addition."},
  "position": {"x": 0.0, "y": 0.0},
  },
- "id": "195cd69d-3d2d-41e4-a432-16c433cb8d34",
+ "id": "2464b610-fb6d-495b-b17c-933ee147f19f",
  "label": "My Addition Node",
- "outputs": [{"id": "3d8e40cb-2aa8-44bd-ae6a-708a9fbc4779", "name": "result", "type": "NUMBER", "value": None}],
- "ports": [{"id": "9a9e4ef6-febf-4093-a515-217bbb1373db", "name": "default", "type": "DEFAULT"}],
- "trigger": {"id": "a5298668-d808-4a45-a62e-790943948e8a", "merge_behavior": "AWAIT_ATTRIBUTES"},
+ "outputs": [{"id": "f39d85c9-e7bf-45e1-bb67-f16225db0118", "name": "result", "type": "NUMBER", "value": None}],
+ "ports": [{"id": "bc489295-cd8a-4aa2-88bb-34446374100d", "name": "default", "type": "DEFAULT"}],
+ "trigger": {"id": "ff580cad-73d6-44fe-8f2c-4b8dc990ee70", "merge_behavior": "AWAIT_ATTRIBUTES"},
  "type": "GENERIC",
  "should_file_merge": True,
  }
src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py
@@ -5,6 +5,7 @@ import io
  import json
  from queue import Empty
  import re
+ import time
  from unittest import mock
  from uuid import uuid4

@@ -133,6 +134,8 @@ class Workflow(BaseWorkflow):

  with mock.patch("builtins.open", mock.mock_open(read_data="104857600")):
  # WHEN we call the stream route
+ ts_ns = time.time_ns()
+ request_body["vembda_service_initiated_timestamp"] = ts_ns
  status_code, events = both_stream_types(request_body)

  # THEN we get a 200 response
@@ -177,6 +180,15 @@ class Workflow(BaseWorkflow):
  assert "is_new_server" in server_metadata
  assert server_metadata["is_new_server"] is False

+ # AND the initiated event should have initiated_latency within a reasonable range
+ assert "initiated_latency" in server_metadata, "initiated_latency should be present in server_metadata"
+ initiated_latency = server_metadata["initiated_latency"]
+ assert isinstance(initiated_latency, int), "initiated_latency should be an integer (nanoseconds)"
+ # Latency should be positive and less than 60 seconds (60_000_000_000 nanoseconds) for CI
+ assert (
+ 0 < initiated_latency < 60_000_000_000
+ ), f"initiated_latency should be between 0 and 60 seconds, got {initiated_latency} ns"
+
  assert events[2]["name"] == "workflow.execution.fulfilled", events[2]
  assert events[2]["body"]["workflow_definition"]["module"] == ["test", "workflow"]
src/workflow_server/api/workflow_view.py
@@ -528,6 +528,7 @@ def serialize_route() -> Response:
  files = data.get("files", {})
  workspace_api_key = data.get("workspace_api_key")
  is_new_server = data.get("is_new_server", False)
+ module = data.get("module")

  if not files:
  return Response(
@@ -540,7 +541,7 @@

  # Generate a unique namespace for this serialization request
  namespace = get_random_namespace()
- virtual_finder = VirtualFileFinder(files, namespace)
+ virtual_finder = VirtualFileFinder(files, namespace, source_module=module)

  headers = {
  "X-Vellum-Is-New-Server": str(is_new_server).lower(),
src/workflow_server/core/executor.py
@@ -1,4 +1,4 @@
- from datetime import datetime
+ from datetime import datetime, timezone
  from io import StringIO
  import json
  import logging
@@ -11,7 +11,7 @@ from threading import Event as ThreadingEvent
  import time
  from traceback import format_exc
  from uuid import UUID, uuid4
- from typing import Any, Callable, Generator, Iterator, Optional, Tuple, Type
+ from typing import Any, Callable, Generator, Iterator, Optional, Tuple

  from vellum_ee.workflows.display.utils.events import event_enricher
  from vellum_ee.workflows.server.virtual_file_loader import VirtualFileFinder
@@ -191,6 +191,7 @@ def stream_workflow(
  previous_execution_id=executor_context.previous_execution_id,
  timeout=executor_context.timeout,
  trigger=trigger,
+ execution_id=executor_context.workflow_span_id,
  )
  except WorkflowInitializationException as e:
  cancel_watcher_kill_switch.set()
@@ -272,32 +273,11 @@ def stream_node(
  disable_redirect: bool = True,
  ) -> Iterator[dict]:
  workflow, namespace = _create_workflow(executor_context)
- Node: Optional[Type[BaseNode]] = None
-
- for workflow_node in workflow.get_nodes():
- if executor_context.node_id and workflow_node.__id__ == executor_context.node_id:
- Node = workflow_node
- break
- elif (
- executor_context.node_module
- and executor_context.node_name
- and workflow_node.__name__ == executor_context.node_name
- and workflow_node.__module__ == f"{namespace}.{executor_context.node_module}"
- ):
- Node = workflow_node
- break
-
- if not Node:
- identifier = executor_context.node_id or f"{executor_context.node_module}.{executor_context.node_name}"
- raise WorkflowInitializationException(
- message=f"Node '{identifier}' not found in workflow",
- workflow_definition=workflow.__class__,
- )

  def call_node() -> Generator[dict[str, Any], Any, None]:
  executor_context.stream_start_time = time.time_ns()

- for event in workflow.run_node(Node, inputs=executor_context.inputs): # type: ignore[arg-type]
+ for event in workflow.run_node(executor_context.node_ref, inputs=executor_context.inputs):
  yield event.model_dump(mode="json")

  return _call_stream(
@@ -358,7 +338,9 @@ def _call_stream(
  def _create_workflow(executor_context: BaseExecutorContext) -> Tuple[BaseWorkflow, str]:
  namespace = _get_file_namespace(executor_context)
  if namespace != LOCAL_WORKFLOW_MODULE:
- sys.meta_path.append(VirtualFileFinder(executor_context.files, namespace))
+ sys.meta_path.append(
+ VirtualFileFinder(executor_context.files, namespace, source_module=executor_context.module)
+ )

  workflow_context = _create_workflow_context(executor_context)
  Workflow = BaseWorkflow.load_from_module(namespace)
@@ -449,6 +431,14 @@ def _enrich_event(event: WorkflowEvent, executor_context: Optional[BaseExecutorC

  if executor_context is not None:
  metadata["is_new_server"] = executor_context.is_new_server
+
+ if executor_context.vembda_service_initiated_timestamp is not None and event.timestamp is not None:
+ event_ts = event.timestamp
+ if event_ts.tzinfo is None:
+ event_ts = event_ts.replace(tzinfo=timezone.utc)
+ event_ts_ns = int(event_ts.timestamp() * 1_000_000_000)
+ initiated_latency = event_ts_ns - executor_context.vembda_service_initiated_timestamp
+ metadata["initiated_latency"] = initiated_latency
  elif event.name == "workflow.execution.fulfilled" and is_deployment:
  metadata = {}
  memory_mb = get_memory_in_use_mb()
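The hunk above computes initiated_latency by converting the event timestamp to nanoseconds and subtracting the caller-supplied vembda_service_initiated_timestamp. A small arithmetic sketch with made-up values (naive timestamps are assumed to be UTC, as in the code):

    # Illustrative values only; mirrors the arithmetic in _enrich_event above.
    from datetime import datetime, timezone

    vembda_service_initiated_timestamp = 1_700_000_000_000_000_000  # ns, sent by the caller

    event_ts = datetime(2023, 11, 14, 22, 13, 21)  # naive event timestamp, one second later
    if event_ts.tzinfo is None:
        event_ts = event_ts.replace(tzinfo=timezone.utc)  # treat naive timestamps as UTC

    event_ts_ns = int(event_ts.timestamp() * 1_000_000_000)  # seconds -> nanoseconds
    initiated_latency = event_ts_ns - vembda_service_initiated_timestamp
    print(initiated_latency)  # 1_000_000_000 ns, i.e. 1 second after initiation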
src/workflow_server/core/utils.py
@@ -2,6 +2,7 @@ from datetime import datetime
  from uuid import uuid4
  from typing import Optional

+ from workflow_server.config import IS_ASYNC_MODE
  from workflow_server.core.events import VembdaExecutionFulfilledBody, VembdaExecutionFulfilledEvent
  from workflow_server.core.workflow_executor_context import BaseExecutorContext

@@ -46,6 +47,9 @@ def serialize_vembda_rejected_event(


  def is_events_emitting_enabled(executor_context: Optional[BaseExecutorContext]) -> bool:
+ if IS_ASYNC_MODE:
+ return True
+
  if not executor_context:
  return False
src/workflow_server/core/workflow_executor_context.py
@@ -3,7 +3,7 @@ from functools import cached_property
  import os
  import time
  from uuid import UUID
- from typing import Any, Optional
+ from typing import Any, Optional, Union
  from typing_extensions import Self

  from flask import has_request_context, request
@@ -40,6 +40,7 @@ class BaseExecutorContext(UniversalBaseModel):
  # The actual 'execution id' of the workflow that we pass into the workflow
  # when running in async mode.
  workflow_span_id: Optional[UUID] = None
+ vembda_service_initiated_timestamp: Optional[int] = None

  @field_validator("inputs", mode="before")
  @classmethod
@@ -90,6 +91,18 @@ class NodeExecutorContext(BaseExecutorContext):
  node_module: Optional[str] = None
  node_name: Optional[str] = None

+ @property
+ def node_ref(self) -> Union[UUID, str]:
+ """
+ Returns the node reference for use with workflow.run_node().
+
+ Returns node_id if it exists, otherwise returns the combination
+ of node_module and node_name as a fully qualified string.
+ """
+ if self.node_id:
+ return self.node_id
+ return f"{self.node_module}.{self.node_name}"
+
  @model_validator(mode="after")
  def validate_node_identification(self) -> Self:
  if not self.node_id and not (self.node_module and self.node_name):
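The node_ref property above replaces the node-lookup loop removed from stream_node: it returns either the node_id UUID or a dotted "module.Name" string, and workflow.run_node() is now handed that reference directly. A standalone sketch of the same resolution logic (field values are made up, and the other required context fields are omitted):

    # Mirrors the logic of NodeExecutorContext.node_ref; not the actual class.
    from typing import Optional, Union
    from uuid import UUID

    def resolve_node_ref(
        node_id: Optional[UUID], node_module: Optional[str], node_name: Optional[str]
    ) -> Union[UUID, str]:
        # Prefer an explicit node_id; otherwise fall back to the dotted module/name string.
        if node_id:
            return node_id
        return f"{node_module}.{node_name}"

    print(resolve_node_ref(UUID("12345678-1234-5678-1234-567812345678"), None, None))
    print(resolve_node_ref(None, "nodes.my_node", "MyNode"))  # -> "nodes.my_node.MyNode"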
vellum_workflow_server-1.10.0/src/workflow_server/utils/exit_handler.py (new file)
@@ -0,0 +1,56 @@
+ from datetime import datetime
+ import logging
+ import multiprocessing
+ import signal
+ from time import sleep
+ from typing import Any
+
+ from workflow_server.config import IS_ASYNC_MODE, is_development
+ from workflow_server.utils.system_utils import get_active_process_count
+
+ logger = logging.getLogger(__name__)
+ process_killed_switch = multiprocessing.Event()
+
+
+ def _wait_for_workers() -> None:
+     # Would be annoying to have this on for dev since would prevent reload restarts. Also disabling this
+     # for non async mode for now since it shouldn't be needed anyway cus we keep the requests open.
+     if is_development() and not IS_ASYNC_MODE:
+         return
+
+     start_time = datetime.now()
+     loops = 0
+
+     while get_active_process_count() > 0:
+         if loops % 30 == 0:
+             logger.info("Waiting for workflow processes to finish...")
+
+         # TODO needa pass in max workflow time here for VPC
+         if (datetime.now() - start_time).total_seconds() > 1800:
+             logger.warning("Max elapsed time waiting for workflow processes to complete exceeded, shutting down")
+             exit(1)
+
+         sleep(1)
+         loops += 1
+
+
+ def gunicorn_exit_handler(_worker: Any) -> None:
+     logger.info("Received gunicorn kill signal")
+     process_killed_switch.set()
+     _wait_for_workers()
+
+
+ def exit_handler(_signal: int, _frame: Any) -> None:
+     """
+     Gunicorn overrides this signal handler but theres periods where the gunicorn server
+     hasn't initialized or for local dev where this will get called.
+     """
+     process_killed_switch.set()
+     logger.warning("Received kill signal")
+     _wait_for_workers()
+     exit(1)
+
+
+ def init_signal_handlers() -> None:
+     signal.signal(signal.SIGTERM, exit_handler)
+     signal.signal(signal.SIGINT, exit_handler)
vellum_workflow_server-1.9.2/src/workflow_server/utils/exit_handler.py (removed)
@@ -1,27 +0,0 @@
- import logging
- import multiprocessing
- import signal
- from typing import Any
-
- logger = logging.getLogger(__name__)
- process_killed_switch = multiprocessing.Event()
-
-
- def gunicorn_exit_handler(_worker: Any) -> None:
-     process_killed_switch.set()
-     logger.warning("Received gunicorn kill signal")
-
-
- def exit_handler(_signal: int, _frame: Any) -> None:
-     """
-     Gunicorn overrides this signal handler but theres periods where the gunicorn server
-     hasn't initialized or for local dev where this will get called.
-     """
-     process_killed_switch.set()
-     logger.warning("Received kill signal")
-     exit(1)
-
-
- def init_signal_handlers() -> None:
-     signal.signal(signal.SIGTERM, exit_handler)
-     signal.signal(signal.SIGINT, exit_handler)
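Compared with the removed module, the rewritten handlers now drain in-flight workflow processes before exiting instead of only flipping the kill switch. One plausible way to wire them into a gunicorn config is sketched below; worker_int, worker_abort, and post_fork are standard gunicorn server hooks, but whether and where this package registers them is not shown in the diff, so treat the wiring as an assumption.

    # gunicorn.conf.py -- hypothetical wiring, not taken from the package.
    from workflow_server.utils.exit_handler import gunicorn_exit_handler, init_signal_handlers

    def post_fork(server, worker):
        # Install SIGTERM/SIGINT handlers in each worker process.
        init_signal_handlers()

    def worker_int(worker):
        # Set process_killed_switch, then block in _wait_for_workers() until
        # active workflow processes finish (capped at 1800 seconds).
        gunicorn_exit_handler(worker)

    def worker_abort(worker):
        gunicorn_exit_handler(worker)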