vellum-workflow-server 1.3.2.post1-py3-none-any.whl → 1.3.3-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.

vellum_workflow_server-1.3.2.post1.dist-info/METADATA → vellum_workflow_server-1.3.3.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-workflow-server
-Version: 1.3.2.post1
+Version: 1.3.3
 Summary:
 License: AGPL
 Requires-Python: >=3.9.0,<4
@@ -29,7 +29,7 @@ Requires-Dist: pyjwt (==2.10.0)
 Requires-Dist: python-dotenv (==1.0.1)
 Requires-Dist: retrying (==1.3.4)
 Requires-Dist: sentry-sdk[flask] (==2.20.0)
-Requires-Dist: vellum-ai (==1.3.2)
+Requires-Dist: vellum-ai (==1.3.3)
 Description-Content-Type: text/markdown

 # Vellum Workflow Runner Server
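
The only metadata changes are the version bump and the matching exact pin on vellum-ai. As a minimal sketch (not part of the package), a deployment could verify that the installed vellum-ai distribution matches the pin above using only the standard library:

from importlib.metadata import version

EXPECTED_PIN = "1.3.3"  # from "Requires-Dist: vellum-ai (==1.3.3)" above

# Raises importlib.metadata.PackageNotFoundError if vellum-ai is not installed.
installed = version("vellum-ai")
if installed != EXPECTED_PIN:
    raise RuntimeError(f"vellum-ai {installed} installed; {EXPECTED_PIN} required")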
vellum_workflow_server-1.3.2.post1.dist-info/RECORD → vellum_workflow_server-1.3.3.dist-info/RECORD

@@ -6,14 +6,14 @@ workflow_server/api/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 workflow_server/api/tests/test_input_display_mapping.py,sha256=drBZqMudFyB5wgiUOcMgRXz7E7ge-Qgxbstw4E4f0zE,2211
 workflow_server/api/tests/test_workflow_view.py,sha256=gwb53E44LBm06XHY1UwfHqKG-UfwwICh_IYPtowN_kE,19085
 workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=1vr9NI894zdJmmnsy_wBSqaJG83RwnYSmqucknYf9bI,26304
-workflow_server/api/workflow_view.py,sha256=5embr0m7UNbo9dGrr8bLd1lG9ER76WzFas8Y3qYQakg,21463
+workflow_server/api/workflow_view.py,sha256=osYa14kdP3LI3HZzh2Nw16-GhI0bHXwCFgasUkfRjeI,21104
 workflow_server/code_exec_runner.py,sha256=lBnMIorPZL8zZBye6TjeCIs06WTJM7P2HR07B1fjJJI,2533
 workflow_server/config.py,sha256=DyTty8NrAwvtx-esM3KthnpsNh-nKdWNlovWQOgiGpg,1417
 workflow_server/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 workflow_server/core/cancel_workflow.py,sha256=Ffkc3mzmrdMEUcD-sHfEhX4IwVrka-E--SxKA1dUfIU,2185
 workflow_server/core/events.py,sha256=24MA66DVQuaLJJcZrS8IL1Zq4Ohi9CoouKZ5VgoH3Cs,1402
-workflow_server/core/executor.py,sha256=EC_AwqCgvjVuhbgceMEIN2fTe5R00ygEA0iW383kP9Q,18181
-workflow_server/core/utils.py,sha256=lgzxkAEjEXPxGXXQlUYTYuCdHht-eDJJmHj5AhEb3_o,1500
+workflow_server/core/executor.py,sha256=M_LyVOcPDzAA3u1WBLMpz9gftjc_RtO3hYida6tkSJo,18383
+workflow_server/core/utils.py,sha256=cmwHbKCfXqtUutBD3akGus0Ga7a1xG3zlOw-jEMx6mI,1795
 workflow_server/core/workflow_executor_context.py,sha256=w3OhV_AXpgh7AxpjEsc0vo-IJypgJcgr5DXJCqGptOU,1587
 workflow_server/server.py,sha256=QBU12AaAfAgLqfCDBd24qIJl_mbheiq0-hfcWV7rZM4,1234
 workflow_server/start.py,sha256=qpIg0SgIgz8RNyc8Cu9LxyzXdOXZRv9qq3M3uSBbgD0,2180
@@ -28,7 +28,7 @@ workflow_server/utils/tests/test_sentry_integration.py,sha256=LGmWiaLhFrx-jslrRj
 workflow_server/utils/tests/test_system_utils.py,sha256=_4GwXvVvU5BrATxUEWwQIPg0bzQXMWBtiBmjP8MTxJM,4314
 workflow_server/utils/tests/test_utils.py,sha256=qwK5Rmy3RQyjtlUrYAuGuDlBeRzZKsf1yS-y2IpUizQ,6452
 workflow_server/utils/utils.py,sha256=Wqqn-1l2ugkGgy5paWWdt0AVxAyPMQCYcnRSSOMjXlA,4355
-vellum_workflow_server-1.3.2.post1.dist-info/METADATA,sha256=8xI527ofEZSbeJ-uqCaC-dJCkBoHcNxJhfYSrCS-uuo,2273
-vellum_workflow_server-1.3.2.post1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-vellum_workflow_server-1.3.2.post1.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
-vellum_workflow_server-1.3.2.post1.dist-info/RECORD,,
+vellum_workflow_server-1.3.3.dist-info/METADATA,sha256=h2P_JqWFZfE5hBp1mymzyhsKJs8w6UkoZfo90a0E3Lc,2267
+vellum_workflow_server-1.3.3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+vellum_workflow_server-1.3.3.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
+vellum_workflow_server-1.3.3.dist-info/RECORD,,
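
Only three source files changed hashes: workflow_view.py, executor.py, and utils.py; the rest of the RECORD churn is the dist-info path rename. RECORD entries have the form path,sha256=<digest>,size, where the digest is the urlsafe-base64 SHA-256 of the file with padding stripped (standard wheel format). A sketch for spot-checking an entry against an unpacked wheel; the example path and digest are taken from the 1.3.3 RECORD above:

import base64
import hashlib

def record_digest(path: str) -> str:
    # Wheel RECORD digests: urlsafe base64 of the raw SHA-256, '=' padding stripped.
    with open(path, "rb") as f:
        raw = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

# Expected for the 1.3.3 wheel:
# record_digest("workflow_server/core/utils.py")
#   == "sha256=cmwHbKCfXqtUutBD3akGus0Ga7a1xG3zlOw-jEMx6mI"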
workflow_server/api/workflow_view.py

@@ -36,7 +36,11 @@ from workflow_server.core.events import (
     VembdaExecutionInitiatedEvent,
 )
 from workflow_server.core.executor import stream_node_pebble_timeout, stream_workflow, stream_workflow_process_timeout
-from workflow_server.core.utils import create_vembda_rejected_event, serialize_vembda_rejected_event
+from workflow_server.core.utils import (
+    create_vembda_rejected_event,
+    is_events_emitting_enabled,
+    serialize_vembda_rejected_event,
+)
 from workflow_server.core.workflow_executor_context import (
     DEFAULT_TIMEOUT_SECONDS,
     NodeExecutorContext,
@@ -60,17 +64,6 @@ CUSTOM_NODES_DIRECTORY = "vellum_custom_nodes"
 WORKFLOW_INITIATION_TIMEOUT_SECONDS = 60


-def _events_emission_enabled(context: Union[WorkflowExecutorContext, NodeExecutorContext]) -> bool:
-    flags = context.feature_flags
-    if not (flags and flags.get("vembda-event-emitting-enabled")):
-        return False
-    try:
-        module = importlib.import_module("vellum.workflows.events.emitters.vellum")
-        return hasattr(module, "VellumEmitter")
-    except Exception:
-        return False
-
-
 @bp.route("/stream", methods=["POST"])
 def stream_workflow_route() -> Response:
     data = request.get_json()
@@ -107,7 +100,7 @@ def stream_workflow_route() -> Response:
     headers = {
         "X-Vellum-SDK-Version": vembda_initiated_event.body.sdk_version,
         "X-Vellum-Server-Version": vembda_initiated_event.body.server_version,
-        "X-Vellum-Events-Emitted": str(_events_emission_enabled(context)),
+        "X-Vellum-Events-Emitted": str(is_events_emitting_enabled(context)),
     }

     # We can exceed the concurrency count currently with long running workflows due to a knative issue. So here
@@ -414,7 +407,7 @@ def stream_node_route() -> Response:
     headers = {
         "X-Vellum-SDK-Version": vembda_initiated_event.body.sdk_version,
         "X-Vellum-Server-Version": vembda_initiated_event.body.server_version,
-        "X-Vellum-Events-Emitted": str(_events_emission_enabled(context)),
+        "X-Vellum-Events-Emitted": str(is_events_emitting_enabled(context)),
    }

     resp = Response(
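
Both stream routes now derive the X-Vellum-Events-Emitted header from the shared is_events_emitting_enabled helper. Note the semantic shift: the removed _events_emission_enabled also probed that vellum.workflows.events.emitters.vellum exposes VellumEmitter, whereas the new helper consults the feature flag alone, so an unimportable emitter now surfaces when the emitter is constructed in executor.py instead of silently flipping the header to False. A minimal sketch of the header derivation; SimpleNamespace stands in for the real executor context, since only the feature_flags attribute is read:

from types import SimpleNamespace

from workflow_server.core.utils import is_events_emitting_enabled

ctx_on = SimpleNamespace(feature_flags={"vembda-event-emitting-enabled": True})
ctx_off = SimpleNamespace(feature_flags={})

# Mirrors the route code: str(...) yields the literal header value.
print(str(is_events_emitting_enabled(ctx_on)))   # "True"
print(str(is_events_emitting_enabled(ctx_off)))  # "False"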
workflow_server/core/executor.py

@@ -8,6 +8,7 @@ import os
 import random
 import string
 import sys
+import threading
 from threading import Event as ThreadingEvent
 import time
 from traceback import format_exc
@@ -43,7 +44,7 @@ from workflow_server.core.events import (
     VembdaExecutionFulfilledBody,
     VembdaExecutionFulfilledEvent,
 )
-from workflow_server.core.utils import serialize_vembda_rejected_event
+from workflow_server.core.utils import is_events_emitting_enabled, serialize_vembda_rejected_event
 from workflow_server.core.workflow_executor_context import (
     DEFAULT_TIMEOUT_SECONDS,
     BaseExecutorContext,
@@ -120,6 +121,12 @@ def _stream_workflow_wrapper(

         logger.exception(e)
         queue.put(serialize_vembda_rejected_event(executor_context, "Internal Server Error"))
+
+    emitter_thread = next(
+        (t for t in threading.enumerate() if t.name.endswith(".background_thread") and t.is_alive()), None
+    )
+    if emitter_thread:
+        emitter_thread.join()
     queue.put(STREAM_FINISHED_EVENT)

     exit(0)
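
The added block waits for any live emitter background thread to finish before STREAM_FINISHED_EVENT is queued, so buffered events get flushed before the worker process exits. A standalone sketch of the same pattern; the worker function and thread name are illustrative, and the ".background_thread" suffix is the naming convention the lookup relies on:

import threading
import time

def worker():
    time.sleep(0.1)  # stand-in for flushing queued emitter events

thread = threading.Thread(target=worker, name="VellumEmitter.background_thread")
thread.start()

# Find a live background thread by naming convention and wait for it to drain.
emitter_thread = next(
    (t for t in threading.enumerate() if t.name.endswith(".background_thread") and t.is_alive()),
    None,
)
if emitter_thread:
    emitter_thread.join()  # block until pending events are flushed
print("stream finished")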
@@ -345,7 +352,7 @@ def _create_workflow(executor_context: WorkflowExecutorContext, namespace: str)
     )

     # Determine whether to enable the Vellum Emitter for event publishing
-    use_vellum_emitter = bool((executor_context.feature_flags or {}).get("vembda-event-emitting-enabled"))
+    use_vellum_emitter = is_events_emitting_enabled(executor_context)
     emitters: list["BaseWorkflowEmitter"] = []
     if use_vellum_emitter:
         emitters = [VellumEmitter()]
workflow_server/core/utils.py

@@ -38,3 +38,13 @@ def create_vembda_rejected_event(executor_context: Optional[BaseExecutorContext]

 def serialize_vembda_rejected_event(executor_context: Optional[BaseExecutorContext], error_message: str) -> str:
     return _create_vembda_rejected_event_base(executor_context, error_message).model_dump_json()
+
+
+def is_events_emitting_enabled(executor_context: Optional[BaseExecutorContext]) -> bool:
+    if not executor_context:
+        return False
+
+    if not executor_context.feature_flags:
+        return False
+
+    return executor_context.feature_flags.get("vembda-event-emitting-enabled") or False
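
An edge-case sketch of the new helper; SimpleNamespace stands in for BaseExecutorContext, since only the feature_flags attribute is read:

from types import SimpleNamespace

from workflow_server.core.utils import is_events_emitting_enabled

assert is_events_emitting_enabled(None) is False
assert is_events_emitting_enabled(SimpleNamespace(feature_flags=None)) is False
assert is_events_emitting_enabled(SimpleNamespace(feature_flags={"other-flag": True})) is False
assert is_events_emitting_enabled(SimpleNamespace(feature_flags={"vembda-event-emitting-enabled": True})) is True
# Note: despite the bool annotation, a truthy non-bool flag value (e.g. "yes")
# is returned as-is, because of the trailing `or False`.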