vellum-workflow-server 1.3.4__py3-none-any.whl → 1.3.4.post2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release of vellum-workflow-server has been flagged as potentially problematic.

--- vellum_workflow_server-1.3.4.dist-info/METADATA
+++ vellum_workflow_server-1.3.4.post2.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-workflow-server
-Version: 1.3.4
+Version: 1.3.4.post2
 Summary:
 License: AGPL
 Requires-Python: >=3.9.0,<4
--- vellum_workflow_server-1.3.4.dist-info/RECORD
+++ vellum_workflow_server-1.3.4.post2.dist-info/RECORD
@@ -5,14 +5,14 @@ workflow_server/api/healthz_view.py,sha256=itiRvBDBXncrw8Kbbc73UZLwqMAhgHOR3uSre
 workflow_server/api/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 workflow_server/api/tests/test_input_display_mapping.py,sha256=drBZqMudFyB5wgiUOcMgRXz7E7ge-Qgxbstw4E4f0zE,2211
 workflow_server/api/tests/test_workflow_view.py,sha256=gwb53E44LBm06XHY1UwfHqKG-UfwwICh_IYPtowN_kE,19085
-workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=1vr9NI894zdJmmnsy_wBSqaJG83RwnYSmqucknYf9bI,26304
-workflow_server/api/workflow_view.py,sha256=osYa14kdP3LI3HZzh2Nw16-GhI0bHXwCFgasUkfRjeI,21104
+workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=Qo8u6mPyRCmE2jamY1yIh8l44hgo4-Nwlq03z61ND5g,27031
+workflow_server/api/workflow_view.py,sha256=gSKqlMR2r2urSBYm_jFlcpXHOk5jQsDMSDwpSahp5A8,21144
 workflow_server/code_exec_runner.py,sha256=lBnMIorPZL8zZBye6TjeCIs06WTJM7P2HR07B1fjJJI,2533
 workflow_server/config.py,sha256=DyTty8NrAwvtx-esM3KthnpsNh-nKdWNlovWQOgiGpg,1417
 workflow_server/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 workflow_server/core/cancel_workflow.py,sha256=Ffkc3mzmrdMEUcD-sHfEhX4IwVrka-E--SxKA1dUfIU,2185
 workflow_server/core/events.py,sha256=24MA66DVQuaLJJcZrS8IL1Zq4Ohi9CoouKZ5VgoH3Cs,1402
-workflow_server/core/executor.py,sha256=M_LyVOcPDzAA3u1WBLMpz9gftjc_RtO3hYida6tkSJo,18383
+workflow_server/core/executor.py,sha256=TTpMifrIZuzXxOUtX3E3CgLOb8eRXul2aGPpRCfWIco,18368
 workflow_server/core/utils.py,sha256=cmwHbKCfXqtUutBD3akGus0Ga7a1xG3zlOw-jEMx6mI,1795
 workflow_server/core/workflow_executor_context.py,sha256=w3OhV_AXpgh7AxpjEsc0vo-IJypgJcgr5DXJCqGptOU,1587
 workflow_server/server.py,sha256=QBU12AaAfAgLqfCDBd24qIJl_mbheiq0-hfcWV7rZM4,1234
@@ -28,7 +28,7 @@ workflow_server/utils/tests/test_sentry_integration.py,sha256=LGmWiaLhFrx-jslrRj
 workflow_server/utils/tests/test_system_utils.py,sha256=_4GwXvVvU5BrATxUEWwQIPg0bzQXMWBtiBmjP8MTxJM,4314
 workflow_server/utils/tests/test_utils.py,sha256=qwK5Rmy3RQyjtlUrYAuGuDlBeRzZKsf1yS-y2IpUizQ,6452
 workflow_server/utils/utils.py,sha256=Wqqn-1l2ugkGgy5paWWdt0AVxAyPMQCYcnRSSOMjXlA,4355
-vellum_workflow_server-1.3.4.dist-info/METADATA,sha256=5S0p5CKKxsab90EaMaRWtY7Wwuh3n_L58SxvMgvokc0,2267
-vellum_workflow_server-1.3.4.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-vellum_workflow_server-1.3.4.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
-vellum_workflow_server-1.3.4.dist-info/RECORD,,
+vellum_workflow_server-1.3.4.post2.dist-info/METADATA,sha256=t3_ELDwFzZUWqnYxfvYTp8uypnAsPAPeOnEpFoeBRvo,2273
+vellum_workflow_server-1.3.4.post2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+vellum_workflow_server-1.3.4.post2.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
+vellum_workflow_server-1.3.4.post2.dist-info/RECORD,,
--- workflow_server/api/tests/test_workflow_view_stream_workflow_route.py
+++ workflow_server/api/tests/test_workflow_view_stream_workflow_route.py
@@ -4,6 +4,7 @@ from importlib.metadata import version
 import io
 import json
 from queue import Empty
+import re
 from unittest import mock
 from uuid import uuid4

@@ -408,7 +409,16 @@ from vellum.workflows.inputs import BaseInputs
     assert len(events) == 2


-def test_stream_workflow_route__cancel(both_stream_types):
+@pytest.mark.parametrize(
+    ["execute_workflow_stream", "assert_last_request"],
+    [
+        (flask_stream, False),  # Unfortunately, can't make assertions on requests made in a subprocess.
+        (code_exec_stream, True),
+        (flask_stream_disable_process_wrapper, True),
+    ],
+    ids=["flask_stream", "code_exec_stream", "flask_stream_disable_process_wrapper"],
+)
+def test_stream_workflow_route__cancel(execute_workflow_stream, assert_last_request):
     # GIVEN a valid request body
     span_id = uuid4()
     request_body = {
@@ -447,10 +457,10 @@ class BasicCancellableWorkflow(BaseWorkflow):

     # WHEN we call the stream route with a mock cancelled return true
     with requests_mock.Mocker() as mocker:
-        mocker.get(
-            f"http://test.biz/vembda-public/cancel-workflow-execution-status/{span_id}", json={"cancelled": True}
+        response_mock = mocker.get(
+            re.compile("http://test.biz/vembda-public/cancel-workflow-execution-status"), json={"cancelled": True}
         )
-        status_code, events = both_stream_types(request_body)
+        status_code, events = execute_workflow_stream(request_body)

     # THEN we get a 200 response
     assert status_code == 200, events
@@ -473,14 +483,20 @@ class BasicCancellableWorkflow(BaseWorkflow):

     assert events[1]["name"] == "workflow.execution.initiated", events[1]
     assert "display_context" in events[1]["body"], events[1]["body"]
-    cancelled_event = None
-    for event in events:
-        if event["name"] == "workflow.execution.rejected":
-            cancelled_event = event
-            break

+    cancelled_event = events[-2]
+    assert cancelled_event["name"] == "workflow.execution.rejected"
     assert cancelled_event["body"]["error"]["message"] == "Workflow run cancelled"

+    # AND we called the cancel endpoint with the correct execution id
+    workflow_span_id = events[1]["span_id"]
+    if assert_last_request:
+        assert response_mock.last_request
+        assert (
+            response_mock.last_request.url
+            == f"http://test.biz/vembda-public/cancel-workflow-execution-status/{workflow_span_id}"
+        )
+

 def test_stream_workflow_route__very_large_events(both_stream_types):
     # GIVEN a valid request body
--- workflow_server/api/workflow_view.py
+++ workflow_server/api/workflow_view.py
@@ -81,7 +81,7 @@ def stream_workflow_route() -> Response:
     )

     logger.info(
-        f"Starting workflow stream, execution ID: {context.execution_id}, "
+        f"Starting Workflow Server Request, trace ID: {context.trace_id}, "
        f"process count: {get_active_process_count()}, process wrapper: {ENABLE_PROCESS_WRAPPER}"
     )

@@ -194,6 +194,8 @@ def stream_workflow_route() -> Response:
         headers=headers,
     )

+    logger.info(f"Starting Workflow Stream, execution ID: {span_id}, ")
+
     def process_events(queue: Queue) -> Iterator[Union[str, dict]]:
         event: Union[str, dict]
         loops = 0
@@ -309,8 +311,7 @@ def stream_workflow_route() -> Response:
             yield "\n"

            logger.info(
-                f"Workflow stream completed, execution ID: {context.execution_id}, "
-                f"process count: {get_active_process_count()}"
+                f"Workflow stream completed, execution ID: {span_id}, process count: {get_active_process_count()}"
            )
        except GeneratorExit:
            # These can happen either from Vembda disconnects (possibily from predict disconnects) or
--- workflow_server/core/executor.py
+++ workflow_server/core/executor.py
@@ -192,19 +192,6 @@ def stream_workflow(

     cancel_watcher_kill_switch = ThreadingEvent()
     cancel_signal = cancel_signal or ThreadingEvent()
-    cancel_watcher = CancelWorkflowWatcherThread(
-        kill_switch=cancel_watcher_kill_switch,
-        execution_id=executor_context.execution_id,
-        timeout_seconds=executor_context.timeout,
-        vembda_public_url=executor_context.vembda_public_url,
-        cancel_signal=cancel_signal,
-    )
-
-    try:
-        if executor_context.vembda_public_url:
-            cancel_watcher.start()
-    except Exception:
-        logger.exception("Failed to start cancel watcher")

     try:
         stream = workflow.stream(
@@ -216,12 +203,25 @@ def stream_workflow(
             entrypoint_nodes=[run_from_node] if run_from_node else None,
             previous_execution_id=executor_context.previous_execution_id,
         )
-
     except Exception:
         cancel_watcher_kill_switch.set()
         logger.exception("Failed to generate Workflow Stream")
         raise

+    cancel_watcher = CancelWorkflowWatcherThread(
+        kill_switch=cancel_watcher_kill_switch,
+        execution_id=stream.span_id,
+        timeout_seconds=executor_context.timeout,
+        vembda_public_url=executor_context.vembda_public_url,
+        cancel_signal=cancel_signal,
+    )
+
+    try:
+        if executor_context.vembda_public_url:
+            cancel_watcher.start()
+    except Exception:
+        logger.exception("Failed to start cancel watcher")
+
     def call_workflow() -> Generator[dict[str, Any], Any, None]:
         try:
             first = True