vellum-workflow-server 1.11.0.post1__py3-none-any.whl → 1.11.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vellum-workflow-server
3
- Version: 1.11.0.post1
3
+ Version: 1.11.21
4
4
  Summary:
5
5
  License: AGPL
6
6
  Requires-Python: >=3.9.0,<4
@@ -30,13 +30,15 @@ Requires-Dist: pyjwt (==2.10.0)
30
30
  Requires-Dist: python-dotenv (==1.2.1)
31
31
  Requires-Dist: retrying (==1.3.4)
32
32
  Requires-Dist: sentry-sdk[flask] (==2.20.0)
33
- Requires-Dist: vellum-ai (==1.11.0)
33
+ Requires-Dist: vellum-ai (==1.11.21)
34
34
  Description-Content-Type: text/markdown
35
35
 
36
36
  # Vellum Workflow Runner Server
37
+
37
38
  This package is meant to be installed on container images so that custom Docker images can be used with Vellum Workflows.
38
39
 
39
40
  ## Example Dockerfile Usage:
41
+
40
42
  ```
41
43
  FROM python:3.11.6-slim-bookworm
42
44
 
@@ -49,7 +51,6 @@ RUN pip install --upgrade pip
49
51
  RUN pip --no-cache-dir install vellum-workflow-server==0.13.2
50
52
 
51
53
  ENV PYTHONUNBUFFERED 1
52
- ENV PYTHONDONTWRITEBYTECODE 1
53
54
  COPY ./base-image/code_exec_entrypoint.sh .
54
55
  RUN chmod +x /code_exec_entrypoint.sh
55
56
 
@@ -57,5 +58,6 @@ CMD ["vellum_start_server"]
57
58
  ```
58
59
 
59
60
  ## Skipping Publishes
61
+
60
62
  If you wish to automatically skip publishing a new version when merging to main, you can add [skip-publish] to your commit message. This is useful if your changes are not time-sensitive and can simply go out with the next release. Skipping a publish avoids creating new services (which cause extra cold starts for our customers) and also keeps our public versioning tidier.
61
63
 
@@ -5,16 +5,16 @@ workflow_server/api/healthz_view.py,sha256=itiRvBDBXncrw8Kbbc73UZLwqMAhgHOR3uSre
5
5
  workflow_server/api/status_view.py,sha256=Jah8dBAVL4uOcRfsjKAOyfVONFyk9HQjXeRfjcIqhmA,514
6
6
  workflow_server/api/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
7
  workflow_server/api/tests/test_input_display_mapping.py,sha256=drBZqMudFyB5wgiUOcMgRXz7E7ge-Qgxbstw4E4f0zE,2211
8
- workflow_server/api/tests/test_workflow_view.py,sha256=B6B8mCirt3FvpPKRP_AyzPJ199k_gwLzAcQuWRkzEfA,32343
8
+ workflow_server/api/tests/test_workflow_view.py,sha256=I2sd11ptKDqbylzB9rKqkMXeZoh8ttad3zIhNus86vk,32491
9
9
  workflow_server/api/tests/test_workflow_view_async_exec.py,sha256=eP_H2xI9SRfJdoJ6HPeynQecnxR50I_8aDCooF-YzIw,11952
10
- workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=16ZxP_nuAIP1Cg4-4z6EQttn07SRY1GkVq2m53z6XaE,42389
11
- workflow_server/api/workflow_view.py,sha256=gYT1lWSYadiG5OLQpg09mJT4xz_foSmsIMvtO7GNbAo,24669
10
+ workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=QilhiYv3_iaF-xvbmqUqqQqqVIZC4KgBe1u8Ku1q52s,42511
11
+ workflow_server/api/workflow_view.py,sha256=1VSHGY0VXPnJQTz4XIX_sBiYFlTdByHH7RSAoOHBdtk,26650
12
12
  workflow_server/code_exec_runner.py,sha256=vJlCQ8FkcG8RfCZ34Ea2Xt6J7dNkU5EqA-KxRkbVOeo,2219
13
13
  workflow_server/config.py,sha256=I4hfTsjIbHxoSKylPCjKnrysPV0jO5nfRKwpKvEcfAE,2193
14
14
  workflow_server/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
15
- workflow_server/core/cancel_workflow.py,sha256=QcEeYUIrxq4pub-z9BlGi5fLI3gVRml-56rMCW7j5Hc,2212
15
+ workflow_server/core/cancel_workflow.py,sha256=uMPZg_rQ6iKZBLuxgwla1NYwhkcbO0gLh8QYBfU_2_0,2371
16
16
  workflow_server/core/events.py,sha256=24MA66DVQuaLJJcZrS8IL1Zq4Ohi9CoouKZ5VgoH3Cs,1402
17
- workflow_server/core/executor.py,sha256=Drw6TcegjKFIoowDzeQzCf2QwblSa-stm70YBfFItRU,16968
17
+ workflow_server/core/executor.py,sha256=7mJDuP8uedCAygNorp2Vscy6w7xHHqNZ_jxJXkJZzeY,17112
18
18
  workflow_server/core/utils.py,sha256=mecVPqQkthrC4mpop3r8J3IWnBmKbDgqfCrSagyzVEg,2021
19
19
  workflow_server/core/workflow_executor_context.py,sha256=8faOdpU4cBeIbmOvg9VzD3eS5i_PKcH7tyNGzx_rehg,3899
20
20
  workflow_server/logging_config.py,sha256=Hvx1t8uhqMMinl-5qcef7ufUvzs6x14VRnCb7YZxEAg,1206
@@ -30,8 +30,8 @@ workflow_server/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
30
30
  workflow_server/utils/tests/test_sentry_integration.py,sha256=14PfuW8AaQNNtqLmBs16EPe5T3f_iTI7YJMCRtiboZk,4502
31
31
  workflow_server/utils/tests/test_system_utils.py,sha256=_4GwXvVvU5BrATxUEWwQIPg0bzQXMWBtiBmjP8MTxJM,4314
32
32
  workflow_server/utils/tests/test_utils.py,sha256=0Nq6du8o-iBtTrip9_wgHES53JSiJbVdSXaBnPobw3s,6930
33
- workflow_server/utils/utils.py,sha256=m7iMJtor5SQLWu7jlJw-X5Q3nmbq69BCxTMv6qnFYrA,4835
34
- vellum_workflow_server-1.11.0.post1.dist-info/METADATA,sha256=sU_57FzsmfgTgFLvjdhj_4qSa5hFOlBPrnTtRndDQnc,2308
35
- vellum_workflow_server-1.11.0.post1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
36
- vellum_workflow_server-1.11.0.post1.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
37
- vellum_workflow_server-1.11.0.post1.dist-info/RECORD,,
33
+ workflow_server/utils/utils.py,sha256=dwUBP_0ngq6q-y2IFrjEWbGvao2roDR-VAekN6z57kE,5078
34
+ vellum_workflow_server-1.11.21.dist-info/METADATA,sha256=WEJ5yhlrLvZjT0HHImwf0MlRjS6FwAUBTlghLEdbeWY,2277
35
+ vellum_workflow_server-1.11.21.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
36
+ vellum_workflow_server-1.11.21.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
37
+ vellum_workflow_server-1.11.21.dist-info/RECORD,,
@@ -389,7 +389,15 @@ class MyAdditionNode(BaseNode):
389
389
  },
390
390
  "id": "2464b610-fb6d-495b-b17c-933ee147f19f",
391
391
  "label": "My Addition Node",
392
- "outputs": [{"id": "f39d85c9-e7bf-45e1-bb67-f16225db0118", "name": "result", "type": "NUMBER", "value": None}],
392
+ "outputs": [
393
+ {
394
+ "id": "f39d85c9-e7bf-45e1-bb67-f16225db0118",
395
+ "name": "result",
396
+ "type": "NUMBER",
397
+ "value": None,
398
+ "schema": {"type": "integer"},
399
+ }
400
+ ],
393
401
  "ports": [{"id": "bc489295-cd8a-4aa2-88bb-34446374100d", "name": "default", "type": "DEFAULT"}],
394
402
  "trigger": {"id": "ff580cad-73d6-44fe-8f2c-4b8dc990ee70", "merge_behavior": "AWAIT_ATTRIBUTES"},
395
403
  "type": "GENERIC",
@@ -549,7 +549,10 @@ class Inputs(BaseInputs):
549
549
  # AND the third event should be workflow execution rejected
550
550
  assert events[2]["name"] == "workflow.execution.rejected"
551
551
  assert events[1]["span_id"] == events[2]["span_id"]
552
- assert "Required input variables foo should have defined value" in events[2]["body"]["error"]["message"]
552
+ actual_error_message = events[2]["body"]["error"]["message"]
553
+ assert "Required input variables" in actual_error_message
554
+ assert "foo" in actual_error_message
555
+ assert "should have defined value" in actual_error_message
553
556
 
554
557
  # AND the fourth event should be vembda execution fulfilled
555
558
  assert events[3]["name"] == "vembda.execution.fulfilled"
@@ -22,6 +22,13 @@ from vellum_ee.workflows.display.types import WorkflowDisplayContext
22
22
  from vellum_ee.workflows.display.workflows import BaseWorkflowDisplay
23
23
  from vellum_ee.workflows.server.virtual_file_loader import VirtualFileFinder
24
24
 
25
+ from vellum.workflows.errors import WorkflowError, WorkflowErrorCode
26
+ from vellum.workflows.events.workflow import (
27
+ WorkflowExecutionInitiatedBody,
28
+ WorkflowExecutionInitiatedEvent,
29
+ WorkflowExecutionRejectedBody,
30
+ WorkflowExecutionRejectedEvent,
31
+ )
25
32
  from vellum.workflows.exceptions import WorkflowInitializationException
26
33
  from vellum.workflows.nodes import BaseNode
27
34
  from vellum.workflows.vellum_client import create_vellum_client
@@ -174,6 +181,43 @@ def stream_workflow_route() -> Response:
174
181
  return resp
175
182
 
176
183
 
184
+ def _emit_async_error_events(
185
+ context: WorkflowExecutorContext, error_message: str, stacktrace: Optional[str] = None
186
+ ) -> None:
187
+ """
188
+ Emit workflow execution error events when async execution fails before or during workflow startup.
189
+
190
+ This ensures that errors in async mode are properly reported to Vellum's events API,
191
+ making them visible in the executions UI.
192
+ """
193
+ try:
194
+ workflow_span_id = context.workflow_span_id or str(uuid4())
195
+
196
+ initiated_event = WorkflowExecutionInitiatedEvent[Any, Any](
197
+ trace_id=context.trace_id,
198
+ span_id=workflow_span_id,
199
+ body=WorkflowExecutionInitiatedBody(inputs=context.inputs),
200
+ parent=context.execution_context.parent_context if context.execution_context else None,
201
+ )
202
+
203
+ rejected_event = WorkflowExecutionRejectedEvent(
204
+ trace_id=context.trace_id,
205
+ span_id=workflow_span_id,
206
+ body=WorkflowExecutionRejectedBody(
207
+ error=WorkflowError(
208
+ message=error_message,
209
+ code=WorkflowErrorCode.INTERNAL_ERROR,
210
+ ),
211
+ stacktrace=stacktrace,
212
+ ),
213
+ parent=context.execution_context.parent_context if context.execution_context else None,
214
+ )
215
+
216
+ context.vellum_client.events.create(request=[initiated_event, rejected_event]) # type: ignore[list-item]
217
+ except Exception as e:
218
+ logger.exception(f"Failed to emit async error events: {e}")
219
+
220
+
177
221
  @bp.route("/async-exec", methods=["POST"])
178
222
  def async_exec_workflow() -> Response:
179
223
  data = request.get_json()
@@ -208,8 +252,8 @@ def async_exec_workflow() -> Response:
208
252
  try:
209
253
  start_workflow_result = _start_workflow(context)
210
254
  if isinstance(start_workflow_result, Response):
211
- # TODO same here, should return this response as en event or it will get yeeted to the nether
212
- # return start_workflow_result
255
+ error_detail = start_workflow_result.get_json().get("detail", "Unknown error during workflow startup")
256
+ _emit_async_error_events(context, error_detail)
213
257
  return
214
258
 
215
259
  workflow_events, vembda_initiated_event, process, span_id, headers = start_workflow_result
@@ -223,6 +267,7 @@ def async_exec_workflow() -> Response:
223
267
  )
224
268
  except Exception as e:
225
269
  logger.exception("Error during workflow async background worker", e)
270
+ _emit_async_error_events(context, str(e), traceback.format_exc())
226
271
  finally:
227
272
  if ENABLE_PROCESS_WRAPPER:
228
273
  try:
@@ -531,11 +576,18 @@ def serialize_route() -> Response:
531
576
  is_new_server = data.get("is_new_server", False)
532
577
  module = data.get("module")
533
578
 
579
+ headers = {
580
+ "X-Vellum-Is-New-Server": str(is_new_server).lower(),
581
+ }
582
+
534
583
  if not files:
584
+ error_message = "No files received"
585
+ logger.warning(error_message)
535
586
  return Response(
536
- json.dumps({"detail": "No files received"}),
587
+ json.dumps({"detail": error_message}),
537
588
  status=400,
538
589
  content_type="application/json",
590
+ headers=headers,
539
591
  )
540
592
 
541
593
  client = create_vellum_client(api_key=workspace_api_key)
@@ -544,10 +596,6 @@ def serialize_route() -> Response:
544
596
  namespace = get_random_namespace()
545
597
  virtual_finder = VirtualFileFinder(files, namespace, source_module=module)
546
598
 
547
- headers = {
548
- "X-Vellum-Is-New-Server": str(is_new_server).lower(),
549
- }
550
-
551
599
  try:
552
600
  sys.meta_path.append(virtual_finder)
553
601
  result = BaseWorkflowDisplay.serialize_module(namespace, client=client, dry_run=True)
@@ -14,14 +14,18 @@ logger = logging.getLogger(__name__)
14
14
 
15
15
 
16
16
  def get_is_workflow_cancelled(execution_id: UUID, vembda_public_url: Optional[str]) -> bool:
17
- response = requests.get(
18
- f"{vembda_public_url}/vembda-public/cancel-workflow-execution-status/{execution_id}",
19
- headers={"Accept": "application/json"},
20
- timeout=5,
21
- )
22
- response.raise_for_status()
17
+ try:
18
+ response = requests.get(
19
+ f"{vembda_public_url}/vembda-public/cancel-workflow-execution-status/{execution_id}",
20
+ headers={"Accept": "application/json"},
21
+ timeout=5,
22
+ )
23
+ response.raise_for_status()
23
24
 
24
- return response.json().get("cancelled")
25
+ return response.json().get("cancelled", False)
26
+ except Exception:
27
+ logger.exception("Error checking workflow cancellation status")
28
+ return False
25
29
 
26
30
 
27
31
  class CancelWorkflowWatcherThread(Thread):
@@ -14,6 +14,7 @@ from typing import Any, Callable, Generator, Iterator, Optional, Tuple
14
14
 
15
15
  import orjson
16
16
  from vellum_ee.workflows.display.utils.events import event_enricher
17
+ from vellum_ee.workflows.display.utils.expressions import base_descriptor_validator
17
18
  from vellum_ee.workflows.server.virtual_file_loader import VirtualFileFinder
18
19
 
19
20
  from vellum.workflows import BaseWorkflow
@@ -177,6 +178,7 @@ def stream_workflow(
177
178
  node_output_mocks = MockNodeExecution.validate_all(
178
179
  executor_context.node_output_mocks,
179
180
  workflow.__class__,
181
+ descriptor_validator=base_descriptor_validator,
180
182
  )
181
183
 
182
184
  cancel_signal = cancel_signal or ThreadingEvent()
@@ -59,10 +59,19 @@ def convert_json_inputs_to_vellum(inputs: List[dict]) -> dict:
59
59
 
60
60
 
61
61
  def get_version() -> dict:
62
+ # Return hotswappable lock file so we can save it and reuse it
63
+ lock_file = None
64
+ try:
65
+ with open("/app/uv.lock", "r") as f:
66
+ lock_file = f.read()
67
+ except Exception:
68
+ pass
69
+
62
70
  return {
63
71
  "sdk_version": version("vellum-ai"),
64
72
  "server_version": "local" if is_development() else version("vellum-workflow-server"),
65
73
  "container_image": CONTAINER_IMAGE,
74
+ "lock_file": lock_file,
66
75
  }
67
76
 
68
77