vellum-workflow-server 1.11.15__py3-none-any.whl → 1.11.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: vellum-workflow-server
- Version: 1.11.15
+ Version: 1.11.21
  Summary:
  License: AGPL
  Requires-Python: >=3.9.0,<4
@@ -30,13 +30,15 @@ Requires-Dist: pyjwt (==2.10.0)
  Requires-Dist: python-dotenv (==1.2.1)
  Requires-Dist: retrying (==1.3.4)
  Requires-Dist: sentry-sdk[flask] (==2.20.0)
- Requires-Dist: vellum-ai (==1.11.15)
+ Requires-Dist: vellum-ai (==1.11.21)
  Description-Content-Type: text/markdown
 
  # Vellum Workflow Runner Server
+
  This package is meant to be installed in container images in order to use custom Docker images with Vellum Workflows.
 
  ## Example Dockerfile Usage:
+
  ```
  FROM python:3.11.6-slim-bookworm
 
@@ -49,7 +51,6 @@ RUN pip install --upgrade pip
  RUN pip --no-cache-dir install vellum-workflow-server==0.13.2
 
  ENV PYTHONUNBUFFERED 1
- ENV PYTHONDONTWRITEBYTECODE 1
  COPY ./base-image/code_exec_entrypoint.sh .
  RUN chmod +x /code_exec_entrypoint.sh
 
@@ -57,5 +58,6 @@ CMD ["vellum_start_server"]
  ```
 
  ## Skipping Publishes
+
  If you wish to skip automatically publishing a new version when merging to main, you can add [skip-publish] to your commit message. This is useful if your changes are not time sensitive and can go out with the next release. It avoids creating new services (which causes extra cold starts for our customers) and keeps our public versioning tidier.
 
@@ -8,11 +8,11 @@ workflow_server/api/tests/test_input_display_mapping.py,sha256=drBZqMudFyB5wgiUO
  workflow_server/api/tests/test_workflow_view.py,sha256=I2sd11ptKDqbylzB9rKqkMXeZoh8ttad3zIhNus86vk,32491
  workflow_server/api/tests/test_workflow_view_async_exec.py,sha256=eP_H2xI9SRfJdoJ6HPeynQecnxR50I_8aDCooF-YzIw,11952
  workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=QilhiYv3_iaF-xvbmqUqqQqqVIZC4KgBe1u8Ku1q52s,42511
- workflow_server/api/workflow_view.py,sha256=60RWK2VFmmeLpHCGwf3DdBBx2_GP5KIRxE6rAEwG8Jw,24774
+ workflow_server/api/workflow_view.py,sha256=1VSHGY0VXPnJQTz4XIX_sBiYFlTdByHH7RSAoOHBdtk,26650
  workflow_server/code_exec_runner.py,sha256=vJlCQ8FkcG8RfCZ34Ea2Xt6J7dNkU5EqA-KxRkbVOeo,2219
  workflow_server/config.py,sha256=I4hfTsjIbHxoSKylPCjKnrysPV0jO5nfRKwpKvEcfAE,2193
  workflow_server/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- workflow_server/core/cancel_workflow.py,sha256=QcEeYUIrxq4pub-z9BlGi5fLI3gVRml-56rMCW7j5Hc,2212
+ workflow_server/core/cancel_workflow.py,sha256=uMPZg_rQ6iKZBLuxgwla1NYwhkcbO0gLh8QYBfU_2_0,2371
  workflow_server/core/events.py,sha256=24MA66DVQuaLJJcZrS8IL1Zq4Ohi9CoouKZ5VgoH3Cs,1402
  workflow_server/core/executor.py,sha256=7mJDuP8uedCAygNorp2Vscy6w7xHHqNZ_jxJXkJZzeY,17112
  workflow_server/core/utils.py,sha256=mecVPqQkthrC4mpop3r8J3IWnBmKbDgqfCrSagyzVEg,2021
@@ -30,8 +30,8 @@ workflow_server/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
  workflow_server/utils/tests/test_sentry_integration.py,sha256=14PfuW8AaQNNtqLmBs16EPe5T3f_iTI7YJMCRtiboZk,4502
  workflow_server/utils/tests/test_system_utils.py,sha256=_4GwXvVvU5BrATxUEWwQIPg0bzQXMWBtiBmjP8MTxJM,4314
  workflow_server/utils/tests/test_utils.py,sha256=0Nq6du8o-iBtTrip9_wgHES53JSiJbVdSXaBnPobw3s,6930
- workflow_server/utils/utils.py,sha256=m7iMJtor5SQLWu7jlJw-X5Q3nmbq69BCxTMv6qnFYrA,4835
- vellum_workflow_server-1.11.15.dist-info/METADATA,sha256=oIldsrXZvwKAboSlLYy1LRYuEir14fTww2xS7ii3I7I,2304
- vellum_workflow_server-1.11.15.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- vellum_workflow_server-1.11.15.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
- vellum_workflow_server-1.11.15.dist-info/RECORD,,
+ workflow_server/utils/utils.py,sha256=dwUBP_0ngq6q-y2IFrjEWbGvao2roDR-VAekN6z57kE,5078
+ vellum_workflow_server-1.11.21.dist-info/METADATA,sha256=WEJ5yhlrLvZjT0HHImwf0MlRjS6FwAUBTlghLEdbeWY,2277
+ vellum_workflow_server-1.11.21.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ vellum_workflow_server-1.11.21.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
+ vellum_workflow_server-1.11.21.dist-info/RECORD,,
@@ -22,6 +22,13 @@ from vellum_ee.workflows.display.types import WorkflowDisplayContext
  from vellum_ee.workflows.display.workflows import BaseWorkflowDisplay
  from vellum_ee.workflows.server.virtual_file_loader import VirtualFileFinder
 
+ from vellum.workflows.errors import WorkflowError, WorkflowErrorCode
+ from vellum.workflows.events.workflow import (
+     WorkflowExecutionInitiatedBody,
+     WorkflowExecutionInitiatedEvent,
+     WorkflowExecutionRejectedBody,
+     WorkflowExecutionRejectedEvent,
+ )
  from vellum.workflows.exceptions import WorkflowInitializationException
  from vellum.workflows.nodes import BaseNode
  from vellum.workflows.vellum_client import create_vellum_client
@@ -174,6 +181,43 @@ def stream_workflow_route() -> Response:
      return resp
 
 
+ def _emit_async_error_events(
+     context: WorkflowExecutorContext, error_message: str, stacktrace: Optional[str] = None
+ ) -> None:
+     """
+     Emit workflow execution error events when async execution fails before or during workflow startup.
+
+     This ensures that errors in async mode are properly reported to Vellum's events API,
+     making them visible in the executions UI.
+     """
+     try:
+         workflow_span_id = context.workflow_span_id or str(uuid4())
+
+         initiated_event = WorkflowExecutionInitiatedEvent[Any, Any](
+             trace_id=context.trace_id,
+             span_id=workflow_span_id,
+             body=WorkflowExecutionInitiatedBody(inputs=context.inputs),
+             parent=context.execution_context.parent_context if context.execution_context else None,
+         )
+
+         rejected_event = WorkflowExecutionRejectedEvent(
+             trace_id=context.trace_id,
+             span_id=workflow_span_id,
+             body=WorkflowExecutionRejectedBody(
+                 error=WorkflowError(
+                     message=error_message,
+                     code=WorkflowErrorCode.INTERNAL_ERROR,
+                 ),
+                 stacktrace=stacktrace,
+             ),
+             parent=context.execution_context.parent_context if context.execution_context else None,
+         )
+
+         context.vellum_client.events.create(request=[initiated_event, rejected_event])  # type: ignore[list-item]
+     except Exception as e:
+         logger.exception(f"Failed to emit async error events: {e}")
+
+
  @bp.route("/async-exec", methods=["POST"])
  def async_exec_workflow() -> Response:
      data = request.get_json()
@@ -208,8 +252,8 @@ def async_exec_workflow() -> Response:
      try:
          start_workflow_result = _start_workflow(context)
          if isinstance(start_workflow_result, Response):
-             # TODO same here, should return this response as en event or it will get yeeted to the nether
-             # return start_workflow_result
+             error_detail = start_workflow_result.get_json().get("detail", "Unknown error during workflow startup")
+             _emit_async_error_events(context, error_detail)
              return
 
          workflow_events, vembda_initiated_event, process, span_id, headers = start_workflow_result
@@ -223,6 +267,7 @@ def async_exec_workflow() -> Response:
          )
      except Exception as e:
          logger.exception("Error during workflow async background worker", e)
+         _emit_async_error_events(context, str(e), traceback.format_exc())
      finally:
          if ENABLE_PROCESS_WRAPPER:
              try:
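
Note on the workflow_view.py changes above: when the async background worker fails before the workflow can emit its own lifecycle events, the server now synthesizes a matched initiated/rejected event pair and posts it to the events API, so the failed execution still shows up (as rejected) in the executions UI instead of disappearing silently. Below is a minimal, self-contained sketch of that pattern; `EventsClient`, `InitiatedEvent`, `RejectedEvent`, and `run_async_with_error_events` are hypothetical stand-ins for the Vellum SDK types and route code shown in the diff, not the real API.

```
# Sketch only: the classes below are simplified stand-ins for the SDK event
# types used in workflow_view.py above.
import traceback
import uuid
from dataclasses import dataclass
from typing import Callable, Optional


@dataclass
class InitiatedEvent:
    trace_id: str
    span_id: str
    inputs: dict


@dataclass
class RejectedEvent:
    trace_id: str
    span_id: str
    error_message: str
    stacktrace: Optional[str] = None


class EventsClient:
    def create(self, request: list) -> None:
        # Stand-in for context.vellum_client.events.create(...) in the diff above.
        print(f"emitting {len(request)} events")


def run_async_with_error_events(
    work: Callable[[], None], trace_id: str, inputs: dict, client: EventsClient
) -> None:
    # If the background work fails, emit a matched initiated/rejected pair so the
    # execution is still recorded (as rejected) rather than lost.
    span_id = str(uuid.uuid4())
    try:
        work()
    except Exception as exc:
        client.create(
            request=[
                InitiatedEvent(trace_id=trace_id, span_id=span_id, inputs=inputs),
                RejectedEvent(
                    trace_id=trace_id,
                    span_id=span_id,
                    error_message=str(exc),
                    stacktrace=traceback.format_exc(),
                ),
            ]
        )
```

In the actual route, the same `_emit_async_error_events` helper is also called when `_start_workflow` returns an error Response, replacing the old TODO that silently dropped that response.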
@@ -14,14 +14,18 @@ logger = logging.getLogger(__name__)
 
 
  def get_is_workflow_cancelled(execution_id: UUID, vembda_public_url: Optional[str]) -> bool:
-     response = requests.get(
-         f"{vembda_public_url}/vembda-public/cancel-workflow-execution-status/{execution_id}",
-         headers={"Accept": "application/json"},
-         timeout=5,
-     )
-     response.raise_for_status()
+     try:
+         response = requests.get(
+             f"{vembda_public_url}/vembda-public/cancel-workflow-execution-status/{execution_id}",
+             headers={"Accept": "application/json"},
+             timeout=5,
+         )
+         response.raise_for_status()
 
-     return response.json().get("cancelled")
+         return response.json().get("cancelled", False)
+     except Exception:
+         logger.exception("Error checking workflow cancellation status")
+         return False
 
 
  class CancelWorkflowWatcherThread(Thread):
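
Note on the cancel_workflow.py change above: the cancellation status check now fails open, so a network error or non-2xx response is logged and treated as "not cancelled" instead of raising inside the watcher thread. A minimal sketch of how a polling loop might use it is shown below; the poll interval, stop event, and `cancel` callback are illustrative assumptions, not the actual CancelWorkflowWatcherThread implementation.

```
# Sketch only: poll_seconds, stop_event, and the cancel callback are assumptions;
# the real watcher lives in CancelWorkflowWatcherThread.
import threading
from typing import Callable, Optional
from uuid import UUID

from workflow_server.core.cancel_workflow import get_is_workflow_cancelled


def watch_for_cancellation(
    execution_id: UUID,
    vembda_public_url: Optional[str],
    cancel: Callable[[], None],
    stop_event: threading.Event,
    poll_seconds: float = 5.0,
) -> None:
    # Keep polling until the workflow finishes (stop_event set) or a cancel is seen.
    # Because get_is_workflow_cancelled() returns False on errors, a flaky status
    # endpoint degrades to "keep running" rather than killing the watcher.
    while not stop_event.wait(poll_seconds):
        if get_is_workflow_cancelled(execution_id, vembda_public_url):
            cancel()
            return
```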
@@ -59,10 +59,19 @@ def convert_json_inputs_to_vellum(inputs: List[dict]) -> dict:
 
 
  def get_version() -> dict:
+     # Return hotswappable lock file so we can save it and reuse it
+     lock_file = None
+     try:
+         with open("/app/uv.lock", "r") as f:
+             lock_file = f.read()
+     except Exception:
+         pass
+
      return {
          "sdk_version": version("vellum-ai"),
          "server_version": "local" if is_development() else version("vellum-workflow-server"),
          "container_image": CONTAINER_IMAGE,
+         "lock_file": lock_file,
      }
 
 
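
Note on the utils.py change above: get_version() now also returns the contents of /app/uv.lock (or None if the file is missing) under a "lock_file" key, so callers can save the lock file and reuse it, per the in-code comment. A small sketch of a consumer persisting that value; the `save_lock_file` helper and snapshot path are assumptions, while `get_version()` and its keys come from the diff.

```
# Sketch only: save_lock_file and the snapshot path are assumptions;
# get_version() is the function from workflow_server/utils/utils.py above.
from pathlib import Path

from workflow_server.utils.utils import get_version


def save_lock_file(destination: str = "./uv.lock.snapshot") -> None:
    version_info = get_version()
    lock_file = version_info.get("lock_file")
    if lock_file is None:
        # The image may not include /app/uv.lock; get_version() then returns None.
        print("no lock file available in this image")
        return
    Path(destination).write_text(lock_file)
    print(f"saved lock file for sdk_version={version_info['sdk_version']} to {destination}")
```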