vellum-workflow-server 0.14.72.post1__tar.gz → 0.14.72.post3__tar.gz

This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of vellum-workflow-server has been flagged as potentially problematic.

Files changed (31):
  1. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/PKG-INFO +1 -1
  2. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/pyproject.toml +1 -1
  3. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/api/workflow_view.py +19 -4
  4. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/code_exec_runner.py +3 -0
  5. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/core/executor.py +6 -0
  6. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/README.md +0 -0
  7. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/__init__.py +0 -0
  8. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/api/__init__.py +0 -0
  9. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/api/auth_middleware.py +0 -0
  10. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/api/healthz_view.py +0 -0
  11. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/api/tests/__init__.py +0 -0
  12. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/api/tests/test_input_display_mapping.py +0 -0
  13. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/api/tests/test_workflow_view.py +0 -0
  14. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/api/tests/test_workflow_view_stream_workflow_route.py +0 -0
  15. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/config.py +0 -0
  16. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/core/__init__.py +0 -0
  17. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/core/cancel_workflow.py +0 -0
  18. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/core/events.py +0 -0
  19. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/core/workflow_executor_context.py +0 -0
  20. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/server.py +0 -0
  21. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/start.py +0 -0
  22. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/utils/__init__.py +0 -0
  23. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/utils/exit_handler.py +0 -0
  24. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/utils/log_proxy.py +0 -0
  25. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/utils/oom_killer.py +0 -0
  26. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/utils/sentry.py +0 -0
  27. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/utils/system_utils.py +0 -0
  28. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/utils/tests/__init__.py +0 -0
  29. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/utils/tests/test_system_utils.py +0 -0
  30. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/utils/tests/test_utils.py +0 -0
  31. {vellum_workflow_server-0.14.72.post1 → vellum_workflow_server-0.14.72.post3}/src/workflow_server/utils/utils.py +0 -0
--- vellum_workflow_server-0.14.72.post1/PKG-INFO
+++ vellum_workflow_server-0.14.72.post3/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-workflow-server
-Version: 0.14.72.post1
+Version: 0.14.72.post3
 Summary:
 License: AGPL
 Requires-Python: >=3.9.0,<4
--- vellum_workflow_server-0.14.72.post1/pyproject.toml
+++ vellum_workflow_server-0.14.72.post3/pyproject.toml
@@ -3,7 +3,7 @@ name = "vellum-workflow-server"
 
 [tool.poetry]
 name = "vellum-workflow-server"
-version = "0.14.72.post1"
+version = "0.14.72.post3"
 description = ""
 readme = "README.md"
 authors = []
--- vellum_workflow_server-0.14.72.post1/src/workflow_server/api/workflow_view.py
+++ vellum_workflow_server-0.14.72.post3/src/workflow_server/api/workflow_view.py
@@ -15,6 +15,7 @@ from typing import Any, Dict, Generator, Iterator, Union, cast
 
 from flask import Blueprint, Response, current_app as app, request, stream_with_context
 from pydantic import ValidationError
+import sentry_sdk
 from vellum_ee.workflows.display.nodes.get_node_display_class import get_node_display_class
 from vellum_ee.workflows.display.types import WorkflowDisplayContext
 
@@ -112,6 +113,7 @@ def stream_workflow_route() -> Response:
         )
         increment_process_count(1)
     except Exception as e:
+        sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
         logger.exception(e)
 
         vembda_fulfilled_event = VembdaExecutionFulfilledEvent(
@@ -215,6 +217,7 @@ def stream_workflow_route() -> Response:
 
                 continue
             except Exception as e:
+                sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
                 logger.exception(e)
                 break
 
@@ -249,7 +252,7 @@ def stream_workflow_route() -> Response:
                 f"process count: {get_active_process_count()}"
             )
         except GeneratorExit:
-            app.logger.error("Client disconnected in the middle of the stream")
+            app.logger.error("Client disconnected in the middle of the Workflow Stream")
            return
        finally:
            try:
@@ -327,6 +330,7 @@ def stream_node_route() -> Response:
                     parent=None,
                 )
                 yield vembda_fulfilled_event.model_dump(mode="json")
+                sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
                 app.logger.exception(stream_future.exception())
                 break
             else:
@@ -401,6 +405,7 @@ def get_version_route() -> tuple[dict, int]:
 
         resp["nodes"] = nodes
     except Exception as e:
+        sentry_sdk.set_tag("vellum_trace_id", "unknown")
         logger.exception(f"Failed to discover nodes: {str(e)}")
         resp["nodes"] = []
 
@@ -409,10 +414,15 @@ def get_version_route() -> tuple[dict, int]:
 
 def get_workflow_request_context(data: dict) -> WorkflowExecutorContext:
     # not sure if this is the filter we want to pass forward?
+    trace_id = uuid4()
+    if "execution_context" in data and isinstance(data["execution_context"], dict):
+        if "trace_id" in data["execution_context"]:
+            trace_id = data["execution_context"]["trace_id"]
+
     context_data = {
         **data,
         "inputs": convert_json_inputs_to_vellum(data.get("inputs") or []),
-        "trace_id": uuid4(),
+        "trace_id": trace_id,
         "request_start_time": time.time_ns(),
     }
 
@@ -420,10 +430,15 @@ def get_workflow_request_context(data: dict) -> WorkflowExecutorContext:
 
 
 def get_node_request_context(data: dict) -> NodeExecutorContext:
+    trace_id = uuid4()
+    if "execution_context" in data and isinstance(data["execution_context"], dict):
+        if "trace_id" in data["execution_context"]:
+            trace_id = data["execution_context"]["trace_id"]
+
     context_data = {
         **data,
         "inputs": convert_json_inputs_to_vellum(data["inputs"]),
-        "trace_id": uuid4(),
+        "trace_id": trace_id,
         "request_start_time": time.time_ns(),
     }
 
@@ -455,5 +470,5 @@ def startup_error_generator(
 
         logger.error("Workflow stream could not start from resource constraints")
     except GeneratorExit:
-        app.logger.error("Client disconnected in the middle of the stream")
+        app.logger.error("Client disconnected in the middle of the Startup Error Stream")
         return
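
The trace-id change in the two context builders above is the behavioral core of this release: when the request body carries an execution_context dict containing a trace_id, that value is reused; otherwise a fresh UUID is minted, as before. A minimal standalone sketch of that resolution logic, for illustration only (resolve_trace_id is a hypothetical helper name, not part of the package):

from typing import Any
from uuid import UUID, uuid4

def resolve_trace_id(data: dict) -> Any:
    # Prefer the caller-supplied execution_context["trace_id"];
    # otherwise fall back to a newly minted UUID.
    trace_id: Any = uuid4()
    execution_context = data.get("execution_context")
    if isinstance(execution_context, dict) and "trace_id" in execution_context:
        trace_id = execution_context["trace_id"]
    return trace_id

# Caller-supplied trace ids propagate unchanged...
assert resolve_trace_id({"execution_context": {"trace_id": "abc-123"}}) == "abc-123"
# ...while requests without one still get a fresh UUID.
assert isinstance(resolve_trace_id({}), UUID)

One consequence of this design is that a client re-invoking a workflow can keep a single trace id across the whole request chain, which is what makes the Sentry tagging added throughout this release useful.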
--- vellum_workflow_server-0.14.72.post1/src/workflow_server/code_exec_runner.py
+++ vellum_workflow_server-0.14.72.post3/src/workflow_server/code_exec_runner.py
@@ -5,6 +5,8 @@ import os
 from uuid import uuid4
 from typing import Optional
 
+import sentry_sdk
+
 from vellum.workflows.exceptions import WorkflowInitializationException
 from workflow_server.api.workflow_view import get_workflow_request_context
 from workflow_server.core.events import (
@@ -69,6 +71,7 @@ def run_code_exec_stream() -> None:
 
         print(f"{_EVENT_LINE}{json.dumps(fulfilled_event)}")  # noqa: T201
     except Exception as e:
+        sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id) if context else "unknown")
         logger.exception(e)
 
         vembda_fulfilled_event = VembdaExecutionFulfilledEvent(
--- vellum_workflow_server-0.14.72.post1/src/workflow_server/core/executor.py
+++ vellum_workflow_server-0.14.72.post3/src/workflow_server/core/executor.py
@@ -15,6 +15,7 @@ from uuid import uuid4
 from typing import Any, Callable, Generator, Iterator, Optional, Tuple, Type
 
 from pebble import concurrent
+import sentry_sdk
 from vellum_ee.workflows.display.workflows import BaseWorkflowDisplay
 from vellum_ee.workflows.server.virtual_file_loader import VirtualFileFinder
 
@@ -66,6 +67,7 @@ def _stream_node_wrapper(executor_context: NodeExecutorContext, queue: Queue) ->
         for event in stream_node(executor_context=executor_context):
             queue.put(event)
     except Exception as e:
+        sentry_sdk.set_tag("vellum_trace_id", str(executor_context.trace_id))
         logger.exception(e)
         queue.put(
             VembdaExecutionFulfilledEvent(
@@ -104,6 +106,7 @@ def _stream_workflow_wrapper(executor_context: WorkflowExecutorContext, queue: Q
             ).model_dump(mode="json")
         )
     except Exception as e:
+        sentry_sdk.set_tag("vellum_trace_id", str(executor_context.trace_id))
         logger.exception(e)
         queue.put(
             VembdaExecutionFulfilledEvent(
@@ -209,6 +212,7 @@ def stream_workflow(
             event.body.display_context = display_context
 
         if event.name.endswith("rejected") and event.body.error.code.name == "INTERNAL_ERROR":  # type: ignore
+            sentry_sdk.set_tag("vellum_trace_id", str(executor_context.trace_id))
             logger.exception(event.body.error.message)  # type: ignore
             event.body.error = WorkflowError(  # type: ignore
                 code=WorkflowErrorCode.INTERNAL_ERROR.value, message="Internal Error"  # type: ignore
@@ -458,6 +462,7 @@ def _gather_workflow(context: WorkflowExecutorContext) -> Tuple[BaseWorkflow, st
         )
         return workflow, namespace
     except Exception as e:
+        sentry_sdk.set_tag("vellum_trace_id", str(context.trace_id))
         logger.exception("Failed to initialize Workflow")
         raise WorkflowInitializationException(f"Failed to initialize workflow: {e}") from e
 
@@ -466,5 +471,6 @@ def _gather_display_context(workflow: BaseWorkflow, namespace: str) -> Optional[
     try:
         return BaseWorkflowDisplay.gather_event_display_context(namespace, workflow.__class__)
     except Exception:
+        sentry_sdk.set_tag("vellum_trace_id", "unknown")
         logger.exception("Unable to Parse Workflow Display Context")
         return None
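
The recurring pattern across all five changed files is to tag the current Sentry scope with the trace id (or "unknown" where none is available) immediately before logging the exception, so the resulting Sentry event can be filtered by vellum_trace_id. A minimal sketch of the same pattern in isolation, under stated assumptions (the empty DSN and the uuid4 stand-in for executor_context.trace_id are placeholders; sentry_sdk.init and sentry_sdk.set_tag are real sentry-sdk APIs):

import logging
from uuid import uuid4

import sentry_sdk

# Empty DSN: the SDK initializes but sends nothing, convenient for local runs.
sentry_sdk.init(dsn="")
logger = logging.getLogger(__name__)

def run_with_trace_tag() -> None:
    trace_id = uuid4()  # stand-in for executor_context.trace_id
    try:
        raise RuntimeError("boom")
    except Exception as e:
        # Tag first, then log: sentry-sdk's default logging integration
        # attaches current-scope tags to the event it captures for this
        # logger.exception call.
        sentry_sdk.set_tag("vellum_trace_id", str(trace_id))
        logger.exception(e)

Because set_tag mutates scope state rather than a single event, the order matters: setting the tag after logger.exception would leave the captured event untagged, which is presumably why every call site in this diff places the tag on the line before the log call.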