vellum-workflow-server 0.14.72.post4__py3-none-any.whl → 0.14.72.post6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of vellum-workflow-server might be problematic. See the package's registry listing for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: vellum-workflow-server
3
- Version: 0.14.72.post4
3
+ Version: 0.14.72.post6
4
4
  Summary:
5
5
  License: AGPL
6
6
  Requires-Python: >=3.9.0,<4
@@ -6,14 +6,14 @@ workflow_server/api/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
6
6
  workflow_server/api/tests/test_input_display_mapping.py,sha256=drBZqMudFyB5wgiUOcMgRXz7E7ge-Qgxbstw4E4f0zE,2211
7
7
  workflow_server/api/tests/test_workflow_view.py,sha256=2nscM_QsYPHkkTG8_JhNbE2LmGL5FQKXEtaLLjXouw0,14591
8
8
  workflow_server/api/tests/test_workflow_view_stream_workflow_route.py,sha256=wmeH8oGo0lKx3YzEJQ9nGjw7adqQD0EL7-xGLPgYWqc,24872
9
- workflow_server/api/workflow_view.py,sha256=9ZU2cziP-Fe-wFezanesjau3eTDaHNu2sO9iMuKR4Xw,18247
9
+ workflow_server/api/workflow_view.py,sha256=x9SSsWCzLuwXBhLJMc-vDMtmP626O6tw1AGh6KII_lc,17697
10
10
  workflow_server/code_exec_runner.py,sha256=p6lGaEGAfMFCxIpOJtH9KTvkffCDsJ3bbpmHdCGWquM,3265
11
11
  workflow_server/config.py,sha256=K5Tavm7wiqCZt0RWWue7zzb8N6e8aWnFOTNlBqEJPcI,1330
12
12
  workflow_server/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
13
13
  workflow_server/core/cancel_workflow.py,sha256=Ffkc3mzmrdMEUcD-sHfEhX4IwVrka-E--SxKA1dUfIU,2185
14
14
  workflow_server/core/events.py,sha256=24MA66DVQuaLJJcZrS8IL1Zq4Ohi9CoouKZ5VgoH3Cs,1402
15
15
  workflow_server/core/executor.py,sha256=-FR91th7vp_v-R_Mo3NVS5GcsmD7iPo3D18Xi-bUrHo,18017
16
- workflow_server/core/workflow_executor_context.py,sha256=JHGlrEaf7IkCJjrAPMo64M7X0aiwWXe6exwXwxhwow4,1298
16
+ workflow_server/core/workflow_executor_context.py,sha256=a-v48GJbOWUh4JIf_bNwDX-BvfKkg4xwRSPEyRVQmp4,1373
17
17
  workflow_server/server.py,sha256=QBU12AaAfAgLqfCDBd24qIJl_mbheiq0-hfcWV7rZM4,1234
18
18
  workflow_server/start.py,sha256=DgtQhuCLc07BIWyJPLPZKZsQ8jwEFsvvfIo7MdwVrpw,1998
19
19
  workflow_server/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -26,7 +26,7 @@ workflow_server/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMp
26
26
  workflow_server/utils/tests/test_system_utils.py,sha256=MdBxI9gxUOpR_JBAHpEz6dGFY6JjxhMSM2oExpqFvNA,4314
27
27
  workflow_server/utils/tests/test_utils.py,sha256=qwK5Rmy3RQyjtlUrYAuGuDlBeRzZKsf1yS-y2IpUizQ,6452
28
28
  workflow_server/utils/utils.py,sha256=Wqqn-1l2ugkGgy5paWWdt0AVxAyPMQCYcnRSSOMjXlA,4355
29
- vellum_workflow_server-0.14.72.post4.dist-info/METADATA,sha256=9Isp-DKUVQduS6Sadl5OqHFPoiy4Ix9tJG8JqWQGfAY,2243
30
- vellum_workflow_server-0.14.72.post4.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
31
- vellum_workflow_server-0.14.72.post4.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
32
- vellum_workflow_server-0.14.72.post4.dist-info/RECORD,,
29
+ vellum_workflow_server-0.14.72.post6.dist-info/METADATA,sha256=pmr1HyQdJhIDgb7E0Qwu5YtfYqjXq9U1RAhfNqE_JyI,2243
30
+ vellum_workflow_server-0.14.72.post6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
31
+ vellum_workflow_server-0.14.72.post6.dist-info/entry_points.txt,sha256=uB_0yPkr7YV6RhEXzvFReUM8P4OQBlVXD6TN6eb9-oc,277
32
+ vellum_workflow_server-0.14.72.post6.dist-info/RECORD,,
@@ -398,7 +398,7 @@ def get_version_route() -> tuple[dict, int]:
398
398
  node_display_class = get_node_display_class(obj)
399
399
  exec_config_raw = node_display_class().serialize(WorkflowDisplayContext())
400
400
  exec_config = cast(Dict[str, Any], exec_config_raw)
401
- module = exec_config["definition"]["module"]
401
+ config_module = exec_config["definition"]["module"]
402
402
  label = exec_config["label"]
403
403
  description = (
404
404
  exec_config["display_data"]["comment"]["value"]
@@ -408,7 +408,7 @@ def get_version_route() -> tuple[dict, int]:
408
408
  nodes.append(
409
409
  {
410
410
  "id": str(uuid4()),
411
- "module": module,
411
+ "module": config_module,
412
412
  "name": obj.__name__,
413
413
  "label": pascal_to_title_case(label),
414
414
  "description": description,
@@ -428,16 +428,9 @@ def get_version_route() -> tuple[dict, int]:
428
428
 
429
429
 
430
430
  def get_workflow_request_context(data: dict) -> WorkflowExecutorContext:
431
- # not sure if this is the filter we want to pass forward?
432
- trace_id = uuid4()
433
- if "execution_context" in data and isinstance(data["execution_context"], dict):
434
- if "trace_id" in data["execution_context"]:
435
- trace_id = data["execution_context"]["trace_id"]
436
-
437
431
  context_data = {
438
432
  **data,
439
433
  "inputs": convert_json_inputs_to_vellum(data.get("inputs") or []),
440
- "trace_id": trace_id,
441
434
  "request_start_time": time.time_ns(),
442
435
  }
443
436
 
@@ -445,15 +438,9 @@ def get_workflow_request_context(data: dict) -> WorkflowExecutorContext:
445
438
 
446
439
 
447
440
  def get_node_request_context(data: dict) -> NodeExecutorContext:
448
- trace_id = uuid4()
449
- if "execution_context" in data and isinstance(data["execution_context"], dict):
450
- if "trace_id" in data["execution_context"]:
451
- trace_id = data["execution_context"]["trace_id"]
452
-
453
441
  context_data = {
454
442
  **data,
455
443
  "inputs": convert_json_inputs_to_vellum(data["inputs"]),
456
- "trace_id": trace_id,
457
444
  "request_start_time": time.time_ns(),
458
445
  }
459
446
 
@@ -15,7 +15,6 @@ class BaseExecutorContext(UniversalBaseModel):
15
15
  files: dict[str, str]
16
16
  environment_api_key: str
17
17
  execution_id: UUID
18
- trace_id: UUID
19
18
  module: str
20
19
  execution_context: ExecutionContext = field(default_factory=ExecutionContext)
21
20
  request_start_time: int
@@ -28,6 +27,10 @@ class BaseExecutorContext(UniversalBaseModel):
28
27
  def container_overhead_latency(self) -> int:
29
28
  return self.stream_start_time - self.request_start_time if self.stream_start_time else -1
30
29
 
30
+ @property
31
+ def trace_id(self) -> UUID:
32
+ return self.execution_context.trace_id
33
+
31
34
  def __hash__(self) -> int:
32
35
  # do we think we need anything else for a unique hash for caching?
33
36
  return hash(str(self.execution_id))