vellum-ai 1.2.5__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. vellum/__init__.py +8 -0
  2. vellum/client/README.md +1 -1
  3. vellum/client/core/client_wrapper.py +2 -2
  4. vellum/client/reference.md +0 -9
  5. vellum/client/resources/workflow_sandboxes/client.py +0 -12
  6. vellum/client/resources/workflow_sandboxes/raw_client.py +2 -10
  7. vellum/client/types/__init__.py +8 -0
  8. vellum/client/types/deployment_read.py +5 -5
  9. vellum/client/types/slim_deployment_read.py +5 -5
  10. vellum/client/types/slim_workflow_deployment.py +5 -5
  11. vellum/client/types/workflow_deployment_read.py +5 -5
  12. vellum/client/types/workflow_request_audio_input_request.py +30 -0
  13. vellum/client/types/workflow_request_document_input_request.py +30 -0
  14. vellum/client/types/workflow_request_image_input_request.py +30 -0
  15. vellum/client/types/workflow_request_input_request.py +8 -0
  16. vellum/client/types/workflow_request_video_input_request.py +30 -0
  17. vellum/types/workflow_request_audio_input_request.py +3 -0
  18. vellum/types/workflow_request_document_input_request.py +3 -0
  19. vellum/types/workflow_request_image_input_request.py +3 -0
  20. vellum/types/workflow_request_video_input_request.py +3 -0
  21. vellum/workflows/events/types.py +6 -1
  22. vellum/workflows/integrations/tests/test_mcp_service.py +106 -1
  23. vellum/workflows/nodes/__init__.py +2 -0
  24. vellum/workflows/nodes/displayable/__init__.py +2 -0
  25. vellum/workflows/nodes/displayable/web_search_node/__init__.py +3 -0
  26. vellum/workflows/nodes/displayable/web_search_node/node.py +133 -0
  27. vellum/workflows/resolvers/base.py +3 -2
  28. vellum/workflows/resolvers/resolver.py +62 -7
  29. vellum/workflows/resolvers/tests/test_resolver.py +79 -7
  30. vellum/workflows/resolvers/types.py +11 -0
  31. vellum/workflows/runner/runner.py +49 -1
  32. vellum/workflows/state/context.py +41 -7
  33. vellum/workflows/utils/zip.py +46 -0
  34. vellum/workflows/workflows/base.py +10 -0
  35. {vellum_ai-1.2.5.dist-info → vellum_ai-1.3.0.dist-info}/METADATA +1 -1
  36. {vellum_ai-1.2.5.dist-info → vellum_ai-1.3.0.dist-info}/RECORD +43 -31
  37. vellum_cli/tests/test_init.py +7 -24
  38. vellum_cli/tests/test_pull.py +27 -52
  39. vellum_ee/workflows/display/tests/workflow_serialization/generic_nodes/test_attributes_serialization.py +7 -33
  40. vellum_ee/workflows/tests/test_server.py +115 -0
  41. {vellum_ai-1.2.5.dist-info → vellum_ai-1.3.0.dist-info}/LICENSE +0 -0
  42. {vellum_ai-1.2.5.dist-info → vellum_ai-1.3.0.dist-info}/WHEEL +0 -0
  43. {vellum_ai-1.2.5.dist-info → vellum_ai-1.3.0.dist-info}/entry_points.txt +0 -0
vellum/workflows/nodes/displayable/web_search_node/__init__.py
@@ -0,0 +1,3 @@
+ from .node import WebSearchNode
+
+ __all__ = ["WebSearchNode"]
vellum/workflows/nodes/displayable/web_search_node/node.py
@@ -0,0 +1,133 @@
+ import logging
+ from typing import Any, ClassVar, Dict, List, Optional
+
+ from requests import Request, RequestException, Session
+ from requests.exceptions import JSONDecodeError
+
+ from vellum.workflows.errors.types import WorkflowErrorCode
+ from vellum.workflows.exceptions import NodeException
+ from vellum.workflows.nodes.bases import BaseNode
+ from vellum.workflows.outputs import BaseOutputs
+ from vellum.workflows.types.generics import StateType
+
+ logger = logging.getLogger(__name__)
+
+
+ class WebSearchNode(BaseNode[StateType]):
+     """
+     Used to perform web search using SerpAPI.
+
+     query: str - The search query to execute
+     api_key: str - SerpAPI authentication key
+     num_results: int - Number of search results to return (default: 10)
+     location: Optional[str] - Geographic location filter for search
+     """
+
+     query: ClassVar[str] = ""
+     api_key: ClassVar[Optional[str]] = None
+     num_results: ClassVar[int] = 10
+     location: ClassVar[Optional[str]] = None
+
+     class Outputs(BaseOutputs):
+         """
+         The outputs of the WebSearchNode.
+
+         text: str - Concatenated search result snippets with titles
+         urls: List[str] - List of URLs from search results
+         results: List[Dict[str, Any]] - Raw search results from SerpAPI
+         """
+
+         text: str
+         urls: List[str]
+         results: List[Dict[str, Any]]
+
+     def _validate(self) -> None:
+         """Validate node inputs."""
+         if not self.query or not isinstance(self.query, str) or not self.query.strip():
+             raise NodeException(
+                 "Query is required and must be a non-empty string", code=WorkflowErrorCode.INVALID_INPUTS
+             )
+
+         if self.api_key is None:
+             raise NodeException("API key is required", code=WorkflowErrorCode.INVALID_INPUTS)
+
+         if not isinstance(self.num_results, int) or self.num_results <= 0:
+             raise NodeException("num_results must be a positive integer", code=WorkflowErrorCode.INVALID_INPUTS)
+
+     def run(self) -> Outputs:
+         """Run the WebSearchNode to perform web search via SerpAPI."""
+         self._validate()
+
+         api_key_value = self.api_key
+
+         params = {
+             "q": self.query,
+             "api_key": api_key_value,
+             "num": self.num_results,
+             "engine": "google",
+         }
+
+         if self.location:
+             params["location"] = self.location
+
+         headers = {}
+         client_headers = self._context.vellum_client._client_wrapper.get_headers()
+         headers["User-Agent"] = client_headers.get("User-Agent")
+
+         try:
+             prepped = Request(method="GET", url="https://serpapi.com/search", params=params, headers=headers).prepare()
+         except Exception as e:
+             logger.exception("Failed to prepare SerpAPI request")
+             raise NodeException(f"Failed to prepare HTTP request: {e}", code=WorkflowErrorCode.PROVIDER_ERROR) from e
+
+         try:
+             with Session() as session:
+                 response = session.send(prepped, timeout=30)
+         except RequestException as e:
+             logger.exception("SerpAPI request failed")
+             raise NodeException(f"HTTP request failed: {e}", code=WorkflowErrorCode.PROVIDER_ERROR) from e
+
+         if response.status_code == 401:
+             logger.error("SerpAPI authentication failed")
+             raise NodeException("Invalid API key", code=WorkflowErrorCode.INVALID_INPUTS)
+         elif response.status_code == 429:
+             logger.warning("SerpAPI rate limit exceeded")
+             raise NodeException("Rate limit exceeded", code=WorkflowErrorCode.PROVIDER_ERROR)
+         elif response.status_code >= 400:
+             logger.error(f"SerpAPI returned error status: {response.status_code}")
+             raise NodeException(f"SerpAPI error: HTTP {response.status_code}", code=WorkflowErrorCode.PROVIDER_ERROR)
+
+         try:
+             json_response = response.json()
+         except JSONDecodeError as e:
+             logger.exception("Failed to parse SerpAPI response as JSON")
+             raise NodeException(
+                 f"Invalid JSON response from SerpAPI: {e}", code=WorkflowErrorCode.PROVIDER_ERROR
+             ) from e
+
+         if "error" in json_response:
+             error_msg = json_response["error"]
+             logger.error(f"SerpAPI returned error: {error_msg}")
+             raise NodeException(f"SerpAPI error: {error_msg}", code=WorkflowErrorCode.PROVIDER_ERROR)
+
+         organic_results = json_response.get("organic_results", [])
+
+         text_results = []
+         urls = []
+
+         for result in organic_results:
+             title = result.get("title", "")
+             snippet = result.get("snippet", "")
+             link = result.get("link", "")
+
+             if title and snippet:
+                 text_results.append(f"{title}: {snippet}")
+             elif title:
+                 text_results.append(title)
+             elif snippet:
+                 text_results.append(snippet)
+
+             if link:
+                 urls.append(link)
+
+         return self.Outputs(text="\n\n".join(text_results), urls=urls, results=organic_results)
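For orientation only (not part of the diff): a minimal sketch of wiring the new node into a workflow, assuming the usual BaseWorkflow graph/Outputs conventions; the query, API key, and class names below are placeholders.

from vellum.workflows import BaseWorkflow
from vellum.workflows.inputs.base import BaseInputs
from vellum.workflows.nodes.displayable.web_search_node import WebSearchNode
from vellum.workflows.state.base import BaseState


class NewsSearch(WebSearchNode[BaseState]):
    # Placeholder configuration; a real SerpAPI key must be supplied at runtime.
    query = "vellum workflows sdk"
    api_key = "<serpapi-api-key>"
    num_results = 5
    location = "United States"


class NewsSearchWorkflow(BaseWorkflow[BaseInputs, BaseState]):
    graph = NewsSearch

    class Outputs(BaseWorkflow.Outputs):
        # The node exposes text, urls, and results, per the Outputs class above.
        text = NewsSearch.Outputs.text
        urls = NewsSearch.Outputs.urls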
vellum/workflows/resolvers/base.py
@@ -1,8 +1,9 @@
  from abc import ABC, abstractmethod
  from uuid import UUID
- from typing import TYPE_CHECKING, Iterator, Optional, Type
+ from typing import TYPE_CHECKING, Iterator, Optional, Type, Union

  from vellum.workflows.events.workflow import WorkflowEvent
+ from vellum.workflows.resolvers.types import LoadStateResult
  from vellum.workflows.state.base import BaseState

  if TYPE_CHECKING:
@@ -28,5 +29,5 @@ class BaseWorkflowResolver(ABC):
          pass

      @abstractmethod
-     def load_state(self, previous_execution_id: Optional[UUID] = None) -> Optional[BaseState]:
+     def load_state(self, previous_execution_id: Optional[Union[UUID, str]] = None) -> Optional[LoadStateResult]:
          pass
vellum/workflows/resolvers/resolver.py
@@ -1,10 +1,13 @@
  import logging
  from uuid import UUID
- from typing import Iterator, Optional
+ from typing import Iterator, List, Optional, Tuple, Union

+ from vellum.client.types.vellum_span import VellumSpan
+ from vellum.client.types.workflow_execution_initiated_event import WorkflowExecutionInitiatedEvent
  from vellum.workflows.events.workflow import WorkflowEvent
  from vellum.workflows.resolvers.base import BaseWorkflowResolver
- from vellum.workflows.state.base import BaseState, StateMeta
+ from vellum.workflows.resolvers.types import LoadStateResult
+ from vellum.workflows.state.base import BaseState

  logger = logging.getLogger(__name__)

@@ -16,7 +19,42 @@ class VellumResolver(BaseWorkflowResolver):
      def get_state_snapshot_history(self) -> Iterator[BaseState]:
          return iter([])

-     def load_state(self, previous_execution_id: Optional[UUID] = None) -> Optional[BaseState]:
+     def _find_previous_and_root_span(
+         self, execution_id: str, spans: List[VellumSpan]
+     ) -> Tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:
+         previous_trace_id: Optional[str] = None
+         root_trace_id: Optional[str] = None
+         previous_span_id: Optional[str] = None
+         root_span_id: Optional[str] = None
+
+         for span in spans:
+             # Look for workflow execution spans with matching ID first
+             if span.name == "workflow.execution" and span.span_id == execution_id:
+                 # Find the WorkflowExecutionInitiatedEvent in the span's events
+                 initiated_event = next(
+                     (event for event in span.events if isinstance(event, WorkflowExecutionInitiatedEvent)), None
+                 )
+                 if initiated_event:
+                     previous_trace_id = initiated_event.trace_id
+                     previous_span_id = initiated_event.span_id
+                     links = initiated_event.links
+                     if links:
+                         root_span = next((link for link in links if link.type == "ROOT_SPAN"), None)
+                         if root_span:
+                             root_trace_id = root_span.trace_id
+                             root_span_id = root_span.span_context.span_id
+                     else:
+                         # no links means this is the first execution
+                         root_trace_id = initiated_event.trace_id
+                         root_span_id = initiated_event.span_id
+                 break
+
+         return previous_trace_id, root_trace_id, previous_span_id, root_span_id
+
+     def load_state(self, previous_execution_id: Optional[Union[UUID, str]] = None) -> Optional[LoadStateResult]:
+         if isinstance(previous_execution_id, UUID):
+             previous_execution_id = str(previous_execution_id)
+
          if previous_execution_id is None:
              return None

@@ -26,17 +64,34 @@ class VellumResolver(BaseWorkflowResolver):

          client = self._context.vellum_client
          response = client.workflow_executions.retrieve_workflow_execution_detail(
-             execution_id=str(previous_execution_id),
+             execution_id=previous_execution_id,
          )

          if response.state is None:
              return None

-         meta = StateMeta.model_validate(response.state.pop("meta"))
+         previous_trace_id, root_trace_id, previous_span_id, root_span_id = self._find_previous_and_root_span(
+             previous_execution_id, response.spans
+         )
+
+         if previous_trace_id is None or root_trace_id is None or previous_span_id is None or root_span_id is None:
+             logger.warning("Could not find required execution events for state loading")
+             return None
+
+         if "meta" in response.state:
+             response.state.pop("meta")

          if self._workflow_class:
              state_class = self._workflow_class.get_state_class()
-             return state_class(**response.state, meta=meta)
+             state = state_class(**response.state)
          else:
              logger.warning("No workflow class registered, falling back to BaseState")
-             return BaseState(**response.state, meta=meta)
+             state = BaseState(**response.state)
+
+         return LoadStateResult(
+             state=state,
+             previous_trace_id=previous_trace_id,
+             previous_span_id=previous_span_id,
+             root_trace_id=root_trace_id,
+             root_span_id=root_span_id,
+         )
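As a rough usage sketch of this load_state flow (mirroring the test further below; the workflow definition and execution ID are illustrative), instantiating the workflow with the resolver registers the workflow class and client context, after which the resolver can be queried by execution ID:

from uuid import uuid4

from vellum.workflows import BaseWorkflow
from vellum.workflows.inputs.base import BaseInputs
from vellum.workflows.resolvers.resolver import VellumResolver
from vellum.workflows.state.base import BaseState
from vellum.workflows.state.context import WorkflowContext
from vellum.workflows.vellum_client import create_vellum_client


class MyWorkflow(BaseWorkflow[BaseInputs, BaseState]):
    pass  # a real workflow would define graph and Outputs


resolver = VellumResolver()
context = WorkflowContext(vellum_client=create_vellum_client())  # assumes Vellum API credentials are configured
MyWorkflow(context=context, resolvers=[resolver])  # registers the workflow class with the resolver

result = resolver.load_state(previous_execution_id=uuid4())  # pass the UUID/str of a real prior execution
if result is not None:
    state = result.state  # rebuilt workflow state (with fresh meta IDs)
    # Identifiers the runner uses to emit PREVIOUS_SPAN / ROOT_SPAN links on the next run:
    links = (result.previous_trace_id, result.previous_span_id, result.root_trace_id, result.root_span_id)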
vellum/workflows/resolvers/tests/test_resolver.py
@@ -2,10 +2,18 @@ from datetime import datetime
  from unittest.mock import Mock
  from uuid import uuid4

+ from vellum.client.types.span_link import SpanLink
+ from vellum.client.types.vellum_code_resource_definition import VellumCodeResourceDefinition
  from vellum.client.types.workflow_execution_detail import WorkflowExecutionDetail
+ from vellum.client.types.workflow_execution_initiated_body import WorkflowExecutionInitiatedBody
+ from vellum.client.types.workflow_execution_initiated_event import WorkflowExecutionInitiatedEvent
+ from vellum.client.types.workflow_execution_span import WorkflowExecutionSpan
+ from vellum.client.types.workflow_execution_span_attributes import WorkflowExecutionSpanAttributes
+ from vellum.client.types.workflow_parent_context import WorkflowParentContext
  from vellum.workflows import BaseWorkflow
  from vellum.workflows.inputs.base import BaseInputs
  from vellum.workflows.resolvers.resolver import VellumResolver
+ from vellum.workflows.resolvers.types import LoadStateResult
  from vellum.workflows.state.base import BaseState, NodeExecutionCache
  from vellum.workflows.state.context import WorkflowContext

@@ -14,6 +22,7 @@ def test_load_state_with_context_success():
      """Test load_state successfully loads state when context and client are available."""
      resolver = VellumResolver()
      execution_id = uuid4()
+     root_execution_id = uuid4()

      class TestState(BaseState):
          test_key: str = "test_value"
@@ -22,12 +31,14 @@ def test_load_state_with_context_success():
          pass

      # GIVEN a state dictionary that matches what the resolver expects
+     prev_id = str(uuid4())
+     prev_span_id = str(uuid4())
      state_dict = {
          "test_key": "test_value",
          "meta": {
              "workflow_definition": "MockWorkflow",
-             "id": str(uuid4()),
-             "span_id": str(uuid4()),
+             "id": prev_id,
+             "span_id": prev_span_id,
              "updated_ts": datetime.now().isoformat(),
              "workflow_inputs": BaseInputs(),
              "external_inputs": {},
@@ -37,22 +48,83 @@ def test_load_state_with_context_success():
          },
      }

+     mock_workflow_definition = VellumCodeResourceDefinition(
+         name="TestWorkflow", module=["test", "module"], id=str(uuid4())
+     )
+
+     mock_body = WorkflowExecutionInitiatedBody(workflow_definition=mock_workflow_definition, inputs={})
+
+     previous_trace_id = str(uuid4())
+     root_trace_id = str(uuid4())
+
+     previous_invocation = WorkflowExecutionInitiatedEvent(
+         id=str(uuid4()),
+         timestamp=datetime.now(),
+         trace_id=previous_trace_id,
+         span_id=str(execution_id),
+         body=mock_body,
+         links=[
+             SpanLink(
+                 trace_id=previous_trace_id,
+                 type="PREVIOUS_SPAN",
+                 span_context=WorkflowParentContext(workflow_definition=mock_workflow_definition, span_id=str(uuid4())),
+             ),
+             SpanLink(
+                 trace_id=root_trace_id,
+                 type="ROOT_SPAN",
+                 span_context=WorkflowParentContext(
+                     workflow_definition=mock_workflow_definition, span_id=str(root_execution_id)
+                 ),
+             ),
+         ],
+     )
+
+     root_invocation = WorkflowExecutionInitiatedEvent(
+         id=str(uuid4()),
+         timestamp=datetime.now(),
+         trace_id=root_trace_id,
+         span_id=str(root_execution_id),
+         body=mock_body,
+         links=None,  # Root invocation has no links
+     )
+
+     mock_span = WorkflowExecutionSpan(
+         span_id=str(execution_id),  # Use the actual execution_id
+         start_ts=datetime.now(),
+         end_ts=datetime.now(),
+         attributes=WorkflowExecutionSpanAttributes(label="Test Workflow", workflow_id=str(uuid4())),
+         events=[previous_invocation, root_invocation],
+     )
+
      mock_response = WorkflowExecutionDetail(
-         span_id="test-span-id", start=datetime.now(), inputs=[], outputs=[], spans=[], state=state_dict
+         span_id="test-span-id", start=datetime.now(), inputs=[], outputs=[], spans=[mock_span], state=state_dict
      )

      mock_client = Mock()
      mock_client.workflow_executions.retrieve_workflow_execution_detail.return_value = mock_response

-     # AND context with the test workflow class
+     # AND context with the test workflow class is set up
      context = WorkflowContext(vellum_client=mock_client)
      TestWorkflow(context=context, resolvers=[resolver])

+     # WHEN load_state is called
      result = resolver.load_state(previous_execution_id=execution_id)

-     # THEN should return an instance of TestWorkflow.State, not BaseState
-     assert isinstance(result, TestState)
-     assert result.test_key == "test_value"
+     # THEN should return LoadStateResult with state and span link info
+     assert isinstance(result, LoadStateResult)
+     assert result.state is not None
+     assert isinstance(result.state, TestState)
+     assert result.state.test_key == "test_value"
+
+     # AND the new state should have different meta IDs than those provided in the loaded state_dict
+     assert str(result.state.meta.id) != prev_id
+     assert str(result.state.meta.span_id) != prev_span_id
+
+     # AND should have span link info
+     assert result.previous_trace_id == previous_invocation.trace_id
+     assert result.previous_span_id == previous_invocation.span_id
+     assert result.root_trace_id == root_invocation.trace_id
+     assert result.root_span_id == root_invocation.span_id

      mock_client.workflow_executions.retrieve_workflow_execution_detail.assert_called_once_with(
          execution_id=str(execution_id)
vellum/workflows/resolvers/types.py
@@ -0,0 +1,11 @@
+ from typing import NamedTuple
+
+ from vellum.workflows.state.base import BaseState
+
+
+ class LoadStateResult(NamedTuple):
+     state: BaseState
+     previous_trace_id: str
+     previous_span_id: str
+     root_trace_id: str
+     root_span_id: str
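Because LoadStateResult is a NamedTuple, it can be unpacked positionally as well as accessed by field name; a small illustrative snippet (not from the package, and assuming BaseState is constructible with defaults):

from vellum.workflows.resolvers.types import LoadStateResult
from vellum.workflows.state.base import BaseState

result = LoadStateResult(
    state=BaseState(),  # assumption: a default-constructed state stands in for a loaded one
    previous_trace_id="trace-prev",
    previous_span_id="span-prev",
    root_trace_id="trace-root",
    root_span_id="span-root",
)
state, prev_trace, prev_span, root_trace, root_span = result  # positional unpacking
assert result.root_span_id == root_span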
vellum/workflows/runner/runner.py
@@ -46,7 +46,7 @@ from vellum.workflows.events.node import (
      NodeExecutionRejectedBody,
      NodeExecutionStreamingBody,
  )
- from vellum.workflows.events.types import BaseEvent, NodeParentContext, ParentContext, WorkflowParentContext
+ from vellum.workflows.events.types import BaseEvent, NodeParentContext, ParentContext, SpanLink, WorkflowParentContext
  from vellum.workflows.events.workflow import (
      WorkflowEventStream,
      WorkflowExecutionFulfilledBody,
@@ -99,6 +99,7 @@ class WorkflowRunner(Generic[StateType]):
          state: Optional[StateType] = None,
          entrypoint_nodes: Optional[RunFromNodeArg] = None,
          external_inputs: Optional[ExternalInputsArg] = None,
+         previous_execution_id: Optional[Union[str, UUID]] = None,
          cancel_signal: Optional[ThreadingEvent] = None,
          node_output_mocks: Optional[MockNodeExecutionArg] = None,
          max_concurrency: Optional[int] = None,
@@ -110,6 +111,7 @@ class WorkflowRunner(Generic[StateType]):
          self.workflow = workflow
          self._is_resuming = False
          self._should_emit_initial_state = True
+         self._span_link_info: Optional[Tuple[str, str, str, str]] = None
          if entrypoint_nodes:
              if len(list(entrypoint_nodes)) > 1:
                  raise ValueError("Cannot resume from multiple nodes")
@@ -140,6 +142,28 @@ class WorkflowRunner(Generic[StateType]):
                  if issubclass(ei.inputs_class.__parent_class__, BaseNode)
              ]
              self._is_resuming = True
+         elif previous_execution_id:
+             for resolver in self.workflow.resolvers:
+                 try:
+                     load_state_result = resolver.load_state(previous_execution_id)
+                     if load_state_result is not None:
+                         state_class = self.workflow.get_state_class()
+                         if isinstance(load_state_result.state, state_class):
+                             self._initial_state = load_state_result.state
+                             normalized_inputs = deepcopy(inputs) if inputs else self.workflow.get_default_inputs()
+                             self._initial_state.meta.workflow_inputs = normalized_inputs
+                             self._initial_state.meta.workflow_definition = self.workflow.__class__
+                             self._span_link_info = (
+                                 load_state_result.previous_trace_id,
+                                 load_state_result.previous_span_id,
+                                 load_state_result.root_trace_id,
+                                 load_state_result.root_span_id,
+                             )
+                         break
+                 except Exception as e:
+                     logger.warning(f"Failed to load state from resolver {type(resolver).__name__}: {e}")
+                     continue
+             self._entrypoints = self.workflow.get_entrypoints()
          else:
              normalized_inputs = deepcopy(inputs) if inputs else self.workflow.get_default_inputs()
              if state:
@@ -627,6 +651,29 @@ class WorkflowRunner(Generic[StateType]):
          return None

      def _initiate_workflow_event(self) -> WorkflowExecutionInitiatedEvent:
+         links: Optional[List[SpanLink]] = None
+
+         if self._span_link_info:
+             previous_trace_id, previous_span_id, root_trace_id, root_span_id = self._span_link_info
+             links = [
+                 SpanLink(
+                     trace_id=previous_trace_id,
+                     type="PREVIOUS_SPAN",
+                     span_context=WorkflowParentContext(
+                         workflow_definition=self.workflow.__class__,
+                         span_id=previous_span_id,
+                     ),
+                 ),
+                 SpanLink(
+                     trace_id=root_trace_id,
+                     type="ROOT_SPAN",
+                     span_context=WorkflowParentContext(
+                         workflow_definition=self.workflow.__class__,
+                         span_id=root_span_id,
+                     ),
+                 ),
+             ]
+
          return WorkflowExecutionInitiatedEvent(
              trace_id=self._execution_context.trace_id,
              span_id=self._initial_state.meta.span_id,
@@ -636,6 +683,7 @@ class WorkflowRunner(Generic[StateType]):
                  initial_state=deepcopy(self._initial_state) if self._should_emit_initial_state else None,
              ),
              parent=self._execution_context.parent_context,
+             links=links,
          )

      def _stream_workflow_event(self, output: BaseOutput) -> WorkflowExecutionStreamingEvent:
vellum/workflows/state/context.py
@@ -1,15 +1,16 @@
  from functools import cached_property
  from queue import Queue
- from uuid import uuid4
+ from uuid import UUID, uuid4
  from typing import TYPE_CHECKING, Dict, List, Optional, Type

- from vellum import Vellum
+ from vellum import Vellum, __version__
  from vellum.workflows.context import ExecutionContext, get_execution_context, set_execution_context
  from vellum.workflows.events.types import ExternalParentContext
  from vellum.workflows.nodes.mocks import MockNodeExecution, MockNodeExecutionArg
  from vellum.workflows.outputs.base import BaseOutputs
  from vellum.workflows.references.constant import ConstantValueReference
  from vellum.workflows.utils.uuids import generate_workflow_deployment_prefix
+ from vellum.workflows.utils.zip import extract_zip_files
  from vellum.workflows.vellum_client import create_vellum_client

  if TYPE_CHECKING:
@@ -40,6 +41,9 @@ class WorkflowContext:

          if self._execution_context.parent_context is None:
              self._execution_context.parent_context = ExternalParentContext(span_id=uuid4())
+         # Only generate a new trace_id if one wasn't explicitly provided (i.e., if it's the default zero UUID)
+         if self._execution_context.trace_id == UUID("00000000-0000-0000-0000-000000000000"):
+             self._execution_context.trace_id = uuid4()
          # Propagate the updated context back to the global execution context
          set_execution_context(self._execution_context)

@@ -156,23 +160,53 @@ class WorkflowContext:
          Returns:
              BaseWorkflow instance if found, None otherwise
          """
-         if not self.generated_files or not self.namespace:
+         if not self._generated_files or not self._namespace:
              return None

          expected_prefix = generate_workflow_deployment_prefix(deployment_name, release_tag)

-         workflow_file_key = f"{expected_prefix}/workflow.py"
-         if workflow_file_key not in self.generated_files:
-             return None
+         try:
+             from vellum.workflows.workflows.base import BaseWorkflow
+
+             WorkflowClass = BaseWorkflow.load_from_module(f"{self.namespace}.{expected_prefix}")
+             WorkflowClass.is_dynamic = True
+             workflow_instance = WorkflowClass(context=WorkflowContext.create_from(self), parent_state=state)
+             return workflow_instance
+         except Exception:
+             pass

          try:
+             major_version = __version__.split(".")[0]
+             version_range = f">={major_version}.0.0,<={__version__}"
+
+             response = self.vellum_client.workflows.pull(
+                 deployment_name,
+                 release_tag=release_tag,
+                 version=version_range,
+                 request_options={"additional_headers": {"X-Vellum-Always-Success": "true"}},
+             )
+
+             if isinstance(response, dict) and response.get("success") is False:
+                 return None
+
+             zip_bytes = b"".join(response)
+             pulled_files = extract_zip_files(zip_bytes)
+
+             for file_name, content in pulled_files.items():
+                 prefixed_file_name = f"{expected_prefix}/{file_name}"
+                 self._generated_files[prefixed_file_name] = content
+
              from vellum.workflows.workflows.base import BaseWorkflow

              WorkflowClass = BaseWorkflow.load_from_module(f"{self.namespace}.{expected_prefix}")
+             WorkflowClass.is_dynamic = True
              workflow_instance = WorkflowClass(context=WorkflowContext.create_from(self), parent_state=state)
              return workflow_instance
+
          except Exception:
-             return None
+             pass
+
+         return None

      @classmethod
      def create_from(cls, context):
vellum/workflows/utils/zip.py
@@ -0,0 +1,46 @@
+ import io
+ import zipfile
+
+
+ def zip_file_map(file_map: dict[str, str]) -> bytes:
+     """
+     Create a zip file from a dictionary of file names to content.
+
+     Args:
+         file_map: Dictionary mapping file names to their content
+
+     Returns:
+         Bytes representing the zip file
+     """
+     zip_buffer = io.BytesIO()
+
+     with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file:
+         for filename, content in file_map.items():
+             zip_file.writestr(filename, content)
+
+     zip_bytes = zip_buffer.getvalue()
+     zip_buffer.close()
+
+     return zip_bytes
+
+
+ def extract_zip_files(zip_bytes: bytes) -> dict[str, str]:
+     """
+     Extract files from a zip archive.
+
+     Args:
+         zip_bytes: Bytes representing the zip file
+
+     Returns:
+         Dictionary mapping file names to their content
+     """
+     zip_buffer = io.BytesIO(zip_bytes)
+     extracted_files = {}
+
+     with zipfile.ZipFile(zip_buffer) as zip_file:
+         for file_name in zip_file.namelist():
+             with zip_file.open(file_name) as source:
+                 content = source.read().decode("utf-8")
+                 extracted_files[file_name] = content
+
+     return extracted_files
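A quick round-trip check of the two helpers above (illustrative only; the file names and contents are arbitrary):

from vellum.workflows.utils.zip import extract_zip_files, zip_file_map

files = {"workflow.py": "print('hello')\n", "nodes/__init__.py": ""}
archive = zip_file_map(files)  # bytes of a ZIP_DEFLATED archive
assert extract_zip_files(archive) == files  # lossless for UTF-8 text content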
vellum/workflows/workflows/base.py
@@ -364,6 +364,7 @@ class BaseWorkflow(Generic[InputsType, StateType], metaclass=_BaseWorkflowMeta):
          state: Optional[StateType] = None,
          entrypoint_nodes: Optional[RunFromNodeArg] = None,
          external_inputs: Optional[ExternalInputsArg] = None,
+         previous_execution_id: Optional[Union[str, UUID]] = None,
          cancel_signal: Optional[ThreadingEvent] = None,
          node_output_mocks: Optional[MockNodeExecutionArg] = None,
          max_concurrency: Optional[int] = None,
@@ -389,6 +390,9 @@ class BaseWorkflow(Generic[InputsType, StateType], metaclass=_BaseWorkflowMeta):
          external_inputs: Optional[ExternalInputsArg] = None
              External inputs to pass to the Workflow. Useful for providing human-in-the-loop behavior to the Workflow.

+         previous_execution_id: Optional[Union[str, UUID]] = None
+             The execution ID of the previous execution to resume from.
+
          cancel_signal: Optional[ThreadingEvent] = None
              A threading event that can be used to cancel the Workflow Execution.

@@ -408,6 +412,7 @@ class BaseWorkflow(Generic[InputsType, StateType], metaclass=_BaseWorkflowMeta):
              state=state,
              entrypoint_nodes=entrypoint_nodes,
              external_inputs=external_inputs,
+             previous_execution_id=previous_execution_id,
              cancel_signal=cancel_signal,
              node_output_mocks=node_output_mocks,
              max_concurrency=max_concurrency,
@@ -476,6 +481,7 @@ class BaseWorkflow(Generic[InputsType, StateType], metaclass=_BaseWorkflowMeta):
          state: Optional[StateType] = None,
          entrypoint_nodes: Optional[RunFromNodeArg] = None,
          external_inputs: Optional[ExternalInputsArg] = None,
+         previous_execution_id: Optional[Union[str, UUID]] = None,
          cancel_signal: Optional[ThreadingEvent] = None,
          node_output_mocks: Optional[MockNodeExecutionArg] = None,
          max_concurrency: Optional[int] = None,
@@ -502,6 +508,9 @@ class BaseWorkflow(Generic[InputsType, StateType], metaclass=_BaseWorkflowMeta):
          external_inputs: Optional[ExternalInputsArg] = None
              External inputs to pass to the Workflow. Useful for providing human-in-the-loop behavior to the Workflow.

+         previous_execution_id: Optional[Union[str, UUID]] = None
+             The execution ID of the previous execution to resume from.
+
          cancel_signal: Optional[ThreadingEvent] = None
              A threading event that can be used to cancel the Workflow Execution.

@@ -522,6 +531,7 @@ class BaseWorkflow(Generic[InputsType, StateType], metaclass=_BaseWorkflowMeta):
              state=state,
              entrypoint_nodes=entrypoint_nodes,
              external_inputs=external_inputs,
+             previous_execution_id=previous_execution_id,
              cancel_signal=cancel_signal,
              node_output_mocks=node_output_mocks,
              max_concurrency=max_concurrency,
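Putting the runner, resolver, and BaseWorkflow changes together, resuming from a prior execution might look like the sketch below (not part of the diff); the workflow class and execution ID are placeholders, and a VellumResolver must be registered for the state to actually be loaded.

from vellum.workflows import BaseWorkflow
from vellum.workflows.inputs.base import BaseInputs
from vellum.workflows.resolvers.resolver import VellumResolver
from vellum.workflows.state.base import BaseState


class MyWorkflow(BaseWorkflow[BaseInputs, BaseState]):
    pass  # a real workflow would define graph and Outputs


workflow = MyWorkflow(resolvers=[VellumResolver()])

previous_execution_id = "<previous-workflow-execution-id>"  # str or UUID both accepted
final_event = workflow.run(previous_execution_id=previous_execution_id)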