vellum-ai 0.10.6__py3-none-any.whl → 0.10.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. vellum/__init__.py +2 -0
  2. vellum/client/README.md +7 -52
  3. vellum/client/__init__.py +16 -136
  4. vellum/client/core/client_wrapper.py +1 -1
  5. vellum/client/resources/ad_hoc/client.py +14 -104
  6. vellum/client/resources/metric_definitions/client.py +113 -0
  7. vellum/client/resources/test_suites/client.py +8 -16
  8. vellum/client/resources/workflows/client.py +0 -32
  9. vellum/client/types/__init__.py +2 -0
  10. vellum/client/types/metric_definition_history_item.py +39 -0
  11. vellum/types/metric_definition_history_item.py +3 -0
  12. vellum/workflows/events/node.py +36 -3
  13. vellum/workflows/events/tests/test_event.py +89 -9
  14. vellum/workflows/events/types.py +1 -1
  15. vellum/workflows/nodes/core/inline_subworkflow_node/node.py +1 -0
  16. vellum/workflows/nodes/core/templating_node/node.py +5 -0
  17. vellum/workflows/nodes/displayable/api_node/node.py +1 -1
  18. vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py +1 -2
  19. vellum/workflows/nodes/displayable/code_execution_node/node.py +1 -2
  20. vellum/workflows/nodes/displayable/code_execution_node/utils.py +13 -2
  21. vellum/workflows/nodes/displayable/conditional_node/node.py +2 -2
  22. vellum/workflows/nodes/displayable/inline_prompt_node/node.py +10 -3
  23. vellum/workflows/nodes/displayable/prompt_deployment_node/node.py +6 -1
  24. vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py +1 -2
  25. vellum/workflows/nodes/displayable/tests/test_text_prompt_deployment_node.py +1 -2
  26. vellum/workflows/ports/node_ports.py +2 -2
  27. vellum/workflows/ports/port.py +14 -0
  28. vellum/workflows/references/__init__.py +2 -0
  29. vellum/workflows/runner/runner.py +49 -8
  30. vellum/workflows/runner/types.py +1 -3
  31. vellum/workflows/state/encoder.py +2 -1
  32. vellum/workflows/types/__init__.py +5 -0
  33. vellum/workflows/types/tests/test_utils.py +6 -3
  34. vellum/workflows/types/utils.py +3 -0
  35. {vellum_ai-0.10.6.dist-info → vellum_ai-0.10.8.dist-info}/METADATA +1 -1
  36. {vellum_ai-0.10.6.dist-info → vellum_ai-0.10.8.dist-info}/RECORD +49 -47
  37. vellum_cli/__init__.py +23 -4
  38. vellum_cli/pull.py +28 -13
  39. vellum_cli/tests/test_pull.py +45 -2
  40. vellum_ee/workflows/display/nodes/base_node_display.py +1 -1
  41. vellum_ee/workflows/display/nodes/vellum/code_execution_node.py +17 -2
  42. vellum_ee/workflows/display/nodes/vellum/final_output_node.py +4 -2
  43. vellum_ee/workflows/display/nodes/vellum/map_node.py +20 -48
  44. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_conditional_node_serialization.py +5 -16
  45. vellum_ee/workflows/display/tests/workflow_serialization/test_basic_map_node_serialization.py +11 -8
  46. vellum_ee/workflows/display/utils/vellum.py +3 -2
  47. {vellum_ai-0.10.6.dist-info → vellum_ai-0.10.8.dist-info}/LICENSE +0 -0
  48. {vellum_ai-0.10.6.dist-info → vellum_ai-0.10.8.dist-info}/WHEEL +0 -0
  49. {vellum_ai-0.10.6.dist-info → vellum_ai-0.10.8.dist-info}/entry_points.txt +0 -0
@@ -232,25 +232,21 @@ class TestSuitesClient:
     api_key="YOUR_API_KEY",
 )
 response = client.test_suites.test_suite_test_cases_bulk(
-    id="string",
+    id="id",
     request=[
         TestSuiteTestCaseCreateBulkOperationRequest(
-            id="string",
+            id="id",
             data=CreateTestSuiteTestCaseRequest(
-                label="string",
                 input_values=[
                     NamedTestCaseStringVariableValueRequest(
-                        value="string",
-                        name="string",
+                        name="name",
                     )
                 ],
                 evaluation_values=[
                     NamedTestCaseStringVariableValueRequest(
-                        value="string",
-                        name="string",
+                        name="name",
                     )
                 ],
-                external_id="string",
             ),
         )
     ],
@@ -571,25 +567,21 @@ class AsyncTestSuitesClient:
 
 
 async def main() -> None:
     response = await client.test_suites.test_suite_test_cases_bulk(
-        id="string",
+        id="id",
         request=[
             TestSuiteTestCaseCreateBulkOperationRequest(
-                id="string",
+                id="id",
                 data=CreateTestSuiteTestCaseRequest(
-                    label="string",
                     input_values=[
                         NamedTestCaseStringVariableValueRequest(
-                            value="string",
-                            name="string",
+                            name="name",
                         )
                     ],
                     evaluation_values=[
                         NamedTestCaseStringVariableValueRequest(
-                            value="string",
-                            name="string",
+                            name="name",
                        )
                     ],
-                    external_id="string",
                 ),
             )
         ],
@@ -47,18 +47,6 @@ class WorkflowsClient:
     ------
     typing.Iterator[bytes]
 
-
-    Examples
-    --------
-    from vellum import Vellum
-
-    client = Vellum(
-        api_key="YOUR_API_KEY",
-    )
-    client.workflows.pull(
-        id="string",
-        format="json",
-    )
     """
     with self._client_wrapper.httpx_client.stream(
         f"v1/workflows/{jsonable_encoder(id)}/pull",
@@ -196,26 +184,6 @@ class AsyncWorkflowsClient:
     ------
     typing.AsyncIterator[bytes]
 
-
-    Examples
-    --------
-    import asyncio
-
-    from vellum import AsyncVellum
-
-    client = AsyncVellum(
-        api_key="YOUR_API_KEY",
-    )
-
-
-    async def main() -> None:
-        await client.workflows.pull(
-            id="string",
-            format="json",
-        )
-
-
-    asyncio.run(main())
     """
     async with self._client_wrapper.httpx_client.stream(
         f"v1/workflows/{jsonable_encoder(id)}/pull",
@@ -197,6 +197,7 @@ from .metadata_filter_rule_combinator import MetadataFilterRuleCombinator
 from .metadata_filter_rule_request import MetadataFilterRuleRequest
 from .metadata_filters_request import MetadataFiltersRequest
 from .metric_definition_execution import MetricDefinitionExecution
+from .metric_definition_history_item import MetricDefinitionHistoryItem
 from .metric_definition_input import MetricDefinitionInput
 from .metric_node_result import MetricNodeResult
 from .ml_model_read import MlModelRead
@@ -685,6 +686,7 @@ __all__ = [
     "MetadataFilterRuleRequest",
     "MetadataFiltersRequest",
     "MetricDefinitionExecution",
+    "MetricDefinitionHistoryItem",
     "MetricDefinitionInput",
     "MetricNodeResult",
     "MlModelRead",
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.pydantic_utilities import UniversalBaseModel
+from .array_vellum_value import ArrayVellumValue
+import pydantic
+import typing
+from .vellum_variable import VellumVariable
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class MetricDefinitionHistoryItem(UniversalBaseModel):
+    id: str
+    label: str = pydantic.Field()
+    """
+    A human-readable label for the metric
+    """
+
+    name: str = pydantic.Field()
+    """
+    A name that uniquely identifies this metric within its workspace
+    """
+
+    description: str
+    input_variables: typing.List[VellumVariable]
+    output_variables: typing.List[VellumVariable]
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
+
+
+update_forward_refs(ArrayVellumValue, MetricDefinitionHistoryItem=MetricDefinitionHistoryItem)
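The new MetricDefinitionHistoryItem above is a plain Pydantic model, so it can be constructed directly once vellum-ai is installed. A minimal sketch; the field values here are illustrative placeholders, not real API data:

```python
from vellum.client.types.metric_definition_history_item import MetricDefinitionHistoryItem

# Illustrative values only; real items come back from the metric_definitions endpoints.
item = MetricDefinitionHistoryItem(
    id="example-id",
    label="Exact Match",
    name="exact-match",
    description="Checks whether the output exactly matches the target.",
    input_variables=[],
    output_variables=[],
)

# The model is frozen and allows extra fields, per the config branches above.
print(item.label)
```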
@@ -0,0 +1,3 @@
+# WARNING: This file will be removed in a future release. Please import from "vellum.client" instead.
+
+from vellum.client.types.metric_definition_history_item import *
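Since the new vellum/types module is just a star re-export of the generated model, both import paths currently resolve to the same class; a quick sanity-check sketch:

```python
from vellum.client.types.metric_definition_history_item import MetricDefinitionHistoryItem
from vellum.types.metric_definition_history_item import MetricDefinitionHistoryItem as LegacyAlias

# The legacy module star-imports the generated model, so both names are bound to
# the same class object until the deprecated path is removed.
assert MetricDefinitionHistoryItem is LegacyAlias
```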
@@ -1,13 +1,14 @@
-from typing import Any, Dict, Generic, Literal, Type, Union
+from typing import Any, Dict, Generic, Iterable, List, Literal, Optional, Set, Type, Union
 
-from pydantic import field_serializer
+from pydantic import ConfigDict, SerializerFunctionWrapHandler, field_serializer, model_serializer
+from pydantic.main import IncEx
 
 from vellum.core.pydantic_utilities import UniversalBaseModel
-
 from vellum.workflows.errors import VellumError
 from vellum.workflows.expressions.accessor import AccessorExpression
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.outputs.base import BaseOutput
+from vellum.workflows.ports.port import Port
 from vellum.workflows.references.node import NodeReference
 from vellum.workflows.types.generics import OutputsType
 
@@ -21,6 +22,15 @@ class _BaseNodeExecutionBody(UniversalBaseModel):
     def serialize_node_definition(self, node_definition: Type, _info: Any) -> Dict[str, Any]:
         return serialize_type_encoder(node_definition)
 
+    # Couldn't get this to work with model_config.exclude_none or model_config.exclude_defaults
+    # so we're excluding null invoked_ports manually here for now
+    @model_serializer(mode="wrap", when_used="json")
+    def serialize_model(self, handler: SerializerFunctionWrapHandler) -> Any:
+        serialized = super().serialize_model(handler)  # type: ignore[call-arg, arg-type]
+        if "invoked_ports" in serialized and serialized["invoked_ports"] is None:
+            del serialized["invoked_ports"]
+        return serialized
+
 
 class _BaseNodeEvent(BaseEvent):
     body: _BaseNodeExecutionBody
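For readers unfamiliar with the wrap-mode serializer used above: it lets a model post-process its own default serialization, which is how the null invoked_ports key is stripped from JSON output. A standalone sketch against plain Pydantic v2 (class and field names here are illustrative, not Vellum's):

```python
from typing import Any, List, Optional

from pydantic import BaseModel, SerializerFunctionWrapHandler, model_serializer


class ExampleBody(BaseModel):
    output: str
    invoked_ports: Optional[List[str]] = None

    @model_serializer(mode="wrap", when_used="json")
    def _drop_null_invoked_ports(self, handler: SerializerFunctionWrapHandler) -> Any:
        # handler(self) produces the default serialization; we then prune the null key.
        serialized = handler(self)
        if serialized.get("invoked_ports") is None:
            serialized.pop("invoked_ports", None)
        return serialized


print(ExampleBody(output="foo").model_dump_json())
# -> {"output":"foo"}  (no "invoked_ports" key)
print(ExampleBody(output="foo", invoked_ports=["default"]).model_dump_json())
# -> {"output":"foo","invoked_ports":["default"]}
```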
@@ -31,6 +41,7 @@ class _BaseNodeEvent(BaseEvent):
 
 
 NodeInputName = Union[NodeReference, AccessorExpression]
+InvokedPorts = Optional[Set["Port"]]
 
 
 class NodeExecutionInitiatedBody(_BaseNodeExecutionBody):
@@ -52,11 +63,18 @@ class NodeExecutionInitiatedEvent(_BaseNodeEvent):
 
 class NodeExecutionStreamingBody(_BaseNodeExecutionBody):
     output: BaseOutput
+    invoked_ports: InvokedPorts = None
 
     @field_serializer("output")
     def serialize_output(self, output: BaseOutput, _info: Any) -> Dict[str, Any]:
         return default_serializer(output)
 
+    @field_serializer("invoked_ports")
+    def serialize_invoked_ports(self, invoked_ports: InvokedPorts, _info: Any) -> Optional[List[Dict[str, Any]]]:
+        if not invoked_ports:
+            return None
+        return [default_serializer(port) for port in invoked_ports]
+
 
 class NodeExecutionStreamingEvent(_BaseNodeEvent):
     name: Literal["node.execution.streaming"] = "node.execution.streaming"
@@ -66,14 +84,25 @@ class NodeExecutionStreamingEvent(_BaseNodeEvent):
     def output(self) -> BaseOutput:
         return self.body.output
 
+    @property
+    def invoked_ports(self) -> InvokedPorts:
+        return self.body.invoked_ports
+
 
 class NodeExecutionFulfilledBody(_BaseNodeExecutionBody, Generic[OutputsType]):
     outputs: OutputsType
+    invoked_ports: InvokedPorts = None
 
     @field_serializer("outputs")
     def serialize_outputs(self, outputs: OutputsType, _info: Any) -> Dict[str, Any]:
         return default_serializer(outputs)
 
+    @field_serializer("invoked_ports")
+    def serialize_invoked_ports(self, invoked_ports: InvokedPorts, _info: Any) -> Optional[List[Dict[str, Any]]]:
+        if invoked_ports is None:
+            return None
+        return [default_serializer(port) for port in invoked_ports]
+
 
 class NodeExecutionFulfilledEvent(_BaseNodeEvent, Generic[OutputsType]):
     name: Literal["node.execution.fulfilled"] = "node.execution.fulfilled"
@@ -83,6 +112,10 @@ class NodeExecutionFulfilledEvent(_BaseNodeEvent, Generic[OutputsType]):
     def outputs(self) -> OutputsType:
         return self.body.outputs
 
+    @property
+    def invoked_ports(self) -> InvokedPorts:
+        return self.body.invoked_ports
+
 
 class NodeExecutionRejectedBody(_BaseNodeExecutionBody):
     error: VellumError
@@ -6,7 +6,14 @@ from uuid import UUID
 from deepdiff import DeepDiff
 
 from vellum.workflows.errors.types import VellumError, VellumErrorCode
-from vellum.workflows.events.node import NodeExecutionInitiatedBody, NodeExecutionInitiatedEvent
+from vellum.workflows.events.node import (
+    NodeExecutionFulfilledBody,
+    NodeExecutionFulfilledEvent,
+    NodeExecutionInitiatedBody,
+    NodeExecutionInitiatedEvent,
+    NodeExecutionStreamingBody,
+    NodeExecutionStreamingEvent,
+)
 from vellum.workflows.events.types import NodeParentContext, WorkflowParentContext
 from vellum.workflows.events.workflow import (
     WorkflowExecutionFulfilledBody,
@@ -93,10 +100,9 @@ module_root = name_parts[: name_parts.index("events")]
             node_definition=MockNode,
             span_id=UUID("123e4567-e89b-12d3-a456-426614174000"),
             parent=WorkflowParentContext(
-                workflow_definition=MockWorkflow,
-                span_id=UUID("123e4567-e89b-12d3-a456-426614174000")
-            )
-        )
+                workflow_definition=MockWorkflow, span_id=UUID("123e4567-e89b-12d3-a456-426614174000")
+            ),
+        ),
     ),
     {
         "id": "123e4567-e89b-12d3-a456-426614174000",
@@ -126,10 +132,10 @@ module_root = name_parts[: name_parts.index("events")]
                 },
                 "type": "WORKFLOW",
                 "parent": None,
-                "span_id": "123e4567-e89b-12d3-a456-426614174000"
+                "span_id": "123e4567-e89b-12d3-a456-426614174000",
             },
             "type": "WORKFLOW_NODE",
-            "span_id": "123e4567-e89b-12d3-a456-426614174000"
+            "span_id": "123e4567-e89b-12d3-a456-426614174000",
         },
     },
 ),
@@ -164,7 +170,7 @@ module_root = name_parts[: name_parts.index("events")]
             "value": "foo",
         },
     },
-    "parent": None
+    "parent": None,
 },
 ),
 (
@@ -233,6 +239,78 @@ module_root = name_parts[: name_parts.index("events")]
     "parent": None,
 },
 ),
+(
+    NodeExecutionStreamingEvent(
+        id=UUID("123e4567-e89b-12d3-a456-426614174000"),
+        timestamp=datetime(2024, 1, 1, 12, 0, 0),
+        trace_id=UUID("123e4567-e89b-12d3-a456-426614174000"),
+        span_id=UUID("123e4567-e89b-12d3-a456-426614174000"),
+        body=NodeExecutionStreamingBody(
+            node_definition=MockNode,
+            output=BaseOutput(
+                name="example",
+                value="foo",
+            ),
+        ),
+    ),
+    {
+        "id": "123e4567-e89b-12d3-a456-426614174000",
+        "api_version": "2024-10-25",
+        "timestamp": "2024-01-01T12:00:00",
+        "trace_id": "123e4567-e89b-12d3-a456-426614174000",
+        "span_id": "123e4567-e89b-12d3-a456-426614174000",
+        "name": "node.execution.streaming",
+        "body": {
+            "node_definition": {
+                "name": "MockNode",
+                "module": module_root + ["events", "tests", "test_event"],
+            },
+            "output": {
+                "name": "example",
+                "value": "foo",
+            },
+        },
+        "parent": None,
+    },
+),
+(
+    NodeExecutionFulfilledEvent(
+        id=UUID("123e4567-e89b-12d3-a456-426614174000"),
+        timestamp=datetime(2024, 1, 1, 12, 0, 0),
+        trace_id=UUID("123e4567-e89b-12d3-a456-426614174000"),
+        span_id=UUID("123e4567-e89b-12d3-a456-426614174000"),
+        body=NodeExecutionFulfilledBody(
+            node_definition=MockNode,
+            outputs=MockNode.Outputs(
+                example="foo",
+            ),
+            invoked_ports={MockNode.Ports.default},
+        ),
+    ),
+    {
+        "id": "123e4567-e89b-12d3-a456-426614174000",
+        "api_version": "2024-10-25",
+        "timestamp": "2024-01-01T12:00:00",
+        "trace_id": "123e4567-e89b-12d3-a456-426614174000",
+        "span_id": "123e4567-e89b-12d3-a456-426614174000",
+        "name": "node.execution.fulfilled",
+        "body": {
+            "node_definition": {
+                "name": "MockNode",
+                "module": module_root + ["events", "tests", "test_event"],
+            },
+            "outputs": {
+                "example": "foo",
+            },
+            "invoked_ports": [
+                {
+                    "name": "default",
+                }
+            ],
+        },
+        "parent": None,
+    },
+),
 ],
 ids=[
     "workflow.execution.initiated",
@@ -240,7 +318,9 @@ module_root = name_parts[: name_parts.index("events")]
     "workflow.execution.streaming",
     "workflow.execution.fulfilled",
     "workflow.execution.rejected",
+    "node.execution.streaming",
+    "node.execution.fulfilled",
 ],
 )
 def test_event_serialization(event, expected_json):
-    assert not DeepDiff(json.loads(event.model_dump_json()), expected_json)
+    assert not DeepDiff(event.model_dump(mode="json"), expected_json)
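The final assertion now compares event.model_dump(mode="json") instead of round-tripping through model_dump_json. For context, a minimal sketch of why the two are interchangeable for this comparison (toy model, not the actual event classes):

```python
import json
from datetime import datetime

from pydantic import BaseModel


class ToyEvent(BaseModel):
    name: str
    timestamp: datetime


event = ToyEvent(name="node.execution.fulfilled", timestamp=datetime(2024, 1, 1, 12, 0, 0))

# Both paths yield the same JSON-compatible dict; mode="json" just skips the string round-trip.
assert event.model_dump(mode="json") == json.loads(event.model_dump_json())
assert event.model_dump(mode="json") == {
    "name": "node.execution.fulfilled",
    "timestamp": "2024-01-01T12:00:00",
}
```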
@@ -103,4 +103,4 @@ class BaseEvent(UniversalBaseModel):
     api_version: Literal["2024-10-25"] = "2024-10-25"
     trace_id: UUID
     span_id: UUID
-    parent: Optional['ParentContext'] = None
+    parent: Optional[ParentContext] = None
@@ -18,6 +18,7 @@ class InlineSubworkflowNode(BaseSubworkflowNode[StateType], Generic[StateType, W
     Used to execute a Subworkflow defined inline.
 
     subworkflow: Type["BaseWorkflow[WorkflowInputsType, InnerStateType]"] - The Subworkflow to execute
+    subworkflow_inputs: ClassVar[EntityInputsInterface] = {}
     """
 
     subworkflow: Type["BaseWorkflow[WorkflowInputsType, InnerStateType]"]
@@ -82,6 +82,11 @@ class TemplatingNode(BaseNode[StateType], Generic[StateType, _OutputType], metac
     jinja_custom_filters: Mapping[str, Callable[[Union[str, bytes]], bool]] = _DEFAULT_JINJA_CUSTOM_FILTERS
 
     class Outputs(BaseNode.Outputs):
+        """
+        The outputs of the TemplatingNode.
+
+        result: _OutputType - The result of the template rendering
+        """
         # We use our mypy plugin to override the _OutputType with the actual output type
         # for downstream references to this output.
         result: _OutputType  # type: ignore[valid-type]
@@ -8,7 +8,7 @@ from vellum.workflows.references.vellum_secret import VellumSecretReference
 class APINode(BaseAPINode):
     """
     Used to execute an API call. This node exists to be backwards compatible with Vellum's API Node, and for most cases,
-    you should extend from `APINode` directly.
+    you should extend from `BaseAPINode` directly.
 
     url: str - The URL to send the request to.
     method: APIRequestMethod - The HTTP method to use for the request.
@@ -11,7 +11,6 @@ from vellum import (
     RawPromptExecutionOverridesRequest,
     StringInputRequest,
 )
-
 from vellum.workflows.constants import LATEST_RELEASE_TAG, OMIT
 from vellum.workflows.errors import VellumErrorCode
 from vellum.workflows.exceptions import NodeException
@@ -26,7 +25,7 @@ class BasePromptDeploymentNode(BasePromptNode, Generic[StateType]):
     prompt_inputs: EntityInputsInterface - The inputs for the Prompt
     deployment: Union[UUID, str] - Either the Prompt Deployment's UUID or its name.
     release_tag: str - The release tag to use for the Prompt Execution
-    external_id: Optional[str] - The external ID to use for the Prompt Execution
+    external_id: Optional[str] - Optionally include a unique identifier for tracking purposes. Must be unique within a given Prompt Deployment.
     expand_meta: Optional[PromptDeploymentExpandMetaRequest] - Expandable execution fields to include in the response
     raw_overrides: Optional[RawPromptExecutionOverridesRequest] - The raw overrides to use for the Prompt Execution
     expand_raw: Optional[Sequence[str]] - Expandable raw fields to include in the response
@@ -67,9 +67,8 @@ class CodeExecutionNode(BaseNode[StateType], Generic[StateType, _OutputType], me
 
     filepath: str - The path to the script to execute.
     code_inputs: EntityInputsInterface - The inputs for the custom script.
-    output_type: VellumVariableType = "STRING" - The type of the output from the custom script.
     runtime: CodeExecutionRuntime = "PYTHON_3_12" - The runtime to use for the custom script.
-    packages: Optional[Sequence[CodeExecutionPackageRequest]] = None - The packages to use for the custom script.
+    packages: Optional[Sequence[CodeExecutionPackage]] = None - The packages to use for the custom script.
     request_options: Optional[RequestOptions] = None - The request options to use for the custom script.
     """
 
@@ -2,9 +2,20 @@ import os
 from typing import Union
 
 
+def get_project_root() -> str:
+    current_dir = os.getcwd()
+    while current_dir != '/':
+        if ".git" in os.listdir(current_dir):
+            return current_dir
+        current_dir = os.path.dirname(current_dir)
+    raise FileNotFoundError("Project root not found.")
+
 def read_file_from_path(filepath: str) -> Union[str, None]:
-    if not os.path.exists(filepath):
+    project_root = get_project_root()
+    relative_filepath = os.path.join(project_root, filepath)
+
+    if not os.path.exists(relative_filepath):
         return None
 
-    with open(filepath) as file:
+    with open(relative_filepath, 'r') as file:
         return file.read()
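The utils change means a CodeExecutionNode filepath is now resolved against the nearest ancestor directory containing .git rather than the process's working directory. A rough, self-contained sketch of that resolution rule (the resolve helper below is illustrative, not the library function, and stops at the filesystem root rather than hard-coding '/'):

```python
import os
import tempfile


def resolve(filepath: str) -> str:
    # Walk up from the current working directory to the nearest ancestor that
    # contains ".git", then join the relative filepath onto that root.
    current_dir = os.getcwd()
    while current_dir != os.path.dirname(current_dir):  # stop at the filesystem root
        if ".git" in os.listdir(current_dir):
            return os.path.join(current_dir, filepath)
        current_dir = os.path.dirname(current_dir)
    raise FileNotFoundError("Project root not found.")


with tempfile.TemporaryDirectory() as tmp:
    root = os.path.realpath(tmp)
    os.makedirs(os.path.join(root, ".git"))
    os.makedirs(os.path.join(root, "workflows", "scripts"))
    os.chdir(os.path.join(root, "workflows"))
    # The path resolves against the directory holding .git, not the cwd.
    assert resolve("workflows/scripts/score.py") == os.path.join(root, "workflows/scripts/score.py")
```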
@@ -1,4 +1,4 @@
-from typing import Iterable
+from typing import Set
 
 from vellum.workflows.nodes.bases import BaseNode
 from vellum.workflows.outputs.base import BaseOutputs
@@ -15,7 +15,7 @@ class ConditionalNode(BaseNode):
     """
 
     class Ports(NodePorts):
-        def __call__(self, outputs: BaseOutputs, state: BaseState) -> Iterable[Port]:
+        def __call__(self, outputs: BaseOutputs, state: BaseState) -> Set[Port]:
             all_ports = [port for port in self.__class__]
             enforce_single_invoked_port = validate_ports(all_ports)
 
@@ -9,16 +9,23 @@ from vellum.workflows.types.generics import StateType
 
 class InlinePromptNode(BaseInlinePromptNode[StateType]):
     """
-    Used to execute an Inline Prompt and surface a string output for convenience.
+    Used to execute a Prompt defined inline.
 
     prompt_inputs: EntityInputsInterface - The inputs for the Prompt
     ml_model: str - Either the ML Model's UUID or its name.
-    blocks: List[PromptBlockRequest] - The blocks that make up the Prompt
+    blocks: List[PromptBlock] - The blocks that make up the Prompt
+    functions: Optional[List[FunctionDefinition]] - The functions to include in the Prompt
     parameters: PromptParameters - The parameters for the Prompt
-    expand_meta: Optional[AdHocExpandMetaRequest] - Set of expandable execution fields to include in the response
+    expand_meta: Optional[AdHocExpandMeta] - Expandable execution fields to include in the response
+    request_options: Optional[RequestOptions] - The request options to use for the Prompt Execution
     """
 
     class Outputs(BaseInlinePromptNode.Outputs):
+        """
+        The outputs of the InlinePromptNode.
+
+        text: str - The result of the Prompt Execution
+        """
         text: str
 
     def run(self) -> Iterator[BaseOutput]:
@@ -14,7 +14,7 @@ class PromptDeploymentNode(BasePromptDeploymentNode[StateType]):
     prompt_inputs: EntityInputsInterface - The inputs for the Prompt
     deployment: Union[UUID, str] - Either the Prompt Deployment's UUID or its name.
     release_tag: str - The release tag to use for the Prompt Execution
-    external_id: Optional[str] - The external ID to use for the Prompt Execution
+    external_id: Optional[str] - Optionally include a unique identifier for tracking purposes. Must be unique within a given Prompt Deployment.
     expand_meta: Optional[PromptDeploymentExpandMetaRequest] - Expandable execution fields to include in the response
     raw_overrides: Optional[RawPromptExecutionOverridesRequest] - The raw overrides to use for the Prompt Execution
     expand_raw: Optional[Sequence[str]] - Expandable raw fields to include in the response
@@ -23,6 +23,11 @@ class PromptDeploymentNode(BasePromptDeploymentNode[StateType]):
     """
 
     class Outputs(BasePromptDeploymentNode.Outputs):
+        """
+        The outputs of the PromptDeploymentNode.
+
+        text: str - The result of the Prompt Execution
+        """
         text: str
 
     def run(self) -> Iterator[BaseOutput]:
@@ -12,7 +12,6 @@ from vellum import (
     WorkflowRequestStringInputRequest,
 )
 from vellum.core import RequestOptions
-
 from vellum.workflows.constants import LATEST_RELEASE_TAG, OMIT
 from vellum.workflows.errors import VellumErrorCode
 from vellum.workflows.exceptions import NodeException
@@ -28,7 +27,7 @@ class SubworkflowDeploymentNode(BaseSubworkflowNode[StateType], Generic[StateTyp
     subworkflow_inputs: EntityInputsInterface - The inputs for the Subworkflow
     deployment: Union[UUID, str] - Either the Workflow Deployment's UUID or its name.
     release_tag: str = LATEST_RELEASE_TAG - The release tag to use for the Workflow Execution
-    external_id: Optional[str] = OMIT - The external ID to use for the Workflow Execution
+    external_id: Optional[str] = OMIT - Optionally include a unique identifier for tracking purposes. Must be unique within a given Workflow Deployment.
     expand_meta: Optional[WorkflowExpandMetaRequest] = OMIT - Expandable execution fields to include in the respownse
     metadata: Optional[Dict[str, Optional[Any]]] = OMIT - The metadata to use for the Workflow Execution
     request_options: Optional[RequestOptions] = None - The request options to use for the Workflow Execution
@@ -8,7 +8,6 @@ from vellum import (
     PromptOutput,
     StringVellumValue,
 )
-
 from vellum.workflows.constants import OMIT
 from vellum.workflows.inputs import BaseInputs
 from vellum.workflows.nodes import PromptDeploymentNode
@@ -65,7 +64,7 @@ def test_text_prompt_deployment_node__basic(vellum_client):
     assert text_output.name == "text"
     assert text_output.value == "Hello, world!"
 
-    # AND we should have made the expected call to Vellum search
+    # AND we should have made the expected call to stream the prompt execution
     vellum_client.execute_prompt_stream.assert_called_once_with(
         expand_meta=OMIT,
         expand_raw=OMIT,
@@ -33,7 +33,7 @@ class _NodePortsMeta(type):
 
 
 class NodePorts(metaclass=_NodePortsMeta):
-    def __call__(self, outputs: BaseOutputs, state: BaseState) -> Iterable[Port]:
+    def __call__(self, outputs: BaseOutputs, state: BaseState) -> Set[Port]:
         """
         Invokes the appropriate ports based on the fulfilled outputs and state.
         """
@@ -67,7 +67,7 @@ class NodePorts(metaclass=_NodePortsMeta):
 
         return invoked_ports
 
-    def __lt__(self, output: BaseOutput) -> Iterable[Port]:
+    def __lt__(self, output: BaseOutput) -> Set[Port]:
         """
        Invokes the appropriate ports based on the streamed output
        """
@@ -1,5 +1,8 @@
 from typing import TYPE_CHECKING, Any, Iterator, List, Optional, Type
 
+from pydantic import GetCoreSchemaHandler
+from pydantic_core import core_schema
+
 from vellum.workflows.descriptors.base import BaseDescriptor
 from vellum.workflows.edges.edge import Edge
 from vellum.workflows.graph import Graph, GraphTarget
@@ -73,3 +76,14 @@ class Port:
 
         value = self._condition.resolve(state)
         return bool(value)
+
+    def serialize(self) -> dict:
+        return {
+            "name": self.name,
+        }
+
+    @classmethod
+    def __get_pydantic_core_schema__(
+        cls, source_type: Type[Any], handler: GetCoreSchemaHandler
+    ) -> core_schema.CoreSchema:
+        return core_schema.is_instance_schema(cls)
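To unpack the Port additions: __get_pydantic_core_schema__ returning core_schema.is_instance_schema(cls) lets Port instances appear as Pydantic field values (validated with an isinstance check), and serialize() is what the event serializers above emit for each invoked port. A standalone sketch of the same pattern using a stand-in class rather than Vellum's Port:

```python
from typing import Any, Type

from pydantic import BaseModel, GetCoreSchemaHandler
from pydantic_core import core_schema


class Handle:
    """Stand-in for a class like Port that is not itself a pydantic model."""

    def __init__(self, name: str) -> None:
        self.name = name

    def serialize(self) -> dict:
        return {"name": self.name}

    @classmethod
    def __get_pydantic_core_schema__(
        cls, source_type: Type[Any], handler: GetCoreSchemaHandler
    ) -> core_schema.CoreSchema:
        # Accept any instance of this class during validation; no coercion is attempted.
        return core_schema.is_instance_schema(cls)


class Body(BaseModel):
    handle: Handle


Body(handle=Handle("default"))  # validates: it is a Handle instance
# Body(handle="default")        # would raise a ValidationError
```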
@@ -4,6 +4,7 @@ from .lazy import LazyReference
 from .node import NodeReference
 from .output import OutputReference
 from .state_value import StateValueReference
+from .vellum_secret import VellumSecretReference
 from .workflow_input import WorkflowInputReference
 
 __all__ = [
@@ -13,5 +14,6 @@ __all__ = [
     "NodeReference",
     "OutputReference",
     "StateValueReference",
+    "VellumSecretReference",
     "WorkflowInputReference",
 ]