vellum-ai 0.14.74__py3-none-any.whl → 0.14.76__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -18,7 +18,7 @@ class BaseClientWrapper:
  headers: typing.Dict[str, str] = {
  "X-Fern-Language": "Python",
  "X-Fern-SDK-Name": "vellum-ai",
- "X-Fern-SDK-Version": "0.14.74",
+ "X-Fern-SDK-Version": "0.14.76",
  }
  headers["X-API-KEY"] = self.api_key
  return headers
@@ -4270,7 +4270,15 @@ client.prompts.push(
  <dl>
  <dd>

- **prompt_variant_id:** `typing.Optional[str]`
+ **prompt_variant_id:** `typing.Optional[str]` — If specified, an existing Prompt Variant by the provided ID will be updated. Otherwise, a new Prompt Variant will be created and an ID generated.
+
+ </dd>
+ </dl>
+
+ <dl>
+ <dd>
+
+ **prompt_variant_label:** `typing.Optional[str]` — If provided, then the created/updated Prompt Variant will have this label.

  </dd>
  </dl>
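
To make the new fields concrete, here is a minimal sketch of a `prompts.push` call that uses them. It is adapted from the SDK's own reference example; the API key, exec config values, and IDs are placeholders, and exact required fields may vary by version:

```python
from vellum import JinjaPromptBlock, PromptExecConfig, PromptParameters, Vellum, VellumVariable

client = Vellum(api_key="YOUR_API_KEY")  # placeholder key

client.prompts.push(
    exec_config=PromptExecConfig(
        ml_model="gpt-4o",  # placeholder model name
        input_variables=[VellumVariable(id="1", key="question", type="STRING")],
        parameters=PromptParameters(),
        blocks=[JinjaPromptBlock(template="Answer: {{ question }}")],
    ),
    prompt_variant_id="my-existing-variant-id",  # update this variant if it already exists
    prompt_variant_label="My Variant",           # label applied to the created/updated variant
)
```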
@@ -110,6 +110,7 @@ class PromptsClient:
  *,
  exec_config: PromptExecConfig,
  prompt_variant_id: typing.Optional[str] = OMIT,
+ prompt_variant_label: typing.Optional[str] = OMIT,
  prompt_sandbox_id: typing.Optional[str] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> PromptPushResponse:
@@ -121,6 +122,10 @@ class PromptsClient:
  exec_config : PromptExecConfig

  prompt_variant_id : typing.Optional[str]
+ If specified, an existing Prompt Variant by the provided ID will be updated. Otherwise, a new Prompt Variant will be created and an ID generated.
+
+ prompt_variant_label : typing.Optional[str]
+ If provided, then the created/updated Prompt Variant will have this label.

  prompt_sandbox_id : typing.Optional[str]

@@ -169,11 +174,12 @@ class PromptsClient:
  base_url=self._client_wrapper.get_environment().default,
  method="POST",
  json={
+ "prompt_variant_id": prompt_variant_id,
+ "prompt_variant_label": prompt_variant_label,
+ "prompt_sandbox_id": prompt_sandbox_id,
  "exec_config": convert_and_respect_annotation_metadata(
  object_=exec_config, annotation=PromptExecConfig, direction="write"
  ),
- "prompt_variant_id": prompt_variant_id,
- "prompt_sandbox_id": prompt_sandbox_id,
  },
  headers={
  "content-type": "application/json",
@@ -316,6 +322,7 @@ class AsyncPromptsClient:
  *,
  exec_config: PromptExecConfig,
  prompt_variant_id: typing.Optional[str] = OMIT,
+ prompt_variant_label: typing.Optional[str] = OMIT,
  prompt_sandbox_id: typing.Optional[str] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> PromptPushResponse:
@@ -327,6 +334,10 @@ class AsyncPromptsClient:
  exec_config : PromptExecConfig

  prompt_variant_id : typing.Optional[str]
+ If specified, an existing Prompt Variant by the provided ID will be updated. Otherwise, a new Prompt Variant will be created and an ID generated.
+
+ prompt_variant_label : typing.Optional[str]
+ If provided, then the created/updated Prompt Variant will have this label.

  prompt_sandbox_id : typing.Optional[str]

@@ -383,11 +394,12 @@ class AsyncPromptsClient:
  base_url=self._client_wrapper.get_environment().default,
  method="POST",
  json={
+ "prompt_variant_id": prompt_variant_id,
+ "prompt_variant_label": prompt_variant_label,
+ "prompt_sandbox_id": prompt_sandbox_id,
  "exec_config": convert_and_respect_annotation_metadata(
  object_=exec_config, annotation=PromptExecConfig, direction="write"
  ),
- "prompt_variant_id": prompt_variant_id,
- "prompt_sandbox_id": prompt_sandbox_id,
  },
  headers={
  "content-type": "application/json",
@@ -17,6 +17,7 @@ if TYPE_CHECKING:
  from vellum.workflows.expressions.greater_than_or_equal_to import GreaterThanOrEqualToExpression
  from vellum.workflows.expressions.in_ import InExpression
  from vellum.workflows.expressions.is_blank import IsBlankExpression
+ from vellum.workflows.expressions.is_error import IsErrorExpression
  from vellum.workflows.expressions.is_nil import IsNilExpression
  from vellum.workflows.expressions.is_not_blank import IsNotBlankExpression
  from vellum.workflows.expressions.is_not_nil import IsNotNilExpression
@@ -358,3 +359,8 @@ class BaseDescriptor(Generic[_T]):
  from vellum.workflows.expressions.parse_json import ParseJsonExpression

  return ParseJsonExpression(expression=self)
+
+ def is_error(self) -> "IsErrorExpression":
+ from vellum.workflows.expressions.is_error import IsErrorExpression
+
+ return IsErrorExpression(expression=self)
@@ -1,5 +1,6 @@
  import pytest

+ from vellum.client.types.vellum_error import VellumError
  from vellum.workflows.descriptors.utils import resolve_value
  from vellum.workflows.errors.types import WorkflowError, WorkflowErrorCode
  from vellum.workflows.nodes.bases.base import BaseNode
@@ -96,6 +97,27 @@ class DummyNode(BaseNode[FixtureState]):
  ).does_not_contain("test"),
  False,
  ),
+ (
+ ConstantValueReference(
+ WorkflowError(
+ message="This is a test",
+ code=WorkflowErrorCode.USER_DEFINED_ERROR,
+ )
+ ).is_error(),
+ True,
+ ),
+ (
+ ConstantValueReference(
+ VellumError(
+ message="This is a test",
+ code="USER_DEFINED_ERROR",
+ )
+ ).is_error(),
+ True,
+ ),
+ (FixtureState.alpha.is_error(), False),
+ (FixtureState.eta.is_error(), False),
+ (DummyNode.Outputs.empty.is_error(), False),
  (ConstantValueReference('{"foo": "bar"}').parse_json(), {"foo": "bar"}),
  (ConstantValueReference('{"foo": "bar"}').parse_json()["foo"], "bar"),
  (ConstantValueReference("[1, 2, 3]").parse_json(), [1, 2, 3]),
@@ -150,6 +172,11 @@ class DummyNode(BaseNode[FixtureState]):
  "list_index",
  "error_contains",
  "error_does_not_contain",
+ "is_error_on_workflow_error",
+ "is_error_on_vellum_error",
+ "is_error_on_value",
+ "is_error_on_null",
+ "is_error_on_undefined",
  "parse_json_constant",
  "parse_json_accessor",
  "parse_json_list",
@@ -0,0 +1,23 @@
+ from typing import Generic, TypeVar, Union
+
+ from vellum.client.types.vellum_error import VellumError
+ from vellum.workflows.descriptors.base import BaseDescriptor
+ from vellum.workflows.descriptors.utils import resolve_value
+ from vellum.workflows.errors.types import WorkflowError
+ from vellum.workflows.state.base import BaseState
+
+ _T = TypeVar("_T")
+
+
+ class IsErrorExpression(BaseDescriptor[bool], Generic[_T]):
+ def __init__(
+ self,
+ *,
+ expression: Union[BaseDescriptor[_T], _T],
+ ) -> None:
+ super().__init__(name=f"{expression} is error", types=(bool,))
+ self._expression = expression
+
+ def resolve(self, state: "BaseState") -> bool:
+ expression = resolve_value(self._expression, state)
+ return isinstance(expression, (VellumError, WorkflowError))
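
Since the diff only shows the new expression in isolation, here is a small sketch of how the `is_error()` method added to `BaseDescriptor` resolves, mirroring the test fixtures above (the `ConstantValueReference` import path is assumed, not shown in this diff):

```python
from vellum.workflows.descriptors.utils import resolve_value
from vellum.workflows.errors.types import WorkflowError, WorkflowErrorCode
from vellum.workflows.references.constant import ConstantValueReference  # path assumed
from vellum.workflows.state.base import BaseState

state = BaseState()

# A reference wrapping a WorkflowError resolves to True...
error_ref = ConstantValueReference(
    WorkflowError(message="boom", code=WorkflowErrorCode.USER_DEFINED_ERROR)
)
assert resolve_value(error_ref.is_error(), state) is True

# ...while a reference wrapping an ordinary value resolves to False.
value_ref = ConstantValueReference("not an error")
assert resolve_value(value_ref.is_error(), state) is False
```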
@@ -457,21 +457,6 @@ class BaseNode(Generic[StateType], ABC, metaclass=BaseNodeMeta):
  resolved_value = resolve_value(descriptor.instance, self.state, path=descriptor.name, memo=inputs)
  setattr(self, descriptor.name, resolved_value)

- # Resolve descriptors set as defaults to the outputs class
- def _outputs_post_init(outputs_self: "BaseNode.Outputs", **kwargs: Any) -> None:
- for node_output_descriptor in self.Outputs:
- if node_output_descriptor.name in kwargs:
- continue
-
- if isinstance(node_output_descriptor.instance, BaseDescriptor):
- setattr(
- outputs_self,
- node_output_descriptor.name,
- node_output_descriptor.instance.resolve(self.state),
- )
-
- setattr(self.Outputs, "_outputs_post_init", _outputs_post_init)
-
  # We only want to store the attributes that were actually set as inputs, not every attribute that exists.
  all_inputs = {}
  for key, value in inputs.items():
@@ -3,6 +3,7 @@ import threading
  import time

  from vellum.workflows.inputs.base import BaseInputs
+ from vellum.workflows.nodes import FinalOutputNode
  from vellum.workflows.nodes.bases import BaseNode
  from vellum.workflows.nodes.core.map_node.node import MapNode
  from vellum.workflows.nodes.core.try_node.node import TryNode
@@ -221,3 +222,56 @@ def test_map_node_parallel_execution_with_workflow():
  # AND each item should have run on a different thread
  thread_ids_list = list(thread_ids.values())
  assert len(set(thread_ids_list)) == 3
+
+
+ def test_map_node__shared_state_race_condition():
+ processed_items = []
+
+ # GIVEN a templating node that processes items
+ class TemplatingNode(BaseNode):
+ item = MapNode.SubworkflowInputs.item
+
+ class Outputs(BaseOutputs):
+ processed_item: str
+
+ def run(self) -> Outputs:
+ processed_item = f"{self.item}!"
+ return self.Outputs(processed_item=processed_item)
+
+ # AND a final output node
+ class FinalOutput(FinalOutputNode[BaseState, str]):
+ class Outputs(FinalOutputNode.Outputs):
+ value = TemplatingNode.Outputs.processed_item
+
+ def run(self) -> Outputs:
+ outputs = super().run()
+ processed_items.append(outputs.value)
+ return outputs # type: ignore[return-value]
+
+ # AND a workflow using those nodes
+ class ProcessItemWorkflow(BaseWorkflow[MapNode.SubworkflowInputs, BaseState]):
+ graph = TemplatingNode >> FinalOutput
+
+ class Outputs(BaseWorkflow.Outputs):
+ result = FinalOutput.Outputs.value
+
+ # AND a map node with high concurrency
+ class RaceConditionMapNode(MapNode):
+ items = ["a", "b", "c", "d", "e", "f"]
+ subworkflow = ProcessItemWorkflow
+ max_concurrency = 1
+
+ # WHEN we run the map node multiple times to confirm it passes consistently
+ num_runs = 50
+ for index in range(num_runs):
+ processed_items.clear()
+ node = RaceConditionMapNode(state=BaseState())
+ outputs = list(node.run())
+ final_result = outputs[-1].value
+
+ # THEN the processed items within each run are unique
+ assert len(set(processed_items)) == 6
+
+ # AND all results should be in correct order
+ expected_result = ["a!", "b!", "c!", "d!", "e!", "f!"]
+ assert final_result == expected_result, f"Failed on run {index}"
@@ -46,4 +46,13 @@ def test_get_function_name_subworkflow_deployment():

  result = get_function_name(deployment_config)

- assert result == "my-test-deployment"
+ assert result == "mytestdeployment"
+
+
+ def test_get_function_name_subworkflow_deployment_uuid():
+ """Test subworkflow deployment with UUID."""
+ deployment_config = DeploymentDefinition(deployment="57f09beb-b463-40e0-bf9e-c972e664352f", release_tag="v1.0.0")
+
+ result = get_function_name(deployment_config)
+
+ assert result == "57f09bebb46340e0bf9ec972e664352f"
@@ -156,7 +156,7 @@ def create_function_node(
  runtime: The runtime to use for code execution (default: "PYTHON_3_11_6")
  """
  if isinstance(function, DeploymentDefinition):
- deployment = function.deployment
+ deployment = function.deployment_id or function.deployment_name
  release_tag = function.release_tag

  def execute_workflow_deployment_function(self) -> BaseNode.Outputs:
@@ -314,6 +314,7 @@ def main(arguments):

  def get_function_name(function: Tool) -> str:
  if isinstance(function, DeploymentDefinition):
- return function.deployment
+ name = str(function.deployment_id or function.deployment_name)
+ return name.replace("-", "")
  else:
  return snake_case(function.__name__)
@@ -1,3 +1,4 @@
+ import inspect
  from typing import TYPE_CHECKING, Any, Generic, Iterator, Set, Tuple, Type, TypeVar, Union, cast
  from typing_extensions import dataclass_transform

@@ -9,6 +10,7 @@ from vellum.workflows.descriptors.base import BaseDescriptor
  from vellum.workflows.errors.types import WorkflowErrorCode
  from vellum.workflows.exceptions import NodeException
  from vellum.workflows.references.output import OutputReference
+ from vellum.workflows.types.generics import is_node_instance
  from vellum.workflows.types.utils import get_class_attr_names, infer_types

  if TYPE_CHECKING:
@@ -198,8 +200,30 @@ class BaseOutputs(metaclass=_BaseOutputsMeta):
  for name, value in kwargs.items():
  setattr(self, name, value)

- if hasattr(self, "_outputs_post_init") and callable(self._outputs_post_init):
- self._outputs_post_init(**kwargs)
+ # Climb up to the caller's frame; if it's a BaseNode instance, it should
+ # have a state attribute that we can use to resolve the output descriptors.
+ frame = inspect.currentframe()
+ if not frame:
+ return
+
+ caller_frame = frame.f_back
+ if not caller_frame or "self" not in caller_frame.f_locals:
+ return
+
+ caller_self = caller_frame.f_locals["self"]
+ if not is_node_instance(caller_self):
+ return
+
+ for node_output_descriptor in self.__class__:
+ if node_output_descriptor.name in kwargs:
+ continue
+
+ if isinstance(node_output_descriptor.instance, BaseDescriptor):
+ setattr(
+ self,
+ node_output_descriptor.name,
+ node_output_descriptor.instance.resolve(caller_self.state),
+ )

  def __eq__(self, other: object) -> bool:
  if not isinstance(other, (dict, BaseOutputs)):
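
The frame-walking logic above is easier to see outside the diff context; below is a tiny, self-contained sketch of the same pattern using illustrative class names (not Vellum APIs):

```python
import inspect


class Node:
    state = {"counter": 1}

    def run(self) -> "Outputs":
        # Outputs() is constructed inside a Node method, so the caller's
        # frame has `self` bound to this Node instance.
        return Outputs()


class Outputs:
    def __init__(self) -> None:
        frame = inspect.currentframe()
        caller = frame.f_back if frame else None
        caller_self = caller.f_locals.get("self") if caller else None
        # Only pull state when the constructor was invoked from inside a Node.
        self.node_state = caller_self.state if isinstance(caller_self, Node) else None


outputs = Node().run()
assert outputs.node_state == {"counter": 1}

assert Outputs().node_state is None  # constructed outside a Node: nothing to resolve
```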
@@ -75,3 +75,25 @@ VellumCodeResourceDefinition = Annotated[
  class DeploymentDefinition(UniversalBaseModel):
  deployment: str
  release_tag: str = "LATEST"
+
+ def _is_uuid(self) -> bool:
+ """Check if the deployment field is a valid UUID."""
+ try:
+ UUID(self.deployment)
+ return True
+ except ValueError:
+ return False
+
+ @property
+ def deployment_id(self) -> Optional[UUID]:
+ """Get the deployment ID if the deployment field is a UUID."""
+ if self._is_uuid():
+ return UUID(self.deployment)
+ return None
+
+ @property
+ def deployment_name(self) -> Optional[str]:
+ """Get the deployment name if the deployment field is not a UUID."""
+ if not self._is_uuid():
+ return self.deployment
+ return None
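
For a quick illustration of how these properties combine with the `get_function_name` change earlier in this diff, here is a short sketch; the values mirror the tests and the dash-stripping is the normalization the diff introduces:

```python
from uuid import UUID

from vellum.workflows.types.definition import DeploymentDefinition

by_id = DeploymentDefinition(deployment="57f09beb-b463-40e0-bf9e-c972e664352f")
assert by_id.deployment_id == UUID("57f09beb-b463-40e0-bf9e-c972e664352f")
assert by_id.deployment_name is None

by_name = DeploymentDefinition(deployment="my-test-deployment")
assert by_name.deployment_id is None
assert by_name.deployment_name == "my-test-deployment"

# get_function_name now stringifies whichever identifier is set and strips dashes:
assert str(by_name.deployment_id or by_name.deployment_name).replace("-", "") == "mytestdeployment"
assert str(by_id.deployment_id or by_id.deployment_name).replace("-", "") == "57f09bebb46340e0bf9ec972e664352f"
```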
@@ -42,6 +42,11 @@ def is_node_class(obj: Any) -> TypeGuard[Type["BaseNode"]]:
  return isinstance(obj, type) and issubclass(obj, base_node_class)


+ def is_node_instance(obj: Any) -> TypeGuard["BaseNode"]:
+ base_node_class = _import_node_class()
+ return isinstance(obj, base_node_class)
+
+
  def is_workflow_class(obj: Any) -> TypeGuard[Type["BaseWorkflow"]]:
  base_workflow_class = import_workflow_class()
  return isinstance(obj, type) and issubclass(obj, base_workflow_class)
@@ -0,0 +1,33 @@
+ import pytest
+ from uuid import UUID
+
+ from vellum.workflows.types.definition import DeploymentDefinition
+
+
+ @pytest.mark.parametrize(
+ "deployment_value, expected_deployment_id, expected_deployment_name",
+ [
+ # Valid UUID string
+ (
+ "57f09beb-b463-40e0-bf9e-c972e664352f",
+ UUID("57f09beb-b463-40e0-bf9e-c972e664352f"),
+ None,
+ ),
+ # Name string
+ (
+ "tool-calling-subworkflow",
+ None,
+ "tool-calling-subworkflow",
+ ),
+ ],
+ ids=[
+ "valid_uuid",
+ "valid_name",
+ ],
+ )
+ def test_deployment_definition(deployment_value, expected_deployment_id, expected_deployment_name):
+ """Test that DeploymentDefinition properties correctly identify and extract UUID vs name."""
+ deployment = DeploymentDefinition(deployment=deployment_value)
+
+ assert deployment.deployment_id == expected_deployment_id
+ assert deployment.deployment_name == expected_deployment_name
@@ -201,7 +201,7 @@ def compile_workflow_deployment_function_definition(
  parameters = {"type": "object", "properties": properties, "required": required}

  return FunctionDefinition(
- name=deployment,
+ name=deployment.replace("-", ""),
  description=description,
  parameters=parameters,
  )
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: vellum-ai
- Version: 0.14.74
+ Version: 0.14.76
  Summary:
  License: MIT
  Requires-Python: >=3.9,<4.0
@@ -10,15 +10,15 @@ vellum_cli/ping.py,sha256=p_BCCRjgPhng6JktuECtkDQLbhopt6JpmrtGoLnLJT8,1161
  vellum_cli/pull.py,sha256=udYyPlJ6VKDdh78rApNJOZgxHl82fcV6iGnRPSdX1LY,14750
  vellum_cli/push.py,sha256=ibAaf6zO41Qrgfholl18bCq8sWVYd2PDCiQsPkEYAFw,10296
  vellum_cli/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- vellum_cli/tests/conftest.py,sha256=AFYZryKA2qnUuCPBxBKmHLFoPiE0WhBFFej9tNwSHdc,1526
+ vellum_cli/tests/conftest.py,sha256=wx3PlJjVB0HRf5dr2b_idOIw27WPPl0J0FNbhIJJaVk,1689
  vellum_cli/tests/test_config.py,sha256=uvKGDc8BoVyT9_H0Z-g8469zVxomn6Oi3Zj-vK7O_wU,2631
- vellum_cli/tests/test_image_push.py,sha256=xXZjI-CDp166cmk5P1NTa-aogergeIbcdfK4_KBsoHc,9095
+ vellum_cli/tests/test_image_push.py,sha256=qrlSZVMkPYc0ieqYaY2HCI8bBzwwL_Sx1G2nLb9_pSo,9199
  vellum_cli/tests/test_image_push_error_handling.py,sha256=_Wjfkn1orI2K4Ahzqz4u8T13or7NOX01K4BtcTuTIOM,7107
  vellum_cli/tests/test_init.py,sha256=8UOc_ThfouR4ja5cCl_URuLk7ohr9JXfCnG4yka1OUQ,18754
  vellum_cli/tests/test_main.py,sha256=qDZG-aQauPwBwM6A2DIu1494n47v3pL28XakTbLGZ-k,272
- vellum_cli/tests/test_ping.py,sha256=3ucVRThEmTadlV9LrJdCCrr1Ofj3rOjG6ue0BNR2UC0,2523
+ vellum_cli/tests/test_ping.py,sha256=178EJHxPZtnnPMNXXynsQt8DIFhsrdc2bL17_YsG17M,2580
  vellum_cli/tests/test_pull.py,sha256=hxMbW_j0weDDrkzVGpvLpFcwNQdn-fxTv4wBHeYizzc,49904
- vellum_cli/tests/test_push.py,sha256=uyWknZ1CBkxxfKVdwcyv7fkGSxvvSNRdJegp5cV-_V4,35167
+ vellum_cli/tests/test_push.py,sha256=rasQsxMYsSKgi_iaYz5cBD2ZBHusxhG9FAPx1Tn7mE4,35382
  vellum_ee/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  vellum_ee/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  vellum_ee/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -144,7 +144,7 @@ vellum/client/README.md,sha256=CuGUYnaE0Imt0KqQ4sIPaUghCjLHkF3DdEvZWu14-8s,4807
  vellum/client/__init__.py,sha256=AYopGv2ZRVn3zsU8_km6KOvEHDbXiTPCVuYVI7bWvdA,120166
  vellum/client/core/__init__.py,sha256=SQ85PF84B9MuKnBwHNHWemSGuy-g_515gFYNFhvEE0I,1438
  vellum/client/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
- vellum/client/core/client_wrapper.py,sha256=KPv6Bi7gbvtP_1ZhF5Do4HCJqNkUHW0nNYBEtS1ZDeY,1869
+ vellum/client/core/client_wrapper.py,sha256=k8n4pGAeg-w1LRqjPGuiXrSk0gbuNUUd8vHKs8aA6Lg,1869
  vellum/client/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
  vellum/client/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
  vellum/client/core/http_client.py,sha256=Z77OIxIbL4OAB2IDqjRq_sYa5yNYAWfmdhdCSSvh6Y4,19552
@@ -160,7 +160,7 @@ vellum/client/errors/bad_request_error.py,sha256=_EbO8mWqN9kFZPvIap8qa1lL_EWkRcs
  vellum/client/errors/forbidden_error.py,sha256=QO1kKlhClAPES6zsEK7g9pglWnxn3KWaOCAawWOg6Aw,263
  vellum/client/errors/internal_server_error.py,sha256=8USCagXyJJ1MOm9snpcXIUt6eNXvrd_aq7Gfcu1vlOI,268
  vellum/client/errors/not_found_error.py,sha256=tBVCeBC8n3C811WHRj_n-hs3h8MqwR5gp0vLiobk7W8,262
- vellum/client/reference.md,sha256=I-z_aZGJKDQh443ywv92ezeI9w_XsiLh-vHULu8RsDg,91011
+ vellum/client/reference.md,sha256=AG_zpL_pTZNHF9fEU-3I74QxAfCfCAsUrOXizL2PWig,91317
  vellum/client/resources/__init__.py,sha256=XgQao4rJxyYu71j64RFIsshz4op9GE8-i-C5GCv-KVE,1555
  vellum/client/resources/ad_hoc/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  vellum/client/resources/ad_hoc/client.py,sha256=rtpiGR6j8CcXSnN6UW_jYwLLdfJ9dwkTm_nta9oRzno,25933
@@ -188,7 +188,7 @@ vellum/client/resources/ml_models/client.py,sha256=XIYapTEY6GRNr7V0Kjy5bEeKmrhv9
  vellum/client/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  vellum/client/resources/organizations/client.py,sha256=Uye92moqjAcOCs4astmuFpT92QdC5SLMunA-C8_G-gA,3675
  vellum/client/resources/prompts/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
- vellum/client/resources/prompts/client.py,sha256=Z9Q9zvoCI8onkEbGSr5xVpNzzstV7xU9MmTwBnNoX98,14222
+ vellum/client/resources/prompts/client.py,sha256=Kr_AZdPQUSesk_JtjHl6c7vIWp7TG2PcKC74NrpW6rQ,15060
  vellum/client/resources/release_reviews/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
  vellum/client/resources/release_reviews/client.py,sha256=nb-EC7c7Y0Rklvg6CnlUKO1EWrnK26osnYJ9Z5Yw9fA,5094
  vellum/client/resources/sandboxes/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -1507,9 +1507,9 @@ vellum/workflows/__init__.py,sha256=CssPsbNvN6rDhoLuqpEv7MMKGa51vE6dvAh6U31Pcio,
  vellum/workflows/constants.py,sha256=2yg4_uo5gpqViy3ZLSwfC8qTybleYCtOnhA4Rj6bacM,1310
  vellum/workflows/context.py,sha256=jvMuyeRluay8BQa7GX1TqUlmoHLCycAVYKkp87sfXSo,1644
  vellum/workflows/descriptors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- vellum/workflows/descriptors/base.py,sha256=bvF3MWsc4Xyw5Z2s1A0fbsfMCebIbPYcGvbQ9uoa_Pg,14655
+ vellum/workflows/descriptors/base.py,sha256=X47a4TClHknsnjs53DkiXnop_5uLGVor487oxhHuGo4,14902
  vellum/workflows/descriptors/exceptions.py,sha256=gUy4UD9JFUKSeQnQpeuDSLiRqWjWiIsxLahB7p_q3JY,54
- vellum/workflows/descriptors/tests/test_utils.py,sha256=xoojJMyG5WLG9xGtmUjirz3lDFCcDsAcxjrtbdG8dNE,6060
+ vellum/workflows/descriptors/tests/test_utils.py,sha256=HJ5DoRz0sJvViGxyZ_FtytZjxN2J8xTkGtaVwCy6Q90,6928
  vellum/workflows/descriptors/utils.py,sha256=gmVXJjf2yWmvlYey41J2FZHeSou0JuCHKb3826K_Jok,3838
  vellum/workflows/edges/__init__.py,sha256=wSkmAnz9xyi4vZwtDbKxwlplt2skD7n3NsxkvR_pUus,50
  vellum/workflows/edges/edge.py,sha256=N0SnY3gKVuxImPAdCbPMPlHJIXbkQ3fwq_LbJRvVMFc,677
@@ -1544,6 +1544,7 @@ vellum/workflows/expressions/greater_than.py,sha256=1sbUH6Obf-VoBgs7ilIncwYBHYfX
  vellum/workflows/expressions/greater_than_or_equal_to.py,sha256=tsD5ZalB4SlryvEsvVtDkSr5Z13B2pABmHB8oHD8ojs,1276
  vellum/workflows/expressions/in_.py,sha256=RgiAIFntXGN4eWoOVBj1gqLymnBxSiw5hYD3TngF3dk,1123
  vellum/workflows/expressions/is_blank.py,sha256=vOOmK5poXmiNRVH7MR0feIFnL4rwKn7vmmTkJ9TcfVU,904
+ vellum/workflows/expressions/is_error.py,sha256=YkgISyxo20Oko53rxkLeVLZx7HFXRk6mJpAzgXeZPRg,809
  vellum/workflows/expressions/is_nil.py,sha256=xCHwhKlm2UnfC-bVedmGgENCrzNtcn4ZeCYwNflVWbU,748
  vellum/workflows/expressions/is_not_blank.py,sha256=GJNTe8TKIbh4RwWPFuPwEQw0hbxg2MobHg8bcal4xWU,911
  vellum/workflows/expressions/is_not_nil.py,sha256=sVNWq_7GKExujpCB_bXEmRxm1tnj0GRDbFY4BtTV1Ew,769
@@ -1572,7 +1573,7 @@ vellum/workflows/inputs/tests/test_inputs.py,sha256=lioA8917mFLYq7Ml69UNkqUjcWbb
  vellum/workflows/logging.py,sha256=_a217XogktV4Ncz6xKFz7WfYmZAzkfVRVuC0rWob8ls,437
  vellum/workflows/nodes/__init__.py,sha256=aVdQVv7Y3Ro3JlqXGpxwaU2zrI06plDHD2aumH5WUIs,1157
  vellum/workflows/nodes/bases/__init__.py,sha256=cniHuz_RXdJ4TQgD8CBzoiKDiPxg62ErdVpCbWICX64,58
- vellum/workflows/nodes/bases/base.py,sha256=FHZ5_pzN9NJ5Vpj1uo2QP-BzxCtVCUvcDo-taoqmasw,21095
+ vellum/workflows/nodes/bases/base.py,sha256=OLnt140jfqRHxg4xyzdiBd3TciqNPfuNNH2iXCpv7a4,20422
  vellum/workflows/nodes/bases/base_adornment_node.py,sha256=hrgzuTetM4ynPd9YGHoK8Vwwn4XITi3aZZ_OCnQrq4Y,3433
  vellum/workflows/nodes/bases/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  vellum/workflows/nodes/bases/tests/test_base_adornment_node.py,sha256=fXZI9KqpS4XMBrBnIEkK3foHaBVvyHwYcQWWDKay7ic,1148
@@ -1587,7 +1588,7 @@ vellum/workflows/nodes/core/inline_subworkflow_node/tests/test_node.py,sha256=kU
  vellum/workflows/nodes/core/map_node/__init__.py,sha256=MXpZYmGfhsMJHqqlpd64WiJRtbAtAMQz-_3fCU_cLV0,56
  vellum/workflows/nodes/core/map_node/node.py,sha256=rbF7fLAU0vUDEpgtWqeQTZFlhWOhJw38tgxWJ6exud8,9313
  vellum/workflows/nodes/core/map_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- vellum/workflows/nodes/core/map_node/tests/test_node.py,sha256=rf7CCDtjHxoPKeEtm9a8v_MNvkvu5UThH4xRXYrdEl8,6904
+ vellum/workflows/nodes/core/map_node/tests/test_node.py,sha256=v80IwAZl3w6WVhMXMV-4fGnwre2_S1Z6zL5HoYZrTz8,8787
  vellum/workflows/nodes/core/retry_node/__init__.py,sha256=lN2bIy5a3Uzhs_FYCrooADyYU6ZGShtvLKFWpelwPvo,60
  vellum/workflows/nodes/core/retry_node/node.py,sha256=EM4ya8Myr7ADllpjt9q-BAhB3hGrsF8MLZhp5eh4lyo,5590
  vellum/workflows/nodes/core/retry_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -1672,15 +1673,15 @@ vellum/workflows/nodes/experimental/tool_calling_node/__init__.py,sha256=S7OzT3I
  vellum/workflows/nodes/experimental/tool_calling_node/node.py,sha256=jwL1sbitmm1CpTOAEI0IIuc6VRr8d7yxUpS4Y5s9Bk8,5966
  vellum/workflows/nodes/experimental/tool_calling_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  vellum/workflows/nodes/experimental/tool_calling_node/tests/test_node.py,sha256=XK1H_QAT_nVFmFP442RYyPvpTfSgtU6kSGu3-OQPBNU,5072
- vellum/workflows/nodes/experimental/tool_calling_node/tests/test_utils.py,sha256=sF4ZfGK2uQNBVXC9yrnbFVzKIM-HWvXO1wak160MUTE,1386
- vellum/workflows/nodes/experimental/tool_calling_node/utils.py,sha256=ut3cdC7ezbxSvjMqHP0Km9yAVqOYoRztZVHShiq6zhk,12579
+ vellum/workflows/nodes/experimental/tool_calling_node/tests/test_utils.py,sha256=-g90SdXscuikF7JP0lFGvSvPc8jl2vBuHwBeiYJIiXk,1719
+ vellum/workflows/nodes/experimental/tool_calling_node/utils.py,sha256=U5JjMXg_NV_5XavydhOhsS4_EUoQ42KlEVBe_JiSR4g,12683
  vellum/workflows/nodes/mocks.py,sha256=a1FjWEIocseMfjzM-i8DNozpUsaW0IONRpZmXBoWlyc,10455
  vellum/workflows/nodes/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  vellum/workflows/nodes/tests/test_mocks.py,sha256=mfPvrs75PKcsNsbJLQAN6PDFoVqs9TmQxpdyFKDdO60,7837
  vellum/workflows/nodes/tests/test_utils.py,sha256=OPVZo9yi8qt0rVqayKhfgh4Hk-dVdIzqfbS89fDhRiE,4913
  vellum/workflows/nodes/utils.py,sha256=K2gf05eM-EKkKHf2SPpvEly8cBL4RftWSMvIZJIMlso,9455
  vellum/workflows/outputs/__init__.py,sha256=AyZ4pRh_ACQIGvkf0byJO46EDnSix1ZCAXfvh-ms1QE,94
- vellum/workflows/outputs/base.py,sha256=1OGHqBJVk7i8cW8uewFWOhIjuMlRRpzCDrGE30ZwDjw,8763
+ vellum/workflows/outputs/base.py,sha256=PUn0zhGzYCSZL34JXtXg9zALlXS_cqxZldLilPxDzb8,9614
  vellum/workflows/ports/__init__.py,sha256=bZuMt-R7z5bKwpu4uPW7LlJeePOQWmCcDSXe5frUY5g,101
  vellum/workflows/ports/node_ports.py,sha256=2Uo9gwNVCuH86J-GXcpc95QSDh5I-XVvhHJCMSWe-S8,2825
  vellum/workflows/ports/port.py,sha256=j_qiZlpx-a1cK5E7sxXwPcb_9NS-KUM-JoP8mgqg32k,4073
@@ -1716,14 +1717,15 @@ vellum/workflows/tests/test_undefined.py,sha256=zMCVliCXVNLrlC6hEGyOWDnQADJ2g83y
  vellum/workflows/types/__init__.py,sha256=KxUTMBGzuRCfiMqzzsykOeVvrrkaZmTTo1a7SLu8gRM,68
  vellum/workflows/types/code_execution_node_wrappers.py,sha256=3MNIoFZKzVzNS5qFLVuDwMV17QJw72zo7NRf52yMq5A,3074
  vellum/workflows/types/core.py,sha256=iLJkMKf417kjwRncWdT_qsfJ-qBv5x58um7SfrydJbs,1266
- vellum/workflows/types/definition.py,sha256=guuCHZkto8bkknoMcjfXRhSaDuiNjx3SNkutPf1makc,2506
- vellum/workflows/types/generics.py,sha256=tKXz0LwWJGKw1YGudyl9_yFDrRgU6yYV1yJV1Zv-LTw,1430
+ vellum/workflows/types/definition.py,sha256=WSTi7DfwgIUMaHaNl0jV_9thw_wsLbzt5WRElJebaHw,3172
+ vellum/workflows/types/generics.py,sha256=8jptbEx1fnJV0Lhj0MpCJOT6yNiEWeTOYOwrEAb5CRU,1576
  vellum/workflows/types/stack.py,sha256=h7NE0vXR7l9DevFBIzIAk1Zh59K-kECQtDTKOUunwMY,1314
  vellum/workflows/types/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ vellum/workflows/types/tests/test_definition.py,sha256=5wh_WEnE51epkoo-4PE-JbPlg8OGJUNlaBVWa9TcNSw,993
  vellum/workflows/types/tests/test_utils.py,sha256=UnZog59tR577mVwqZRqqWn2fScoOU1H6up0EzS8zYhw,2536
  vellum/workflows/types/utils.py,sha256=axxHbPLsnjhEOnMZrc5YarFd-P2bnsacBDQGNCvY8OY,6367
  vellum/workflows/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- vellum/workflows/utils/functions.py,sha256=W4t-IXvCoIn_oVY_wEkph1xjpd_xpGzBwExWqgJXeho,7168
+ vellum/workflows/utils/functions.py,sha256=ZN0rrIBF4R_KNt1CbRPVNGR36xEMUa1T7FkgZioou-Y,7185
  vellum/workflows/utils/names.py,sha256=QLUqfJ1tmSEeUwBKTTiv_Qk3QGbInC2RSmlXfGXc8Wo,380
  vellum/workflows/utils/pydantic_schema.py,sha256=eR_bBtY-T0pttJP-ARwagSdCOnwPUtiT3cegm2lzDTQ,1310
  vellum/workflows/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -1740,8 +1742,8 @@ vellum/workflows/workflows/event_filters.py,sha256=GSxIgwrX26a1Smfd-6yss2abGCnad
  vellum/workflows/workflows/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  vellum/workflows/workflows/tests/test_base_workflow.py,sha256=fROqff6AZpCIzaSwOKSdtYy4XR0UZQ6ejxL3RJOSJVs,20447
  vellum/workflows/workflows/tests/test_context.py,sha256=VJBUcyWVtMa_lE5KxdhgMu0WYNYnUQUDvTF7qm89hJ0,2333
- vellum_ai-0.14.74.dist-info/LICENSE,sha256=hOypcdt481qGNISA784bnAGWAE6tyIf9gc2E78mYC3E,1574
- vellum_ai-0.14.74.dist-info/METADATA,sha256=iFE4Twp7A5973aqOr498hlOQZ6LSnVAszYURwtB9VkI,5556
- vellum_ai-0.14.74.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- vellum_ai-0.14.74.dist-info/entry_points.txt,sha256=HCH4yc_V3J_nDv3qJzZ_nYS8llCHZViCDP1ejgCc5Ak,42
- vellum_ai-0.14.74.dist-info/RECORD,,
+ vellum_ai-0.14.76.dist-info/LICENSE,sha256=hOypcdt481qGNISA784bnAGWAE6tyIf9gc2E78mYC3E,1574
+ vellum_ai-0.14.76.dist-info/METADATA,sha256=nFVcjTVyBzDC1adJnQXZ6pqbwmtQ9gLYgHTlB5mSyik,5556
+ vellum_ai-0.14.76.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ vellum_ai-0.14.76.dist-info/entry_points.txt,sha256=HCH4yc_V3J_nDv3qJzZ_nYS8llCHZViCDP1ejgCc5Ak,42
+ vellum_ai-0.14.76.dist-info/RECORD,,
@@ -58,3 +58,9 @@ def mock_module(request) -> Generator[MockModuleResult, None, None]:

  os.chdir(current_dir)
  shutil.rmtree(temp_dir)
+
+
+ @pytest.fixture
+ def info_log_level(monkeypatch):
+ """Set log level to INFO for tests that request this fixture"""
+ monkeypatch.setenv("LOG_LEVEL", "INFO")
@@ -37,7 +37,7 @@ def mock_subprocess_run(mocker):
  return mocker.patch("subprocess.run")


- @pytest.mark.usefixtures("vellum_client")
+ @pytest.mark.usefixtures("vellum_client", "info_log_level")
  def test_image_push__self_hosted_happy_path(mock_docker_from_env, mock_subprocess_run, monkeypatch):
  # GIVEN a self hosted vellum api URL env var
  monkeypatch.setenv("VELLUM_API_URL", "mycompany.api.com")
@@ -66,6 +66,7 @@ def test_image_push__self_hosted_happy_path(mock_docker_from_env, mock_subproces
  assert "Image successfully pushed" in result.output


+ @pytest.mark.usefixtures("info_log_level")
  def test_image_push__self_hosted_happy_path__workspace_option(
  mock_docker_from_env, mock_subprocess_run, mock_httpx_transport, mock_temp_dir
  ):
@@ -172,6 +173,7 @@ def test_image_push__self_hosted_blocks_repo(mock_docker_from_env, monkeypatch):
  assert "For adding images to your self hosted install you must include" in result.output


+ @pytest.mark.usefixtures("info_log_level")
  def test_image_push_with_source_success(
  mock_docker_from_env, mock_subprocess_run, vellum_client, monkeypatch, mock_temp_dir
  ):
@@ -1,3 +1,4 @@
+ import pytest
  from datetime import datetime

  from click.testing import CliRunner
@@ -8,6 +9,7 @@ from vellum.client.types.workspace_read import WorkspaceRead
  from vellum_cli import main as cli_main


+ @pytest.mark.usefixtures("info_log_level")
  def test_ping__happy_path(vellum_client):
  # GIVEN a cli
  runner = CliRunner()
@@ -140,6 +140,7 @@ def test_push__happy_path(mock_module, vellum_client, base_command):
  assert extracted_files["workflow.py"] == workflow_py_file_content


+ @pytest.mark.usefixtures("info_log_level")
  def test_push__verify_default_url_in_raw_httpx_transport(mock_module, mock_httpx_transport):
  # GIVEN a single workflow configured
  module = mock_module.module
@@ -396,6 +397,7 @@ def test_push__deployment(mock_module, vellum_client, base_command):
  assert extracted_files["workflow.py"] == workflow_py_file_content


+ @pytest.mark.usefixtures("info_log_level")
  def test_push__dry_run_option_returns_report(mock_module, vellum_client):
  # GIVEN a single workflow configured
  temp_dir = mock_module.temp_dir
@@ -459,6 +461,7 @@ class ExampleWorkflow(BaseWorkflow):
  assert "iterable_item_added" in result.output


+ @pytest.mark.usefixtures("info_log_level")
  def test_push__dry_run_option_no_errors_returns_success(mock_module, vellum_client):
  """Test that dry-run returns exit code 0 when there are no errors or diffs"""
  # GIVEN a workflow module with a valid workflow (using the same pattern as happy path test)
@@ -490,6 +493,7 @@ def test_push__dry_run_option_no_errors_returns_success(mock_module, vellum_clie
  assert "## Proposed Diffs" in result.output


+ @pytest.mark.usefixtures("info_log_level")
  def test_push__strict_option_returns_diffs(mock_module, vellum_client):
  # GIVEN a single workflow configured
  temp_dir = mock_module.temp_dir
@@ -733,6 +737,7 @@ MY_OTHER_VELLUM_API_KEY=aaabbbcccddd
  }


+ @pytest.mark.usefixtures("info_log_level")
  def test_push__workspace_option__uses_different_api_url_env(mock_module, mock_httpx_transport):
  # GIVEN a single workflow configured
  temp_dir = mock_module.temp_dir