vellum-ai 0.14.42__py3-none-any.whl → 0.14.43__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
vellum/__init__.py CHANGED
@@ -310,6 +310,7 @@ from .types import (
     PromptNodeResultData,
     PromptOutput,
     PromptParameters,
+    PromptPushResponse,
     PromptRequestChatHistoryInput,
     PromptRequestInput,
     PromptRequestJsonInput,
@@ -939,6 +940,7 @@ __all__ = [
     "PromptNodeResultData",
     "PromptOutput",
     "PromptParameters",
+    "PromptPushResponse",
     "PromptRequestChatHistoryInput",
     "PromptRequestInput",
     "PromptRequestJsonInput",
vellum/client/core/client_wrapper.py CHANGED
@@ -18,9 +18,9 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.14.42",
+            "X-Fern-SDK-Version": "0.14.43",
         }
-        headers["X_API_KEY"] = self.api_key
+        headers["X-API-KEY"] = self.api_key
         return headers

     def get_environment(self) -> VellumEnvironment:
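
Two fixes land in `BaseClientWrapper.get_headers()`: the SDK version bump, and the API key header, which was previously sent as `X_API_KEY`. Header names with underscores are silently dropped by some proxies (e.g. nginx with its default `underscores_in_headers off`), so the hyphenated form is the safer one. A minimal sketch mirroring the corrected logic, handy for asserting on outbound headers in tests; the helper name is ours, not the SDK's:

```python
import typing


def build_headers(api_key: str, sdk_version: str = "0.14.43") -> typing.Dict[str, str]:
    # Mirrors BaseClientWrapper.get_headers() after this change: the key is
    # sent as "X-API-KEY" (hyphens), not the old "X_API_KEY" (underscores).
    headers: typing.Dict[str, str] = {
        "X-Fern-Language": "Python",
        "X-Fern-SDK-Name": "vellum-ai",
        "X-Fern-SDK-Version": sdk_version,
    }
    headers["X-API-KEY"] = api_key
    return headers


assert build_headers("test-key")["X-API-KEY"] == "test-key"
```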
vellum/client/reference.md CHANGED
@@ -4266,6 +4266,113 @@ client.prompts.pull(
 </dl>
 
 
+</dd>
+</dl>
+</details>
+
+<details><summary><code>client.prompts.<a href="src/vellum/resources/prompts/client.py">push</a>(...)</code></summary>
+<dl>
+<dd>
+
+#### 📝 Description
+
+<dl>
+<dd>
+
+<dl>
+<dd>
+
+Used to push updates to a Prompt in Vellum.
+</dd>
+</dl>
+</dd>
+</dl>
+
+#### 🔌 Usage
+
+<dl>
+<dd>
+
+<dl>
+<dd>
+
+```python
+from vellum import (
+    JinjaPromptBlock,
+    PromptExecConfig,
+    PromptParameters,
+    Vellum,
+    VellumVariable,
+)
+
+client = Vellum(
+    api_key="YOUR_API_KEY",
+)
+client.prompts.push(
+    exec_config=PromptExecConfig(
+        ml_model="ml_model",
+        input_variables=[
+            VellumVariable(
+                id="id",
+                key="key",
+                type="STRING",
+            )
+        ],
+        parameters=PromptParameters(),
+        blocks=[
+            JinjaPromptBlock(
+                template="template",
+            )
+        ],
+    ),
+)
+
+```
+</dd>
+</dl>
+</dd>
+</dl>
+
+#### ⚙️ Parameters
+
+<dl>
+<dd>
+
+<dl>
+<dd>
+
+**exec_config:** `PromptExecConfig`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**prompt_variant_id:** `typing.Optional[str]`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**prompt_sandbox_id:** `typing.Optional[str]`
+
+</dd>
+</dl>
+
+<dl>
+<dd>
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+</dd>
+</dl>
+</dd>
+</dl>
+
+
 </dd>
 </dl>
 </details>
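
The generated usage example above only exercises the required `exec_config`. Below is a hedged variant that also passes the two optional IDs; the semantics (updating an existing Prompt Sandbox/Variant rather than creating a new one) are inferred from the parameter names and the method description, not documented in the generated reference, and the IDs are placeholders:

```python
from vellum import (
    JinjaPromptBlock,
    PromptExecConfig,
    PromptParameters,
    Vellum,
    VellumVariable,
)

client = Vellum(api_key="YOUR_API_KEY")

# prompt_sandbox_id / prompt_variant_id are optional; when supplied, the push
# presumably targets an existing Prompt rather than creating a new one.
response = client.prompts.push(
    exec_config=PromptExecConfig(
        ml_model="gpt-4o",
        input_variables=[VellumVariable(id="v1", key="question", type="STRING")],
        parameters=PromptParameters(),
        blocks=[JinjaPromptBlock(template="Answer concisely: {{ question }}")],
    ),
    prompt_sandbox_id="<prompt-sandbox-id>",
    prompt_variant_id="<prompt-variant-id>",
)
print(response.prompt_sandbox_id, response.prompt_variant_id)
```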
vellum/client/resources/prompts/client.py CHANGED
@@ -1,7 +1,7 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from ...core.client_wrapper import SyncClientWrapper
 import typing
+from ...core.client_wrapper import SyncClientWrapper
 from ...core.request_options import RequestOptions
 from ...types.prompt_exec_config import PromptExecConfig
 from ...core.jsonable_encoder import jsonable_encoder
@@ -10,8 +10,13 @@ from ...errors.bad_request_error import BadRequestError
 from ...errors.not_found_error import NotFoundError
 from json.decoder import JSONDecodeError
 from ...core.api_error import ApiError
+from ...types.prompt_push_response import PromptPushResponse
+from ...core.serialization import convert_and_respect_annotation_metadata
 from ...core.client_wrapper import AsyncClientWrapper
 
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
 
 class PromptsClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
@@ -100,6 +105,113 @@ class PromptsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def push(
+        self,
+        *,
+        exec_config: PromptExecConfig,
+        prompt_variant_id: typing.Optional[str] = OMIT,
+        prompt_sandbox_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> PromptPushResponse:
+        """
+        Used to push updates to a Prompt in Vellum.
+
+        Parameters
+        ----------
+        exec_config : PromptExecConfig
+
+        prompt_variant_id : typing.Optional[str]
+
+        prompt_sandbox_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        PromptPushResponse
+
+
+        Examples
+        --------
+        from vellum import (
+            JinjaPromptBlock,
+            PromptExecConfig,
+            PromptParameters,
+            Vellum,
+            VellumVariable,
+        )
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.prompts.push(
+            exec_config=PromptExecConfig(
+                ml_model="ml_model",
+                input_variables=[
+                    VellumVariable(
+                        id="id",
+                        key="key",
+                        type="STRING",
+                    )
+                ],
+                parameters=PromptParameters(),
+                blocks=[
+                    JinjaPromptBlock(
+                        template="template",
+                    )
+                ],
+            ),
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "v1/prompts/push",
+            base_url=self._client_wrapper.get_environment().default,
+            method="POST",
+            json={
+                "exec_config": convert_and_respect_annotation_metadata(
+                    object_=exec_config, annotation=PromptExecConfig, direction="write"
+                ),
+                "prompt_variant_id": prompt_variant_id,
+                "prompt_sandbox_id": prompt_sandbox_id,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    PromptPushResponse,
+                    parse_obj_as(
+                        type_=PromptPushResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 404:
+                raise NotFoundError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
 
 class AsyncPromptsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -195,3 +307,118 @@ class AsyncPromptsClient:
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def push(
+        self,
+        *,
+        exec_config: PromptExecConfig,
+        prompt_variant_id: typing.Optional[str] = OMIT,
+        prompt_sandbox_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> PromptPushResponse:
+        """
+        Used to push updates to a Prompt in Vellum.
+
+        Parameters
+        ----------
+        exec_config : PromptExecConfig
+
+        prompt_variant_id : typing.Optional[str]
+
+        prompt_sandbox_id : typing.Optional[str]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        PromptPushResponse
+
+
+        Examples
+        --------
+        import asyncio
+
+        from vellum import (
+            AsyncVellum,
+            JinjaPromptBlock,
+            PromptExecConfig,
+            PromptParameters,
+            VellumVariable,
+        )
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.prompts.push(
+                exec_config=PromptExecConfig(
+                    ml_model="ml_model",
+                    input_variables=[
+                        VellumVariable(
+                            id="id",
+                            key="key",
+                            type="STRING",
+                        )
+                    ],
+                    parameters=PromptParameters(),
+                    blocks=[
+                        JinjaPromptBlock(
+                            template="template",
+                        )
+                    ],
+                ),
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "v1/prompts/push",
+            base_url=self._client_wrapper.get_environment().default,
+            method="POST",
+            json={
+                "exec_config": convert_and_respect_annotation_metadata(
+                    object_=exec_config, annotation=PromptExecConfig, direction="write"
+                ),
+                "prompt_variant_id": prompt_variant_id,
+                "prompt_sandbox_id": prompt_sandbox_id,
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    PromptPushResponse,
+                    parse_obj_as(
+                        type_=PromptPushResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 400:
+                raise BadRequestError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            if _response.status_code == 404:
+                raise NotFoundError(
+                    typing.cast(
+                        typing.Optional[typing.Any],
+                        parse_obj_as(
+                            type_=typing.Optional[typing.Any],  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
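
A hedged sketch of calling the new endpoint defensively, using only the exceptions the generated method raises above (the absolute error import paths follow from the relative imports at the top of this file):

```python
from vellum import JinjaPromptBlock, PromptExecConfig, PromptParameters, Vellum, VellumVariable
from vellum.client.errors.bad_request_error import BadRequestError
from vellum.client.errors.not_found_error import NotFoundError

client = Vellum(api_key="YOUR_API_KEY")

try:
    response = client.prompts.push(
        exec_config=PromptExecConfig(
            ml_model="ml_model",
            input_variables=[VellumVariable(id="id", key="key", type="STRING")],
            parameters=PromptParameters(),
            blocks=[JinjaPromptBlock(template="template")],
        ),
    )
except BadRequestError:
    ...  # 400: the exec_config payload failed validation
except NotFoundError:
    ...  # 404: e.g. an unknown prompt_sandbox_id / prompt_variant_id
```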
vellum/client/types/__init__.py CHANGED
@@ -318,6 +318,7 @@ from .prompt_node_result import PromptNodeResult
 from .prompt_node_result_data import PromptNodeResultData
 from .prompt_output import PromptOutput
 from .prompt_parameters import PromptParameters
+from .prompt_push_response import PromptPushResponse
 from .prompt_request_chat_history_input import PromptRequestChatHistoryInput
 from .prompt_request_input import PromptRequestInput
 from .prompt_request_json_input import PromptRequestJsonInput
@@ -919,6 +920,7 @@ __all__ = [
     "PromptNodeResultData",
     "PromptOutput",
     "PromptParameters",
+    "PromptPushResponse",
     "PromptRequestChatHistoryInput",
     "PromptRequestInput",
     "PromptRequestJsonInput",
vellum/client/types/deployment_read.py CHANGED
@@ -50,9 +50,9 @@ class DeploymentRead(UniversalBaseModel):
     A human-readable description of the deployment
     """
 
-    active_model_version_ids: typing.List[str] = pydantic.Field()
+    active_model_version_ids: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
     """
-    Deprecated. The Prompt execution endpoints return a `prompt_version_id` that could be used instead.
+    Deprecated. This now always returns a null value.
    """
 
     last_deployed_history_item_id: str = pydantic.Field()
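
Callers that read `active_model_version_ids` should treat it as absent from now on. A sketch under the assumption that deployments are fetched via the SDK's `deployments.retrieve` resource method, which is not shown in this diff:

```python
from vellum import Vellum

client = Vellum(api_key="YOUR_API_KEY")
deployment = client.deployments.retrieve(id="<deployment-id>")  # assumed resource method

# The field is deprecated and now always None; fall back to an empty list
# instead of assuming a populated List[str] as older code may have done.
model_version_ids = deployment.active_model_version_ids or []
```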
vellum/client/types/prompt_push_response.py ADDED
@@ -0,0 +1,20 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+import pydantic
+
+
+class PromptPushResponse(UniversalBaseModel):
+    prompt_variant_id: str
+    prompt_sandbox_id: str
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
vellum/types/prompt_push_response.py ADDED
@@ -0,0 +1,3 @@
+# WARNING: This file will be removed in a future release. Please import from "vellum.client" instead.
+
+from vellum.client.types.prompt_push_response import *
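
For completeness, a round-trip sketch of the new model; both field values are illustrative:

```python
from vellum import PromptPushResponse  # re-exported from vellum/__init__.py above

# The model is frozen and allows extra fields, so instances are immutable and
# tolerate server payloads that grow new keys.
resp = PromptPushResponse(prompt_variant_id="variant-123", prompt_sandbox_id="sandbox-456")
print(resp.prompt_variant_id, resp.prompt_sandbox_id)
```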
vellum/workflows/nodes/bases/base.py CHANGED
@@ -41,9 +41,6 @@ def is_nested_class(nested: Any, parent: Type) -> bool:
 
 class BaseNodeMeta(type):
     def __new__(mcs, name: str, bases: Tuple[Type, ...], dct: Dict[str, Any]) -> Any:
-        # TODO: Inherit the inner Output classes from every base class.
-        # https://app.shortcut.com/vellum/story/4007/support-auto-inheriting-parent-node-outputs
-
         if "Outputs" in dct:
             outputs_class = dct["Outputs"]
             if not any(issubclass(base, BaseOutputs) for base in outputs_class.__bases__):
vellum/workflows/nodes/bases/tests/test_base_adornment_node.py ADDED
@@ -0,0 +1,31 @@
+from typing import Callable, Type
+
+from vellum.workflows.nodes.bases.base import BaseNode
+from vellum.workflows.nodes.bases.base_adornment_node import BaseAdornmentNode
+from vellum.workflows.nodes.utils import create_adornment
+
+
+def test_base_adornment_node__output_references_of_same_name():
+    # GIVEN a custom adornment node
+    class CustomAdornmentNode(BaseAdornmentNode):
+        @classmethod
+        def wrap(cls) -> Callable[..., Type["CustomAdornmentNode"]]:
+            return create_adornment(cls)
+
+    # AND two nodes wrapped by the adornment with the same output
+    @CustomAdornmentNode.wrap()
+    class AppleNode(BaseNode):
+        class Outputs(BaseNode.Outputs):
+            fruit: str
+
+    @CustomAdornmentNode.wrap()
+    class OrangeNode(BaseNode):
+        class Outputs(BaseNode.Outputs):
+            fruit: str
+
+    # WHEN we get the output references of these outputs
+    apple_output_reference = AppleNode.Outputs.fruit
+    orange_output_reference = OrangeNode.Outputs.fruit
+
+    # THEN the output references should not be equal
+    assert apple_output_reference != orange_output_reference, "Output references should not be equal"
vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py CHANGED
@@ -1,6 +1,6 @@
 import json
 from uuid import uuid4
-from typing import Callable, ClassVar, Generic, Iterator, List, Optional, Set, Tuple, Union
+from typing import Callable, ClassVar, Generator, Generic, Iterator, List, Optional, Set, Tuple, Union
 
 from vellum import (
     AdHocExecutePromptEvent,
@@ -8,6 +8,7 @@ from vellum import (
     ChatMessage,
     FunctionDefinition,
     PromptBlock,
+    PromptOutput,
     PromptParameters,
     PromptRequestChatHistoryInput,
     PromptRequestInput,
@@ -15,17 +16,19 @@ from vellum import (
     PromptRequestStringInput,
     VellumVariable,
 )
-from vellum.client import RequestOptions
+from vellum.client import ApiError, RequestOptions
 from vellum.client.types.chat_message_request import ChatMessageRequest
 from vellum.client.types.prompt_settings import PromptSettings
 from vellum.client.types.rich_text_child_block import RichTextChildBlock
 from vellum.workflows.constants import OMIT
 from vellum.workflows.context import get_execution_context
 from vellum.workflows.errors import WorkflowErrorCode
+from vellum.workflows.errors.types import vellum_error_to_workflow_error
 from vellum.workflows.events.types import default_serializer
 from vellum.workflows.exceptions import NodeException
 from vellum.workflows.nodes.displayable.bases.base_prompt_node import BasePromptNode
 from vellum.workflows.nodes.displayable.bases.inline_prompt_node.constants import DEFAULT_PROMPT_PARAMETERS
+from vellum.workflows.outputs import BaseOutput
 from vellum.workflows.types import MergeBehavior
 from vellum.workflows.types.generics import StateType
 from vellum.workflows.utils.functions import compile_function_definition
@@ -103,17 +106,63 @@ class BaseInlinePromptNode(BasePromptNode[StateType], Generic[StateType]):
             else None
         )
 
-        return self._context.vellum_client.ad_hoc.adhoc_execute_prompt_stream(
-            ml_model=self.ml_model,
-            input_values=input_values,
-            input_variables=input_variables,
-            parameters=self.parameters,
-            blocks=self.blocks,
-            settings=self.settings,
-            functions=normalized_functions,
-            expand_meta=self.expand_meta,
-            request_options=request_options,
-        )
+        if self.settings and not self.settings.stream_enabled:
+            # This endpoint is returning a single event, so we need to wrap it in a generator
+            # to match the existing interface.
+            response = self._context.vellum_client.ad_hoc.adhoc_execute_prompt(
+                ml_model=self.ml_model,
+                input_values=input_values,
+                input_variables=input_variables,
+                parameters=self.parameters,
+                blocks=self.blocks,
+                settings=self.settings,
+                functions=normalized_functions,
+                expand_meta=self.expand_meta,
+                request_options=request_options,
+            )
+            return iter([response])
+        else:
+            return self._context.vellum_client.ad_hoc.adhoc_execute_prompt_stream(
+                ml_model=self.ml_model,
+                input_values=input_values,
+                input_variables=input_variables,
+                parameters=self.parameters,
+                blocks=self.blocks,
+                settings=self.settings,
+                functions=normalized_functions,
+                expand_meta=self.expand_meta,
+                request_options=request_options,
+            )
+
+    def _process_prompt_event_stream(self) -> Generator[BaseOutput, None, Optional[List[PromptOutput]]]:
+        self._validate()
+        try:
+            prompt_event_stream = self._get_prompt_event_stream()
+        except ApiError as e:
+            self._handle_api_error(e)
+
+        if not self.settings or (self.settings and self.settings.stream_enabled):
+            # We don't use the INITIATED event anyway, so we can just skip it
+            # and use the exception handling to catch other api level errors
+            try:
+                next(prompt_event_stream)
+            except ApiError as e:
+                self._handle_api_error(e)
+
+        outputs: Optional[List[PromptOutput]] = None
+        for event in prompt_event_stream:
+            if event.state == "INITIATED":
+                continue
+            elif event.state == "STREAMING":
+                yield BaseOutput(name="results", delta=event.output.value)
+            elif event.state == "FULFILLED":
+                outputs = event.outputs
+                yield BaseOutput(name="results", value=event.outputs)
+            elif event.state == "REJECTED":
+                workflow_error = vellum_error_to_workflow_error(event.error)
+                raise NodeException.of(workflow_error)
+
+        return outputs
 
     def _compile_prompt_inputs(self) -> Tuple[List[VellumVariable], List[PromptRequestInput]]:
         input_variables: List[VellumVariable] = []
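
The net effect for workflow authors: setting `stream_enabled=False` on a prompt node's settings routes execution through the blocking `adhoc_execute_prompt` endpoint while keeping the node's output interface unchanged, since the single fulfilled event is wrapped in `iter([...])`. A minimal sketch; the `InlinePromptNode` import path is assumed from the package layout rather than shown in this diff:

```python
from vellum import PromptSettings
from vellum.workflows.nodes import InlinePromptNode  # import path assumed


class NonStreamingPromptNode(InlinePromptNode):
    ml_model = "gpt-4o"
    blocks = []
    # With streaming disabled, consumers still receive a single "results"
    # output; only the transport (blocking vs. streamed events) changes.
    settings = PromptSettings(stream_enabled=False)
```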
vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/test_inline_prompt_node.py CHANGED
@@ -5,11 +5,14 @@ from uuid import uuid4
 from typing import Any, Iterator, List
 
 from vellum import (
+    AdHocExecutePromptEvent,
     ChatMessagePromptBlock,
+    FulfilledAdHocExecutePromptEvent,
     JinjaPromptBlock,
     PlainTextPromptBlock,
     PromptBlock,
     PromptParameters,
+    PromptSettings,
     RichTextPromptBlock,
     VariablePromptBlock,
 )
@@ -296,3 +299,177 @@ def test_inline_prompt_node__json_output(vellum_adhoc_prompt_client):
         request_options=mock.ANY,
         settings=None,
     )
+
+
+def test_inline_prompt_node__streaming_disabled(vellum_adhoc_prompt_client):
+    # GIVEN an InlinePromptNode
+    class Inputs(BaseInputs):
+        input: str
+
+    class State(BaseState):
+        pass
+
+    # AND it has streaming disabled
+    class MyInlinePromptNode(InlinePromptNode):
+        ml_model = "gpt-4o"
+        blocks = []
+        parameters = PromptParameters(
+            stop=[],
+            temperature=0.0,
+            max_tokens=4096,
+            top_p=1.0,
+            top_k=0,
+            frequency_penalty=0.0,
+            presence_penalty=0.0,
+            logit_bias=None,
+            custom_parameters={},
+        )
+        settings = PromptSettings(stream_enabled=False)
+
+    # AND a known response from invoking an inline prompt
+    expected_output: list[PromptOutput] = [StringVellumValue(value="Hello, world!")]
+
+    def generate_prompt_event(*args: Any, **kwargs: Any) -> AdHocExecutePromptEvent:
+        execution_id = str(uuid4())
+        return FulfilledAdHocExecutePromptEvent(
+            execution_id=execution_id,
+            outputs=expected_output,
+        )
+
+    vellum_adhoc_prompt_client.adhoc_execute_prompt.side_effect = generate_prompt_event
+
+    # WHEN the node is run
+    node = MyInlinePromptNode()
+    outputs = [o for o in node.run()]
+
+    # THEN the node should have produced the outputs we expect
+    result_output = outputs[0]
+    assert result_output.name == "results"
+    assert result_output.value == expected_output
+
+    # AND we should have made the expected call to Vellum search
+    vellum_adhoc_prompt_client.adhoc_execute_prompt.assert_called_once_with(
+        blocks=[],
+        expand_meta=Ellipsis,
+        functions=None,
+        input_values=[],
+        input_variables=[],
+        ml_model="gpt-4o",
+        parameters=PromptParameters(
+            stop=[],
+            temperature=0.0,
+            max_tokens=4096,
+            top_p=1.0,
+            top_k=0,
+            frequency_penalty=0.0,
+            presence_penalty=0.0,
+            logit_bias=None,
+            custom_parameters={},
+        ),
+        request_options=mock.ANY,
+        settings=PromptSettings(stream_enabled=False),
+    )
+
+
+def test_inline_prompt_node__json_output_with_streaming_disabled(vellum_adhoc_prompt_client):
+    # GIVEN an InlinePromptNode
+    class Inputs(BaseInputs):
+        input: str
+
+    class State(BaseState):
+        pass
+
+    class MyInlinePromptNode(InlinePromptNode):
+        ml_model = "gpt-4o"
+        blocks = []
+        parameters = PromptParameters(
+            stop=[],
+            temperature=0.0,
+            max_tokens=4096,
+            top_p=1.0,
+            top_k=0,
+            frequency_penalty=0.0,
+            presence_penalty=0.0,
+            logit_bias=None,
+            custom_parameters={
+                "json_mode": False,
+                "json_schema": {
+                    "name": "get_result",
+                    "schema": {
+                        "type": "object",
+                        "required": ["result"],
+                        "properties": {"result": {"type": "string", "description": ""}},
+                    },
+                },
+            },
+        )
+        settings = PromptSettings(stream_enabled=False)
+
+    # AND a known JSON response from invoking an inline prompt
+    expected_json = {"result": "Hello, world!"}
+    expected_outputs: List[PromptOutput] = [
+        StringVellumValue(value=json.dumps(expected_json)),
+    ]
+
+    def generate_prompt_event(*args: Any, **kwargs: Any) -> AdHocExecutePromptEvent:
+        execution_id = str(uuid4())
+        return FulfilledAdHocExecutePromptEvent(
+            execution_id=execution_id,
+            outputs=expected_outputs,
+        )
+
+    vellum_adhoc_prompt_client.adhoc_execute_prompt.side_effect = generate_prompt_event
+
+    # WHEN the node is run
+    node = MyInlinePromptNode(
+        state=State(
+            meta=StateMeta(workflow_inputs=Inputs(input="Generate JSON.")),
+        )
+    )
+    outputs = [o for o in node.run()]
+
+    # THEN the node should have produced the outputs we expect
+    results_output = outputs[0]
+    assert results_output.name == "results"
+    assert results_output.value == expected_outputs
+
+    text_output = outputs[1]
+    assert text_output.name == "text"
+    assert text_output.value == '{"result": "Hello, world!"}'
+
+    json_output = outputs[2]
+    assert json_output.name == "json"
+    assert json_output.value == expected_json
+
+    # AND we should have made the expected call to Vellum search
+    vellum_adhoc_prompt_client.adhoc_execute_prompt.assert_called_once_with(
+        blocks=[],
+        expand_meta=Ellipsis,
+        functions=None,
+        input_values=[],
+        input_variables=[],
+        ml_model="gpt-4o",
+        parameters=PromptParameters(
+            stop=[],
+            temperature=0.0,
+            max_tokens=4096,
+            top_p=1.0,
+            top_k=0,
+            frequency_penalty=0.0,
+            presence_penalty=0.0,
+            logit_bias=None,
+            custom_parameters={
+                "json_mode": False,
+                "json_schema": {
+                    "name": "get_result",
+                    "schema": {
+                        "type": "object",
+                        "required": ["result"],
+                        "properties": {"result": {"type": "string", "description": ""}},
+                    },
+                },
+            },
+        ),
+        request_options=mock.ANY,
+        settings=PromptSettings(stream_enabled=False),
+    )
vellum/workflows/nodes/experimental/tool_calling_node/node.py CHANGED
@@ -8,11 +8,7 @@ from vellum.workflows.exceptions import NodeException
 from vellum.workflows.graph.graph import Graph
 from vellum.workflows.inputs.base import BaseInputs
 from vellum.workflows.nodes.bases import BaseNode
-from vellum.workflows.nodes.experimental.tool_calling_node.utils import (
-    ToolRouterNode,
-    create_function_node,
-    create_tool_router_node,
-)
+from vellum.workflows.nodes.experimental.tool_calling_node.utils import create_function_node, create_tool_router_node
 from vellum.workflows.outputs.base import BaseOutputs
 from vellum.workflows.state.base import BaseState
 from vellum.workflows.state.context import WorkflowContext
@@ -69,7 +65,7 @@ class ToolCallingNode(BaseNode):
         graph = self._graph
 
         class Outputs(BaseWorkflow.Outputs):
-            text: str = ToolRouterNode.Outputs.text
+            text: str = self.tool_router_node.Outputs.text
             chat_history: List[ChatMessage] = ToolCallingState.chat_history
 
         subworkflow = ToolCallingWorkflow(
@@ -107,6 +103,7 @@ class ToolCallingNode(BaseNode):
         self._function_nodes = {
            function.__name__: create_function_node(
                function=function,
+                tool_router_node=self.tool_router_node,
            )
            for function in self.functions
        }
vellum/workflows/nodes/experimental/tool_calling_node/utils.py CHANGED
@@ -59,8 +59,8 @@ def create_tool_router_node(
         function_name = function.__name__
         port_condition = LazyReference(
             lambda: (
-                ToolRouterNode.Outputs.results[0]["type"].equals("FUNCTION_CALL")
-                & ToolRouterNode.Outputs.results[0]["value"]["name"].equals(function_name)
+                node.Outputs.results[0]["type"].equals("FUNCTION_CALL")
+                & node.Outputs.results[0]["value"]["name"].equals(function_name)
             )
         )
         port = Port.on_if(port_condition)
@@ -78,23 +78,26 @@ def create_tool_router_node(
         )
     )
 
-    node = type(
-        "ToolRouterNode",
-        (ToolRouterNode,),
-        {
-            "ml_model": ml_model,
-            "blocks": blocks,
-            "functions": functions,
-            "prompt_inputs": prompt_inputs,
-            "Ports": Ports,
-            "__module__": __name__,
-        },
+    node = cast(
+        Type[ToolRouterNode],
+        type(
+            "ToolRouterNode",
+            (ToolRouterNode,),
+            {
+                "ml_model": ml_model,
+                "blocks": blocks,
+                "functions": functions,
+                "prompt_inputs": prompt_inputs,
+                "Ports": Ports,
+                "__module__": __name__,
+            },
+        ),
     )
 
     return node
 
 
-def create_function_node(function: Callable[..., Any]) -> Type[FunctionNode]:
+def create_function_node(function: Callable[..., Any], tool_router_node: Type[ToolRouterNode]) -> Type[FunctionNode]:
     """
     Create a FunctionNode class for a given function.
 
@@ -103,7 +106,7 @@ def create_function_node(function: Callable[..., Any]) -> Type[FunctionNode]:
 
     # Create a class-level wrapper that calls the original function
     def execute_function(self) -> BaseNode.Outputs:
-        outputs = self.state.meta.node_outputs.get(ToolRouterNode.Outputs.text)
+        outputs = self.state.meta.node_outputs.get(tool_router_node.Outputs.text)
         # first parse into json
         outputs = json.loads(outputs)
         arguments = outputs["arguments"]
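
The `cast(Type[ToolRouterNode], type(...))` wrapper above changes nothing at runtime; it only tells the type checker what the dynamic `type()` call returns. A self-contained illustration of the idiom with generic names, not the package's API:

```python
from typing import Type, cast


class Base:
    label: str = "base"


def make_subclass(label: str) -> Type[Base]:
    # type() creates a new class at runtime; cast() asserts to the type
    # checker that the result is a Base subclass, which it cannot infer.
    return cast(Type[Base], type("DynamicBase", (Base,), {"label": label, "__module__": __name__}))


assert make_subclass("router").label == "router"
```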
vellum/workflows/references/output.py CHANGED
@@ -78,7 +78,7 @@ class OutputReference(BaseDescriptor[_OutputType], Generic[_OutputType]):
     def __eq__(self, other: object) -> bool:
         if not isinstance(other, type(self)):
             return False
-        return super().__eq__(other) and self._outputs_class == other._outputs_class
+        return super().__eq__(other) and id(self._outputs_class) == id(other._outputs_class)
 
     def __hash__(self) -> int:
         return hash((self._outputs_class, self._name))
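
Switching from `==` to `id()` matters when two distinct `Outputs` classes can compare equal, which is exactly the adornment collision the new test above guards against. An illustration using a deliberately permissive metaclass as a stand-in for that (assumed) equality behavior:

```python
class LooseMeta(type):
    # Classes of this metaclass compare equal by name, mimicking the assumed
    # behavior that made two adorned nodes' Outputs classes collide.
    def __eq__(cls, other: object) -> bool:
        return isinstance(other, type) and cls.__name__ == other.__name__

    def __hash__(cls) -> int:
        return hash(cls.__name__)


A = LooseMeta("Outputs", (), {})
B = LooseMeta("Outputs", (), {})

assert A == B            # equality conflates the two distinct classes
assert id(A) != id(B)    # identity still tells them apart, as the fix relies on
```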
{vellum_ai-0.14.42.dist-info → vellum_ai-0.14.43.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-ai
-Version: 0.14.42
+Version: 0.14.43
 Summary: 
 License: MIT
 Requires-Python: >=3.9,<4.0
{vellum_ai-0.14.42.dist-info → vellum_ai-0.14.43.dist-info}/RECORD CHANGED
@@ -7,7 +7,7 @@ vellum_cli/image_push.py,sha256=8DDvRDJEZ-FukUCqGW1827bg1ybF4xBbx9WyqWYQE-g,6816
 vellum_cli/init.py,sha256=WpnMXPItPmh0f0bBGIer3p-e5gu8DUGwSArT_FuoMEw,5093
 vellum_cli/logger.py,sha256=PuRFa0WCh4sAGFS5aqWB0QIYpS6nBWwPJrIXpWxugV4,1022
 vellum_cli/ping.py,sha256=p_BCCRjgPhng6JktuECtkDQLbhopt6JpmrtGoLnLJT8,1161
-vellum_cli/pull.py,sha256=27Mh10aQ8H1OkTmQOJcOuJ5cQcYbNjkkuQrBmtkRe0o,12262
+vellum_cli/pull.py,sha256=2hSJGeqooevMb--mcvRLQ1GYT-9290cI7VdSRifzmTg,12561
 vellum_cli/push.py,sha256=0bHAhp6B67JUFuiL9sSekrLOw-x2Dp6VsrzhFbOy7e4,9508
 vellum_cli/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum_cli/tests/conftest.py,sha256=AFYZryKA2qnUuCPBxBKmHLFoPiE0WhBFFej9tNwSHdc,1526
@@ -16,7 +16,7 @@ vellum_cli/tests/test_image_push.py,sha256=i3lJuW8nFRwL1M1OF6752IZYvGAFgKmkB2hd_
 vellum_cli/tests/test_init.py,sha256=8UOc_ThfouR4ja5cCl_URuLk7ohr9JXfCnG4yka1OUQ,18754
 vellum_cli/tests/test_main.py,sha256=qDZG-aQauPwBwM6A2DIu1494n47v3pL28XakTbLGZ-k,272
 vellum_cli/tests/test_ping.py,sha256=3ucVRThEmTadlV9LrJdCCrr1Ofj3rOjG6ue0BNR2UC0,2523
-vellum_cli/tests/test_pull.py,sha256=5nIDX5yXLyi6p-siLvrzfaepI4RWbaoMwgFoQFUEhI0,46692
+vellum_cli/tests/test_pull.py,sha256=iTxVbJGuehvgNt8Vp9W3Y5Bvaocfws8bl8LMGEbc_qQ,47508
 vellum_cli/tests/test_push.py,sha256=j22l7p_cy1KXdcvQKhWiM2bpu-3WL1q5IJquRm84mxE,25580
 vellum_ee/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum_ee/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -125,12 +125,12 @@ vellum_ee/workflows/tests/local_workflow/workflow.py,sha256=A4qOzOPNwePYxWbcAgIP
 vellum_ee/workflows/tests/test_display_meta.py,sha256=C25dErwghPNXio49pvSRxyOuc96srH6eYEwTAWdE2zY,2258
 vellum_ee/workflows/tests/test_server.py,sha256=SsOkS6sGO7uGC4mxvk4iv8AtcXs058P9hgFHzTWmpII,14519
 vellum_ee/workflows/tests/test_virtual_files.py,sha256=TJEcMR0v2S8CkloXNmCHA0QW0K6pYNGaIjraJz7sFvY,2762
-vellum/__init__.py,sha256=dGOb8Oe4E3jff14Y51TevTM2wx5z4C0nPCBrxW2E-Yg,41708
+vellum/__init__.py,sha256=4UwLVTlKBQ0cTIJ3NTqxw51aEFNUPfmJ-rVYP1yVUU0,41758
 vellum/client/README.md,sha256=JkCJjmMZl4jrPj46pkmL9dpK4gSzQQmP5I7z4aME4LY,4749
 vellum/client/__init__.py,sha256=Z-JHK2jGxhtTtmkLeOaUGGJWIUNYGNVBLvUewC6lp6w,118148
 vellum/client/core/__init__.py,sha256=SQ85PF84B9MuKnBwHNHWemSGuy-g_515gFYNFhvEE0I,1438
 vellum/client/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
-vellum/client/core/client_wrapper.py,sha256=HDYrFzioATPcqeXlA9AJXpStw-Rw1q_cI0rzgpCOpNU,1869
+vellum/client/core/client_wrapper.py,sha256=gOQ_e1s2iQI79ioggNAaNo71Ue9CGaAkWTvlVyZxaMI,1869
 vellum/client/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
 vellum/client/core/file.py,sha256=X9IbmkZmB2bB_DpmZAO3crWdXagOakAyn6UCOCImCPg,2322
 vellum/client/core/http_client.py,sha256=R0pQpCppnEtxccGvXl4uJ76s7ro_65Fo_erlNNLp_AI,19228
@@ -146,7 +146,7 @@ vellum/client/errors/bad_request_error.py,sha256=_EbO8mWqN9kFZPvIap8qa1lL_EWkRcs
 vellum/client/errors/forbidden_error.py,sha256=QO1kKlhClAPES6zsEK7g9pglWnxn3KWaOCAawWOg6Aw,263
 vellum/client/errors/internal_server_error.py,sha256=8USCagXyJJ1MOm9snpcXIUt6eNXvrd_aq7Gfcu1vlOI,268
 vellum/client/errors/not_found_error.py,sha256=tBVCeBC8n3C811WHRj_n-hs3h8MqwR5gp0vLiobk7W8,262
-vellum/client/reference.md,sha256=D7qqNgorznQIlZHVqW6UMjTmPSI2zSF2QuMx5KFn4F4,89585
+vellum/client/reference.md,sha256=23-W_ajXfNLsPKA25ICSXpSxxnI0sRQgYCAk6D2w9LM,90890
 vellum/client/resources/__init__.py,sha256=XgQao4rJxyYu71j64RFIsshz4op9GE8-i-C5GCv-KVE,1555
 vellum/client/resources/ad_hoc/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 vellum/client/resources/ad_hoc/client.py,sha256=93FXK-Wpvh72G8ji2__2Dmc5OYl9G5GRHiknyGIjeX4,25557
@@ -174,7 +174,7 @@ vellum/client/resources/ml_models/client.py,sha256=XIYapTEY6GRNr7V0Kjy5bEeKmrhv9
 vellum/client/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 vellum/client/resources/organizations/client.py,sha256=Uye92moqjAcOCs4astmuFpT92QdC5SLMunA-C8_G-gA,3675
 vellum/client/resources/prompts/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-vellum/client/resources/prompts/client.py,sha256=_rNTUjhl_ZF3vyQa_M1BSTrX4DlFXU_SXkwwCEYKD2s,6598
+vellum/client/resources/prompts/client.py,sha256=C9t17kHuVsgarw1dEk8NSEaRXb-WOi0VT8huT2jySlk,14044
 vellum/client/resources/release_reviews/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 vellum/client/resources/release_reviews/client.py,sha256=VLXcmw1o8cYYtdTJQpajJWE2ve1z40_IXIbQRQIhqpY,9395
 vellum/client/resources/sandboxes/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -198,7 +198,7 @@ vellum/client/resources/workspace_secrets/__init__.py,sha256=FTtvy8EDg9nNNg9WCat
 vellum/client/resources/workspace_secrets/client.py,sha256=h7UzXLyTttPq1t-JZGMg1BWxypxJvBGUdqg7KGT7MK4,8027
 vellum/client/resources/workspaces/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 vellum/client/resources/workspaces/client.py,sha256=RthwzN1o-Jxwg5yyNNodavFyNUSxfLoTv26w3mRR5g8,3595
-vellum/client/types/__init__.py,sha256=JanJFC8Ha3zl3jT4kZgUzIbj2oo4AY1sONJH1nM7r_o,63143
+vellum/client/types/__init__.py,sha256=DODWRvaxjqdVCvpr4b6kxku_2WZKSvl7lEl6tSr_gXU,63222
 vellum/client/types/ad_hoc_execute_prompt_event.py,sha256=bCjujA2XsOgyF3bRZbcEqV2rOIymRgsLoIRtZpB14xg,607
 vellum/client/types/ad_hoc_expand_meta.py,sha256=1gv-NCsy_6xBYupLvZH979yf2VMdxAU-l0y0ynMKZaw,1331
 vellum/client/types/ad_hoc_fulfilled_prompt_execution_meta.py,sha256=oDG60TpwK1YNSKhRsBbiP2O3ZF9PKR-M9chGIfKw4R4,1004
@@ -269,7 +269,7 @@ vellum/client/types/create_test_suite_test_case_request.py,sha256=SYUz7_aZMQlin_
 vellum/client/types/deployment_history_item.py,sha256=YfcHo4X5OjHXsffndZoAjShYncUN19ZwIm96qKE0G7o,1310
 vellum/client/types/deployment_provider_payload_response.py,sha256=b0lkt0rK88ARQaMWn9MAHeWtMBsZKofDMlOAUsQvv7g,818
 vellum/client/types/deployment_provider_payload_response_payload.py,sha256=xHLQnWFN0AZRZdrOiKawwpoKK7BTmnZfp0P7FCc2ZqE,188
-vellum/client/types/deployment_read.py,sha256=e1Z3vHwtJ1AsNE83PqXte9aFV2LhqEK4zRUeMSVJPGA,2160
+vellum/client/types/deployment_read.py,sha256=GwegkT3HzTJL5_awWKn8rFdpILgtwWEZIPE_YCqXwWg,2139
 vellum/client/types/deployment_release_tag_deployment_history_item.py,sha256=df4qKHT1f-z0jnRS4UmP8MQe6u3PwYej_d8KDF7EL88,631
 vellum/client/types/deployment_release_tag_read.py,sha256=dUrTOz9LH1gAvC_ktMB_7NztkeBnlNSX_9x15Ld3D3I,1278
 vellum/client/types/docker_service_token.py,sha256=T0icNHBKsIs6TrEiDRjckM_f37hcF1DMwEE8161tTvY,614
@@ -505,6 +505,7 @@ vellum/client/types/prompt_node_result.py,sha256=3jewO-nPodoXTq_5RxgwhKfDZrvoPjR
 vellum/client/types/prompt_node_result_data.py,sha256=fNOxBfK3ablDBxkUWVVstJMYaGdHGgu27WxP87E6UQ4,872
 vellum/client/types/prompt_output.py,sha256=NpDGJNIYIivzQJnBeoJLpJlCk7gqBESLwv5Qtn_20qQ,398
 vellum/client/types/prompt_parameters.py,sha256=Vkwh4zI9gX1DuGQxrWiUUa1TshTfnPlS7_yRrziD5qg,1046
+vellum/client/types/prompt_push_response.py,sha256=Un61pv4kVH0Omd8OKVfTk1yZ1RWrwuquAgl1vanb04Y,601
 vellum/client/types/prompt_request_chat_history_input.py,sha256=DB2io5piMSyA89f5lnIVYO4MLZoNALNSufx8Y-oOwOE,790
 vellum/client/types/prompt_request_input.py,sha256=brEdYhYm74Ac8XjK9wF0rKOLgnqd_Cg19yMS7VfB4qQ,400
 vellum/client/types/prompt_request_json_input.py,sha256=vLhwvCWL_yjVfDzT4921xK4Ql92OkvG-ruvOC_uppFI,739
@@ -1178,6 +1179,7 @@ vellum/types/prompt_node_result.py,sha256=9ootTTh8lscQ-0WE0-bqdmn7XFvpP7uavO-g7m
 vellum/types/prompt_node_result_data.py,sha256=2ivj-nvfHUB85fCsQHJV8nACA6lsqHWR2PFf5AqycBI,161
 vellum/types/prompt_output.py,sha256=t5Ue9qqNCyDJ54i4YPF4kQ4nbbzgrXT1srqv_PEdtQU,151
 vellum/types/prompt_parameters.py,sha256=MnKA48eOcybD-gSTp5jLmzib-U2KeSQ0G1VJpPg5qJc,155
+vellum/types/prompt_push_response.py,sha256=VZq4LnRDeMWdMIW5WqaU2v1jDUKKakerxakox781118,158
 vellum/types/prompt_request_chat_history_input.py,sha256=Ngf_R0NgpFWmLKP1zQsXANaRB8P9LqJuk5a0Dlm-78U,171
 vellum/types/prompt_request_input.py,sha256=kjHHbam1HcDm9_1br9iqlZhrke3nRJYh4UNZs1h5qy4,158
 vellum/types/prompt_request_json_input.py,sha256=OlXiUPchxe184SWbmIvbmARpY9YWPi8yPqWaVC8xoBU,163
@@ -1534,9 +1536,10 @@ vellum/workflows/inputs/tests/test_inputs.py,sha256=lioA8917mFLYq7Ml69UNkqUjcWbb
 vellum/workflows/logging.py,sha256=_a217XogktV4Ncz6xKFz7WfYmZAzkfVRVuC0rWob8ls,437
 vellum/workflows/nodes/__init__.py,sha256=aVdQVv7Y3Ro3JlqXGpxwaU2zrI06plDHD2aumH5WUIs,1157
 vellum/workflows/nodes/bases/__init__.py,sha256=cniHuz_RXdJ4TQgD8CBzoiKDiPxg62ErdVpCbWICX64,58
-vellum/workflows/nodes/bases/base.py,sha256=eW-3RSkBgtuGY8x2nmbHYiUg_HXS5U57n3k6Fh-dJ9s,15330
+vellum/workflows/nodes/bases/base.py,sha256=tX3xOQIPfnspQPNaOMwGoZ93gM5d5dWeK1YUukzkINI,15158
 vellum/workflows/nodes/bases/base_adornment_node.py,sha256=Ao2opOW4kgNoYXFF9Pk7IMpVZdy6luwrjcqEwU5Q9V0,3404
 vellum/workflows/nodes/bases/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+vellum/workflows/nodes/bases/tests/test_base_adornment_node.py,sha256=fXZI9KqpS4XMBrBnIEkK3foHaBVvyHwYcQWWDKay7ic,1148
 vellum/workflows/nodes/bases/tests/test_base_node.py,sha256=6J85q-vtfG-NHzCndMKLk5_sEEDiI52sTGaxefcUCOU,7892
 vellum/workflows/nodes/core/__init__.py,sha256=5zDMCmyt1v0HTJzlUBwq3U9L825yZGZhT9JL18-mRR4,455
 vellum/workflows/nodes/core/error_node/__init__.py,sha256=g7RRnlHhqu4qByfLjBwCunmgGA8dI5gNsjS3h6TwlSI,60
@@ -1572,9 +1575,9 @@ vellum/workflows/nodes/displayable/bases/base_prompt_node/__init__.py,sha256=Org
 vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py,sha256=amBXi7Tv50AbGLhfWbwX83PlOdV1XyYRyQmpa6_afE4,3511
 vellum/workflows/nodes/displayable/bases/inline_prompt_node/__init__.py,sha256=Hl35IAoepRpE-j4cALaXVJIYTYOF3qszyVbxTj4kS1s,82
 vellum/workflows/nodes/displayable/bases/inline_prompt_node/constants.py,sha256=fnjiRWLoRlC4Puo5oQcpZD5Hd-EesxsAo9l5tGAkpZQ,270
-vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py,sha256=rga24gkK9_STRhFwhBwGL7oHhTTZvLWS_rXHHrp85p4,8386
+vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py,sha256=wqN1EjyjTL6McUmlkHWu3GXVzcNaqDjavvmKUHDaVqg,10623
 vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/test_inline_prompt_node.py,sha256=ZCXCZs-_OyPk4nqCpuWY-vw87lg92TDZ2tK_gckJ7mg,10450
+vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/test_inline_prompt_node.py,sha256=YPOFoaEBENfOzE_qWo3WdQ_E1dQk78aLCWk8gOMvTjg,16042
 vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py,sha256=lwH7mfiHcRKFxU1Y9IPQVgb3o5trssuhwlKnA30rTWk,9777
 vellum/workflows/nodes/displayable/bases/search_node.py,sha256=3UtbqY3QO4kzfJHbmUNZGnEEfJmaoiF892u8H6TGjp8,5381
 vellum/workflows/nodes/displayable/bases/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -1629,8 +1632,8 @@ vellum/workflows/nodes/experimental/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQ
 vellum/workflows/nodes/experimental/openai_chat_completion_node/__init__.py,sha256=lsyD9laR9p7kx5-BXGH2gUTM242UhKy8SMV0SR6S2iE,90
 vellum/workflows/nodes/experimental/openai_chat_completion_node/node.py,sha256=cKI2Ls25L-JVt4z4a2ozQa-YBeVy21Z7BQ32Sj7iBPE,10460
 vellum/workflows/nodes/experimental/tool_calling_node/__init__.py,sha256=S7OzT3I4cyOU5Beoz87nPwCejCMP2FsHBFL8OcVmxJ4,118
-vellum/workflows/nodes/experimental/tool_calling_node/node.py,sha256=w73v2pfpqFaTOYbtHkH7nFuGgqQebLO6dr_Mxn1n7Dc,4848
-vellum/workflows/nodes/experimental/tool_calling_node/utils.py,sha256=hwJ1GjoNOEWoCdT1R0b6gVRAAGWGTUlKutTgBF7GRP4,4664
+vellum/workflows/nodes/experimental/tool_calling_node/node.py,sha256=NUC7VZj2D86IDQzjCq_a3-Xeqj_b3BE8T1kOMIfN7V8,4878
+vellum/workflows/nodes/experimental/tool_calling_node/utils.py,sha256=_b4xqs2jEQY9aWCCJsFvZZrvXo74NeYiIkD7uJ9RHeU,4781
 vellum/workflows/nodes/mocks.py,sha256=a1FjWEIocseMfjzM-i8DNozpUsaW0IONRpZmXBoWlyc,10455
 vellum/workflows/nodes/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum/workflows/nodes/tests/test_mocks.py,sha256=mfPvrs75PKcsNsbJLQAN6PDFoVqs9TmQxpdyFKDdO60,7837
@@ -1650,7 +1653,7 @@ vellum/workflows/references/external_input.py,sha256=c_4SojTpykCSbGS1Pjmx9FfquyY
 vellum/workflows/references/input.py,sha256=3INu-TLTi4dziWmva6LO3WvgDlPzsjayUx61cVvqLJA,325
 vellum/workflows/references/lazy.py,sha256=jgUYmgt-yAybzPf_R-74MzdU8VuNwMYI8EQqrj9lVR0,2948
 vellum/workflows/references/node.py,sha256=LP854wDVs-9I_aZ7-nkbwXqL2H7W2_3LED2e9FixNS8,1418
-vellum/workflows/references/output.py,sha256=QWrqLGpeo6mFxC_fKmL3LHIfrcDD9SzOhQdHvG99I9Y,3379
+vellum/workflows/references/output.py,sha256=Odpjqnw2uY6lbmt49sUwDclBPZMndYxgtFAGn1iKj8k,3387
 vellum/workflows/references/state_value.py,sha256=bInUF0A3Pt4-zhA0f6LdSuyv8tz7n5QRkHAEn4gsmqI,711
 vellum/workflows/references/tests/test_lazy.py,sha256=0s50-LizMTlSTBQahpK0fg_xqCucA8YTp6QmIMqPvMk,919
 vellum/workflows/references/vellum_secret.py,sha256=Od4d19a5yletWMqNfJR5d_mZQUkVcFzj29mE-T9J7yE,480
@@ -1695,8 +1698,8 @@ vellum/workflows/workflows/event_filters.py,sha256=GSxIgwrX26a1Smfd-6yss2abGCnad
 vellum/workflows/workflows/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum/workflows/workflows/tests/test_base_workflow.py,sha256=8P5YIsNMO78_CR1NNK6wkEdkMB4b3Q_Ni1qxh78OnHo,20481
 vellum/workflows/workflows/tests/test_context.py,sha256=VJBUcyWVtMa_lE5KxdhgMu0WYNYnUQUDvTF7qm89hJ0,2333
-vellum_ai-0.14.42.dist-info/LICENSE,sha256=hOypcdt481qGNISA784bnAGWAE6tyIf9gc2E78mYC3E,1574
-vellum_ai-0.14.42.dist-info/METADATA,sha256=J5skujRAzUMKICz1RgTzBNlCpPUYQgSpgFEuNkYCuf0,5484
-vellum_ai-0.14.42.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-vellum_ai-0.14.42.dist-info/entry_points.txt,sha256=HCH4yc_V3J_nDv3qJzZ_nYS8llCHZViCDP1ejgCc5Ak,42
-vellum_ai-0.14.42.dist-info/RECORD,,
+vellum_ai-0.14.43.dist-info/LICENSE,sha256=hOypcdt481qGNISA784bnAGWAE6tyIf9gc2E78mYC3E,1574
+vellum_ai-0.14.43.dist-info/METADATA,sha256=_cI_OM2v1FvIfRXuOWWaVpZdA0DFUSezO_ENVULlBbI,5484
+vellum_ai-0.14.43.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+vellum_ai-0.14.43.dist-info/entry_points.txt,sha256=HCH4yc_V3J_nDv3qJzZ_nYS8llCHZViCDP1ejgCc5Ak,42
+vellum_ai-0.14.43.dist-info/RECORD,,
vellum_cli/pull.py CHANGED
@@ -179,6 +179,13 @@ def pull_command(
     except ApiError as e:
         if e.status_code == 401 or e.status_code == 403:
             raise Exception("Please make sure your `VELLUM_API_KEY` environment variable is set correctly.")
+
+        if e.status_code == 500:
+            raise Exception(
+                "The Pull API failed with an unexpected error. Please try again later and contact support if the problem persists."  # noqa: E501
+            )
+
+        # TODO: We should return an Origin header in to validate this case
         raise Exception(
             "The API we tried to pull is invalid. Please make sure your `VELLUM_API_URL` environment variable is set correctly."  # noqa: E501
         )
vellum_cli/tests/test_pull.py CHANGED
@@ -950,6 +950,29 @@ def test_pull__unauthorized_error_path(vellum_client):
     assert str(result.exception) == "Please make sure your `VELLUM_API_KEY` environment variable is set correctly."
 
 
+def test_pull__unexpected_error_path(vellum_client):
+    workflow_deployment = "test-workflow-deployment-id"
+
+    # GIVEN a 500 error with the error message from the API
+    def mock_error_generator():
+        yield b""
+        raise ApiError(status_code=500, body={"detail": "Internal server error"})
+
+    vellum_client.workflows.pull.return_value = mock_error_generator()
+
+    # WHEN the user runs the pull command
+    runner = CliRunner()
+    result = runner.invoke(cli_main, ["workflows", "pull", "--workflow-deployment", workflow_deployment])
+
+    # THEN the command returns an error
+    assert result.exit_code == 1
+    assert (
+        str(result.exception)
+        == """The Pull API failed with an unexpected error. \
+Please try again later and contact support if the problem persists."""
+    )
+
+
 @pytest.mark.parametrize(
     "workflow_deployment",
     [