vellum-ai 0.14.50__py3-none-any.whl → 0.14.52__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. vellum/client/__init__.py +24 -12
  2. vellum/client/core/client_wrapper.py +1 -1
  3. vellum/client/resources/workflows/client.py +1 -2
  4. vellum/client/types/ml_model_usage_wrapper.py +1 -1
  5. vellum/workflows/nodes/core/map_node/tests/test_node.py +1 -1
  6. vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py +19 -2
  7. vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/test_inline_prompt_node.py +149 -3
  8. vellum/workflows/nodes/displayable/search_node/node.py +5 -0
  9. vellum/workflows/nodes/displayable/search_node/tests/test_node.py +43 -0
  10. vellum/workflows/nodes/displayable/tests/test_inline_text_prompt_node.py +1 -1
  11. vellum/workflows/nodes/experimental/__init__.py +3 -0
  12. vellum/workflows/nodes/experimental/tool_calling_node/tests/test_tool_calling_node.py +53 -0
  13. vellum/workflows/nodes/experimental/tool_calling_node/utils.py +12 -5
  14. {vellum_ai-0.14.50.dist-info → vellum_ai-0.14.52.dist-info}/METADATA +1 -1
  15. {vellum_ai-0.14.50.dist-info → vellum_ai-0.14.52.dist-info}/RECORD +27 -26
  16. vellum_cli/__init__.py +7 -0
  17. vellum_cli/pull.py +26 -3
  18. vellum_cli/push.py +5 -2
  19. vellum_cli/tests/test_pull.py +72 -0
  20. vellum_cli/tests/test_push.py +42 -0
  21. vellum_ee/workflows/display/nodes/base_node_display.py +8 -1
  22. vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py +29 -12
  23. vellum_ee/workflows/display/nodes/vellum/tests/test_prompt_node.py +33 -1
  24. vellum_ee/workflows/display/utils/expressions.py +1 -1
  25. {vellum_ai-0.14.50.dist-info → vellum_ai-0.14.52.dist-info}/LICENSE +0 -0
  26. {vellum_ai-0.14.50.dist-info → vellum_ai-0.14.52.dist-info}/WHEEL +0 -0
  27. {vellum_ai-0.14.50.dist-info → vellum_ai-0.14.52.dist-info}/entry_points.txt +0 -0
vellum/client/__init__.py CHANGED
@@ -133,11 +133,15 @@ class Vellum:
         self._client_wrapper = SyncClientWrapper(
             environment=environment,
             api_key=api_key,
-            httpx_client=httpx_client
-            if httpx_client is not None
-            else httpx.Client(timeout=_defaulted_timeout, follow_redirects=follow_redirects)
-            if follow_redirects is not None
-            else httpx.Client(timeout=_defaulted_timeout),
+            httpx_client=(
+                httpx_client
+                if httpx_client is not None
+                else (
+                    httpx.Client(timeout=_defaulted_timeout, follow_redirects=follow_redirects)
+                    if follow_redirects is not None
+                    else httpx.Client(timeout=_defaulted_timeout)
+                )
+            ),
             timeout=_defaulted_timeout,
         )
         self.ad_hoc = AdHocClient(client_wrapper=self._client_wrapper)
@@ -1442,7 +1446,9 @@ class Vellum:
             method="POST",
             json={
                 "actuals": convert_and_respect_annotation_metadata(
-                    object_=actuals, annotation=typing.Sequence[SubmitWorkflowExecutionActualRequest], direction="write"
+                    object_=actuals,
+                    annotation=typing.Sequence[SubmitWorkflowExecutionActualRequest],
+                    direction="write",
                 ),
                 "execution_id": execution_id,
                 "external_id": external_id,
@@ -1509,11 +1515,15 @@ class AsyncVellum:
         self._client_wrapper = AsyncClientWrapper(
             environment=environment,
             api_key=api_key,
-            httpx_client=httpx_client
-            if httpx_client is not None
-            else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects)
-            if follow_redirects is not None
-            else httpx.AsyncClient(timeout=_defaulted_timeout),
+            httpx_client=(
+                httpx_client
+                if httpx_client is not None
+                else (
+                    httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects)
+                    if follow_redirects is not None
+                    else httpx.AsyncClient(timeout=_defaulted_timeout)
+                )
+            ),
             timeout=_defaulted_timeout,
         )
         self.ad_hoc = AsyncAdHocClient(client_wrapper=self._client_wrapper)
@@ -2906,7 +2916,9 @@ class AsyncVellum:
             method="POST",
             json={
                 "actuals": convert_and_respect_annotation_metadata(
-                    object_=actuals, annotation=typing.Sequence[SubmitWorkflowExecutionActualRequest], direction="write"
+                    object_=actuals,
+                    annotation=typing.Sequence[SubmitWorkflowExecutionActualRequest],
+                    direction="write",
                 ),
                 "execution_id": execution_id,
                 "external_id": external_id,
vellum/client/core/client_wrapper.py CHANGED
@@ -18,7 +18,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "vellum-ai",
-            "X-Fern-SDK-Version": "0.14.50",
+            "X-Fern-SDK-Version": "0.14.52",
         }
         headers["X-API-KEY"] = self.api_key
         return headers
vellum/client/resources/workflows/client.py CHANGED
@@ -69,8 +69,7 @@ class WorkflowsClient:
         try:
             if 200 <= _response.status_code < 300:
                 _chunk_size = request_options.get("chunk_size", None) if request_options is not None else None
-                for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
-                    yield _chunk
+                yield from _response.iter_bytes(chunk_size=_chunk_size)
                 return
             _response.read()
             if _response.status_code == 400:
vellum/client/types/ml_model_usage_wrapper.py CHANGED
@@ -8,8 +8,8 @@ import pydantic


 class MlModelUsageWrapper(UniversalBaseModel):
-    ml_model_usage: MlModelUsage
     ml_model_name: str
+    ml_model_usage: MlModelUsage

     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
vellum/workflows/nodes/core/map_node/tests/test_node.py CHANGED
@@ -194,7 +194,7 @@ def test_map_node_parallel_execution_with_workflow():
             thread_ids[self.item] = current_thread_id

             # Simulate work
-            time.sleep(0.01)
+            time.sleep(0.1)

             end = time.time()
             end_str = datetime.datetime.fromtimestamp(end).strftime("%Y-%m-%d %H:%M:%S.%f")
vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py CHANGED
@@ -18,9 +18,9 @@ from vellum import (
 )
 from vellum.client import ApiError, RequestOptions
 from vellum.client.types.chat_message_request import ChatMessageRequest
+from vellum.client.types.prompt_exec_config import PromptExecConfig
 from vellum.client.types.prompt_settings import PromptSettings
 from vellum.client.types.rich_text_child_block import RichTextChildBlock
-from vellum.workflows.constants import OMIT
 from vellum.workflows.context import get_execution_context
 from vellum.workflows.errors import WorkflowErrorCode
 from vellum.workflows.errors.types import vellum_error_to_workflow_error
@@ -56,7 +56,7 @@ class BaseInlinePromptNode(BasePromptNode[StateType], Generic[StateType]):
     functions: Optional[List[Union[FunctionDefinition, Callable]]] = None

     parameters: PromptParameters = DEFAULT_PROMPT_PARAMETERS
-    expand_meta: Optional[AdHocExpandMeta] = OMIT
+    expand_meta: Optional[AdHocExpandMeta] = None

     settings: Optional[PromptSettings] = None

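Note: judging by the test updates later in this diff, the old `OMIT` default surfaced as `Ellipsis` in mocked client calls, which suggests an Ellipsis-based "field not provided" sentinel. A minimal sketch of that sentinel-default pattern, with a hypothetical `call_api` helper (not SDK code):

    from typing import Any, Optional

    OMIT: Any = ...  # sentinel meaning "argument was never passed"

    def call_api(expand_meta: Optional[dict] = OMIT) -> dict:
        payload = {}
        if expand_meta is not OMIT:  # strip the sentinel before serializing
            payload["expand_meta"] = expand_meta
        return payload

    print(call_api())                  # {}
    print(call_api(expand_meta=None))  # {'expand_meta': None}

With a plain `None` default there is no sentinel to leak, so test assertions simplify from `expand_meta=Ellipsis` to `expand_meta=None`.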
@@ -135,6 +135,23 @@ class BaseInlinePromptNode(BasePromptNode[StateType], Generic[StateType]):
         )

     def _process_prompt_event_stream(self) -> Generator[BaseOutput, None, Optional[List[PromptOutput]]]:
+        try:
+            # Compile dict blocks into PromptBlocks
+            exec_config = PromptExecConfig.model_validate(
+                {
+                    "ml_model": "",
+                    "input_variables": [],
+                    "parameters": {},
+                    "blocks": self.blocks,
+                }
+            )
+            self.blocks = exec_config.blocks  # type: ignore
+        except Exception:
+            raise NodeException(
+                message="Failed to compile blocks",
+                code=WorkflowErrorCode.INVALID_INPUTS,
+            )
+
        self._validate()
        try:
            prompt_event_stream = self._get_prompt_event_stream()
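The compile step above leans on Pydantic's `model_validate` to coerce plain dict blocks into typed block models, raising a `NodeException` when validation fails. A minimal sketch of that coercion with stand-in models (not the SDK's actual `PromptExecConfig`):

    from typing import List, Union
    from pydantic import BaseModel

    class PlainTextBlock(BaseModel):
        block_type: str
        text: str

    class VariableBlock(BaseModel):
        block_type: str
        input_variable: str

    class ExecConfig(BaseModel):
        blocks: List[Union[PlainTextBlock, VariableBlock]]

    # model_validate recursively converts matching dicts into model instances
    config = ExecConfig.model_validate({"blocks": [{"block_type": "PLAIN_TEXT", "text": "hi"}]})
    assert isinstance(config.blocks[0], PlainTextBlock)  # dict became a typed model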
vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/test_inline_prompt_node.py CHANGED
@@ -270,7 +270,7 @@ def test_inline_prompt_node__json_output(vellum_adhoc_prompt_client):
     # AND we should have made the expected call to Vellum search
     vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.assert_called_once_with(
         blocks=[],
-        expand_meta=Ellipsis,
+        expand_meta=None,
         functions=None,
         input_values=[],
         input_variables=[],
@@ -350,7 +350,7 @@ def test_inline_prompt_node__streaming_disabled(vellum_adhoc_prompt_client):
     # AND we should have made the expected call to Vellum search
     vellum_adhoc_prompt_client.adhoc_execute_prompt.assert_called_once_with(
         blocks=[],
-        expand_meta=Ellipsis,
+        expand_meta=None,
         functions=None,
         input_values=[],
         input_variables=[],
@@ -444,7 +444,7 @@ def test_inline_prompt_node__json_output_with_streaming_disabled(vellum_adhoc_pr
     # AND we should have made the expected call to Vellum search
     vellum_adhoc_prompt_client.adhoc_execute_prompt.assert_called_once_with(
         blocks=[],
-        expand_meta=Ellipsis,
+        expand_meta=None,
         functions=None,
         input_values=[],
         input_variables=[],
@@ -473,3 +473,149 @@ def test_inline_prompt_node__json_output_with_streaming_disabled(vellum_adhoc_pr
         request_options=mock.ANY,
         settings=PromptSettings(stream_enabled=False),
     )
+
+
+def test_inline_prompt_node__dict_blocks(vellum_adhoc_prompt_client):
+    # GIVEN a node that has dict blocks
+    class MyInlinePromptNode(InlinePromptNode):
+        ml_model = "gpt-4o"
+        blocks = [
+            {  # type: ignore
+                "state": None,
+                "blocks": [
+                    {
+                        "state": None,
+                        "blocks": [
+                            {
+                                "text": "You are a weather expert",
+                                "state": None,
+                                "block_type": "PLAIN_TEXT",
+                                "cache_config": None,
+                            }
+                        ],
+                        "block_type": "RICH_TEXT",
+                        "cache_config": None,
+                    }
+                ],
+                "chat_role": "SYSTEM",
+                "block_type": "CHAT_MESSAGE",
+                "chat_source": None,
+                "cache_config": None,
+                "chat_message_unterminated": None,
+            },
+            {  # type: ignore
+                "state": None,
+                "blocks": [
+                    {
+                        "state": None,
+                        "blocks": [
+                            {
+                                "state": None,
+                                "block_type": "VARIABLE",
+                                "cache_config": None,
+                                "input_variable": "question",
+                            }
+                        ],
+                        "block_type": "RICH_TEXT",
+                        "cache_config": None,
+                    }
+                ],
+                "chat_role": "USER",
+                "block_type": "CHAT_MESSAGE",
+                "chat_source": None,
+                "cache_config": None,
+                "chat_message_unterminated": None,
+            },
+            VariablePromptBlock(block_type="VARIABLE", state=None, cache_config=None, input_variable="chat_history"),
+        ]
+        prompt_inputs = {
+            "question": "What is the weather in Tokyo?",
+            "chat_history": "You are a weather expert",
+        }
+        settings = PromptSettings(stream_enabled=False)
+
+    # AND a known JSON response from invoking an inline prompt
+    expected_json = {"result": "Hello, world!"}
+    expected_outputs: List[PromptOutput] = [
+        StringVellumValue(value=json.dumps(expected_json)),
+    ]
+
+    def generate_prompt_event(*args: Any, **kwargs: Any) -> AdHocExecutePromptEvent:
+        execution_id = str(uuid4())
+        return FulfilledAdHocExecutePromptEvent(
+            execution_id=execution_id,
+            outputs=expected_outputs,
+        )
+
+    vellum_adhoc_prompt_client.adhoc_execute_prompt.side_effect = generate_prompt_event
+
+    # WHEN the node is run
+    node = MyInlinePromptNode()
+    outputs = [o for o in node.run()]
+
+    # THEN the node should have produced the outputs we expect
+    results_output = outputs[0]
+    assert results_output.name == "results"
+    assert results_output.value == expected_outputs
+
+    text_output = outputs[1]
+    assert text_output.name == "text"
+    assert text_output.value == '{"result": "Hello, world!"}'
+
+
+def test_inline_prompt_node__dict_blocks_error(vellum_adhoc_prompt_client):
+    # GIVEN a node that has an error (wrong block type)
+    class MyInlinePromptNode(InlinePromptNode):
+        ml_model = "gpt-4o"
+        blocks = [
+            {  # type: ignore
+                "state": None,
+                "blocks": [
+                    {
+                        "state": None,
+                        "blocks": [
+                            {
+                                "text": "You are a weather expert",
+                                "state": None,
+                                "block_type": "PLAIN_TEXT",
+                                "cache_config": None,
+                            }
+                        ],
+                        "block_type": "WRONG_BLOCK_TYPE",
+                        "cache_config": None,
+                    }
+                ],
+                "chat_role": "SYSTEM",
+                "block_type": "CHAT_MESSAGE",
+                "chat_source": None,
+                "cache_config": None,
+                "chat_message_unterminated": None,
+            },
+        ]
+        prompt_inputs = {
+            "question": "What is the weather in Tokyo?",
+        }
+        settings = PromptSettings(stream_enabled=False)
+
+    # AND a known JSON response from invoking an inline prompt
+    expected_json = {"result": "Hello, world!"}
+    expected_outputs: List[PromptOutput] = [
+        StringVellumValue(value=json.dumps(expected_json)),
+    ]
+
+    def generate_prompt_event(*args: Any, **kwargs: Any) -> AdHocExecutePromptEvent:
+        execution_id = str(uuid4())
+        return FulfilledAdHocExecutePromptEvent(
+            execution_id=execution_id,
+            outputs=expected_outputs,
+        )
+
+    vellum_adhoc_prompt_client.adhoc_execute_prompt.side_effect = generate_prompt_event
+
+    node = MyInlinePromptNode()
+    with pytest.raises(NodeException) as excinfo:
+        list(node.run())
+
+    # THEN the node should raise the correct NodeException
+    assert excinfo.value.code == WorkflowErrorCode.INVALID_INPUTS
+    assert "Failed to compile blocks" == str(excinfo.value)
vellum/workflows/nodes/displayable/search_node/node.py CHANGED
@@ -1,6 +1,8 @@
+import json
 from typing import ClassVar

 from vellum.workflows.nodes.displayable.bases import BaseSearchNode as BaseSearchNode
+from vellum.workflows.state.encoder import DefaultStateEncoder
 from vellum.workflows.types import MergeBehavior
 from vellum.workflows.types.generics import StateType

@@ -33,6 +35,9 @@ class SearchNode(BaseSearchNode[StateType]):
     text: str

     def run(self) -> Outputs:
+        if not isinstance(self.query, str):
+            self.query = json.dumps(self.query, cls=DefaultStateEncoder)
+
         results = self._perform_search().results
         text = self.chunk_separator.join([r.text for r in results])
         return self.Outputs(results=results, text=text)
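The `run()` override above JSON-encodes any non-string `query` (such as a chat history) before performing the search, delegating Vellum-specific types to `DefaultStateEncoder`. A rough stand-alone illustration, with a hypothetical dataclass standing in for the SDK's `ChatMessage`:

    import json
    from dataclasses import asdict, dataclass

    @dataclass
    class ChatMessage:  # stand-in for vellum.client.types.chat_message.ChatMessage
        role: str
        text: str

    query = [ChatMessage(role="USER", text="Hello, world!")]
    if not isinstance(query, str):
        # DefaultStateEncoder plays this role in the SDK; asdict suffices here
        query = json.dumps([asdict(m) for m in query])

    print(query)  # [{"role": "USER", "text": "Hello, world!"}]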
vellum/workflows/nodes/displayable/search_node/tests/test_node.py CHANGED
@@ -1,4 +1,7 @@
+import json
+
 from vellum import SearchResponse, SearchResult, SearchResultDocument
+from vellum.client.types.chat_message import ChatMessage
 from vellum.client.types.json_vellum_value_request import JsonVellumValueRequest
 from vellum.client.types.search_filters_request import SearchFiltersRequest
 from vellum.client.types.search_request_options_request import SearchRequestOptionsRequest
@@ -13,6 +16,7 @@ from vellum.workflows.nodes.displayable.bases.types import (
     SearchFilters,
 )
 from vellum.workflows.nodes.displayable.search_node.node import SearchNode
+from vellum.workflows.state.base import BaseState


 def test_run_workflow__happy_path(vellum_client):
@@ -172,3 +176,42 @@ def test_run_workflow__happy_path__options_attribute(vellum_client):
             ),
         ),
     )
+
+
+def test_run_workflow__chat_history_as_query(vellum_client):
+    """
+    Confirm that we can successfully invoke a Search node with a chat history as the query param,
+    backwards compatible with original workflows
+    """
+
+    # GIVEN a state definition with a chat history
+    class MyState(BaseState):
+        chat_history: list[ChatMessage]
+
+    # AND a Search Node that uses the chat history as the query param
+    class MySearchNode(SearchNode[MyState]):
+        query = MyState.chat_history  # type: ignore[assignment]
+        document_index = "document_index"
+        limit = 1
+
+    # AND a Search request that will return a 200 OK response
+    search_response = SearchResponse(
+        results=[
+            SearchResult(
+                text="Search query", score="0.0", keywords=["keywords"], document=SearchResultDocument(label="label")
+            )
+        ]
+    )
+
+    vellum_client.search.return_value = search_response
+
+    # WHEN we run the workflow
+    outputs = MySearchNode(state=MyState(chat_history=[ChatMessage(role="USER", text="Hello, world!")])).run()
+
+    # THEN the workflow should have completed successfully
+    assert outputs.text == "Search query"
+
+    # AND the options should be as expected
+    assert json.loads(vellum_client.search.call_args.kwargs["query"]) == [
+        {"role": "USER", "text": "Hello, world!", "source": None, "content": None}
+    ]
vellum/workflows/nodes/displayable/tests/test_inline_text_prompt_node.py CHANGED
@@ -74,7 +74,7 @@ def test_inline_text_prompt_node__basic(vellum_adhoc_prompt_client):
     # AND we should have made the expected call to Vellum search
     vellum_adhoc_prompt_client.adhoc_execute_prompt_stream.assert_called_once_with(
         blocks=[],
-        expand_meta=Ellipsis,
+        expand_meta=None,
         functions=None,
         input_values=[],
         input_variables=[],
vellum/workflows/nodes/experimental/__init__.py CHANGED
@@ -0,0 +1,3 @@
+from .tool_calling_node import ToolCallingNode
+
+__all__ = ["ToolCallingNode"]
vellum/workflows/nodes/experimental/tool_calling_node/tests/test_tool_calling_node.py CHANGED
@@ -0,0 +1,53 @@
+from vellum.client.types.function_call import FunctionCall
+from vellum.client.types.function_call_vellum_value import FunctionCallVellumValue
+from vellum.workflows.nodes.experimental.tool_calling_node.utils import create_tool_router_node
+from vellum.workflows.state.base import BaseState, StateMeta
+
+
+def first_function() -> str:
+    return "first_function"
+
+
+def second_function() -> str:
+    return "second_function"
+
+
+def test_port_condition_match_function_name():
+    """
+    Test that the port condition correctly matches the function name.
+    """
+    # GIVEN a tool router node
+    router_node = create_tool_router_node(
+        ml_model="test-model",
+        blocks=[],
+        functions=[first_function, second_function],
+        prompt_inputs=None,
+    )
+
+    # AND a state with a function call to the first function
+    state = BaseState(
+        meta=StateMeta(
+            node_outputs={
+                router_node.Outputs.results: [
+                    FunctionCallVellumValue(
+                        value=FunctionCall(
+                            arguments={}, id="call_zp7pBQjGAOBCr7lo0AbR1HXT", name="first_function", state="FULFILLED"
+                        ),
+                    )
+                ],
+            },
+        )
+    )
+
+    # WHEN the port condition is resolved
+    # THEN the first function port should be true
+    first_function_port = getattr(router_node.Ports, "first_function")
+    assert first_function_port.resolve_condition(state) is True
+
+    # AND the second function port should be false
+    second_function_port = getattr(router_node.Ports, "second_function")
+    assert second_function_port.resolve_condition(state) is False
+
+    # AND the default port should be false
+    default_port = getattr(router_node.Ports, "default")
+    assert default_port.resolve_condition(state) is False
vellum/workflows/nodes/experimental/tool_calling_node/utils.py CHANGED
@@ -57,12 +57,19 @@ def create_tool_router_node(
     Ports = type("Ports", (), {})
     for function in functions:
         function_name = function.__name__
-        port_condition = LazyReference(
-            lambda: (
-                node.Outputs.results[0]["type"].equals("FUNCTION_CALL")
-                & node.Outputs.results[0]["value"]["name"].equals(function_name)
+
+        # Avoid capturing function_name directly in a lambda: a lambda captures
+        # the variable by reference, so if function_name changes on a later
+        # iteration, the port_condition would change along with it.
+        def create_port_condition(fn_name):
+            return LazyReference(
+                lambda: (
+                    node.Outputs.results[0]["type"].equals("FUNCTION_CALL")
+                    & node.Outputs.results[0]["value"]["name"].equals(fn_name)
+                )
             )
-        )
+
+        port_condition = create_port_condition(function_name)
         port = Port.on_if(port_condition)
         setattr(Ports, function_name, port)

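The factory-function indirection above avoids Python's classic late-binding closure pitfall: every lambda created in a loop shares the same variable, so all port conditions would end up matching the last function name. A generic demonstration (not Vellum-specific):

    # All lambdas close over the same loop variable and see its final value.
    conditions = [lambda: name for name in ["first", "second"]]
    print([c() for c in conditions])  # ['second', 'second']

    # A factory function binds each closure to its own argument,
    # which is what create_port_condition does above.
    def make(fn_name):
        return lambda: fn_name

    conditions = [make(name) for name in ["first", "second"]]
    print([c() for c in conditions])  # ['first', 'second']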
{vellum_ai-0.14.50.dist-info → vellum_ai-0.14.52.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-ai
-Version: 0.14.50
+Version: 0.14.52
 Summary:
 License: MIT
 Requires-Python: >=3.9,<4.0
{vellum_ai-0.14.50.dist-info → vellum_ai-0.14.52.dist-info}/RECORD CHANGED
@@ -1,14 +1,14 @@
 vellum_cli/CONTRIBUTING.md,sha256=FtDC7BGxSeMnwCXAUssFsAIElXtmJE-O5Z7BpolcgvI,2935
 vellum_cli/README.md,sha256=2NudRoLzWxNKqnuVy1JuQ7DerIaxWGYkrH8kMd-asIE,90
-vellum_cli/__init__.py,sha256=sCNP_hmFCexEWp1oQdpj8QsIUiAbo4MIlTalZEFruD8,12398
+vellum_cli/__init__.py,sha256=2_6oGoVcLFUh4L63Kz4SBL4Y6XevJ70oYbg7BJ3cb5Q,12569
 vellum_cli/aliased_group.py,sha256=ugW498j0yv4ALJ8vS9MsO7ctDW7Jlir9j6nE_uHAP8c,3363
 vellum_cli/config.py,sha256=v5BmZ-t_v4Jmqd7KVuQMZF2pRI-rbMspSkVYXIRoTmI,9448
 vellum_cli/image_push.py,sha256=skFXf25ixMOX1yfcyAtii-RivYYv-_hsv-Z-bVB6m5Q,7380
 vellum_cli/init.py,sha256=WpnMXPItPmh0f0bBGIer3p-e5gu8DUGwSArT_FuoMEw,5093
 vellum_cli/logger.py,sha256=PuRFa0WCh4sAGFS5aqWB0QIYpS6nBWwPJrIXpWxugV4,1022
 vellum_cli/ping.py,sha256=p_BCCRjgPhng6JktuECtkDQLbhopt6JpmrtGoLnLJT8,1161
-vellum_cli/pull.py,sha256=2hSJGeqooevMb--mcvRLQ1GYT-9290cI7VdSRifzmTg,12561
-vellum_cli/push.py,sha256=nWHLDi_w0LXycNkVv00CiNwY469BcTNBn7NphWpCA7E,9711
+vellum_cli/pull.py,sha256=M50yXzA_35N35gk1Y8KjLbXrzdRG86--XFQvEukxGtA,13371
+vellum_cli/push.py,sha256=9oYmYhIWln3U0g7AstWEOA6ng5W_RthUA-Fie8FalFE,9846
 vellum_cli/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum_cli/tests/conftest.py,sha256=AFYZryKA2qnUuCPBxBKmHLFoPiE0WhBFFej9tNwSHdc,1526
 vellum_cli/tests/test_config.py,sha256=uvKGDc8BoVyT9_H0Z-g8469zVxomn6Oi3Zj-vK7O_wU,2631
@@ -16,8 +16,8 @@ vellum_cli/tests/test_image_push.py,sha256=QM-JlR_aJappvwbCLteQZZf76sd7SE1sRj3ar
 vellum_cli/tests/test_init.py,sha256=8UOc_ThfouR4ja5cCl_URuLk7ohr9JXfCnG4yka1OUQ,18754
 vellum_cli/tests/test_main.py,sha256=qDZG-aQauPwBwM6A2DIu1494n47v3pL28XakTbLGZ-k,272
 vellum_cli/tests/test_ping.py,sha256=3ucVRThEmTadlV9LrJdCCrr1Ofj3rOjG6ue0BNR2UC0,2523
-vellum_cli/tests/test_pull.py,sha256=iTxVbJGuehvgNt8Vp9W3Y5Bvaocfws8bl8LMGEbc_qQ,47508
-vellum_cli/tests/test_push.py,sha256=uNMmPG9Z0uRN6xYYMzBGZaCXJZkAHWcbcSJRJ4eQk70,31182
+vellum_cli/tests/test_pull.py,sha256=7HRAhIdkVW5mR2VckEaNDjp4rt-MlIxOWMMI2XNUPE8,49814
+vellum_cli/tests/test_push.py,sha256=K-TaOjU4mc-x0-ee1DNXT7yZBC0pEM-R9VY57kdMdmY,32849
 vellum_ee/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum_ee/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum_ee/workflows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -26,7 +26,7 @@ vellum_ee/workflows/display/base.py,sha256=EqlQFD56kpqMY02ZBJBQajzJKh33Dwi60Wo77
 vellum_ee/workflows/display/editor/__init__.py,sha256=MSAgY91xCEg2neH5d8jXx5wRdR962ftZVa6vO9BGq9k,167
 vellum_ee/workflows/display/editor/types.py,sha256=x-tOOCJ6CF4HmiKDfCmcc3bOVfc1EBlP5o6u5WEfLoY,567
 vellum_ee/workflows/display/nodes/__init__.py,sha256=jI1aPBQf8DkmrYoZ4O-wR1duqZByOf5mDFmo_wFJPE4,307
-vellum_ee/workflows/display/nodes/base_node_display.py,sha256=mzODbbNfrjOi7rVQb6FFCEjQHZkTs76nAc8L-Q5yCnQ,16491
+vellum_ee/workflows/display/nodes/base_node_display.py,sha256=2VyAk9SjBpt_b2fp81KlFxS5ddk2JhcldEI1S4crPj0,16921
 vellum_ee/workflows/display/nodes/get_node_display_class.py,sha256=jI_kUi9LnNLDpY63QtlC4TfN8P571VN4LpzH0I1ZtLk,1149
 vellum_ee/workflows/display/nodes/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum_ee/workflows/display/nodes/tests/test_base_node_display.py,sha256=Z4Mf7xLCNiblSbpKI0BrV5modQr-ZcFzhfir_OSyTTs,2997
@@ -40,7 +40,7 @@ vellum_ee/workflows/display/nodes/vellum/conditional_node.py,sha256=MrvyiYD0qgQf
 vellum_ee/workflows/display/nodes/vellum/error_node.py,sha256=m2DmOXm9-jiiIl6zwkXHNfsYp5PTpBHEdt5xaIsabWo,2363
 vellum_ee/workflows/display/nodes/vellum/final_output_node.py,sha256=jUDI2FwVaw0Or4zJL58J_g0S--i59Hzik60s_Es_M-8,3098
 vellum_ee/workflows/display/nodes/vellum/guardrail_node.py,sha256=5_5D5PMzBOeUdVtRlANbfEsu7Gv3r37dLvpfjGAqYac,2330
-vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py,sha256=gVwQwycEPNtCs8tWbFyIMLpCA7zXnqcmuuhFqRWNxZM,10368
+vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py,sha256=-6Ru9W_vfNdLKLStB40qicMx6WvdejPM3PE54Onqk5w,10943
 vellum_ee/workflows/display/nodes/vellum/inline_subworkflow_node.py,sha256=fQV5o83BPTwGX6o-ThN4r7BcIhySyqwpW1JGYWpvSJI,5625
 vellum_ee/workflows/display/nodes/vellum/map_node.py,sha256=CiklGf5_tDbqE1XQm2mnbtoL01_2JYjcnB4FDTpMImQ,3824
 vellum_ee/workflows/display/nodes/vellum/merge_node.py,sha256=yBWeN4T_lOsDVnNOKWRiT7JYKu0IR5Fx2z99iq6QKSA,3273
@@ -55,7 +55,7 @@ vellum_ee/workflows/display/nodes/vellum/tests/test_code_execution_node.py,sha25
 vellum_ee/workflows/display/nodes/vellum/tests/test_error_node.py,sha256=540FoWMpJ3EN_DPjHsr9ODJWCRVcUa5hZBn-5T2GiHU,1665
 vellum_ee/workflows/display/nodes/vellum/tests/test_note_node.py,sha256=uiMB0cOxKZzos7YKnj4ef4DFa2bOvZJWIv-hfbUV6Go,1218
 vellum_ee/workflows/display/nodes/vellum/tests/test_prompt_deployment_node.py,sha256=G-qJyTNJkpqJiEZ3kCJl86CXJINLeFyf2lM0bQHCCOs,3822
-vellum_ee/workflows/display/nodes/vellum/tests/test_prompt_node.py,sha256=RPpromm0y9y-MukL8cmxpl9hYaw-JuNo8vFDOcLI4V4,8801
+vellum_ee/workflows/display/nodes/vellum/tests/test_prompt_node.py,sha256=7GGbGhcaXkWqLoOU9dWWKFnjVVE_dId9vcKFYzpIHKg,9945
 vellum_ee/workflows/display/nodes/vellum/tests/test_retry_node.py,sha256=h93ysolmbo2viisyhRnXKHPxiDK0I_dSAbYoHFYIoO4,1953
 vellum_ee/workflows/display/nodes/vellum/tests/test_subworkflow_deployment_node.py,sha256=BUzHJgjdWnPeZxjFjHfDBKnbFjYjnbXPjc-1hne1B2Y,3965
 vellum_ee/workflows/display/nodes/vellum/tests/test_templating_node.py,sha256=LSk2gx9TpGXbAqKe8dggQW8yJZqj-Cf0EGJFeGGlEcw,3321
@@ -95,7 +95,7 @@ vellum_ee/workflows/display/tests/workflow_serialization/test_complex_terminal_n
 vellum_ee/workflows/display/types.py,sha256=i4T7ElU5b5h-nA1i3scmEhO1BqmNDc4eJDHavATD88w,2821
 vellum_ee/workflows/display/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum_ee/workflows/display/utils/exceptions.py,sha256=LSwwxCYNxFkf5XMUcFkaZKpQ13OSrI7y_bpEUwbKVk0,169
-vellum_ee/workflows/display/utils/expressions.py,sha256=qsKRgxm9zKFgAgjc9LqKEWP1rtdzXA1NDsXu9kyhf60,12416
+vellum_ee/workflows/display/utils/expressions.py,sha256=8rwEsT99kSiCemIR7oFjlmphHoNqE5-7Fft5NdkWJ98,12419
 vellum_ee/workflows/display/utils/registry.py,sha256=fWIm5Jj-10gNFjgn34iBu4RWv3Vd15ijtSN0V97bpW8,1513
 vellum_ee/workflows/display/utils/vellum.py,sha256=mtoXmSYwR7rvrq-d6CzCW_auaJXTct0Mi1F0xpRCiNQ,5627
 vellum_ee/workflows/display/vellum.py,sha256=o7mq_vk2Yapu9DDKRz5l76h8EmCAypWGQYe6pryrbB8,3576
@@ -130,10 +130,10 @@ vellum_ee/workflows/tests/test_server.py,sha256=SsOkS6sGO7uGC4mxvk4iv8AtcXs058P9
 vellum_ee/workflows/tests/test_virtual_files.py,sha256=TJEcMR0v2S8CkloXNmCHA0QW0K6pYNGaIjraJz7sFvY,2762
 vellum/__init__.py,sha256=Hqfl49WZJzzqOKzVsTGi-j9twIqFOoRmACJsrEsjL44,41918
 vellum/client/README.md,sha256=qmaVIP42MnxAu8jV7u-CsgVFfs3-pHQODrXdZdFxtaw,4749
-vellum/client/__init__.py,sha256=PEnFl7LbXQcvAi3bVN2qyt5xm2FtVtq7xWKkcWM3Tg4,120166
+vellum/client/__init__.py,sha256=nv_MItkRFOTsTDcray01bea7NvO-P9bAj8lnUfTbxOo,120440
 vellum/client/core/__init__.py,sha256=SQ85PF84B9MuKnBwHNHWemSGuy-g_515gFYNFhvEE0I,1438
 vellum/client/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
-vellum/client/core/client_wrapper.py,sha256=0Uo8ifbV1rC2jIjuU95Td5DeLVDs0xqQP3DETAbhbEU,1869
+vellum/client/core/client_wrapper.py,sha256=P667a77GUeHTNshBFAxTS1VkSHNO_joyM4HtqkCS-8o,1869
 vellum/client/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
 vellum/client/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
 vellum/client/core/http_client.py,sha256=Z77OIxIbL4OAB2IDqjRq_sYa5yNYAWfmdhdCSSvh6Y4,19552
@@ -196,7 +196,7 @@ vellum/client/resources/workflow_sandboxes/client.py,sha256=XfMcbvSTF1_iTGIXsk1F
 vellum/client/resources/workflow_sandboxes/types/__init__.py,sha256=EaGVRU1w6kJiiHrbZOeEa0c3ggjfgv_jBqsyOkCRWOI,212
 vellum/client/resources/workflow_sandboxes/types/list_workflow_sandbox_examples_request_tag.py,sha256=TEwWit20W3X-zWPPLAhmUG05UudG9gaBSJ4Q4-rNJws,188
 vellum/client/resources/workflows/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-vellum/client/resources/workflows/client.py,sha256=uDC61aybVmgxPiLKuLpAB-fK3sagnFFX06zzmQngInA,11285
+vellum/client/resources/workflows/client.py,sha256=OwpMojUEZ6DdtqW5Q-165SCthYFbzt3IBVKHR5-4h-0,11244
 vellum/client/resources/workspace_secrets/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 vellum/client/resources/workspace_secrets/client.py,sha256=zlBdbeTP6sqvtyl_DlrpfG-W5hSP7tJ1NYLSygi4CLU,8205
 vellum/client/resources/workspaces/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -404,7 +404,7 @@ vellum/client/types/metric_definition_input.py,sha256=4nmwpPqbeNQYCzLkXCkc-FGV5K
 vellum/client/types/metric_node_result.py,sha256=YdKq1DZiBD1RBtjyMejImylv3BqrwY8B_UF4Ij-6_64,660
 vellum/client/types/ml_model_read.py,sha256=Vr5KjaS2Tca0GXsltfSYQpuyGYpgIahPEFfS6HfFGSo,706
 vellum/client/types/ml_model_usage.py,sha256=WcZ2F1hfxyTwe-spOVwv-qJYDjs4hf9sn7BF2abawPo,910
-vellum/client/types/ml_model_usage_wrapper.py,sha256=K0V5O-NqB12FAKUcBb9r3b25u4x-4jFcef2-V1PQhMU,645
+vellum/client/types/ml_model_usage_wrapper.py,sha256=Vi7urVmTn1E_aZV6TxnW-qjDayRv7A_6JDk84KqAIa0,645
 vellum/client/types/named_scenario_input_chat_history_variable_value_request.py,sha256=aVZmAxu-47c34NyhSkfi9tQqIPy29cdJ7Pb4MIgKeNw,862
 vellum/client/types/named_scenario_input_json_variable_value_request.py,sha256=UgnKv70zFviv1kl4nM7aM7IFA-7xyDOtglW4Y3GBZ28,757
 vellum/client/types/named_scenario_input_request.py,sha256=Pi8l377OHvKBwvPu9slZ1omf_NJ9S1mCQ5Wr-Ux5KVg,611
@@ -1558,7 +1558,7 @@ vellum/workflows/nodes/core/inline_subworkflow_node/tests/test_node.py,sha256=kU
 vellum/workflows/nodes/core/map_node/__init__.py,sha256=MXpZYmGfhsMJHqqlpd64WiJRtbAtAMQz-_3fCU_cLV0,56
 vellum/workflows/nodes/core/map_node/node.py,sha256=rbF7fLAU0vUDEpgtWqeQTZFlhWOhJw38tgxWJ6exud8,9313
 vellum/workflows/nodes/core/map_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vellum/workflows/nodes/core/map_node/tests/test_node.py,sha256=f3lSPYAU1vJUCLCujNOo0EAeBbOM9hnY5A1Wy58korc,6905
+vellum/workflows/nodes/core/map_node/tests/test_node.py,sha256=rf7CCDtjHxoPKeEtm9a8v_MNvkvu5UThH4xRXYrdEl8,6904
 vellum/workflows/nodes/core/retry_node/__init__.py,sha256=lN2bIy5a3Uzhs_FYCrooADyYU6ZGShtvLKFWpelwPvo,60
 vellum/workflows/nodes/core/retry_node/node.py,sha256=abtGvinLfi1tKqYIsWQKZtBUisF2Qw2yT1YoPw9cVk4,5297
 vellum/workflows/nodes/core/retry_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -1582,9 +1582,9 @@ vellum/workflows/nodes/displayable/bases/base_prompt_node/__init__.py,sha256=Org
 vellum/workflows/nodes/displayable/bases/base_prompt_node/node.py,sha256=amBXi7Tv50AbGLhfWbwX83PlOdV1XyYRyQmpa6_afE4,3511
 vellum/workflows/nodes/displayable/bases/inline_prompt_node/__init__.py,sha256=Hl35IAoepRpE-j4cALaXVJIYTYOF3qszyVbxTj4kS1s,82
 vellum/workflows/nodes/displayable/bases/inline_prompt_node/constants.py,sha256=fnjiRWLoRlC4Puo5oQcpZD5Hd-EesxsAo9l5tGAkpZQ,270
-vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py,sha256=wqN1EjyjTL6McUmlkHWu3GXVzcNaqDjavvmKUHDaVqg,10623
+vellum/workflows/nodes/displayable/bases/inline_prompt_node/node.py,sha256=87Z4n1gsI3LMm0C4TaJ7nfykY8zKaKigOg7Da65E8YQ,11223
 vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/test_inline_prompt_node.py,sha256=YPOFoaEBENfOzE_qWo3WdQ_E1dQk78aLCWk8gOMvTjg,16042
+vellum/workflows/nodes/displayable/bases/inline_prompt_node/tests/test_inline_prompt_node.py,sha256=5CNag1_aEFZbCL0nrOC5e1L-t90-4rp2xDwh0h52hVI,21407
 vellum/workflows/nodes/displayable/bases/prompt_deployment_node.py,sha256=T99UWACTD9ytVDVHa6W2go00V7HNwDxOyBFyMM2GnhQ,9567
 vellum/workflows/nodes/displayable/bases/search_node.py,sha256=3UtbqY3QO4kzfJHbmUNZGnEEfJmaoiF892u8H6TGjp8,5381
 vellum/workflows/nodes/displayable/bases/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -1623,24 +1623,25 @@ vellum/workflows/nodes/displayable/prompt_deployment_node/node.py,sha256=eUiQYdq
 vellum/workflows/nodes/displayable/prompt_deployment_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum/workflows/nodes/displayable/prompt_deployment_node/tests/test_node.py,sha256=c_nuuqrwiIjgj4qIbVypfDuOc-3TlgO6CbXFqQl2Nqw,19725
 vellum/workflows/nodes/displayable/search_node/__init__.py,sha256=hpBpvbrDYf43DElRZFLzieSn8weXiwNiiNOJurERQbs,62
-vellum/workflows/nodes/displayable/search_node/node.py,sha256=_VHHuTNN4icZBgc7O5U9SVKrv1zgKipU72fOtxTyrQU,1453
+vellum/workflows/nodes/displayable/search_node/node.py,sha256=vUTDyurYKw6KLABuVml_N_fbnNBDv5dBtejdoj82hWs,1646
 vellum/workflows/nodes/displayable/search_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vellum/workflows/nodes/displayable/search_node/tests/test_node.py,sha256=2-QCV7Vk_-YMls33p0GOUtCv3f2uPNZCjkB2CRjek7o,6562
+vellum/workflows/nodes/displayable/search_node/tests/test_node.py,sha256=OserVd6jPe6t49MQF0cxphI2irBLaC_GceMr0acFqoY,8075
 vellum/workflows/nodes/displayable/subworkflow_deployment_node/__init__.py,sha256=9yYM6001YZeqI1VOk1QuEM_yrffk_EdsO7qaPzINKds,92
 vellum/workflows/nodes/displayable/subworkflow_deployment_node/node.py,sha256=biv1H4gIX4B4VMFJ3Rp82NjE65GhmzLq7pREL0ozB2E,9484
 vellum/workflows/nodes/displayable/subworkflow_deployment_node/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum/workflows/nodes/displayable/subworkflow_deployment_node/tests/test_node.py,sha256=2KdPh1TeIeW_3xJq4QzAwfcuqL6PmMTLNPz4nSaDLmY,18030
 vellum/workflows/nodes/displayable/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-vellum/workflows/nodes/displayable/tests/test_inline_text_prompt_node.py,sha256=LaxohBcKfSW2PSiBBlx67FdW_q4YC2BM2ouH-vuGPAA,4700
+vellum/workflows/nodes/displayable/tests/test_inline_text_prompt_node.py,sha256=MHuIolSsrY9ziwoXWsye3XOODncL9DLZOkNYzQMLhRw,4696
 vellum/workflows/nodes/displayable/tests/test_search_node_wth_text_output.py,sha256=VepO5z1277c1y5N6LLIC31nnWD1aak2m5oPFplfJHHs,6935
 vellum/workflows/nodes/displayable/tests/test_text_prompt_deployment_node.py,sha256=dc3EEn1sOICpr3GdS8eyeFtExaGwWWcw9eHSdkRhQJU,2584
 vellum/workflows/nodes/experimental/README.md,sha256=eF6DfIL8t-HbF9-mcofOMymKrraiBHDLKTlnBa51ZiE,284
-vellum/workflows/nodes/experimental/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+vellum/workflows/nodes/experimental/__init__.py,sha256=_tpZGWAZLydcKxfrj1-plrZeTajskVhUr1A6mHoSaWM,78
 vellum/workflows/nodes/experimental/openai_chat_completion_node/__init__.py,sha256=lsyD9laR9p7kx5-BXGH2gUTM242UhKy8SMV0SR6S2iE,90
 vellum/workflows/nodes/experimental/openai_chat_completion_node/node.py,sha256=cKI2Ls25L-JVt4z4a2ozQa-YBeVy21Z7BQ32Sj7iBPE,10460
 vellum/workflows/nodes/experimental/tool_calling_node/__init__.py,sha256=S7OzT3I4cyOU5Beoz87nPwCejCMP2FsHBFL8OcVmxJ4,118
 vellum/workflows/nodes/experimental/tool_calling_node/node.py,sha256=NUC7VZj2D86IDQzjCq_a3-Xeqj_b3BE8T1kOMIfN7V8,4878
-vellum/workflows/nodes/experimental/tool_calling_node/utils.py,sha256=_b4xqs2jEQY9aWCCJsFvZZrvXo74NeYiIkD7uJ9RHeU,4781
+vellum/workflows/nodes/experimental/tool_calling_node/tests/test_tool_calling_node.py,sha256=sxG26mOwt4N36RLoPJ-ngginPqC5qFzD_kGj9izdCFI,1833
+vellum/workflows/nodes/experimental/tool_calling_node/utils.py,sha256=cdFR0yeb0mDl5CmH27cYQWIb4STg-ZfqtuI6rW66AHo,5097
 vellum/workflows/nodes/mocks.py,sha256=a1FjWEIocseMfjzM-i8DNozpUsaW0IONRpZmXBoWlyc,10455
 vellum/workflows/nodes/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum/workflows/nodes/tests/test_mocks.py,sha256=mfPvrs75PKcsNsbJLQAN6PDFoVqs9TmQxpdyFKDdO60,7837
@@ -1706,8 +1707,8 @@ vellum/workflows/workflows/event_filters.py,sha256=GSxIgwrX26a1Smfd-6yss2abGCnad
 vellum/workflows/workflows/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 vellum/workflows/workflows/tests/test_base_workflow.py,sha256=8P5YIsNMO78_CR1NNK6wkEdkMB4b3Q_Ni1qxh78OnHo,20481
 vellum/workflows/workflows/tests/test_context.py,sha256=VJBUcyWVtMa_lE5KxdhgMu0WYNYnUQUDvTF7qm89hJ0,2333
-vellum_ai-0.14.50.dist-info/LICENSE,sha256=hOypcdt481qGNISA784bnAGWAE6tyIf9gc2E78mYC3E,1574
-vellum_ai-0.14.50.dist-info/METADATA,sha256=E3j3kzjmM-9HAa1xAKCgCf5JsoBVzbm7vadnxywpahY,5484
-vellum_ai-0.14.50.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-vellum_ai-0.14.50.dist-info/entry_points.txt,sha256=HCH4yc_V3J_nDv3qJzZ_nYS8llCHZViCDP1ejgCc5Ak,42
-vellum_ai-0.14.50.dist-info/RECORD,,
+vellum_ai-0.14.52.dist-info/LICENSE,sha256=hOypcdt481qGNISA784bnAGWAE6tyIf9gc2E78mYC3E,1574
+vellum_ai-0.14.52.dist-info/METADATA,sha256=-vGzZDBmw_wd9r-qwKB7WAO8eJvsTB0_OGcEFjPwVU0,5484
+vellum_ai-0.14.52.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+vellum_ai-0.14.52.dist-info/entry_points.txt,sha256=HCH4yc_V3J_nDv3qJzZ_nYS8llCHZViCDP1ejgCc5Ak,42
+vellum_ai-0.14.52.dist-info/RECORD,,
vellum_cli/__init__.py CHANGED
@@ -259,6 +259,11 @@ Helpful for running and debugging workflows locally.""",
     help="""Directory to pull the workflow into. If not specified, \
 the workflow will be pulled into the current working directory.""",
 )
+@click.option(
+    "--workspace",
+    type=str,
+    help="The specific Workspace config to use when pulling",
+)
 def workflows_pull(
     module: Optional[str],
     include_json: Optional[bool],
@@ -268,6 +273,7 @@ def workflows_pull(
     strict: Optional[bool],
     include_sandbox: Optional[bool],
     target_directory: Optional[str],
+    workspace: Optional[str],
 ) -> None:
     """
     Pull Workflows from Vellum. If a module is provided, only the Workflow for that module will be pulled.
@@ -283,6 +289,7 @@ def workflows_pull(
         strict=strict,
         include_sandbox=include_sandbox,
         target_directory=target_directory,
+        workspace=workspace,
     )


vellum_cli/pull.py CHANGED
@@ -13,7 +13,13 @@ from vellum.client.core.api_error import ApiError
 from vellum.client.core.pydantic_utilities import UniversalBaseModel
 from vellum.utils.uuid import is_valid_uuid
 from vellum.workflows.vellum_client import create_vellum_client
-from vellum_cli.config import VellumCliConfig, WorkflowConfig, WorkflowDeploymentConfig, load_vellum_cli_config
+from vellum_cli.config import (
+    DEFAULT_WORKSPACE_CONFIG,
+    VellumCliConfig,
+    WorkflowConfig,
+    WorkflowDeploymentConfig,
+    load_vellum_cli_config,
+)
 from vellum_cli.logger import load_cli_logger

 ERROR_LOG_FILE_NAME = "error.log"
@@ -43,6 +49,7 @@ def _resolve_workflow_config(
     module: Optional[str] = None,
     workflow_sandbox_id: Optional[str] = None,
     workflow_deployment: Optional[str] = None,
+    workspace: Optional[str] = None,
 ) -> WorkflowConfigResolutionResult:
     if workflow_sandbox_id and workflow_deployment:
         raise ValueError("Cannot specify both workflow_sandbox_id and workflow_deployment")
@@ -53,6 +60,7 @@ def _resolve_workflow_config(
         workflow_config = WorkflowConfig(
             workflow_sandbox_id=workflow_sandbox_id,
             module=module,
+            workspace=workspace or DEFAULT_WORKSPACE_CONFIG.name,
         )
         config.workflows.append(workflow_config)
         return WorkflowConfigResolutionResult(
@@ -132,8 +140,9 @@ def pull_command(
     strict: Optional[bool] = None,
     include_sandbox: Optional[bool] = None,
     target_directory: Optional[str] = None,
+    workspace: Optional[str] = None,
 ) -> None:
-    load_dotenv()
+    load_dotenv(dotenv_path=os.path.join(os.getcwd(), ".env"))
     logger = load_cli_logger()
     config = load_vellum_cli_config()

@@ -142,6 +151,7 @@ def pull_command(
         module=module,
         workflow_sandbox_id=workflow_sandbox_id,
         workflow_deployment=workflow_deployment,
+        workspace=workspace,
     )

     workflow_config = workflow_config_result.workflow_config
@@ -157,7 +167,20 @@ def pull_command(
     else:
         logger.info(f"Pulling workflow from {pk}...")

-    client = create_vellum_client()
+    resolved_workspace = workspace or workflow_config.workspace or DEFAULT_WORKSPACE_CONFIG.name
+    workspace_config = (
+        next((w for w in config.workspaces if w.name == resolved_workspace), DEFAULT_WORKSPACE_CONFIG)
+        if workspace
+        else DEFAULT_WORKSPACE_CONFIG
+    )
+    api_key = os.getenv(workspace_config.api_key)
+    if not api_key:
+        raise ValueError(f"No API key value found in environment for workspace '{workspace_config.name}'.")
+
+    client = create_vellum_client(
+        api_key=api_key,
+        api_url=workspace_config.api_url,
+    )
     query_parameters = {}

     if include_json:
vellum_cli/push.py CHANGED
@@ -45,8 +45,11 @@ def push_command(
         else config.workflows
     )

-    if len(workflow_configs) > 1 and workspace:
-        workflow_configs = [w for w in workflow_configs if w.workspace == workspace]
+    if len(workflow_configs) > 1:
+        if workspace:
+            workflow_configs = [w for w in workflow_configs if w.workspace == workspace]
+        else:
+            workflow_configs = [w for w in workflow_configs if w.workspace == DEFAULT_WORKSPACE_CONFIG.name]

     if len(workflow_configs) == 0:
         if module and module_exists(module):
vellum_cli/tests/test_pull.py CHANGED
@@ -3,6 +3,7 @@ import io
 import json
 import os
 import tempfile
+from unittest import mock
 from uuid import uuid4
 import zipfile

@@ -1284,3 +1285,74 @@ def test_pull__workflow_deployment_with_name_and_id(vellum_client):
     assert lock_data["workflows"][0]["deployments"][0]["label"] == deployment_label

     os.chdir(current_dir)
+
+
+def test_pull__workspace_option__uses_different_api_key(mock_module, vellum_client_class):
+    # GIVEN a module and workflow_sandbox_id
+    temp_dir = mock_module.temp_dir
+    module = mock_module.module
+    set_pyproject_toml = mock_module.set_pyproject_toml
+    workflow_sandbox_id = str(uuid4())
+
+    # AND a different workspace is set in the pyproject.toml
+    set_pyproject_toml(
+        {
+            "workflows": [],
+            "workspaces": [
+                {
+                    "name": "my_other_workspace",
+                    "api_key": "MY_OTHER_VELLUM_API_KEY",
+                }
+            ],
+        }
+    )
+
+    # AND the .env file has the other api key stored
+    with open(os.path.join(temp_dir, ".env"), "w") as f:
+        f.write(
+            """
+VELLUM_API_KEY=abcdef123456
+MY_OTHER_VELLUM_API_KEY=aaabbbcccddd
+"""
+        )
+
+    # AND the workflow pull API call returns a zip file
+    vellum_client_class.return_value.workflows.pull.return_value = iter(
+        [_zip_file_map({"workflow.py": "print('hello')"})]
+    )
+
+    # WHEN calling `vellum pull` with --workspace
+    runner = CliRunner()
+    result = runner.invoke(
+        cli_main,
+        [
+            "workflows",
+            "pull",
+            module,
+            "--workflow-sandbox-id",
+            workflow_sandbox_id,
+            "--workspace",
+            "my_other_workspace",
+        ],
+    )
+
+    # THEN it should succeed
+    assert result.exit_code == 0, result.output
+
+    # AND we should have called the vellum client with the correct api key
+    vellum_client_class.assert_called_once_with(
+        api_key="aaabbbcccddd",
+        environment=mock.ANY,
+    )
+
+    # AND the vellum lock file should have been updated with the correct workspace
+    with open(os.path.join(temp_dir, "vellum.lock.json")) as f:
+        lock_file_content = json.load(f)
+        assert lock_file_content["workflows"][0]["workspace"] == "my_other_workspace"
+        assert lock_file_content["workflows"][0]["workflow_sandbox_id"] == workflow_sandbox_id
+
+    # AND the workflow.py file is written as expected
+    workflow_py = os.path.join(temp_dir, *module.split("."), "workflow.py")
+    assert os.path.exists(workflow_py)
+    with open(workflow_py) as f:
+        assert f.read() == "print('hello')"
vellum_cli/tests/test_push.py CHANGED
@@ -870,3 +870,45 @@ def test_push__create_new_config_for_existing_module(mock_module, vellum_client)
     new_config = new_configs[0]
     assert new_config["workflow_sandbox_id"] == new_workflow_sandbox_id
     assert new_config["workspace"] == "default"
+
+
+def test_push__use_default_workspace_if_not_specified__multiple_workflows_configured(mock_module, vellum_client):
+    # GIVEN a config with a workspace configured
+    temp_dir = mock_module.temp_dir
+    module = mock_module.module
+    workflow_sandbox_id = str(uuid4())
+    mock_module.set_pyproject_toml(
+        {
+            "workspaces": [
+                {"name": "my_other_workspace"},
+            ],
+            "workflows": [
+                {"module": module, "workflow_sandbox_id": workflow_sandbox_id, "workspace": "default"},
+                {"module": module, "workflow_sandbox_id": str(uuid4()), "workspace": "my_other_workspace"},
+            ],
+        }
+    )
+
+    # AND a workflow exists in the module successfully
+    _ensure_workflow_py(temp_dir, module)
+
+    # AND the push API call returns successfully
+    vellum_client.workflows.push.return_value = WorkflowPushResponse(
+        workflow_sandbox_id=workflow_sandbox_id,
+    )
+
+    # WHEN calling `vellum push` with a module without a workspace specified
+    runner = CliRunner()
+    result = runner.invoke(cli_main, ["workflows", "push", module])
+
+    # THEN it should succeed
+    assert result.exit_code == 0, result.output
+
+    # AND the lockfile should maintain that this workflow is using the default workspace
+    with open(os.path.join(temp_dir, "vellum.lock.json")) as f:
+        lock_file_content = json.load(f)
+        configs = [w for w in lock_file_content["workflows"] if w["module"] == module]
+        assert len(configs) == 2
+        config = configs[0]
+        assert config["workflow_sandbox_id"] == workflow_sandbox_id
+        assert config["workspace"] == "default"
vellum_ee/workflows/display/nodes/base_node_display.py CHANGED
@@ -133,6 +133,10 @@ class BaseNodeDisplay(Generic[NodeType], metaclass=BaseNodeDisplayMeta):
     # Used by each class extending BaseNodeDisplay to specify which attributes are meant to be serialized
     # as the former `"inputs"` field
     __serializable_inputs__: Set[NodeReference] = set()
+    # Used by each class extending BaseNodeDisplay to specify which attributes should be opted out
+    # of serialization. It's possible that we keep this as a user-facing API in the future, but we
+    # don't want to commit to that decision just yet.
+    __unserializable_attributes__: Set[NodeReference] = set()
     # END: Attributes for backwards compatible serialization

     def serialize(self, display_context: "WorkflowDisplayContext", **kwargs: Any) -> JsonObject:
@@ -145,9 +149,12 @@ class BaseNodeDisplay(Generic[NodeType], metaclass=BaseNodeDisplayMeta):
                 # We don't need to serialize generic node attributes containing a subworkflow
                 continue

+            if attribute in self.__unserializable_attributes__:
+                continue
+
             id = (
                 str(self.attribute_ids_by_name[attribute.name])
-                if self.attribute_ids_by_name
+                if self.attribute_ids_by_name.get(attribute.name)
                 else str(uuid4_from_hash(f"{node_id}|{attribute.name}"))
             )
             try:
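The `.get(attribute.name)` change matters because the old condition only tested that the dict was non-empty: once any attribute had a pre-assigned id, every other attribute's lookup would raise `KeyError` instead of falling back to a hashed id. A small illustration (hypothetical values, not the SDK's data):

    ids = {"prompt_inputs": "1234"}  # non-empty, so `if ids` is True
    attr = "blocks"

    # Old form: the truthy dict selects the lookup branch, which raises KeyError:
    # node_id = ids[attr] if ids else f"hash-of-{attr}"

    # New form: fall back whenever this specific attribute has no pre-assigned id
    node_id = ids[attr] if ids.get(attr) else f"hash-of-{attr}"
    print(node_id)  # hash-of-blocks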
vellum_ee/workflows/display/nodes/vellum/inline_prompt_node.py CHANGED
@@ -19,6 +19,14 @@ _InlinePromptNodeType = TypeVar("_InlinePromptNodeType", bound=InlinePromptNode)

 class BaseInlinePromptNodeDisplay(BaseNodeDisplay[_InlinePromptNodeType], Generic[_InlinePromptNodeType]):
     __serializable_inputs__ = {InlinePromptNode.prompt_inputs}
+    __unserializable_attributes__ = {
+        InlinePromptNode.blocks,
+        InlinePromptNode.functions,
+        InlinePromptNode.parameters,
+        InlinePromptNode.settings,
+        InlinePromptNode.expand_meta,
+        InlinePromptNode.request_options,
+    }

     def serialize(
         self, display_context: WorkflowDisplayContext, error_output_id: Optional[UUID] = None, **kwargs
@@ -219,16 +227,25 @@ class BaseInlinePromptNodeDisplay(BaseNodeDisplay[_InlinePromptNodeType], Generi
         return block

     def _serialize_attributes(self, display_context: "WorkflowDisplayContext"):
-        attribute_instances_by_name = {}
+        attributes = []
         for attribute in self._node:
-            if attribute.name in self.attribute_ids_by_name:
-                attribute_instances_by_name[attribute.name] = attribute.instance
-
-        return [
-            {
-                "id": str(attr_id),
-                "name": attr_name,
-                "value": serialize_value(display_context, attribute_instances_by_name[attr_name]),
-            }
-            for attr_name, attr_id in self.attribute_ids_by_name.items()
-        ]
+            if attribute in self.__unserializable_attributes__:
+                continue
+
+            id = (
+                str(self.attribute_ids_by_name[attribute.name])
+                if self.attribute_ids_by_name.get(attribute.name)
+                else str(uuid4_from_hash(f"{self.node_id}|{attribute.name}"))
+            )
+            try:
+                attributes.append(
+                    {
+                        "id": id,
+                        "name": attribute.name,
+                        "value": serialize_value(display_context, attribute.instance),
+                    }
+                )
+            except ValueError as e:
+                raise ValueError(f"Failed to serialize attribute '{attribute.name}': {e}")
+
+        return attributes
vellum_ee/workflows/display/nodes/vellum/tests/test_prompt_node.py CHANGED
@@ -4,6 +4,7 @@ from typing import Type

 from vellum.client.types.variable_prompt_block import VariablePromptBlock
 from vellum.workflows import BaseWorkflow
+from vellum.workflows.inputs import BaseInputs
 from vellum.workflows.nodes import BaseNode
 from vellum.workflows.nodes.displayable.inline_prompt_node.node import InlinePromptNode
 from vellum.workflows.ports.port import Port
@@ -145,7 +146,7 @@ def test_serialize_node__prompt_inputs__state_reference():
         ml_model = "gpt-4o"

     # AND a workflow with the prompt node
-    class Workflow(BaseWorkflow):
+    class Workflow(BaseWorkflow[BaseInputs, MyState]):
         graph = MyPromptNode

     # WHEN the workflow is serialized
@@ -184,6 +185,37 @@ def test_serialize_node__prompt_inputs__state_reference():
         },
     ]

+    # AND the prompt attributes should include a dictionary reference with the state reference
+    prompt_inputs_attribute = next(
+        attribute for attribute in my_prompt_node["attributes"] if attribute["name"] == "prompt_inputs"
+    )
+    assert prompt_inputs_attribute == {
+        "id": "3b6e1363-e41b-458e-ad28-95a61fdedac1",
+        "name": "prompt_inputs",
+        "value": {
+            "type": "DICTIONARY_REFERENCE",
+            "entries": [
+                {
+                    "key": "foo",
+                    "value": {
+                        "type": "WORKFLOW_STATE",
+                        "state_variable_id": "45649791-c642-4405-aff9-a1fafd780ea1",
+                    },
+                },
+                {
+                    "key": "bar",
+                    "value": {
+                        "type": "CONSTANT_VALUE",
+                        "value": {
+                            "type": "STRING",
+                            "value": "baz",
+                        },
+                    },
+                },
+            ],
+        },
+    }
+

 def test_serialize_node__unreferenced_variable_block__still_serializes():
     # GIVEN a prompt node with an unreferenced variable block
vellum_ee/workflows/display/utils/expressions.py CHANGED
@@ -207,7 +207,7 @@ def serialize_value(display_context: "WorkflowDisplayContext", value: Any) -> Js
     if isinstance(value, StateValueReference):
         state_value_display = display_context.global_state_value_displays[value]
         return {
-            "type": "STATE_VALUE",
+            "type": "WORKFLOW_STATE",
             "state_variable_id": str(state_value_display.id),
         }
