vellum-ai 0.0.20__py3-none-any.whl → 0.0.25__py3-none-any.whl

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. The information is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
Files changed (84)
  1. vellum/__init__.py +38 -20
  2. vellum/client.py +278 -101
  3. vellum/core/__init__.py +11 -2
  4. vellum/core/client_wrapper.py +27 -0
  5. vellum/core/remove_none_from_dict.py +11 -0
  6. vellum/resources/deployments/client.py +35 -15
  7. vellum/resources/document_indexes/client.py +64 -16
  8. vellum/resources/documents/client.py +110 -35
  9. vellum/resources/model_versions/client.py +67 -25
  10. vellum/resources/registered_prompts/client.py +80 -16
  11. vellum/resources/sandboxes/client.py +90 -25
  12. vellum/resources/test_suites/client.py +90 -25
  13. vellum/types/__init__.py +24 -4
  14. vellum/types/deployment_read.py +2 -6
  15. vellum/types/document.py +3 -7
  16. vellum/types/document_document_to_document_index.py +2 -2
  17. vellum/types/document_index_read.py +3 -7
  18. vellum/types/enriched_normalized_completion.py +5 -9
  19. vellum/types/evaluation_params.py +1 -3
  20. vellum/types/evaluation_params_request.py +1 -3
  21. vellum/types/execute_workflow_stream_error_response.py +24 -0
  22. vellum/types/generate_error_response.py +1 -1
  23. vellum/types/generate_request.py +3 -7
  24. vellum/types/generate_result.py +2 -6
  25. vellum/types/generate_result_data.py +1 -1
  26. vellum/types/generate_result_error.py +1 -1
  27. vellum/types/model_version_build_config.py +2 -6
  28. vellum/types/model_version_compile_prompt_response.py +1 -1
  29. vellum/types/model_version_compiled_prompt.py +2 -4
  30. vellum/types/model_version_exec_config.py +3 -3
  31. vellum/types/model_version_read.py +7 -10
  32. vellum/types/model_version_sandbox_snapshot.py +3 -5
  33. vellum/types/prompt_template_block_properties.py +1 -0
  34. vellum/types/prompt_template_block_properties_request.py +3 -2
  35. vellum/types/prompt_template_block_request.py +1 -1
  36. vellum/types/prompt_template_input_variable.py +1 -1
  37. vellum/types/prompt_template_input_variable_request.py +1 -1
  38. vellum/types/provider_enum.py +5 -0
  39. vellum/types/register_prompt_error_response.py +1 -1
  40. vellum/types/register_prompt_prompt.py +2 -2
  41. vellum/types/register_prompt_prompt_info_request.py +1 -1
  42. vellum/types/register_prompt_response.py +5 -7
  43. vellum/types/registered_prompt_deployment.py +3 -3
  44. vellum/types/registered_prompt_model_version.py +2 -2
  45. vellum/types/registered_prompt_sandbox.py +2 -2
  46. vellum/types/registered_prompt_sandbox_snapshot.py +1 -1
  47. vellum/types/sandbox_scenario.py +2 -2
  48. vellum/types/scenario_input_request.py +1 -1
  49. vellum/types/search_error_response.py +1 -1
  50. vellum/types/search_filters_request.py +1 -1
  51. vellum/types/search_request_options_request.py +4 -6
  52. vellum/types/search_response.py +1 -1
  53. vellum/types/search_result.py +3 -3
  54. vellum/types/search_result_merging_request.py +1 -1
  55. vellum/types/search_weights_request.py +2 -2
  56. vellum/types/slim_document.py +5 -9
  57. vellum/types/submit_completion_actual_request.py +5 -15
  58. vellum/types/terminal_node_chat_history_result.py +1 -1
  59. vellum/types/terminal_node_json_result.py +1 -1
  60. vellum/types/terminal_node_result_output.py +2 -4
  61. vellum/types/terminal_node_string_result.py +1 -1
  62. vellum/types/test_suite_test_case.py +4 -8
  63. vellum/types/upload_document_response.py +1 -1
  64. vellum/types/workflow_event_error.py +26 -0
  65. vellum/types/workflow_execution_event_error_code.py +31 -0
  66. vellum/types/workflow_node_result_data.py +7 -11
  67. vellum/types/workflow_node_result_event.py +4 -3
  68. vellum/types/{workflow_node_result_event_state_enum.py → workflow_node_result_event_state.py} +5 -5
  69. vellum/types/workflow_request_chat_history_input_request.py +1 -3
  70. vellum/types/workflow_request_input_request.py +2 -6
  71. vellum/types/workflow_request_json_input_request.py +1 -3
  72. vellum/types/workflow_request_string_input_request.py +1 -3
  73. vellum/types/workflow_result_event.py +6 -3
  74. vellum/types/workflow_result_event_output_data.py +40 -0
  75. vellum/types/workflow_result_event_output_data_chat_history.py +32 -0
  76. vellum/types/workflow_result_event_output_data_json.py +31 -0
  77. vellum/types/workflow_result_event_output_data_string.py +33 -0
  78. vellum/types/workflow_stream_event.py +1 -4
  79. {vellum_ai-0.0.20.dist-info → vellum_ai-0.0.25.dist-info}/METADATA +1 -1
  80. vellum_ai-0.0.25.dist-info/RECORD +149 -0
  81. vellum/core/remove_none_from_headers.py +0 -11
  82. vellum/types/workflow_result_event_state_enum.py +0 -31
  83. vellum_ai-0.0.20.dist-info/RECORD +0 -142
  84. {vellum_ai-0.0.20.dist-info → vellum_ai-0.0.25.dist-info}/WHEEL +0 -0
vellum/resources/registered_prompts/client.py CHANGED
@@ -4,12 +4,11 @@ import typing
  import urllib.parse
  from json.decoder import JSONDecodeError

- import httpx
  import pydantic

  from ...core.api_error import ApiError
+ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
  from ...core.jsonable_encoder import jsonable_encoder
- from ...core.remove_none_from_headers import remove_none_from_headers
  from ...environment import VellumEnvironment
  from ...errors.conflict_error import ConflictError
  from ...types.provider_enum import ProviderEnum
@@ -23,9 +22,11 @@ OMIT = typing.cast(typing.Any, ...)


  class RegisteredPromptsClient:
-     def __init__(self, *, environment: VellumEnvironment = VellumEnvironment.PRODUCTION, api_key: str):
+     def __init__(
+         self, *, environment: VellumEnvironment = VellumEnvironment.PRODUCTION, client_wrapper: SyncClientWrapper
+     ):
          self._environment = environment
-         self.api_key = api_key
+         self._client_wrapper = client_wrapper

      def register_prompt(
          self,
@@ -38,6 +39,37 @@ class RegisteredPromptsClient:
          parameters: RegisterPromptModelParametersRequest,
          meta: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
      ) -> RegisterPromptResponse:
+         """
+         <strong style="background-color:#ffc107; color:white; padding:4px; border-radius:4px">Unstable</strong>
+
+         Registers a prompt within Vellum and creates associated Vellum entities. Intended to be used by integration
+         partners, not directly by Vellum users.
+
+         Under the hood, this endpoint creates a new sandbox, a new model version, and a new deployment.
+
+         Parameters:
+             - label: str. A human-friendly label for corresponding entities created in Vellum. <span style="white-space: nowrap">`non-empty`</span>
+
+             - name: str. A uniquely-identifying name for corresponding entities created in Vellum. <span style="white-space: nowrap">`non-empty`</span>
+
+             - prompt: RegisterPromptPromptInfoRequest. Information about how to execute the prompt template.
+
+             - provider: ProviderEnum. The initial LLM provider to use for this prompt
+
+                 * `ANTHROPIC` - Anthropic
+                 * `COHERE` - Cohere
+                 * `GOOGLE` - Google
+                 * `HOSTED` - Hosted
+                 * `MOSAICML` - MosaicML
+                 * `MYSTIC` - Mystic
+                 * `OPENAI` - OpenAI
+                 * `PYQ` - Pyq
+             - model: str. The initial model to use for this prompt <span style="white-space: nowrap">`non-empty`</span>
+
+             - parameters: RegisterPromptModelParametersRequest. The initial model parameters to use for this prompt
+
+             - meta: typing.Optional[typing.Dict[str, typing.Any]]. Optionally include additional metadata to store along with the prompt.
+         """
          _request: typing.Dict[str, typing.Any] = {
              "label": label,
              "name": name,
@@ -48,11 +80,11 @@ class RegisteredPromptsClient:
          }
          if meta is not OMIT:
              _request["meta"] = meta
-         _response = httpx.request(
+         _response = self._client_wrapper.httpx_client.request(
              "POST",
              urllib.parse.urljoin(f"{self._environment.default}/", "v1/registered-prompts/register"),
              json=jsonable_encoder(_request),
-             headers=remove_none_from_headers({"X_API_KEY": self.api_key}),
+             headers=self._client_wrapper.get_headers(),
              timeout=None,
          )
          if 200 <= _response.status_code < 300:
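The hunks above (and the matching ones in the other resource clients below) drop direct `httpx.request` calls and hand-rolled `X_API_KEY` headers in favor of a shared client wrapper. The new `vellum/core/client_wrapper.py` is added in this release but its body is not shown in this excerpt, so the following is only a minimal sketch of the interface the call sites rely on (an `httpx_client` attribute plus `get_headers()`); every other detail is an assumption.

# Hypothetical sketch only -- not the actual contents of the new vellum/core/client_wrapper.py,
# which this excerpt does not show. The call sites above need just an `httpx_client`
# attribute and a `get_headers()` method.
import typing

import httpx


class BaseClientWrapper:
    def __init__(self, *, api_key: str):
        self._api_key = api_key

    def get_headers(self) -> typing.Dict[str, str]:
        # The same auth header the removed remove_none_from_headers() call used to build.
        return {"X_API_KEY": self._api_key}


class SyncClientWrapper(BaseClientWrapper):
    def __init__(self, *, api_key: str, httpx_client: httpx.Client):
        super().__init__(api_key=api_key)
        self.httpx_client = httpx_client


class AsyncClientWrapper(BaseClientWrapper):
    def __init__(self, *, api_key: str, httpx_client: httpx.AsyncClient):
        super().__init__(api_key=api_key)
        self.httpx_client = httpx_client

One practical consequence of this shape is that a single httpx.Client / httpx.AsyncClient is reused across calls (and can pool connections), whereas the removed code opened a throwaway httpx.AsyncClient per request.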
@@ -67,9 +99,11 @@ class RegisteredPromptsClient:


  class AsyncRegisteredPromptsClient:
-     def __init__(self, *, environment: VellumEnvironment = VellumEnvironment.PRODUCTION, api_key: str):
+     def __init__(
+         self, *, environment: VellumEnvironment = VellumEnvironment.PRODUCTION, client_wrapper: AsyncClientWrapper
+     ):
          self._environment = environment
-         self.api_key = api_key
+         self._client_wrapper = client_wrapper

      async def register_prompt(
          self,
@@ -82,6 +116,37 @@ class AsyncRegisteredPromptsClient:
          parameters: RegisterPromptModelParametersRequest,
          meta: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
      ) -> RegisterPromptResponse:
+         """
+         <strong style="background-color:#ffc107; color:white; padding:4px; border-radius:4px">Unstable</strong>
+
+         Registers a prompt within Vellum and creates associated Vellum entities. Intended to be used by integration
+         partners, not directly by Vellum users.
+
+         Under the hood, this endpoint creates a new sandbox, a new model version, and a new deployment.
+
+         Parameters:
+             - label: str. A human-friendly label for corresponding entities created in Vellum. <span style="white-space: nowrap">`non-empty`</span>
+
+             - name: str. A uniquely-identifying name for corresponding entities created in Vellum. <span style="white-space: nowrap">`non-empty`</span>
+
+             - prompt: RegisterPromptPromptInfoRequest. Information about how to execute the prompt template.
+
+             - provider: ProviderEnum. The initial LLM provider to use for this prompt
+
+                 * `ANTHROPIC` - Anthropic
+                 * `COHERE` - Cohere
+                 * `GOOGLE` - Google
+                 * `HOSTED` - Hosted
+                 * `MOSAICML` - MosaicML
+                 * `MYSTIC` - Mystic
+                 * `OPENAI` - OpenAI
+                 * `PYQ` - Pyq
+             - model: str. The initial model to use for this prompt <span style="white-space: nowrap">`non-empty`</span>
+
+             - parameters: RegisterPromptModelParametersRequest. The initial model parameters to use for this prompt
+
+             - meta: typing.Optional[typing.Dict[str, typing.Any]]. Optionally include additional metadata to store along with the prompt.
+         """
          _request: typing.Dict[str, typing.Any] = {
              "label": label,
              "name": name,
@@ -92,14 +157,13 @@ class AsyncRegisteredPromptsClient:
          }
          if meta is not OMIT:
              _request["meta"] = meta
-         async with httpx.AsyncClient() as _client:
-             _response = await _client.request(
-                 "POST",
-                 urllib.parse.urljoin(f"{self._environment.default}/", "v1/registered-prompts/register"),
-                 json=jsonable_encoder(_request),
-                 headers=remove_none_from_headers({"X_API_KEY": self.api_key}),
-                 timeout=None,
-             )
+         _response = await self._client_wrapper.httpx_client.request(
+             "POST",
+             urllib.parse.urljoin(f"{self._environment.default}/", "v1/registered-prompts/register"),
+             json=jsonable_encoder(_request),
+             headers=self._client_wrapper.get_headers(),
+             timeout=None,
+         )
          if 200 <= _response.status_code < 300:
              return pydantic.parse_obj_as(RegisterPromptResponse, _response.json())  # type: ignore
          if _response.status_code == 409:
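The new docstrings describe the register_prompt call end to end, so a short usage sketch may help. The top-level `Vellum` client class, the `registered_prompts` attribute name, and the request-model contents are all assumptions here (vellum/client.py is listed in this diff but not shown); only the keyword arguments come from the docstring above.

# Illustrative only: client construction and request-model contents are assumed.
from vellum.client import Vellum  # assumed import path

client = Vellum(api_key="YOUR_API_KEY")

prompt_info = ...       # a RegisterPromptPromptInfoRequest built elsewhere
model_parameters = ...  # a RegisterPromptModelParametersRequest built elsewhere

response = client.registered_prompts.register_prompt(
    label="Support Answer Prompt",  # human-friendly, non-empty
    name="support-answer-prompt",   # uniquely identifying, non-empty
    prompt=prompt_info,
    provider="OPENAI",  # one of the ProviderEnum values listed above; the generated ProviderEnum type may be expected instead of a raw string
    model="gpt-3.5-turbo",
    parameters=model_parameters,
    meta={"source": "integration-partner-example"},  # optional
)
print(response)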
vellum/resources/sandboxes/client.py CHANGED
@@ -4,12 +4,11 @@ import typing
  import urllib.parse
  from json.decoder import JSONDecodeError

- import httpx
  import pydantic

  from ...core.api_error import ApiError
+ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
  from ...core.jsonable_encoder import jsonable_encoder
- from ...core.remove_none_from_headers import remove_none_from_headers
  from ...environment import VellumEnvironment
  from ...types.sandbox_metric_input_params_request import SandboxMetricInputParamsRequest
  from ...types.sandbox_scenario import SandboxScenario
@@ -20,9 +19,11 @@ OMIT = typing.cast(typing.Any, ...)


  class SandboxesClient:
-     def __init__(self, *, environment: VellumEnvironment = VellumEnvironment.PRODUCTION, api_key: str):
+     def __init__(
+         self, *, environment: VellumEnvironment = VellumEnvironment.PRODUCTION, client_wrapper: SyncClientWrapper
+     ):
          self._environment = environment
-         self.api_key = api_key
+         self._client_wrapper = client_wrapper

      def upsert_sandbox_scenario(
          self,
@@ -33,6 +34,28 @@ class SandboxesClient:
          scenario_id: typing.Optional[str] = OMIT,
          metric_input_params: typing.Optional[SandboxMetricInputParamsRequest] = OMIT,
      ) -> SandboxScenario:
+         """
+         <strong style="background-color:#ffc107; color:white; padding:4px; border-radius:4px">Unstable</strong>
+
+         Upserts a new scenario for a sandbox, keying off of the optionally provided scenario id.
+
+         If an id is provided and has a match, the scenario will be updated. If no id is provided or no match
+         is found, a new scenario will be appended to the end.
+
+         Note that a full replacement of the scenario is performed, so any fields not provided will be removed
+         or overwritten with default values.
+
+         Parameters:
+             - id: str. A UUID string identifying this sandbox.
+
+             - label: typing.Optional[str].
+
+             - inputs: typing.List[ScenarioInputRequest]. The inputs for the scenario
+
+             - scenario_id: typing.Optional[str]. The id of the scenario to update. If none is provided, an id will be generated and a new scenario will be appended. <span style="white-space: nowrap">`non-empty`</span>
+
+             - metric_input_params: typing.Optional[SandboxMetricInputParamsRequest].
+         """
          _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
          if label is not OMIT:
              _request["label"] = label
@@ -40,11 +63,11 @@ class SandboxesClient:
              _request["scenario_id"] = scenario_id
          if metric_input_params is not OMIT:
              _request["metric_input_params"] = metric_input_params
-         _response = httpx.request(
+         _response = self._client_wrapper.httpx_client.request(
              "POST",
              urllib.parse.urljoin(f"{self._environment.default}/", f"v1/sandboxes/{id}/scenarios"),
              json=jsonable_encoder(_request),
-             headers=remove_none_from_headers({"X_API_KEY": self.api_key}),
+             headers=self._client_wrapper.get_headers(),
              timeout=None,
          )
          if 200 <= _response.status_code < 300:
@@ -56,10 +79,20 @@ class SandboxesClient:
          raise ApiError(status_code=_response.status_code, body=_response_json)

      def delete_sandbox_scenario(self, id: str, scenario_id: str) -> None:
-         _response = httpx.request(
+         """
+         <strong style="background-color:#ffc107; color:white; padding:4px; border-radius:4px">Unstable</strong>
+
+         Deletes an existing scenario from a sandbox, keying off of the provided scenario id.
+
+         Parameters:
+             - id: str. A UUID string identifying this sandbox.
+
+             - scenario_id: str. An id identifying the scenario that you'd like to delete
+         """
+         _response = self._client_wrapper.httpx_client.request(
              "DELETE",
              urllib.parse.urljoin(f"{self._environment.default}/", f"v1/sandboxes/{id}/scenarios/{scenario_id}"),
-             headers=remove_none_from_headers({"X_API_KEY": self.api_key}),
+             headers=self._client_wrapper.get_headers(),
              timeout=None,
          )
          if 200 <= _response.status_code < 300:
@@ -72,9 +105,11 @@ class SandboxesClient:


  class AsyncSandboxesClient:
-     def __init__(self, *, environment: VellumEnvironment = VellumEnvironment.PRODUCTION, api_key: str):
+     def __init__(
+         self, *, environment: VellumEnvironment = VellumEnvironment.PRODUCTION, client_wrapper: AsyncClientWrapper
+     ):
          self._environment = environment
-         self.api_key = api_key
+         self._client_wrapper = client_wrapper

      async def upsert_sandbox_scenario(
          self,
@@ -85,6 +120,28 @@ class AsyncSandboxesClient:
          scenario_id: typing.Optional[str] = OMIT,
          metric_input_params: typing.Optional[SandboxMetricInputParamsRequest] = OMIT,
      ) -> SandboxScenario:
+         """
+         <strong style="background-color:#ffc107; color:white; padding:4px; border-radius:4px">Unstable</strong>
+
+         Upserts a new scenario for a sandbox, keying off of the optionally provided scenario id.
+
+         If an id is provided and has a match, the scenario will be updated. If no id is provided or no match
+         is found, a new scenario will be appended to the end.
+
+         Note that a full replacement of the scenario is performed, so any fields not provided will be removed
+         or overwritten with default values.
+
+         Parameters:
+             - id: str. A UUID string identifying this sandbox.
+
+             - label: typing.Optional[str].
+
+             - inputs: typing.List[ScenarioInputRequest]. The inputs for the scenario
+
+             - scenario_id: typing.Optional[str]. The id of the scenario to update. If none is provided, an id will be generated and a new scenario will be appended. <span style="white-space: nowrap">`non-empty`</span>
+
+             - metric_input_params: typing.Optional[SandboxMetricInputParamsRequest].
+         """
          _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
          if label is not OMIT:
              _request["label"] = label
@@ -92,14 +149,13 @@ class AsyncSandboxesClient:
              _request["scenario_id"] = scenario_id
          if metric_input_params is not OMIT:
              _request["metric_input_params"] = metric_input_params
-         async with httpx.AsyncClient() as _client:
-             _response = await _client.request(
-                 "POST",
-                 urllib.parse.urljoin(f"{self._environment.default}/", f"v1/sandboxes/{id}/scenarios"),
-                 json=jsonable_encoder(_request),
-                 headers=remove_none_from_headers({"X_API_KEY": self.api_key}),
-                 timeout=None,
-             )
+         _response = await self._client_wrapper.httpx_client.request(
+             "POST",
+             urllib.parse.urljoin(f"{self._environment.default}/", f"v1/sandboxes/{id}/scenarios"),
+             json=jsonable_encoder(_request),
+             headers=self._client_wrapper.get_headers(),
+             timeout=None,
+         )
          if 200 <= _response.status_code < 300:
              return pydantic.parse_obj_as(SandboxScenario, _response.json())  # type: ignore
          try:
@@ -109,13 +165,22 @@ class AsyncSandboxesClient:
          raise ApiError(status_code=_response.status_code, body=_response_json)

      async def delete_sandbox_scenario(self, id: str, scenario_id: str) -> None:
-         async with httpx.AsyncClient() as _client:
-             _response = await _client.request(
-                 "DELETE",
-                 urllib.parse.urljoin(f"{self._environment.default}/", f"v1/sandboxes/{id}/scenarios/{scenario_id}"),
-                 headers=remove_none_from_headers({"X_API_KEY": self.api_key}),
-                 timeout=None,
-             )
+         """
+         <strong style="background-color:#ffc107; color:white; padding:4px; border-radius:4px">Unstable</strong>
+
+         Deletes an existing scenario from a sandbox, keying off of the provided scenario id.
+
+         Parameters:
+             - id: str. A UUID string identifying this sandbox.
+
+             - scenario_id: str. An id identifying the scenario that you'd like to delete
+         """
+         _response = await self._client_wrapper.httpx_client.request(
+             "DELETE",
+             urllib.parse.urljoin(f"{self._environment.default}/", f"v1/sandboxes/{id}/scenarios/{scenario_id}"),
+             headers=self._client_wrapper.get_headers(),
+             timeout=None,
+         )
          if 200 <= _response.status_code < 300:
              return
          try:
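For reference, a hedged usage sketch of the two sandbox endpoints documented above, reusing the `client` from the register_prompt sketch. The `client.sandboxes` attribute name and the construction of ScenarioInputRequest objects are assumptions (neither appears in this excerpt); the keyword arguments mirror the docstrings.

# Illustrative only; see the caveats above.
scenario_inputs = [...]  # a list of ScenarioInputRequest objects built elsewhere

scenario = client.sandboxes.upsert_sandbox_scenario(
    id="<sandbox-uuid>",
    label="Happy path",
    inputs=scenario_inputs,
    scenario_id="<existing-scenario-uuid>",  # omit to append a brand-new scenario
)

# Deletion returns None on success; any non-2xx response raises ApiError.
client.sandboxes.delete_sandbox_scenario(
    id="<sandbox-uuid>",
    scenario_id="<existing-scenario-uuid>",
)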
vellum/resources/test_suites/client.py CHANGED
@@ -4,12 +4,11 @@ import typing
  import urllib.parse
  from json.decoder import JSONDecodeError

- import httpx
  import pydantic

  from ...core.api_error import ApiError
+ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
  from ...core.jsonable_encoder import jsonable_encoder
- from ...core.remove_none_from_headers import remove_none_from_headers
  from ...environment import VellumEnvironment
  from ...types.evaluation_params_request import EvaluationParamsRequest
  from ...types.test_suite_test_case import TestSuiteTestCase
@@ -19,9 +18,11 @@ OMIT = typing.cast(typing.Any, ...)


  class TestSuitesClient:
-     def __init__(self, *, environment: VellumEnvironment = VellumEnvironment.PRODUCTION, api_key: str):
+     def __init__(
+         self, *, environment: VellumEnvironment = VellumEnvironment.PRODUCTION, client_wrapper: SyncClientWrapper
+     ):
          self._environment = environment
-         self.api_key = api_key
+         self._client_wrapper = client_wrapper

      def upsert_test_suite_test_case(
          self,
@@ -32,16 +33,38 @@ class TestSuitesClient:
          input_values: typing.Dict[str, typing.Any],
          evaluation_params: EvaluationParamsRequest,
      ) -> TestSuiteTestCase:
+         """
+         <strong style="background-color:#ffc107; color:white; padding:4px; border-radius:4px">Unstable</strong>
+
+         Upserts a new test case for a test suite, keying off of the optionally provided test case id.
+
+         If an id is provided and has a match, the test case will be updated. If no id is provided or no match
+         is found, a new test case will be appended to the end.
+
+         Note that a full replacement of the test case is performed, so any fields not provided will be removed
+         or overwritten with default values.
+
+         Parameters:
+             - id: str. A UUID string identifying this test suite.
+
+             - test_case_id: typing.Optional[str]. The id of the test case to update. If none is provided, an id will be generated and a new test case will be appended. <span style="white-space: nowrap">`non-empty`</span>
+
+             - label: typing.Optional[str]. A human-friendly label for the test case.
+
+             - input_values: typing.Dict[str, typing.Any]. Key/value pairs for each input variable that the Test Suite expects.
+
+             - evaluation_params: EvaluationParamsRequest. Parameters to use when evaluating the test case, specific to the test suite's evaluation metric.
+         """
          _request: typing.Dict[str, typing.Any] = {"input_values": input_values, "evaluation_params": evaluation_params}
          if test_case_id is not OMIT:
              _request["test_case_id"] = test_case_id
          if label is not OMIT:
              _request["label"] = label
-         _response = httpx.request(
+         _response = self._client_wrapper.httpx_client.request(
              "POST",
              urllib.parse.urljoin(f"{self._environment.default}/", f"v1/test-suites/{id}/test-cases"),
              json=jsonable_encoder(_request),
-             headers=remove_none_from_headers({"X_API_KEY": self.api_key}),
+             headers=self._client_wrapper.get_headers(),
              timeout=None,
          )
          if 200 <= _response.status_code < 300:
@@ -53,10 +76,20 @@ class TestSuitesClient:
          raise ApiError(status_code=_response.status_code, body=_response_json)

      def delete_test_suite_test_case(self, id: str, test_case_id: str) -> None:
-         _response = httpx.request(
+         """
+         <strong style="background-color:#ffc107; color:white; padding:4px; border-radius:4px">Unstable</strong>
+
+         Deletes an existing test case for a test suite, keying off of the test case id.
+
+         Parameters:
+             - id: str. A UUID string identifying this test suite.
+
+             - test_case_id: str. An id identifying the test case that you'd like to delete
+         """
+         _response = self._client_wrapper.httpx_client.request(
              "DELETE",
              urllib.parse.urljoin(f"{self._environment.default}/", f"v1/test-suites/{id}/test-cases/{test_case_id}"),
-             headers=remove_none_from_headers({"X_API_KEY": self.api_key}),
+             headers=self._client_wrapper.get_headers(),
              timeout=None,
          )
          if 200 <= _response.status_code < 300:
@@ -69,9 +102,11 @@ class TestSuitesClient:


  class AsyncTestSuitesClient:
-     def __init__(self, *, environment: VellumEnvironment = VellumEnvironment.PRODUCTION, api_key: str):
+     def __init__(
+         self, *, environment: VellumEnvironment = VellumEnvironment.PRODUCTION, client_wrapper: AsyncClientWrapper
+     ):
          self._environment = environment
-         self.api_key = api_key
+         self._client_wrapper = client_wrapper

      async def upsert_test_suite_test_case(
          self,
@@ -82,19 +117,40 @@ class AsyncTestSuitesClient:
          input_values: typing.Dict[str, typing.Any],
          evaluation_params: EvaluationParamsRequest,
      ) -> TestSuiteTestCase:
+         """
+         <strong style="background-color:#ffc107; color:white; padding:4px; border-radius:4px">Unstable</strong>
+
+         Upserts a new test case for a test suite, keying off of the optionally provided test case id.
+
+         If an id is provided and has a match, the test case will be updated. If no id is provided or no match
+         is found, a new test case will be appended to the end.
+
+         Note that a full replacement of the test case is performed, so any fields not provided will be removed
+         or overwritten with default values.
+
+         Parameters:
+             - id: str. A UUID string identifying this test suite.
+
+             - test_case_id: typing.Optional[str]. The id of the test case to update. If none is provided, an id will be generated and a new test case will be appended. <span style="white-space: nowrap">`non-empty`</span>
+
+             - label: typing.Optional[str]. A human-friendly label for the test case.
+
+             - input_values: typing.Dict[str, typing.Any]. Key/value pairs for each input variable that the Test Suite expects.
+
+             - evaluation_params: EvaluationParamsRequest. Parameters to use when evaluating the test case, specific to the test suite's evaluation metric.
+         """
          _request: typing.Dict[str, typing.Any] = {"input_values": input_values, "evaluation_params": evaluation_params}
          if test_case_id is not OMIT:
              _request["test_case_id"] = test_case_id
          if label is not OMIT:
              _request["label"] = label
-         async with httpx.AsyncClient() as _client:
-             _response = await _client.request(
-                 "POST",
-                 urllib.parse.urljoin(f"{self._environment.default}/", f"v1/test-suites/{id}/test-cases"),
-                 json=jsonable_encoder(_request),
-                 headers=remove_none_from_headers({"X_API_KEY": self.api_key}),
-                 timeout=None,
-             )
+         _response = await self._client_wrapper.httpx_client.request(
+             "POST",
+             urllib.parse.urljoin(f"{self._environment.default}/", f"v1/test-suites/{id}/test-cases"),
+             json=jsonable_encoder(_request),
+             headers=self._client_wrapper.get_headers(),
+             timeout=None,
+         )
          if 200 <= _response.status_code < 300:
              return pydantic.parse_obj_as(TestSuiteTestCase, _response.json())  # type: ignore
          try:
@@ -104,13 +160,22 @@ class AsyncTestSuitesClient:
          raise ApiError(status_code=_response.status_code, body=_response_json)

      async def delete_test_suite_test_case(self, id: str, test_case_id: str) -> None:
-         async with httpx.AsyncClient() as _client:
-             _response = await _client.request(
-                 "DELETE",
-                 urllib.parse.urljoin(f"{self._environment.default}/", f"v1/test-suites/{id}/test-cases/{test_case_id}"),
-                 headers=remove_none_from_headers({"X_API_KEY": self.api_key}),
-                 timeout=None,
-             )
+         """
+         <strong style="background-color:#ffc107; color:white; padding:4px; border-radius:4px">Unstable</strong>
+
+         Deletes an existing test case for a test suite, keying off of the test case id.
+
+         Parameters:
+             - id: str. A UUID string identifying this test suite.
+
+             - test_case_id: str. An id identifying the test case that you'd like to delete
+         """
+         _response = await self._client_wrapper.httpx_client.request(
+             "DELETE",
+             urllib.parse.urljoin(f"{self._environment.default}/", f"v1/test-suites/{id}/test-cases/{test_case_id}"),
+             headers=self._client_wrapper.get_headers(),
+             timeout=None,
+         )
          if 200 <= _response.status_code < 300:
              return
          try:
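Again reusing the `client` from the earlier sketch (the `test_suites` attribute name is likewise an assumption), the upsert/delete pair maps directly onto the docstrings above:

# Illustrative only: evaluation-params construction is not shown in this excerpt.
evaluation_params = ...  # an EvaluationParamsRequest built elsewhere

test_case = client.test_suites.upsert_test_suite_test_case(
    id="<test-suite-uuid>",
    label="Refund request",
    input_values={"customer_message": "I'd like a refund, please."},
    evaluation_params=evaluation_params,
)

client.test_suites.delete_test_suite_test_case(
    id="<test-suite-uuid>",
    test_case_id="<test-case-uuid>",
)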
vellum/types/__init__.py CHANGED
@@ -19,6 +19,7 @@ from .enriched_normalized_completion import EnrichedNormalizedCompletion
  from .environment_enum import EnvironmentEnum
  from .evaluation_params import EvaluationParams
  from .evaluation_params_request import EvaluationParamsRequest
+ from .execute_workflow_stream_error_response import ExecuteWorkflowStreamErrorResponse
  from .finish_reason_enum import FinishReasonEnum
  from .generate_error_response import GenerateErrorResponse
  from .generate_options_request import GenerateOptionsRequest
@@ -101,6 +102,8 @@ from .terminal_node_string_result import TerminalNodeStringResult
  from .test_suite_test_case import TestSuiteTestCase
  from .upload_document_error_response import UploadDocumentErrorResponse
  from .upload_document_response import UploadDocumentResponse
+ from .workflow_event_error import WorkflowEventError
+ from .workflow_execution_event_error_code import WorkflowExecutionEventErrorCode
  from .workflow_execution_node_result_event import WorkflowExecutionNodeResultEvent
  from .workflow_execution_workflow_result_event import WorkflowExecutionWorkflowResultEvent
  from .workflow_node_result_data import (
@@ -113,7 +116,7 @@ from .workflow_node_result_data import (
      WorkflowNodeResultData_Terminal,
  )
  from .workflow_node_result_event import WorkflowNodeResultEvent
- from .workflow_node_result_event_state_enum import WorkflowNodeResultEventStateEnum
+ from .workflow_node_result_event_state import WorkflowNodeResultEventState
  from .workflow_request_chat_history_input_request import WorkflowRequestChatHistoryInputRequest
  from .workflow_request_input_request import (
      WorkflowRequestInputRequest,
@@ -124,7 +127,15 @@ from .workflow_request_input_request import (
  from .workflow_request_json_input_request import WorkflowRequestJsonInputRequest
  from .workflow_request_string_input_request import WorkflowRequestStringInputRequest
  from .workflow_result_event import WorkflowResultEvent
- from .workflow_result_event_state_enum import WorkflowResultEventStateEnum
+ from .workflow_result_event_output_data import (
+     WorkflowResultEventOutputData,
+     WorkflowResultEventOutputData_ChatHistory,
+     WorkflowResultEventOutputData_Json,
+     WorkflowResultEventOutputData_String,
+ )
+ from .workflow_result_event_output_data_chat_history import WorkflowResultEventOutputDataChatHistory
+ from .workflow_result_event_output_data_json import WorkflowResultEventOutputDataJson
+ from .workflow_result_event_output_data_string import WorkflowResultEventOutputDataString
  from .workflow_stream_event import WorkflowStreamEvent, WorkflowStreamEvent_Node, WorkflowStreamEvent_Workflow

  __all__ = [
@@ -147,6 +158,7 @@ __all__ = [
      "EnvironmentEnum",
      "EvaluationParams",
      "EvaluationParamsRequest",
+     "ExecuteWorkflowStreamErrorResponse",
      "FinishReasonEnum",
      "GenerateErrorResponse",
      "GenerateOptionsRequest",
@@ -227,6 +239,8 @@ __all__ = [
      "TestSuiteTestCase",
      "UploadDocumentErrorResponse",
      "UploadDocumentResponse",
+     "WorkflowEventError",
+     "WorkflowExecutionEventErrorCode",
      "WorkflowExecutionNodeResultEvent",
      "WorkflowExecutionWorkflowResultEvent",
      "WorkflowNodeResultData",
@@ -237,7 +251,7 @@ __all__ = [
      "WorkflowNodeResultData_Search",
      "WorkflowNodeResultData_Terminal",
      "WorkflowNodeResultEvent",
-     "WorkflowNodeResultEventStateEnum",
+     "WorkflowNodeResultEventState",
      "WorkflowRequestChatHistoryInputRequest",
      "WorkflowRequestInputRequest",
      "WorkflowRequestInputRequest_ChatHistory",
@@ -246,7 +260,13 @@ __all__ = [
      "WorkflowRequestJsonInputRequest",
      "WorkflowRequestStringInputRequest",
      "WorkflowResultEvent",
-     "WorkflowResultEventStateEnum",
+     "WorkflowResultEventOutputData",
+     "WorkflowResultEventOutputDataChatHistory",
+     "WorkflowResultEventOutputDataJson",
+     "WorkflowResultEventOutputDataString",
+     "WorkflowResultEventOutputData_ChatHistory",
+     "WorkflowResultEventOutputData_Json",
+     "WorkflowResultEventOutputData_String",
      "WorkflowStreamEvent",
      "WorkflowStreamEvent_Node",
      "WorkflowStreamEvent_Workflow",
vellum/types/deployment_read.py CHANGED
@@ -15,14 +15,10 @@ class DeploymentRead(pydantic.BaseModel):
      id: str
      created: str
      label: str = pydantic.Field(
-         description=(
-             'A human-readable label for the deployment <span style="white-space: nowrap">`<= 150 characters`</span> \n'
-         )
+         description='A human-readable label for the deployment <span style="white-space: nowrap">`<= 150 characters`</span> '
      )
      name: str = pydantic.Field(
-         description=(
-             'A name that uniquely identifies this deployment within its workspace <span style="white-space: nowrap">`<= 150 characters`</span> \n'
-         )
+         description='A name that uniquely identifies this deployment within its workspace <span style="white-space: nowrap">`<= 150 characters`</span> '
      )
      status: typing.Optional[DeploymentReadStatusEnum] = pydantic.Field(
          description=(
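This hunk (and the similar ones in the type files below) collapses the parenthesized, newline-terminated Field descriptions into single-quoted one-liners; the text itself is unchanged apart from the dropped trailing `\n`. Assuming the SDK is still on pydantic v1, which the `pydantic.Field` / `parse_obj_as` usage throughout this diff suggests, the cleaned-up text is what field introspection now returns:

# Illustrative only; assumes pydantic v1 field introspection and the module path shown in the file list.
from vellum.types.deployment_read import DeploymentRead

label_field = DeploymentRead.__fields__["label"]
print(label_field.field_info.description)
# 'A human-readable label for the deployment <span style="white-space: nowrap">`<= 150 characters`</span> '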
vellum/types/document.py CHANGED
@@ -9,16 +9,12 @@ from ..core.datetime_utils import serialize_datetime


  class Document(pydantic.BaseModel):
-     id: str = pydantic.Field(description=("The ID of the document.\n"))
+     id: str = pydantic.Field(description="The ID of the document.")
      label: str = pydantic.Field(
-         description=(
-             'The human-readable name for the document. <span style="white-space: nowrap">`<= 1000 characters`</span> \n'
-         )
+         description='The human-readable name for the document. <span style="white-space: nowrap">`<= 1000 characters`</span> '
      )
      external_id: typing.Optional[str] = pydantic.Field(
-         description=(
-             "The unique ID of the document as represented in an external system and specified when it was originally uploaded.\n"
-         )
+         description="The unique ID of the document as represented in an external system and specified when it was originally uploaded."
      )

      def json(self, **kwargs: typing.Any) -> str:
vellum/types/document_document_to_document_index.py CHANGED
@@ -10,9 +10,9 @@ from .indexing_state_enum import IndexingStateEnum


  class DocumentDocumentToDocumentIndex(pydantic.BaseModel):
-     id: str = pydantic.Field(description=("Vellum-generated ID that uniquely identifies this link.\n"))
+     id: str = pydantic.Field(description="Vellum-generated ID that uniquely identifies this link.")
      document_index_id: str = pydantic.Field(
-         description=("Vellum-generated ID that uniquely identifies the index this document is included in.\n")
+         description="Vellum-generated ID that uniquely identifies the index this document is included in."
      )
      indexing_state: typing.Optional[IndexingStateEnum] = pydantic.Field(
          description=(