vellum-ai 0.1.13__py3-none-any.whl → 0.2.1__py3-none-any.whl
- vellum/__init__.py +172 -4
- vellum/client.py +214 -10
- vellum/core/client_wrapper.py +1 -1
- vellum/resources/__init__.py +15 -1
- vellum/resources/deployments/__init__.py +3 -0
- vellum/resources/deployments/client.py +113 -2
- vellum/resources/deployments/types/__init__.py +5 -0
- vellum/resources/deployments/types/deployments_list_request_status.py +17 -0
- vellum/resources/document_indexes/client.py +55 -9
- vellum/resources/documents/client.py +22 -2
- vellum/resources/model_versions/client.py +18 -0
- vellum/resources/registered_prompts/client.py +104 -0
- vellum/resources/sandboxes/client.py +66 -4
- vellum/resources/test_suites/client.py +6 -6
- vellum/resources/workflow_deployments/__init__.py +5 -0
- vellum/resources/workflow_deployments/client.py +116 -0
- vellum/resources/workflow_deployments/types/__init__.py +5 -0
- vellum/resources/workflow_deployments/types/workflow_deployments_list_request_status.py +17 -0
- vellum/types/__init__.py +180 -4
- vellum/types/array_chat_message_content.py +33 -0
- vellum/types/array_chat_message_content_item.py +43 -0
- vellum/types/array_chat_message_content_item_request.py +45 -0
- vellum/types/array_chat_message_content_request.py +33 -0
- vellum/types/array_enum.py +5 -0
- vellum/types/array_variable_value.py +35 -0
- vellum/types/chat_history_enum.py +5 -0
- vellum/types/chat_history_input_request.py +4 -0
- vellum/types/chat_history_variable_value.py +29 -0
- vellum/types/chat_message.py +3 -1
- vellum/types/chat_message_content.py +53 -0
- vellum/types/chat_message_content_request.py +56 -0
- vellum/types/chat_message_request.py +3 -1
- vellum/types/deployment_read.py +5 -11
- vellum/types/document_index_read.py +2 -2
- vellum/types/{document_index_status.py → entity_status.py} +3 -3
- vellum/types/error_enum.py +5 -0
- vellum/types/execute_workflow_error_response.py +28 -0
- vellum/types/execute_workflow_response.py +32 -0
- vellum/types/execute_workflow_workflow_result_event.py +33 -0
- vellum/types/fulfilled_execute_workflow_workflow_result_event.py +35 -0
- vellum/types/function_call_chat_message_content.py +33 -0
- vellum/types/function_call_chat_message_content_request.py +33 -0
- vellum/types/function_call_chat_message_content_value.py +34 -0
- vellum/types/function_call_chat_message_content_value_request.py +34 -0
- vellum/types/function_call_enum.py +5 -0
- vellum/types/image_chat_message_content.py +33 -0
- vellum/types/image_chat_message_content_request.py +33 -0
- vellum/types/image_enum.py +5 -0
- vellum/types/json_enum.py +5 -0
- vellum/types/json_input_request.py +4 -0
- vellum/types/model_version_exec_config_parameters.py +1 -0
- vellum/types/number_enum.py +5 -0
- vellum/types/number_variable_value.py +28 -0
- vellum/types/paginated_slim_deployment_read_list.py +32 -0
- vellum/types/paginated_slim_workflow_deployment_list.py +32 -0
- vellum/types/register_prompt_model_parameters_request.py +1 -0
- vellum/types/rejected_execute_workflow_workflow_result_event.py +35 -0
- vellum/types/scenario_input_type_enum.py +2 -2
- vellum/types/search_results_enum.py +5 -0
- vellum/types/search_results_variable_value.py +29 -0
- vellum/types/slim_deployment_read.py +48 -0
- vellum/types/slim_workflow_deployment.py +57 -0
- vellum/types/string_chat_message_content.py +32 -0
- vellum/types/string_chat_message_content_request.py +32 -0
- vellum/types/string_enum.py +5 -0
- vellum/types/string_input_request.py +4 -0
- vellum/types/variable_value.py +102 -0
- vellum/types/vellum_image.py +29 -0
- vellum/types/vellum_image_request.py +29 -0
- vellum/types/vellum_variable_type.py +5 -0
- vellum/types/workflow_output.py +111 -0
- vellum/types/workflow_output_array.py +35 -0
- vellum/types/workflow_output_chat_history.py +35 -0
- vellum/types/workflow_output_error.py +35 -0
- vellum/types/workflow_output_function_call.py +35 -0
- vellum/types/workflow_output_image.py +35 -0
- vellum/types/workflow_output_json.py +34 -0
- vellum/types/workflow_output_number.py +34 -0
- vellum/types/workflow_output_search_results.py +35 -0
- vellum/types/workflow_output_string.py +34 -0
- vellum/types/workflow_result_event.py +2 -0
- {vellum_ai-0.1.13.dist-info → vellum_ai-0.2.1.dist-info}/METADATA +1 -1
- {vellum_ai-0.1.13.dist-info → vellum_ai-0.2.1.dist-info}/RECORD +85 -31
- vellum/types/deployment_status.py +0 -31
- {vellum_ai-0.1.13.dist-info → vellum_ai-0.2.1.dist-info}/LICENSE +0 -0
- {vellum_ai-0.1.13.dist-info → vellum_ai-0.2.1.dist-info}/WHEEL +0 -0
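
For orientation before the hunks below: the headline changes in 0.2.1 are a new `workflow_deployments` resource, the rename of `DocumentIndexStatus` to `EntityStatus`, and a large set of new workflow-output and chat-message-content types. A minimal usage sketch of the new listing endpoint, lifted from the generated docstring further down (the API key value is a placeholder you would replace):

from vellum import WorkflowDeploymentsListRequestStatus
from vellum.client import Vellum

# Placeholder credentials; substitute a real Vellum API key.
client = Vellum(api_key="YOUR_API_KEY")

# Returns a PaginatedSlimWorkflowDeploymentList filtered to ACTIVE deployments.
active_deployments = client.workflow_deployments.list(
    status=WorkflowDeploymentsListRequestStatus.ACTIVE,
)
print(active_deployments)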
vellum/resources/document_indexes/client.py

@@ -8,7 +8,7 @@ from ...core.api_error import ApiError
 from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from ...core.jsonable_encoder import jsonable_encoder
 from ...types.document_index_read import DocumentIndexRead
-from ...types.document_index_status import DocumentIndexStatus
+from ...types.entity_status import EntityStatus
 from ...types.environment_enum import EnvironmentEnum
 
 try:
@@ -29,7 +29,7 @@ class DocumentIndexesClient:
         *,
         label: str,
         name: str,
-        status: typing.Optional[DocumentIndexStatus] = OMIT,
+        status: typing.Optional[EntityStatus] = OMIT,
         environment: typing.Optional[EnvironmentEnum] = OMIT,
         indexing_config: typing.Dict[str, typing.Any],
         copy_documents_from_index_id: typing.Optional[str] = OMIT,
@@ -42,10 +42,10 @@ class DocumentIndexesClient:
 
             - name: str. A name that uniquely identifies this index within its workspace
 
-            - status: typing.Optional[
+            - status: typing.Optional[EntityStatus]. The current status of the document index
 
-
-
+              * `ACTIVE` - Active
+              * `ARCHIVED` - Archived
             - environment: typing.Optional[EnvironmentEnum]. The environment this document index is used in
 
              * `DEVELOPMENT` - Development
@@ -54,6 +54,20 @@ class DocumentIndexesClient:
             - indexing_config: typing.Dict[str, typing.Any]. Configuration representing how documents should be indexed
 
            - copy_documents_from_index_id: typing.Optional[str]. Optionally specify the id of a document index from which you'd like to copy and re-index its documents into this newly created index
+        ---
+        from vellum import EntityStatus, EnvironmentEnum
+        from vellum.client import Vellum
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.document_indexes.create(
+            label="string",
+            name="string",
+            status=EntityStatus.ACTIVE,
+            environment=EnvironmentEnum.DEVELOPMENT,
+            indexing_config={"string": {"unknown": "string", "type": "unknown"}},
+        )
         """
         _request: typing.Dict[str, typing.Any] = {"label": label, "name": name, "indexing_config": indexing_config}
         if status is not OMIT:
@@ -83,6 +97,15 @@ class DocumentIndexesClient:
 
         Parameters:
             - id: str. Either the Document Index's ID or its unique name
+        ---
+        from vellum.client import Vellum
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.document_indexes.retrieve(
+            id="string",
+        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -108,7 +131,7 @@ class AsyncDocumentIndexesClient:
         *,
         label: str,
         name: str,
-        status: typing.Optional[DocumentIndexStatus] = OMIT,
+        status: typing.Optional[EntityStatus] = OMIT,
         environment: typing.Optional[EnvironmentEnum] = OMIT,
         indexing_config: typing.Dict[str, typing.Any],
         copy_documents_from_index_id: typing.Optional[str] = OMIT,
@@ -121,10 +144,10 @@ class AsyncDocumentIndexesClient:
 
             - name: str. A name that uniquely identifies this index within its workspace
 
-            - status: typing.Optional[
+            - status: typing.Optional[EntityStatus]. The current status of the document index
 
-
-
+              * `ACTIVE` - Active
+              * `ARCHIVED` - Archived
             - environment: typing.Optional[EnvironmentEnum]. The environment this document index is used in
 
              * `DEVELOPMENT` - Development
@@ -133,6 +156,20 @@ class AsyncDocumentIndexesClient:
             - indexing_config: typing.Dict[str, typing.Any]. Configuration representing how documents should be indexed
 
            - copy_documents_from_index_id: typing.Optional[str]. Optionally specify the id of a document index from which you'd like to copy and re-index its documents into this newly created index
+        ---
+        from vellum import EntityStatus, EnvironmentEnum
+        from vellum.client import AsyncVellum
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+        await client.document_indexes.create(
+            label="string",
+            name="string",
+            status=EntityStatus.ACTIVE,
+            environment=EnvironmentEnum.DEVELOPMENT,
+            indexing_config={"string": {"unknown": "string", "type": "unknown"}},
+        )
         """
         _request: typing.Dict[str, typing.Any] = {"label": label, "name": name, "indexing_config": indexing_config}
         if status is not OMIT:
@@ -162,6 +199,15 @@ class AsyncDocumentIndexesClient:
 
         Parameters:
             - id: str. Either the Document Index's ID or its unique name
+        ---
+        from vellum.client import AsyncVellum
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+        await client.document_indexes.retrieve(
+            id="string",
+        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
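
Note that `DocumentIndexStatus` from 0.1.13 no longer exists; the renamed `EntityStatus` (see the `document_index_status.py → entity_status.py` entry in the file list) takes its place and exposes `ACTIVE` and `ARCHIVED`, so callers should generally only need to swap the import. A hedged sketch of the adjustment, assuming the old symbol was exported from the package root the same way the new one is, with illustrative argument values:

# 0.1.13 (assumed export path):
# from vellum import DocumentIndexStatus
# 0.2.1:
from vellum import EntityStatus
from vellum.client import Vellum

client = Vellum(api_key="YOUR_API_KEY")
client.document_indexes.create(
    label="My Index",       # illustrative values, not defaults
    name="my-index",
    status=EntityStatus.ACTIVE,
    indexing_config={},
)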
vellum/resources/documents/client.py

@@ -84,7 +84,7 @@ class DocumentsClient:
             api_key="YOUR_API_KEY",
         )
         client.documents.destroy(
-            id="
+            id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -121,6 +121,16 @@ class DocumentsClient:
 
               * `ACTIVE` - Active
            - metadata: typing.Optional[typing.Dict[str, typing.Any]]. A JSON object containing any metadata associated with the document that you'd like to filter upon later.
+        ---
+        from vellum.client import Vellum
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.documents.partial_update(
+            id="string",
+            status="ACTIVE",
+        )
         """
         _request: typing.Dict[str, typing.Any] = {}
         if label is not OMIT:
@@ -262,7 +272,7 @@ class AsyncDocumentsClient:
             api_key="YOUR_API_KEY",
         )
         await client.documents.destroy(
-            id="
+            id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -299,6 +309,16 @@ class AsyncDocumentsClient:
 
               * `ACTIVE` - Active
            - metadata: typing.Optional[typing.Dict[str, typing.Any]]. A JSON object containing any metadata associated with the document that you'd like to filter upon later.
+        ---
+        from vellum.client import AsyncVellum
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+        await client.documents.partial_update(
+            id="string",
+            status="ACTIVE",
+        )
         """
         _request: typing.Dict[str, typing.Any] = {}
         if label is not OMIT:
vellum/resources/model_versions/client.py

@@ -23,6 +23,15 @@ class ModelVersionsClient:
 
         Parameters:
             - id: str. A UUID string identifying this model version.
+        ---
+        from vellum.client import Vellum
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.model_versions.retrieve(
+            id="string",
+        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -49,6 +58,15 @@ class AsyncModelVersionsClient:
 
         Parameters:
             - id: str. A UUID string identifying this model version.
+        ---
+        from vellum.client import AsyncVellum
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+        await client.model_versions.retrieve(
+            id="string",
+        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
vellum/resources/registered_prompts/client.py

@@ -73,6 +73,58 @@ class RegisteredPromptsClient:
             - parameters: RegisterPromptModelParametersRequest. The initial model parameters to use for this prompt
 
             - meta: typing.Optional[typing.Dict[str, typing.Any]]. Optionally include additional metadata to store along with the prompt.
+        ---
+        from vellum import (
+            BlockTypeEnum,
+            ChatMessageRole,
+            PromptTemplateBlockDataRequest,
+            PromptTemplateBlockPropertiesRequest,
+            PromptTemplateBlockRequest,
+            ProviderEnum,
+            RegisteredPromptInputVariableRequest,
+            RegisterPromptModelParametersRequest,
+            RegisterPromptPromptInfoRequest,
+            VellumVariableType,
+        )
+        from vellum.client import Vellum
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.registered_prompts.register_prompt(
+            label="string",
+            name="string",
+            prompt=RegisterPromptPromptInfoRequest(
+                prompt_block_data=PromptTemplateBlockDataRequest(
+                    version=1,
+                    blocks=[
+                        PromptTemplateBlockRequest(
+                            id="string",
+                            block_type=BlockTypeEnum.CHAT_MESSAGE,
+                            properties=PromptTemplateBlockPropertiesRequest(
+                                chat_role=ChatMessageRole.SYSTEM,
+                                template_type=VellumVariableType.STRING,
+                            ),
+                        )
+                    ],
+                ),
+                input_variables=[
+                    RegisteredPromptInputVariableRequest(
+                        key="string",
+                        type=VellumVariableType.STRING,
+                    )
+                ],
+            ),
+            provider=ProviderEnum.ANTHROPIC,
+            model="string",
+            parameters=RegisterPromptModelParametersRequest(
+                temperature=1.1,
+                max_tokens=1,
+                top_p=1.1,
+                frequency_penalty=1.1,
+                presence_penalty=1.1,
+            ),
+        )
         """
         _request: typing.Dict[str, typing.Any] = {
             "label": label,
@@ -157,6 +209,58 @@ class AsyncRegisteredPromptsClient:
             - parameters: RegisterPromptModelParametersRequest. The initial model parameters to use for this prompt
 
             - meta: typing.Optional[typing.Dict[str, typing.Any]]. Optionally include additional metadata to store along with the prompt.
+        ---
+        from vellum import (
+            BlockTypeEnum,
+            ChatMessageRole,
+            PromptTemplateBlockDataRequest,
+            PromptTemplateBlockPropertiesRequest,
+            PromptTemplateBlockRequest,
+            ProviderEnum,
+            RegisteredPromptInputVariableRequest,
+            RegisterPromptModelParametersRequest,
+            RegisterPromptPromptInfoRequest,
+            VellumVariableType,
+        )
+        from vellum.client import AsyncVellum
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+        await client.registered_prompts.register_prompt(
+            label="string",
+            name="string",
+            prompt=RegisterPromptPromptInfoRequest(
+                prompt_block_data=PromptTemplateBlockDataRequest(
+                    version=1,
+                    blocks=[
+                        PromptTemplateBlockRequest(
+                            id="string",
+                            block_type=BlockTypeEnum.CHAT_MESSAGE,
+                            properties=PromptTemplateBlockPropertiesRequest(
+                                chat_role=ChatMessageRole.SYSTEM,
+                                template_type=VellumVariableType.STRING,
+                            ),
+                        )
+                    ],
+                ),
+                input_variables=[
+                    RegisteredPromptInputVariableRequest(
+                        key="string",
+                        type=VellumVariableType.STRING,
+                    )
+                ],
+            ),
+            provider=ProviderEnum.ANTHROPIC,
+            model="string",
+            parameters=RegisterPromptModelParametersRequest(
+                temperature=1.1,
+                max_tokens=1,
+                top_p=1.1,
+                frequency_penalty=1.1,
+                presence_penalty=1.1,
+            ),
+        )
         """
         _request: typing.Dict[str, typing.Any] = {
             "label": label,
vellum/resources/sandboxes/client.py

@@ -52,6 +52,37 @@ class SandboxesClient:
            - scenario_id: typing.Optional[str]. The id of the scenario to update. If none is provided, an id will be generated and a new scenario will be appended.
 
             - metric_input_params: typing.Optional[SandboxMetricInputParamsRequest].
+        ---
+        from vellum import (
+            ChatMessageRequest,
+            ChatMessageRole,
+            EvaluationParamsRequest,
+            SandboxMetricInputParamsRequest,
+            ScenarioInputRequest,
+            ScenarioInputTypeEnum,
+        )
+        from vellum.client import Vellum
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.sandboxes.upsert_sandbox_scenario(
+            id="string",
+            inputs=[
+                ScenarioInputRequest(
+                    key="string",
+                    type=ScenarioInputTypeEnum.TEXT,
+                    chat_history=[
+                        ChatMessageRequest(
+                            role=ChatMessageRole.SYSTEM,
+                        )
+                    ],
+                )
+            ],
+            metric_input_params=SandboxMetricInputParamsRequest(
+                params=EvaluationParamsRequest(),
+            ),
+        )
         """
         _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
         if label is not OMIT:
@@ -90,8 +121,8 @@ class SandboxesClient:
             api_key="YOUR_API_KEY",
         )
         client.sandboxes.delete_sandbox_scenario(
-            id="
-            scenario_id="
+            id="string",
+            scenario_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -143,6 +174,37 @@ class AsyncSandboxesClient:
            - scenario_id: typing.Optional[str]. The id of the scenario to update. If none is provided, an id will be generated and a new scenario will be appended.
 
             - metric_input_params: typing.Optional[SandboxMetricInputParamsRequest].
+        ---
+        from vellum import (
+            ChatMessageRequest,
+            ChatMessageRole,
+            EvaluationParamsRequest,
+            SandboxMetricInputParamsRequest,
+            ScenarioInputRequest,
+            ScenarioInputTypeEnum,
+        )
+        from vellum.client import AsyncVellum
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+        await client.sandboxes.upsert_sandbox_scenario(
+            id="string",
+            inputs=[
+                ScenarioInputRequest(
+                    key="string",
+                    type=ScenarioInputTypeEnum.TEXT,
+                    chat_history=[
+                        ChatMessageRequest(
+                            role=ChatMessageRole.SYSTEM,
+                        )
+                    ],
+                )
+            ],
+            metric_input_params=SandboxMetricInputParamsRequest(
+                params=EvaluationParamsRequest(),
+            ),
+        )
         """
         _request: typing.Dict[str, typing.Any] = {"inputs": inputs}
         if label is not OMIT:
@@ -181,8 +243,8 @@ class AsyncSandboxesClient:
             api_key="YOUR_API_KEY",
         )
         await client.sandboxes.delete_sandbox_scenario(
-            id="
-            scenario_id="
+            id="string",
+            scenario_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
vellum/resources/test_suites/client.py

@@ -58,7 +58,7 @@ class TestSuitesClient:
             api_key="YOUR_API_KEY",
         )
         client.test_suites.upsert_test_suite_test_case(
-            id="
+            id="string",
             input_values=[],
             evaluation_values=[],
         )
@@ -100,8 +100,8 @@ class TestSuitesClient:
             api_key="YOUR_API_KEY",
         )
         client.test_suites.delete_test_suite_test_case(
-            id="
-            test_case_id="
+            id="string",
+            test_case_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -160,7 +160,7 @@ class AsyncTestSuitesClient:
             api_key="YOUR_API_KEY",
         )
         await client.test_suites.upsert_test_suite_test_case(
-            id="
+            id="string",
             input_values=[],
             evaluation_values=[],
         )
@@ -202,8 +202,8 @@ class AsyncTestSuitesClient:
             api_key="YOUR_API_KEY",
         )
         await client.test_suites.delete_test_suite_test_case(
-            id="
-            test_case_id="
+            id="string",
+            test_case_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
vellum/resources/workflow_deployments/client.py (new file)

@@ -0,0 +1,116 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.remove_none_from_dict import remove_none_from_dict
+from ...types.paginated_slim_workflow_deployment_list import PaginatedSlimWorkflowDeploymentList
+from .types.workflow_deployments_list_request_status import WorkflowDeploymentsListRequestStatus
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class WorkflowDeploymentsClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def list(
+        self,
+        *,
+        limit: typing.Optional[int] = None,
+        offset: typing.Optional[int] = None,
+        ordering: typing.Optional[str] = None,
+        status: typing.Optional[WorkflowDeploymentsListRequestStatus] = None,
+    ) -> PaginatedSlimWorkflowDeploymentList:
+        """
+        Parameters:
+            - limit: typing.Optional[int]. Number of results to return per page.
+
+            - offset: typing.Optional[int]. The initial index from which to return the results.
+
+            - ordering: typing.Optional[str]. Which field to use when ordering the results.
+
+            - status: typing.Optional[WorkflowDeploymentsListRequestStatus]. The current status of the workflow deployment
+
+              - `ACTIVE` - Active
+              - `ARCHIVED` - Archived---
+        from vellum import WorkflowDeploymentsListRequestStatus
+        from vellum.client import Vellum
+
+        client = Vellum(
+            api_key="YOUR_API_KEY",
+        )
+        client.workflow_deployments.list(
+            status=WorkflowDeploymentsListRequestStatus.ACTIVE,
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", "v1/workflow-deployments"),
+            params=remove_none_from_dict({"limit": limit, "offset": offset, "ordering": ordering, "status": status}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PaginatedSlimWorkflowDeploymentList, _response.json())  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncWorkflowDeploymentsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def list(
+        self,
+        *,
+        limit: typing.Optional[int] = None,
+        offset: typing.Optional[int] = None,
+        ordering: typing.Optional[str] = None,
+        status: typing.Optional[WorkflowDeploymentsListRequestStatus] = None,
+    ) -> PaginatedSlimWorkflowDeploymentList:
+        """
+        Parameters:
+            - limit: typing.Optional[int]. Number of results to return per page.
+
+            - offset: typing.Optional[int]. The initial index from which to return the results.
+
+            - ordering: typing.Optional[str]. Which field to use when ordering the results.
+
+            - status: typing.Optional[WorkflowDeploymentsListRequestStatus]. The current status of the workflow deployment
+
+              - `ACTIVE` - Active
+              - `ARCHIVED` - Archived---
+        from vellum import WorkflowDeploymentsListRequestStatus
+        from vellum.client import AsyncVellum
+
+        client = AsyncVellum(
+            api_key="YOUR_API_KEY",
+        )
+        await client.workflow_deployments.list(
+            status=WorkflowDeploymentsListRequestStatus.ACTIVE,
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_environment().default}/", "v1/workflow-deployments"),
+            params=remove_none_from_dict({"limit": limit, "offset": offset, "ordering": ordering, "status": status}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=None,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PaginatedSlimWorkflowDeploymentList, _response.json())  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
vellum/resources/workflow_deployments/types/workflow_deployments_list_request_status.py (new file)

@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class WorkflowDeploymentsListRequestStatus(str, enum.Enum):
+    ACTIVE = "ACTIVE"
+    ARCHIVED = "ARCHIVED"
+
+    def visit(self, active: typing.Callable[[], T_Result], archived: typing.Callable[[], T_Result]) -> T_Result:
+        if self is WorkflowDeploymentsListRequestStatus.ACTIVE:
+            return active()
+        if self is WorkflowDeploymentsListRequestStatus.ARCHIVED:
+            return archived()