vellum-ai 0.6.9__py3-none-any.whl → 0.7.1__py3-none-any.whl

Files changed (37)
  1. vellum/__init__.py +14 -0
  2. vellum/client.py +28 -28
  3. vellum/core/client_wrapper.py +1 -1
  4. vellum/lib/test_suites/resources.py +5 -5
  5. vellum/resources/document_indexes/client.py +114 -0
  6. vellum/resources/test_suites/client.py +19 -51
  7. vellum/types/__init__.py +14 -0
  8. vellum/types/code_execution_node_json_result.py +1 -1
  9. vellum/types/execution_json_vellum_value.py +1 -1
  10. vellum/types/json_variable_value.py +1 -1
  11. vellum/types/json_vellum_value.py +1 -1
  12. vellum/types/merge_node_result.py +3 -0
  13. vellum/types/merge_node_result_data.py +25 -0
  14. vellum/types/named_test_case_json_variable_value.py +1 -1
  15. vellum/types/named_test_case_json_variable_value_request.py +1 -1
  16. vellum/types/node_input_compiled_json_value.py +1 -1
  17. vellum/types/node_output_compiled_json_value.py +1 -1
  18. vellum/types/prompt_node_result_data.py +1 -0
  19. vellum/types/templating_node_json_result.py +1 -1
  20. vellum/types/terminal_node_json_result.py +1 -1
  21. vellum/types/test_suite_run_execution_array_output.py +32 -0
  22. vellum/types/test_suite_run_execution_json_output.py +1 -1
  23. vellum/types/test_suite_run_execution_output.py +12 -0
  24. vellum/types/test_suite_run_metric_number_output.py +1 -1
  25. vellum/types/test_suite_run_metric_string_output.py +1 -1
  26. vellum/types/test_suite_test_case_bulk_operation_request.py +12 -0
  27. vellum/types/test_suite_test_case_rejected_bulk_result.py +1 -1
  28. vellum/types/test_suite_test_case_upsert_bulk_operation_request.py +35 -0
  29. vellum/types/upsert_enum.py +5 -0
  30. vellum/types/upsert_test_suite_test_case_request.py +49 -0
  31. vellum/types/workflow_output_json.py +1 -1
  32. vellum/types/workflow_request_json_input_request.py +1 -1
  33. vellum/types/workflow_result_event_output_data_json.py +1 -1
  34. {vellum_ai-0.6.9.dist-info → vellum_ai-0.7.1.dist-info}/METADATA +1 -1
  35. {vellum_ai-0.6.9.dist-info → vellum_ai-0.7.1.dist-info}/RECORD +37 -32
  36. {vellum_ai-0.6.9.dist-info → vellum_ai-0.7.1.dist-info}/LICENSE +0 -0
  37. {vellum_ai-0.6.9.dist-info → vellum_ai-0.7.1.dist-info}/WHEEL +0 -0
vellum/__init__.py CHANGED
@@ -205,6 +205,7 @@ from .types import (
  MapNodeResultData,
  MergeEnum,
  MergeNodeResult,
+ MergeNodeResultData,
  MetadataFilterConfigRequest,
  MetadataFilterRuleCombinator,
  MetadataFilterRuleRequest,
@@ -454,6 +455,7 @@ from .types import (
  TestSuiteRunExecConfig_External,
  TestSuiteRunExecConfig_WorkflowReleaseTag,
  TestSuiteRunExecution,
+ TestSuiteRunExecutionArrayOutput,
  TestSuiteRunExecutionChatHistoryOutput,
  TestSuiteRunExecutionErrorOutput,
  TestSuiteRunExecutionFunctionCallOutput,
@@ -462,6 +464,7 @@ from .types import (
  TestSuiteRunExecutionMetricResult,
  TestSuiteRunExecutionNumberOutput,
  TestSuiteRunExecutionOutput,
+ TestSuiteRunExecutionOutput_Array,
  TestSuiteRunExecutionOutput_ChatHistory,
  TestSuiteRunExecutionOutput_Error,
  TestSuiteRunExecutionOutput_FunctionCall,
@@ -499,6 +502,7 @@ from .types import (
  TestSuiteTestCaseBulkOperationRequest_Create,
  TestSuiteTestCaseBulkOperationRequest_Delete,
  TestSuiteTestCaseBulkOperationRequest_Replace,
+ TestSuiteTestCaseBulkOperationRequest_Upsert,
  TestSuiteTestCaseBulkResult,
  TestSuiteTestCaseBulkResult_Created,
  TestSuiteTestCaseBulkResult_Deleted,
@@ -515,6 +519,7 @@ from .types import (
  TestSuiteTestCaseReplaceBulkOperationRequest,
  TestSuiteTestCaseReplacedBulkResult,
  TestSuiteTestCaseReplacedBulkResultData,
+ TestSuiteTestCaseUpsertBulkOperationRequest,
  TextEmbedding3LargeEnum,
  TextEmbedding3SmallEnum,
  TextEmbeddingAda002Enum,
@@ -525,6 +530,8 @@ from .types import (
  TokenOverlappingWindowChunkingRequest,
  UploadDocumentErrorResponse,
  UploadDocumentResponse,
+ UpsertEnum,
+ UpsertTestSuiteTestCaseRequest,
  VellumError,
  VellumErrorCodeEnum,
  VellumErrorRequest,
@@ -839,6 +846,7 @@ __all__ = [
  "MapNodeResultData",
  "MergeEnum",
  "MergeNodeResult",
+ "MergeNodeResultData",
  "MetadataFilterConfigRequest",
  "MetadataFilterRuleCombinator",
  "MetadataFilterRuleRequest",
@@ -1089,6 +1097,7 @@ __all__ = [
  "TestSuiteRunExecConfig_External",
  "TestSuiteRunExecConfig_WorkflowReleaseTag",
  "TestSuiteRunExecution",
+ "TestSuiteRunExecutionArrayOutput",
  "TestSuiteRunExecutionChatHistoryOutput",
  "TestSuiteRunExecutionErrorOutput",
  "TestSuiteRunExecutionFunctionCallOutput",
@@ -1097,6 +1106,7 @@ __all__ = [
  "TestSuiteRunExecutionMetricResult",
  "TestSuiteRunExecutionNumberOutput",
  "TestSuiteRunExecutionOutput",
+ "TestSuiteRunExecutionOutput_Array",
  "TestSuiteRunExecutionOutput_ChatHistory",
  "TestSuiteRunExecutionOutput_Error",
  "TestSuiteRunExecutionOutput_FunctionCall",
@@ -1134,6 +1144,7 @@ __all__ = [
  "TestSuiteTestCaseBulkOperationRequest_Create",
  "TestSuiteTestCaseBulkOperationRequest_Delete",
  "TestSuiteTestCaseBulkOperationRequest_Replace",
+ "TestSuiteTestCaseBulkOperationRequest_Upsert",
  "TestSuiteTestCaseBulkResult",
  "TestSuiteTestCaseBulkResult_Created",
  "TestSuiteTestCaseBulkResult_Deleted",
@@ -1150,6 +1161,7 @@ __all__ = [
  "TestSuiteTestCaseReplaceBulkOperationRequest",
  "TestSuiteTestCaseReplacedBulkResult",
  "TestSuiteTestCaseReplacedBulkResultData",
+ "TestSuiteTestCaseUpsertBulkOperationRequest",
  "TextEmbedding3LargeEnum",
  "TextEmbedding3SmallEnum",
  "TextEmbeddingAda002Enum",
@@ -1160,6 +1172,8 @@ __all__ = [
  "TokenOverlappingWindowChunkingRequest",
  "UploadDocumentErrorResponse",
  "UploadDocumentResponse",
+ "UpsertEnum",
+ "UpsertTestSuiteTestCaseRequest",
  "VellumEnvironment",
  "VellumError",
  "VellumErrorCodeEnum",
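The types added above are re-exported from the package root. A minimal sketch (plain Python, not part of the diff) of importing the new 0.7.1 symbols once the upgraded package is installed:

    from vellum import (
        MergeNodeResultData,
        TestSuiteRunExecutionArrayOutput,
        TestSuiteRunExecutionOutput_Array,
        TestSuiteTestCaseBulkOperationRequest_Upsert,
        TestSuiteTestCaseUpsertBulkOperationRequest,
        UpsertEnum,
        UpsertTestSuiteTestCaseRequest,
    )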
vellum/client.py CHANGED
@@ -121,23 +121,23 @@ class Vellum:
  Executes a deployed Prompt and returns the result.

  Parameters:
- - inputs: typing.Sequence[PromptDeploymentInputRequest]. The list of inputs defined in the Prompt's deployment with their corresponding values.
+ - inputs: typing.Sequence[PromptDeploymentInputRequest]. A list consisting of the Prompt Deployment's input variables and their values.

  - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.

- - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+ - prompt_deployment_name: typing.Optional[str]. The unique name of the Prompt Deployment. Must provide either this or prompt_deployment_id.

  - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment

- - external_id: typing.Optional[str]. "Optionally include a unique identifier for tracking purposes. Must be unique for a given prompt deployment.
+ - external_id: typing.Optional[str]. Optionally include a unique identifier for tracking purposes. Must be unique within a given Prompt Deployment.

- - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+ - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. An optionally specified configuration used to opt in to including additional metadata about this prompt execution in the API response. Corresponding values will be returned under the `meta` key of the API response.

- - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest].
+ - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest]. Overrides for the raw API request sent to the model host. Combined with `expand_raw`, it can be used to access new features from models.

- - expand_raw: typing.Optional[typing.Sequence[str]]. Returns the raw API response data sent from the model host. Combined with `raw_overrides`, it can be used to access new features from models.
+ - expand_raw: typing.Optional[typing.Sequence[str]]. A list of keys whose values you'd like to directly return from the JSON response of the model provider. Useful if you need lower-level info returned by model providers that Vellum would otherwise omit. Corresponding key/value pairs will be returned under the `raw` key of the API response.

- - metadata: typing.Optional[typing.Dict[str, typing.Any]].
+ - metadata: typing.Optional[typing.Dict[str, typing.Any]]. Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

  - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
  ---
@@ -256,23 +256,23 @@ class Vellum:
  Executes a deployed Prompt and streams back the results.

  Parameters:
- - inputs: typing.Sequence[PromptDeploymentInputRequest]. The list of inputs defined in the Prompt's deployment with their corresponding values.
+ - inputs: typing.Sequence[PromptDeploymentInputRequest]. A list consisting of the Prompt Deployment's input variables and their values.

  - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.

- - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+ - prompt_deployment_name: typing.Optional[str]. The unique name of the Prompt Deployment. Must provide either this or prompt_deployment_id.

  - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment

- - external_id: typing.Optional[str]. "Optionally include a unique identifier for tracking purposes. Must be unique for a given prompt deployment.
+ - external_id: typing.Optional[str]. Optionally include a unique identifier for tracking purposes. Must be unique within a given Prompt Deployment.

- - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+ - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. An optionally specified configuration used to opt in to including additional metadata about this prompt execution in the API response. Corresponding values will be returned under the `meta` key of the API response.

- - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest].
+ - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest]. Overrides for the raw API request sent to the model host. Combined with `expand_raw`, it can be used to access new features from models.

- - expand_raw: typing.Optional[typing.Sequence[str]]. Returns the raw API response data sent from the model host. Combined with `raw_overrides`, it can be used to access new features from models.
+ - expand_raw: typing.Optional[typing.Sequence[str]]. A list of keys whose values you'd like to directly return from the JSON response of the model provider. Useful if you need lower-level info returned by model providers that Vellum would otherwise omit. Corresponding key/value pairs will be returned under the `raw` key of the API response.

- - metadata: typing.Optional[typing.Dict[str, typing.Any]].
+ - metadata: typing.Optional[typing.Dict[str, typing.Any]]. Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

  - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
  ---
@@ -1082,23 +1082,23 @@ class AsyncVellum:
  Executes a deployed Prompt and returns the result.

  Parameters:
- - inputs: typing.Sequence[PromptDeploymentInputRequest]. The list of inputs defined in the Prompt's deployment with their corresponding values.
+ - inputs: typing.Sequence[PromptDeploymentInputRequest]. A list consisting of the Prompt Deployment's input variables and their values.

  - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.

- - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+ - prompt_deployment_name: typing.Optional[str]. The unique name of the Prompt Deployment. Must provide either this or prompt_deployment_id.

  - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment

- - external_id: typing.Optional[str]. "Optionally include a unique identifier for tracking purposes. Must be unique for a given prompt deployment.
+ - external_id: typing.Optional[str]. Optionally include a unique identifier for tracking purposes. Must be unique within a given Prompt Deployment.

- - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+ - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. An optionally specified configuration used to opt in to including additional metadata about this prompt execution in the API response. Corresponding values will be returned under the `meta` key of the API response.

- - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest].
+ - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest]. Overrides for the raw API request sent to the model host. Combined with `expand_raw`, it can be used to access new features from models.

- - expand_raw: typing.Optional[typing.Sequence[str]]. Returns the raw API response data sent from the model host. Combined with `raw_overrides`, it can be used to access new features from models.
+ - expand_raw: typing.Optional[typing.Sequence[str]]. A list of keys whose values you'd like to directly return from the JSON response of the model provider. Useful if you need lower-level info returned by model providers that Vellum would otherwise omit. Corresponding key/value pairs will be returned under the `raw` key of the API response.

- - metadata: typing.Optional[typing.Dict[str, typing.Any]].
+ - metadata: typing.Optional[typing.Dict[str, typing.Any]]. Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

  - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
  ---
@@ -1217,23 +1217,23 @@ class AsyncVellum:
  Executes a deployed Prompt and streams back the results.

  Parameters:
- - inputs: typing.Sequence[PromptDeploymentInputRequest]. The list of inputs defined in the Prompt's deployment with their corresponding values.
+ - inputs: typing.Sequence[PromptDeploymentInputRequest]. A list consisting of the Prompt Deployment's input variables and their values.

  - prompt_deployment_id: typing.Optional[str]. The ID of the Prompt Deployment. Must provide either this or prompt_deployment_name.

- - prompt_deployment_name: typing.Optional[str]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+ - prompt_deployment_name: typing.Optional[str]. The unique name of the Prompt Deployment. Must provide either this or prompt_deployment_id.

  - release_tag: typing.Optional[str]. Optionally specify a release tag if you want to pin to a specific release of the Prompt Deployment

- - external_id: typing.Optional[str]. "Optionally include a unique identifier for tracking purposes. Must be unique for a given prompt deployment.
+ - external_id: typing.Optional[str]. Optionally include a unique identifier for tracking purposes. Must be unique within a given Prompt Deployment.

- - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. The name of the Prompt Deployment. Must provide either this or prompt_deployment_id.
+ - expand_meta: typing.Optional[PromptDeploymentExpandMetaRequestRequest]. An optionally specified configuration used to opt in to including additional metadata about this prompt execution in the API response. Corresponding values will be returned under the `meta` key of the API response.

- - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest].
+ - raw_overrides: typing.Optional[RawPromptExecutionOverridesRequest]. Overrides for the raw API request sent to the model host. Combined with `expand_raw`, it can be used to access new features from models.

- - expand_raw: typing.Optional[typing.Sequence[str]]. Returns the raw API response data sent from the model host. Combined with `raw_overrides`, it can be used to access new features from models.
+ - expand_raw: typing.Optional[typing.Sequence[str]]. A list of keys whose values you'd like to directly return from the JSON response of the model provider. Useful if you need lower-level info returned by model providers that Vellum would otherwise omit. Corresponding key/value pairs will be returned under the `raw` key of the API response.

- - metadata: typing.Optional[typing.Dict[str, typing.Any]].
+ - metadata: typing.Optional[typing.Dict[str, typing.Any]]. Arbitrary JSON metadata associated with this request. Can be used to capture additional monitoring data such as user id, session id, etc. for future analysis.

  - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
  ---
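The reworded docstrings above describe how `expand_meta`, `raw_overrides`, `expand_raw`, and `metadata` behave. A hedged usage sketch; the `execute_prompt` method name and the `StringInputRequest` input type come from the public vellum-ai SDK rather than from this diff, and the identifiers are placeholders:

    from vellum import StringInputRequest
    from vellum.client import Vellum

    client = Vellum(api_key="YOUR_API_KEY")
    result = client.execute_prompt(
        prompt_deployment_name="my-prompt-deployment",  # or prompt_deployment_id=...
        release_tag="LATEST",
        inputs=[StringInputRequest(name="question", value="What does this release change?")],
        external_id="request-123",         # must be unique within the Prompt Deployment
        expand_raw=["usage"],              # example provider key, returned under `raw`
        metadata={"user_id": "user-456"},  # arbitrary JSON kept for later analysis
    )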
vellum/core/client_wrapper.py CHANGED
@@ -18,7 +18,7 @@ class BaseClientWrapper:
  headers: typing.Dict[str, str] = {
  "X-Fern-Language": "Python",
  "X-Fern-SDK-Name": "vellum-ai",
- "X-Fern-SDK-Version": "0.6.9",
+ "X-Fern-SDK-Version": "0.7.1",
  }
  headers["X_API_KEY"] = self.api_key
  return headers
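The SDK version header now reports 0.7.1. A quick local check (not part of the diff) that the installed distribution matches:

    from importlib.metadata import version

    print(version("vellum-ai"))  # expected: 0.7.1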
vellum/lib/test_suites/resources.py CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
  import logging
  import time
  from functools import cached_property
- from typing import Callable, Generator, List, Any
+ from typing import Callable, Generator, List, cast, Iterable

  from vellum import TestSuiteRunRead, TestSuiteRunMetricOutput_Number
  from vellum.client import Vellum
@@ -174,7 +174,7 @@ class VellumTestSuiteRunResults:
  self,
  metric_identifier: str | None = None,
  output_identifier: str | None = None,
- ) -> List[float]:
+ ) -> List[float | None]:
  """Returns the values of a numeric metric output that match the given criteria."""

  metric_outputs: list[TestSuiteRunMetricOutput_Number] = []
@@ -198,7 +198,7 @@ class VellumTestSuiteRunResults:
  output_values = self.get_numeric_metric_output_values(
  metric_identifier=metric_identifier, output_identifier=output_identifier
  )
- return sum(output_values) / len(output_values)
+ return sum(cast(Iterable[float], filter(lambda o: isinstance(o, float), output_values))) / len(output_values)

  def get_min_metric_output(
  self, metric_identifier: str | None = None, output_identifier: str | None = None
@@ -207,7 +207,7 @@ class VellumTestSuiteRunResults:
  output_values = self.get_numeric_metric_output_values(
  metric_identifier=metric_identifier, output_identifier=output_identifier
  )
- return min(output_values)
+ return min(cast(Iterable[float], filter(lambda o: isinstance(o, float), output_values)))

  def get_max_metric_output(
  self, metric_identifier: str | None = None, output_identifier: str | None = None
@@ -216,7 +216,7 @@ class VellumTestSuiteRunResults:
  output_values = self.get_numeric_metric_output_values(
  metric_identifier=metric_identifier, output_identifier=output_identifier
  )
- return max(output_values)
+ return max(cast(Iterable[float], filter(lambda o: isinstance(o, float), output_values)))

  def wait_until_complete(self) -> None:
  """Wait until the Test Suite Run is no longer in a QUEUED or RUNNING state."""
vellum/resources/document_indexes/client.py CHANGED
@@ -458,6 +458,63 @@ class DocumentIndexesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def add_document(
+ self, document_id: str, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Adds a previously uploaded Document to the specified Document Index.
+
+ Parameters:
+ - document_id: str. Either the Vellum-generated ID or the originally supplied external_id that uniquely identifies the Document you'd like to add.
+
+ - id: str. Either the Vellum-generated ID or the originally specified name that uniquely identifies the Document Index to which you'd like to add the Document.
+
+ - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+ ---
+ from vellum.client import Vellum
+
+ client = Vellum(
+ api_key="YOUR_API_KEY",
+ )
+ client.document_indexes.add_document(
+ document_id="document_id",
+ id="id",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ method="POST",
+ url=urllib.parse.urljoin(
+ f"{self._client_wrapper.get_environment().default}/",
+ f"v1/document-indexes/{jsonable_encoder(id)}/documents/{jsonable_encoder(document_id)}",
+ ),
+ params=jsonable_encoder(
+ request_options.get("additional_query_parameters") if request_options is not None else None
+ ),
+ json=jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))
+ if request_options is not None
+ else None,
+ headers=jsonable_encoder(
+ remove_none_from_dict(
+ {
+ **self._client_wrapper.get_headers(),
+ **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+ }
+ )
+ ),
+ timeout=request_options.get("timeout_in_seconds")
+ if request_options is not None and request_options.get("timeout_in_seconds") is not None
+ else self._client_wrapper.get_timeout(),
+ retries=0,
+ max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
+ )
+ if 200 <= _response.status_code < 300:
+ return
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  def remove_document(
  self, document_id: str, id: str, *, request_options: typing.Optional[RequestOptions] = None
  ) -> None:
@@ -950,6 +1007,63 @@ class AsyncDocumentIndexesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ async def add_document(
+ self, document_id: str, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Adds a previously uploaded Document to the specified Document Index.
+
+ Parameters:
+ - document_id: str. Either the Vellum-generated ID or the originally supplied external_id that uniquely identifies the Document you'd like to add.
+
+ - id: str. Either the Vellum-generated ID or the originally specified name that uniquely identifies the Document Index to which you'd like to add the Document.
+
+ - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
+ ---
+ from vellum.client import AsyncVellum
+
+ client = AsyncVellum(
+ api_key="YOUR_API_KEY",
+ )
+ await client.document_indexes.add_document(
+ document_id="document_id",
+ id="id",
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ method="POST",
+ url=urllib.parse.urljoin(
+ f"{self._client_wrapper.get_environment().default}/",
+ f"v1/document-indexes/{jsonable_encoder(id)}/documents/{jsonable_encoder(document_id)}",
+ ),
+ params=jsonable_encoder(
+ request_options.get("additional_query_parameters") if request_options is not None else None
+ ),
+ json=jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))
+ if request_options is not None
+ else None,
+ headers=jsonable_encoder(
+ remove_none_from_dict(
+ {
+ **self._client_wrapper.get_headers(),
+ **(request_options.get("additional_headers", {}) if request_options is not None else {}),
+ }
+ )
+ ),
+ timeout=request_options.get("timeout_in_seconds")
+ if request_options is not None and request_options.get("timeout_in_seconds") is not None
+ else self._client_wrapper.get_timeout(),
+ retries=0,
+ max_retries=request_options.get("max_retries") if request_options is not None else 0, # type: ignore
+ )
+ if 200 <= _response.status_code < 300:
+ return
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  async def remove_document(
  self, document_id: str, id: str, *, request_options: typing.Optional[RequestOptions] = None
  ) -> None:
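A hedged sketch of driving the new async `add_document` endpoint end to end with asyncio; the document and index identifiers are placeholders:

    import asyncio

    from vellum.client import AsyncVellum

    client = AsyncVellum(api_key="YOUR_API_KEY")

    async def main() -> None:
        # Attach an already-uploaded Document to a Document Index; per the diff,
        # the call returns None on a 2xx response and raises ApiError otherwise.
        await client.document_indexes.add_document(
            document_id="my-document-external-id",
            id="my-document-index-name",
        )

    asyncio.run(main())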
vellum/resources/test_suites/client.py CHANGED
@@ -11,11 +11,11 @@ from ...core.jsonable_encoder import jsonable_encoder
  from ...core.pydantic_utilities import pydantic_v1
  from ...core.remove_none_from_dict import remove_none_from_dict
  from ...core.request_options import RequestOptions
- from ...types.named_test_case_variable_value_request import NamedTestCaseVariableValueRequest
  from ...types.paginated_test_suite_test_case_list import PaginatedTestSuiteTestCaseList
  from ...types.test_suite_test_case import TestSuiteTestCase
  from ...types.test_suite_test_case_bulk_operation_request import TestSuiteTestCaseBulkOperationRequest
  from ...types.test_suite_test_case_bulk_result import TestSuiteTestCaseBulkResult
+ from ...types.upsert_test_suite_test_case_request import UpsertTestSuiteTestCaseRequest

  # this is used as the default value for optional parameters
  OMIT = typing.cast(typing.Any, ...)
@@ -99,11 +99,7 @@ class TestSuitesClient:
  self,
  id: str,
  *,
- upsert_test_suite_test_case_request_id: typing.Optional[str] = OMIT,
- external_id: typing.Optional[str] = OMIT,
- label: typing.Optional[str] = OMIT,
- input_values: typing.Sequence[NamedTestCaseVariableValueRequest],
- evaluation_values: typing.Sequence[NamedTestCaseVariableValueRequest],
+ request: UpsertTestSuiteTestCaseRequest,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> TestSuiteTestCase:
  """
@@ -118,18 +114,11 @@ class TestSuitesClient:
  Parameters:
  - id: str. A UUID string identifying this test suite.

- - upsert_test_suite_test_case_request_id: typing.Optional[str]. The Vellum-generated ID of an existing Test Case whose data you'd like to replace. If specified and no Test Case exists with this ID, a 404 will be returned.
-
- - external_id: typing.Optional[str]. An ID external to Vellum that uniquely identifies the Test Case that you'd like to create/update. If there's a match on a Test Case that was previously created with the same external_id, it will be updated. Otherwise, a new Test Case will be created with this value as its external_id. If no external_id is specified, then a new Test Case will always be created.
-
- - label: typing.Optional[str]. A human-readable label used to convey the intention of this Test Case
-
- - input_values: typing.Sequence[NamedTestCaseVariableValueRequest]. Values for each of the Test Case's input variables
-
- - evaluation_values: typing.Sequence[NamedTestCaseVariableValueRequest]. Values for each of the Test Case's evaluation variables
+ - request: UpsertTestSuiteTestCaseRequest.

  - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
  ---
+ from vellum import UpsertTestSuiteTestCaseRequest
  from vellum.client import Vellum

  client = Vellum(
@@ -137,17 +126,12 @@ class TestSuitesClient:
  )
  client.test_suites.upsert_test_suite_test_case(
  id="id",
- input_values=[],
- evaluation_values=[],
+ request=UpsertTestSuiteTestCaseRequest(
+ input_values=[],
+ evaluation_values=[],
+ ),
  )
  """
- _request: typing.Dict[str, typing.Any] = {"input_values": input_values, "evaluation_values": evaluation_values}
- if upsert_test_suite_test_case_request_id is not OMIT:
- _request["id"] = upsert_test_suite_test_case_request_id
- if external_id is not OMIT:
- _request["external_id"] = external_id
- if label is not OMIT:
- _request["label"] = label
  _response = self._client_wrapper.httpx_client.request(
  method="POST",
  url=urllib.parse.urljoin(
@@ -157,10 +141,10 @@ class TestSuitesClient:
  params=jsonable_encoder(
  request_options.get("additional_query_parameters") if request_options is not None else None
  ),
- json=jsonable_encoder(_request)
+ json=jsonable_encoder(request)
  if request_options is None or request_options.get("additional_body_parameters") is None
  else {
- **jsonable_encoder(_request),
+ **jsonable_encoder(request),
  **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
  },
  headers=jsonable_encoder(
@@ -402,11 +386,7 @@ class AsyncTestSuitesClient:
  self,
  id: str,
  *,
- upsert_test_suite_test_case_request_id: typing.Optional[str] = OMIT,
- external_id: typing.Optional[str] = OMIT,
- label: typing.Optional[str] = OMIT,
- input_values: typing.Sequence[NamedTestCaseVariableValueRequest],
- evaluation_values: typing.Sequence[NamedTestCaseVariableValueRequest],
+ request: UpsertTestSuiteTestCaseRequest,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> TestSuiteTestCase:
  """
@@ -421,18 +401,11 @@ class AsyncTestSuitesClient:
  Parameters:
  - id: str. A UUID string identifying this test suite.

- - upsert_test_suite_test_case_request_id: typing.Optional[str]. The Vellum-generated ID of an existing Test Case whose data you'd like to replace. If specified and no Test Case exists with this ID, a 404 will be returned.
-
- - external_id: typing.Optional[str]. An ID external to Vellum that uniquely identifies the Test Case that you'd like to create/update. If there's a match on a Test Case that was previously created with the same external_id, it will be updated. Otherwise, a new Test Case will be created with this value as its external_id. If no external_id is specified, then a new Test Case will always be created.
-
- - label: typing.Optional[str]. A human-readable label used to convey the intention of this Test Case
-
- - input_values: typing.Sequence[NamedTestCaseVariableValueRequest]. Values for each of the Test Case's input variables
-
- - evaluation_values: typing.Sequence[NamedTestCaseVariableValueRequest]. Values for each of the Test Case's evaluation variables
+ - request: UpsertTestSuiteTestCaseRequest.

  - request_options: typing.Optional[RequestOptions]. Request-specific configuration.
  ---
+ from vellum import UpsertTestSuiteTestCaseRequest
  from vellum.client import AsyncVellum

  client = AsyncVellum(
@@ -440,17 +413,12 @@ class AsyncTestSuitesClient:
  )
  await client.test_suites.upsert_test_suite_test_case(
  id="id",
- input_values=[],
- evaluation_values=[],
+ request=UpsertTestSuiteTestCaseRequest(
+ input_values=[],
+ evaluation_values=[],
+ ),
  )
  """
- _request: typing.Dict[str, typing.Any] = {"input_values": input_values, "evaluation_values": evaluation_values}
- if upsert_test_suite_test_case_request_id is not OMIT:
- _request["id"] = upsert_test_suite_test_case_request_id
- if external_id is not OMIT:
- _request["external_id"] = external_id
- if label is not OMIT:
- _request["label"] = label
  _response = await self._client_wrapper.httpx_client.request(
  method="POST",
  url=urllib.parse.urljoin(
@@ -460,10 +428,10 @@ class AsyncTestSuitesClient:
  params=jsonable_encoder(
  request_options.get("additional_query_parameters") if request_options is not None else None
  ),
- json=jsonable_encoder(_request)
+ json=jsonable_encoder(request)
  if request_options is None or request_options.get("additional_body_parameters") is None
  else {
- **jsonable_encoder(_request),
+ **jsonable_encoder(request),
  **(jsonable_encoder(remove_none_from_dict(request_options.get("additional_body_parameters", {})))),
  },
  headers=jsonable_encoder(
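The upsert endpoint now takes a single request object instead of loose keyword arguments. A hedged sketch of the new call shape; the `external_id` and `label` fields on `UpsertTestSuiteTestCaseRequest` are assumed to mirror the old keyword arguments described in the removed docstrings, and the empty value lists follow the generated example above:

    from vellum import UpsertTestSuiteTestCaseRequest
    from vellum.client import Vellum

    client = Vellum(api_key="YOUR_API_KEY")
    client.test_suites.upsert_test_suite_test_case(
        id="test-suite-id",
        request=UpsertTestSuiteTestCaseRequest(
            external_id="case-001",   # matches or creates a Test Case by external ID (assumed field)
            label="Smoke test case",  # human-readable label (assumed field)
            input_values=[],
            evaluation_values=[],
        ),
    )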
vellum/types/__init__.py CHANGED
@@ -242,6 +242,7 @@ from .map_node_result import MapNodeResult
  from .map_node_result_data import MapNodeResultData
  from .merge_enum import MergeEnum
  from .merge_node_result import MergeNodeResult
+ from .merge_node_result_data import MergeNodeResultData
  from .metadata_filter_config_request import MetadataFilterConfigRequest
  from .metadata_filter_rule_combinator import MetadataFilterRuleCombinator
  from .metadata_filter_rule_request import MetadataFilterRuleRequest
@@ -517,6 +518,7 @@ from .test_suite_run_exec_config_request import (
  TestSuiteRunExecConfigRequest_WorkflowReleaseTag,
  )
  from .test_suite_run_execution import TestSuiteRunExecution
+ from .test_suite_run_execution_array_output import TestSuiteRunExecutionArrayOutput
  from .test_suite_run_execution_chat_history_output import TestSuiteRunExecutionChatHistoryOutput
  from .test_suite_run_execution_error_output import TestSuiteRunExecutionErrorOutput
  from .test_suite_run_execution_function_call_output import TestSuiteRunExecutionFunctionCallOutput
@@ -526,6 +528,7 @@ from .test_suite_run_execution_metric_result import TestSuiteRunExecutionMetricR
  from .test_suite_run_execution_number_output import TestSuiteRunExecutionNumberOutput
  from .test_suite_run_execution_output import (
  TestSuiteRunExecutionOutput,
+ TestSuiteRunExecutionOutput_Array,
  TestSuiteRunExecutionOutput_ChatHistory,
  TestSuiteRunExecutionOutput_Error,
  TestSuiteRunExecutionOutput_FunctionCall,
@@ -569,6 +572,7 @@ from .test_suite_test_case_bulk_operation_request import (
  TestSuiteTestCaseBulkOperationRequest_Create,
  TestSuiteTestCaseBulkOperationRequest_Delete,
  TestSuiteTestCaseBulkOperationRequest_Replace,
+ TestSuiteTestCaseBulkOperationRequest_Upsert,
  )
  from .test_suite_test_case_bulk_result import (
  TestSuiteTestCaseBulkResult,
@@ -588,6 +592,7 @@ from .test_suite_test_case_rejected_bulk_result import TestSuiteTestCaseRejected
  from .test_suite_test_case_replace_bulk_operation_request import TestSuiteTestCaseReplaceBulkOperationRequest
  from .test_suite_test_case_replaced_bulk_result import TestSuiteTestCaseReplacedBulkResult
  from .test_suite_test_case_replaced_bulk_result_data import TestSuiteTestCaseReplacedBulkResultData
+ from .test_suite_test_case_upsert_bulk_operation_request import TestSuiteTestCaseUpsertBulkOperationRequest
  from .text_embedding_3_large_enum import TextEmbedding3LargeEnum
  from .text_embedding_3_small_enum import TextEmbedding3SmallEnum
  from .text_embedding_ada_002_enum import TextEmbeddingAda002Enum
@@ -598,6 +603,8 @@ from .token_overlapping_window_chunking import TokenOverlappingWindowChunking
  from .token_overlapping_window_chunking_request import TokenOverlappingWindowChunkingRequest
  from .upload_document_error_response import UploadDocumentErrorResponse
  from .upload_document_response import UploadDocumentResponse
+ from .upsert_enum import UpsertEnum
+ from .upsert_test_suite_test_case_request import UpsertTestSuiteTestCaseRequest
  from .vellum_error import VellumError
  from .vellum_error_code_enum import VellumErrorCodeEnum
  from .vellum_error_request import VellumErrorRequest
@@ -897,6 +904,7 @@ __all__ = [
  "MapNodeResultData",
  "MergeEnum",
  "MergeNodeResult",
+ "MergeNodeResultData",
  "MetadataFilterConfigRequest",
  "MetadataFilterRuleCombinator",
  "MetadataFilterRuleRequest",
@@ -1146,6 +1154,7 @@ __all__ = [
  "TestSuiteRunExecConfig_External",
  "TestSuiteRunExecConfig_WorkflowReleaseTag",
  "TestSuiteRunExecution",
+ "TestSuiteRunExecutionArrayOutput",
  "TestSuiteRunExecutionChatHistoryOutput",
  "TestSuiteRunExecutionErrorOutput",
  "TestSuiteRunExecutionFunctionCallOutput",
@@ -1154,6 +1163,7 @@ __all__ = [
  "TestSuiteRunExecutionMetricResult",
  "TestSuiteRunExecutionNumberOutput",
  "TestSuiteRunExecutionOutput",
+ "TestSuiteRunExecutionOutput_Array",
  "TestSuiteRunExecutionOutput_ChatHistory",
  "TestSuiteRunExecutionOutput_Error",
  "TestSuiteRunExecutionOutput_FunctionCall",
@@ -1191,6 +1201,7 @@ __all__ = [
  "TestSuiteTestCaseBulkOperationRequest_Create",
  "TestSuiteTestCaseBulkOperationRequest_Delete",
  "TestSuiteTestCaseBulkOperationRequest_Replace",
+ "TestSuiteTestCaseBulkOperationRequest_Upsert",
  "TestSuiteTestCaseBulkResult",
  "TestSuiteTestCaseBulkResult_Created",
  "TestSuiteTestCaseBulkResult_Deleted",
@@ -1207,6 +1218,7 @@ __all__ = [
  "TestSuiteTestCaseReplaceBulkOperationRequest",
  "TestSuiteTestCaseReplacedBulkResult",
  "TestSuiteTestCaseReplacedBulkResultData",
+ "TestSuiteTestCaseUpsertBulkOperationRequest",
  "TextEmbedding3LargeEnum",
  "TextEmbedding3SmallEnum",
  "TextEmbeddingAda002Enum",
@@ -1217,6 +1229,8 @@ __all__ = [
  "TokenOverlappingWindowChunkingRequest",
  "UploadDocumentErrorResponse",
  "UploadDocumentResponse",
+ "UpsertEnum",
+ "UpsertTestSuiteTestCaseRequest",
  "VellumError",
  "VellumErrorCodeEnum",
  "VellumErrorRequest",