vellum-ai 0.6.8__py3-none-any.whl → 0.7.1__py3-none-any.whl

Files changed (39)
  1. vellum/__init__.py +16 -0
  2. vellum/client.py +28 -28
  3. vellum/core/client_wrapper.py +1 -1
  4. vellum/lib/test_suites/resources.py +5 -5
  5. vellum/resources/document_indexes/client.py +114 -0
  6. vellum/resources/test_suites/client.py +19 -51
  7. vellum/types/__init__.py +16 -0
  8. vellum/types/code_execution_node_json_result.py +1 -1
  9. vellum/types/execution_json_vellum_value.py +1 -1
  10. vellum/types/iteration_state_enum.py +5 -0
  11. vellum/types/json_variable_value.py +1 -1
  12. vellum/types/json_vellum_value.py +1 -1
  13. vellum/types/map_node_result_data.py +2 -0
  14. vellum/types/merge_node_result.py +3 -0
  15. vellum/types/merge_node_result_data.py +25 -0
  16. vellum/types/named_test_case_json_variable_value.py +1 -1
  17. vellum/types/named_test_case_json_variable_value_request.py +1 -1
  18. vellum/types/node_input_compiled_json_value.py +1 -1
  19. vellum/types/node_output_compiled_json_value.py +1 -1
  20. vellum/types/prompt_node_result_data.py +1 -0
  21. vellum/types/templating_node_json_result.py +1 -1
  22. vellum/types/terminal_node_json_result.py +1 -1
  23. vellum/types/test_suite_run_execution_array_output.py +32 -0
  24. vellum/types/test_suite_run_execution_json_output.py +1 -1
  25. vellum/types/test_suite_run_execution_output.py +12 -0
  26. vellum/types/test_suite_run_metric_number_output.py +1 -1
  27. vellum/types/test_suite_run_metric_string_output.py +1 -1
  28. vellum/types/test_suite_test_case_bulk_operation_request.py +12 -0
  29. vellum/types/test_suite_test_case_rejected_bulk_result.py +1 -1
  30. vellum/types/test_suite_test_case_upsert_bulk_operation_request.py +35 -0
  31. vellum/types/upsert_enum.py +5 -0
  32. vellum/types/upsert_test_suite_test_case_request.py +49 -0
  33. vellum/types/workflow_output_json.py +1 -1
  34. vellum/types/workflow_request_json_input_request.py +1 -1
  35. vellum/types/workflow_result_event_output_data_json.py +1 -1
  36. {vellum_ai-0.6.8.dist-info → vellum_ai-0.7.1.dist-info}/METADATA +1 -1
  37. {vellum_ai-0.6.8.dist-info → vellum_ai-0.7.1.dist-info}/RECORD +39 -33
  38. {vellum_ai-0.6.8.dist-info → vellum_ai-0.7.1.dist-info}/LICENSE +0 -0
  39. {vellum_ai-0.6.8.dist-info → vellum_ai-0.7.1.dist-info}/WHEEL +0 -0
vellum/types/__init__.py CHANGED
@@ -230,6 +230,7 @@ from .initiated_workflow_node_result_event import InitiatedWorkflowNodeResultEve
 from .instructor_vectorizer_config import InstructorVectorizerConfig
 from .instructor_vectorizer_config_request import InstructorVectorizerConfigRequest
 from .intfloat_multilingual_e_5_large_enum import IntfloatMultilingualE5LargeEnum
+from .iteration_state_enum import IterationStateEnum
 from .json_enum import JsonEnum
 from .json_input_request import JsonInputRequest
 from .json_variable_value import JsonVariableValue
@@ -241,6 +242,7 @@ from .map_node_result import MapNodeResult
 from .map_node_result_data import MapNodeResultData
 from .merge_enum import MergeEnum
 from .merge_node_result import MergeNodeResult
+from .merge_node_result_data import MergeNodeResultData
 from .metadata_filter_config_request import MetadataFilterConfigRequest
 from .metadata_filter_rule_combinator import MetadataFilterRuleCombinator
 from .metadata_filter_rule_request import MetadataFilterRuleRequest
@@ -516,6 +518,7 @@ from .test_suite_run_exec_config_request import (
     TestSuiteRunExecConfigRequest_WorkflowReleaseTag,
 )
 from .test_suite_run_execution import TestSuiteRunExecution
+from .test_suite_run_execution_array_output import TestSuiteRunExecutionArrayOutput
 from .test_suite_run_execution_chat_history_output import TestSuiteRunExecutionChatHistoryOutput
 from .test_suite_run_execution_error_output import TestSuiteRunExecutionErrorOutput
 from .test_suite_run_execution_function_call_output import TestSuiteRunExecutionFunctionCallOutput
@@ -525,6 +528,7 @@ from .test_suite_run_execution_metric_result import TestSuiteRunExecutionMetricR
 from .test_suite_run_execution_number_output import TestSuiteRunExecutionNumberOutput
 from .test_suite_run_execution_output import (
     TestSuiteRunExecutionOutput,
+    TestSuiteRunExecutionOutput_Array,
     TestSuiteRunExecutionOutput_ChatHistory,
     TestSuiteRunExecutionOutput_Error,
     TestSuiteRunExecutionOutput_FunctionCall,
@@ -568,6 +572,7 @@ from .test_suite_test_case_bulk_operation_request import (
     TestSuiteTestCaseBulkOperationRequest_Create,
     TestSuiteTestCaseBulkOperationRequest_Delete,
     TestSuiteTestCaseBulkOperationRequest_Replace,
+    TestSuiteTestCaseBulkOperationRequest_Upsert,
 )
 from .test_suite_test_case_bulk_result import (
     TestSuiteTestCaseBulkResult,
@@ -587,6 +592,7 @@ from .test_suite_test_case_rejected_bulk_result import TestSuiteTestCaseRejected
 from .test_suite_test_case_replace_bulk_operation_request import TestSuiteTestCaseReplaceBulkOperationRequest
 from .test_suite_test_case_replaced_bulk_result import TestSuiteTestCaseReplacedBulkResult
 from .test_suite_test_case_replaced_bulk_result_data import TestSuiteTestCaseReplacedBulkResultData
+from .test_suite_test_case_upsert_bulk_operation_request import TestSuiteTestCaseUpsertBulkOperationRequest
 from .text_embedding_3_large_enum import TextEmbedding3LargeEnum
 from .text_embedding_3_small_enum import TextEmbedding3SmallEnum
 from .text_embedding_ada_002_enum import TextEmbeddingAda002Enum
@@ -597,6 +603,8 @@ from .token_overlapping_window_chunking import TokenOverlappingWindowChunking
 from .token_overlapping_window_chunking_request import TokenOverlappingWindowChunkingRequest
 from .upload_document_error_response import UploadDocumentErrorResponse
 from .upload_document_response import UploadDocumentResponse
+from .upsert_enum import UpsertEnum
+from .upsert_test_suite_test_case_request import UpsertTestSuiteTestCaseRequest
 from .vellum_error import VellumError
 from .vellum_error_code_enum import VellumErrorCodeEnum
 from .vellum_error_request import VellumErrorRequest
@@ -884,6 +892,7 @@ __all__ = [
     "InstructorVectorizerConfig",
     "InstructorVectorizerConfigRequest",
     "IntfloatMultilingualE5LargeEnum",
+    "IterationStateEnum",
     "JsonEnum",
     "JsonInputRequest",
     "JsonVariableValue",
@@ -895,6 +904,7 @@ __all__ = [
     "MapNodeResultData",
     "MergeEnum",
     "MergeNodeResult",
+    "MergeNodeResultData",
     "MetadataFilterConfigRequest",
     "MetadataFilterRuleCombinator",
     "MetadataFilterRuleRequest",
@@ -1144,6 +1154,7 @@ __all__ = [
     "TestSuiteRunExecConfig_External",
     "TestSuiteRunExecConfig_WorkflowReleaseTag",
     "TestSuiteRunExecution",
+    "TestSuiteRunExecutionArrayOutput",
     "TestSuiteRunExecutionChatHistoryOutput",
     "TestSuiteRunExecutionErrorOutput",
     "TestSuiteRunExecutionFunctionCallOutput",
@@ -1152,6 +1163,7 @@ __all__ = [
     "TestSuiteRunExecutionMetricResult",
     "TestSuiteRunExecutionNumberOutput",
     "TestSuiteRunExecutionOutput",
+    "TestSuiteRunExecutionOutput_Array",
     "TestSuiteRunExecutionOutput_ChatHistory",
     "TestSuiteRunExecutionOutput_Error",
     "TestSuiteRunExecutionOutput_FunctionCall",
@@ -1189,6 +1201,7 @@ __all__ = [
     "TestSuiteTestCaseBulkOperationRequest_Create",
     "TestSuiteTestCaseBulkOperationRequest_Delete",
     "TestSuiteTestCaseBulkOperationRequest_Replace",
+    "TestSuiteTestCaseBulkOperationRequest_Upsert",
     "TestSuiteTestCaseBulkResult",
     "TestSuiteTestCaseBulkResult_Created",
     "TestSuiteTestCaseBulkResult_Deleted",
@@ -1205,6 +1218,7 @@ __all__ = [
     "TestSuiteTestCaseReplaceBulkOperationRequest",
     "TestSuiteTestCaseReplacedBulkResult",
     "TestSuiteTestCaseReplacedBulkResultData",
+    "TestSuiteTestCaseUpsertBulkOperationRequest",
     "TextEmbedding3LargeEnum",
     "TextEmbedding3SmallEnum",
     "TextEmbeddingAda002Enum",
@@ -1215,6 +1229,8 @@ __all__ = [
     "TokenOverlappingWindowChunkingRequest",
     "UploadDocumentErrorResponse",
     "UploadDocumentResponse",
+    "UpsertEnum",
+    "UpsertTestSuiteTestCaseRequest",
     "VellumError",
     "VellumErrorCodeEnum",
     "VellumErrorRequest",

vellum/types/code_execution_node_json_result.py CHANGED
@@ -9,7 +9,7 @@ from ..core.pydantic_utilities import pydantic_v1

 class CodeExecutionNodeJsonResult(pydantic_v1.BaseModel):
     id: str
-    value: typing.Optional[typing.Dict[str, typing.Any]] = None
+    value: typing.Any

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/execution_json_vellum_value.py CHANGED
@@ -18,7 +18,7 @@ class ExecutionJsonVellumValue(pydantic_v1.BaseModel):
     """

     name: str
-    value: typing.Optional[typing.Dict[str, typing.Any]] = None
+    value: typing.Any

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/iteration_state_enum.py ADDED
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+IterationStateEnum = typing.Union[typing.Literal["INITIATED", "FULFILLED"], typing.Any]

vellum/types/json_variable_value.py CHANGED
@@ -8,7 +8,7 @@ from ..core.pydantic_utilities import pydantic_v1


 class JsonVariableValue(pydantic_v1.BaseModel):
-    value: typing.Optional[typing.Dict[str, typing.Any]] = None
+    value: typing.Any

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/json_vellum_value.py CHANGED
@@ -12,7 +12,7 @@ class JsonVellumValue(pydantic_v1.BaseModel):
     A value representing a JSON object.
     """

-    value: typing.Optional[typing.Dict[str, typing.Any]] = None
+    value: typing.Any

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/map_node_result_data.py CHANGED
@@ -5,10 +5,12 @@ import typing

 from ..core.datetime_utils import serialize_datetime
 from ..core.pydantic_utilities import pydantic_v1
+from .iteration_state_enum import IterationStateEnum


 class MapNodeResultData(pydantic_v1.BaseModel):
     execution_ids: typing.List[str]
+    iteration_state: typing.Optional[IterationStateEnum] = None

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
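
Map Node results now carry an optional iteration_state field whose values come from the new IterationStateEnum ("INITIATED" or "FULFILLED"). A short sketch of reading it; the MapNodeResultData below is constructed by hand purely for illustration, whereas in practice it arrives inside a workflow node result event:

    from vellum.types import MapNodeResultData

    # Hand-built payload for illustration only; real values are emitted while a
    # workflow containing a Map Node is executing.
    data = MapNodeResultData(
        execution_ids=["exec-1", "exec-2"],
        iteration_state="INITIATED",
    )

    if data.iteration_state == "FULFILLED":
        print(f"All {len(data.execution_ids)} iterations finished")
    else:
        # iteration_state may also be None on older payloads, so treat anything
        # other than FULFILLED as still in flight.
        print("Map Node still iterating")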

vellum/types/merge_node_result.py CHANGED
@@ -5,6 +5,7 @@ import typing

 from ..core.datetime_utils import serialize_datetime
 from ..core.pydantic_utilities import pydantic_v1
+from .merge_node_result_data import MergeNodeResultData


 class MergeNodeResult(pydantic_v1.BaseModel):
@@ -12,6 +13,8 @@ class MergeNodeResult(pydantic_v1.BaseModel):
     A Node Result Event emitted from a Merge Node.
     """

+    data: MergeNodeResultData
+
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
         return super().json(**kwargs_with_defaults)

vellum/types/merge_node_result_data.py ADDED
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+
+
+class MergeNodeResultData(pydantic_v1.BaseModel):
+    paused_node_data: typing.Optional[typing.Dict[str, typing.Any]] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}

vellum/types/named_test_case_json_variable_value.py CHANGED
@@ -12,7 +12,7 @@ class NamedTestCaseJsonVariableValue(pydantic_v1.BaseModel):
     Named Test Case value that is of type JSON
     """

-    value: typing.Optional[typing.Dict[str, typing.Any]] = None
+    value: typing.Any
     name: str

     def json(self, **kwargs: typing.Any) -> str:

vellum/types/named_test_case_json_variable_value_request.py CHANGED
@@ -12,7 +12,7 @@ class NamedTestCaseJsonVariableValueRequest(pydantic_v1.BaseModel):
     Named Test Case value that is of type JSON
     """

-    value: typing.Optional[typing.Dict[str, typing.Any]] = None
+    value: typing.Any
     name: str

     def json(self, **kwargs: typing.Any) -> str:

vellum/types/node_input_compiled_json_value.py CHANGED
@@ -10,7 +10,7 @@ from ..core.pydantic_utilities import pydantic_v1
 class NodeInputCompiledJsonValue(pydantic_v1.BaseModel):
     node_input_id: str
     key: str
-    value: typing.Optional[typing.Dict[str, typing.Any]] = None
+    value: typing.Any

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/node_output_compiled_json_value.py CHANGED
@@ -13,7 +13,7 @@ class NodeOutputCompiledJsonValue(pydantic_v1.BaseModel):
     An output returned by a node that is of type JSON.
     """

-    value: typing.Optional[typing.Dict[str, typing.Any]] = None
+    value: typing.Any
     node_output_id: str
     state: typing.Optional[WorkflowNodeResultEventState] = None


vellum/types/prompt_node_result_data.py CHANGED
@@ -10,6 +10,7 @@ from ..core.pydantic_utilities import pydantic_v1
 class PromptNodeResultData(pydantic_v1.BaseModel):
     output_id: str
     array_output_id: typing.Optional[str] = None
+    execution_id: typing.Optional[str] = None
     text: typing.Optional[str] = None
     delta: typing.Optional[str] = None


vellum/types/templating_node_json_result.py CHANGED
@@ -9,7 +9,7 @@ from ..core.pydantic_utilities import pydantic_v1

 class TemplatingNodeJsonResult(pydantic_v1.BaseModel):
     id: str
-    value: typing.Optional[typing.Dict[str, typing.Any]] = None
+    value: typing.Any

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/terminal_node_json_result.py CHANGED
@@ -14,7 +14,7 @@ class TerminalNodeJsonResult(pydantic_v1.BaseModel):
     The unique name given to the terminal node that produced this output.
     """

-    value: typing.Optional[typing.Dict[str, typing.Any]] = None
+    value: typing.Any

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/test_suite_run_execution_array_output.py ADDED
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .array_vellum_value_item import ArrayVellumValueItem
+
+
+class TestSuiteRunExecutionArrayOutput(pydantic_v1.BaseModel):
+    """
+    Execution output of an entity evaluated during a Test Suite Run that is of type ARRAY
+    """
+
+    name: str
+    value: typing.Optional[typing.List[ArrayVellumValueItem]] = None
+    output_variable_id: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}

vellum/types/test_suite_run_execution_json_output.py CHANGED
@@ -13,7 +13,7 @@ class TestSuiteRunExecutionJsonOutput(pydantic_v1.BaseModel):
     """

     name: str
-    value: typing.Optional[typing.Dict[str, typing.Any]] = None
+    value: typing.Any
     output_variable_id: str

     def json(self, **kwargs: typing.Any) -> str:

vellum/types/test_suite_run_execution_output.py CHANGED
@@ -4,6 +4,7 @@ from __future__ import annotations

 import typing

+from .test_suite_run_execution_array_output import TestSuiteRunExecutionArrayOutput
 from .test_suite_run_execution_chat_history_output import TestSuiteRunExecutionChatHistoryOutput
 from .test_suite_run_execution_error_output import TestSuiteRunExecutionErrorOutput
 from .test_suite_run_execution_function_call_output import TestSuiteRunExecutionFunctionCallOutput
@@ -83,6 +84,16 @@ class TestSuiteRunExecutionOutput_FunctionCall(TestSuiteRunExecutionFunctionCall
         populate_by_name = True


+class TestSuiteRunExecutionOutput_Array(TestSuiteRunExecutionArrayOutput):
+    type: typing.Literal["ARRAY"] = "ARRAY"
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+        populate_by_name = True
+
+
 TestSuiteRunExecutionOutput = typing.Union[
     TestSuiteRunExecutionOutput_String,
     TestSuiteRunExecutionOutput_Number,
@@ -91,4 +102,5 @@ TestSuiteRunExecutionOutput = typing.Union[
     TestSuiteRunExecutionOutput_SearchResults,
     TestSuiteRunExecutionOutput_Error,
     TestSuiteRunExecutionOutput_FunctionCall,
+    TestSuiteRunExecutionOutput_Array,
 ]
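
With TestSuiteRunExecutionOutput_Array added to the union, code that branches over execution outputs needs an ARRAY case. A sketch under the assumption that you already have TestSuiteRunExecutionOutput values in hand (how they are fetched is unchanged by this release):

    from vellum.types import (
        TestSuiteRunExecutionOutput,
        TestSuiteRunExecutionOutput_Array,
    )

    def describe(output: TestSuiteRunExecutionOutput) -> str:
        # Each member of the union carries a discriminating `type` literal; the
        # new ARRAY variant exposes an optional list of ArrayVellumValueItem.
        if isinstance(output, TestSuiteRunExecutionOutput_Array):
            items = output.value or []
            return f"{output.name}: ARRAY with {len(items)} item(s)"
        return f"{output.name}: {output.type}"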

vellum/types/test_suite_run_metric_number_output.py CHANGED
@@ -12,7 +12,7 @@ class TestSuiteRunMetricNumberOutput(pydantic_v1.BaseModel):
     Output for a test suite run metric that is of type NUMBER
     """

-    value: float
+    value: typing.Optional[float] = None
     name: str

     def json(self, **kwargs: typing.Any) -> str:

vellum/types/test_suite_run_metric_string_output.py CHANGED
@@ -12,7 +12,7 @@ class TestSuiteRunMetricStringOutput(pydantic_v1.BaseModel):
     Output for a test suite run metric that is of type STRING
     """

-    value: str
+    value: typing.Optional[str] = None
     name: str

     def json(self, **kwargs: typing.Any) -> str:
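
Both metric output models now allow value to be absent, so downstream code should stop assuming every metric produced a value. A small sketch:

    from vellum.types import TestSuiteRunMetricNumberOutput

    metric = TestSuiteRunMetricNumberOutput(name="accuracy", value=None)

    # As of 0.7.1 value may be None, so guard before formatting or comparing it.
    if metric.value is None:
        print(f"{metric.name}: no value reported")
    else:
        print(f"{metric.name}: {metric.value:.2f}")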

vellum/types/test_suite_test_case_bulk_operation_request.py CHANGED
@@ -7,6 +7,7 @@ import typing
 from .test_suite_test_case_create_bulk_operation_request import TestSuiteTestCaseCreateBulkOperationRequest
 from .test_suite_test_case_delete_bulk_operation_request import TestSuiteTestCaseDeleteBulkOperationRequest
 from .test_suite_test_case_replace_bulk_operation_request import TestSuiteTestCaseReplaceBulkOperationRequest
+from .test_suite_test_case_upsert_bulk_operation_request import TestSuiteTestCaseUpsertBulkOperationRequest


 class TestSuiteTestCaseBulkOperationRequest_Create(TestSuiteTestCaseCreateBulkOperationRequest):
@@ -29,6 +30,16 @@ class TestSuiteTestCaseBulkOperationRequest_Replace(TestSuiteTestCaseReplaceBulk
         populate_by_name = True


+class TestSuiteTestCaseBulkOperationRequest_Upsert(TestSuiteTestCaseUpsertBulkOperationRequest):
+    type: typing.Literal["UPSERT"] = "UPSERT"
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+        populate_by_name = True
+
+
 class TestSuiteTestCaseBulkOperationRequest_Delete(TestSuiteTestCaseDeleteBulkOperationRequest):
     type: typing.Literal["DELETE"] = "DELETE"

@@ -42,5 +53,6 @@ class TestSuiteTestCaseBulkOperationRequest_Delete(TestSuiteTestCaseDeleteBulkOp
 TestSuiteTestCaseBulkOperationRequest = typing.Union[
     TestSuiteTestCaseBulkOperationRequest_Create,
     TestSuiteTestCaseBulkOperationRequest_Replace,
+    TestSuiteTestCaseBulkOperationRequest_Upsert,
     TestSuiteTestCaseBulkOperationRequest_Delete,
 ]

vellum/types/test_suite_test_case_rejected_bulk_result.py CHANGED
@@ -12,7 +12,7 @@ class TestSuiteTestCaseRejectedBulkResult(pydantic_v1.BaseModel):
     The result of a bulk operation that failed to operate on a Test Case.
     """

-    id: str = pydantic_v1.Field()
+    id: typing.Optional[str] = pydantic_v1.Field(default=None)
     """
     An ID that maps back to one of the initially supplied operations. Can be used to determine the result of a given operation.
     """

vellum/types/test_suite_test_case_upsert_bulk_operation_request.py ADDED
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .upsert_test_suite_test_case_request import UpsertTestSuiteTestCaseRequest
+
+
+class TestSuiteTestCaseUpsertBulkOperationRequest(pydantic_v1.BaseModel):
+    """
+    A bulk operation that represents the upserting of a Test Case.
+    """
+
+    id: str = pydantic_v1.Field()
+    """
+    An ID representing this specific operation. Can later be used to look up information about the operation's success in the response.
+    """
+
+    data: UpsertTestSuiteTestCaseRequest
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}

vellum/types/upsert_enum.py ADDED
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+UpsertEnum = typing.Literal["UPSERT"]

vellum/types/upsert_test_suite_test_case_request.py ADDED
@@ -0,0 +1,49 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .named_test_case_variable_value_request import NamedTestCaseVariableValueRequest
+
+
+class UpsertTestSuiteTestCaseRequest(pydantic_v1.BaseModel):
+    id: typing.Optional[str] = pydantic_v1.Field(default=None)
+    """
+    The Vellum-generated ID of an existing Test Case whose data you'd like to replace. If specified and no Test Case exists with this ID, a 404 will be returned.
+    """
+
+    external_id: typing.Optional[str] = pydantic_v1.Field(default=None)
+    """
+    An ID external to Vellum that uniquely identifies the Test Case that you'd like to create/update. If there's a match on a Test Case that was previously created with the same external_id, it will be updated. Otherwise, a new Test Case will be created with this value as its external_id. If no external_id is specified, then a new Test Case will always be created.
+    """
+
+    label: typing.Optional[str] = pydantic_v1.Field(default=None)
+    """
+    A human-readable label used to convey the intention of this Test Case
+    """
+
+    input_values: typing.List[NamedTestCaseVariableValueRequest] = pydantic_v1.Field()
+    """
+    Values for each of the Test Case's input variables
+    """
+
+    evaluation_values: typing.List[NamedTestCaseVariableValueRequest] = pydantic_v1.Field()
+    """
+    Values for each of the Test Case's evaluation variables
+    """
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
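
The new upsert types compose like the existing create/replace/delete bulk operations: wrap an UpsertTestSuiteTestCaseRequest in a TestSuiteTestCaseBulkOperationRequest_Upsert and pass it wherever a TestSuiteTestCaseBulkOperationRequest is accepted (for example the Test Suites bulk endpoint updated in vellum/resources/test_suites/client.py above). A sketch of building a single operation; the surrounding client call is omitted, and the input/evaluation values are left empty purely to keep the example short:

    from vellum.types import (
        TestSuiteTestCaseBulkOperationRequest_Upsert,
        UpsertTestSuiteTestCaseRequest,
    )

    # id identifies this operation in the bulk response; external_id decides
    # whether an existing Test Case is updated or a brand-new one is created.
    operation = TestSuiteTestCaseBulkOperationRequest_Upsert(
        id="op-1",
        data=UpsertTestSuiteTestCaseRequest(
            external_id="case-42",
            label="Happy path",
            # Real calls populate these with NamedTestCaseVariableValueRequest values.
            input_values=[],
            evaluation_values=[],
        ),
    )

    print(operation.json())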

vellum/types/workflow_output_json.py CHANGED
@@ -18,7 +18,7 @@ class WorkflowOutputJson(pydantic_v1.BaseModel):
     The output's name, as defined in the workflow
     """

-    value: typing.Optional[typing.Dict[str, typing.Any]] = None
+    value: typing.Any

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/workflow_request_json_input_request.py CHANGED
@@ -17,7 +17,7 @@ class WorkflowRequestJsonInputRequest(pydantic_v1.BaseModel):
     The variable's name, as defined in the Workflow.
     """

-    value: typing.Dict[str, typing.Any]
+    value: typing.Any

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/workflow_result_event_output_data_json.py CHANGED
@@ -22,7 +22,7 @@ class WorkflowResultEventOutputDataJson(pydantic_v1.BaseModel):
     The newly output string value. Only relevant for string outputs with a state of STREAMING.
     """

-    value: typing.Optional[typing.Dict[str, typing.Any]] = None
+    value: typing.Any

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

{vellum_ai-0.6.8.dist-info → vellum_ai-0.7.1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-ai
-Version: 0.6.8
+Version: 0.7.1
 Summary:
 Requires-Python: >=3.8,<4.0
 Classifier: Programming Language :: Python :: 3