vellum-ai 0.5.1__py3-none-any.whl → 0.6.0__py3-none-any.whl
- vellum/__init__.py +170 -20
- vellum/core/client_wrapper.py +1 -1
- vellum/resources/document_indexes/client.py +61 -52
- vellum/resources/documents/client.py +8 -4
- vellum/types/__init__.py +189 -19
- vellum/types/add_openai_api_key_enum.py +3 -0
- vellum/types/array_variable_value_item.py +0 -24
- vellum/types/array_vellum_value_item.py +82 -0
- vellum/types/basic_vectorizer_intfloat_multilingual_e_5_large.py +29 -0
- vellum/types/basic_vectorizer_intfloat_multilingual_e_5_large_request.py +29 -0
- vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_cos_v_1.py +29 -0
- vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_cos_v_1_request.py +29 -0
- vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_dot_v_1.py +29 -0
- vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_dot_v_1_request.py +29 -0
- vellum/types/document_index_chunking.py +46 -0
- vellum/types/document_index_chunking_request.py +46 -0
- vellum/types/document_index_indexing_config.py +28 -0
- vellum/types/document_index_indexing_config_request.py +28 -0
- vellum/types/document_index_read.py +2 -4
- vellum/types/{rejected_function_call.py → error_vellum_value.py} +3 -5
- vellum/types/function_call.py +20 -18
- vellum/types/{fulfilled_function_call_request.py → function_call_request.py} +2 -2
- vellum/types/function_call_variable_value.py +1 -1
- vellum/types/{node_output_compiled_function_value.py → function_call_vellum_value.py} +5 -2
- vellum/types/hkunlp_instructor_xl_enum.py +5 -0
- vellum/types/hkunlp_instructor_xl_vectorizer.py +30 -0
- vellum/types/hkunlp_instructor_xl_vectorizer_request.py +30 -0
- vellum/types/{fulfilled_function_call.py → image_vellum_value.py} +4 -5
- vellum/types/indexing_config_vectorizer.py +106 -0
- vellum/types/indexing_config_vectorizer_request.py +106 -0
- vellum/types/instructor_vectorizer_config.py +31 -0
- vellum/types/instructor_vectorizer_config_request.py +31 -0
- vellum/types/intfloat_multilingual_e_5_large_enum.py +5 -0
- vellum/types/json_vellum_value.py +29 -0
- vellum/types/metric_enum.py +5 -0
- vellum/types/{chat_history_variable_value.py → metric_node_result.py} +4 -3
- vellum/types/named_test_case_function_call_variable_value.py +2 -2
- vellum/types/named_test_case_function_call_variable_value_request.py +2 -2
- vellum/types/node_output_compiled_array_value.py +8 -2
- vellum/types/node_output_compiled_chat_history_value.py +7 -1
- vellum/types/node_output_compiled_error_value.py +7 -1
- vellum/types/node_output_compiled_function_call_value.py +33 -0
- vellum/types/node_output_compiled_json_value.py +7 -1
- vellum/types/node_output_compiled_number_value.py +7 -1
- vellum/types/node_output_compiled_search_results_value.py +7 -1
- vellum/types/node_output_compiled_string_value.py +7 -1
- vellum/types/node_output_compiled_value.py +2 -2
- vellum/types/{search_results_variable_value.py → number_vellum_value.py} +6 -3
- vellum/types/open_ai_vectorizer_config.py +30 -0
- vellum/types/open_ai_vectorizer_config_request.py +30 -0
- vellum/types/open_ai_vectorizer_text_embedding_3_large.py +30 -0
- vellum/types/open_ai_vectorizer_text_embedding_3_large_request.py +30 -0
- vellum/types/open_ai_vectorizer_text_embedding_3_small.py +30 -0
- vellum/types/open_ai_vectorizer_text_embedding_3_small_request.py +30 -0
- vellum/types/open_ai_vectorizer_text_embedding_ada_002.py +30 -0
- vellum/types/open_ai_vectorizer_text_embedding_ada_002_request.py +30 -0
- vellum/types/prompt_output.py +8 -8
- vellum/types/reducto_chunker_config.py +29 -0
- vellum/types/reducto_chunker_config_request.py +29 -0
- vellum/types/reducto_chunker_enum.py +5 -0
- vellum/types/reducto_chunking.py +30 -0
- vellum/types/reducto_chunking_request.py +30 -0
- vellum/types/sentence_chunker_config.py +30 -0
- vellum/types/sentence_chunker_config_request.py +30 -0
- vellum/types/sentence_chunker_enum.py +5 -0
- vellum/types/sentence_chunking.py +30 -0
- vellum/types/sentence_chunking_request.py +30 -0
- vellum/types/sentence_transformers_multi_qa_mpnet_base_cos_v_1_enum.py +5 -0
- vellum/types/sentence_transformers_multi_qa_mpnet_base_dot_v_1_enum.py +5 -0
- vellum/types/string_vellum_value.py +29 -0
- vellum/types/test_case_function_call_variable_value.py +2 -2
- vellum/types/test_suite_run_execution_function_call_output.py +2 -2
- vellum/types/text_embedding_3_large_enum.py +5 -0
- vellum/types/text_embedding_3_small_enum.py +5 -0
- vellum/types/text_embedding_ada_002_enum.py +5 -0
- vellum/types/token_overlapping_window_chunker_config.py +30 -0
- vellum/types/token_overlapping_window_chunker_config_request.py +30 -0
- vellum/types/token_overlapping_window_chunker_enum.py +5 -0
- vellum/types/token_overlapping_window_chunking.py +30 -0
- vellum/types/token_overlapping_window_chunking_request.py +30 -0
- vellum/types/workflow_execution_actual_chat_history_request.py +5 -0
- vellum/types/workflow_execution_actual_json_request.py +5 -0
- vellum/types/workflow_execution_actual_string_request.py +5 -0
- vellum/types/workflow_node_result_data.py +12 -0
- vellum/types/workflow_output_array.py +2 -2
- {vellum_ai-0.5.1.dist-info → vellum_ai-0.6.0.dist-info}/METADATA +1 -1
- {vellum_ai-0.5.1.dist-info → vellum_ai-0.6.0.dist-info}/RECORD +89 -37
- {vellum_ai-0.5.1.dist-info → vellum_ai-0.6.0.dist-info}/LICENSE +0 -0
- {vellum_ai-0.5.1.dist-info → vellum_ai-0.6.0.dist-info}/WHEEL +0 -0
vellum/types/open_ai_vectorizer_text_embedding_ada_002.py
ADDED
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .open_ai_vectorizer_config import OpenAiVectorizerConfig
+
+
+class OpenAiVectorizerTextEmbeddingAda002(pydantic_v1.BaseModel):
+    """
+    OpenAI vectorizer for text-embedding-ada-002.
+    """
+
+    config: OpenAiVectorizerConfig
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/open_ai_vectorizer_text_embedding_ada_002_request.py
ADDED
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .open_ai_vectorizer_config_request import OpenAiVectorizerConfigRequest
+
+
+class OpenAiVectorizerTextEmbeddingAda002Request(pydantic_v1.BaseModel):
+    """
+    OpenAI vectorizer for text-embedding-ada-002.
+    """
+
+    config: OpenAiVectorizerConfigRequest
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/prompt_output.py
CHANGED
@@ -4,13 +4,13 @@ from __future__ import annotations
 
 import typing
 
-from .
-from .
-from .
-from .
+from .error_vellum_value import ErrorVellumValue
+from .function_call_vellum_value import FunctionCallVellumValue
+from .json_vellum_value import JsonVellumValue
+from .string_vellum_value import StringVellumValue
 
 
-class PromptOutput_String(StringVariableValue):
+class PromptOutput_String(StringVellumValue):
     type: typing.Literal["STRING"] = "STRING"
 
     class Config:
@@ -20,7 +20,7 @@ class PromptOutput_String(StringVariableValue):
         populate_by_name = True
 
 
-class PromptOutput_Json(JsonVariableValue):
+class PromptOutput_Json(JsonVellumValue):
     type: typing.Literal["JSON"] = "JSON"
 
     class Config:
@@ -30,7 +30,7 @@ class PromptOutput_Json(JsonVariableValue):
         populate_by_name = True
 
 
-class PromptOutput_Error(ErrorVariableValue):
+class PromptOutput_Error(ErrorVellumValue):
     type: typing.Literal["ERROR"] = "ERROR"
 
     class Config:
@@ -40,7 +40,7 @@ class PromptOutput_Error(ErrorVariableValue):
         populate_by_name = True
 
 
-class PromptOutput_FunctionCall(
+class PromptOutput_FunctionCall(FunctionCallVellumValue):
    type: typing.Literal["FUNCTION_CALL"] = "FUNCTION_CALL"
 
     class Config:
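As a usage sketch (not taken from the diff itself): the reworked PromptOutput union members can be constructed directly from the vellum/types/prompt_output.py module shown above. The expanded top-level __init__.py presumably re-exports these names as well, but that is not shown in this hunk, so the explicit module path is used here.

from vellum.types.prompt_output import PromptOutput_String

# PromptOutput_String now subclasses StringVellumValue, so it carries an
# optional `value` plus the "STRING" discriminator seen in the hunk above.
output = PromptOutput_String(value="Hello, world")
assert output.type == "STRING"
print(output.json())  # the generated json() serializes by alias and excludes unset fields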
vellum/types/reducto_chunker_config.py
ADDED
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+
+
+class ReductoChunkerConfig(pydantic_v1.BaseModel):
+    """
+    Configuration for Reducto chunking
+    """
+
+    character_limit: typing.Optional[int] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/reducto_chunker_config_request.py
ADDED
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+
+
+class ReductoChunkerConfigRequest(pydantic_v1.BaseModel):
+    """
+    Configuration for Reducto chunking
+    """
+
+    character_limit: typing.Optional[int] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/reducto_chunking.py
ADDED
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .reducto_chunker_config import ReductoChunkerConfig
+
+
+class ReductoChunking(pydantic_v1.BaseModel):
+    """
+    Reducto chunking
+    """
+
+    chunker_config: typing.Optional[ReductoChunkerConfig] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/reducto_chunking_request.py
ADDED
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .reducto_chunker_config_request import ReductoChunkerConfigRequest
+
+
+class ReductoChunkingRequest(pydantic_v1.BaseModel):
+    """
+    Reducto chunking
+    """
+
+    chunker_config: typing.Optional[ReductoChunkerConfigRequest] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
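A minimal usage sketch of the new Reducto chunking models (not part of the diff; the import paths assume the modules shown above):

from vellum.types.reducto_chunker_config import ReductoChunkerConfig
from vellum.types.reducto_chunking import ReductoChunking

# character_limit is the only field the new config exposes, and it is optional;
# 1000 is an illustrative value, not a library default.
chunking = ReductoChunking(chunker_config=ReductoChunkerConfig(character_limit=1000))
print(chunking.dict())  # {"chunker_config": {"character_limit": 1000}}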
vellum/types/sentence_chunker_config.py
ADDED
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+
+
+class SentenceChunkerConfig(pydantic_v1.BaseModel):
+    """
+    Configuration for sentence chunking
+    """
+
+    character_limit: typing.Optional[int] = None
+    min_overlap_ratio: typing.Optional[float] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/sentence_chunker_config_request.py
ADDED
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+
+
+class SentenceChunkerConfigRequest(pydantic_v1.BaseModel):
+    """
+    Configuration for sentence chunking
+    """
+
+    character_limit: typing.Optional[int] = None
+    min_overlap_ratio: typing.Optional[float] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/sentence_chunking.py
ADDED
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .sentence_chunker_config import SentenceChunkerConfig
+
+
+class SentenceChunking(pydantic_v1.BaseModel):
+    """
+    Sentence chunking
+    """
+
+    chunker_config: typing.Optional[SentenceChunkerConfig] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/sentence_chunking_request.py
ADDED
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .sentence_chunker_config_request import SentenceChunkerConfigRequest
+
+
+class SentenceChunkingRequest(pydantic_v1.BaseModel):
+    """
+    Sentence chunking
+    """
+
+    chunker_config: typing.Optional[SentenceChunkerConfigRequest] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
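For comparison, a hedged sketch of building a sentence-chunking configuration from the two models above (values are placeholders, not defaults):

from vellum.types.sentence_chunker_config import SentenceChunkerConfig
from vellum.types.sentence_chunking import SentenceChunking

# Both fields on the config are optional floats/ints per the hunk above.
config = SentenceChunkerConfig(character_limit=1024, min_overlap_ratio=0.5)
chunking = SentenceChunking(chunker_config=config)
print(chunking.json())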
vellum/types/string_vellum_value.py
ADDED
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+
+
+class StringVellumValue(pydantic_v1.BaseModel):
+    """
+    A value representing a string.
+    """
+
+    value: typing.Optional[str] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
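A short sketch of the behaviors the generated Config above implies (illustrative only; it exercises nothing beyond what the hunk defines):

from vellum.types.string_vellum_value import StringVellumValue

value = StringVellumValue(value="hello")
print(value.dict())  # {"value": "hello"}; unset fields are excluded by default

# frozen = True makes the pydantic v1 model immutable, so assignment after
# construction raises a TypeError.
try:
    value.value = "world"
except TypeError:
    print("StringVellumValue is immutable")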
vellum/types/test_case_function_call_variable_value.py
CHANGED
@@ -5,7 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from ..core.pydantic_utilities import pydantic_v1
-from .
+from .function_call import FunctionCall
 
 
 class TestCaseFunctionCallVariableValue(pydantic_v1.BaseModel):
@@ -15,7 +15,7 @@ class TestCaseFunctionCallVariableValue(pydantic_v1.BaseModel):
 
     variable_id: str
     name: str
-    value: typing.Optional[
+    value: typing.Optional[FunctionCall] = None
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
vellum/types/test_suite_run_execution_function_call_output.py
CHANGED
@@ -5,7 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from ..core.pydantic_utilities import pydantic_v1
-from .
+from .function_call import FunctionCall
 
 
 class TestSuiteRunExecutionFunctionCallOutput(pydantic_v1.BaseModel):
@@ -14,7 +14,7 @@ class TestSuiteRunExecutionFunctionCallOutput(pydantic_v1.BaseModel):
     """
 
     name: str
-    value: typing.Optional[
+    value: typing.Optional[FunctionCall] = None
     output_variable_id: str
 
     def json(self, **kwargs: typing.Any) -> str:
vellum/types/token_overlapping_window_chunker_config.py
ADDED
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+
+
+class TokenOverlappingWindowChunkerConfig(pydantic_v1.BaseModel):
+    """
+    Configuration for token overlapping window chunking
+    """
+
+    token_limit: typing.Optional[int] = None
+    overlap_ratio: typing.Optional[float] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/token_overlapping_window_chunker_config_request.py
ADDED
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+
+
+class TokenOverlappingWindowChunkerConfigRequest(pydantic_v1.BaseModel):
+    """
+    Configuration for token overlapping window chunking
+    """
+
+    token_limit: typing.Optional[int] = None
+    overlap_ratio: typing.Optional[float] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/token_overlapping_window_chunking.py
ADDED
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .token_overlapping_window_chunker_config import TokenOverlappingWindowChunkerConfig
+
+
+class TokenOverlappingWindowChunking(pydantic_v1.BaseModel):
+    """
+    Token overlapping window chunking
+    """
+
+    chunker_config: typing.Optional[TokenOverlappingWindowChunkerConfig] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/token_overlapping_window_chunking_request.py
ADDED
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .token_overlapping_window_chunker_config_request import TokenOverlappingWindowChunkerConfigRequest
+
+
+class TokenOverlappingWindowChunkingRequest(pydantic_v1.BaseModel):
+    """
+    Token overlapping window chunking
+    """
+
+    chunker_config: typing.Optional[TokenOverlappingWindowChunkerConfigRequest] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
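A hedged sketch of the token-overlapping-window variant, mirroring the other chunking models above (512 and 0.2 are illustrative values, not defaults):

from vellum.types.token_overlapping_window_chunker_config import TokenOverlappingWindowChunkerConfig
from vellum.types.token_overlapping_window_chunking import TokenOverlappingWindowChunking

chunking = TokenOverlappingWindowChunking(
    chunker_config=TokenOverlappingWindowChunkerConfig(token_limit=512, overlap_ratio=0.2),
)
print(chunking.json())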
vellum/types/workflow_execution_actual_chat_history_request.py
CHANGED
@@ -24,6 +24,11 @@ class WorkflowExecutionActualChatHistoryRequest(pydantic_v1.BaseModel):
     Optionally provide a decimal number between 0.0 and 1.0 (inclusive) representing the quality of the output. 0 is the worst, 1 is the best.
     """
 
+    metadata: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+    """
+    Optionally provide additional metadata about the feedback submission.
+    """
+
     timestamp: typing.Optional[float] = pydantic_v1.Field(default=None)
     """
     Optionally provide the timestamp representing when this feedback was collected. Used for reporting purposes.
vellum/types/workflow_execution_actual_json_request.py
CHANGED
@@ -23,6 +23,11 @@ class WorkflowExecutionActualJsonRequest(pydantic_v1.BaseModel):
     Optionally provide a decimal number between 0.0 and 1.0 (inclusive) representing the quality of the output. 0 is the worst, 1 is the best.
     """
 
+    metadata: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+    """
+    Optionally provide additional metadata about the feedback submission.
+    """
+
     timestamp: typing.Optional[float] = pydantic_v1.Field(default=None)
     """
     Optionally provide the timestamp representing when this feedback was collected. Used for reporting purposes.
vellum/types/workflow_execution_actual_string_request.py
CHANGED
@@ -23,6 +23,11 @@ class WorkflowExecutionActualStringRequest(pydantic_v1.BaseModel):
     Optionally provide a decimal number between 0.0 and 1.0 (inclusive) representing the quality of the output. 0 is the worst, 1 is the best.
     """
 
+    metadata: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+    """
+    Optionally provide additional metadata about the feedback submission.
+    """
+
     timestamp: typing.Optional[float] = pydantic_v1.Field(default=None)
     """
     Optionally provide the timestamp representing when this feedback was collected. Used for reporting purposes.
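All three actuals request models gain the same optional metadata field. A hedged sketch of how it might be populated; the dict keys are invented for illustration, and the request models' other (pre-existing) fields are not shown in these hunks, so the full constructor call is omitted:

# The new field is a plain Dict[str, Any] with no required keys.
feedback_metadata = {"source": "in-app-review", "reviewer_id": "usr_123"}

# Later, pass metadata=feedback_metadata when building a
# WorkflowExecutionActual*Request alongside its existing fields.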