vellum-ai 0.5.2__py3-none-any.whl → 0.6.1__py3-none-any.whl
- vellum/__init__.py +166 -18
- vellum/core/client_wrapper.py +1 -1
- vellum/resources/document_indexes/client.py +61 -52
- vellum/resources/documents/client.py +8 -4
- vellum/types/__init__.py +185 -17
- vellum/types/add_openai_api_key_enum.py +3 -0
- vellum/types/array_variable_value_item.py +0 -24
- vellum/types/array_vellum_value_item.py +82 -0
- vellum/types/{rejected_function_call.py → basic_vectorizer_intfloat_multilingual_e_5_large.py} +3 -6
- vellum/types/basic_vectorizer_intfloat_multilingual_e_5_large_request.py +29 -0
- vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_cos_v_1.py +29 -0
- vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_cos_v_1_request.py +29 -0
- vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_dot_v_1.py +29 -0
- vellum/types/basic_vectorizer_sentence_transformers_multi_qa_mpnet_base_dot_v_1_request.py +29 -0
- vellum/types/document_index_chunking.py +46 -0
- vellum/types/document_index_chunking_request.py +46 -0
- vellum/types/document_index_indexing_config.py +28 -0
- vellum/types/document_index_indexing_config_request.py +28 -0
- vellum/types/document_index_read.py +2 -4
- vellum/types/function_call.py +20 -18
- vellum/types/{fulfilled_function_call_request.py → function_call_request.py} +2 -2
- vellum/types/function_call_vellum_value.py +1 -1
- vellum/types/hkunlp_instructor_xl_enum.py +5 -0
- vellum/types/hkunlp_instructor_xl_vectorizer.py +30 -0
- vellum/types/hkunlp_instructor_xl_vectorizer_request.py +30 -0
- vellum/types/{fulfilled_function_call.py → image_vellum_value.py} +4 -5
- vellum/types/indexing_config_vectorizer.py +106 -0
- vellum/types/indexing_config_vectorizer_request.py +106 -0
- vellum/types/instructor_vectorizer_config.py +31 -0
- vellum/types/instructor_vectorizer_config_request.py +31 -0
- vellum/types/intfloat_multilingual_e_5_large_enum.py +5 -0
- vellum/types/merge_enum.py +5 -0
- vellum/types/{chat_history_variable_value.py → merge_node_result.py} +4 -3
- vellum/types/metric_enum.py +5 -0
- vellum/types/{search_results_variable_value.py → metric_node_result.py} +4 -3
- vellum/types/named_test_case_function_call_variable_value.py +2 -2
- vellum/types/named_test_case_function_call_variable_value_request.py +2 -2
- vellum/types/node_output_compiled_array_value.py +2 -2
- vellum/types/number_vellum_value.py +29 -0
- vellum/types/open_ai_vectorizer_config.py +30 -0
- vellum/types/open_ai_vectorizer_config_request.py +30 -0
- vellum/types/open_ai_vectorizer_text_embedding_3_large.py +30 -0
- vellum/types/open_ai_vectorizer_text_embedding_3_large_request.py +30 -0
- vellum/types/open_ai_vectorizer_text_embedding_3_small.py +30 -0
- vellum/types/open_ai_vectorizer_text_embedding_3_small_request.py +30 -0
- vellum/types/open_ai_vectorizer_text_embedding_ada_002.py +30 -0
- vellum/types/open_ai_vectorizer_text_embedding_ada_002_request.py +30 -0
- vellum/types/reducto_chunker_config.py +29 -0
- vellum/types/reducto_chunker_config_request.py +29 -0
- vellum/types/reducto_chunker_enum.py +5 -0
- vellum/types/reducto_chunking.py +30 -0
- vellum/types/reducto_chunking_request.py +30 -0
- vellum/types/search_result_document.py +1 -1
- vellum/types/search_result_document_request.py +5 -0
- vellum/types/sentence_chunker_config.py +30 -0
- vellum/types/sentence_chunker_config_request.py +30 -0
- vellum/types/sentence_chunker_enum.py +5 -0
- vellum/types/sentence_chunking.py +30 -0
- vellum/types/sentence_chunking_request.py +30 -0
- vellum/types/sentence_transformers_multi_qa_mpnet_base_cos_v_1_enum.py +5 -0
- vellum/types/sentence_transformers_multi_qa_mpnet_base_dot_v_1_enum.py +5 -0
- vellum/types/submit_completion_actual_request.py +5 -0
- vellum/types/test_case_function_call_variable_value.py +2 -2
- vellum/types/test_suite_run_execution_function_call_output.py +2 -2
- vellum/types/text_embedding_3_large_enum.py +5 -0
- vellum/types/text_embedding_3_small_enum.py +5 -0
- vellum/types/text_embedding_ada_002_enum.py +5 -0
- vellum/types/token_overlapping_window_chunker_config.py +30 -0
- vellum/types/token_overlapping_window_chunker_config_request.py +30 -0
- vellum/types/token_overlapping_window_chunker_enum.py +5 -0
- vellum/types/token_overlapping_window_chunking.py +30 -0
- vellum/types/token_overlapping_window_chunking_request.py +30 -0
- vellum/types/workflow_execution_actual_chat_history_request.py +5 -0
- vellum/types/workflow_execution_actual_json_request.py +5 -0
- vellum/types/workflow_execution_actual_string_request.py +5 -0
- vellum/types/workflow_node_result_data.py +24 -0
- vellum/types/workflow_output_array.py +2 -2
- {vellum_ai-0.5.2.dist-info → vellum_ai-0.6.1.dist-info}/METADATA +2 -3
- {vellum_ai-0.5.2.dist-info → vellum_ai-0.6.1.dist-info}/RECORD +81 -31
- {vellum_ai-0.5.2.dist-info → vellum_ai-0.6.1.dist-info}/LICENSE +0 -0
- {vellum_ai-0.5.2.dist-info → vellum_ai-0.6.1.dist-info}/WHEEL +0 -0
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .reducto_chunker_config_request import ReductoChunkerConfigRequest
+
+
+class ReductoChunkingRequest(pydantic_v1.BaseModel):
+    """
+    Reducto chunking
+    """
+
+    chunker_config: typing.Optional[ReductoChunkerConfigRequest] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
@@ -8,6 +8,11 @@ from ..core.pydantic_utilities import pydantic_v1
 
 
 class SearchResultDocumentRequest(pydantic_v1.BaseModel):
+    id: typing.Optional[str] = pydantic_v1.Field(default=None)
+    """
+    The ID of the document.
+    """
+
     label: str = pydantic_v1.Field()
     """
     The human-readable name for the document.
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+
+
+class SentenceChunkerConfig(pydantic_v1.BaseModel):
+    """
+    Configuration for sentence chunking
+    """
+
+    character_limit: typing.Optional[int] = None
+    min_overlap_ratio: typing.Optional[float] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+
+
+class SentenceChunkerConfigRequest(pydantic_v1.BaseModel):
+    """
+    Configuration for sentence chunking
+    """
+
+    character_limit: typing.Optional[int] = None
+    min_overlap_ratio: typing.Optional[float] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .sentence_chunker_config import SentenceChunkerConfig
+
+
+class SentenceChunking(pydantic_v1.BaseModel):
+    """
+    Sentence chunking
+    """
+
+    chunker_config: typing.Optional[SentenceChunkerConfig] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .sentence_chunker_config_request import SentenceChunkerConfigRequest
+
+
+class SentenceChunkingRequest(pydantic_v1.BaseModel):
+    """
+    Sentence chunking
+    """
+
+    chunker_config: typing.Optional[SentenceChunkerConfigRequest] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
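The chunking models added above are plain pydantic_v1 models, so they can be constructed directly and serialized through the overridden json()/dict() helpers. A minimal sketch with illustrative values; it assumes these classes are re-exported from vellum.types, as the updated vellum/types/__init__.py suggests:

from vellum.types import (
    ReductoChunkingRequest,
    SentenceChunkerConfigRequest,
    SentenceChunkingRequest,
)

# Sentence chunking with an explicit character limit and overlap ratio.
sentence_chunking = SentenceChunkingRequest(
    chunker_config=SentenceChunkerConfigRequest(character_limit=1000, min_overlap_ratio=0.5),
)

# Reducto chunking; chunker_config is optional and can be omitted entirely.
reducto_chunking = ReductoChunkingRequest()

# The overridden json() applies by_alias=True and exclude_unset=True by default.
print(sentence_chunking.json())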
@@ -33,6 +33,11 @@ class SubmitCompletionActualRequest(pydantic_v1.BaseModel):
     Optionally provide the timestamp representing when this feedback was collected. Used for reporting purposes.
     """
 
+    metadata: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+    """
+    Optionally provide additional metadata about the feedback submission.
+    """
+
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
         return super().json(**kwargs_with_defaults)
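With the new metadata field, a completion actual can carry arbitrary key/value details alongside its other fields. A hedged sketch, assuming the model's remaining fields stay optional as in 0.5.2 and that the class is importable from vellum.types:

from vellum.types import SubmitCompletionActualRequest

# timestamp and the new metadata field are both optional.
actual = SubmitCompletionActualRequest(
    timestamp=1715000000.0,
    metadata={"reviewer": "qa-team", "source": "manual-review"},
)
print(actual.json())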
@@ -5,7 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from ..core.pydantic_utilities import pydantic_v1
-from .
+from .function_call import FunctionCall
 
 
 class TestCaseFunctionCallVariableValue(pydantic_v1.BaseModel):
@@ -15,7 +15,7 @@ class TestCaseFunctionCallVariableValue(pydantic_v1.BaseModel):
 
     variable_id: str
     name: str
-    value: typing.Optional[
+    value: typing.Optional[FunctionCall] = None
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -5,7 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from ..core.pydantic_utilities import pydantic_v1
-from .
+from .function_call import FunctionCall
 
 
 class TestSuiteRunExecutionFunctionCallOutput(pydantic_v1.BaseModel):
@@ -14,7 +14,7 @@ class TestSuiteRunExecutionFunctionCallOutput(pydantic_v1.BaseModel):
     """
 
     name: str
-    value: typing.Optional[
+    value: typing.Optional[FunctionCall] = None
     output_variable_id: str
 
     def json(self, **kwargs: typing.Any) -> str:
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+
+
+class TokenOverlappingWindowChunkerConfig(pydantic_v1.BaseModel):
+    """
+    Configuration for token overlapping window chunking
+    """
+
+    token_limit: typing.Optional[int] = None
+    overlap_ratio: typing.Optional[float] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+
+
+class TokenOverlappingWindowChunkerConfigRequest(pydantic_v1.BaseModel):
+    """
+    Configuration for token overlapping window chunking
+    """
+
+    token_limit: typing.Optional[int] = None
+    overlap_ratio: typing.Optional[float] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .token_overlapping_window_chunker_config import TokenOverlappingWindowChunkerConfig
+
+
+class TokenOverlappingWindowChunking(pydantic_v1.BaseModel):
+    """
+    Token overlapping window chunking
+    """
+
+    chunker_config: typing.Optional[TokenOverlappingWindowChunkerConfig] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
@@ -0,0 +1,30 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import pydantic_v1
+from .token_overlapping_window_chunker_config_request import TokenOverlappingWindowChunkerConfigRequest
+
+
+class TokenOverlappingWindowChunkingRequest(pydantic_v1.BaseModel):
+    """
+    Token overlapping window chunking
+    """
+
+    chunker_config: typing.Optional[TokenOverlappingWindowChunkerConfigRequest] = None
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        extra = pydantic_v1.Extra.allow
+        json_encoders = {dt.datetime: serialize_datetime}
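As with the sentence and Reducto variants, the token overlapping window request models are ordinary pydantic_v1 models. The sketch below uses illustrative values and assumes re-export from vellum.types; it also shows how the overridden dict() drops unset optional fields:

from vellum.types import (
    TokenOverlappingWindowChunkerConfigRequest,
    TokenOverlappingWindowChunkingRequest,
)

chunking = TokenOverlappingWindowChunkingRequest(
    chunker_config=TokenOverlappingWindowChunkerConfigRequest(token_limit=512, overlap_ratio=0.25),
)

# dict() is overridden with by_alias=True and exclude_unset=True, so unset
# optional fields are dropped from the serialized payload.
print(chunking.dict())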
@@ -24,6 +24,11 @@ class WorkflowExecutionActualChatHistoryRequest(pydantic_v1.BaseModel):
     Optionally provide a decimal number between 0.0 and 1.0 (inclusive) representing the quality of the output. 0 is the worst, 1 is the best.
     """
 
+    metadata: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+    """
+    Optionally provide additional metadata about the feedback submission.
+    """
+
     timestamp: typing.Optional[float] = pydantic_v1.Field(default=None)
     """
     Optionally provide the timestamp representing when this feedback was collected. Used for reporting purposes.
@@ -23,6 +23,11 @@ class WorkflowExecutionActualJsonRequest(pydantic_v1.BaseModel):
     Optionally provide a decimal number between 0.0 and 1.0 (inclusive) representing the quality of the output. 0 is the worst, 1 is the best.
     """
 
+    metadata: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+    """
+    Optionally provide additional metadata about the feedback submission.
+    """
+
     timestamp: typing.Optional[float] = pydantic_v1.Field(default=None)
     """
     Optionally provide the timestamp representing when this feedback was collected. Used for reporting purposes.
@@ -23,6 +23,11 @@ class WorkflowExecutionActualStringRequest(pydantic_v1.BaseModel):
     Optionally provide a decimal number between 0.0 and 1.0 (inclusive) representing the quality of the output. 0 is the worst, 1 is the best.
     """
 
+    metadata: typing.Optional[typing.Dict[str, typing.Any]] = pydantic_v1.Field(default=None)
+    """
+    Optionally provide additional metadata about the feedback submission.
+    """
+
     timestamp: typing.Optional[float] = pydantic_v1.Field(default=None)
     """
     Optionally provide the timestamp representing when this feedback was collected. Used for reporting purposes.
@@ -7,6 +7,8 @@ import typing
 from .api_node_result import ApiNodeResult
 from .code_execution_node_result import CodeExecutionNodeResult
 from .conditional_node_result import ConditionalNodeResult
+from .merge_node_result import MergeNodeResult
+from .metric_node_result import MetricNodeResult
 from .prompt_node_result import PromptNodeResult
 from .search_node_result import SearchNodeResult
 from .subworkflow_node_result import SubworkflowNodeResult
@@ -84,6 +86,16 @@ class WorkflowNodeResultData_Terminal(TerminalNodeResult):
         populate_by_name = True
 
 
+class WorkflowNodeResultData_Merge(MergeNodeResult):
+    type: typing.Literal["MERGE"] = "MERGE"
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+        populate_by_name = True
+
+
 class WorkflowNodeResultData_Subworkflow(SubworkflowNodeResult):
     type: typing.Literal["SUBWORKFLOW"] = "SUBWORKFLOW"
 
@@ -94,6 +106,16 @@ class WorkflowNodeResultData_Subworkflow(SubworkflowNodeResult):
         populate_by_name = True
 
 
+class WorkflowNodeResultData_Metric(MetricNodeResult):
+    type: typing.Literal["METRIC"] = "METRIC"
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+        populate_by_name = True
+
+
 WorkflowNodeResultData = typing.Union[
     WorkflowNodeResultData_Prompt,
     WorkflowNodeResultData_Search,
@@ -102,5 +124,7 @@ WorkflowNodeResultData = typing.Union[
     WorkflowNodeResultData_Conditional,
     WorkflowNodeResultData_Api,
     WorkflowNodeResultData_Terminal,
+    WorkflowNodeResultData_Merge,
     WorkflowNodeResultData_Subworkflow,
+    WorkflowNodeResultData_Metric,
 ]
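Because the union is discriminated by the type literal on each member, code that inspects workflow node results can narrow on the new MERGE and METRIC variants the same way as the existing ones. A small hypothetical sketch, assuming WorkflowNodeResultData is importable from vellum.types:

from vellum.types import WorkflowNodeResultData


def describe_node_result(data: WorkflowNodeResultData) -> str:
    # Each union member carries a typing.Literal "type" field, so a plain
    # string comparison is enough to tell the new members apart.
    if data.type == "MERGE":
        return "merge node completed"
    if data.type == "METRIC":
        return "metric node completed"
    return f"{data.type} node completed"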
@@ -5,7 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from ..core.pydantic_utilities import pydantic_v1
-from .
+from .array_vellum_value_item import ArrayVellumValueItem
 
 
 class WorkflowOutputArray(pydantic_v1.BaseModel):
@@ -19,7 +19,7 @@ class WorkflowOutputArray(pydantic_v1.BaseModel):
     The output's name, as defined in the workflow
     """
 
-    value: typing.Optional[typing.List[
+    value: typing.Optional[typing.List[ArrayVellumValueItem]] = None
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: vellum-ai
-Version: 0.5.2
+Version: 0.6.1
 Summary: 
 Requires-Python: >=3.8,<4.0
 Classifier: Programming Language :: Python :: 3
@@ -18,8 +18,7 @@ Description-Content-Type: text/markdown
 # Vellum Python Library
 
 [![pypi](https://img.shields.io/pypi/v/vellum-ai.svg)](https://pypi.python.org/pypi/vellum-ai)
-[![fern shield](https://img.shields.io/badge/%F0%9F%8C%BF-SDK%20generated%20by%20Fern-brightgreen)](https://
-
+[![fern shield](https://img.shields.io/badge/%F0%9F%8C%BF-SDK%20generated%20by%20Fern-brightgreen)](https://buildwithfern.com/?utm_source=vellum-ai/vellum-client-python/readme)
 
 The Vellum Python Library provides access to the Vellum API from python.
 