vellum-ai 0.0.18__py3-none-any.whl → 0.0.25__py3-none-any.whl
- vellum/__init__.py +119 -16
- vellum/client.py +341 -77
- vellum/core/__init__.py +11 -2
- vellum/core/client_wrapper.py +27 -0
- vellum/core/remove_none_from_dict.py +11 -0
- vellum/errors/__init__.py +2 -1
- vellum/errors/forbidden_error.py +9 -0
- vellum/resources/deployments/client.py +35 -15
- vellum/resources/document_indexes/client.py +64 -16
- vellum/resources/documents/client.py +110 -35
- vellum/resources/model_versions/client.py +67 -25
- vellum/resources/registered_prompts/client.py +80 -16
- vellum/resources/sandboxes/client.py +90 -25
- vellum/resources/test_suites/client.py +90 -25
- vellum/types/__init__.py +108 -0
- vellum/types/conditional_node_result.py +25 -0
- vellum/types/conditional_node_result_data.py +24 -0
- vellum/types/deployment_node_result.py +25 -0
- vellum/types/deployment_node_result_data.py +26 -0
- vellum/types/deployment_read.py +2 -6
- vellum/types/document.py +3 -7
- vellum/types/document_document_to_document_index.py +2 -2
- vellum/types/document_index_read.py +3 -7
- vellum/types/enriched_normalized_completion.py +5 -9
- vellum/types/evaluation_params.py +1 -3
- vellum/types/evaluation_params_request.py +1 -3
- vellum/types/execute_workflow_stream_error_response.py +24 -0
- vellum/types/generate_error_response.py +1 -1
- vellum/types/generate_request.py +3 -7
- vellum/types/generate_result.py +2 -6
- vellum/types/generate_result_data.py +1 -1
- vellum/types/generate_result_error.py +1 -1
- vellum/types/model_version_build_config.py +2 -6
- vellum/types/model_version_compile_prompt_response.py +1 -1
- vellum/types/model_version_compiled_prompt.py +2 -4
- vellum/types/model_version_exec_config.py +3 -3
- vellum/types/model_version_read.py +7 -10
- vellum/types/model_version_sandbox_snapshot.py +3 -5
- vellum/types/prompt_node_result.py +25 -0
- vellum/types/prompt_node_result_data.py +26 -0
- vellum/types/prompt_template_block_properties.py +1 -0
- vellum/types/prompt_template_block_properties_request.py +3 -2
- vellum/types/prompt_template_block_request.py +1 -1
- vellum/types/prompt_template_input_variable.py +1 -1
- vellum/types/prompt_template_input_variable_request.py +1 -1
- vellum/types/provider_enum.py +5 -0
- vellum/types/register_prompt_error_response.py +1 -1
- vellum/types/register_prompt_prompt.py +2 -2
- vellum/types/register_prompt_prompt_info_request.py +1 -1
- vellum/types/register_prompt_response.py +5 -7
- vellum/types/registered_prompt_deployment.py +3 -3
- vellum/types/registered_prompt_model_version.py +2 -2
- vellum/types/registered_prompt_sandbox.py +2 -2
- vellum/types/registered_prompt_sandbox_snapshot.py +1 -1
- vellum/types/sandbox_node_result.py +25 -0
- vellum/types/sandbox_node_result_data.py +26 -0
- vellum/types/sandbox_scenario.py +2 -2
- vellum/types/scenario_input_request.py +1 -1
- vellum/types/search_error_response.py +1 -1
- vellum/types/search_filters_request.py +1 -1
- vellum/types/search_node_result.py +25 -0
- vellum/types/search_node_result_data.py +27 -0
- vellum/types/search_request_options_request.py +4 -6
- vellum/types/search_response.py +1 -1
- vellum/types/search_result.py +3 -3
- vellum/types/search_result_merging_request.py +1 -1
- vellum/types/search_weights_request.py +2 -2
- vellum/types/slim_document.py +5 -9
- vellum/types/submit_completion_actual_request.py +5 -15
- vellum/types/terminal_node_chat_history_result.py +26 -0
- vellum/types/terminal_node_json_result.py +25 -0
- vellum/types/terminal_node_result.py +25 -0
- vellum/types/terminal_node_result_data.py +25 -0
- vellum/types/terminal_node_result_output.py +40 -0
- vellum/types/terminal_node_string_result.py +25 -0
- vellum/types/test_suite_test_case.py +4 -8
- vellum/types/upload_document_response.py +1 -1
- vellum/types/workflow_event_error.py +26 -0
- vellum/types/workflow_execution_event_error_code.py +31 -0
- vellum/types/workflow_execution_node_result_event.py +27 -0
- vellum/types/workflow_execution_workflow_result_event.py +27 -0
- vellum/types/workflow_node_result_data.py +72 -0
- vellum/types/workflow_node_result_event.py +33 -0
- vellum/types/workflow_node_result_event_state.py +36 -0
- vellum/types/workflow_request_chat_history_input_request.py +28 -0
- vellum/types/workflow_request_input_request.py +40 -0
- vellum/types/workflow_request_json_input_request.py +27 -0
- vellum/types/workflow_request_string_input_request.py +27 -0
- vellum/types/workflow_result_event.py +31 -0
- vellum/types/workflow_result_event_output_data.py +40 -0
- vellum/types/workflow_result_event_output_data_chat_history.py +32 -0
- vellum/types/workflow_result_event_output_data_json.py +31 -0
- vellum/types/workflow_result_event_output_data_string.py +33 -0
- vellum/types/workflow_stream_event.py +29 -0
- {vellum_ai-0.0.18.dist-info → vellum_ai-0.0.25.dist-info}/METADATA +1 -1
- vellum_ai-0.0.25.dist-info/RECORD +149 -0
- vellum/core/remove_none_from_headers.py +0 -11
- vellum_ai-0.0.18.dist-info/RECORD +0 -113
- {vellum_ai-0.0.18.dist-info → vellum_ai-0.0.25.dist-info}/WHEEL +0 -0

vellum/types/prompt_node_result.py
ADDED
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+import pydantic
+
+from ..core.datetime_utils import serialize_datetime
+from .prompt_node_result_data import PromptNodeResultData
+
+
+class PromptNodeResult(pydantic.BaseModel):
+    data: PromptNodeResultData
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        json_encoders = {dt.datetime: serialize_datetime}

vellum/types/prompt_node_result_data.py
ADDED
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+import pydantic
+
+from ..core.datetime_utils import serialize_datetime
+
+
+class PromptNodeResultData(pydantic.BaseModel):
+    output_id: str
+    text: typing.Optional[str]
+    delta: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        json_encoders = {dt.datetime: serialize_datetime}
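
Note: the node-result models added above follow the same pattern as the rest of the Fern-generated SDK: frozen pydantic models whose `json()` and `dict()` overrides default to `by_alias=True` and `exclude_unset=True`. A minimal sketch of what that means in practice, assuming `PromptNodeResultData` is re-exported from `vellum.types` like the other generated types:

```python
# Sketch only: assumes vellum 0.0.25 re-exports PromptNodeResultData from vellum.types.
from vellum.types import PromptNodeResultData

result = PromptNodeResultData(output_id="some-output-id", text="Hello world")

# exclude_unset=True is applied by default, so the unset `delta` field is omitted.
print(result.json())  # {"output_id": "some-output-id", "text": "Hello world"}

# Config.frozen = True makes instances immutable and hashable (pydantic v1 behavior);
# attempting `result.text = "other"` would raise a TypeError.
```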

vellum/types/prompt_template_block_properties.py
CHANGED
@@ -20,6 +20,7 @@ class PromptTemplateBlockProperties(pydantic.BaseModel):
     function_name: typing.Optional[str]
     function_description: typing.Optional[str]
     function_parameters: typing.Optional[typing.Dict[str, typing.Any]]
+    function_forced: typing.Optional[bool]
     blocks: typing.Optional[typing.List[PromptTemplateBlock]]
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/prompt_template_block_properties_request.py
CHANGED
@@ -18,12 +18,13 @@ class PromptTemplateBlockPropertiesRequest(pydantic.BaseModel):
     template: typing.Optional[str]
     template_type: typing.Optional[ContentType]
     function_name: typing.Optional[str] = pydantic.Field(
-        description=
+        description='<span style="white-space: nowrap">`non-empty`</span>'
     )
     function_description: typing.Optional[str] = pydantic.Field(
-        description=
+        description='<span style="white-space: nowrap">`non-empty`</span>'
     )
     function_parameters: typing.Optional[typing.Dict[str, typing.Any]]
+    function_forced: typing.Optional[bool]
     blocks: typing.Optional[typing.List[PromptTemplateBlockRequest]]
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/prompt_template_block_request.py
CHANGED
@@ -12,7 +12,7 @@ from .block_type_enum import BlockTypeEnum
 
 
 class PromptTemplateBlockRequest(pydantic.BaseModel):
-    id: str = pydantic.Field(description=
+    id: str = pydantic.Field(description='<span style="white-space: nowrap">`non-empty`</span>')
     block_type: BlockTypeEnum
     properties: PromptTemplateBlockPropertiesRequest
 

vellum/types/prompt_template_input_variable.py
CHANGED
@@ -9,7 +9,7 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class PromptTemplateInputVariable(pydantic.BaseModel):
-    key: str = pydantic.Field(description=
+    key: str = pydantic.Field(description="The name of the input variable.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/prompt_template_input_variable_request.py
CHANGED
@@ -10,7 +10,7 @@ from ..core.datetime_utils import serialize_datetime
 
 class PromptTemplateInputVariableRequest(pydantic.BaseModel):
     key: str = pydantic.Field(
-        description=
+        description='The name of the input variable. <span style="white-space: nowrap">`non-empty`</span> '
     )
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/provider_enum.py
CHANGED
@@ -13,6 +13,7 @@ class ProviderEnum(str, enum.Enum):
     * `GOOGLE` - Google
     * `HOSTED` - Hosted
     * `MOSAICML` - MosaicML
+    * `MYSTIC` - Mystic
     * `OPENAI` - OpenAI
     * `PYQ` - Pyq
     """
@@ -22,6 +23,7 @@ class ProviderEnum(str, enum.Enum):
     GOOGLE = "GOOGLE"
     HOSTED = "HOSTED"
     MOSAICML = "MOSAICML"
+    MYSTIC = "MYSTIC"
     OPENAI = "OPENAI"
     PYQ = "PYQ"
 
@@ -32,6 +34,7 @@ class ProviderEnum(str, enum.Enum):
         google: typing.Callable[[], T_Result],
         hosted: typing.Callable[[], T_Result],
         mosaicml: typing.Callable[[], T_Result],
+        mystic: typing.Callable[[], T_Result],
         openai: typing.Callable[[], T_Result],
         pyq: typing.Callable[[], T_Result],
     ) -> T_Result:
@@ -45,6 +48,8 @@ class ProviderEnum(str, enum.Enum):
             return hosted()
         if self is ProviderEnum.MOSAICML:
            return mosaicml()
+        if self is ProviderEnum.MYSTIC:
+            return mystic()
         if self is ProviderEnum.OPENAI:
             return openai()
         if self is ProviderEnum.PYQ:
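
The `visit` helper that gains a `mystic` branch above is Fern's exhaustive-dispatch pattern: one callback per enum member, so adding a provider forces callers that use `visit` to handle it. A simplified, self-contained sketch of the pattern with a reduced member set (not the real `ProviderEnum.visit` signature, which takes one callback per supported provider):

```python
import enum
import typing

T_Result = typing.TypeVar("T_Result")


class MiniProviderEnum(str, enum.Enum):
    # Reduced member set for illustration; the real ProviderEnum has many more members.
    MOSAICML = "MOSAICML"
    MYSTIC = "MYSTIC"
    OPENAI = "OPENAI"

    def visit(
        self,
        mosaicml: typing.Callable[[], T_Result],
        mystic: typing.Callable[[], T_Result],
        openai: typing.Callable[[], T_Result],
    ) -> T_Result:
        # Dispatch to exactly one callback based on the enum member.
        if self is MiniProviderEnum.MOSAICML:
            return mosaicml()
        if self is MiniProviderEnum.MYSTIC:
            return mystic()
        if self is MiniProviderEnum.OPENAI:
            return openai()
        raise ValueError(f"Unhandled provider: {self}")


label = MiniProviderEnum.MYSTIC.visit(
    mosaicml=lambda: "MosaicML",
    mystic=lambda: "Mystic",
    openai=lambda: "OpenAI",
)
print(label)  # Mystic
```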

vellum/types/register_prompt_error_response.py
CHANGED
@@ -9,7 +9,7 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class RegisterPromptErrorResponse(pydantic.BaseModel):
-    detail: str = pydantic.Field(description=
+    detail: str = pydantic.Field(description="Details about why the request failed.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/register_prompt_prompt.py
CHANGED
@@ -9,8 +9,8 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class RegisterPromptPrompt(pydantic.BaseModel):
-    id: str = pydantic.Field(description=
-    label: str = pydantic.Field(description=
+    id: str = pydantic.Field(description="The ID of the generated prompt.")
+    label: str = pydantic.Field(description="A human-friendly label for the generated prompt.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/register_prompt_prompt_info_request.py
CHANGED
@@ -14,7 +14,7 @@ class RegisterPromptPromptInfoRequest(pydantic.BaseModel):
     prompt_syntax_version: typing.Optional[int]
     prompt_block_data: PromptTemplateBlockDataRequest
     input_variables: typing.List[PromptTemplateInputVariableRequest] = pydantic.Field(
-        description=
+        description="Names of the input variables specified in the prompt template."
     )
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/register_prompt_response.py
CHANGED
@@ -14,17 +14,15 @@ from .registered_prompt_sandbox_snapshot import RegisteredPromptSandboxSnapshot
 
 
 class RegisterPromptResponse(pydantic.BaseModel):
-    prompt: RegisterPromptPrompt = pydantic.Field(description=
+    prompt: RegisterPromptPrompt = pydantic.Field(description="Information about the generated prompt")
     sandbox_snapshot: RegisteredPromptSandboxSnapshot = pydantic.Field(
-        description=
+        description="Information about the generated sandbox snapshot"
     )
-    sandbox: RegisteredPromptSandbox = pydantic.Field(description=
+    sandbox: RegisteredPromptSandbox = pydantic.Field(description="Information about the generated sandbox")
     model_version: RegisteredPromptModelVersion = pydantic.Field(
-        description=
-    )
-    deployment: RegisteredPromptDeployment = pydantic.Field(
-        description=("Information about the generated deployment\n")
+        description="Information about the generated model version"
     )
+    deployment: RegisteredPromptDeployment = pydantic.Field(description="Information about the generated deployment")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/registered_prompt_deployment.py
CHANGED
@@ -9,9 +9,9 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class RegisteredPromptDeployment(pydantic.BaseModel):
-    id: str = pydantic.Field(description=
-    name: str = pydantic.Field(description=
-    label: str = pydantic.Field(description=
+    id: str = pydantic.Field(description="The ID of the generated deployment.")
+    name: str = pydantic.Field(description="A uniquely-identifying name for generated deployment.")
+    label: str = pydantic.Field(description="A human-friendly label for the generated deployment.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/registered_prompt_model_version.py
CHANGED
@@ -9,8 +9,8 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class RegisteredPromptModelVersion(pydantic.BaseModel):
-    id: str = pydantic.Field(description=
-    label: str = pydantic.Field(description=
+    id: str = pydantic.Field(description="The ID of the generated model version.")
+    label: str = pydantic.Field(description="A human-friendly label for the generated model version.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/registered_prompt_sandbox.py
CHANGED
@@ -9,8 +9,8 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class RegisteredPromptSandbox(pydantic.BaseModel):
-    id: str = pydantic.Field(description=
-    label: str = pydantic.Field(description=
+    id: str = pydantic.Field(description="The ID of the generated sandbox.")
+    label: str = pydantic.Field(description="A human-friendly label for the generated sandbox.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/registered_prompt_sandbox_snapshot.py
CHANGED
@@ -9,7 +9,7 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class RegisteredPromptSandboxSnapshot(pydantic.BaseModel):
-    id: str = pydantic.Field(description=
+    id: str = pydantic.Field(description="The ID of the generated sandbox snapshot.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/sandbox_node_result.py
ADDED
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+import pydantic
+
+from ..core.datetime_utils import serialize_datetime
+from .sandbox_node_result_data import SandboxNodeResultData
+
+
+class SandboxNodeResult(pydantic.BaseModel):
+    data: SandboxNodeResultData
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        json_encoders = {dt.datetime: serialize_datetime}

vellum/types/sandbox_node_result_data.py
ADDED
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+import pydantic
+
+from ..core.datetime_utils import serialize_datetime
+
+
+class SandboxNodeResultData(pydantic.BaseModel):
+    output_id: str
+    text: typing.Optional[str]
+    delta: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        json_encoders = {dt.datetime: serialize_datetime}

vellum/types/sandbox_scenario.py
CHANGED
@@ -12,8 +12,8 @@ from .scenario_input import ScenarioInput
 
 class SandboxScenario(pydantic.BaseModel):
     label: typing.Optional[str]
-    inputs: typing.List[ScenarioInput] = pydantic.Field(description=
-    id: str = pydantic.Field(description=
+    inputs: typing.List[ScenarioInput] = pydantic.Field(description="The inputs for the scenario")
+    id: str = pydantic.Field(description="The id of the scenario")
     metric_input_params: SandboxMetricInputParams
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/scenario_input_request.py
CHANGED
@@ -11,7 +11,7 @@ from .scenario_input_type_enum import ScenarioInputTypeEnum
 
 
 class ScenarioInputRequest(pydantic.BaseModel):
-    key: str = pydantic.Field(description=
+    key: str = pydantic.Field(description='<span style="white-space: nowrap">`non-empty`</span>')
     type: typing.Optional[ScenarioInputTypeEnum]
     value: typing.Optional[str]
     chat_history: typing.Optional[typing.List[ChatMessageRequest]]

vellum/types/search_error_response.py
CHANGED
@@ -9,7 +9,7 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class SearchErrorResponse(pydantic.BaseModel):
-    detail: str = pydantic.Field(description=
+    detail: str = pydantic.Field(description="Details about why the request failed.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/search_filters_request.py
CHANGED
@@ -10,7 +10,7 @@ from ..core.datetime_utils import serialize_datetime
 
 class SearchFiltersRequest(pydantic.BaseModel):
     external_ids: typing.Optional[typing.List[str]] = pydantic.Field(
-        description=
+        description="The document external IDs to filter by"
     )
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/search_node_result.py
ADDED
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+import pydantic
+
+from ..core.datetime_utils import serialize_datetime
+from .search_node_result_data import SearchNodeResultData
+
+
+class SearchNodeResult(pydantic.BaseModel):
+    data: SearchNodeResultData
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        json_encoders = {dt.datetime: serialize_datetime}

vellum/types/search_node_result_data.py
ADDED
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+import pydantic
+
+from ..core.datetime_utils import serialize_datetime
+
+
+class SearchNodeResultData(pydantic.BaseModel):
+    results_output_id: str
+    results: typing.List[typing.Dict[str, typing.Any]]
+    text_output_id: typing.Optional[str]
+    text: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        json_encoders = {dt.datetime: serialize_datetime}

vellum/types/search_request_options_request.py
CHANGED
@@ -12,16 +12,14 @@ from .search_weights_request import SearchWeightsRequest
 
 
 class SearchRequestOptionsRequest(pydantic.BaseModel):
-    limit: typing.Optional[int] = pydantic.Field(description=
+    limit: typing.Optional[int] = pydantic.Field(description="The maximum number of results to return.")
     weights: typing.Optional[SearchWeightsRequest] = pydantic.Field(
-        description=
+        description="The weights to use for the search. Must add up to 1.0."
     )
     result_merging: typing.Optional[SearchResultMergingRequest] = pydantic.Field(
-        description=
-    )
-    filters: typing.Optional[SearchFiltersRequest] = pydantic.Field(
-        description=("The filters to apply to the search.\n")
+        description="The configuration for merging results."
     )
+    filters: typing.Optional[SearchFiltersRequest] = pydantic.Field(description="The filters to apply to the search.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/search_response.py
CHANGED
@@ -11,7 +11,7 @@ from .search_result import SearchResult
 
 class SearchResponse(pydantic.BaseModel):
     results: typing.List[SearchResult] = pydantic.Field(
-        description=
+        description="The results of the search. Each result represents a chunk that matches the search query."
     )
 
     def json(self, **kwargs: typing.Any) -> str:

vellum/types/search_result.py
CHANGED
@@ -11,11 +11,11 @@ from .document import Document
 
 class SearchResult(pydantic.BaseModel):
     document: Document = pydantic.Field(
-        description=
+        description="The document that contains the chunk that matched the search query."
     )
-    text: str = pydantic.Field(description=
+    text: str = pydantic.Field(description="The text of the chunk that matched the search query.")
     keywords: typing.List[str]
-    score: float = pydantic.Field(description=
+    score: float = pydantic.Field(description="A score representing how well the chunk matches the search query.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/search_result_merging_request.py
CHANGED
@@ -9,7 +9,7 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class SearchResultMergingRequest(pydantic.BaseModel):
-    enabled: bool = pydantic.Field(description=
+    enabled: bool = pydantic.Field(description="Whether to enable merging results")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

vellum/types/search_weights_request.py
CHANGED
@@ -9,8 +9,8 @@ from ..core.datetime_utils import serialize_datetime
 
 
 class SearchWeightsRequest(pydantic.BaseModel):
-    semantic_similarity: float = pydantic.Field(description=
-    keywords: float = pydantic.Field(description=
+    semantic_similarity: float = pydantic.Field(description="The relative weight to give to semantic similarity")
+    keywords: float = pydantic.Field(description="The relative weight to give to keyword matches")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
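
Taken together, the search request models above (`SearchRequestOptionsRequest`, `SearchWeightsRequest`, `SearchResultMergingRequest`, `SearchFiltersRequest`) describe how search options are assembled. A hedged sketch, assuming these request types are re-exported from `vellum.types` like the rest of the generated models:

```python
# Sketch only: field names and descriptions come from the generated models shown above.
from vellum.types import (
    SearchFiltersRequest,
    SearchRequestOptionsRequest,
    SearchResultMergingRequest,
    SearchWeightsRequest,
)

options = SearchRequestOptionsRequest(
    limit=5,
    # Per the field description, the weights must add up to 1.0.
    weights=SearchWeightsRequest(semantic_similarity=0.8, keywords=0.2),
    result_merging=SearchResultMergingRequest(enabled=True),
    filters=SearchFiltersRequest(external_ids=["my-doc-1", "my-doc-2"]),  # hypothetical IDs
)
```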

vellum/types/slim_document.py
CHANGED
@@ -13,17 +13,15 @@ from .slim_document_status_enum import SlimDocumentStatusEnum
 
 
 class SlimDocument(pydantic.BaseModel):
-    id: str = pydantic.Field(description=
+    id: str = pydantic.Field(description="Vellum-generated ID that uniquely identifies this document.")
     external_id: typing.Optional[str] = pydantic.Field(
-        description=
+        description="The external ID that was originally provided when uploading the document."
     )
     last_uploaded_at: str = pydantic.Field(
-        description=
+        description="A timestamp representing when this document was most recently uploaded."
     )
     label: str = pydantic.Field(
-        description=
-        'Human-friendly name for this document. <span style="white-space: nowrap">`<= 1000 characters`</span> \n'
-    )
+        description='Human-friendly name for this document. <span style="white-space: nowrap">`<= 1000 characters`</span> '
     )
     processing_state: typing.Optional[ProcessingStateEnum] = pydantic.Field(
         description=(
@@ -46,9 +44,7 @@ class SlimDocument(pydantic.BaseModel):
         description=("The document's current status.\n" "\n" "* `ACTIVE` - Active\n")
     )
     keywords: typing.Optional[typing.List[str]] = pydantic.Field(
-        description=
-        "A list of keywords associated with this document. Originally provided when uploading the document.\n"
-    )
+        description="A list of keywords associated with this document. Originally provided when uploading the document."
     )
     document_to_document_indexes: typing.List[DocumentDocumentToDocumentIndex]
 

vellum/types/submit_completion_actual_request.py
CHANGED
@@ -10,27 +10,17 @@ from ..core.datetime_utils import serialize_datetime
 
 class SubmitCompletionActualRequest(pydantic.BaseModel):
     id: typing.Optional[str] = pydantic.Field(
-        description=
-        "The Vellum-generated ID of a previously generated completion. Must provide either this or external_id.\n"
-    )
+        description="The Vellum-generated ID of a previously generated completion. Must provide either this or external_id."
     )
     external_id: typing.Optional[str] = pydantic.Field(
-        description=
-        "The external ID that was originally provided when generating the completion that you'd now like to submit actuals for. Must provide either this or id.\n"
-    )
-    )
-    text: typing.Optional[str] = pydantic.Field(
-        description=("Text representing what the completion _should_ have been.\n")
+        description="The external ID that was originally provided when generating the completion that you'd now like to submit actuals for. Must provide either this or id."
     )
+    text: typing.Optional[str] = pydantic.Field(description="Text representing what the completion _should_ have been.")
     quality: typing.Optional[float] = pydantic.Field(
-        description=
-        "A number between 0 and 1 representing the quality of the completion. 0 is the worst, 1 is the best.\n"
-    )
+        description="A number between 0 and 1 representing the quality of the completion. 0 is the worst, 1 is the best."
     )
     timestamp: typing.Optional[str] = pydantic.Field(
-        description=
-        "Optionally provide the timestamp representing when this feedback was collected. Used for reporting purposes.\n"
-    )
+        description="Optionally provide the timestamp representing when this feedback was collected. Used for reporting purposes."
     )
 
     def json(self, **kwargs: typing.Any) -> str:
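
The flattened `SubmitCompletionActualRequest` descriptions above spell out the contract: each actual must reference a completion by either `id` or `external_id`, and `quality` is a number between 0 and 1. A minimal sketch of building such a request, assuming the type is re-exported from `vellum.types` (the client call that consumes it is outside this section of the diff):

```python
# Sketch only: identifies the completion by external_id, as the field descriptions allow.
from vellum.types import SubmitCompletionActualRequest

actual = SubmitCompletionActualRequest(
    external_id="completion-ext-123",  # hypothetical external ID from the original generate call
    text="The answer the model should have produced.",
    quality=0.25,  # 0 is the worst, 1 is the best
)
```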

vellum/types/terminal_node_chat_history_result.py
ADDED
@@ -0,0 +1,26 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+import pydantic
+
+from ..core.datetime_utils import serialize_datetime
+from .chat_message import ChatMessage
+
+
+class TerminalNodeChatHistoryResult(pydantic.BaseModel):
+    name: str = pydantic.Field(description="The unique name given to the terminal node that produced this output.")
+    value: typing.List[ChatMessage]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        json_encoders = {dt.datetime: serialize_datetime}

vellum/types/terminal_node_json_result.py
ADDED
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+import pydantic
+
+from ..core.datetime_utils import serialize_datetime
+
+
+class TerminalNodeJsonResult(pydantic.BaseModel):
+    name: str = pydantic.Field(description="The unique name given to the terminal node that produced this output.")
+    value: typing.Dict[str, typing.Any]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        json_encoders = {dt.datetime: serialize_datetime}

vellum/types/terminal_node_result.py
ADDED
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+import pydantic
+
+from ..core.datetime_utils import serialize_datetime
+from .terminal_node_result_data import TerminalNodeResultData
+
+
+class TerminalNodeResult(pydantic.BaseModel):
+    data: TerminalNodeResultData
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        json_encoders = {dt.datetime: serialize_datetime}