vellum-ai 0.1.8__py3-none-any.whl → 0.1.10__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only.
- vellum/__init__.py +70 -0
- vellum/client.py +331 -5
- vellum/core/client_wrapper.py +1 -1
- vellum/errors/forbidden_error.py +3 -2
- vellum/resources/registered_prompts/client.py +2 -0
- vellum/types/__init__.py +71 -0
- vellum/types/block_type_enum.py +4 -4
- vellum/types/chat_message_role.py +4 -4
- vellum/types/deployment_read.py +6 -6
- vellum/types/deployment_status.py +3 -3
- vellum/types/document_document_to_document_index.py +5 -5
- vellum/types/document_index_read.py +4 -4
- vellum/types/document_index_status.py +2 -2
- vellum/types/document_read.py +5 -5
- vellum/types/enriched_normalized_completion.py +3 -3
- vellum/types/environment_enum.py +3 -3
- vellum/types/error_variable_value.py +29 -0
- vellum/types/execute_prompt_api_error_response.py +28 -0
- vellum/types/execute_prompt_event.py +56 -0
- vellum/types/execute_prompt_response.py +31 -0
- vellum/types/finish_reason_enum.py +3 -3
- vellum/types/fulfilled_enum.py +5 -0
- vellum/types/fulfilled_execute_prompt_event.py +36 -0
- vellum/types/fulfilled_execute_prompt_response.py +39 -0
- vellum/types/fulfilled_prompt_execution_meta.py +34 -0
- vellum/types/generate_options_request.py +1 -1
- vellum/types/indexing_state_enum.py +5 -5
- vellum/types/initiated_enum.py +5 -0
- vellum/types/initiated_execute_prompt_event.py +34 -0
- vellum/types/initiated_prompt_execution_meta.py +35 -0
- vellum/types/json_variable_value.py +28 -0
- vellum/types/logical_operator.py +18 -18
- vellum/types/logprobs_enum.py +2 -2
- vellum/types/metadata_filter_rule_combinator.py +2 -2
- vellum/types/model_version_read.py +13 -12
- vellum/types/model_version_read_status_enum.py +4 -4
- vellum/types/processing_failure_reason_enum.py +2 -2
- vellum/types/processing_state_enum.py +4 -4
- vellum/types/prompt_deployment_expand_meta_request_request.py +42 -0
- vellum/types/prompt_execution_meta.py +37 -0
- vellum/types/prompt_output.py +41 -0
- vellum/types/provider_enum.py +17 -12
- vellum/types/raw_prompt_execution_overrides_request.py +32 -0
- vellum/types/rejected_enum.py +5 -0
- vellum/types/rejected_execute_prompt_event.py +36 -0
- vellum/types/rejected_execute_prompt_response.py +39 -0
- vellum/types/rejected_prompt_execution_meta.py +34 -0
- vellum/types/scenario_input_type_enum.py +2 -2
- vellum/types/slim_document.py +7 -7
- vellum/types/streaming_enum.py +5 -0
- vellum/types/streaming_execute_prompt_event.py +40 -0
- vellum/types/streaming_prompt_execution_meta.py +32 -0
- vellum/types/string_variable_value.py +28 -0
- vellum/types/vellum_error_code_enum.py +3 -3
- vellum/types/vellum_variable_type.py +11 -6
- vellum/types/workflow_execution_event_error_code.py +6 -6
- vellum/types/workflow_execution_event_type.py +2 -2
- vellum/types/workflow_node_result_event_state.py +4 -4
- vellum/types/workflow_request_input_request.py +14 -1
- vellum/types/workflow_request_number_input_request.py +29 -0
- {vellum_ai-0.1.8.dist-info → vellum_ai-0.1.10.dist-info}/METADATA +1 -1
- {vellum_ai-0.1.8.dist-info → vellum_ai-0.1.10.dist-info}/RECORD +63 -38
- {vellum_ai-0.1.8.dist-info → vellum_ai-0.1.10.dist-info}/WHEEL +0 -0
vellum/types/indexing_state_enum.py
CHANGED
@@ -8,11 +8,11 @@ T_Result = typing.TypeVar("T_Result")
 
 class IndexingStateEnum(str, enum.Enum):
     """
-
-
-
-
-
+    - `AWAITING_PROCESSING` - Awaiting Processing
+    - `QUEUED` - Queued
+    - `INDEXING` - Indexing
+    - `INDEXED` - Indexed
+    - `FAILED` - Failed
     """
 
     AWAITING_PROCESSING = "AWAITING_PROCESSING"
vellum/types/initiated_execute_prompt_event.py
ADDED
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .initiated_prompt_execution_meta import InitiatedPromptExecutionMeta
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class InitiatedExecutePromptEvent(pydantic.BaseModel):
+    """
+    The initial data returned indicating that the response from the model has returned and begun streaming.
+    """
+
+    meta: typing.Optional[InitiatedPromptExecutionMeta]
+    execution_id: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
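A note on the import guard at the top of this and every other generated module: it keeps the models on the Pydantic v1 API no matter which major version of Pydantic is installed, since Pydantic 2 ships a v1 compatibility layer under pydantic.v1. A minimal standalone sketch of the same pattern:

try:
    import pydantic.v1 as pydantic  # Pydantic >= 2: use the bundled v1 compatibility layer
except ImportError:
    import pydantic  # Pydantic 1.x: the top-level package already is the v1 API

print(pydantic.VERSION)  # a v1-style API is available either way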
vellum/types/initiated_prompt_execution_meta.py
ADDED
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class InitiatedPromptExecutionMeta(pydantic.BaseModel):
+    """
+    The subset of the metadata tracked by Vellum during prompt execution that the request opted into with `expand_meta`.
+    """
+
+    model_name: typing.Optional[str]
+    latency: typing.Optional[int]
+    deployment_release_tag: typing.Optional[str]
+    prompt_version_id: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
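Taken together, the two new modules above describe the first event of a streamed prompt execution. A quick sketch of constructing and serializing one by hand (the ID and model name are made up; the exclude_unset default in the overridden json()/dict() drops any meta fields that were never set):

from vellum.types.initiated_execute_prompt_event import InitiatedExecutePromptEvent
from vellum.types.initiated_prompt_execution_meta import InitiatedPromptExecutionMeta

event = InitiatedExecutePromptEvent(
    execution_id="hypothetical-execution-id",
    meta=InitiatedPromptExecutionMeta(model_name="hypothetical-model"),
)
print(event.json())  # {"meta": {"model_name": "hypothetical-model"}, "execution_id": "hypothetical-execution-id"}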
vellum/types/json_variable_value.py
ADDED
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class JsonVariableValue(pydantic.BaseModel):
+    value: typing.Optional[typing.Dict[str, typing.Any]]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/logical_operator.py
CHANGED
@@ -8,24 +8,24 @@ T_Result = typing.TypeVar("T_Result")
 
 class LogicalOperator(str, enum.Enum):
     """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    - `=` - EQUALS
+    - `!=` - DOES_NOT_EQUAL
+    - `<` - LESS_THAN
+    - `>` - GREATER_THAN
+    - `<=` - LESS_THAN_OR_EQUAL_TO
+    - `>=` - GREATER_THAN_OR_EQUAL_TO
+    - `contains` - CONTAINS
+    - `beginsWith` - BEGINS_WITH
+    - `endsWith` - ENDS_WITH
+    - `doesNotContain` - DOES_NOT_CONTAIN
+    - `doesNotBeginWith` - DOES_NOT_BEGIN_WITH
+    - `doesNotEndWith` - DOES_NOT_END_WITH
+    - `null` - NULL
+    - `notNull` - NOT_NULL
+    - `in` - IN
+    - `notIn` - NOT_IN
+    - `between` - BETWEEN
+    - `notBetween` - NOT_BETWEEN
    """
 
     EQUALS = "="
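Since LogicalOperator is a str-valued enum, its members compare equal to (and serialize as) their wire values, which is what a metadata filter rule ultimately carries. A small illustrative sketch (the rule dict shape here is hypothetical, not Vellum's actual filter schema):

from vellum.types.logical_operator import LogicalOperator

op = LogicalOperator.GREATER_THAN_OR_EQUAL_TO
assert op == ">="  # str-enum members equal their values

rule = {"field": "page_count", "operator": op.value, "value": 10}  # hypothetical payload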
vellum/types/logprobs_enum.py
CHANGED
vellum/types/model_version_read.py
CHANGED
@@ -23,18 +23,19 @@ class ModelVersionRead(pydantic.BaseModel):
         description=(
             "Which LLM provider this model version is associated with.\n"
             "\n"
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
-            "
+            "- `ANTHROPIC` - Anthropic\n"
+            "- `AWS_BEDROCK` - AWS Bedrock\n"
+            "- `AZURE_OPENAI` - Azure OpenAI\n"
+            "- `COHERE` - Cohere\n"
+            "- `GOOGLE` - Google\n"
+            "- `HOSTED` - Hosted\n"
+            "- `MOSAICML` - MosaicML\n"
+            "- `OPENAI` - OpenAI\n"
+            "- `FIREWORKS_AI` - Fireworks AI\n"
+            "- `HUGGINGFACE` - HuggingFace\n"
+            "- `MYSTIC` - Mystic\n"
+            "- `PYQ` - Pyq\n"
+            "- `REPLICATE` - Replicate\n"
         )
     )
     external_id: str = pydantic.Field(
vellum/types/model_version_read_status_enum.py
CHANGED
@@ -8,10 +8,10 @@ T_Result = typing.TypeVar("T_Result")
 
 class ModelVersionReadStatusEnum(str, enum.Enum):
     """
-
-
-
-
+    - `CREATING` - Creating
+    - `READY` - Ready
+    - `CREATION_FAILED` - Creation Failed
+    - `DISABLED` - Disabled
     """
 
     CREATING = "CREATING"
vellum/types/processing_failure_reason_enum.py
CHANGED
@@ -8,8 +8,8 @@ T_Result = typing.TypeVar("T_Result")
 
 class ProcessingFailureReasonEnum(str, enum.Enum):
     """
-
-
+    - `EXCEEDED_CHARACTER_LIMIT` - Exceeded Character Limit
+    - `INVALID_FILE` - Invalid File
     """
 
     EXCEEDED_CHARACTER_LIMIT = "EXCEEDED_CHARACTER_LIMIT"
vellum/types/processing_state_enum.py
CHANGED
@@ -8,10 +8,10 @@ T_Result = typing.TypeVar("T_Result")
 
 class ProcessingStateEnum(str, enum.Enum):
     """
-
-
-
-
+    - `QUEUED` - Queued
+    - `PROCESSING` - Processing
+    - `PROCESSED` - Processed
+    - `FAILED` - Failed
     """
 
     QUEUED = "QUEUED"
vellum/types/prompt_deployment_expand_meta_request_request.py
ADDED
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PromptDeploymentExpandMetaRequestRequest(pydantic.BaseModel):
+    model_name: typing.Optional[bool] = pydantic.Field(
+        description="If enabled, the response will include the model identifier representing the ML Model invoked by the Prompt Deployment."
+    )
+    latency: typing.Optional[bool] = pydantic.Field(
+        description="If enabled, the response will include the time in nanoseconds it took to execute the Prompt Deployment."
+    )
+    deployment_release_tag: typing.Optional[bool] = pydantic.Field(
+        description="If enabled, the response will include the release tag of the Prompt Deployment."
+    )
+    prompt_version_id: typing.Optional[bool] = pydantic.Field(
+        description="If enabled, the response will include the ID of the Prompt Version backing the deployment."
+    )
+    finish_reason: typing.Optional[bool] = pydantic.Field(
+        description="If enabled, the response will include the reason provided by the model for why the execution finished."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
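Each flag on this request model opts the response into one piece of execution metadata. Because the overridden dict()/json() pass exclude_unset=True, only the flags you actually set make it into the request body, as in this sketch:

from vellum.types.prompt_deployment_expand_meta_request_request import (
    PromptDeploymentExpandMetaRequestRequest,
)

expand_meta = PromptDeploymentExpandMetaRequestRequest(latency=True, finish_reason=True)
print(expand_meta.dict())  # {'latency': True, 'finish_reason': True}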
vellum/types/prompt_execution_meta.py
ADDED
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .finish_reason_enum import FinishReasonEnum
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PromptExecutionMeta(pydantic.BaseModel):
+    """
+    The subset of the metadata tracked by Vellum during prompt execution that the request opted into with `expand_meta`.
+    """
+
+    model_name: typing.Optional[str]
+    latency: typing.Optional[int]
+    deployment_release_tag: typing.Optional[str]
+    prompt_version_id: typing.Optional[str]
+    finish_reason: typing.Optional[FinishReasonEnum]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/prompt_output.py
ADDED
@@ -0,0 +1,41 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+
+import typing
+
+import typing_extensions
+
+from .error_variable_value import ErrorVariableValue
+from .json_variable_value import JsonVariableValue
+from .string_variable_value import StringVariableValue
+
+
+class PromptOutput_String(StringVariableValue):
+    type: typing_extensions.Literal["STRING"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class PromptOutput_Json(JsonVariableValue):
+    type: typing_extensions.Literal["JSON"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+class PromptOutput_Error(ErrorVariableValue):
+    type: typing_extensions.Literal["ERROR"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
+PromptOutput = typing.Union[PromptOutput_String, PromptOutput_Json, PromptOutput_Error]
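PromptOutput is a tagged union: each variant pins the type field to a string literal, and smart_union lets Pydantic pick the matching variant when parsing. A sketch of parsing a raw output dict, assuming StringVariableValue carries an optional string value field (its body is not shown here) and using parse_obj_as, the Pydantic v1 helper that matches the import guard used throughout this package:

try:
    import pydantic.v1 as pydantic  # type: ignore
except ImportError:
    import pydantic  # type: ignore

from vellum.types.prompt_output import PromptOutput, PromptOutput_String

raw = {"type": "STRING", "value": "Hello, world!"}  # invented payload
output = pydantic.parse_obj_as(PromptOutput, raw)
assert isinstance(output, PromptOutput_String)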
vellum/types/provider_enum.py
CHANGED
@@ -8,18 +8,19 @@ T_Result = typing.TypeVar("T_Result")
 
 class ProviderEnum(str, enum.Enum):
     """
-
-
-
-
-
-
-
-
-
-
-
-
+    - `ANTHROPIC` - Anthropic
+    - `AWS_BEDROCK` - AWS Bedrock
+    - `AZURE_OPENAI` - Azure OpenAI
+    - `COHERE` - Cohere
+    - `GOOGLE` - Google
+    - `HOSTED` - Hosted
+    - `MOSAICML` - MosaicML
+    - `OPENAI` - OpenAI
+    - `FIREWORKS_AI` - Fireworks AI
+    - `HUGGINGFACE` - HuggingFace
+    - `MYSTIC` - Mystic
+    - `PYQ` - Pyq
+    - `REPLICATE` - Replicate
     """
 
     ANTHROPIC = "ANTHROPIC"
@@ -30,6 +31,7 @@ class ProviderEnum(str, enum.Enum):
     HOSTED = "HOSTED"
     MOSAICML = "MOSAICML"
     OPENAI = "OPENAI"
+    FIREWORKS_AI = "FIREWORKS_AI"
     HUGGINGFACE = "HUGGINGFACE"
     MYSTIC = "MYSTIC"
     PYQ = "PYQ"
@@ -45,6 +47,7 @@ class ProviderEnum(str, enum.Enum):
         hosted: typing.Callable[[], T_Result],
         mosaicml: typing.Callable[[], T_Result],
         openai: typing.Callable[[], T_Result],
+        fireworks_ai: typing.Callable[[], T_Result],
         huggingface: typing.Callable[[], T_Result],
         mystic: typing.Callable[[], T_Result],
         pyq: typing.Callable[[], T_Result],
@@ -66,6 +69,8 @@ class ProviderEnum(str, enum.Enum):
             return mosaicml()
         if self is ProviderEnum.OPENAI:
             return openai()
+        if self is ProviderEnum.FIREWORKS_AI:
+            return fireworks_ai()
         if self is ProviderEnum.HUGGINGFACE:
             return huggingface()
         if self is ProviderEnum.MYSTIC:
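The generated visit() method takes one callback per member and dispatches on self, so adding FIREWORKS_AI deliberately breaks any caller that enumerated providers exhaustively until the new member is handled. A sketch of an exhaustive call site (the callback names not visible in the hunks above are inferred from the member names):

from vellum.types.provider_enum import ProviderEnum

label = ProviderEnum.FIREWORKS_AI.visit(
    anthropic=lambda: "Anthropic",
    aws_bedrock=lambda: "AWS Bedrock",
    azure_openai=lambda: "Azure OpenAI",
    cohere=lambda: "Cohere",
    google=lambda: "Google",
    hosted=lambda: "Hosted",
    mosaicml=lambda: "MosaicML",
    openai=lambda: "OpenAI",
    fireworks_ai=lambda: "Fireworks AI",
    huggingface=lambda: "HuggingFace",
    mystic=lambda: "Mystic",
    pyq=lambda: "Pyq",
    replicate=lambda: "Replicate",
)
assert label == "Fireworks AI"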
vellum/types/raw_prompt_execution_overrides_request.py
ADDED
@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class RawPromptExecutionOverridesRequest(pydantic.BaseModel):
+    body: typing.Optional[typing.Dict[str, typing.Any]]
+    headers: typing.Optional[typing.Dict[str, typing.Optional[str]]] = pydantic.Field(
+        description="The raw headers to send to the model host."
+    )
+    url: typing.Optional[str] = pydantic.Field(description="The raw URL to send to the model host.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/rejected_execute_prompt_event.py
ADDED
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .rejected_prompt_execution_meta import RejectedPromptExecutionMeta
+from .vellum_error import VellumError
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class RejectedExecutePromptEvent(pydantic.BaseModel):
+    """
+    The final data returned indicating an error occurred during the stream.
+    """
+
+    error: VellumError
+    execution_id: str
+    meta: typing.Optional[RejectedPromptExecutionMeta]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/rejected_execute_prompt_response.py
ADDED
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .prompt_execution_meta import PromptExecutionMeta
+from .vellum_error import VellumError
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class RejectedExecutePromptResponse(pydantic.BaseModel):
+    """
+    The unsuccessful response from the model containing an error of what went wrong.
+    """
+
+    meta: typing.Optional[PromptExecutionMeta]
+    raw: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
+        description="The subset of the raw response from the model that the request opted into with `expand_raw`."
+    )
+    execution_id: str = pydantic.Field(description="The ID of the execution.")
+    error: VellumError
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
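On failure, the error field carries a structured VellumError alongside the usual execution metadata. A construction sketch, assuming VellumError (defined in vellum_error.py, unchanged in this diff) exposes message and code fields:

from vellum.types.rejected_execute_prompt_response import RejectedExecutePromptResponse
from vellum.types.vellum_error import VellumError

response = RejectedExecutePromptResponse(
    execution_id="hypothetical-execution-id",
    error=VellumError(message="Model host timed out", code="PROVIDER_ERROR"),  # assumed field names
)
print(response.error.message)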
vellum/types/rejected_prompt_execution_meta.py
ADDED
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .finish_reason_enum import FinishReasonEnum
+
+try:
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class RejectedPromptExecutionMeta(pydantic.BaseModel):
+    """
+    The subset of the metadata tracked by Vellum during prompt execution that the request opted into with `expand_meta`.
+    """
+
+    latency: typing.Optional[int]
+    finish_reason: typing.Optional[FinishReasonEnum]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/slim_document.py
CHANGED
@@ -28,22 +28,22 @@ class SlimDocument(pydantic.BaseModel):
         description=(
             "An enum value representing where this document is along its processing lifecycle. Note that this is different than its indexing lifecycle.\n"
             "\n"
-            "
-            "
-            "
-            "
+            "- `QUEUED` - Queued\n"
+            "- `PROCESSING` - Processing\n"
+            "- `PROCESSED` - Processed\n"
+            "- `FAILED` - Failed\n"
         )
     )
     processing_failure_reason: typing.Optional[ProcessingFailureReasonEnum] = pydantic.Field(
         description=(
             "An enum value representing why the document could not be processed. Is null unless processing_state is FAILED.\n"
             "\n"
-            "
-            "
+            "- `EXCEEDED_CHARACTER_LIMIT` - Exceeded Character Limit\n"
+            "- `INVALID_FILE` - Invalid File\n"
         )
     )
     status: typing.Optional[DocumentStatus] = pydantic.Field(
-        description=("The document's current status.\n" "\n" "
+        description=("The document's current status.\n" "\n" "- `ACTIVE` - Active\n")
     )
     keywords: typing.Optional[typing.List[str]] = pydantic.Field(
         description="A list of keywords associated with this document. Originally provided when uploading the document."