vellum_ai-0.0.1-py3-none-any.whl
- vellum/__init__.py +98 -0
- vellum/client.py +203 -0
- vellum/core/__init__.py +8 -0
- vellum/core/api_error.py +15 -0
- vellum/core/datetime_utils.py +28 -0
- vellum/core/jsonable_encoder.py +94 -0
- vellum/core/remove_none_from_headers.py +11 -0
- vellum/environment.py +17 -0
- vellum/py.typed +0 -0
- vellum/resources/__init__.py +5 -0
- vellum/resources/documents/__init__.py +2 -0
- vellum/resources/documents/client.py +135 -0
- vellum/resources/model_versions/__init__.py +2 -0
- vellum/resources/model_versions/client.py +53 -0
- vellum/types/__init__.py +91 -0
- vellum/types/block_type_enum.py +29 -0
- vellum/types/chat_role_enum.py +25 -0
- vellum/types/document.py +30 -0
- vellum/types/document_document_to_document_index.py +31 -0
- vellum/types/enriched_normalized_completion.py +39 -0
- vellum/types/finish_reason_enum.py +25 -0
- vellum/types/generate_error_response.py +24 -0
- vellum/types/generate_options_request.py +27 -0
- vellum/types/generate_request_request.py +31 -0
- vellum/types/generate_response.py +39 -0
- vellum/types/generate_result.py +35 -0
- vellum/types/generate_result_data.py +27 -0
- vellum/types/generate_result_error.py +24 -0
- vellum/types/indexing_state_enum.py +33 -0
- vellum/types/logprobs_enum.py +17 -0
- vellum/types/model_type_enum.py +17 -0
- vellum/types/model_version_build_config.py +34 -0
- vellum/types/model_version_exec_config_parameters.py +31 -0
- vellum/types/model_version_exec_config_read.py +35 -0
- vellum/types/model_version_read.py +43 -0
- vellum/types/model_version_read_status_enum.py +29 -0
- vellum/types/model_version_sandbox_snapshot.py +25 -0
- vellum/types/normalized_log_probs.py +26 -0
- vellum/types/normalized_token_log_probs.py +27 -0
- vellum/types/paginated_slim_document_list.py +28 -0
- vellum/types/processing_state_enum.py +29 -0
- vellum/types/prompt_template_block.py +27 -0
- vellum/types/prompt_template_block_data.py +26 -0
- vellum/types/prompt_template_block_properties.py +28 -0
- vellum/types/provider_enum.py +37 -0
- vellum/types/search_error_response.py +24 -0
- vellum/types/search_filters_request.py +26 -0
- vellum/types/search_request_options_request.py +36 -0
- vellum/types/search_response.py +27 -0
- vellum/types/search_result.py +30 -0
- vellum/types/search_result_merging_request.py +24 -0
- vellum/types/search_weights_request.py +25 -0
- vellum/types/slim_document.py +44 -0
- vellum/types/slim_document_status_enum.py +14 -0
- vellum/types/submit_completion_actual_request.py +46 -0
- vellum/types/submit_completion_actuals_error_response.py +24 -0
- vellum/types/upload_document_error_response.py +24 -0
- vellum/types/upload_document_response.py +24 -0
- vellum_ai-0.0.1.dist-info/METADATA +15 -0
- vellum_ai-0.0.1.dist-info/RECORD +61 -0
- vellum_ai-0.0.1.dist-info/WHEEL +4 -0
vellum/resources/model_versions/client.py
ADDED
@@ -0,0 +1,53 @@
# This file was auto-generated by Fern from our API Definition.

import urllib.parse
from json.decoder import JSONDecodeError

import httpx
import pydantic

from ...core.api_error import ApiError
from ...core.remove_none_from_headers import remove_none_from_headers
from ...environment import VellumEnvironment
from ...types.model_version_read import ModelVersionRead


class ModelVersionsClient:
    def __init__(self, *, environment: VellumEnvironment = VellumEnvironment.PRODUCTION, api_key: str):
        self._environment = environment
        self.api_key = api_key

    def retrieve(self, id: str) -> ModelVersionRead:
        _response = httpx.request(
            "GET",
            urllib.parse.urljoin(f"{self._environment.default}/", f"v1/model-versions/{id}"),
            headers=remove_none_from_headers({"X_API_KEY": self.api_key}),
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ModelVersionRead, _response.json())  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)


class AsyncModelVersionsClient:
    def __init__(self, *, environment: VellumEnvironment = VellumEnvironment.PRODUCTION, api_key: str):
        self._environment = environment
        self.api_key = api_key

    async def retrieve(self, id: str) -> ModelVersionRead:
        async with httpx.AsyncClient() as _client:
            _response = await _client.request(
                "GET",
                urllib.parse.urljoin(f"{self._environment.default}/", f"v1/model-versions/{id}"),
                headers=remove_none_from_headers({"X_API_KEY": self.api_key}),
            )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ModelVersionRead, _response.json())  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
vellum/types/__init__.py
ADDED
@@ -0,0 +1,91 @@
# This file was auto-generated by Fern from our API Definition.

from .block_type_enum import BlockTypeEnum
from .chat_role_enum import ChatRoleEnum
from .document import Document
from .document_document_to_document_index import DocumentDocumentToDocumentIndex
from .enriched_normalized_completion import EnrichedNormalizedCompletion
from .finish_reason_enum import FinishReasonEnum
from .generate_error_response import GenerateErrorResponse
from .generate_options_request import GenerateOptionsRequest
from .generate_request_request import GenerateRequestRequest
from .generate_response import GenerateResponse
from .generate_result import GenerateResult
from .generate_result_data import GenerateResultData
from .generate_result_error import GenerateResultError
from .indexing_state_enum import IndexingStateEnum
from .logprobs_enum import LogprobsEnum
from .model_type_enum import ModelTypeEnum
from .model_version_build_config import ModelVersionBuildConfig
from .model_version_exec_config_parameters import ModelVersionExecConfigParameters
from .model_version_exec_config_read import ModelVersionExecConfigRead
from .model_version_read import ModelVersionRead
from .model_version_read_status_enum import ModelVersionReadStatusEnum
from .model_version_sandbox_snapshot import ModelVersionSandboxSnapshot
from .normalized_log_probs import NormalizedLogProbs
from .normalized_token_log_probs import NormalizedTokenLogProbs
from .paginated_slim_document_list import PaginatedSlimDocumentList
from .processing_state_enum import ProcessingStateEnum
from .prompt_template_block import PromptTemplateBlock
from .prompt_template_block_data import PromptTemplateBlockData
from .prompt_template_block_properties import PromptTemplateBlockProperties
from .provider_enum import ProviderEnum
from .search_error_response import SearchErrorResponse
from .search_filters_request import SearchFiltersRequest
from .search_request_options_request import SearchRequestOptionsRequest
from .search_response import SearchResponse
from .search_result import SearchResult
from .search_result_merging_request import SearchResultMergingRequest
from .search_weights_request import SearchWeightsRequest
from .slim_document import SlimDocument
from .slim_document_status_enum import SlimDocumentStatusEnum
from .submit_completion_actual_request import SubmitCompletionActualRequest
from .submit_completion_actuals_error_response import SubmitCompletionActualsErrorResponse
from .upload_document_error_response import UploadDocumentErrorResponse
from .upload_document_response import UploadDocumentResponse

__all__ = [
    "BlockTypeEnum",
    "ChatRoleEnum",
    "Document",
    "DocumentDocumentToDocumentIndex",
    "EnrichedNormalizedCompletion",
    "FinishReasonEnum",
    "GenerateErrorResponse",
    "GenerateOptionsRequest",
    "GenerateRequestRequest",
    "GenerateResponse",
    "GenerateResult",
    "GenerateResultData",
    "GenerateResultError",
    "IndexingStateEnum",
    "LogprobsEnum",
    "ModelTypeEnum",
    "ModelVersionBuildConfig",
    "ModelVersionExecConfigParameters",
    "ModelVersionExecConfigRead",
    "ModelVersionRead",
    "ModelVersionReadStatusEnum",
    "ModelVersionSandboxSnapshot",
    "NormalizedLogProbs",
    "NormalizedTokenLogProbs",
    "PaginatedSlimDocumentList",
    "ProcessingStateEnum",
    "PromptTemplateBlock",
    "PromptTemplateBlockData",
    "PromptTemplateBlockProperties",
    "ProviderEnum",
    "SearchErrorResponse",
    "SearchFiltersRequest",
    "SearchRequestOptionsRequest",
    "SearchResponse",
    "SearchResult",
    "SearchResultMergingRequest",
    "SearchWeightsRequest",
    "SlimDocument",
    "SlimDocumentStatusEnum",
    "SubmitCompletionActualRequest",
    "SubmitCompletionActualsErrorResponse",
    "UploadDocumentErrorResponse",
    "UploadDocumentResponse",
]
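Because this module re-exports every generated model, downstream code can import types straight from vellum.types rather than from the individual submodules; a minimal sketch:

from vellum.types import BlockTypeEnum
from vellum.types.block_type_enum import BlockTypeEnum as ModuleLevelBlockTypeEnum

# The package-level name is a re-export of the same class, not a copy.
assert BlockTypeEnum is ModuleLevelBlockTypeEnum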
vellum/types/block_type_enum.py
ADDED
@@ -0,0 +1,29 @@
# This file was auto-generated by Fern from our API Definition.

import enum
import typing

T_Result = typing.TypeVar("T_Result")


class BlockTypeEnum(str, enum.Enum):
    CHAT_MESSAGE = "CHAT_MESSAGE"
    CHAT_HISTORY = "CHAT_HISTORY"
    TEXT = "TEXT"
    VARIABLE = "VARIABLE"

    def visit(
        self,
        chat_message: typing.Callable[[], T_Result],
        chat_history: typing.Callable[[], T_Result],
        text: typing.Callable[[], T_Result],
        variable: typing.Callable[[], T_Result],
    ) -> T_Result:
        if self is BlockTypeEnum.CHAT_MESSAGE:
            return chat_message()
        if self is BlockTypeEnum.CHAT_HISTORY:
            return chat_history()
        if self is BlockTypeEnum.TEXT:
            return text()
        if self is BlockTypeEnum.VARIABLE:
            return variable()
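The generated visit helper lets callers branch over every enum member with one callable per case; a minimal sketch using the members defined above:

from vellum.types.block_type_enum import BlockTypeEnum

block_type = BlockTypeEnum.TEXT
# Each handler is a zero-argument callable; visit() dispatches on the member's identity.
handled = block_type.visit(
    chat_message=lambda: "handled CHAT_MESSAGE",
    chat_history=lambda: "handled CHAT_HISTORY",
    text=lambda: "handled TEXT",
    variable=lambda: "handled VARIABLE",
)
print(handled)  # prints "handled TEXT"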
vellum/types/chat_role_enum.py
ADDED
@@ -0,0 +1,25 @@
# This file was auto-generated by Fern from our API Definition.

import enum
import typing

T_Result = typing.TypeVar("T_Result")


class ChatRoleEnum(str, enum.Enum):
    SYSTEM = "SYSTEM"
    ASSISTANT = "ASSISTANT"
    USER = "USER"

    def visit(
        self,
        system: typing.Callable[[], T_Result],
        assistant: typing.Callable[[], T_Result],
        user: typing.Callable[[], T_Result],
    ) -> T_Result:
        if self is ChatRoleEnum.SYSTEM:
            return system()
        if self is ChatRoleEnum.ASSISTANT:
            return assistant()
        if self is ChatRoleEnum.USER:
            return user()
vellum/types/document.py
ADDED
@@ -0,0 +1,30 @@
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

import pydantic

from ..core.datetime_utils import serialize_datetime


class Document(pydantic.BaseModel):
    id: str = pydantic.Field(description=("The ID of the document.\n"))
    label: str = pydantic.Field(description=("The human-readable name for the document.\n"))
    external_id: typing.Optional[str] = pydantic.Field(
        description=(
            "The unique ID of the document as represented in an external system and specified when it was originally uploaded.\n"
        )
    )

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        json_encoders = {dt.datetime: serialize_datetime}
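A short sketch of how these generated pydantic models behave in practice, using hypothetical field values; the overridden dict()/json() apply by_alias and exclude_unset by default, and Config.frozen = True makes instances immutable:

from vellum.types.document import Document

# Hypothetical values for illustration only.
doc = Document(id="doc-123", label="Employee Handbook", external_id="hr-0001")
print(doc.dict())  # {'id': 'doc-123', 'label': 'Employee Handbook', 'external_id': 'hr-0001'}
# doc.label = "Renamed"  # would raise an error, since frozen models cannot be mutated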
vellum/types/document_document_to_document_index.py
ADDED
@@ -0,0 +1,31 @@
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

import pydantic

from ..core.datetime_utils import serialize_datetime
from .indexing_state_enum import IndexingStateEnum


class DocumentDocumentToDocumentIndex(pydantic.BaseModel):
    id: str = pydantic.Field(description=("Vellum-generated ID that uniquely identifies this link.\n"))
    document_index_id: str = pydantic.Field(
        description=("Vellum-generated ID that uniquely identifies the index this document is included in.\n")
    )
    indexing_state: typing.Optional[IndexingStateEnum] = pydantic.Field(
        description=("An enum value representing where this document is along its indexing lifecycle for this index.\n")
    )

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/enriched_normalized_completion.py
ADDED
@@ -0,0 +1,39 @@
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

import pydantic

from ..core.datetime_utils import serialize_datetime
from .finish_reason_enum import FinishReasonEnum
from .normalized_log_probs import NormalizedLogProbs


class EnrichedNormalizedCompletion(pydantic.BaseModel):
    id: str = pydantic.Field(description=("The Vellum-generated ID of the completion.\n"))
    external_id: typing.Optional[str] = pydantic.Field(
        description=(
            "The external ID that was originally provided along with the generation request, which uniquely identifies this generation in an external system.\n"
        )
    )
    text: str = pydantic.Field(description=("The text generated by the LLM.\n"))
    finish_reason: FinishReasonEnum = pydantic.Field(description=("The reason the generation finished.\n"))
    logprobs: typing.Optional[NormalizedLogProbs] = pydantic.Field(
        description=("The logprobs of the completion. Only present if specified in the original request options.\n")
    )
    model_version_id: str = pydantic.Field(
        description=("The ID of the model version used to generate this completion.\n")
    )

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/finish_reason_enum.py
ADDED
@@ -0,0 +1,25 @@
# This file was auto-generated by Fern from our API Definition.

import enum
import typing

T_Result = typing.TypeVar("T_Result")


class FinishReasonEnum(str, enum.Enum):
    LENGTH = "LENGTH"
    STOP = "STOP"
    UNKNOWN = "UNKNOWN"

    def visit(
        self,
        length: typing.Callable[[], T_Result],
        stop: typing.Callable[[], T_Result],
        unknown: typing.Callable[[], T_Result],
    ) -> T_Result:
        if self is FinishReasonEnum.LENGTH:
            return length()
        if self is FinishReasonEnum.STOP:
            return stop()
        if self is FinishReasonEnum.UNKNOWN:
            return unknown()
vellum/types/generate_error_response.py
ADDED
@@ -0,0 +1,24 @@
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

import pydantic

from ..core.datetime_utils import serialize_datetime


class GenerateErrorResponse(pydantic.BaseModel):
    detail: str = pydantic.Field(description=("Details about why the request failed.\n"))

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/generate_options_request.py
ADDED
@@ -0,0 +1,27 @@
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

import pydantic

from ..core.datetime_utils import serialize_datetime
from .logprobs_enum import LogprobsEnum


class GenerateOptionsRequest(pydantic.BaseModel):
    logprobs: typing.Optional[LogprobsEnum] = pydantic.Field(
        description=("Which logprobs to include, if any. Defaults to NONE.\n")
    )

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/generate_request_request.py
ADDED
@@ -0,0 +1,31 @@
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

import pydantic

from ..core.datetime_utils import serialize_datetime


class GenerateRequestRequest(pydantic.BaseModel):
    input_values: typing.Dict[str, typing.Any] = pydantic.Field(
        description=("Key/value pairs for each template variable defined in the deployment's prompt.\n")
    )
    external_ids: typing.Optional[typing.List[str]] = pydantic.Field(
        description=(
            "Optionally include a unique identifier for each generation, as represented outside of Vellum. Note that this should generally be a list of length one.\n"
        )
    )

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/generate_response.py
ADDED
@@ -0,0 +1,39 @@
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

import pydantic

from ..core.datetime_utils import serialize_datetime
from .generate_result import GenerateResult


class GenerateResponse(pydantic.BaseModel):
    results: typing.List[GenerateResult]

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    @property
    def texts(self) -> typing.List[str]:
        return [
            completion.text
            for result in self.results
            for completion in (result.data.completions if result.data else [])
        ]

    @property
    def text(self) -> str:
        if len(self.texts) != 1:
            raise ValueError(f"Expected exactly one completion, but got {len(self.texts)}")
        return self.texts[0]

    class Config:
        frozen = True
        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/generate_result.py
ADDED
@@ -0,0 +1,35 @@
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

import pydantic

from ..core.datetime_utils import serialize_datetime
from .generate_result_data import GenerateResultData
from .generate_result_error import GenerateResultError


class GenerateResult(pydantic.BaseModel):
    data: typing.Optional[GenerateResultData] = pydantic.Field(
        description=(
            "An object containing the resulting generation. This key will be absent if the LLM provider experienced an error.\n"
        )
    )
    error: typing.Optional[GenerateResultError] = pydantic.Field(
        description=(
            "An object containing details about the error that occurred. This key will be absent if the LLM provider did not experience an error.\n"
        )
    )

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/generate_result_data.py
ADDED
@@ -0,0 +1,27 @@
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

import pydantic

from ..core.datetime_utils import serialize_datetime
from .enriched_normalized_completion import EnrichedNormalizedCompletion


class GenerateResultData(pydantic.BaseModel):
    completions: typing.List[EnrichedNormalizedCompletion] = pydantic.Field(
        description=("The generated completions. This will generally be a list of length one.\n")
    )

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        json_encoders = {dt.datetime: serialize_datetime}
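Putting the generation models above together shows how GenerateResponse's texts and text conveniences resolve; the IDs and completion text below are hypothetical:

from vellum.types.enriched_normalized_completion import EnrichedNormalizedCompletion
from vellum.types.finish_reason_enum import FinishReasonEnum
from vellum.types.generate_response import GenerateResponse
from vellum.types.generate_result import GenerateResult
from vellum.types.generate_result_data import GenerateResultData

# Hypothetical IDs and text for illustration only.
completion = EnrichedNormalizedCompletion(
    id="completion-1",
    external_id=None,
    text="Hello, world!",
    finish_reason=FinishReasonEnum.STOP,
    logprobs=None,
    model_version_id="model-version-1",
)
result = GenerateResult(data=GenerateResultData(completions=[completion]), error=None)
response = GenerateResponse(results=[result])

print(response.texts)  # ['Hello, world!']
print(response.text)   # 'Hello, world!'; text raises ValueError unless exactly one completion exists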
vellum/types/generate_result_error.py
ADDED
@@ -0,0 +1,24 @@
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

import pydantic

from ..core.datetime_utils import serialize_datetime


class GenerateResultError(pydantic.BaseModel):
    message: str = pydantic.Field(description=("The error message returned by the LLM provider.\n"))

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        json_encoders = {dt.datetime: serialize_datetime}
vellum/types/indexing_state_enum.py
ADDED
@@ -0,0 +1,33 @@
# This file was auto-generated by Fern from our API Definition.

import enum
import typing

T_Result = typing.TypeVar("T_Result")


class IndexingStateEnum(str, enum.Enum):
    AWAITING_PROCESSING = "AWAITING_PROCESSING"
    QUEUED = "QUEUED"
    INDEXING = "INDEXING"
    INDEXED = "INDEXED"
    FAILED = "FAILED"

    def visit(
        self,
        awaiting_processing: typing.Callable[[], T_Result],
        queued: typing.Callable[[], T_Result],
        indexing: typing.Callable[[], T_Result],
        indexed: typing.Callable[[], T_Result],
        failed: typing.Callable[[], T_Result],
    ) -> T_Result:
        if self is IndexingStateEnum.AWAITING_PROCESSING:
            return awaiting_processing()
        if self is IndexingStateEnum.QUEUED:
            return queued()
        if self is IndexingStateEnum.INDEXING:
            return indexing()
        if self is IndexingStateEnum.INDEXED:
            return indexed()
        if self is IndexingStateEnum.FAILED:
            return failed()
vellum/types/logprobs_enum.py
ADDED
@@ -0,0 +1,17 @@
# This file was auto-generated by Fern from our API Definition.

import enum
import typing

T_Result = typing.TypeVar("T_Result")


class LogprobsEnum(str, enum.Enum):
    ALL = "ALL"
    NONE = "NONE"

    def visit(self, all: typing.Callable[[], T_Result], none: typing.Callable[[], T_Result]) -> T_Result:
        if self is LogprobsEnum.ALL:
            return all()
        if self is LogprobsEnum.NONE:
            return none()
vellum/types/model_type_enum.py
ADDED
@@ -0,0 +1,17 @@
# This file was auto-generated by Fern from our API Definition.

import enum
import typing

T_Result = typing.TypeVar("T_Result")


class ModelTypeEnum(str, enum.Enum):
    GENERATE = "GENERATE"
    CLASSIFY = "CLASSIFY"

    def visit(self, generate: typing.Callable[[], T_Result], classify: typing.Callable[[], T_Result]) -> T_Result:
        if self is ModelTypeEnum.GENERATE:
            return generate()
        if self is ModelTypeEnum.CLASSIFY:
            return classify()
vellum/types/model_version_build_config.py
ADDED
@@ -0,0 +1,34 @@
# This file was auto-generated by Fern from our API Definition.

import datetime as dt
import typing

import pydantic

from ..core.datetime_utils import serialize_datetime
from .model_version_sandbox_snapshot import ModelVersionSandboxSnapshot


class ModelVersionBuildConfig(pydantic.BaseModel):
    base_model: str = pydantic.Field(
        description=(
            "The name of the base model used to create this model version, as identified by the LLM provider.\n"
        )
    )
    sandbox_snapshot: typing.Optional[ModelVersionSandboxSnapshot] = pydantic.Field(
        description=(
            "Information about the sandbox snapshot that was used to create this model version, if applicable.\n"
        )
    )

    def json(self, **kwargs: typing.Any) -> str:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().json(**kwargs_with_defaults)

    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
        return super().dict(**kwargs_with_defaults)

    class Config:
        frozen = True
        json_encoders = {dt.datetime: serialize_datetime}