llama-cloud 0.1.33__py3-none-any.whl → 0.1.35__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of llama-cloud has been flagged as potentially problematic.
- llama_cloud/__init__.py +36 -0
- llama_cloud/client.py +3 -0
- llama_cloud/resources/__init__.py +6 -0
- llama_cloud/resources/beta/client.py +211 -8
- llama_cloud/resources/files/client.py +226 -0
- llama_cloud/resources/llama_extract/__init__.py +4 -0
- llama_cloud/resources/llama_extract/client.py +179 -0
- llama_cloud/resources/llama_extract/types/__init__.py +4 -0
- llama_cloud/resources/llama_extract/types/extract_stateless_request_data_schema.py +9 -0
- llama_cloud/resources/llama_extract/types/extract_stateless_request_data_schema_zero_value.py +7 -0
- llama_cloud/resources/organizations/client.py +10 -6
- llama_cloud/resources/parsing/client.py +24 -0
- llama_cloud/resources/users/__init__.py +2 -0
- llama_cloud/resources/users/client.py +155 -0
- llama_cloud/types/__init__.py +30 -0
- llama_cloud/types/data_source.py +2 -1
- llama_cloud/types/data_source_reader_version_metadata.py +32 -0
- llama_cloud/types/data_source_reader_version_metadata_reader_version.py +17 -0
- llama_cloud/types/extract_agent.py +3 -0
- llama_cloud/types/extract_config.py +7 -0
- llama_cloud/types/file_data.py +36 -0
- llama_cloud/types/legacy_parse_job_config.py +3 -0
- llama_cloud/types/llama_extract_settings.py +4 -0
- llama_cloud/types/llama_parse_parameters.py +3 -0
- llama_cloud/types/managed_open_ai_embedding.py +36 -0
- llama_cloud/types/managed_open_ai_embedding_config.py +34 -0
- llama_cloud/types/multimodal_parse_resolution.py +17 -0
- llama_cloud/types/paginated_response_quota_configuration.py +36 -0
- llama_cloud/types/parse_job_config.py +3 -0
- llama_cloud/types/pipeline_data_source.py +2 -1
- llama_cloud/types/pipeline_embedding_config.py +11 -0
- llama_cloud/types/quota_configuration.py +53 -0
- llama_cloud/types/quota_configuration_configuration_type.py +33 -0
- llama_cloud/types/quota_configuration_status.py +21 -0
- llama_cloud/types/quota_rate_limit_configuration_value.py +38 -0
- llama_cloud/types/quota_rate_limit_configuration_value_denominator_units.py +29 -0
- llama_cloud/types/struct_parse_conf.py +3 -0
- llama_cloud/types/update_user_response.py +33 -0
- llama_cloud/types/usage_response_active_alerts_item.py +4 -0
- llama_cloud/types/user_summary.py +38 -0
- llama_cloud/types/webhook_configuration_webhook_events_item.py +20 -0
- {llama_cloud-0.1.33.dist-info → llama_cloud-0.1.35.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.33.dist-info → llama_cloud-0.1.35.dist-info}/RECORD +45 -27
- {llama_cloud-0.1.33.dist-info → llama_cloud-0.1.35.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.33.dist-info → llama_cloud-0.1.35.dist-info}/WHEEL +0 -0
llama_cloud/types/managed_open_ai_embedding.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+import typing_extensions
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ManagedOpenAiEmbedding(pydantic.BaseModel):
+    model_name: typing.Optional[typing_extensions.Literal["openai-text-embedding-3-small"]]
+    embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
+    num_workers: typing.Optional[int]
+    class_name: typing.Optional[str]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/managed_open_ai_embedding_config.py
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .managed_open_ai_embedding import ManagedOpenAiEmbedding
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ManagedOpenAiEmbeddingConfig(pydantic.BaseModel):
+    component: typing.Optional[ManagedOpenAiEmbedding] = pydantic.Field(
+        description="Configuration for the Managed OpenAI embedding model."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
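The two new managed-embedding models compose directly: the config wraps the component model. A minimal sketch (module paths come from the file list above; the field values are illustrative):

from llama_cloud.types.managed_open_ai_embedding import ManagedOpenAiEmbedding
from llama_cloud.types.managed_open_ai_embedding_config import ManagedOpenAiEmbeddingConfig

# All fields are optional; fields left unset are dropped on serialization
# because json()/dict() default to exclude_unset=True.
config = ManagedOpenAiEmbeddingConfig(
    component=ManagedOpenAiEmbedding(
        model_name="openai-text-embedding-3-small",
        embed_batch_size=32,  # illustrative batch size
    )
)
print(config.json())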
llama_cloud/types/multimodal_parse_resolution.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class MultimodalParseResolution(str, enum.Enum):
+    MEDIUM = "medium"
+    HIGH = "high"
+
+    def visit(self, medium: typing.Callable[[], T_Result], high: typing.Callable[[], T_Result]) -> T_Result:
+        if self is MultimodalParseResolution.MEDIUM:
+            return medium()
+        if self is MultimodalParseResolution.HIGH:
+            return high()
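Like the other Fern-generated enums in this package, the new resolution enum exposes a visit() helper that dispatches to exactly one callback per member, so call sites must handle every variant. A small usage sketch (the DPI mapping is illustrative):

from llama_cloud.types.multimodal_parse_resolution import MultimodalParseResolution

resolution = MultimodalParseResolution.HIGH
# Adding a member later surfaces as a TypeError at existing visit() call sites.
dpi = resolution.visit(medium=lambda: 150, high=lambda: 300)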
llama_cloud/types/paginated_response_quota_configuration.py
@@ -0,0 +1,36 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .quota_configuration import QuotaConfiguration
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PaginatedResponseQuotaConfiguration(pydantic.BaseModel):
+    total: int
+    page: int
+    size: int
+    pages: int
+    items: typing.List[QuotaConfiguration]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
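The paginated wrapper follows the usual total/page/size/pages shape. A hypothetical paging loop is sketched below; fetch_page is a stand-in callable, since the client method behind this response type is not visible in this diff:

import typing

from llama_cloud.types.paginated_response_quota_configuration import PaginatedResponseQuotaConfiguration
from llama_cloud.types.quota_configuration import QuotaConfiguration

def collect_all(fetch_page) -> typing.List[QuotaConfiguration]:
    # fetch_page(page) -> PaginatedResponseQuotaConfiguration is hypothetical;
    # the real endpoint lives in the client code, which this diff does not show.
    items: typing.List[QuotaConfiguration] = []
    page = 1
    while True:
        resp: PaginatedResponseQuotaConfiguration = fetch_page(page)
        items.extend(resp.items)
        if page >= resp.pages:
            return items
        page += 1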
llama_cloud/types/parse_job_config.py
@@ -8,6 +8,7 @@ from .fail_page_mode import FailPageMode
 from .parse_job_config_priority import ParseJobConfigPriority
 from .parser_languages import ParserLanguages
 from .parsing_mode import ParsingMode
+from .webhook_configuration import WebhookConfiguration
 
 try:
     import pydantic
@@ -23,6 +24,7 @@ class ParseJobConfig(pydantic.BaseModel):
     Configuration for llamaparse job
     """
 
+    webhook_configurations: typing.Optional[typing.List[WebhookConfiguration]]
     priority: typing.Optional[ParseJobConfigPriority]
     custom_metadata: typing.Optional[typing.Dict[str, typing.Any]]
     resource_info: typing.Optional[typing.Dict[str, typing.Any]]
@@ -42,6 +44,7 @@ class ParseJobConfig(pydantic.BaseModel):
     fast_mode: typing.Optional[bool]
     skip_diagonal_text: typing.Optional[bool]
     preserve_layout_alignment_across_pages: typing.Optional[bool]
+    preserve_very_small_text: typing.Optional[bool]
     gpt_4_o_mode: typing.Optional[bool] = pydantic.Field(alias="gpt4o_mode")
     gpt_4_o_api_key: typing.Optional[str] = pydantic.Field(alias="gpt4o_api_key")
     do_not_unroll_columns: typing.Optional[bool]
llama_cloud/types/pipeline_data_source.py
@@ -5,6 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from .configurable_data_source_names import ConfigurableDataSourceNames
+from .data_source_reader_version_metadata import DataSourceReaderVersionMetadata
 from .pipeline_data_source_component import PipelineDataSourceComponent
 from .pipeline_data_source_custom_metadata_value import PipelineDataSourceCustomMetadataValue
 from .pipeline_data_source_status import PipelineDataSourceStatus
@@ -30,7 +31,7 @@ class PipelineDataSource(pydantic.BaseModel):
     source_type: ConfigurableDataSourceNames
     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineDataSourceCustomMetadataValue]]]
     component: PipelineDataSourceComponent = pydantic.Field(description="Component that implements the data source")
-    version_metadata: typing.Optional[
+    version_metadata: typing.Optional[DataSourceReaderVersionMetadata]
     project_id: str
     data_source_id: str = pydantic.Field(description="The ID of the data source.")
     pipeline_id: str = pydantic.Field(description="The ID of the pipeline.")
llama_cloud/types/pipeline_embedding_config.py
@@ -11,6 +11,7 @@ from .bedrock_embedding_config import BedrockEmbeddingConfig
 from .cohere_embedding_config import CohereEmbeddingConfig
 from .gemini_embedding_config import GeminiEmbeddingConfig
 from .hugging_face_inference_api_embedding_config import HuggingFaceInferenceApiEmbeddingConfig
+from .managed_open_ai_embedding_config import ManagedOpenAiEmbeddingConfig
 from .open_ai_embedding_config import OpenAiEmbeddingConfig
 from .vertex_ai_embedding_config import VertexAiEmbeddingConfig
 
@@ -60,6 +61,15 @@ class PipelineEmbeddingConfig_HuggingfaceApiEmbedding(HuggingFaceInferenceApiEmb
         allow_population_by_field_name = True
 
 
+class PipelineEmbeddingConfig_ManagedOpenaiEmbedding(ManagedOpenAiEmbeddingConfig):
+    type: typing_extensions.Literal["MANAGED_OPENAI_EMBEDDING"]
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+
+
 class PipelineEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
     type: typing_extensions.Literal["OPENAI_EMBEDDING"]
 
@@ -84,6 +94,7 @@ PipelineEmbeddingConfig = typing.Union[
     PipelineEmbeddingConfig_CohereEmbedding,
     PipelineEmbeddingConfig_GeminiEmbedding,
     PipelineEmbeddingConfig_HuggingfaceApiEmbedding,
+    PipelineEmbeddingConfig_ManagedOpenaiEmbedding,
     PipelineEmbeddingConfig_OpenaiEmbedding,
     PipelineEmbeddingConfig_VertexaiEmbedding,
 ]
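Because the PipelineEmbeddingConfig union is discriminated on the literal type field, the new variant is selected by passing type="MANAGED_OPENAI_EMBEDDING". A sketch with illustrative values:

from llama_cloud.types.managed_open_ai_embedding import ManagedOpenAiEmbedding
from llama_cloud.types.pipeline_embedding_config import PipelineEmbeddingConfig_ManagedOpenaiEmbedding

embedding_config = PipelineEmbeddingConfig_ManagedOpenaiEmbedding(
    type="MANAGED_OPENAI_EMBEDDING",  # discriminator for the union
    component=ManagedOpenAiEmbedding(embed_batch_size=32),  # illustrative batch size
)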
llama_cloud/types/quota_configuration.py
@@ -0,0 +1,53 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+import typing_extensions
+
+from ..core.datetime_utils import serialize_datetime
+from .quota_configuration_configuration_type import QuotaConfigurationConfigurationType
+from .quota_configuration_status import QuotaConfigurationStatus
+from .quota_rate_limit_configuration_value import QuotaRateLimitConfigurationValue
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class QuotaConfiguration(pydantic.BaseModel):
+    """
+    Full quota configuration model.
+    """
+
+    source_type: typing_extensions.Literal["organization"]
+    source_id: str = pydantic.Field(description="The source ID, e.g. the organization ID")
+    configuration_type: QuotaConfigurationConfigurationType = pydantic.Field(description="The quota configuration type")
+    configuration_value: QuotaRateLimitConfigurationValue = pydantic.Field(description="The quota configuration value")
+    configuration_metadata: typing.Optional[typing.Dict[str, typing.Any]]
+    started_at: typing.Optional[dt.datetime] = pydantic.Field(description="The start date of the quota")
+    ended_at: typing.Optional[dt.datetime]
+    idempotency_key: typing.Optional[str]
+    status: QuotaConfigurationStatus = pydantic.Field(
+        description="The status of the quota, i.e. 'ACTIVE' or 'INACTIVE'"
+    )
+    id: typing.Optional[str]
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/quota_configuration_configuration_type.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class QuotaConfigurationConfigurationType(str, enum.Enum):
+    """
+    The quota configuration type
+    """
+
+    RATE_LIMIT_PARSE_CONCURRENT_PREMIUM = "rate_limit_parse_concurrent_premium"
+    RATE_LIMIT_PARSE_CONCURRENT_DEFAULT = "rate_limit_parse_concurrent_default"
+    RATE_LIMIT_CONCURRENT_JOBS_IN_EXECUTION_DEFAULT = "rate_limit_concurrent_jobs_in_execution_default"
+    RATE_LIMIT_CONCURRENT_JOBS_IN_EXECUTION_DOC_INGEST = "rate_limit_concurrent_jobs_in_execution_doc_ingest"
+
+    def visit(
+        self,
+        rate_limit_parse_concurrent_premium: typing.Callable[[], T_Result],
+        rate_limit_parse_concurrent_default: typing.Callable[[], T_Result],
+        rate_limit_concurrent_jobs_in_execution_default: typing.Callable[[], T_Result],
+        rate_limit_concurrent_jobs_in_execution_doc_ingest: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is QuotaConfigurationConfigurationType.RATE_LIMIT_PARSE_CONCURRENT_PREMIUM:
+            return rate_limit_parse_concurrent_premium()
+        if self is QuotaConfigurationConfigurationType.RATE_LIMIT_PARSE_CONCURRENT_DEFAULT:
+            return rate_limit_parse_concurrent_default()
+        if self is QuotaConfigurationConfigurationType.RATE_LIMIT_CONCURRENT_JOBS_IN_EXECUTION_DEFAULT:
+            return rate_limit_concurrent_jobs_in_execution_default()
+        if self is QuotaConfigurationConfigurationType.RATE_LIMIT_CONCURRENT_JOBS_IN_EXECUTION_DOC_INGEST:
+            return rate_limit_concurrent_jobs_in_execution_doc_ingest()
llama_cloud/types/quota_configuration_status.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class QuotaConfigurationStatus(str, enum.Enum):
+    """
+    The status of the quota, i.e. 'ACTIVE' or 'INACTIVE'
+    """
+
+    ACTIVE = "ACTIVE"
+    INACTIVE = "INACTIVE"
+
+    def visit(self, active: typing.Callable[[], T_Result], inactive: typing.Callable[[], T_Result]) -> T_Result:
+        if self is QuotaConfigurationStatus.ACTIVE:
+            return active()
+        if self is QuotaConfigurationStatus.INACTIVE:
+            return inactive()
llama_cloud/types/quota_rate_limit_configuration_value.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .quota_rate_limit_configuration_value_denominator_units import QuotaRateLimitConfigurationValueDenominatorUnits
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class QuotaRateLimitConfigurationValue(pydantic.BaseModel):
+    """
+    Quota-specific wrapper for default rate limit configuration.
+    """
+
+    numerator: int = pydantic.Field(description="The rate numerator")
+    denominator: typing.Optional[int]
+    denominator_units: typing.Optional[QuotaRateLimitConfigurationValueDenominatorUnits]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/quota_rate_limit_configuration_value_denominator_units.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class QuotaRateLimitConfigurationValueDenominatorUnits(str, enum.Enum):
+    SECOND = "second"
+    MINUTE = "minute"
+    HOUR = "hour"
+    DAY = "day"
+
+    def visit(
+        self,
+        second: typing.Callable[[], T_Result],
+        minute: typing.Callable[[], T_Result],
+        hour: typing.Callable[[], T_Result],
+        day: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is QuotaRateLimitConfigurationValueDenominatorUnits.SECOND:
+            return second()
+        if self is QuotaRateLimitConfigurationValueDenominatorUnits.MINUTE:
+            return minute()
+        if self is QuotaRateLimitConfigurationValueDenominatorUnits.HOUR:
+            return hour()
+        if self is QuotaRateLimitConfigurationValueDenominatorUnits.DAY:
+            return day()
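Putting the quota pieces together: a QuotaConfiguration ties a rate-limit value to a source and a status. A minimal sketch with illustrative IDs and numbers:

from llama_cloud.types.quota_configuration import QuotaConfiguration
from llama_cloud.types.quota_configuration_configuration_type import QuotaConfigurationConfigurationType
from llama_cloud.types.quota_configuration_status import QuotaConfigurationStatus
from llama_cloud.types.quota_rate_limit_configuration_value import QuotaRateLimitConfigurationValue
from llama_cloud.types.quota_rate_limit_configuration_value_denominator_units import (
    QuotaRateLimitConfigurationValueDenominatorUnits,
)

quota = QuotaConfiguration(
    source_type="organization",
    source_id="org_123",  # illustrative organization ID
    configuration_type=QuotaConfigurationConfigurationType.RATE_LIMIT_PARSE_CONCURRENT_DEFAULT,
    # an example rate of 20 per minute
    configuration_value=QuotaRateLimitConfigurationValue(
        numerator=20,
        denominator=1,
        denominator_units=QuotaRateLimitConfigurationValueDenominatorUnits.MINUTE,
    ),
    status=QuotaConfigurationStatus.ACTIVE,
)
print(quota.json())  # exclude_unset=True drops the optional fields left unset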
llama_cloud/types/struct_parse_conf.py
@@ -33,6 +33,9 @@ class StructParseConf(pydantic.BaseModel):
     struct_mode: typing.Optional[StructMode] = pydantic.Field(
         description="The struct mode to use for the structured parsing."
     )
+    fetch_logprobs: typing.Optional[bool] = pydantic.Field(
+        description="Whether to fetch logprobs for the structured parsing."
+    )
     handle_missing: typing.Optional[bool] = pydantic.Field(
         description="Whether to handle missing fields in the schema."
     )
llama_cloud/types/update_user_response.py
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .user_summary import UserSummary
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class UpdateUserResponse(pydantic.BaseModel):
+    user: UserSummary = pydantic.Field(description="The user of the response")
+    message: str = pydantic.Field(description="The message of the response")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/usage_response_active_alerts_item.py
@@ -11,6 +11,7 @@ class UsageResponseActiveAlertsItem(str, enum.Enum):
     PLAN_SPEND_LIMIT_SOFT_ALERT = "plan_spend_limit_soft_alert"
     CONFIGURED_SPEND_LIMIT_EXCEEDED = "configured_spend_limit_exceeded"
     FREE_CREDITS_EXHAUSTED = "free_credits_exhausted"
+    INTERNAL_SPENDING_ALERT = "internal_spending_alert"
 
     def visit(
         self,
@@ -18,6 +19,7 @@ class UsageResponseActiveAlertsItem(str, enum.Enum):
         plan_spend_limit_soft_alert: typing.Callable[[], T_Result],
         configured_spend_limit_exceeded: typing.Callable[[], T_Result],
         free_credits_exhausted: typing.Callable[[], T_Result],
+        internal_spending_alert: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is UsageResponseActiveAlertsItem.PLAN_SPEND_LIMIT_EXCEEDED:
             return plan_spend_limit_exceeded()
@@ -27,3 +29,5 @@ class UsageResponseActiveAlertsItem(str, enum.Enum):
             return configured_spend_limit_exceeded()
         if self is UsageResponseActiveAlertsItem.FREE_CREDITS_EXHAUSTED:
             return free_credits_exhausted()
+        if self is UsageResponseActiveAlertsItem.INTERNAL_SPENDING_ALERT:
+            return internal_spending_alert()
llama_cloud/types/user_summary.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class UserSummary(pydantic.BaseModel):
+    id: str = pydantic.Field(description="User's unique identifier")
+    email: str = pydantic.Field(description="User's email address")
+    password_hash: typing.Optional[str]
+    first_name: typing.Optional[str]
+    last_name: typing.Optional[str]
+    last_login: typing.Optional[dt.datetime]
+    created_at: dt.datetime = pydantic.Field(description="When the user was created")
+    updated_at: dt.datetime = pydantic.Field(description="When the user was last updated")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/webhook_configuration_webhook_events_item.py
@@ -12,6 +12,11 @@ class WebhookConfigurationWebhookEventsItem(str, enum.Enum):
     EXTRACT_ERROR = "extract.error"
     EXTRACT_PARTIAL_SUCCESS = "extract.partial_success"
     EXTRACT_CANCELLED = "extract.cancelled"
+    PARSE_PENDING = "parse.pending"
+    PARSE_SUCCESS = "parse.success"
+    PARSE_ERROR = "parse.error"
+    PARSE_PARTIAL_SUCCESS = "parse.partial_success"
+    PARSE_CANCELLED = "parse.cancelled"
     UNMAPPED_EVENT = "unmapped_event"
 
     def visit(
@@ -21,6 +26,11 @@ class WebhookConfigurationWebhookEventsItem(str, enum.Enum):
         extract_error: typing.Callable[[], T_Result],
         extract_partial_success: typing.Callable[[], T_Result],
         extract_cancelled: typing.Callable[[], T_Result],
+        parse_pending: typing.Callable[[], T_Result],
+        parse_success: typing.Callable[[], T_Result],
+        parse_error: typing.Callable[[], T_Result],
+        parse_partial_success: typing.Callable[[], T_Result],
+        parse_cancelled: typing.Callable[[], T_Result],
         unmapped_event: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is WebhookConfigurationWebhookEventsItem.EXTRACT_PENDING:
@@ -33,5 +43,15 @@ class WebhookConfigurationWebhookEventsItem(str, enum.Enum):
             return extract_partial_success()
         if self is WebhookConfigurationWebhookEventsItem.EXTRACT_CANCELLED:
             return extract_cancelled()
+        if self is WebhookConfigurationWebhookEventsItem.PARSE_PENDING:
+            return parse_pending()
+        if self is WebhookConfigurationWebhookEventsItem.PARSE_SUCCESS:
+            return parse_success()
+        if self is WebhookConfigurationWebhookEventsItem.PARSE_ERROR:
+            return parse_error()
+        if self is WebhookConfigurationWebhookEventsItem.PARSE_PARTIAL_SUCCESS:
+            return parse_partial_success()
+        if self is WebhookConfigurationWebhookEventsItem.PARSE_CANCELLED:
+            return parse_cancelled()
         if self is WebhookConfigurationWebhookEventsItem.UNMAPPED_EVENT:
             return unmapped_event()
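The five new parse.* events mirror the existing extract.* lifecycle. Since the enum derives from str, an event name from a webhook payload can be mapped straight onto it; a small sketch:

from llama_cloud.types.webhook_configuration_webhook_events_item import WebhookConfigurationWebhookEventsItem

event = WebhookConfigurationWebhookEventsItem("parse.success")  # e.g. from a webhook payload
assert event is WebhookConfigurationWebhookEventsItem.PARSE_SUCCESS
# Unknown event names raise ValueError; UNMAPPED_EVENT ("unmapped_event")
# appears to serve as the catch-all member for events without a mapping.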