llama-cloud 0.1.34__py3-none-any.whl → 0.1.35__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud might be problematic.

Files changed (41)
  1. llama_cloud/__init__.py +34 -0
  2. llama_cloud/client.py +3 -0
  3. llama_cloud/resources/__init__.py +6 -0
  4. llama_cloud/resources/beta/client.py +211 -8
  5. llama_cloud/resources/files/client.py +226 -0
  6. llama_cloud/resources/llama_extract/__init__.py +4 -0
  7. llama_cloud/resources/llama_extract/client.py +179 -0
  8. llama_cloud/resources/llama_extract/types/__init__.py +4 -0
  9. llama_cloud/resources/llama_extract/types/extract_stateless_request_data_schema.py +9 -0
  10. llama_cloud/resources/llama_extract/types/extract_stateless_request_data_schema_zero_value.py +7 -0
  11. llama_cloud/resources/parsing/client.py +24 -0
  12. llama_cloud/resources/users/__init__.py +2 -0
  13. llama_cloud/resources/users/client.py +155 -0
  14. llama_cloud/types/__init__.py +28 -0
  15. llama_cloud/types/data_source_reader_version_metadata.py +2 -1
  16. llama_cloud/types/data_source_reader_version_metadata_reader_version.py +17 -0
  17. llama_cloud/types/extract_agent.py +3 -0
  18. llama_cloud/types/extract_config.py +4 -0
  19. llama_cloud/types/file_data.py +36 -0
  20. llama_cloud/types/legacy_parse_job_config.py +3 -0
  21. llama_cloud/types/llama_extract_settings.py +4 -0
  22. llama_cloud/types/llama_parse_parameters.py +3 -0
  23. llama_cloud/types/managed_open_ai_embedding.py +36 -0
  24. llama_cloud/types/managed_open_ai_embedding_config.py +34 -0
  25. llama_cloud/types/multimodal_parse_resolution.py +17 -0
  26. llama_cloud/types/paginated_response_quota_configuration.py +36 -0
  27. llama_cloud/types/parse_job_config.py +3 -0
  28. llama_cloud/types/pipeline_embedding_config.py +11 -0
  29. llama_cloud/types/quota_configuration.py +53 -0
  30. llama_cloud/types/quota_configuration_configuration_type.py +33 -0
  31. llama_cloud/types/quota_configuration_status.py +21 -0
  32. llama_cloud/types/quota_rate_limit_configuration_value.py +38 -0
  33. llama_cloud/types/quota_rate_limit_configuration_value_denominator_units.py +29 -0
  34. llama_cloud/types/update_user_response.py +33 -0
  35. llama_cloud/types/usage_response_active_alerts_item.py +4 -0
  36. llama_cloud/types/user_summary.py +38 -0
  37. llama_cloud/types/webhook_configuration_webhook_events_item.py +20 -0
  38. {llama_cloud-0.1.34.dist-info → llama_cloud-0.1.35.dist-info}/METADATA +1 -1
  39. {llama_cloud-0.1.34.dist-info → llama_cloud-0.1.35.dist-info}/RECORD +41 -24
  40. {llama_cloud-0.1.34.dist-info → llama_cloud-0.1.35.dist-info}/LICENSE +0 -0
  41. {llama_cloud-0.1.34.dist-info → llama_cloud-0.1.35.dist-info}/WHEEL +0 -0
llama_cloud/resources/users/client.py
@@ -0,0 +1,155 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import typing
+ import urllib.parse
+ from json.decoder import JSONDecodeError
+
+ from ...core.api_error import ApiError
+ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+ from ...core.jsonable_encoder import jsonable_encoder
+ from ...errors.unprocessable_entity_error import UnprocessableEntityError
+ from ...types.http_validation_error import HttpValidationError
+ from ...types.update_user_response import UpdateUserResponse
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+ # this is used as the default value for optional parameters
+ OMIT = typing.cast(typing.Any, ...)
+
+
+ class UsersClient:
+     def __init__(self, *, client_wrapper: SyncClientWrapper):
+         self._client_wrapper = client_wrapper
+
+     def update_user(
+         self,
+         user_id: str,
+         *,
+         first_name: typing.Optional[str] = OMIT,
+         last_name: typing.Optional[str] = OMIT,
+         email: typing.Optional[str] = OMIT,
+         current_password: typing.Optional[str] = OMIT,
+         new_password: typing.Optional[str] = OMIT,
+     ) -> UpdateUserResponse:
+         """
+         Parameters:
+             - user_id: str.
+
+             - first_name: typing.Optional[str].
+
+             - last_name: typing.Optional[str].
+
+             - email: typing.Optional[str].
+
+             - current_password: typing.Optional[str].
+
+             - new_password: typing.Optional[str].
+         ---
+         from llama_cloud.client import LlamaCloud
+
+         client = LlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         client.users.update_user(
+             user_id="string",
+         )
+         """
+         _request: typing.Dict[str, typing.Any] = {}
+         if first_name is not OMIT:
+             _request["first_name"] = first_name
+         if last_name is not OMIT:
+             _request["last_name"] = last_name
+         if email is not OMIT:
+             _request["email"] = email
+         if current_password is not OMIT:
+             _request["current_password"] = current_password
+         if new_password is not OMIT:
+             _request["new_password"] = new_password
+         _response = self._client_wrapper.httpx_client.request(
+             "PUT",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/users/{user_id}"),
+             json=jsonable_encoder(_request),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(UpdateUserResponse, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+ class AsyncUsersClient:
+     def __init__(self, *, client_wrapper: AsyncClientWrapper):
+         self._client_wrapper = client_wrapper
+
+     async def update_user(
+         self,
+         user_id: str,
+         *,
+         first_name: typing.Optional[str] = OMIT,
+         last_name: typing.Optional[str] = OMIT,
+         email: typing.Optional[str] = OMIT,
+         current_password: typing.Optional[str] = OMIT,
+         new_password: typing.Optional[str] = OMIT,
+     ) -> UpdateUserResponse:
+         """
+         Parameters:
+             - user_id: str.
+
+             - first_name: typing.Optional[str].
+
+             - last_name: typing.Optional[str].
+
+             - email: typing.Optional[str].
+
+             - current_password: typing.Optional[str].
+
+             - new_password: typing.Optional[str].
+         ---
+         from llama_cloud.client import AsyncLlamaCloud
+
+         client = AsyncLlamaCloud(
+             token="YOUR_TOKEN",
+         )
+         await client.users.update_user(
+             user_id="string",
+         )
+         """
+         _request: typing.Dict[str, typing.Any] = {}
+         if first_name is not OMIT:
+             _request["first_name"] = first_name
+         if last_name is not OMIT:
+             _request["last_name"] = last_name
+         if email is not OMIT:
+             _request["email"] = email
+         if current_password is not OMIT:
+             _request["current_password"] = current_password
+         if new_password is not OMIT:
+             _request["new_password"] = new_password
+         _response = await self._client_wrapper.httpx_client.request(
+             "PUT",
+             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/users/{user_id}"),
+             json=jsonable_encoder(_request),
+             headers=self._client_wrapper.get_headers(),
+             timeout=60,
+         )
+         if 200 <= _response.status_code < 300:
+             return pydantic.parse_obj_as(UpdateUserResponse, _response.json())  # type: ignore
+         if _response.status_code == 422:
+             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+         try:
+             _response_json = _response.json()
+         except JSONDecodeError:
+             raise ApiError(status_code=_response.status_code, body=_response.text)
+         raise ApiError(status_code=_response.status_code, body=_response_json)
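The generated client only serializes parameters you pass explicitly; anything left at the OMIT sentinel is excluded from the PUT body. A minimal sketch of calling the new endpoint, adapted from the docstring above (the token, user ID, and field values are placeholders):

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Only first_name and email end up in the request body; the other
# optional parameters stay at OMIT and are never sent.
updated = client.users.update_user(
    user_id="user_123",  # placeholder ID
    first_name="Ada",
    email="ada@example.com",
)
```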
llama_cloud/types/__init__.py
@@ -79,6 +79,7 @@ from .data_source_create_component import DataSourceCreateComponent
  from .data_source_create_custom_metadata_value import DataSourceCreateCustomMetadataValue
  from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
  from .data_source_reader_version_metadata import DataSourceReaderVersionMetadata
+ from .data_source_reader_version_metadata_reader_version import DataSourceReaderVersionMetadataReaderVersion
  from .data_source_update_dispatcher_config import DataSourceUpdateDispatcherConfig
  from .delete_params import DeleteParams
  from .document_block import DocumentBlock
@@ -140,6 +141,7 @@ from .extract_target import ExtractTarget
  from .fail_page_mode import FailPageMode
  from .file import File
  from .file_count_by_status_response import FileCountByStatusResponse
+ from .file_data import FileData
  from .file_id_presigned_url import FileIdPresignedUrl
  from .file_parse_public import FileParsePublic
  from .file_permission_info_value import FilePermissionInfoValue
@@ -199,12 +201,15 @@ from .llm_parameters import LlmParameters
  from .load_files_job_config import LoadFilesJobConfig
  from .managed_ingestion_status import ManagedIngestionStatus
  from .managed_ingestion_status_response import ManagedIngestionStatusResponse
+ from .managed_open_ai_embedding import ManagedOpenAiEmbedding
+ from .managed_open_ai_embedding_config import ManagedOpenAiEmbeddingConfig
  from .message_annotation import MessageAnnotation
  from .message_role import MessageRole
  from .metadata_filter import MetadataFilter
  from .metadata_filter_value import MetadataFilterValue
  from .metadata_filters import MetadataFilters
  from .metadata_filters_filters_item import MetadataFiltersFiltersItem
+ from .multimodal_parse_resolution import MultimodalParseResolution
  from .node_relationship import NodeRelationship
  from .none_chunking_config import NoneChunkingConfig
  from .none_segmentation_config import NoneSegmentationConfig
@@ -225,6 +230,7 @@ from .paginated_list_pipeline_files_response import PaginatedListPipelineFilesRe
  from .paginated_report_response import PaginatedReportResponse
  from .paginated_response_agent_data import PaginatedResponseAgentData
  from .paginated_response_aggregate_group import PaginatedResponseAggregateGroup
+ from .paginated_response_quota_configuration import PaginatedResponseQuotaConfiguration
  from .parse_job_config import ParseJobConfig
  from .parse_job_config_priority import ParseJobConfigPriority
  from .parse_plan_level import ParsePlanLevel
@@ -268,6 +274,7 @@ from .pipeline_embedding_config import (
      PipelineEmbeddingConfig_CohereEmbedding,
      PipelineEmbeddingConfig_GeminiEmbedding,
      PipelineEmbeddingConfig_HuggingfaceApiEmbedding,
+     PipelineEmbeddingConfig_ManagedOpenaiEmbedding,
      PipelineEmbeddingConfig_OpenaiEmbedding,
      PipelineEmbeddingConfig_VertexaiEmbedding,
  )
@@ -304,6 +311,11 @@ from .progress_event_status import ProgressEventStatus
  from .project import Project
  from .project_create import ProjectCreate
  from .prompt_conf import PromptConf
+ from .quota_configuration import QuotaConfiguration
+ from .quota_configuration_configuration_type import QuotaConfigurationConfigurationType
+ from .quota_configuration_status import QuotaConfigurationStatus
+ from .quota_rate_limit_configuration_value import QuotaRateLimitConfigurationValue
+ from .quota_rate_limit_configuration_value_denominator_units import QuotaRateLimitConfigurationValueDenominatorUnits
  from .re_rank_config import ReRankConfig
  from .re_ranker_type import ReRankerType
  from .recurring_credit_grant import RecurringCreditGrant
@@ -349,6 +361,7 @@ from .text_node import TextNode
  from .text_node_relationships_value import TextNodeRelationshipsValue
  from .text_node_with_score import TextNodeWithScore
  from .token_chunking_config import TokenChunkingConfig
+ from .update_user_response import UpdateUserResponse
  from .usage_and_plan import UsageAndPlan
  from .usage_metric_response import UsageMetricResponse
  from .usage_response import UsageResponse
@@ -358,6 +371,7 @@ from .user_organization import UserOrganization
  from .user_organization_create import UserOrganizationCreate
  from .user_organization_delete import UserOrganizationDelete
  from .user_organization_role import UserOrganizationRole
+ from .user_summary import UserSummary
  from .validation_error import ValidationError
  from .validation_error_loc_item import ValidationErrorLocItem
  from .vertex_ai_embedding_config import VertexAiEmbeddingConfig
@@ -442,6 +456,7 @@ __all__ = [
      "DataSourceCreateCustomMetadataValue",
      "DataSourceCustomMetadataValue",
      "DataSourceReaderVersionMetadata",
+     "DataSourceReaderVersionMetadataReaderVersion",
      "DataSourceUpdateDispatcherConfig",
      "DeleteParams",
      "DocumentBlock",
@@ -499,6 +514,7 @@ __all__ = [
      "FailPageMode",
      "File",
      "FileCountByStatusResponse",
+     "FileData",
      "FileIdPresignedUrl",
      "FileParsePublic",
      "FilePermissionInfoValue",
@@ -554,12 +570,15 @@ __all__ = [
      "LoadFilesJobConfig",
      "ManagedIngestionStatus",
      "ManagedIngestionStatusResponse",
+     "ManagedOpenAiEmbedding",
+     "ManagedOpenAiEmbeddingConfig",
      "MessageAnnotation",
      "MessageRole",
      "MetadataFilter",
      "MetadataFilterValue",
      "MetadataFilters",
      "MetadataFiltersFiltersItem",
+     "MultimodalParseResolution",
      "NodeRelationship",
      "NoneChunkingConfig",
      "NoneSegmentationConfig",
@@ -580,6 +599,7 @@ __all__ = [
      "PaginatedReportResponse",
      "PaginatedResponseAgentData",
      "PaginatedResponseAggregateGroup",
+     "PaginatedResponseQuotaConfiguration",
      "ParseJobConfig",
      "ParseJobConfigPriority",
      "ParsePlanLevel",
@@ -620,6 +640,7 @@ __all__ = [
      "PipelineEmbeddingConfig_CohereEmbedding",
      "PipelineEmbeddingConfig_GeminiEmbedding",
      "PipelineEmbeddingConfig_HuggingfaceApiEmbedding",
+     "PipelineEmbeddingConfig_ManagedOpenaiEmbedding",
      "PipelineEmbeddingConfig_OpenaiEmbedding",
      "PipelineEmbeddingConfig_VertexaiEmbedding",
      "PipelineFile",
@@ -651,6 +672,11 @@ __all__ = [
      "Project",
      "ProjectCreate",
      "PromptConf",
+     "QuotaConfiguration",
+     "QuotaConfigurationConfigurationType",
+     "QuotaConfigurationStatus",
+     "QuotaRateLimitConfigurationValue",
+     "QuotaRateLimitConfigurationValueDenominatorUnits",
      "ReRankConfig",
      "ReRankerType",
      "RecurringCreditGrant",
@@ -694,6 +720,7 @@ __all__ = [
      "TextNodeRelationshipsValue",
      "TextNodeWithScore",
      "TokenChunkingConfig",
+     "UpdateUserResponse",
      "UsageAndPlan",
      "UsageMetricResponse",
      "UsageResponse",
@@ -703,6 +730,7 @@ __all__ = [
      "UserOrganizationCreate",
      "UserOrganizationDelete",
      "UserOrganizationRole",
+     "UserSummary",
      "ValidationError",
      "ValidationErrorLocItem",
      "VertexAiEmbeddingConfig",
llama_cloud/types/data_source_reader_version_metadata.py
@@ -4,6 +4,7 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
+ from .data_source_reader_version_metadata_reader_version import DataSourceReaderVersionMetadataReaderVersion

  try:
      import pydantic
@@ -15,7 +16,7 @@ except ImportError:


  class DataSourceReaderVersionMetadata(pydantic.BaseModel):
-     reader_version: typing.Optional[str]
+     reader_version: typing.Optional[DataSourceReaderVersionMetadataReaderVersion]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/data_source_reader_version_metadata_reader_version.py
@@ -0,0 +1,17 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class DataSourceReaderVersionMetadataReaderVersion(str, enum.Enum):
+     ONE_0 = "1.0"
+     TWO_0 = "2.0"
+
+     def visit(self, one_0: typing.Callable[[], T_Result], two_0: typing.Callable[[], T_Result]) -> T_Result:
+         if self is DataSourceReaderVersionMetadataReaderVersion.ONE_0:
+             return one_0()
+         if self is DataSourceReaderVersionMetadataReaderVersion.TWO_0:
+             return two_0()
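Fern enums like this one ship a visit() helper that dispatches one callable per variant. A small sketch of how consuming code might use it, under the assumption that the re-export from llama_cloud.types shown above is how the type is imported:

```python
from llama_cloud.types import DataSourceReaderVersionMetadataReaderVersion

version = DataSourceReaderVersionMetadataReaderVersion.TWO_0

# visit() requires a handler per variant, so a future reader version
# surfaces as a missing-argument error at every call site.
label = version.visit(
    one_0=lambda: "legacy reader (1.0)",
    two_0=lambda: "current reader (2.0)",
)
print(label)  # current reader (2.0)
```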
llama_cloud/types/extract_agent.py
@@ -3,6 +3,8 @@
  import datetime as dt
  import typing

+ import typing_extensions
+
  from ..core.datetime_utils import serialize_datetime
  from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
  from .extract_config import ExtractConfig
@@ -28,6 +30,7 @@ class ExtractAgent(pydantic.BaseModel):
          description="The schema of the data."
      )
      config: ExtractConfig = pydantic.Field(description="The configuration parameters for the extraction agent.")
+     custom_configuration: typing.Optional[typing_extensions.Literal["default"]]
      created_at: typing.Optional[dt.datetime]
      updated_at: typing.Optional[dt.datetime]

llama_cloud/types/extract_config.py
@@ -38,9 +38,13 @@ class ExtractConfig(pydantic.BaseModel):
      chunk_mode: typing.Optional[DocumentChunkMode] = pydantic.Field(
          description="The mode to use for chunking the document."
      )
+     high_resolution_mode: typing.Optional[bool] = pydantic.Field(
+         description="Whether to use high resolution mode for the extraction."
+     )
      invalidate_cache: typing.Optional[bool] = pydantic.Field(
          description="Whether to invalidate the cache for the extraction."
      )
+     page_range: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
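For reference, a hedged sketch of how the two new ExtractConfig fields might be set, assuming the remaining fields are optional like the ones shown in this hunk; the page-range syntax is illustrative only, since the diff types page_range as a bare str:

```python
from llama_cloud.types import ExtractConfig

config = ExtractConfig(
    high_resolution_mode=True,  # new in 0.1.35: higher-fidelity parsing for extraction
    page_range="1-3",           # new in 0.1.35: assumed range syntax, not confirmed by the diff
    invalidate_cache=False,
)
```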
llama_cloud/types/file_data.py
@@ -0,0 +1,36 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class FileData(pydantic.BaseModel):
+     """
+     Schema for file data with base64 content and MIME type.
+     """
+
+     data: str = pydantic.Field(description="The file content as base64-encoded string")
+     mime_type: str = pydantic.Field(description="The MIME type of the file (e.g., 'application/pdf', 'text/plain')")
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
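FileData carries file bytes inline, so callers base64-encode the content themselves before constructing it. A minimal sketch:

```python
import base64

from llama_cloud.types import FileData

pdf_bytes = b"%PDF-1.7 ..."  # stand-in for real file contents

# The `data` field expects base64 text, not raw bytes.
file_data = FileData(
    data=base64.b64encode(pdf_bytes).decode("utf-8"),
    mime_type="application/pdf",
)

# Config.frozen = True above makes instances immutable once built.
print(file_data.mime_type)
```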
llama_cloud/types/legacy_parse_job_config.py
@@ -43,6 +43,9 @@ class LegacyParseJobConfig(pydantic.BaseModel):
      preserve_layout_alignment_across_pages: typing.Optional[bool] = pydantic.Field(
          alias="preserveLayoutAlignmentAcrossPages", description="Whether to preserve layout alignment across pages."
      )
+     preserve_very_small_text: typing.Optional[bool] = pydantic.Field(
+         alias="preserveVerySmallText", description="Whether to preserve very small text lines."
+     )
      invalidate_cache: bool = pydantic.Field(alias="invalidateCache", description="Whether to invalidate the cache.")
      output_pdf_of_document: typing.Optional[bool] = pydantic.Field(alias="outputPDFOfDocument")
      outlined_table_extraction: typing.Optional[bool] = pydantic.Field(alias="outlinedTableExtraction")
llama_cloud/types/llama_extract_settings.py
@@ -6,6 +6,7 @@ import typing
  from ..core.datetime_utils import serialize_datetime
  from .chunk_mode import ChunkMode
  from .llama_parse_parameters import LlamaParseParameters
+ from .multimodal_parse_resolution import MultimodalParseResolution
  from .struct_parse_conf import StructParseConf

  try:
@@ -48,6 +49,9 @@ class LlamaExtractSettings(pydantic.BaseModel):
      llama_parse_params: typing.Optional[LlamaParseParameters] = pydantic.Field(
          description="LlamaParse related settings."
      )
+     multimodal_parse_resolution: typing.Optional[MultimodalParseResolution] = pydantic.Field(
+         description="The resolution to use for multimodal parsing."
+     )

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/llama_parse_parameters.py
@@ -8,6 +8,7 @@ from .fail_page_mode import FailPageMode
  from .llama_parse_parameters_priority import LlamaParseParametersPriority
  from .parser_languages import ParserLanguages
  from .parsing_mode import ParsingMode
+ from .webhook_configuration import WebhookConfiguration

  try:
      import pydantic
@@ -23,6 +24,7 @@ class LlamaParseParameters(pydantic.BaseModel):
      Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline.
      """

+     webhook_configurations: typing.Optional[typing.List[WebhookConfiguration]]
      priority: typing.Optional[LlamaParseParametersPriority]
      languages: typing.Optional[typing.List[ParserLanguages]]
      parsing_instruction: typing.Optional[str]
@@ -40,6 +42,7 @@ class LlamaParseParameters(pydantic.BaseModel):
      fast_mode: typing.Optional[bool]
      skip_diagonal_text: typing.Optional[bool]
      preserve_layout_alignment_across_pages: typing.Optional[bool]
+     preserve_very_small_text: typing.Optional[bool]
      gpt_4_o_mode: typing.Optional[bool] = pydantic.Field(alias="gpt4o_mode")
      gpt_4_o_api_key: typing.Optional[str] = pydantic.Field(alias="gpt4o_api_key")
      do_not_unroll_columns: typing.Optional[bool]
llama_cloud/types/managed_open_ai_embedding.py
@@ -0,0 +1,36 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ import typing_extensions
+
+ from ..core.datetime_utils import serialize_datetime
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class ManagedOpenAiEmbedding(pydantic.BaseModel):
+     model_name: typing.Optional[typing_extensions.Literal["openai-text-embedding-3-small"]]
+     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
+     num_workers: typing.Optional[int]
+     class_name: typing.Optional[str]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/managed_open_ai_embedding_config.py
@@ -0,0 +1,34 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .managed_open_ai_embedding import ManagedOpenAiEmbedding
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class ManagedOpenAiEmbeddingConfig(pydantic.BaseModel):
+     component: typing.Optional[ManagedOpenAiEmbedding] = pydantic.Field(
+         description="Configuration for the Managed OpenAI embedding model."
+     )
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/multimodal_parse_resolution.py
@@ -0,0 +1,17 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class MultimodalParseResolution(str, enum.Enum):
+     MEDIUM = "medium"
+     HIGH = "high"
+
+     def visit(self, medium: typing.Callable[[], T_Result], high: typing.Callable[[], T_Result]) -> T_Result:
+         if self is MultimodalParseResolution.MEDIUM:
+             return medium()
+         if self is MultimodalParseResolution.HIGH:
+             return high()
llama_cloud/types/paginated_response_quota_configuration.py
@@ -0,0 +1,36 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import datetime as dt
+ import typing
+
+ from ..core.datetime_utils import serialize_datetime
+ from .quota_configuration import QuotaConfiguration
+
+ try:
+     import pydantic
+     if pydantic.__version__.startswith("1."):
+         raise ImportError
+     import pydantic.v1 as pydantic  # type: ignore
+ except ImportError:
+     import pydantic  # type: ignore
+
+
+ class PaginatedResponseQuotaConfiguration(pydantic.BaseModel):
+     total: int
+     page: int
+     size: int
+     pages: int
+     items: typing.List[QuotaConfiguration]
+
+     def json(self, **kwargs: typing.Any) -> str:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().json(**kwargs_with_defaults)
+
+     def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+         return super().dict(**kwargs_with_defaults)
+
+     class Config:
+         frozen = True
+         smart_union = True
+         json_encoders = {dt.datetime: serialize_datetime}
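This wrapper follows the same total/page/size/pages shape as the library's other Paginated* responses. A sketch of draining such an endpoint page by page; the fetch_page callable is hypothetical (the diff adds quota routes to the beta client, but their method names are not shown in this hunk):

```python
from typing import Callable, List

from llama_cloud.types import PaginatedResponseQuotaConfiguration, QuotaConfiguration

def collect_all(
    fetch_page: Callable[[int], PaginatedResponseQuotaConfiguration],
) -> List[QuotaConfiguration]:
    """Accumulate items from a paginated endpoint.

    fetch_page(page) is a stand-in for whatever client method returns
    a PaginatedResponseQuotaConfiguration for a 1-indexed page.
    """
    items: List[QuotaConfiguration] = []
    page = 1
    while True:
        resp = fetch_page(page)
        items.extend(resp.items)
        if page >= resp.pages:  # `pages` is the total page count
            break
        page += 1
    return items
```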
llama_cloud/types/parse_job_config.py
@@ -8,6 +8,7 @@ from .fail_page_mode import FailPageMode
  from .parse_job_config_priority import ParseJobConfigPriority
  from .parser_languages import ParserLanguages
  from .parsing_mode import ParsingMode
+ from .webhook_configuration import WebhookConfiguration

  try:
      import pydantic
@@ -23,6 +24,7 @@ class ParseJobConfig(pydantic.BaseModel):
      Configuration for llamaparse job
      """

+     webhook_configurations: typing.Optional[typing.List[WebhookConfiguration]]
      priority: typing.Optional[ParseJobConfigPriority]
      custom_metadata: typing.Optional[typing.Dict[str, typing.Any]]
      resource_info: typing.Optional[typing.Dict[str, typing.Any]]
@@ -42,6 +44,7 @@ class ParseJobConfig(pydantic.BaseModel):
      fast_mode: typing.Optional[bool]
      skip_diagonal_text: typing.Optional[bool]
      preserve_layout_alignment_across_pages: typing.Optional[bool]
+     preserve_very_small_text: typing.Optional[bool]
      gpt_4_o_mode: typing.Optional[bool] = pydantic.Field(alias="gpt4o_mode")
      gpt_4_o_api_key: typing.Optional[str] = pydantic.Field(alias="gpt4o_api_key")
      do_not_unroll_columns: typing.Optional[bool]
llama_cloud/types/pipeline_embedding_config.py
@@ -11,6 +11,7 @@ from .bedrock_embedding_config import BedrockEmbeddingConfig
  from .cohere_embedding_config import CohereEmbeddingConfig
  from .gemini_embedding_config import GeminiEmbeddingConfig
  from .hugging_face_inference_api_embedding_config import HuggingFaceInferenceApiEmbeddingConfig
+ from .managed_open_ai_embedding_config import ManagedOpenAiEmbeddingConfig
  from .open_ai_embedding_config import OpenAiEmbeddingConfig
  from .vertex_ai_embedding_config import VertexAiEmbeddingConfig

@@ -60,6 +61,15 @@ class PipelineEmbeddingConfig_HuggingfaceApiEmbedding(HuggingFaceInferenceApiEmb
          allow_population_by_field_name = True


+ class PipelineEmbeddingConfig_ManagedOpenaiEmbedding(ManagedOpenAiEmbeddingConfig):
+     type: typing_extensions.Literal["MANAGED_OPENAI_EMBEDDING"]
+
+     class Config:
+         frozen = True
+         smart_union = True
+         allow_population_by_field_name = True
+
+
  class PipelineEmbeddingConfig_OpenaiEmbedding(OpenAiEmbeddingConfig):
      type: typing_extensions.Literal["OPENAI_EMBEDDING"]

@@ -84,6 +94,7 @@ PipelineEmbeddingConfig = typing.Union[
      PipelineEmbeddingConfig_CohereEmbedding,
      PipelineEmbeddingConfig_GeminiEmbedding,
      PipelineEmbeddingConfig_HuggingfaceApiEmbedding,
+     PipelineEmbeddingConfig_ManagedOpenaiEmbedding,
      PipelineEmbeddingConfig_OpenaiEmbedding,
      PipelineEmbeddingConfig_VertexaiEmbedding,
  ]
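The union is discriminated on the literal type field, which is how pydantic's smart_union selects the right variant during deserialization. A short sketch constructing the new managed variant, using only fields shown in this diff:

```python
from llama_cloud.types import (
    ManagedOpenAiEmbedding,
    PipelineEmbeddingConfig_ManagedOpenaiEmbedding,
)

# The "MANAGED_OPENAI_EMBEDDING" tag is what identifies this member
# of the PipelineEmbeddingConfig union; `component` is optional.
embedding_config = PipelineEmbeddingConfig_ManagedOpenaiEmbedding(
    type="MANAGED_OPENAI_EMBEDDING",
    component=ManagedOpenAiEmbedding(embed_batch_size=32),
)
```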