llama-cloud 0.1.42__py3-none-any.whl → 0.1.43__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud has been flagged as possibly problematic.

Files changed (29)
  1. llama_cloud/__init__.py +13 -19
  2. llama_cloud/resources/__init__.py +6 -0
  3. llama_cloud/resources/beta/client.py +555 -0
  4. llama_cloud/resources/jobs/client.py +0 -8
  5. llama_cloud/resources/llama_extract/__init__.py +6 -0
  6. llama_cloud/resources/llama_extract/client.py +825 -941
  7. llama_cloud/resources/llama_extract/types/__init__.py +6 -0
  8. llama_cloud/types/__init__.py +10 -22
  9. llama_cloud/types/{prompt_conf.py → delete_response.py} +6 -9
  10. llama_cloud/types/extract_config.py +1 -0
  11. llama_cloud/types/extract_models.py +4 -0
  12. llama_cloud/types/{extract_job_create.py → extracted_table.py} +8 -14
  13. llama_cloud/types/paginated_response_spreadsheet_job.py +34 -0
  14. llama_cloud/types/public_model_name.py +4 -0
  15. llama_cloud/types/spreadsheet_job.py +50 -0
  16. llama_cloud/types/spreadsheet_parsing_config.py +35 -0
  17. {llama_cloud-0.1.42.dist-info → llama_cloud-0.1.43.dist-info}/METADATA +1 -1
  18. {llama_cloud-0.1.42.dist-info → llama_cloud-0.1.43.dist-info}/RECORD +23 -26
  19. llama_cloud/types/chunk_mode.py +0 -29
  20. llama_cloud/types/llama_extract_settings.py +0 -67
  21. llama_cloud/types/multimodal_parse_resolution.py +0 -17
  22. llama_cloud/types/schema_relax_mode.py +0 -25
  23. llama_cloud/types/struct_mode.py +0 -33
  24. llama_cloud/types/struct_parse_conf.py +0 -63
  25. /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_data_schema_override.py +0 -0
  26. /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_data_schema_override_zero_value.py +0 -0
  27. /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_priority.py +0 -0
  28. {llama_cloud-0.1.42.dist-info → llama_cloud-0.1.43.dist-info}/LICENSE +0 -0
  29. {llama_cloud-0.1.42.dist-info → llama_cloud-0.1.43.dist-info}/WHEEL +0 -0
@@ -6,6 +6,9 @@ from .extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
 from .extract_agent_update_data_schema_zero_value import ExtractAgentUpdateDataSchemaZeroValue
 from .extract_job_create_batch_data_schema_override import ExtractJobCreateBatchDataSchemaOverride
 from .extract_job_create_batch_data_schema_override_zero_value import ExtractJobCreateBatchDataSchemaOverrideZeroValue
+from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
+from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
+from .extract_job_create_priority import ExtractJobCreatePriority
 from .extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema
 from .extract_schema_validate_request_data_schema_zero_value import ExtractSchemaValidateRequestDataSchemaZeroValue
 from .extract_stateless_request_data_schema import ExtractStatelessRequestDataSchema
@@ -18,6 +21,9 @@ __all__ = [
     "ExtractAgentUpdateDataSchemaZeroValue",
     "ExtractJobCreateBatchDataSchemaOverride",
     "ExtractJobCreateBatchDataSchemaOverrideZeroValue",
+    "ExtractJobCreateDataSchemaOverride",
+    "ExtractJobCreateDataSchemaOverrideZeroValue",
+    "ExtractJobCreatePriority",
     "ExtractSchemaValidateRequestDataSchema",
     "ExtractSchemaValidateRequestDataSchemaZeroValue",
     "ExtractStatelessRequestDataSchema",
@@ -42,7 +42,6 @@ from .chat_app import ChatApp
 from .chat_app_response import ChatAppResponse
 from .chat_data import ChatData
 from .chat_message import ChatMessage
-from .chunk_mode import ChunkMode
 from .classification_result import ClassificationResult
 from .classifier_rule import ClassifierRule
 from .classify_job import ClassifyJob
@@ -91,6 +90,7 @@ from .data_source_reader_version_metadata import DataSourceReaderVersionMetadata
 from .data_source_reader_version_metadata_reader_version import DataSourceReaderVersionMetadataReaderVersion
 from .data_source_update_dispatcher_config import DataSourceUpdateDispatcherConfig
 from .delete_params import DeleteParams
+from .delete_response import DeleteResponse
 from .document_chunk_mode import DocumentChunkMode
 from .document_ingestion_job_params import DocumentIngestionJobParams
 from .element_segmentation_config import ElementSegmentationConfig
@@ -122,10 +122,6 @@ from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
 from .extract_config import ExtractConfig
 from .extract_config_priority import ExtractConfigPriority
 from .extract_job import ExtractJob
-from .extract_job_create import ExtractJobCreate
-from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
-from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
-from .extract_job_create_priority import ExtractJobCreatePriority
 from .extract_mode import ExtractMode
 from .extract_models import ExtractModels
 from .extract_resultset import ExtractResultset
@@ -145,6 +141,7 @@ from .extract_schema_validate_response import ExtractSchemaValidateResponse
 from .extract_schema_validate_response_data_schema_value import ExtractSchemaValidateResponseDataSchemaValue
 from .extract_state import ExtractState
 from .extract_target import ExtractTarget
+from .extracted_table import ExtractedTable
 from .fail_page_mode import FailPageMode
 from .failure_handling_config import FailureHandlingConfig
 from .file import File
@@ -202,7 +199,6 @@ from .license_info_response import LicenseInfoResponse
 from .llama_extract_feature_availability import LlamaExtractFeatureAvailability
 from .llama_extract_mode_availability import LlamaExtractModeAvailability
 from .llama_extract_mode_availability_status import LlamaExtractModeAvailabilityStatus
-from .llama_extract_settings import LlamaExtractSettings
 from .llama_parse_parameters import LlamaParseParameters
 from .llama_parse_parameters_priority import LlamaParseParametersPriority
 from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
@@ -219,7 +215,6 @@ from .metadata_filter import MetadataFilter
 from .metadata_filter_value import MetadataFilterValue
 from .metadata_filters import MetadataFilters
 from .metadata_filters_filters_item import MetadataFiltersFiltersItem
-from .multimodal_parse_resolution import MultimodalParseResolution
 from .node_relationship import NodeRelationship
 from .none_chunking_config import NoneChunkingConfig
 from .none_segmentation_config import NoneSegmentationConfig
@@ -241,6 +236,7 @@ from .paginated_response_agent_data import PaginatedResponseAgentData
 from .paginated_response_aggregate_group import PaginatedResponseAggregateGroup
 from .paginated_response_classify_job import PaginatedResponseClassifyJob
 from .paginated_response_quota_configuration import PaginatedResponseQuotaConfiguration
+from .paginated_response_spreadsheet_job import PaginatedResponseSpreadsheetJob
 from .parse_configuration import ParseConfiguration
 from .parse_configuration_create import ParseConfigurationCreate
 from .parse_configuration_filter import ParseConfigurationFilter
@@ -322,7 +318,6 @@ from .preset_retrieval_params_search_filters_inference_schema_value import (
 from .presigned_url import PresignedUrl
 from .project import Project
 from .project_create import ProjectCreate
-from .prompt_conf import PromptConf
 from .public_model_name import PublicModelName
 from .quota_configuration import QuotaConfiguration
 from .quota_configuration_configuration_type import QuotaConfigurationConfigurationType
@@ -342,14 +337,13 @@ from .retriever_pipeline import RetrieverPipeline
 from .role import Role
 from .schema_generation_availability import SchemaGenerationAvailability
 from .schema_generation_availability_status import SchemaGenerationAvailabilityStatus
-from .schema_relax_mode import SchemaRelaxMode
 from .semantic_chunking_config import SemanticChunkingConfig
 from .sentence_chunking_config import SentenceChunkingConfig
 from .sparse_model_config import SparseModelConfig
 from .sparse_model_type import SparseModelType
+from .spreadsheet_job import SpreadsheetJob
+from .spreadsheet_parsing_config import SpreadsheetParsingConfig
 from .status_enum import StatusEnum
-from .struct_mode import StructMode
-from .struct_parse_conf import StructParseConf
 from .supported_llm_model import SupportedLlmModel
 from .supported_llm_model_names import SupportedLlmModelNames
 from .text_node import TextNode
@@ -414,7 +408,6 @@ __all__ = [
     "ChatAppResponse",
     "ChatData",
     "ChatMessage",
-    "ChunkMode",
     "ClassificationResult",
     "ClassifierRule",
     "ClassifyJob",
@@ -463,6 +456,7 @@ __all__ = [
     "DataSourceReaderVersionMetadataReaderVersion",
     "DataSourceUpdateDispatcherConfig",
     "DeleteParams",
+    "DeleteResponse",
     "DocumentChunkMode",
     "DocumentIngestionJobParams",
     "ElementSegmentationConfig",
@@ -490,10 +484,6 @@ __all__ = [
     "ExtractConfig",
     "ExtractConfigPriority",
     "ExtractJob",
-    "ExtractJobCreate",
-    "ExtractJobCreateDataSchemaOverride",
-    "ExtractJobCreateDataSchemaOverrideZeroValue",
-    "ExtractJobCreatePriority",
     "ExtractMode",
     "ExtractModels",
     "ExtractResultset",
@@ -513,6 +503,7 @@ __all__ = [
     "ExtractSchemaValidateResponseDataSchemaValue",
     "ExtractState",
     "ExtractTarget",
+    "ExtractedTable",
     "FailPageMode",
     "FailureHandlingConfig",
     "File",
@@ -568,7 +559,6 @@ __all__ = [
     "LlamaExtractFeatureAvailability",
     "LlamaExtractModeAvailability",
     "LlamaExtractModeAvailabilityStatus",
-    "LlamaExtractSettings",
     "LlamaParseParameters",
     "LlamaParseParametersPriority",
     "LlamaParseSupportedFileExtensions",
@@ -585,7 +575,6 @@ __all__ = [
     "MetadataFilterValue",
     "MetadataFilters",
     "MetadataFiltersFiltersItem",
-    "MultimodalParseResolution",
     "NodeRelationship",
     "NoneChunkingConfig",
     "NoneSegmentationConfig",
@@ -607,6 +596,7 @@ __all__ = [
     "PaginatedResponseAggregateGroup",
     "PaginatedResponseClassifyJob",
     "PaginatedResponseQuotaConfiguration",
+    "PaginatedResponseSpreadsheetJob",
     "ParseConfiguration",
     "ParseConfigurationCreate",
     "ParseConfigurationFilter",
@@ -680,7 +670,6 @@ __all__ = [
     "PresignedUrl",
     "Project",
     "ProjectCreate",
-    "PromptConf",
     "PublicModelName",
     "QuotaConfiguration",
     "QuotaConfigurationConfigurationType",
@@ -700,14 +689,13 @@ __all__ = [
     "Role",
     "SchemaGenerationAvailability",
     "SchemaGenerationAvailabilityStatus",
-    "SchemaRelaxMode",
     "SemanticChunkingConfig",
     "SentenceChunkingConfig",
     "SparseModelConfig",
     "SparseModelType",
+    "SpreadsheetJob",
+    "SpreadsheetParsingConfig",
     "StatusEnum",
-    "StructMode",
-    "StructParseConf",
     "SupportedLlmModel",
     "SupportedLlmModelNames",
     "TextNode",
@@ -14,15 +14,12 @@ except ImportError:
     import pydantic  # type: ignore


-class PromptConf(pydantic.BaseModel):
-    system_prompt: typing.Optional[str] = pydantic.Field(description="The system prompt to use for the extraction.")
-    extraction_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for the extraction.")
-    error_handling_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for error handling.")
-    reasoning_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for reasoning.")
-    cite_sources_prompt: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
-        description="The prompt to use for citing sources."
-    )
-    scratchpad_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for scratchpad.")
+class DeleteResponse(pydantic.BaseModel):
+    """
+    API response for bulk delete operation
+    """
+
+    deleted_count: int

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
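
The PromptConf model is replaced by the much smaller DeleteResponse. A minimal sketch of consuming it; only the model appears in this diff, so the bulk-delete client call that would return it is not shown and the value below is illustrative:

from llama_cloud.types import DeleteResponse

# deleted_count is the model's only field.
resp = DeleteResponse(deleted_count=3)
print(f"Deleted {resp.deleted_count} record(s)")
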
@@ -50,6 +50,7 @@ class ExtractConfig(pydantic.BaseModel):
     invalidate_cache: typing.Optional[bool] = pydantic.Field(
         description="Whether to invalidate the cache for the extraction."
     )
+    num_pages_context: typing.Optional[int]
     page_range: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
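
ExtractConfig gains an optional num_pages_context field. A construction sketch, assuming the remaining ExtractConfig fields are optional (as the ones visible in this hunk are) and using illustrative values:

from llama_cloud.types import ExtractConfig

# Hypothetical values: include two neighbouring pages of context and limit the page range.
config = ExtractConfig(num_pages_context=2, page_range="1-10")
print(config.json())
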
@@ -14,6 +14,7 @@ class ExtractModels(str, enum.Enum):
     OPENAI_GPT_5_MINI = "openai-gpt-5-mini"
     GEMINI_20_FLASH = "gemini-2.0-flash"
     GEMINI_25_FLASH = "gemini-2.5-flash"
+    GEMINI_25_FLASH_LITE = "gemini-2.5-flash-lite"
     GEMINI_25_PRO = "gemini-2.5-pro"
     OPENAI_GPT_4_O = "openai-gpt-4o"
     OPENAI_GPT_4_O_MINI = "openai-gpt-4o-mini"
@@ -27,6 +28,7 @@ class ExtractModels(str, enum.Enum):
         openai_gpt_5_mini: typing.Callable[[], T_Result],
         gemini_20_flash: typing.Callable[[], T_Result],
         gemini_25_flash: typing.Callable[[], T_Result],
+        gemini_25_flash_lite: typing.Callable[[], T_Result],
         gemini_25_pro: typing.Callable[[], T_Result],
         openai_gpt_4_o: typing.Callable[[], T_Result],
         openai_gpt_4_o_mini: typing.Callable[[], T_Result],
@@ -45,6 +47,8 @@ class ExtractModels(str, enum.Enum):
             return gemini_20_flash()
         if self is ExtractModels.GEMINI_25_FLASH:
             return gemini_25_flash()
+        if self is ExtractModels.GEMINI_25_FLASH_LITE:
+            return gemini_25_flash_lite()
         if self is ExtractModels.GEMINI_25_PRO:
             return gemini_25_pro()
         if self is ExtractModels.OPENAI_GPT_4_O:
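
Both ExtractModels here and PublicModelName further down gain a gemini-2.5-flash-lite member. Since these are str-based enums, the new member compares equal to its wire value; a short sketch:

from llama_cloud.types import ExtractModels

model = ExtractModels.GEMINI_25_FLASH_LITE
assert model == "gemini-2.5-flash-lite"
# Code that calls the generated visit() helper must now also pass a
# gemini_25_flash_lite callback, as the new parameter above shows.
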
@@ -4,10 +4,6 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
-from .extract_config import ExtractConfig
-from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
-from .extract_job_create_priority import ExtractJobCreatePriority
-from .webhook_configuration import WebhookConfiguration

 try:
     import pydantic
@@ -18,19 +14,17 @@ except ImportError:
     import pydantic  # type: ignore


-class ExtractJobCreate(pydantic.BaseModel):
+class ExtractedTable(pydantic.BaseModel):
     """
-    Schema for creating an extraction job.
+    A single extracted table from a spreadsheet
     """

-    priority: typing.Optional[ExtractJobCreatePriority]
-    webhook_configurations: typing.Optional[typing.List[WebhookConfiguration]]
-    extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
-    file_id: str = pydantic.Field(description="The id of the file")
-    data_schema_override: typing.Optional[ExtractJobCreateDataSchemaOverride] = pydantic.Field(
-        description="The data schema to override the extraction agent's data schema with"
-    )
-    config_override: typing.Optional[ExtractConfig]
+    table_id: int = pydantic.Field(description="Unique identifier for this table within the file")
+    sheet_name: str = pydantic.Field(description="Worksheet name where table was found")
+    row_span: int = pydantic.Field(description="Number of rows in the table")
+    col_span: int = pydantic.Field(description="Number of columns in the table")
+    has_headers: bool = pydantic.Field(description="Whether the table has header rows")
+    metadata_json: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
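
ExtractJobCreate is replaced by ExtractedTable, the per-table result of spreadsheet parsing. A sketch of reading its fields; the instance and its metadata_json payload are illustrative, since metadata_json is typed as a plain optional string and any structure inside it is an assumption:

import json

from llama_cloud.types import ExtractedTable

table = ExtractedTable(
    table_id=0,
    sheet_name="Sheet1",
    row_span=12,
    col_span=4,
    has_headers=True,
    metadata_json='{"source_range": "A1:D12"}',  # hypothetical payload
)
# Decode the optional metadata string ourselves; the model does not parse it.
metadata = json.loads(table.metadata_json) if table.metadata_json else {}
print(table.sheet_name, table.row_span, table.col_span, metadata)
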
@@ -0,0 +1,34 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .spreadsheet_job import SpreadsheetJob
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PaginatedResponseSpreadsheetJob(pydantic.BaseModel):
+    items: typing.List[SpreadsheetJob] = pydantic.Field(description="The list of items.")
+    next_page_token: typing.Optional[str]
+    total_size: typing.Optional[int]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
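
A sketch of draining a paginated listing with the new model. The fetch_page callable stands in for whichever client method returns PaginatedResponseSpreadsheetJob; only the model itself is visible in this diff:

import typing

from llama_cloud.types import PaginatedResponseSpreadsheetJob, SpreadsheetJob

def collect_spreadsheet_jobs(
    fetch_page: typing.Callable[[typing.Optional[str]], PaginatedResponseSpreadsheetJob],
) -> typing.List[SpreadsheetJob]:
    jobs: typing.List[SpreadsheetJob] = []
    token: typing.Optional[str] = None
    while True:
        page = fetch_page(token)  # hypothetical stand-in for the client call
        jobs.extend(page.items)
        if not page.next_page_token:  # no token means the last page was reached
            return jobs
        token = page.next_page_token
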
@@ -26,6 +26,7 @@ class PublicModelName(str, enum.Enum):
     GEMINI_25_PRO = "gemini-2.5-pro"
     GEMINI_20_FLASH = "gemini-2.0-flash"
     GEMINI_20_FLASH_LITE = "gemini-2.0-flash-lite"
+    GEMINI_25_FLASH_LITE = "gemini-2.5-flash-lite"
     GEMINI_15_FLASH = "gemini-1.5-flash"
     GEMINI_15_PRO = "gemini-1.5-pro"

@@ -50,6 +51,7 @@ class PublicModelName(str, enum.Enum):
         gemini_25_pro: typing.Callable[[], T_Result],
         gemini_20_flash: typing.Callable[[], T_Result],
         gemini_20_flash_lite: typing.Callable[[], T_Result],
+        gemini_25_flash_lite: typing.Callable[[], T_Result],
         gemini_15_flash: typing.Callable[[], T_Result],
         gemini_15_pro: typing.Callable[[], T_Result],
     ) -> T_Result:
@@ -91,6 +93,8 @@ class PublicModelName(str, enum.Enum):
             return gemini_20_flash()
         if self is PublicModelName.GEMINI_20_FLASH_LITE:
             return gemini_20_flash_lite()
+        if self is PublicModelName.GEMINI_25_FLASH_LITE:
+            return gemini_25_flash_lite()
         if self is PublicModelName.GEMINI_15_FLASH:
             return gemini_15_flash()
         if self is PublicModelName.GEMINI_15_PRO:
@@ -0,0 +1,50 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extracted_table import ExtractedTable
+from .spreadsheet_parsing_config import SpreadsheetParsingConfig
+from .status_enum import StatusEnum
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class SpreadsheetJob(pydantic.BaseModel):
+    """
+    A spreadsheet parsing job
+    """
+
+    id: str = pydantic.Field(description="The ID of the job")
+    user_id: str = pydantic.Field(description="The ID of the user")
+    project_id: str = pydantic.Field(description="The ID of the project")
+    file_id: str = pydantic.Field(description="The ID of the file to parse")
+    config: SpreadsheetParsingConfig = pydantic.Field(description="Configuration for the parsing job")
+    status: StatusEnum = pydantic.Field(description="The status of the parsing job")
+    created_at: str = pydantic.Field(description="When the job was created")
+    updated_at: str = pydantic.Field(description="When the job was last updated")
+    success: typing.Optional[bool]
+    tables: typing.Optional[typing.List[ExtractedTable]] = pydantic.Field(
+        description="All extracted tables (populated when job is complete)"
+    )
+    errors: typing.Optional[typing.List[str]] = pydantic.Field(description="Any errors encountered")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
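
A sketch of inspecting a SpreadsheetJob once it has been fetched; it only reads the fields declared above and treats success and tables as unset until the job completes, as their descriptions suggest:

from llama_cloud.types import SpreadsheetJob

def summarize_job(job: SpreadsheetJob) -> str:
    if not job.success:
        return f"job {job.id} not (yet) successful: {job.errors or []}"
    tables = job.tables or []
    return f"job {job.id} extracted {len(tables)} table(s) from file {job.file_id}"
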
@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class SpreadsheetParsingConfig(pydantic.BaseModel):
+    """
+    Configuration for spreadsheet parsing
+    """
+
+    sheet_names: typing.Optional[typing.List[str]]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
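
A construction sketch for the new config. sheet_names is its only field; leaving it unset presumably parses every worksheet, though the default behaviour is not described in this diff and the sheet names below are illustrative:

from llama_cloud.types import SpreadsheetParsingConfig

config = SpreadsheetParsingConfig(sheet_names=["Revenue", "Expenses"])
print(config.json())
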
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: llama-cloud
-Version: 0.1.42
+Version: 0.1.43
 Summary:
 License: MIT
 Author: Logan Markewich
@@ -1,4 +1,4 @@
-llama_cloud/__init__.py,sha256=D5E3XpRgWPP3FeZdrZPfOI0JmxcEwisHSvWp5NjL9Lw,26441
+llama_cloud/__init__.py,sha256=LReIMfDh7RbmpAj40gB63X3h8DPB7puSXbRB_xSUGSA,26353
 llama_cloud/client.py,sha256=GDYFdv8HLjksP7v9Srg2s0R1k_nouz2toh27EG3y110,6385
 llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
 llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
@@ -9,7 +9,7 @@ llama_cloud/core/remove_none_from_dict.py,sha256=8m91FC3YuVem0Gm9_sXhJ2tGvP33owJ
 llama_cloud/environment.py,sha256=feTjOebeFZMrBdnHat4RE5aHlpt-sJm4NhK4ntV1htI,167
 llama_cloud/errors/__init__.py,sha256=pbbVUFtB9LCocA1RMWMMF_RKjsy5YkOKX5BAuE49w6g,170
 llama_cloud/errors/unprocessable_entity_error.py,sha256=FvR7XPlV3Xx5nu8HNlmLhBRdk4so_gCHjYT5PyZe6sM,313
-llama_cloud/resources/__init__.py,sha256=j5itg2tAAkpAKMu5oLAl9I9a79D9BMUYYHTvc1nHZfM,4147
+llama_cloud/resources/__init__.py,sha256=OIkUubxiMa60QIlM0k1oXjIVG8OiuD2YlkfGdW26ykM,4391
 llama_cloud/resources/admin/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/admin/client.py,sha256=iJClMzp6OQ_TOnAwgcPSb0BkEuuFeIq0r15lDmWUD0s,8502
 llama_cloud/resources/agent_deployments/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -17,7 +17,7 @@ llama_cloud/resources/agent_deployments/client.py,sha256=3EOzOjmRs4KISgJ566enq3F
 llama_cloud/resources/alpha/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/alpha/client.py,sha256=OAnzukNHIWRgpuG4gTT7-dVFCogOwXc-NxpH3DULXuA,3801
 llama_cloud/resources/beta/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/beta/client.py,sha256=9h0KmR_3yhjtoM3Ai0SeUXI_--xB7KaW1AnsD9aCnYA,116295
+llama_cloud/resources/beta/client.py,sha256=UqWCfbM8PqayLb6tqVcJEBRKM_fPYdn2eUN0gZPqAoI,138776
 llama_cloud/resources/chat_apps/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/chat_apps/client.py,sha256=orSI8rpQbUwVEToolEeiEi5Qe--suXFvfu6D9JDii5I,23595
 llama_cloud/resources/classifier/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -42,16 +42,19 @@ llama_cloud/resources/files/client.py,sha256=Crd0IR0cV5fld4jUGAHE8VsIbw7vCYrOIyB
 llama_cloud/resources/files/types/__init__.py,sha256=ZZuDQsYsxmQ9VwpfN7oqftzGRnFTR2EMYdCa7zARo4g,204
 llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py,sha256=Wc8wFgujOO5pZvbbh2TMMzpa37GKZd14GYNJ9bdq7BE,214
 llama_cloud/resources/jobs/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/jobs/client.py,sha256=b2R_Oj2OCtcv-IIJNz9aq42hDgrOk_huqTSJhTB9VaA,6202
-llama_cloud/resources/llama_extract/__init__.py,sha256=V6VZ8hQXwAuvOOZyk43nnbINoDQqEr03AjKQPhYKluk,997
-llama_cloud/resources/llama_extract/client.py,sha256=gk57ynYMMNlR0n_57w2MoGXLt_IfAQ5tJIvqFh0GYcM,85612
-llama_cloud/resources/llama_extract/types/__init__.py,sha256=2Iu4w5LXZY2Govr1RzahIfY0b84y658SQjMDtj7rH_0,1497
+llama_cloud/resources/jobs/client.py,sha256=uwrnDYScD45LK0tWdIGmcHYl9HEdHtK7XlwKckgVu08,5586
+llama_cloud/resources/llama_extract/__init__.py,sha256=s-2VtIbgemm9LsW8ShuZalU-7yVSEynZHokDd1-10GE,1241
+llama_cloud/resources/llama_extract/client.py,sha256=T7ItJxO0n3osTE3v8OXVRGygw6n2o87b0aJiX0rqkyk,81374
+llama_cloud/resources/llama_extract/types/__init__.py,sha256=G-VjtCYdHuEqjMNhMgx8mbjy1g-vorghwmpaGObRHCc,1884
 llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py,sha256=zB31hJQ8hKaIsPkfTWiX5hqsPVFMyyeWEDZ_Aq237jo,305
 llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_zero_value.py,sha256=xoyXH3f0Y5beMWBxmtXSz6QoB_df_-0QBsYdjBhZnGw,217
 llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py,sha256=argR5gPRUYWY6ADCMKRdg-8NM-rsBM91_TEn8NKqVy8,305
 llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_zero_value.py,sha256=Nvd892EFhg-PzlqoFp5i2owL7hCZ2SsuL7U4Tk9NeRI,217
 llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override.py,sha256=GykJ1BBecRtWYD3ZPi1YINqrr-me_pyr2w_4Ei4QOZQ,351
 llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override_zero_value.py,sha256=7zXOgTYUwVAeyYeqWvX69m-7mhvK0V9cBRvgqVSd0X0,228
+llama_cloud/resources/llama_extract/types/extract_job_create_data_schema_override.py,sha256=vuiJ2lGJjbXEnvFKzVnKyvgwhMXPg1Pb5GZne2DrB60,330
+llama_cloud/resources/llama_extract/types/extract_job_create_data_schema_override_zero_value.py,sha256=HHEYxOSQXXyBYOiUQg_qwfQtXFj-OtThMwbUDBIgZU0,223
+llama_cloud/resources/llama_extract/types/extract_job_create_priority.py,sha256=_Qdc-ScGUcsgb0pv9-Viq2JgEoDYUi0AKStlw2E4Rb4,810
 llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py,sha256=uMqpKJdCmUNtryS2bkQTNA1AgDlWdtsBOP31iMt3zNA,346
 llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py,sha256=cUS7ez5r0Vx8T7SxwLYptZMmvpT5JoDVMyn54Q6VL-g,227
 llama_cloud/resources/llama_extract/types/extract_stateless_request_data_schema.py,sha256=lBblR9zgjJsbWL-2bDisCj7EQiX6aky6GQ4tuMr3LtU,325
@@ -73,7 +76,7 @@ llama_cloud/resources/retrievers/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-
 llama_cloud/resources/retrievers/client.py,sha256=z2LhmA-cZVFzr9P6loeCZYnJbvSIk0QitFeVFp-IyZk,32126
 llama_cloud/resources/users/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/users/client.py,sha256=A2s8e2syQHkkSwPz-Lrt_Zxp1K-8nqJqj5EafE6NWYs,5545
-llama_cloud/types/__init__.py,sha256=dcaGQ6aoPvx2BHbPqSuq0Ukw6IgLwYJBTo9JZSvXAEU,32349
+llama_cloud/types/__init__.py,sha256=p_tcv6haFhbsYDvM_P2HDHfMxweK31ZLqj3uPCRqd6Y,31819
 llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
 llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
 llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
@@ -104,7 +107,6 @@ llama_cloud/types/chat_app.py,sha256=fLuzYkXLq51C_Y23hoLwfmG-OiT7jlyHt2JGe6-f1IA
 llama_cloud/types/chat_app_response.py,sha256=WSKr1KI9_pGTSstr3I53kZ8qb3y87Q4ulh8fR0C7sSU,1784
 llama_cloud/types/chat_data.py,sha256=ZYqVtjXF6qPGajU4IWZu3InpU54TXJwBFiqxBepylP0,1197
 llama_cloud/types/chat_message.py,sha256=94GmO2Kp6VXhb7ZXhBPz8ExeZX1b_2LZLowuWtj6Tuw,1580
-llama_cloud/types/chunk_mode.py,sha256=J4vqAQfQG6PWsIv1Fe_99nVsAfDbv_P81_KVsJ9AkU4,790
 llama_cloud/types/classification_result.py,sha256=1faExxbtJLoYjy0h0Gl38Shk2idySEOenJBjQlcRpXs,1309
 llama_cloud/types/classifier_rule.py,sha256=-64iBABkQ_IXN8rA77xA6L4xSsj8epTVT9Z1C7ypGx0,1533
 llama_cloud/types/classify_job.py,sha256=adYCoiUbHNUFfr_FalvhULikeGmWp4I0P1vziVYrycM,1776
@@ -153,6 +155,7 @@ llama_cloud/types/data_source_reader_version_metadata.py,sha256=hh7Hunen9GHlvtLb
 llama_cloud/types/data_source_reader_version_metadata_reader_version.py,sha256=qZtQtHEnpWE48CjBPdljoYSzuk2rdrw5CCpWbLtM6Ps,735
 llama_cloud/types/data_source_update_dispatcher_config.py,sha256=Sh6HhXfEV2Z6PYhkYQucs2MxyKVpL3UPV-I4cbf--bA,1242
 llama_cloud/types/delete_params.py,sha256=1snPrd3WO9C1bKf0WdMslE2HQMF0yYLI3U7N53cmurM,1285
+llama_cloud/types/delete_response.py,sha256=ULsQ5Faps-quM81U_vFNhYFNLqFc8qr2XArfDY3R3NA,1032
 llama_cloud/types/document_chunk_mode.py,sha256=6qH43Q0lIob2DMU95GsmSEOs4kQxOIyUFXj_kRDnyV4,470
 llama_cloud/types/document_ingestion_job_params.py,sha256=33xTAl-K-m1j_Ufkj7w2GaYg9EUH5Hwsjn869X-fWMk,1524
 llama_cloud/types/element_segmentation_config.py,sha256=QOBk8YFrgK0I2m3caqV5bpYaGXbk0fMSjZ4hUPZXZDI,959
@@ -163,15 +166,11 @@ llama_cloud/types/embedding_model_config_update_embedding_config.py,sha256=mrXFx
 llama_cloud/types/eval_execution_params.py,sha256=ntVaJh5SMZMPL4QLUiihVjUlg2SKbrezvbMKGlrF66Q,1369
 llama_cloud/types/extract_agent.py,sha256=Vj6tg8aEjUPADsUlkhHSCotrfWt8uoktaV45J81KeLc,1869
 llama_cloud/types/extract_agent_data_schema_value.py,sha256=UaDQ2KjajLDccW7F4NKdfpefeTJrr1hl0c95WRETYkM,201
-llama_cloud/types/extract_config.py,sha256=FgkMtDgzeFZPNPHUR0HB00EauAz9Zh4ef6uC3yPyRO8,2780
+llama_cloud/types/extract_config.py,sha256=h7rbxcns9m4rMk-GI2kqN4Vth1OuO1ti1KeuDUXIzFI,2824
 llama_cloud/types/extract_config_priority.py,sha256=btl5lxl25Ve6_lTbQzQyjOKle8XoY0r16lk3364c3uw,795
 llama_cloud/types/extract_job.py,sha256=Yx4fDdCdylAji2LPTwqflVpz1o9slpj9tTLS93-1tzU,1431
-llama_cloud/types/extract_job_create.py,sha256=5CcKnYprImF0wEqUJDqi6flAIJ0rzOWxmrCvtl_b8WM,1802
-llama_cloud/types/extract_job_create_data_schema_override.py,sha256=vuiJ2lGJjbXEnvFKzVnKyvgwhMXPg1Pb5GZne2DrB60,330
-llama_cloud/types/extract_job_create_data_schema_override_zero_value.py,sha256=HHEYxOSQXXyBYOiUQg_qwfQtXFj-OtThMwbUDBIgZU0,223
-llama_cloud/types/extract_job_create_priority.py,sha256=_Qdc-ScGUcsgb0pv9-Viq2JgEoDYUi0AKStlw2E4Rb4,810
 llama_cloud/types/extract_mode.py,sha256=S7H-XcH1wvPbOPVdwG9kVnZaH1pMY-LNzAD6TjCm0mc,785
-llama_cloud/types/extract_models.py,sha256=w_B-TzIkRvZhiBKA2bzyHXgQHs0tT4dEiIZsEnwiCgE,2072
+llama_cloud/types/extract_models.py,sha256=FwXyBJE-rcqblcslpm5gVA1S0V21GjaPIW1BqDxVqGQ,2281
 llama_cloud/types/extract_resultset.py,sha256=Alje0YQJUiA_aKi0hQs7TAnhDmZuQ_yL9b6HCNYBFQg,1627
 llama_cloud/types/extract_resultset_data.py,sha256=v9Ae4SxLsvYPE9crko4N16lBjsxuZpz1yrUOhnaM_VY,427
 llama_cloud/types/extract_resultset_data_item_value.py,sha256=JwqgDIGW0irr8QWaSTIrl24FhGxTUDOXIbxoSdIjuxs,209
@@ -189,6 +188,7 @@ llama_cloud/types/extract_schema_validate_response.py,sha256=EVSeXsljZC-gIpdXr16
 llama_cloud/types/extract_schema_validate_response_data_schema_value.py,sha256=lX9RbBHcmBRagA-K7x1he8EEmmNCiAs-tHumGfPvFVQ,224
 llama_cloud/types/extract_state.py,sha256=TNeVAXXKZaiM2srlbQlzRSn4_TDpR4xyT_yQhJUxFvk,775
 llama_cloud/types/extract_target.py,sha256=Gt-FNqblzcjdfq1hxsqEjWWu-HNLXdKy4w98nog52Ms,478
+llama_cloud/types/extracted_table.py,sha256=5pgpSICKDYMF93lLgMAn5qk1xct5yrI29Gy7R1R1fII,1489
 llama_cloud/types/fail_page_mode.py,sha256=n4fgPpiEB5siPoEg0Sux4COg7ElNybjshxDoUihZwRU,786
 llama_cloud/types/failure_handling_config.py,sha256=EmAQW0qm7-JTSYFwhmIWxqkVNWym_AyAJIMEmeI9Cqc,1216
 llama_cloud/types/file.py,sha256=sXdF-cdHL3k1-DPIxAjYpb-kNHzcOAV_earVoYITzUA,1765
@@ -235,7 +235,6 @@ llama_cloud/types/license_info_response.py,sha256=fE9vcWO8k92SBqb_wOyBu_16C61s72
 llama_cloud/types/llama_extract_feature_availability.py,sha256=oHJ3OyHf2rXmZhBSQfxVNnCFOp8IMKx_28EffCIEbLU,1228
 llama_cloud/types/llama_extract_mode_availability.py,sha256=UtpYxpdZ29u3UarhGzH89H5rurvZQtOqO6a44gMm9DM,1379
 llama_cloud/types/llama_extract_mode_availability_status.py,sha256=_ildgVCsBdqOLD__qdEjcYxqgKunXhJ_VHUeqjZJX8c,566
-llama_cloud/types/llama_extract_settings.py,sha256=mWMjXL9t7d-J051Y3iSMgT-qa1h8VvCKrpFFvqv3FHM,2779
 llama_cloud/types/llama_parse_parameters.py,sha256=eaPEThvhnKuSl-kSm9e07tCnE2btGS2ZLTHzmdnD7NY,6953
 llama_cloud/types/llama_parse_parameters_priority.py,sha256=EFRudtaID_s8rLKlfW8O8O9TDbpZdniIidK-xchhfRI,830
 llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=B_0N3f8Aq59W9FbsH50mGBUiyWTIXQjHFl739uAyaQw,11207
@@ -252,7 +251,6 @@ llama_cloud/types/metadata_filter.py,sha256=LX2fGsUb4wvF5bj9iWO6IPQGi3i0L2Lb4cE6
 llama_cloud/types/metadata_filter_value.py,sha256=ij721gXNI7zbgsuDl9-AqBcXg2WDuVZhYS5F5YqekEs,188
 llama_cloud/types/metadata_filters.py,sha256=uSf6sB4oQu6WzMPNFG6Tc4euqEiYcj_X14Y5JWt9xVE,1315
 llama_cloud/types/metadata_filters_filters_item.py,sha256=e8KhD2q6Qc2_aK6r5CvyxC0oWVYO4F4vBIcB9eMEPPM,246
-llama_cloud/types/multimodal_parse_resolution.py,sha256=_eNBgAmei6rvWT1tEIefC_dl_Y3ALR81gIgJYCgy6eA,489
 llama_cloud/types/node_relationship.py,sha256=2e2PqWm0LOTiImvtsyiuaAPNIl0BItjSrQZTJv65GRA,1209
 llama_cloud/types/none_chunking_config.py,sha256=D062t314Vp-s4n9h8wNgsYfElI4PonPKmihvjEmaqdA,952
 llama_cloud/types/none_segmentation_config.py,sha256=j3jUA6E8uFtwDMEu4TFG3Q4ZGCGiuUfUW9AMO1NNqXU,956
@@ -274,6 +272,7 @@ llama_cloud/types/paginated_response_agent_data.py,sha256=u6Y-Cq9qjGF5tskMOQChUN
 llama_cloud/types/paginated_response_aggregate_group.py,sha256=1ajZLZJLU6-GuQ_PPsEVRFZ6bm9he807F_F_DmB2HlQ,1179
 llama_cloud/types/paginated_response_classify_job.py,sha256=ABpHn-ryRS8erj02ncxshAFe2Enw5JvSZqqbZuy0nWA,1167
 llama_cloud/types/paginated_response_quota_configuration.py,sha256=S-miK621O7V6hBB05xcFBKCwa-gBK17iTHh29Saebz8,1123
+llama_cloud/types/paginated_response_spreadsheet_job.py,sha256=9_5zc_Z36tz29bf1Y8efBaTIOPfFO_ylr4uPAvZwua4,1179
 llama_cloud/types/parse_configuration.py,sha256=mXjChoWseMnj-OEUUwkV-B5bUjPZ0SGfHr8lX4dAlRY,1762
 llama_cloud/types/parse_configuration_create.py,sha256=3tnlIgHH_UgFFYP2OdVKyfIpa9mAzIzQxN4hDeazf3M,1467
 llama_cloud/types/parse_configuration_filter.py,sha256=pObpFpnMq9CXfzZteY0S-2Lmj55mIdpQU4fZrEvgiZE,1260
@@ -330,8 +329,7 @@ llama_cloud/types/preset_retrieval_params_search_filters_inference_schema_value.
 llama_cloud/types/presigned_url.py,sha256=-DOQo7XKvUsl-9Gz7fX6VOHdQLzGH2XRau24ASvG92E,1275
 llama_cloud/types/project.py,sha256=4NNh_ZAjEkoWl5st6b1jsPVf_SYKtUTB6rS1701G4IQ,1441
 llama_cloud/types/project_create.py,sha256=GxGmsXGJM-cHrvPFLktEkj9JtNsSdFae7-HPZFB4er0,1014
-llama_cloud/types/prompt_conf.py,sha256=hh8I3jxk3K6e5QZoBCLqszohMYtk73PERYoL36lLmTk,1660
-llama_cloud/types/public_model_name.py,sha256=sKnedLmz7dS6U1PyT7qSjvoUuFpB2Q9HkXYjh16-EVw,4405
+llama_cloud/types/public_model_name.py,sha256=vT1ubiaYjJYR2XZi-rcs0KH2oRp4OertpeyjUnBMt-w,4616
 llama_cloud/types/quota_configuration.py,sha256=gTt2pLHhh9PpWxs1gtH1sTxM3Z6BBOAgSBI8AHCRoFI,2178
 llama_cloud/types/quota_configuration_configuration_type.py,sha256=6HQN4TO5zBkbWABfWu22F-5xjJF0jTWVGZdK36bSsso,1900
 llama_cloud/types/quota_configuration_status.py,sha256=Lcmu1Ek9GAcj7LP8ciMzHrDcXvQ6eEFXEXOzG8v_HmE,580
@@ -350,14 +348,13 @@ llama_cloud/types/retriever_pipeline.py,sha256=F1pZDxg8JdQXRHE6ciFezd7a-Wv5bHplP
 llama_cloud/types/role.py,sha256=4pbyLVNPleDd624cDcOhu9y1WvqC0J0gmNirTOW97iA,1342
 llama_cloud/types/schema_generation_availability.py,sha256=42x9DCjLVRH27ZQC8bB4Atxd2rKoHoX2EZTT5S3LIlU,1111
 llama_cloud/types/schema_generation_availability_status.py,sha256=bRU9bKidO01Zh3qZLH7tTJQSMImeqOlFDzF30Rhff7o,566
-llama_cloud/types/schema_relax_mode.py,sha256=v4or6dYTvWvBBNtEd2ZSaUAb1706I0Zuh-Xztm-zx_0,635
 llama_cloud/types/semantic_chunking_config.py,sha256=dFDniTVWpRc7UcmVFvljUoyL5Ztd-l-YrHII7U-yM-k,1053
 llama_cloud/types/sentence_chunking_config.py,sha256=NA9xidK5ICxJPkEMQZWNcsV0Hw9Co_bzRWeYe4uSh9I,1116
 llama_cloud/types/sparse_model_config.py,sha256=vwt0_3ncjFCtNyWsMSYRrVuoTAWsdnQCHSTUM4HK-Lc,1529
 llama_cloud/types/sparse_model_type.py,sha256=vmjOS3tSqopsvxWqw3keeIL4kgskJv6TJL-Gw_qQQ5s,933
+llama_cloud/types/spreadsheet_job.py,sha256=S5pkG1TVrZPRJheAqy4ldN9hO821UDJo8K71FlL47LI,2062
+llama_cloud/types/spreadsheet_parsing_config.py,sha256=0W0WqaL_kHjf_7A6TIjWYcyvvGumINC0yiB3s4xtWiw,1069
 llama_cloud/types/status_enum.py,sha256=cUBIlys89E8PUzmVqqawu7qTDF0aRqBwiijOmRDPvx0,1018
-llama_cloud/types/struct_mode.py,sha256=ROicwjXfFmgVU8_xSVxJlnFUzRNKG5VIEF1wYg9uOPU,1020
-llama_cloud/types/struct_parse_conf.py,sha256=3QQBy8VP9JB16d4fTGK_GiU6PUALIOWCN9GYI3in6ic,2439
 llama_cloud/types/supported_llm_model.py,sha256=hubSopFICVNEegbJbtbpK6zRHwFPwUNtrw_NAw_3bfg,1380
 llama_cloud/types/supported_llm_model_names.py,sha256=w2FrfffSwpJflq1EoO6Kw7ViTOZNGX4hf60k0Qf3VLA,3213
 llama_cloud/types/text_node.py,sha256=Tq3QmuKC5cIHvC9wAtvhsXl1g2sACs2yJwQ0Uko8GSU,2846
@@ -382,7 +379,7 @@ llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2B
 llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
 llama_cloud/types/webhook_configuration.py,sha256=E0QIuApBLlFGgdsy5VjGIkodclJvAxSO8y8n3DsGHrg,1398
 llama_cloud/types/webhook_configuration_webhook_events_item.py,sha256=OL3moFO_6hsKZYSBQBsSHmWA0NgLcLJgBPZfABwT60c,2544
-llama_cloud-0.1.42.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
-llama_cloud-0.1.42.dist-info/METADATA,sha256=vErxIy1G7T_HC_CElPfrmjljgy4z0SBhFHRwvw-AiGw,2706
-llama_cloud-0.1.42.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-llama_cloud-0.1.42.dist-info/RECORD,,
+llama_cloud-0.1.43.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+llama_cloud-0.1.43.dist-info/METADATA,sha256=2MbPx3Q6WATJbYOg21kE_SOcp5NsUDpU5a-c7uG75_Y,2706
+llama_cloud-0.1.43.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+llama_cloud-0.1.43.dist-info/RECORD,,
@@ -1,29 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import enum
-import typing
-
-T_Result = typing.TypeVar("T_Result")
-
-
-class ChunkMode(str, enum.Enum):
-    PAGE = "PAGE"
-    DOCUMENT = "DOCUMENT"
-    SECTION = "SECTION"
-    GROUPED_PAGES = "GROUPED_PAGES"
-
-    def visit(
-        self,
-        page: typing.Callable[[], T_Result],
-        document: typing.Callable[[], T_Result],
-        section: typing.Callable[[], T_Result],
-        grouped_pages: typing.Callable[[], T_Result],
-    ) -> T_Result:
-        if self is ChunkMode.PAGE:
-            return page()
-        if self is ChunkMode.DOCUMENT:
-            return document()
-        if self is ChunkMode.SECTION:
-            return section()
-        if self is ChunkMode.GROUPED_PAGES:
-            return grouped_pages()