llama-cloud 0.1.40__py3-none-any.whl → 0.1.42__py3-none-any.whl

This diff compares the contents of publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release: this version of llama-cloud might be problematic.
Files changed (60)
  1. llama_cloud/__init__.py +18 -72
  2. llama_cloud/client.py +2 -5
  3. llama_cloud/resources/__init__.py +0 -4
  4. llama_cloud/resources/alpha/client.py +14 -30
  5. llama_cloud/resources/beta/client.py +1791 -344
  6. llama_cloud/resources/llama_extract/client.py +48 -0
  7. llama_cloud/resources/organizations/client.py +18 -4
  8. llama_cloud/resources/parsing/client.py +56 -0
  9. llama_cloud/resources/pipelines/client.py +164 -0
  10. llama_cloud/types/__init__.py +18 -72
  11. llama_cloud/types/agent_data.py +1 -1
  12. llama_cloud/types/agent_deployment_summary.py +1 -2
  13. llama_cloud/types/{report_create_response.py → api_key.py} +14 -2
  14. llama_cloud/types/{edit_suggestion.py → api_key_query_response.py} +6 -6
  15. llama_cloud/types/api_key_type.py +17 -0
  16. llama_cloud/types/{src_app_schema_chat_chat_message.py → chat_message.py} +1 -1
  17. llama_cloud/types/extract_config.py +8 -2
  18. llama_cloud/types/extract_models.py +28 -28
  19. llama_cloud/types/legacy_parse_job_config.py +3 -0
  20. llama_cloud/types/llama_extract_mode_availability.py +4 -3
  21. llama_cloud/types/llama_extract_settings.py +1 -1
  22. llama_cloud/types/llama_parse_parameters.py +7 -0
  23. llama_cloud/types/organization.py +1 -0
  24. llama_cloud/types/{progress_event.py → parse_configuration.py} +12 -12
  25. llama_cloud/types/{llama_index_core_base_llms_types_chat_message.py → parse_configuration_create.py} +9 -7
  26. llama_cloud/types/{report_update_event.py → parse_configuration_filter.py} +8 -6
  27. llama_cloud/types/{report_state_event.py → parse_configuration_query_response.py} +6 -6
  28. llama_cloud/types/parse_job_config.py +7 -0
  29. llama_cloud/types/pipeline_create.py +1 -1
  30. llama_cloud/types/playground_session.py +2 -2
  31. llama_cloud/types/public_model_name.py +97 -0
  32. llama_cloud/types/quota_configuration_configuration_type.py +4 -0
  33. {llama_cloud-0.1.40.dist-info → llama_cloud-0.1.42.dist-info}/METADATA +1 -1
  34. {llama_cloud-0.1.40.dist-info → llama_cloud-0.1.42.dist-info}/RECORD +36 -58
  35. {llama_cloud-0.1.40.dist-info → llama_cloud-0.1.42.dist-info}/WHEEL +1 -1
  36. llama_cloud/resources/reports/__init__.py +0 -5
  37. llama_cloud/resources/reports/client.py +0 -1230
  38. llama_cloud/resources/reports/types/__init__.py +0 -7
  39. llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +0 -25
  40. llama_cloud/types/audio_block.py +0 -34
  41. llama_cloud/types/document_block.py +0 -35
  42. llama_cloud/types/edit_suggestion_blocks_item.py +0 -8
  43. llama_cloud/types/image_block.py +0 -35
  44. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +0 -56
  45. llama_cloud/types/paginated_report_response.py +0 -35
  46. llama_cloud/types/progress_event_status.py +0 -33
  47. llama_cloud/types/report.py +0 -33
  48. llama_cloud/types/report_block.py +0 -35
  49. llama_cloud/types/report_block_dependency.py +0 -29
  50. llama_cloud/types/report_event_item.py +0 -40
  51. llama_cloud/types/report_event_item_event_data.py +0 -45
  52. llama_cloud/types/report_event_type.py +0 -37
  53. llama_cloud/types/report_metadata.py +0 -43
  54. llama_cloud/types/report_plan.py +0 -36
  55. llama_cloud/types/report_plan_block.py +0 -36
  56. llama_cloud/types/report_query.py +0 -33
  57. llama_cloud/types/report_response.py +0 -41
  58. llama_cloud/types/report_state.py +0 -37
  59. llama_cloud/types/text_block.py +0 -31
  60. {llama_cloud-0.1.40.dist-info → llama_cloud-0.1.42.dist-info}/LICENSE +0 -0
@@ -19,7 +19,9 @@ from .agent_data import AgentData
 from .agent_deployment_list import AgentDeploymentList
 from .agent_deployment_summary import AgentDeploymentSummary
 from .aggregate_group import AggregateGroup
-from .audio_block import AudioBlock
+from .api_key import ApiKey
+from .api_key_query_response import ApiKeyQueryResponse
+from .api_key_type import ApiKeyType
 from .auto_transform_config import AutoTransformConfig
 from .azure_open_ai_embedding import AzureOpenAiEmbedding
 from .azure_open_ai_embedding_config import AzureOpenAiEmbeddingConfig
@@ -39,6 +41,7 @@ from .character_chunking_config import CharacterChunkingConfig
 from .chat_app import ChatApp
 from .chat_app_response import ChatAppResponse
 from .chat_data import ChatData
+from .chat_message import ChatMessage
 from .chunk_mode import ChunkMode
 from .classification_result import ClassificationResult
 from .classifier_rule import ClassifierRule
@@ -88,11 +91,8 @@ from .data_source_reader_version_metadata import DataSourceReaderVersionMetadata
 from .data_source_reader_version_metadata_reader_version import DataSourceReaderVersionMetadataReaderVersion
 from .data_source_update_dispatcher_config import DataSourceUpdateDispatcherConfig
 from .delete_params import DeleteParams
-from .document_block import DocumentBlock
 from .document_chunk_mode import DocumentChunkMode
 from .document_ingestion_job_params import DocumentIngestionJobParams
-from .edit_suggestion import EditSuggestion
-from .edit_suggestion_blocks_item import EditSuggestionBlocksItem
 from .element_segmentation_config import ElementSegmentationConfig
 from .embedding_model_config import EmbeddingModelConfig
 from .embedding_model_config_embedding_config import (
@@ -178,7 +178,6 @@ from .http_validation_error import HttpValidationError
 from .hugging_face_inference_api_embedding import HuggingFaceInferenceApiEmbedding
 from .hugging_face_inference_api_embedding_config import HuggingFaceInferenceApiEmbeddingConfig
 from .hugging_face_inference_api_embedding_token import HuggingFaceInferenceApiEmbeddingToken
-from .image_block import ImageBlock
 from .ingestion_error_response import IngestionErrorResponse
 from .input_message import InputMessage
 from .job_name_mapping import JobNameMapping
@@ -204,14 +203,6 @@ from .llama_extract_feature_availability import LlamaExtractFeatureAvailability
 from .llama_extract_mode_availability import LlamaExtractModeAvailability
 from .llama_extract_mode_availability_status import LlamaExtractModeAvailabilityStatus
 from .llama_extract_settings import LlamaExtractSettings
-from .llama_index_core_base_llms_types_chat_message import LlamaIndexCoreBaseLlmsTypesChatMessage
-from .llama_index_core_base_llms_types_chat_message_blocks_item import (
-    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem,
-    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio,
-    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document,
-    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
-    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text,
-)
 from .llama_parse_parameters import LlamaParseParameters
 from .llama_parse_parameters_priority import LlamaParseParametersPriority
 from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
@@ -246,11 +237,14 @@ from .paginated_extract_runs_response import PaginatedExtractRunsResponse
 from .paginated_jobs_history_with_metrics import PaginatedJobsHistoryWithMetrics
 from .paginated_list_cloud_documents_response import PaginatedListCloudDocumentsResponse
 from .paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
-from .paginated_report_response import PaginatedReportResponse
 from .paginated_response_agent_data import PaginatedResponseAgentData
 from .paginated_response_aggregate_group import PaginatedResponseAggregateGroup
 from .paginated_response_classify_job import PaginatedResponseClassifyJob
 from .paginated_response_quota_configuration import PaginatedResponseQuotaConfiguration
+from .parse_configuration import ParseConfiguration
+from .parse_configuration_create import ParseConfigurationCreate
+from .parse_configuration_filter import ParseConfigurationFilter
+from .parse_configuration_query_response import ParseConfigurationQueryResponse
 from .parse_job_config import ParseJobConfig
 from .parse_job_config_priority import ParseJobConfigPriority
 from .parse_plan_level import ParsePlanLevel
@@ -326,11 +320,10 @@ from .preset_retrieval_params_search_filters_inference_schema_value import (
     PresetRetrievalParamsSearchFiltersInferenceSchemaValue,
 )
 from .presigned_url import PresignedUrl
-from .progress_event import ProgressEvent
-from .progress_event_status import ProgressEventStatus
 from .project import Project
 from .project_create import ProjectCreate
 from .prompt_conf import PromptConf
+from .public_model_name import PublicModelName
 from .quota_configuration import QuotaConfiguration
 from .quota_configuration_configuration_type import QuotaConfigurationConfigurationType
 from .quota_configuration_status import QuotaConfigurationStatus
@@ -341,26 +334,6 @@ from .re_ranker_type import ReRankerType
 from .recurring_credit_grant import RecurringCreditGrant
 from .related_node_info import RelatedNodeInfo
 from .related_node_info_node_type import RelatedNodeInfoNodeType
-from .report import Report
-from .report_block import ReportBlock
-from .report_block_dependency import ReportBlockDependency
-from .report_create_response import ReportCreateResponse
-from .report_event_item import ReportEventItem
-from .report_event_item_event_data import (
-    ReportEventItemEventData,
-    ReportEventItemEventData_Progress,
-    ReportEventItemEventData_ReportBlockUpdate,
-    ReportEventItemEventData_ReportStateUpdate,
-)
-from .report_event_type import ReportEventType
-from .report_metadata import ReportMetadata
-from .report_plan import ReportPlan
-from .report_plan_block import ReportPlanBlock
-from .report_query import ReportQuery
-from .report_response import ReportResponse
-from .report_state import ReportState
-from .report_state_event import ReportStateEvent
-from .report_update_event import ReportUpdateEvent
 from .retrieval_mode import RetrievalMode
 from .retrieve_results import RetrieveResults
 from .retriever import Retriever
@@ -374,13 +347,11 @@ from .semantic_chunking_config import SemanticChunkingConfig
 from .sentence_chunking_config import SentenceChunkingConfig
 from .sparse_model_config import SparseModelConfig
 from .sparse_model_type import SparseModelType
-from .src_app_schema_chat_chat_message import SrcAppSchemaChatChatMessage
 from .status_enum import StatusEnum
 from .struct_mode import StructMode
 from .struct_parse_conf import StructParseConf
 from .supported_llm_model import SupportedLlmModel
 from .supported_llm_model_names import SupportedLlmModelNames
-from .text_block import TextBlock
 from .text_node import TextNode
 from .text_node_relationships_value import TextNodeRelationshipsValue
 from .text_node_with_score import TextNodeWithScore
@@ -420,7 +391,9 @@ __all__ = [
     "AgentDeploymentList",
     "AgentDeploymentSummary",
     "AggregateGroup",
-    "AudioBlock",
+    "ApiKey",
+    "ApiKeyQueryResponse",
+    "ApiKeyType",
     "AutoTransformConfig",
     "AzureOpenAiEmbedding",
     "AzureOpenAiEmbeddingConfig",
@@ -440,6 +413,7 @@ __all__ = [
     "ChatApp",
     "ChatAppResponse",
     "ChatData",
+    "ChatMessage",
     "ChunkMode",
     "ClassificationResult",
     "ClassifierRule",
@@ -489,11 +463,8 @@ __all__ = [
     "DataSourceReaderVersionMetadataReaderVersion",
     "DataSourceUpdateDispatcherConfig",
     "DeleteParams",
-    "DocumentBlock",
     "DocumentChunkMode",
     "DocumentIngestionJobParams",
-    "EditSuggestion",
-    "EditSuggestionBlocksItem",
     "ElementSegmentationConfig",
     "EmbeddingModelConfig",
     "EmbeddingModelConfigEmbeddingConfig",
@@ -575,7 +546,6 @@ __all__ = [
     "HuggingFaceInferenceApiEmbedding",
     "HuggingFaceInferenceApiEmbeddingConfig",
     "HuggingFaceInferenceApiEmbeddingToken",
-    "ImageBlock",
     "IngestionErrorResponse",
     "InputMessage",
     "JobNameMapping",
@@ -599,12 +569,6 @@ __all__ = [
     "LlamaExtractModeAvailability",
     "LlamaExtractModeAvailabilityStatus",
     "LlamaExtractSettings",
-    "LlamaIndexCoreBaseLlmsTypesChatMessage",
-    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem",
-    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio",
-    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document",
-    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image",
-    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
     "LlamaParseParameters",
     "LlamaParseParametersPriority",
     "LlamaParseSupportedFileExtensions",
@@ -639,11 +603,14 @@ __all__ = [
     "PaginatedJobsHistoryWithMetrics",
     "PaginatedListCloudDocumentsResponse",
     "PaginatedListPipelineFilesResponse",
-    "PaginatedReportResponse",
     "PaginatedResponseAgentData",
     "PaginatedResponseAggregateGroup",
     "PaginatedResponseClassifyJob",
     "PaginatedResponseQuotaConfiguration",
+    "ParseConfiguration",
+    "ParseConfigurationCreate",
+    "ParseConfigurationFilter",
+    "ParseConfigurationQueryResponse",
     "ParseJobConfig",
     "ParseJobConfigPriority",
     "ParsePlanLevel",
@@ -711,11 +678,10 @@ __all__ = [
     "PresetRetrievalParams",
     "PresetRetrievalParamsSearchFiltersInferenceSchemaValue",
     "PresignedUrl",
-    "ProgressEvent",
-    "ProgressEventStatus",
     "Project",
     "ProjectCreate",
     "PromptConf",
+    "PublicModelName",
     "QuotaConfiguration",
     "QuotaConfigurationConfigurationType",
     "QuotaConfigurationStatus",
@@ -726,24 +692,6 @@ __all__ = [
     "RecurringCreditGrant",
     "RelatedNodeInfo",
     "RelatedNodeInfoNodeType",
-    "Report",
-    "ReportBlock",
-    "ReportBlockDependency",
-    "ReportCreateResponse",
-    "ReportEventItem",
-    "ReportEventItemEventData",
-    "ReportEventItemEventData_Progress",
-    "ReportEventItemEventData_ReportBlockUpdate",
-    "ReportEventItemEventData_ReportStateUpdate",
-    "ReportEventType",
-    "ReportMetadata",
-    "ReportPlan",
-    "ReportPlanBlock",
-    "ReportQuery",
-    "ReportResponse",
-    "ReportState",
-    "ReportStateEvent",
-    "ReportUpdateEvent",
     "RetrievalMode",
     "RetrieveResults",
     "Retriever",
@@ -757,13 +705,11 @@ __all__ = [
     "SentenceChunkingConfig",
     "SparseModelConfig",
     "SparseModelType",
-    "SrcAppSchemaChatChatMessage",
    "StatusEnum",
    "StructMode",
    "StructParseConf",
    "SupportedLlmModel",
    "SupportedLlmModelNames",
-    "TextBlock",
    "TextNode",
    "TextNodeRelationshipsValue",
    "TextNodeWithScore",
@@ -20,7 +20,7 @@ class AgentData(pydantic.BaseModel):
     """

     id: typing.Optional[str]
-    agent_slug: str
+    deployment_name: str
     collection: typing.Optional[str]
     data: typing.Dict[str, typing.Any]
     created_at: typing.Optional[dt.datetime]
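
Note that `agent_slug` becomes `deployment_name` on `AgentData`, a breaking change for callers that build or filter this model by the old field. A minimal sketch of the updated usage, assuming the model is re-exported from `llama_cloud.types` as the `__init__.py` diff above indicates:

    from llama_cloud.types import AgentData

    # 0.1.40: AgentData(agent_slug="invoice-agent", data={...})
    # 0.1.42: the deployment is identified by deployment_name instead
    record = AgentData(
        deployment_name="invoice-agent",  # hypothetical deployment identifier
        data={"invoice_id": "INV-001", "status": "processed"},
    )
    print(record.json())
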
@@ -17,10 +17,9 @@ except ImportError:
 class AgentDeploymentSummary(pydantic.BaseModel):
     id: str = pydantic.Field(description="Deployment ID. Prefixed with dpl-")
     project_id: str = pydantic.Field(description="Project ID")
-    agent_slug: str = pydantic.Field(description="readable ID of the deployed app")
+    deployment_name: str = pydantic.Field(description="Identifier of the deployed app")
     thumbnail_url: typing.Optional[str]
     base_url: str = pydantic.Field(description="Base URL of the deployed app")
-    display_name: str = pydantic.Field(description="Display name of the deployed app")
     created_at: dt.datetime = pydantic.Field(description="Timestamp when the app deployment was created")
     updated_at: dt.datetime = pydantic.Field(description="Timestamp when the app deployment was last updated")
     api_key_id: typing.Optional[str]
@@ -4,6 +4,7 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
+from .api_key_type import ApiKeyType

 try:
     import pydantic
@@ -14,8 +15,19 @@ except ImportError:
     import pydantic  # type: ignore


-class ReportCreateResponse(pydantic.BaseModel):
-    id: str = pydantic.Field(description="The id of the report")
+class ApiKey(pydantic.BaseModel):
+    """
+    Schema for an API Key.
+    """
+
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
+    name: typing.Optional[str]
+    project_id: typing.Optional[str]
+    key_type: typing.Optional[ApiKeyType]
+    user_id: str
+    redacted_api_key: str

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -4,7 +4,7 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
-from .edit_suggestion_blocks_item import EditSuggestionBlocksItem
+from .api_key import ApiKey

 try:
     import pydantic
@@ -15,14 +15,14 @@ except ImportError:
     import pydantic  # type: ignore


-class EditSuggestion(pydantic.BaseModel):
+class ApiKeyQueryResponse(pydantic.BaseModel):
     """
-    A suggestion for an edit to a report.
+    Response schema for paginated API key queries.
     """

-    justification: str
-    blocks: typing.List[EditSuggestionBlocksItem]
-    removed_indices: typing.Optional[typing.List[int]]
+    items: typing.List[ApiKey] = pydantic.Field(description="The list of items.")
+    next_page_token: typing.Optional[str]
+    total_size: typing.Optional[int]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
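
The new `ApiKeyQueryResponse` follows the same paginated shape as the other `*QueryResponse` models in this release (`items`, `next_page_token`, `total_size`). A sketch of draining such a response; the fetch callable is left abstract because the corresponding client method is not shown in this diff:

    import typing

    from llama_cloud.types import ApiKey, ApiKeyQueryResponse

    def collect_api_keys(
        fetch_page: typing.Callable[[typing.Optional[str]], ApiKeyQueryResponse],
    ) -> typing.List[ApiKey]:
        """Follow next_page_token until the paginated query is exhausted."""
        keys: typing.List[ApiKey] = []
        token: typing.Optional[str] = None
        while True:
            page = fetch_page(token)
            keys.extend(page.items)
            token = page.next_page_token
            if not token:
                return keys
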
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class ApiKeyType(str, enum.Enum):
+    USER = "user"
+    AGENT = "agent"
+
+    def visit(self, user: typing.Callable[[], T_Result], agent: typing.Callable[[], T_Result]) -> T_Result:
+        if self is ApiKeyType.USER:
+            return user()
+        if self is ApiKeyType.AGENT:
+            return agent()
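
Like the other Fern-generated enums in this package, `ApiKeyType` carries a `visit` helper that dispatches on the member. A small sketch of plain comparison and the visitor form, based only on the generated code above:

    from llama_cloud.types import ApiKeyType

    key_type = ApiKeyType.AGENT

    # Plain enum comparison
    label = "user-scoped key" if key_type is ApiKeyType.USER else "agent-scoped key"

    # Equivalent dispatch via the generated visit() helper
    label = key_type.visit(
        user=lambda: "user-scoped key",
        agent=lambda: "agent-scoped key",
    )
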
@@ -16,7 +16,7 @@ except ImportError:
     import pydantic  # type: ignore


-class SrcAppSchemaChatChatMessage(pydantic.BaseModel):
+class ChatMessage(pydantic.BaseModel):
     id: str
     index: int = pydantic.Field(description="The index of the message in the chat.")
     annotations: typing.Optional[typing.List[MessageAnnotation]] = pydantic.Field(
@@ -7,7 +7,9 @@ from ..core.datetime_utils import serialize_datetime
 from .document_chunk_mode import DocumentChunkMode
 from .extract_config_priority import ExtractConfigPriority
 from .extract_mode import ExtractMode
+from .extract_models import ExtractModels
 from .extract_target import ExtractTarget
+from .public_model_name import PublicModelName

 try:
     import pydantic
@@ -25,9 +27,13 @@ class ExtractConfig(pydantic.BaseModel):

     priority: typing.Optional[ExtractConfigPriority]
     extraction_target: typing.Optional[ExtractTarget] = pydantic.Field(description="The extraction target specified.")
-    extraction_mode: typing.Optional[ExtractMode] = pydantic.Field(description="The extraction mode specified.")
+    extraction_mode: typing.Optional[ExtractMode] = pydantic.Field(
+        description="The extraction mode specified (FAST, BALANCED, MULTIMODAL, PREMIUM)."
+    )
+    parse_model: typing.Optional[PublicModelName]
+    extract_model: typing.Optional[ExtractModels]
     multimodal_fast_mode: typing.Optional[bool] = pydantic.Field(
-        description="Whether to use fast mode for multimodal extraction."
+        description="DEPRECATED: Whether to use fast mode for multimodal extraction."
     )
     system_prompt: typing.Optional[str]
     use_reasoning: typing.Optional[bool] = pydantic.Field(description="Whether to use reasoning for the extraction.")
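
`ExtractConfig` gains optional `parse_model` and `extract_model` fields (and deprecates `multimodal_fast_mode`), so the parsing and extraction models can be pinned explicitly. A hedged sketch, assuming the fields shown here are all optional; the members of the new `PublicModelName` enum are not listed in this diff, so only `extract_model` is set:

    from llama_cloud.types import ExtractConfig, ExtractModels

    config = ExtractConfig(
        extract_model=ExtractModels.OPENAI_GPT_41_MINI,  # new member name in 0.1.42
        # parse_model=...  # PublicModelName members are not shown in this diff
    )
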
@@ -7,47 +7,47 @@ T_Result = typing.TypeVar("T_Result")


 class ExtractModels(str, enum.Enum):
-    GPT_41 = "gpt-4.1"
-    GPT_41_MINI = "gpt-4.1-mini"
-    GPT_41_NANO = "gpt-4.1-nano"
+    OPENAI_GPT_41 = "openai-gpt-4-1"
+    OPENAI_GPT_41_MINI = "openai-gpt-4-1-mini"
+    OPENAI_GPT_41_NANO = "openai-gpt-4-1-nano"
+    OPENAI_GPT_5 = "openai-gpt-5"
+    OPENAI_GPT_5_MINI = "openai-gpt-5-mini"
     GEMINI_20_FLASH = "gemini-2.0-flash"
-    O_3_MINI = "o3-mini"
     GEMINI_25_FLASH = "gemini-2.5-flash"
     GEMINI_25_PRO = "gemini-2.5-pro"
-    GEMINI_25_FLASH_LITE_PREVIEW_0617 = "gemini-2.5-flash-lite-preview-06-17"
-    GPT_4_O = "gpt-4o"
-    GPT_4_O_MINI = "gpt-4o-mini"
+    OPENAI_GPT_4_O = "openai-gpt-4o"
+    OPENAI_GPT_4_O_MINI = "openai-gpt-4o-mini"

     def visit(
         self,
-        gpt_41: typing.Callable[[], T_Result],
-        gpt_41_mini: typing.Callable[[], T_Result],
-        gpt_41_nano: typing.Callable[[], T_Result],
+        openai_gpt_41: typing.Callable[[], T_Result],
+        openai_gpt_41_mini: typing.Callable[[], T_Result],
+        openai_gpt_41_nano: typing.Callable[[], T_Result],
+        openai_gpt_5: typing.Callable[[], T_Result],
+        openai_gpt_5_mini: typing.Callable[[], T_Result],
         gemini_20_flash: typing.Callable[[], T_Result],
-        o_3_mini: typing.Callable[[], T_Result],
         gemini_25_flash: typing.Callable[[], T_Result],
         gemini_25_pro: typing.Callable[[], T_Result],
-        gemini_25_flash_lite_preview_0617: typing.Callable[[], T_Result],
-        gpt_4_o: typing.Callable[[], T_Result],
-        gpt_4_o_mini: typing.Callable[[], T_Result],
+        openai_gpt_4_o: typing.Callable[[], T_Result],
+        openai_gpt_4_o_mini: typing.Callable[[], T_Result],
     ) -> T_Result:
-        if self is ExtractModels.GPT_41:
-            return gpt_41()
-        if self is ExtractModels.GPT_41_MINI:
-            return gpt_41_mini()
-        if self is ExtractModels.GPT_41_NANO:
-            return gpt_41_nano()
+        if self is ExtractModels.OPENAI_GPT_41:
+            return openai_gpt_41()
+        if self is ExtractModels.OPENAI_GPT_41_MINI:
+            return openai_gpt_41_mini()
+        if self is ExtractModels.OPENAI_GPT_41_NANO:
+            return openai_gpt_41_nano()
+        if self is ExtractModels.OPENAI_GPT_5:
+            return openai_gpt_5()
+        if self is ExtractModels.OPENAI_GPT_5_MINI:
+            return openai_gpt_5_mini()
         if self is ExtractModels.GEMINI_20_FLASH:
             return gemini_20_flash()
-        if self is ExtractModels.O_3_MINI:
-            return o_3_mini()
         if self is ExtractModels.GEMINI_25_FLASH:
             return gemini_25_flash()
         if self is ExtractModels.GEMINI_25_PRO:
             return gemini_25_pro()
-        if self is ExtractModels.GEMINI_25_FLASH_LITE_PREVIEW_0617:
-            return gemini_25_flash_lite_preview_0617()
-        if self is ExtractModels.GPT_4_O:
-            return gpt_4_o()
-        if self is ExtractModels.GPT_4_O_MINI:
-            return gpt_4_o_mini()
+        if self is ExtractModels.OPENAI_GPT_4_O:
+            return openai_gpt_4_o()
+        if self is ExtractModels.OPENAI_GPT_4_O_MINI:
+            return openai_gpt_4_o_mini()
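
The OpenAI-backed `ExtractModels` members are renamed with an `openai-` prefix, `o3-mini` and the Gemini 2.5 Flash Lite preview are dropped, and GPT-5 variants are added, so any member name pinned against 0.1.40 needs updating. A reference mapping of the renames taken directly from the hunk above:

    from llama_cloud.types import ExtractModels

    # Old 0.1.40 member -> new 0.1.42 member
    RENAMED = {
        "GPT_41": ExtractModels.OPENAI_GPT_41,
        "GPT_41_MINI": ExtractModels.OPENAI_GPT_41_MINI,
        "GPT_41_NANO": ExtractModels.OPENAI_GPT_41_NANO,
        "GPT_4_O": ExtractModels.OPENAI_GPT_4_O,
        "GPT_4_O_MINI": ExtractModels.OPENAI_GPT_4_O_MINI,
    }
    # O_3_MINI and GEMINI_25_FLASH_LITE_PREVIEW_0617 are removed outright,
    # while OPENAI_GPT_5 and OPENAI_GPT_5_MINI are new in this release.
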
@@ -59,6 +59,9 @@ class LegacyParseJobConfig(pydantic.BaseModel):
         alias="doNotUnrollColumns", description="Whether to unroll columns."
     )
     spread_sheet_extract_sub_tables: typing.Optional[bool] = pydantic.Field(alias="spreadSheetExtractSubTables")
+    spread_sheet_force_formula_computation: typing.Optional[bool] = pydantic.Field(
+        alias="spreadSheetForceFormulaComputation"
+    )
     extract_layout: typing.Optional[bool] = pydantic.Field(alias="extractLayout")
     high_res_ocr: typing.Optional[bool] = pydantic.Field(alias="highResOcr")
     html_make_all_elements_visible: typing.Optional[bool] = pydantic.Field(alias="htmlMakeAllElementsVisible")
@@ -19,9 +19,10 @@ class LlamaExtractModeAvailability(pydantic.BaseModel):
     mode: str
     status: LlamaExtractModeAvailabilityStatus
     parse_mode: str
-    parse_models: typing.List[str]
-    extract_models: typing.List[str]
-    missing_models: typing.Optional[typing.List[str]]
+    available_parse_models: typing.Optional[typing.List[str]]
+    missing_parse_models: typing.Optional[typing.List[str]]
+    available_extract_models: typing.Optional[typing.List[str]]
+    missing_extract_models: typing.Optional[typing.List[str]]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -44,7 +44,7 @@ class LlamaExtractSettings(pydantic.BaseModel):
         description="Whether to use experimental multimodal parsing."
     )
     use_pixel_extraction: typing.Optional[bool] = pydantic.Field(
-        description="Whether to use extraction over pixels for multimodal mode."
+        description="DEPRECATED: Whether to use extraction over pixels for multimodal mode."
     )
     llama_parse_params: typing.Optional[LlamaParseParameters] = pydantic.Field(
         description="LlamaParse related settings."
@@ -50,6 +50,11 @@ class LlamaParseParameters(pydantic.BaseModel):
     high_res_ocr: typing.Optional[bool]
     html_make_all_elements_visible: typing.Optional[bool]
     layout_aware: typing.Optional[bool]
+    specialized_chart_parsing_agentic: typing.Optional[bool]
+    specialized_chart_parsing_plus: typing.Optional[bool]
+    specialized_chart_parsing_efficient: typing.Optional[bool]
+    specialized_image_parsing: typing.Optional[bool]
+    precise_bounding_box: typing.Optional[bool]
     html_remove_navigation_elements: typing.Optional[bool]
     html_remove_fixed_elements: typing.Optional[bool]
     guess_xlsx_sheet_name: typing.Optional[bool]
@@ -99,6 +104,8 @@ class LlamaParseParameters(pydantic.BaseModel):
     complemental_formatting_instruction: typing.Optional[str]
     content_guideline_instruction: typing.Optional[str]
     spreadsheet_extract_sub_tables: typing.Optional[bool]
+    spreadsheet_force_formula_computation: typing.Optional[bool]
+    inline_images_in_markdown: typing.Optional[bool]
     job_timeout_in_seconds: typing.Optional[float]
     job_timeout_extra_time_per_page_in_seconds: typing.Optional[float]
     strict_mode_image_extraction: typing.Optional[bool]
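
`LlamaParseParameters` picks up several new optional booleans (specialized chart/image parsing, precise bounding boxes, spreadsheet formula computation, inline images in markdown). Because all of them are optional, existing configurations keep working; a sketch of opting into a couple of the new flags, assuming the remaining fields keep their defaults:

    from llama_cloud.types import LlamaParseParameters

    params = LlamaParseParameters(
        specialized_chart_parsing_efficient=True,    # new in 0.1.42
        spreadsheet_force_formula_computation=True,  # new in 0.1.42
        high_res_ocr=True,                           # existing flag, unchanged
    )
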
@@ -28,6 +28,7 @@ class Organization(pydantic.BaseModel):
     parse_plan_level: typing.Optional[ParsePlanLevel] = pydantic.Field(
         description="Whether the organization is a Parse Premium customer."
     )
+    feature_flags: typing.Optional[typing.Dict[str, typing.Any]]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -4,8 +4,7 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
-from .progress_event_status import ProgressEventStatus
-from .report_event_type import ReportEventType
+from .llama_parse_parameters import LlamaParseParameters

 try:
     import pydantic
@@ -16,19 +15,20 @@ except ImportError:
     import pydantic  # type: ignore


-class ProgressEvent(pydantic.BaseModel):
+class ParseConfiguration(pydantic.BaseModel):
     """
-    Event for tracking progress of operations in workflows.
+    Parse configuration schema.
     """

-    timestamp: typing.Optional[dt.datetime]
-    id: typing.Optional[str] = pydantic.Field(description="The ID of the event")
-    group_id: typing.Optional[str] = pydantic.Field(description="The ID of the group this event belongs to")
-    variant: ReportEventType
-    msg: str = pydantic.Field(description="The message to display to the user")
-    progress: typing.Optional[float]
-    status: typing.Optional[ProgressEventStatus] = pydantic.Field(description="Current status of the operation")
-    extra_detail: typing.Optional[typing.Dict[str, typing.Any]]
+    id: str = pydantic.Field(description="Unique identifier for the parse configuration")
+    name: str = pydantic.Field(description="Name of the parse configuration")
+    source_type: str = pydantic.Field(description="Type of the source (e.g., 'project')")
+    source_id: str = pydantic.Field(description="ID of the source")
+    creator: typing.Optional[str]
+    version: str = pydantic.Field(description="Version of the configuration")
+    parameters: LlamaParseParameters = pydantic.Field(description="LlamaParseParameters configuration")
+    created_at: dt.datetime = pydantic.Field(description="Creation timestamp")
+    updated_at: dt.datetime = pydantic.Field(description="Last update timestamp")

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -4,8 +4,7 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
-from .llama_index_core_base_llms_types_chat_message_blocks_item import LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem
-from .message_role import MessageRole
+from .llama_parse_parameters import LlamaParseParameters

 try:
     import pydantic
@@ -16,14 +15,17 @@ except ImportError:
     import pydantic  # type: ignore


-class LlamaIndexCoreBaseLlmsTypesChatMessage(pydantic.BaseModel):
+class ParseConfigurationCreate(pydantic.BaseModel):
     """
-    Chat message.
+    Schema for creating a new parse configuration (API boundary).
     """

-    role: typing.Optional[MessageRole]
-    additional_kwargs: typing.Optional[typing.Dict[str, typing.Any]]
-    blocks: typing.Optional[typing.List[LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem]]
+    name: str = pydantic.Field(description="Name of the parse configuration")
+    source_type: typing.Optional[str]
+    source_id: typing.Optional[str]
+    creator: typing.Optional[str]
+    version: str = pydantic.Field(description="Version of the configuration")
+    parameters: LlamaParseParameters = pydantic.Field(description="LlamaParseParameters configuration")

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
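
The parse-configuration models take over the file paths previously used by the removed report types. A sketch of building a `ParseConfigurationCreate` payload from the fields shown above; the name is hypothetical and `version` is a free-form string here since its expected format is not documented in this diff:

    from llama_cloud.types import LlamaParseParameters, ParseConfigurationCreate

    payload = ParseConfigurationCreate(
        name="invoices-default",  # hypothetical configuration name
        version="1",              # format not specified in this diff
        parameters=LlamaParseParameters(
            high_res_ocr=True,
            layout_aware=True,
        ),
    )
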
@@ -4,7 +4,6 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
-from .report_block import ReportBlock

 try:
     import pydantic
@@ -15,14 +14,17 @@ except ImportError:
     import pydantic  # type: ignore


-class ReportUpdateEvent(pydantic.BaseModel):
+class ParseConfigurationFilter(pydantic.BaseModel):
     """
-    Event for updating the state of an report.
+    Filter parameters for parse configuration queries.
     """

-    timestamp: typing.Optional[dt.datetime]
-    msg: typing.Optional[str] = pydantic.Field(description="The message to display to the user")
-    block: ReportBlock = pydantic.Field(description="The block to update")
+    name: typing.Optional[str]
+    source_type: typing.Optional[str]
+    source_id: typing.Optional[str]
+    creator: typing.Optional[str]
+    version: typing.Optional[str]
+    parse_config_ids: typing.Optional[typing.List[str]]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -4,7 +4,7 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
-from .report_state import ReportState
+from .parse_configuration import ParseConfiguration

 try:
     import pydantic
@@ -15,14 +15,14 @@ except ImportError:
     import pydantic  # type: ignore


-class ReportStateEvent(pydantic.BaseModel):
+class ParseConfigurationQueryResponse(pydantic.BaseModel):
     """
-    Event for notifying when an report's state changes.
+    Response schema for paginated parse configuration queries.
     """

-    timestamp: typing.Optional[dt.datetime]
-    msg: str = pydantic.Field(description="The message to display to the user")
-    status: ReportState = pydantic.Field(description="The new state of the report")
+    items: typing.List[ParseConfiguration] = pydantic.Field(description="The list of items.")
+    next_page_token: typing.Optional[str]
+    total_size: typing.Optional[int]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}