llama-cloud 0.1.39__py3-none-any.whl → 0.1.41__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. llama_cloud/__init__.py +24 -72
  2. llama_cloud/client.py +2 -5
  3. llama_cloud/resources/__init__.py +0 -4
  4. llama_cloud/resources/admin/client.py +5 -5
  5. llama_cloud/resources/beta/client.py +1351 -335
  6. llama_cloud/resources/llama_extract/client.py +56 -0
  7. llama_cloud/resources/parsing/client.py +8 -0
  8. llama_cloud/resources/pipelines/client.py +37 -0
  9. llama_cloud/types/__init__.py +24 -72
  10. llama_cloud/types/{src_app_schema_chat_chat_message.py → chat_message.py} +1 -1
  11. llama_cloud/types/extract_config.py +8 -2
  12. llama_cloud/types/extract_job_create.py +2 -0
  13. llama_cloud/types/extract_job_create_priority.py +29 -0
  14. llama_cloud/types/extract_models.py +28 -28
  15. llama_cloud/types/job_names.py +0 -4
  16. llama_cloud/types/{document_block.py → llama_extract_feature_availability.py} +5 -6
  17. llama_cloud/types/llama_extract_mode_availability.py +4 -3
  18. llama_cloud/types/llama_extract_settings.py +1 -1
  19. llama_cloud/types/llama_parse_parameters.py +1 -0
  20. llama_cloud/types/{progress_event.py → parse_configuration.py} +12 -12
  21. llama_cloud/types/{llama_index_core_base_llms_types_chat_message.py → parse_configuration_create.py} +9 -7
  22. llama_cloud/types/{edit_suggestion.py → parse_configuration_filter.py} +8 -6
  23. llama_cloud/types/{report_update_event.py → parse_configuration_query_response.py} +6 -6
  24. llama_cloud/types/parse_job_config.py +1 -0
  25. llama_cloud/types/pipeline.py +4 -0
  26. llama_cloud/types/pipeline_create.py +2 -0
  27. llama_cloud/types/playground_session.py +2 -2
  28. llama_cloud/types/public_model_name.py +97 -0
  29. llama_cloud/types/{report_create_response.py → schema_generation_availability.py} +4 -2
  30. llama_cloud/types/schema_generation_availability_status.py +17 -0
  31. llama_cloud/types/{report_event_item.py → sparse_model_config.py} +10 -8
  32. llama_cloud/types/sparse_model_type.py +33 -0
  33. llama_cloud/types/webhook_configuration.py +1 -0
  34. llama_cloud-0.1.41.dist-info/METADATA +106 -0
  35. {llama_cloud-0.1.39.dist-info → llama_cloud-0.1.41.dist-info}/RECORD +37 -56
  36. {llama_cloud-0.1.39.dist-info → llama_cloud-0.1.41.dist-info}/WHEEL +1 -1
  37. llama_cloud/resources/reports/__init__.py +0 -5
  38. llama_cloud/resources/reports/client.py +0 -1230
  39. llama_cloud/resources/reports/types/__init__.py +0 -7
  40. llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +0 -25
  41. llama_cloud/types/audio_block.py +0 -34
  42. llama_cloud/types/edit_suggestion_blocks_item.py +0 -8
  43. llama_cloud/types/image_block.py +0 -35
  44. llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +0 -56
  45. llama_cloud/types/paginated_report_response.py +0 -35
  46. llama_cloud/types/progress_event_status.py +0 -33
  47. llama_cloud/types/report.py +0 -33
  48. llama_cloud/types/report_block.py +0 -35
  49. llama_cloud/types/report_block_dependency.py +0 -29
  50. llama_cloud/types/report_event_item_event_data.py +0 -45
  51. llama_cloud/types/report_event_type.py +0 -37
  52. llama_cloud/types/report_metadata.py +0 -43
  53. llama_cloud/types/report_plan.py +0 -36
  54. llama_cloud/types/report_plan_block.py +0 -36
  55. llama_cloud/types/report_query.py +0 -33
  56. llama_cloud/types/report_response.py +0 -41
  57. llama_cloud/types/report_state.py +0 -37
  58. llama_cloud/types/report_state_event.py +0 -38
  59. llama_cloud/types/text_block.py +0 -31
  60. llama_cloud-0.1.39.dist-info/METADATA +0 -32
  61. {llama_cloud-0.1.39.dist-info → llama_cloud-0.1.41.dist-info}/LICENSE +0 -0
@@ -19,7 +19,6 @@ from .agent_data import AgentData
  from .agent_deployment_list import AgentDeploymentList
  from .agent_deployment_summary import AgentDeploymentSummary
  from .aggregate_group import AggregateGroup
- from .audio_block import AudioBlock
  from .auto_transform_config import AutoTransformConfig
  from .azure_open_ai_embedding import AzureOpenAiEmbedding
  from .azure_open_ai_embedding_config import AzureOpenAiEmbeddingConfig
@@ -39,6 +38,7 @@ from .character_chunking_config import CharacterChunkingConfig
  from .chat_app import ChatApp
  from .chat_app_response import ChatAppResponse
  from .chat_data import ChatData
+ from .chat_message import ChatMessage
  from .chunk_mode import ChunkMode
  from .classification_result import ClassificationResult
  from .classifier_rule import ClassifierRule
@@ -88,11 +88,8 @@ from .data_source_reader_version_metadata import DataSourceReaderVersionMetadata
  from .data_source_reader_version_metadata_reader_version import DataSourceReaderVersionMetadataReaderVersion
  from .data_source_update_dispatcher_config import DataSourceUpdateDispatcherConfig
  from .delete_params import DeleteParams
- from .document_block import DocumentBlock
  from .document_chunk_mode import DocumentChunkMode
  from .document_ingestion_job_params import DocumentIngestionJobParams
- from .edit_suggestion import EditSuggestion
- from .edit_suggestion_blocks_item import EditSuggestionBlocksItem
  from .element_segmentation_config import ElementSegmentationConfig
  from .embedding_model_config import EmbeddingModelConfig
  from .embedding_model_config_embedding_config import (
@@ -125,6 +122,7 @@ from .extract_job import ExtractJob
  from .extract_job_create import ExtractJobCreate
  from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
  from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
+ from .extract_job_create_priority import ExtractJobCreatePriority
  from .extract_mode import ExtractMode
  from .extract_models import ExtractModels
  from .extract_resultset import ExtractResultset
@@ -177,7 +175,6 @@ from .http_validation_error import HttpValidationError
  from .hugging_face_inference_api_embedding import HuggingFaceInferenceApiEmbedding
  from .hugging_face_inference_api_embedding_config import HuggingFaceInferenceApiEmbeddingConfig
  from .hugging_face_inference_api_embedding_token import HuggingFaceInferenceApiEmbeddingToken
- from .image_block import ImageBlock
  from .ingestion_error_response import IngestionErrorResponse
  from .input_message import InputMessage
  from .job_name_mapping import JobNameMapping
@@ -199,17 +196,10 @@ from .job_record_with_usage_metrics import JobRecordWithUsageMetrics
  from .l_lama_parse_transform_config import LLamaParseTransformConfig
  from .legacy_parse_job_config import LegacyParseJobConfig
  from .license_info_response import LicenseInfoResponse
+ from .llama_extract_feature_availability import LlamaExtractFeatureAvailability
  from .llama_extract_mode_availability import LlamaExtractModeAvailability
  from .llama_extract_mode_availability_status import LlamaExtractModeAvailabilityStatus
  from .llama_extract_settings import LlamaExtractSettings
- from .llama_index_core_base_llms_types_chat_message import LlamaIndexCoreBaseLlmsTypesChatMessage
- from .llama_index_core_base_llms_types_chat_message_blocks_item import (
-     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem,
-     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio,
-     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document,
-     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
-     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text,
- )
  from .llama_parse_parameters import LlamaParseParameters
  from .llama_parse_parameters_priority import LlamaParseParametersPriority
  from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
@@ -244,11 +234,14 @@ from .paginated_extract_runs_response import PaginatedExtractRunsResponse
  from .paginated_jobs_history_with_metrics import PaginatedJobsHistoryWithMetrics
  from .paginated_list_cloud_documents_response import PaginatedListCloudDocumentsResponse
  from .paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
- from .paginated_report_response import PaginatedReportResponse
  from .paginated_response_agent_data import PaginatedResponseAgentData
  from .paginated_response_aggregate_group import PaginatedResponseAggregateGroup
  from .paginated_response_classify_job import PaginatedResponseClassifyJob
  from .paginated_response_quota_configuration import PaginatedResponseQuotaConfiguration
+ from .parse_configuration import ParseConfiguration
+ from .parse_configuration_create import ParseConfigurationCreate
+ from .parse_configuration_filter import ParseConfigurationFilter
+ from .parse_configuration_query_response import ParseConfigurationQueryResponse
  from .parse_job_config import ParseJobConfig
  from .parse_job_config_priority import ParseJobConfigPriority
  from .parse_plan_level import ParsePlanLevel
@@ -324,11 +317,10 @@ from .preset_retrieval_params_search_filters_inference_schema_value import (
      PresetRetrievalParamsSearchFiltersInferenceSchemaValue,
  )
  from .presigned_url import PresignedUrl
- from .progress_event import ProgressEvent
- from .progress_event_status import ProgressEventStatus
  from .project import Project
  from .project_create import ProjectCreate
  from .prompt_conf import PromptConf
+ from .public_model_name import PublicModelName
  from .quota_configuration import QuotaConfiguration
  from .quota_configuration_configuration_type import QuotaConfigurationConfigurationType
  from .quota_configuration_status import QuotaConfigurationStatus
@@ -339,42 +331,24 @@ from .re_ranker_type import ReRankerType
  from .recurring_credit_grant import RecurringCreditGrant
  from .related_node_info import RelatedNodeInfo
  from .related_node_info_node_type import RelatedNodeInfoNodeType
- from .report import Report
- from .report_block import ReportBlock
- from .report_block_dependency import ReportBlockDependency
- from .report_create_response import ReportCreateResponse
- from .report_event_item import ReportEventItem
- from .report_event_item_event_data import (
-     ReportEventItemEventData,
-     ReportEventItemEventData_Progress,
-     ReportEventItemEventData_ReportBlockUpdate,
-     ReportEventItemEventData_ReportStateUpdate,
- )
- from .report_event_type import ReportEventType
- from .report_metadata import ReportMetadata
- from .report_plan import ReportPlan
- from .report_plan_block import ReportPlanBlock
- from .report_query import ReportQuery
- from .report_response import ReportResponse
- from .report_state import ReportState
- from .report_state_event import ReportStateEvent
- from .report_update_event import ReportUpdateEvent
  from .retrieval_mode import RetrievalMode
  from .retrieve_results import RetrieveResults
  from .retriever import Retriever
  from .retriever_create import RetrieverCreate
  from .retriever_pipeline import RetrieverPipeline
  from .role import Role
+ from .schema_generation_availability import SchemaGenerationAvailability
+ from .schema_generation_availability_status import SchemaGenerationAvailabilityStatus
  from .schema_relax_mode import SchemaRelaxMode
  from .semantic_chunking_config import SemanticChunkingConfig
  from .sentence_chunking_config import SentenceChunkingConfig
- from .src_app_schema_chat_chat_message import SrcAppSchemaChatChatMessage
+ from .sparse_model_config import SparseModelConfig
+ from .sparse_model_type import SparseModelType
  from .status_enum import StatusEnum
  from .struct_mode import StructMode
  from .struct_parse_conf import StructParseConf
  from .supported_llm_model import SupportedLlmModel
  from .supported_llm_model_names import SupportedLlmModelNames
- from .text_block import TextBlock
  from .text_node import TextNode
  from .text_node_relationships_value import TextNodeRelationshipsValue
  from .text_node_with_score import TextNodeWithScore
@@ -414,7 +388,6 @@ __all__ = [
      "AgentDeploymentList",
      "AgentDeploymentSummary",
      "AggregateGroup",
-     "AudioBlock",
      "AutoTransformConfig",
      "AzureOpenAiEmbedding",
      "AzureOpenAiEmbeddingConfig",
@@ -434,6 +407,7 @@ __all__ = [
      "ChatApp",
      "ChatAppResponse",
      "ChatData",
+     "ChatMessage",
      "ChunkMode",
      "ClassificationResult",
      "ClassifierRule",
@@ -483,11 +457,8 @@ __all__ = [
      "DataSourceReaderVersionMetadataReaderVersion",
      "DataSourceUpdateDispatcherConfig",
      "DeleteParams",
-     "DocumentBlock",
      "DocumentChunkMode",
      "DocumentIngestionJobParams",
-     "EditSuggestion",
-     "EditSuggestionBlocksItem",
      "ElementSegmentationConfig",
      "EmbeddingModelConfig",
      "EmbeddingModelConfigEmbeddingConfig",
@@ -516,6 +487,7 @@ __all__ = [
      "ExtractJobCreate",
      "ExtractJobCreateDataSchemaOverride",
      "ExtractJobCreateDataSchemaOverrideZeroValue",
+     "ExtractJobCreatePriority",
      "ExtractMode",
      "ExtractModels",
      "ExtractResultset",
@@ -568,7 +540,6 @@ __all__ = [
      "HuggingFaceInferenceApiEmbedding",
      "HuggingFaceInferenceApiEmbeddingConfig",
      "HuggingFaceInferenceApiEmbeddingToken",
-     "ImageBlock",
      "IngestionErrorResponse",
      "InputMessage",
      "JobNameMapping",
@@ -588,15 +559,10 @@ __all__ = [
      "LLamaParseTransformConfig",
      "LegacyParseJobConfig",
      "LicenseInfoResponse",
+     "LlamaExtractFeatureAvailability",
      "LlamaExtractModeAvailability",
      "LlamaExtractModeAvailabilityStatus",
      "LlamaExtractSettings",
-     "LlamaIndexCoreBaseLlmsTypesChatMessage",
-     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem",
-     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio",
-     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document",
-     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image",
-     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
      "LlamaParseParameters",
      "LlamaParseParametersPriority",
      "LlamaParseSupportedFileExtensions",
@@ -631,11 +597,14 @@ __all__ = [
      "PaginatedJobsHistoryWithMetrics",
      "PaginatedListCloudDocumentsResponse",
      "PaginatedListPipelineFilesResponse",
-     "PaginatedReportResponse",
      "PaginatedResponseAgentData",
      "PaginatedResponseAggregateGroup",
      "PaginatedResponseClassifyJob",
      "PaginatedResponseQuotaConfiguration",
+     "ParseConfiguration",
+     "ParseConfigurationCreate",
+     "ParseConfigurationFilter",
+     "ParseConfigurationQueryResponse",
      "ParseJobConfig",
      "ParseJobConfigPriority",
      "ParsePlanLevel",
@@ -703,11 +672,10 @@ __all__ = [
      "PresetRetrievalParams",
      "PresetRetrievalParamsSearchFiltersInferenceSchemaValue",
      "PresignedUrl",
-     "ProgressEvent",
-     "ProgressEventStatus",
      "Project",
      "ProjectCreate",
      "PromptConf",
+     "PublicModelName",
      "QuotaConfiguration",
      "QuotaConfigurationConfigurationType",
      "QuotaConfigurationStatus",
@@ -718,40 +686,24 @@ __all__ = [
      "RecurringCreditGrant",
      "RelatedNodeInfo",
      "RelatedNodeInfoNodeType",
-     "Report",
-     "ReportBlock",
-     "ReportBlockDependency",
-     "ReportCreateResponse",
-     "ReportEventItem",
-     "ReportEventItemEventData",
-     "ReportEventItemEventData_Progress",
-     "ReportEventItemEventData_ReportBlockUpdate",
-     "ReportEventItemEventData_ReportStateUpdate",
-     "ReportEventType",
-     "ReportMetadata",
-     "ReportPlan",
-     "ReportPlanBlock",
-     "ReportQuery",
-     "ReportResponse",
-     "ReportState",
-     "ReportStateEvent",
-     "ReportUpdateEvent",
      "RetrievalMode",
      "RetrieveResults",
      "Retriever",
      "RetrieverCreate",
      "RetrieverPipeline",
      "Role",
+     "SchemaGenerationAvailability",
+     "SchemaGenerationAvailabilityStatus",
      "SchemaRelaxMode",
      "SemanticChunkingConfig",
      "SentenceChunkingConfig",
-     "SrcAppSchemaChatChatMessage",
+     "SparseModelConfig",
+     "SparseModelType",
      "StatusEnum",
      "StructMode",
      "StructParseConf",
      "SupportedLlmModel",
      "SupportedLlmModelNames",
-     "TextBlock",
      "TextNode",
      "TextNodeRelationshipsValue",
      "TextNodeWithScore",

llama_cloud/types/{src_app_schema_chat_chat_message.py → chat_message.py}
@@ -16,7 +16,7 @@ except ImportError:
      import pydantic # type: ignore


- class SrcAppSchemaChatChatMessage(pydantic.BaseModel):
+ class ChatMessage(pydantic.BaseModel):
      id: str
      index: int = pydantic.Field(description="The index of the message in the chat.")
      annotations: typing.Optional[typing.List[MessageAnnotation]] = pydantic.Field(
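
The chat message model is now exported simply as ChatMessage. A minimal migration sketch, assuming the type is still imported from llama_cloud.types; only the fields visible in this hunk (id, index) are used:

    # 0.1.39 exported the model as SrcAppSchemaChatChatMessage; 0.1.41 exports ChatMessage.
    from llama_cloud.types import ChatMessage

    def describe(message: ChatMessage) -> str:
        # Uses only fields shown in the diff above.
        return f"message {message.id} at index {message.index}"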

llama_cloud/types/extract_config.py
@@ -7,7 +7,9 @@ from ..core.datetime_utils import serialize_datetime
  from .document_chunk_mode import DocumentChunkMode
  from .extract_config_priority import ExtractConfigPriority
  from .extract_mode import ExtractMode
+ from .extract_models import ExtractModels
  from .extract_target import ExtractTarget
+ from .public_model_name import PublicModelName

  try:
      import pydantic
@@ -25,9 +27,13 @@ class ExtractConfig(pydantic.BaseModel):

      priority: typing.Optional[ExtractConfigPriority]
      extraction_target: typing.Optional[ExtractTarget] = pydantic.Field(description="The extraction target specified.")
-     extraction_mode: typing.Optional[ExtractMode] = pydantic.Field(description="The extraction mode specified.")
+     extraction_mode: typing.Optional[ExtractMode] = pydantic.Field(
+         description="The extraction mode specified (FAST, BALANCED, MULTIMODAL, PREMIUM)."
+     )
+     parse_model: typing.Optional[PublicModelName]
+     extract_model: typing.Optional[ExtractModels]
      multimodal_fast_mode: typing.Optional[bool] = pydantic.Field(
-         description="Whether to use fast mode for multimodal extraction."
+         description="DEPRECATED: Whether to use fast mode for multimodal extraction."
      )
      system_prompt: typing.Optional[str]
      use_reasoning: typing.Optional[bool] = pydantic.Field(description="Whether to use reasoning for the extraction.")
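
ExtractConfig gains per-configuration model overrides (parse_model, extract_model), while multimodal_fast_mode is now flagged as deprecated. A hedged sketch of the new fields; the values are illustrative only:

    from llama_cloud.types import ExtractConfig, ExtractMode, ExtractModels

    config = ExtractConfig(
        extraction_mode=ExtractMode.BALANCED,            # FAST, BALANCED, MULTIMODAL or PREMIUM per the new description
        extract_model=ExtractModels.OPENAI_GPT_41_MINI,  # new optional override for the extraction model
        use_reasoning=False,
    )
    # parse_model takes a PublicModelName; that enum is added in this release
    # (llama_cloud/types/public_model_name.py) but its members are not listed in this diff.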

llama_cloud/types/extract_job_create.py
@@ -6,6 +6,7 @@ import typing
  from ..core.datetime_utils import serialize_datetime
  from .extract_config import ExtractConfig
  from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
+ from .extract_job_create_priority import ExtractJobCreatePriority
  from .webhook_configuration import WebhookConfiguration

  try:
@@ -22,6 +23,7 @@ class ExtractJobCreate(pydantic.BaseModel):
      Schema for creating an extraction job.
      """

+     priority: typing.Optional[ExtractJobCreatePriority]
      webhook_configurations: typing.Optional[typing.List[WebhookConfiguration]]
      extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
      file_id: str = pydantic.Field(description="The id of the file")

llama_cloud/types/extract_job_create_priority.py (new file)
@@ -0,0 +1,29 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ import enum
+ import typing
+
+ T_Result = typing.TypeVar("T_Result")
+
+
+ class ExtractJobCreatePriority(str, enum.Enum):
+     LOW = "low"
+     MEDIUM = "medium"
+     HIGH = "high"
+     CRITICAL = "critical"
+
+     def visit(
+         self,
+         low: typing.Callable[[], T_Result],
+         medium: typing.Callable[[], T_Result],
+         high: typing.Callable[[], T_Result],
+         critical: typing.Callable[[], T_Result],
+     ) -> T_Result:
+         if self is ExtractJobCreatePriority.LOW:
+             return low()
+         if self is ExtractJobCreatePriority.MEDIUM:
+             return medium()
+         if self is ExtractJobCreatePriority.HIGH:
+             return high()
+         if self is ExtractJobCreatePriority.CRITICAL:
+             return critical()
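
ExtractJobCreate now carries an optional priority drawn from this new enum. Because ExtractJobCreatePriority subclasses str, members compare equal to their string values, and the generated visit() helper gives exhaustive dispatch. A small usage sketch (the queue weights are made up for illustration):

    from llama_cloud.types import ExtractJobCreatePriority

    priority = ExtractJobCreatePriority.HIGH
    assert priority == "high"  # str-enum: members compare equal to their values

    # Exhaustive dispatch over the four priorities via the generated visit() helper.
    queue_weight = priority.visit(
        low=lambda: 0,
        medium=lambda: 1,
        high=lambda: 2,
        critical=lambda: 3,
    )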

llama_cloud/types/extract_models.py
@@ -7,47 +7,47 @@ T_Result = typing.TypeVar("T_Result")


  class ExtractModels(str, enum.Enum):
-     GPT_41 = "gpt-4.1"
-     GPT_41_MINI = "gpt-4.1-mini"
-     GPT_41_NANO = "gpt-4.1-nano"
+     OPENAI_GPT_41 = "openai-gpt-4-1"
+     OPENAI_GPT_41_MINI = "openai-gpt-4-1-mini"
+     OPENAI_GPT_41_NANO = "openai-gpt-4-1-nano"
+     OPENAI_GPT_5 = "openai-gpt-5"
+     OPENAI_GPT_5_MINI = "openai-gpt-5-mini"
      GEMINI_20_FLASH = "gemini-2.0-flash"
-     O_3_MINI = "o3-mini"
      GEMINI_25_FLASH = "gemini-2.5-flash"
      GEMINI_25_PRO = "gemini-2.5-pro"
-     GEMINI_25_FLASH_LITE_PREVIEW_0617 = "gemini-2.5-flash-lite-preview-06-17"
-     GPT_4_O = "gpt-4o"
-     GPT_4_O_MINI = "gpt-4o-mini"
+     OPENAI_GPT_4_O = "openai-gpt-4o"
+     OPENAI_GPT_4_O_MINI = "openai-gpt-4o-mini"

      def visit(
          self,
-         gpt_41: typing.Callable[[], T_Result],
-         gpt_41_mini: typing.Callable[[], T_Result],
-         gpt_41_nano: typing.Callable[[], T_Result],
+         openai_gpt_41: typing.Callable[[], T_Result],
+         openai_gpt_41_mini: typing.Callable[[], T_Result],
+         openai_gpt_41_nano: typing.Callable[[], T_Result],
+         openai_gpt_5: typing.Callable[[], T_Result],
+         openai_gpt_5_mini: typing.Callable[[], T_Result],
          gemini_20_flash: typing.Callable[[], T_Result],
-         o_3_mini: typing.Callable[[], T_Result],
          gemini_25_flash: typing.Callable[[], T_Result],
          gemini_25_pro: typing.Callable[[], T_Result],
-         gemini_25_flash_lite_preview_0617: typing.Callable[[], T_Result],
-         gpt_4_o: typing.Callable[[], T_Result],
-         gpt_4_o_mini: typing.Callable[[], T_Result],
+         openai_gpt_4_o: typing.Callable[[], T_Result],
+         openai_gpt_4_o_mini: typing.Callable[[], T_Result],
      ) -> T_Result:
-         if self is ExtractModels.GPT_41:
-             return gpt_41()
-         if self is ExtractModels.GPT_41_MINI:
-             return gpt_41_mini()
-         if self is ExtractModels.GPT_41_NANO:
-             return gpt_41_nano()
+         if self is ExtractModels.OPENAI_GPT_41:
+             return openai_gpt_41()
+         if self is ExtractModels.OPENAI_GPT_41_MINI:
+             return openai_gpt_41_mini()
+         if self is ExtractModels.OPENAI_GPT_41_NANO:
+             return openai_gpt_41_nano()
+         if self is ExtractModels.OPENAI_GPT_5:
+             return openai_gpt_5()
+         if self is ExtractModels.OPENAI_GPT_5_MINI:
+             return openai_gpt_5_mini()
          if self is ExtractModels.GEMINI_20_FLASH:
              return gemini_20_flash()
-         if self is ExtractModels.O_3_MINI:
-             return o_3_mini()
          if self is ExtractModels.GEMINI_25_FLASH:
              return gemini_25_flash()
          if self is ExtractModels.GEMINI_25_PRO:
              return gemini_25_pro()
-         if self is ExtractModels.GEMINI_25_FLASH_LITE_PREVIEW_0617:
-             return gemini_25_flash_lite_preview_0617()
-         if self is ExtractModels.GPT_4_O:
-             return gpt_4_o()
-         if self is ExtractModels.GPT_4_O_MINI:
-             return gpt_4_o_mini()
+         if self is ExtractModels.OPENAI_GPT_4_O:
+             return openai_gpt_4_o()
+         if self is ExtractModels.OPENAI_GPT_4_O_MINI:
+             return openai_gpt_4_o_mini()
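
The ExtractModels members are renamed with an explicit openai- prefix and new value slugs, gpt-5 variants are added, and o3-mini plus the gemini-2.5-flash-lite preview are dropped. A hedged migration note covering only the renames visible in this hunk:

    from llama_cloud.types import ExtractModels

    # 0.1.39 member            -> 0.1.41 member (value)
    # ExtractModels.GPT_41     -> ExtractModels.OPENAI_GPT_41       ("openai-gpt-4-1")
    # ExtractModels.GPT_4_O    -> ExtractModels.OPENAI_GPT_4_O      ("openai-gpt-4o")
    # ExtractModels.O_3_MINI   -> removed; no direct replacement appears in this diff

    model = ExtractModels.OPENAI_GPT_5_MINI
    assert model.value == "openai-gpt-5-mini"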

llama_cloud/types/job_names.py
@@ -15,7 +15,6 @@ class JobNames(str, enum.Enum):
      LOAD_FILES_JOB = "load_files_job"
      PLAYGROUND_JOB = "playground_job"
      PIPELINE_MANAGED_INGESTION_JOB = "pipeline_managed_ingestion_job"
-     DATA_SOURCE_MANAGED_INGESTION_JOB = "data_source_managed_ingestion_job"
      DATA_SOURCE_UPDATE_DISPATCHER_JOB = "data_source_update_dispatcher_job"
      PIPELINE_FILE_UPDATE_DISPATCHER_JOB = "pipeline_file_update_dispatcher_job"
      PIPELINE_FILE_UPDATER_JOB = "pipeline_file_updater_job"
@@ -35,7 +34,6 @@ class JobNames(str, enum.Enum):
          load_files_job: typing.Callable[[], T_Result],
          playground_job: typing.Callable[[], T_Result],
          pipeline_managed_ingestion_job: typing.Callable[[], T_Result],
-         data_source_managed_ingestion_job: typing.Callable[[], T_Result],
          data_source_update_dispatcher_job: typing.Callable[[], T_Result],
          pipeline_file_update_dispatcher_job: typing.Callable[[], T_Result],
          pipeline_file_updater_job: typing.Callable[[], T_Result],
@@ -57,8 +55,6 @@ class JobNames(str, enum.Enum):
              return playground_job()
          if self is JobNames.PIPELINE_MANAGED_INGESTION_JOB:
              return pipeline_managed_ingestion_job()
-         if self is JobNames.DATA_SOURCE_MANAGED_INGESTION_JOB:
-             return data_source_managed_ingestion_job()
          if self is JobNames.DATA_SOURCE_UPDATE_DISPATCHER_JOB:
              return data_source_update_dispatcher_job()
          if self is JobNames.PIPELINE_FILE_UPDATE_DISPATCHER_JOB:

llama_cloud/types/{document_block.py → llama_extract_feature_availability.py}
@@ -4,6 +4,8 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
+ from .llama_extract_mode_availability import LlamaExtractModeAvailability
+ from .schema_generation_availability import SchemaGenerationAvailability

  try:
      import pydantic
@@ -14,12 +16,9 @@ except ImportError:
      import pydantic # type: ignore


- class DocumentBlock(pydantic.BaseModel):
-     data: typing.Optional[str]
-     path: typing.Optional[str]
-     url: typing.Optional[str]
-     title: typing.Optional[str]
-     document_mimetype: typing.Optional[str]
+ class LlamaExtractFeatureAvailability(pydantic.BaseModel):
+     schema_generation: SchemaGenerationAvailability
+     available_modes: typing.List[LlamaExtractModeAvailability]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

llama_cloud/types/llama_extract_mode_availability.py
@@ -19,9 +19,10 @@ class LlamaExtractModeAvailability(pydantic.BaseModel):
      mode: str
      status: LlamaExtractModeAvailabilityStatus
      parse_mode: str
-     parse_models: typing.List[str]
-     extract_models: typing.List[str]
-     missing_models: typing.Optional[typing.List[str]]
+     available_parse_models: typing.Optional[typing.List[str]]
+     missing_parse_models: typing.Optional[typing.List[str]]
+     available_extract_models: typing.Optional[typing.List[str]]
+     missing_extract_models: typing.Optional[typing.List[str]]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
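
Together, LlamaExtractFeatureAvailability and the revised LlamaExtractModeAvailability split the old combined model lists into available/missing parse and extract models, all optional. A sketch of reading such a response, assuming None simply means the list was not reported:

    from llama_cloud.types import LlamaExtractFeatureAvailability

    def report_missing(availability: LlamaExtractFeatureAvailability) -> None:
        # schema_generation is a SchemaGenerationAvailability; its fields are not shown in this diff.
        for mode in availability.available_modes:
            missing = (mode.missing_parse_models or []) + (mode.missing_extract_models or [])
            if missing:
                print(f"{mode.mode} ({mode.status}): missing {missing}")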

llama_cloud/types/llama_extract_settings.py
@@ -44,7 +44,7 @@ class LlamaExtractSettings(pydantic.BaseModel):
          description="Whether to use experimental multimodal parsing."
      )
      use_pixel_extraction: typing.Optional[bool] = pydantic.Field(
-         description="Whether to use extraction over pixels for multimodal mode."
+         description="DEPRECATED: Whether to use extraction over pixels for multimodal mode."
      )
      llama_parse_params: typing.Optional[LlamaParseParameters] = pydantic.Field(
          description="LlamaParse related settings."

llama_cloud/types/llama_parse_parameters.py
@@ -49,6 +49,7 @@ class LlamaParseParameters(pydantic.BaseModel):
      extract_layout: typing.Optional[bool]
      high_res_ocr: typing.Optional[bool]
      html_make_all_elements_visible: typing.Optional[bool]
+     layout_aware: typing.Optional[bool]
      html_remove_navigation_elements: typing.Optional[bool]
      html_remove_fixed_elements: typing.Optional[bool]
      guess_xlsx_sheet_name: typing.Optional[bool]
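
LlamaParseParameters (and ParseJobConfig, further below) gains an optional layout_aware flag; its parsing behaviour is not documented in this diff, so the snippet is illustrative only:

    from llama_cloud.types import LlamaParseParameters

    # The fields shown in the hunk are optional booleans, so a sparse construction is valid.
    params = LlamaParseParameters(
        layout_aware=True,
        high_res_ocr=True,
    )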

llama_cloud/types/{progress_event.py → parse_configuration.py}
@@ -4,8 +4,7 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
- from .progress_event_status import ProgressEventStatus
- from .report_event_type import ReportEventType
+ from .llama_parse_parameters import LlamaParseParameters

  try:
      import pydantic
@@ -16,19 +15,20 @@ except ImportError:
      import pydantic # type: ignore


- class ProgressEvent(pydantic.BaseModel):
+ class ParseConfiguration(pydantic.BaseModel):
      """
-     Event for tracking progress of operations in workflows.
+     Parse configuration schema.
      """

-     timestamp: typing.Optional[dt.datetime]
-     id: typing.Optional[str] = pydantic.Field(description="The ID of the event")
-     group_id: typing.Optional[str] = pydantic.Field(description="The ID of the group this event belongs to")
-     variant: ReportEventType
-     msg: str = pydantic.Field(description="The message to display to the user")
-     progress: typing.Optional[float]
-     status: typing.Optional[ProgressEventStatus] = pydantic.Field(description="Current status of the operation")
-     extra_detail: typing.Optional[typing.Dict[str, typing.Any]]
+     id: str = pydantic.Field(description="Unique identifier for the parse configuration")
+     name: str = pydantic.Field(description="Name of the parse configuration")
+     source_type: str = pydantic.Field(description="Type of the source (e.g., 'project')")
+     source_id: str = pydantic.Field(description="ID of the source")
+     creator: typing.Optional[str]
+     version: str = pydantic.Field(description="Version of the configuration")
+     parameters: LlamaParseParameters = pydantic.Field(description="LlamaParseParameters configuration")
+     created_at: dt.datetime = pydantic.Field(description="Creation timestamp")
+     updated_at: dt.datetime = pydantic.Field(description="Last update timestamp")

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

llama_cloud/types/{llama_index_core_base_llms_types_chat_message.py → parse_configuration_create.py}
@@ -4,8 +4,7 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
- from .llama_index_core_base_llms_types_chat_message_blocks_item import LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem
- from .message_role import MessageRole
+ from .llama_parse_parameters import LlamaParseParameters

  try:
      import pydantic
@@ -16,14 +15,17 @@ except ImportError:
      import pydantic # type: ignore


- class LlamaIndexCoreBaseLlmsTypesChatMessage(pydantic.BaseModel):
+ class ParseConfigurationCreate(pydantic.BaseModel):
      """
-     Chat message.
+     Schema for creating a new parse configuration (API boundary).
      """

-     role: typing.Optional[MessageRole]
-     additional_kwargs: typing.Optional[typing.Dict[str, typing.Any]]
-     blocks: typing.Optional[typing.List[LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem]]
+     name: str = pydantic.Field(description="Name of the parse configuration")
+     source_type: typing.Optional[str]
+     source_id: typing.Optional[str]
+     creator: typing.Optional[str]
+     version: str = pydantic.Field(description="Version of the configuration")
+     parameters: LlamaParseParameters = pydantic.Field(description="LlamaParseParameters configuration")

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
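
ParseConfigurationCreate is the write-side schema for the new parse configuration types; name, version and parameters are its required fields here, while the endpoints that accept it live in the expanded beta client (llama_cloud/resources/beta/client.py) and are not shown in this diff. A hedged construction sketch with illustrative values:

    from llama_cloud.types import LlamaParseParameters, ParseConfigurationCreate

    create = ParseConfigurationCreate(
        name="invoices-default",   # illustrative name
        version="1",               # version string; its format is not constrained in this schema
        parameters=LlamaParseParameters(high_res_ocr=True),
    )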

llama_cloud/types/{edit_suggestion.py → parse_configuration_filter.py}
@@ -4,7 +4,6 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
- from .edit_suggestion_blocks_item import EditSuggestionBlocksItem

  try:
      import pydantic
@@ -15,14 +14,17 @@ except ImportError:
      import pydantic # type: ignore


- class EditSuggestion(pydantic.BaseModel):
+ class ParseConfigurationFilter(pydantic.BaseModel):
      """
-     A suggestion for an edit to a report.
+     Filter parameters for parse configuration queries.
      """

-     justification: str
-     blocks: typing.List[EditSuggestionBlocksItem]
-     removed_indices: typing.Optional[typing.List[int]]
+     name: typing.Optional[str]
+     source_type: typing.Optional[str]
+     source_id: typing.Optional[str]
+     creator: typing.Optional[str]
+     version: typing.Optional[str]
+     parse_config_ids: typing.Optional[typing.List[str]]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

llama_cloud/types/{report_update_event.py → parse_configuration_query_response.py}
@@ -4,7 +4,7 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
- from .report_block import ReportBlock
+ from .parse_configuration import ParseConfiguration

  try:
      import pydantic
@@ -15,14 +15,14 @@ except ImportError:
      import pydantic # type: ignore


- class ReportUpdateEvent(pydantic.BaseModel):
+ class ParseConfigurationQueryResponse(pydantic.BaseModel):
      """
-     Event for updating the state of an report.
+     Response schema for paginated parse configuration queries.
      """

-     timestamp: typing.Optional[dt.datetime]
-     msg: typing.Optional[str] = pydantic.Field(description="The message to display to the user")
-     block: ReportBlock = pydantic.Field(description="The block to update")
+     items: typing.List[ParseConfiguration] = pydantic.Field(description="The list of items.")
+     next_page_token: typing.Optional[str]
+     total_size: typing.Optional[int]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
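
ParseConfigurationFilter and ParseConfigurationQueryResponse round out the query side: every filter field is optional (an unset field presumably means "no constraint"), and responses are paginated via next_page_token. A sketch of working with both models, with illustrative values:

    import typing

    from llama_cloud.types import ParseConfigurationFilter, ParseConfigurationQueryResponse

    flt = ParseConfigurationFilter(source_type="project", name="invoices-default")

    def config_names(page: ParseConfigurationQueryResponse) -> typing.List[str]:
        # next_page_token, when set, is passed back to fetch the following page.
        return [item.name for item in page.items]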

llama_cloud/types/parse_job_config.py
@@ -51,6 +51,7 @@ class ParseJobConfig(pydantic.BaseModel):
      extract_layout: typing.Optional[bool]
      high_res_ocr: typing.Optional[bool]
      html_make_all_elements_visible: typing.Optional[bool]
+     layout_aware: typing.Optional[bool]
      html_remove_navigation_elements: typing.Optional[bool]
      html_remove_fixed_elements: typing.Optional[bool]
      guess_xlsx_sheet_name: typing.Optional[bool]