llama-cloud 0.1.40__py3-none-any.whl → 0.1.41__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_cloud/__init__.py +12 -72
- llama_cloud/client.py +2 -5
- llama_cloud/resources/__init__.py +0 -4
- llama_cloud/resources/beta/client.py +1351 -335
- llama_cloud/resources/llama_extract/client.py +48 -0
- llama_cloud/types/__init__.py +12 -72
- llama_cloud/types/{src_app_schema_chat_chat_message.py → chat_message.py} +1 -1
- llama_cloud/types/extract_config.py +8 -2
- llama_cloud/types/extract_models.py +28 -28
- llama_cloud/types/llama_extract_mode_availability.py +4 -3
- llama_cloud/types/llama_extract_settings.py +1 -1
- llama_cloud/types/{progress_event.py → parse_configuration.py} +12 -12
- llama_cloud/types/{llama_index_core_base_llms_types_chat_message.py → parse_configuration_create.py} +9 -7
- llama_cloud/types/{edit_suggestion.py → parse_configuration_filter.py} +8 -6
- llama_cloud/types/{report_update_event.py → parse_configuration_query_response.py} +6 -6
- llama_cloud/types/pipeline_create.py +1 -1
- llama_cloud/types/playground_session.py +2 -2
- llama_cloud/types/public_model_name.py +97 -0
- {llama_cloud-0.1.40.dist-info → llama_cloud-0.1.41.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.40.dist-info → llama_cloud-0.1.41.dist-info}/RECORD +22 -47
- {llama_cloud-0.1.40.dist-info → llama_cloud-0.1.41.dist-info}/WHEEL +1 -1
- llama_cloud/resources/reports/__init__.py +0 -5
- llama_cloud/resources/reports/client.py +0 -1230
- llama_cloud/resources/reports/types/__init__.py +0 -7
- llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py +0 -25
- llama_cloud/types/audio_block.py +0 -34
- llama_cloud/types/document_block.py +0 -35
- llama_cloud/types/edit_suggestion_blocks_item.py +0 -8
- llama_cloud/types/image_block.py +0 -35
- llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py +0 -56
- llama_cloud/types/paginated_report_response.py +0 -35
- llama_cloud/types/progress_event_status.py +0 -33
- llama_cloud/types/report.py +0 -33
- llama_cloud/types/report_block.py +0 -35
- llama_cloud/types/report_block_dependency.py +0 -29
- llama_cloud/types/report_create_response.py +0 -31
- llama_cloud/types/report_event_item.py +0 -40
- llama_cloud/types/report_event_item_event_data.py +0 -45
- llama_cloud/types/report_event_type.py +0 -37
- llama_cloud/types/report_metadata.py +0 -43
- llama_cloud/types/report_plan.py +0 -36
- llama_cloud/types/report_plan_block.py +0 -36
- llama_cloud/types/report_query.py +0 -33
- llama_cloud/types/report_response.py +0 -41
- llama_cloud/types/report_state.py +0 -37
- llama_cloud/types/report_state_event.py +0 -38
- llama_cloud/types/text_block.py +0 -31
- {llama_cloud-0.1.40.dist-info → llama_cloud-0.1.41.dist-info}/LICENSE +0 -0
llama_cloud/resources/llama_extract/client.py
CHANGED

@@ -111,7 +111,9 @@ class LlamaExtractClient:
     ExtractConfig,
     ExtractConfigPriority,
     ExtractMode,
+    ExtractModels,
     ExtractTarget,
+    PublicModelName,
 )
 from llama_cloud.client import LlamaCloud

@@ -124,6 +126,8 @@ class LlamaExtractClient:
     priority=ExtractConfigPriority.LOW,
     extraction_target=ExtractTarget.PER_DOC,
     extraction_mode=ExtractMode.FAST,
+    parse_model=PublicModelName.OPENAI_GPT_4_O,
+    extract_model=ExtractModels.OPENAI_GPT_4_1,
     chunk_mode=DocumentChunkMode.PAGE,
 ),
 )

@@ -359,7 +363,9 @@ class LlamaExtractClient:
     ExtractConfig,
     ExtractConfigPriority,
     ExtractMode,
+    ExtractModels,
     ExtractTarget,
+    PublicModelName,
 )
 from llama_cloud.client import LlamaCloud

@@ -372,6 +378,8 @@ class LlamaExtractClient:
     priority=ExtractConfigPriority.LOW,
     extraction_target=ExtractTarget.PER_DOC,
     extraction_mode=ExtractMode.FAST,
+    parse_model=PublicModelName.OPENAI_GPT_4_O,
+    extract_model=ExtractModels.OPENAI_GPT_4_1,
     chunk_mode=DocumentChunkMode.PAGE,
 ),
 )

@@ -472,7 +480,9 @@ class LlamaExtractClient:
     ExtractJobCreate,
     ExtractJobCreatePriority,
     ExtractMode,
+    ExtractModels,
     ExtractTarget,
+    PublicModelName,
 )
 from llama_cloud.client import LlamaCloud

@@ -488,6 +498,8 @@ class LlamaExtractClient:
     priority=ExtractConfigPriority.LOW,
     extraction_target=ExtractTarget.PER_DOC,
     extraction_mode=ExtractMode.FAST,
+    parse_model=PublicModelName.OPENAI_GPT_4_O,
+    extract_model=ExtractModels.OPENAI_GPT_4_1,
     chunk_mode=DocumentChunkMode.PAGE,
 ),
 ),

@@ -564,6 +576,7 @@ class LlamaExtractClient:
     ExtractJobCreate,
     ExtractJobCreatePriority,
     ExtractMode,
+    ExtractModels,
     ExtractTarget,
     FailPageMode,
     LlamaExtractSettings,

@@ -571,6 +584,7 @@ class LlamaExtractClient:
     LlamaParseParametersPriority,
     MultimodalParseResolution,
     ParsingMode,
+    PublicModelName,
 )
 from llama_cloud.client import LlamaCloud

@@ -586,6 +600,8 @@ class LlamaExtractClient:
     priority=ExtractConfigPriority.LOW,
     extraction_target=ExtractTarget.PER_DOC,
     extraction_mode=ExtractMode.FAST,
+    parse_model=PublicModelName.OPENAI_GPT_4_O,
+    extract_model=ExtractModels.OPENAI_GPT_4_1,
     chunk_mode=DocumentChunkMode.PAGE,
 ),
 ),

@@ -693,7 +709,9 @@ class LlamaExtractClient:
     ExtractConfig,
     ExtractConfigPriority,
     ExtractMode,
+    ExtractModels,
     ExtractTarget,
+    PublicModelName,
 )
 from llama_cloud.client import LlamaCloud

@@ -707,6 +725,8 @@ class LlamaExtractClient:
     priority=ExtractConfigPriority.LOW,
     extraction_target=ExtractTarget.PER_DOC,
     extraction_mode=ExtractMode.FAST,
+    parse_model=PublicModelName.OPENAI_GPT_4_O,
+    extract_model=ExtractModels.OPENAI_GPT_4_1,
     chunk_mode=DocumentChunkMode.PAGE,
 ),
 )

@@ -988,8 +1008,10 @@ class LlamaExtractClient:
     ExtractConfig,
     ExtractConfigPriority,
     ExtractMode,
+    ExtractModels,
     ExtractTarget,
     FileData,
+    PublicModelName,
 )
 from llama_cloud.client import LlamaCloud

@@ -1001,6 +1023,8 @@ class LlamaExtractClient:
     priority=ExtractConfigPriority.LOW,
     extraction_target=ExtractTarget.PER_DOC,
     extraction_mode=ExtractMode.FAST,
+    parse_model=PublicModelName.OPENAI_GPT_4_O,
+    extract_model=ExtractModels.OPENAI_GPT_4_1,
     chunk_mode=DocumentChunkMode.PAGE,
 ),
 file=FileData(

@@ -1108,7 +1132,9 @@ class AsyncLlamaExtractClient:
     ExtractConfig,
     ExtractConfigPriority,
     ExtractMode,
+    ExtractModels,
     ExtractTarget,
+    PublicModelName,
 )
 from llama_cloud.client import AsyncLlamaCloud

@@ -1121,6 +1147,8 @@ class AsyncLlamaExtractClient:
     priority=ExtractConfigPriority.LOW,
     extraction_target=ExtractTarget.PER_DOC,
     extraction_mode=ExtractMode.FAST,
+    parse_model=PublicModelName.OPENAI_GPT_4_O,
+    extract_model=ExtractModels.OPENAI_GPT_4_1,
     chunk_mode=DocumentChunkMode.PAGE,
 ),
 )

@@ -1356,7 +1384,9 @@ class AsyncLlamaExtractClient:
     ExtractConfig,
     ExtractConfigPriority,
     ExtractMode,
+    ExtractModels,
     ExtractTarget,
+    PublicModelName,
 )
 from llama_cloud.client import AsyncLlamaCloud

@@ -1369,6 +1399,8 @@ class AsyncLlamaExtractClient:
     priority=ExtractConfigPriority.LOW,
     extraction_target=ExtractTarget.PER_DOC,
     extraction_mode=ExtractMode.FAST,
+    parse_model=PublicModelName.OPENAI_GPT_4_O,
+    extract_model=ExtractModels.OPENAI_GPT_4_1,
     chunk_mode=DocumentChunkMode.PAGE,
 ),
 )

@@ -1469,7 +1501,9 @@ class AsyncLlamaExtractClient:
     ExtractJobCreate,
     ExtractJobCreatePriority,
     ExtractMode,
+    ExtractModels,
     ExtractTarget,
+    PublicModelName,
 )
 from llama_cloud.client import AsyncLlamaCloud

@@ -1485,6 +1519,8 @@ class AsyncLlamaExtractClient:
     priority=ExtractConfigPriority.LOW,
     extraction_target=ExtractTarget.PER_DOC,
     extraction_mode=ExtractMode.FAST,
+    parse_model=PublicModelName.OPENAI_GPT_4_O,
+    extract_model=ExtractModels.OPENAI_GPT_4_1,
     chunk_mode=DocumentChunkMode.PAGE,
 ),
 ),

@@ -1561,6 +1597,7 @@ class AsyncLlamaExtractClient:
     ExtractJobCreate,
     ExtractJobCreatePriority,
     ExtractMode,
+    ExtractModels,
     ExtractTarget,
     FailPageMode,
     LlamaExtractSettings,

@@ -1568,6 +1605,7 @@ class AsyncLlamaExtractClient:
     LlamaParseParametersPriority,
     MultimodalParseResolution,
     ParsingMode,
+    PublicModelName,
 )
 from llama_cloud.client import AsyncLlamaCloud

@@ -1583,6 +1621,8 @@ class AsyncLlamaExtractClient:
     priority=ExtractConfigPriority.LOW,
     extraction_target=ExtractTarget.PER_DOC,
     extraction_mode=ExtractMode.FAST,
+    parse_model=PublicModelName.OPENAI_GPT_4_O,
+    extract_model=ExtractModels.OPENAI_GPT_4_1,
     chunk_mode=DocumentChunkMode.PAGE,
 ),
 ),

@@ -1690,7 +1730,9 @@ class AsyncLlamaExtractClient:
     ExtractConfig,
     ExtractConfigPriority,
     ExtractMode,
+    ExtractModels,
     ExtractTarget,
+    PublicModelName,
 )
 from llama_cloud.client import AsyncLlamaCloud

@@ -1704,6 +1746,8 @@ class AsyncLlamaExtractClient:
     priority=ExtractConfigPriority.LOW,
     extraction_target=ExtractTarget.PER_DOC,
     extraction_mode=ExtractMode.FAST,
+    parse_model=PublicModelName.OPENAI_GPT_4_O,
+    extract_model=ExtractModels.OPENAI_GPT_4_1,
     chunk_mode=DocumentChunkMode.PAGE,
 ),
 )

@@ -1985,8 +2029,10 @@ class AsyncLlamaExtractClient:
     ExtractConfig,
     ExtractConfigPriority,
     ExtractMode,
+    ExtractModels,
     ExtractTarget,
     FileData,
+    PublicModelName,
 )
 from llama_cloud.client import AsyncLlamaCloud

@@ -1998,6 +2044,8 @@ class AsyncLlamaExtractClient:
     priority=ExtractConfigPriority.LOW,
     extraction_target=ExtractTarget.PER_DOC,
     extraction_mode=ExtractMode.FAST,
+    parse_model=PublicModelName.OPENAI_GPT_4_O,
+    extract_model=ExtractModels.OPENAI_GPT_4_1,
     chunk_mode=DocumentChunkMode.PAGE,
 ),
 file=FileData(
llama_cloud/types/__init__.py
CHANGED
@@ -19,7 +19,6 @@ from .agent_data import AgentData
 from .agent_deployment_list import AgentDeploymentList
 from .agent_deployment_summary import AgentDeploymentSummary
 from .aggregate_group import AggregateGroup
-from .audio_block import AudioBlock
 from .auto_transform_config import AutoTransformConfig
 from .azure_open_ai_embedding import AzureOpenAiEmbedding
 from .azure_open_ai_embedding_config import AzureOpenAiEmbeddingConfig

@@ -39,6 +38,7 @@ from .character_chunking_config import CharacterChunkingConfig
 from .chat_app import ChatApp
 from .chat_app_response import ChatAppResponse
 from .chat_data import ChatData
+from .chat_message import ChatMessage
 from .chunk_mode import ChunkMode
 from .classification_result import ClassificationResult
 from .classifier_rule import ClassifierRule

@@ -88,11 +88,8 @@ from .data_source_reader_version_metadata import DataSourceReaderVersionMetadata
 from .data_source_reader_version_metadata_reader_version import DataSourceReaderVersionMetadataReaderVersion
 from .data_source_update_dispatcher_config import DataSourceUpdateDispatcherConfig
 from .delete_params import DeleteParams
-from .document_block import DocumentBlock
 from .document_chunk_mode import DocumentChunkMode
 from .document_ingestion_job_params import DocumentIngestionJobParams
-from .edit_suggestion import EditSuggestion
-from .edit_suggestion_blocks_item import EditSuggestionBlocksItem
 from .element_segmentation_config import ElementSegmentationConfig
 from .embedding_model_config import EmbeddingModelConfig
 from .embedding_model_config_embedding_config import (

@@ -178,7 +175,6 @@ from .http_validation_error import HttpValidationError
 from .hugging_face_inference_api_embedding import HuggingFaceInferenceApiEmbedding
 from .hugging_face_inference_api_embedding_config import HuggingFaceInferenceApiEmbeddingConfig
 from .hugging_face_inference_api_embedding_token import HuggingFaceInferenceApiEmbeddingToken
-from .image_block import ImageBlock
 from .ingestion_error_response import IngestionErrorResponse
 from .input_message import InputMessage
 from .job_name_mapping import JobNameMapping

@@ -204,14 +200,6 @@ from .llama_extract_feature_availability import LlamaExtractFeatureAvailability
 from .llama_extract_mode_availability import LlamaExtractModeAvailability
 from .llama_extract_mode_availability_status import LlamaExtractModeAvailabilityStatus
 from .llama_extract_settings import LlamaExtractSettings
-from .llama_index_core_base_llms_types_chat_message import LlamaIndexCoreBaseLlmsTypesChatMessage
-from .llama_index_core_base_llms_types_chat_message_blocks_item import (
-    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem,
-    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio,
-    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document,
-    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
-    LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text,
-)
 from .llama_parse_parameters import LlamaParseParameters
 from .llama_parse_parameters_priority import LlamaParseParametersPriority
 from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions

@@ -246,11 +234,14 @@ from .paginated_extract_runs_response import PaginatedExtractRunsResponse
 from .paginated_jobs_history_with_metrics import PaginatedJobsHistoryWithMetrics
 from .paginated_list_cloud_documents_response import PaginatedListCloudDocumentsResponse
 from .paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
-from .paginated_report_response import PaginatedReportResponse
 from .paginated_response_agent_data import PaginatedResponseAgentData
 from .paginated_response_aggregate_group import PaginatedResponseAggregateGroup
 from .paginated_response_classify_job import PaginatedResponseClassifyJob
 from .paginated_response_quota_configuration import PaginatedResponseQuotaConfiguration
+from .parse_configuration import ParseConfiguration
+from .parse_configuration_create import ParseConfigurationCreate
+from .parse_configuration_filter import ParseConfigurationFilter
+from .parse_configuration_query_response import ParseConfigurationQueryResponse
 from .parse_job_config import ParseJobConfig
 from .parse_job_config_priority import ParseJobConfigPriority
 from .parse_plan_level import ParsePlanLevel

@@ -326,11 +317,10 @@ from .preset_retrieval_params_search_filters_inference_schema_value import (
     PresetRetrievalParamsSearchFiltersInferenceSchemaValue,
 )
 from .presigned_url import PresignedUrl
-from .progress_event import ProgressEvent
-from .progress_event_status import ProgressEventStatus
 from .project import Project
 from .project_create import ProjectCreate
 from .prompt_conf import PromptConf
+from .public_model_name import PublicModelName
 from .quota_configuration import QuotaConfiguration
 from .quota_configuration_configuration_type import QuotaConfigurationConfigurationType
 from .quota_configuration_status import QuotaConfigurationStatus

@@ -341,26 +331,6 @@ from .re_ranker_type import ReRankerType
 from .recurring_credit_grant import RecurringCreditGrant
 from .related_node_info import RelatedNodeInfo
 from .related_node_info_node_type import RelatedNodeInfoNodeType
-from .report import Report
-from .report_block import ReportBlock
-from .report_block_dependency import ReportBlockDependency
-from .report_create_response import ReportCreateResponse
-from .report_event_item import ReportEventItem
-from .report_event_item_event_data import (
-    ReportEventItemEventData,
-    ReportEventItemEventData_Progress,
-    ReportEventItemEventData_ReportBlockUpdate,
-    ReportEventItemEventData_ReportStateUpdate,
-)
-from .report_event_type import ReportEventType
-from .report_metadata import ReportMetadata
-from .report_plan import ReportPlan
-from .report_plan_block import ReportPlanBlock
-from .report_query import ReportQuery
-from .report_response import ReportResponse
-from .report_state import ReportState
-from .report_state_event import ReportStateEvent
-from .report_update_event import ReportUpdateEvent
 from .retrieval_mode import RetrievalMode
 from .retrieve_results import RetrieveResults
 from .retriever import Retriever

@@ -374,13 +344,11 @@ from .semantic_chunking_config import SemanticChunkingConfig
 from .sentence_chunking_config import SentenceChunkingConfig
 from .sparse_model_config import SparseModelConfig
 from .sparse_model_type import SparseModelType
-from .src_app_schema_chat_chat_message import SrcAppSchemaChatChatMessage
 from .status_enum import StatusEnum
 from .struct_mode import StructMode
 from .struct_parse_conf import StructParseConf
 from .supported_llm_model import SupportedLlmModel
 from .supported_llm_model_names import SupportedLlmModelNames
-from .text_block import TextBlock
 from .text_node import TextNode
 from .text_node_relationships_value import TextNodeRelationshipsValue
 from .text_node_with_score import TextNodeWithScore

@@ -420,7 +388,6 @@ __all__ = [
     "AgentDeploymentList",
     "AgentDeploymentSummary",
     "AggregateGroup",
-    "AudioBlock",
     "AutoTransformConfig",
     "AzureOpenAiEmbedding",
     "AzureOpenAiEmbeddingConfig",

@@ -440,6 +407,7 @@ __all__ = [
     "ChatApp",
     "ChatAppResponse",
     "ChatData",
+    "ChatMessage",
     "ChunkMode",
     "ClassificationResult",
     "ClassifierRule",

@@ -489,11 +457,8 @@ __all__ = [
     "DataSourceReaderVersionMetadataReaderVersion",
     "DataSourceUpdateDispatcherConfig",
     "DeleteParams",
-    "DocumentBlock",
     "DocumentChunkMode",
     "DocumentIngestionJobParams",
-    "EditSuggestion",
-    "EditSuggestionBlocksItem",
     "ElementSegmentationConfig",
     "EmbeddingModelConfig",
     "EmbeddingModelConfigEmbeddingConfig",

@@ -575,7 +540,6 @@ __all__ = [
     "HuggingFaceInferenceApiEmbedding",
     "HuggingFaceInferenceApiEmbeddingConfig",
     "HuggingFaceInferenceApiEmbeddingToken",
-    "ImageBlock",
     "IngestionErrorResponse",
     "InputMessage",
     "JobNameMapping",

@@ -599,12 +563,6 @@ __all__ = [
     "LlamaExtractModeAvailability",
     "LlamaExtractModeAvailabilityStatus",
     "LlamaExtractSettings",
-    "LlamaIndexCoreBaseLlmsTypesChatMessage",
-    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem",
-    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Audio",
-    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document",
-    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image",
-    "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
     "LlamaParseParameters",
     "LlamaParseParametersPriority",
     "LlamaParseSupportedFileExtensions",

@@ -639,11 +597,14 @@ __all__ = [
     "PaginatedJobsHistoryWithMetrics",
     "PaginatedListCloudDocumentsResponse",
     "PaginatedListPipelineFilesResponse",
-    "PaginatedReportResponse",
     "PaginatedResponseAgentData",
     "PaginatedResponseAggregateGroup",
     "PaginatedResponseClassifyJob",
     "PaginatedResponseQuotaConfiguration",
+    "ParseConfiguration",
+    "ParseConfigurationCreate",
+    "ParseConfigurationFilter",
+    "ParseConfigurationQueryResponse",
     "ParseJobConfig",
     "ParseJobConfigPriority",
     "ParsePlanLevel",

@@ -711,11 +672,10 @@ __all__ = [
     "PresetRetrievalParams",
     "PresetRetrievalParamsSearchFiltersInferenceSchemaValue",
     "PresignedUrl",
-    "ProgressEvent",
-    "ProgressEventStatus",
     "Project",
     "ProjectCreate",
     "PromptConf",
+    "PublicModelName",
     "QuotaConfiguration",
     "QuotaConfigurationConfigurationType",
     "QuotaConfigurationStatus",

@@ -726,24 +686,6 @@ __all__ = [
     "RecurringCreditGrant",
     "RelatedNodeInfo",
     "RelatedNodeInfoNodeType",
-    "Report",
-    "ReportBlock",
-    "ReportBlockDependency",
-    "ReportCreateResponse",
-    "ReportEventItem",
-    "ReportEventItemEventData",
-    "ReportEventItemEventData_Progress",
-    "ReportEventItemEventData_ReportBlockUpdate",
-    "ReportEventItemEventData_ReportStateUpdate",
-    "ReportEventType",
-    "ReportMetadata",
-    "ReportPlan",
-    "ReportPlanBlock",
-    "ReportQuery",
-    "ReportResponse",
-    "ReportState",
-    "ReportStateEvent",
-    "ReportUpdateEvent",
     "RetrievalMode",
     "RetrieveResults",
     "Retriever",

@@ -757,13 +699,11 @@ __all__ = [
     "SentenceChunkingConfig",
     "SparseModelConfig",
     "SparseModelType",
-    "SrcAppSchemaChatChatMessage",
    "StatusEnum",
     "StructMode",
     "StructParseConf",
     "SupportedLlmModel",
     "SupportedLlmModelNames",
-    "TextBlock",
     "TextNode",
     "TextNodeRelationshipsValue",
     "TextNodeWithScore",
llama_cloud/types/{src_app_schema_chat_chat_message.py → chat_message.py}
RENAMED

@@ -16,7 +16,7 @@ except ImportError:
     import pydantic  # type: ignore


-class SrcAppSchemaChatChatMessage(pydantic.BaseModel):
+class ChatMessage(pydantic.BaseModel):
     id: str
     index: int = pydantic.Field(description="The index of the message in the chat.")
     annotations: typing.Optional[typing.List[MessageAnnotation]] = pydantic.Field(
llama_cloud/types/extract_config.py
CHANGED

@@ -7,7 +7,9 @@ from ..core.datetime_utils import serialize_datetime
 from .document_chunk_mode import DocumentChunkMode
 from .extract_config_priority import ExtractConfigPriority
 from .extract_mode import ExtractMode
+from .extract_models import ExtractModels
 from .extract_target import ExtractTarget
+from .public_model_name import PublicModelName

 try:
     import pydantic

@@ -25,9 +27,13 @@ class ExtractConfig(pydantic.BaseModel):
     priority: typing.Optional[ExtractConfigPriority]
     extraction_target: typing.Optional[ExtractTarget] = pydantic.Field(description="The extraction target specified.")
-    extraction_mode: typing.Optional[ExtractMode] = pydantic.Field(
+    extraction_mode: typing.Optional[ExtractMode] = pydantic.Field(
+        description="The extraction mode specified (FAST, BALANCED, MULTIMODAL, PREMIUM)."
+    )
+    parse_model: typing.Optional[PublicModelName]
+    extract_model: typing.Optional[ExtractModels]
     multimodal_fast_mode: typing.Optional[bool] = pydantic.Field(
-        description="Whether to use fast mode for multimodal extraction."
+        description="DEPRECATED: Whether to use fast mode for multimodal extraction."
     )
     system_prompt: typing.Optional[str]
     use_reasoning: typing.Optional[bool] = pydantic.Field(description="Whether to use reasoning for the extraction.")
llama_cloud/types/extract_models.py
CHANGED

@@ -7,47 +7,47 @@ T_Result = typing.TypeVar("T_Result")
 class ExtractModels(str, enum.Enum):
-
-
-
+    OPENAI_GPT_41 = "openai-gpt-4-1"
+    OPENAI_GPT_41_MINI = "openai-gpt-4-1-mini"
+    OPENAI_GPT_41_NANO = "openai-gpt-4-1-nano"
+    OPENAI_GPT_5 = "openai-gpt-5"
+    OPENAI_GPT_5_MINI = "openai-gpt-5-mini"
     GEMINI_20_FLASH = "gemini-2.0-flash"
-    O_3_MINI = "o3-mini"
     GEMINI_25_FLASH = "gemini-2.5-flash"
     GEMINI_25_PRO = "gemini-2.5-pro"
-
-
-    GPT_4_O_MINI = "gpt-4o-mini"
+    OPENAI_GPT_4_O = "openai-gpt-4o"
+    OPENAI_GPT_4_O_MINI = "openai-gpt-4o-mini"

     def visit(
         self,
-
-
-
+        openai_gpt_41: typing.Callable[[], T_Result],
+        openai_gpt_41_mini: typing.Callable[[], T_Result],
+        openai_gpt_41_nano: typing.Callable[[], T_Result],
+        openai_gpt_5: typing.Callable[[], T_Result],
+        openai_gpt_5_mini: typing.Callable[[], T_Result],
         gemini_20_flash: typing.Callable[[], T_Result],
-        o_3_mini: typing.Callable[[], T_Result],
         gemini_25_flash: typing.Callable[[], T_Result],
         gemini_25_pro: typing.Callable[[], T_Result],
-
-
-        gpt_4_o_mini: typing.Callable[[], T_Result],
+        openai_gpt_4_o: typing.Callable[[], T_Result],
+        openai_gpt_4_o_mini: typing.Callable[[], T_Result],
     ) -> T_Result:
-        if self is ExtractModels.
-            return
-        if self is ExtractModels.
-            return
-        if self is ExtractModels.
-            return
+        if self is ExtractModels.OPENAI_GPT_41:
+            return openai_gpt_41()
+        if self is ExtractModels.OPENAI_GPT_41_MINI:
+            return openai_gpt_41_mini()
+        if self is ExtractModels.OPENAI_GPT_41_NANO:
+            return openai_gpt_41_nano()
+        if self is ExtractModels.OPENAI_GPT_5:
+            return openai_gpt_5()
+        if self is ExtractModels.OPENAI_GPT_5_MINI:
+            return openai_gpt_5_mini()
         if self is ExtractModels.GEMINI_20_FLASH:
             return gemini_20_flash()
-        if self is ExtractModels.O_3_MINI:
-            return o_3_mini()
         if self is ExtractModels.GEMINI_25_FLASH:
             return gemini_25_flash()
         if self is ExtractModels.GEMINI_25_PRO:
             return gemini_25_pro()
-        if self is ExtractModels.
-            return
-        if self is ExtractModels.
-            return
-        if self is ExtractModels.GPT_4_O_MINI:
-            return gpt_4_o_mini()
+        if self is ExtractModels.OPENAI_GPT_4_O:
+            return openai_gpt_4_o()
+        if self is ExtractModels.OPENAI_GPT_4_O_MINI:
+            return openai_gpt_4_o_mini()
llama_cloud/types/llama_extract_mode_availability.py
CHANGED

@@ -19,9 +19,10 @@ class LlamaExtractModeAvailability(pydantic.BaseModel):
     mode: str
     status: LlamaExtractModeAvailabilityStatus
     parse_mode: str
-
-
-
+    available_parse_models: typing.Optional[typing.List[str]]
+    missing_parse_models: typing.Optional[typing.List[str]]
+    available_extract_models: typing.Optional[typing.List[str]]
+    missing_extract_models: typing.Optional[typing.List[str]]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/llama_extract_settings.py
CHANGED

@@ -44,7 +44,7 @@ class LlamaExtractSettings(pydantic.BaseModel):
         description="Whether to use experimental multimodal parsing."
     )
     use_pixel_extraction: typing.Optional[bool] = pydantic.Field(
-        description="Whether to use extraction over pixels for multimodal mode."
+        description="DEPRECATED: Whether to use extraction over pixels for multimodal mode."
     )
     llama_parse_params: typing.Optional[LlamaParseParameters] = pydantic.Field(
         description="LlamaParse related settings."
llama_cloud/types/{progress_event.py → parse_configuration.py}
RENAMED

@@ -4,8 +4,7 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
-from .progress_event_status import ProgressEventStatus
-from .report_event_type import ReportEventType
+from .llama_parse_parameters import LlamaParseParameters

 try:
     import pydantic

@@ -16,19 +15,20 @@ except ImportError:
     import pydantic  # type: ignore


-class ProgressEvent(pydantic.BaseModel):
+class ParseConfiguration(pydantic.BaseModel):
     """
-
+    Parse configuration schema.
     """

-
-
-
-
-
-
-
-
+    id: str = pydantic.Field(description="Unique identifier for the parse configuration")
+    name: str = pydantic.Field(description="Name of the parse configuration")
+    source_type: str = pydantic.Field(description="Type of the source (e.g., 'project')")
+    source_id: str = pydantic.Field(description="ID of the source")
+    creator: typing.Optional[str]
+    version: str = pydantic.Field(description="Version of the configuration")
+    parameters: LlamaParseParameters = pydantic.Field(description="LlamaParseParameters configuration")
+    created_at: dt.datetime = pydantic.Field(description="Creation timestamp")
+    updated_at: dt.datetime = pydantic.Field(description="Last update timestamp")

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/{llama_index_core_base_llms_types_chat_message.py → parse_configuration_create.py}
RENAMED
@@ -4,8 +4,7 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
-from .llama_index_core_base_llms_types_chat_message_blocks_item import LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem
-from .message_role import MessageRole
+from .llama_parse_parameters import LlamaParseParameters

 try:
     import pydantic

@@ -16,14 +15,17 @@ except ImportError:
     import pydantic  # type: ignore


-class LlamaIndexCoreBaseLlmsTypesChatMessage(pydantic.BaseModel):
+class ParseConfigurationCreate(pydantic.BaseModel):
     """
-
+    Schema for creating a new parse configuration (API boundary).
     """

-
-
-
+    name: str = pydantic.Field(description="Name of the parse configuration")
+    source_type: typing.Optional[str]
+    source_id: typing.Optional[str]
+    creator: typing.Optional[str]
+    version: str = pydantic.Field(description="Version of the configuration")
+    parameters: LlamaParseParameters = pydantic.Field(description="LlamaParseParameters configuration")

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
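Note: a sketch of building a create payload for the new parse-configuration types. Per the model above, `name`, `version`, and `parameters` are the required fields; constructing `LlamaParseParameters()` with all defaults is an assumption, since that model's fields are not shown in this diff.

```python
# Sketch: constructing a ParseConfigurationCreate payload (0.1.41).
from llama_cloud.types import LlamaParseParameters, ParseConfigurationCreate

payload = ParseConfigurationCreate(
    name="invoices-default",            # required
    version="1",                        # required
    parameters=LlamaParseParameters(),  # required; default-constructibility assumed
)
print(payload.json())  # serialization helper defined on the model above
```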
|