llama-cloud 0.1.22__py3-none-any.whl → 0.1.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (87)
  1. llama_cloud/__init__.py +6 -66
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +1 -18
  4. llama_cloud/resources/data_sources/__init__.py +2 -2
  5. llama_cloud/resources/data_sources/client.py +5 -5
  6. llama_cloud/resources/data_sources/types/__init__.py +1 -2
  7. llama_cloud/resources/files/__init__.py +0 -3
  8. llama_cloud/resources/files/client.py +18 -19
  9. llama_cloud/resources/jobs/client.py +8 -0
  10. llama_cloud/resources/llama_extract/__init__.py +0 -8
  11. llama_cloud/resources/llama_extract/client.py +92 -24
  12. llama_cloud/resources/llama_extract/types/__init__.py +0 -8
  13. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py +2 -4
  14. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py +2 -4
  15. llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override.py +2 -4
  16. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py +2 -4
  17. llama_cloud/resources/organizations/client.py +14 -4
  18. llama_cloud/resources/parsing/client.py +8 -0
  19. llama_cloud/resources/pipelines/__init__.py +0 -2
  20. llama_cloud/resources/pipelines/client.py +43 -9
  21. llama_cloud/resources/pipelines/types/__init__.py +0 -2
  22. llama_cloud/types/__init__.py +6 -46
  23. llama_cloud/types/composite_retrieval_result.py +5 -1
  24. llama_cloud/types/data_source.py +2 -2
  25. llama_cloud/types/data_source_create.py +2 -2
  26. llama_cloud/types/extract_agent.py +2 -4
  27. llama_cloud/types/extract_job_create_data_schema_override.py +2 -4
  28. llama_cloud/types/extract_models.py +20 -8
  29. llama_cloud/types/extract_resultset.py +2 -2
  30. llama_cloud/types/extract_resultset_data.py +2 -4
  31. llama_cloud/types/extract_run.py +3 -4
  32. llama_cloud/types/extract_run_data.py +2 -4
  33. llama_cloud/types/extract_schema_validate_response.py +2 -2
  34. llama_cloud/types/file.py +3 -4
  35. llama_cloud/types/{llm_config_result.py → file_id_presigned_url.py} +9 -6
  36. llama_cloud/types/json_type.py +9 -0
  37. llama_cloud/types/legacy_parse_job_config.py +1 -0
  38. llama_cloud/types/llama_extract_settings.py +3 -1
  39. llama_cloud/types/llama_parse_parameters.py +1 -0
  40. llama_cloud/types/page_figure_metadata.py +1 -0
  41. llama_cloud/types/{llm_configs_response.py → page_figure_node_with_score.py} +9 -4
  42. llama_cloud/types/parse_job_config.py +1 -0
  43. llama_cloud/types/pipeline_data_source.py +2 -2
  44. llama_cloud/types/pipeline_file.py +5 -8
  45. llama_cloud/types/pipeline_file_create.py +2 -2
  46. llama_cloud/types/preset_retrieval_params.py +8 -0
  47. llama_cloud/types/retrieve_results.py +7 -1
  48. llama_cloud/types/supported_llm_model_names.py +20 -12
  49. llama_cloud/types/user_organization.py +1 -1
  50. llama_cloud/types/user_organization_delete.py +1 -0
  51. {llama_cloud-0.1.22.dist-info → llama_cloud-0.1.24.dist-info}/METADATA +2 -3
  52. {llama_cloud-0.1.22.dist-info → llama_cloud-0.1.24.dist-info}/RECORD +54 -86
  53. {llama_cloud-0.1.22.dist-info → llama_cloud-0.1.24.dist-info}/WHEEL +1 -1
  54. llama_cloud/resources/admin/__init__.py +0 -2
  55. llama_cloud/resources/admin/client.py +0 -78
  56. llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py +0 -7
  57. llama_cloud/resources/files/types/__init__.py +0 -7
  58. llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py +0 -7
  59. llama_cloud/resources/files/types/file_create_permission_info_value.py +0 -7
  60. llama_cloud/resources/files/types/file_create_resource_info_value.py +0 -5
  61. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_zero_value.py +0 -7
  62. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_zero_value.py +0 -7
  63. llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override_zero_value.py +0 -7
  64. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py +0 -7
  65. llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py +0 -7
  66. llama_cloud/types/data_source_create_custom_metadata_value.py +0 -7
  67. llama_cloud/types/data_source_custom_metadata_value.py +0 -7
  68. llama_cloud/types/extract_agent_data_schema_value.py +0 -5
  69. llama_cloud/types/extract_job_create_data_schema_override_zero_value.py +0 -7
  70. llama_cloud/types/extract_resultset_data_item_value.py +0 -7
  71. llama_cloud/types/extract_resultset_data_zero_value.py +0 -7
  72. llama_cloud/types/extract_resultset_extraction_metadata_value.py +0 -7
  73. llama_cloud/types/extract_run_data_item_value.py +0 -5
  74. llama_cloud/types/extract_run_data_schema_value.py +0 -5
  75. llama_cloud/types/extract_run_data_zero_value.py +0 -5
  76. llama_cloud/types/extract_run_extraction_metadata_value.py +0 -7
  77. llama_cloud/types/extract_schema_validate_response_data_schema_value.py +0 -7
  78. llama_cloud/types/file_permission_info_value.py +0 -5
  79. llama_cloud/types/file_resource_info_value.py +0 -5
  80. llama_cloud/types/llm_config_result_llm_type.py +0 -33
  81. llama_cloud/types/pipeline_data_source_custom_metadata_value.py +0 -7
  82. llama_cloud/types/pipeline_file_config_hash_value.py +0 -5
  83. llama_cloud/types/pipeline_file_create_custom_metadata_value.py +0 -7
  84. llama_cloud/types/pipeline_file_custom_metadata_value.py +0 -7
  85. llama_cloud/types/pipeline_file_permission_info_value.py +0 -7
  86. llama_cloud/types/pipeline_file_resource_info_value.py +0 -7
  87. {llama_cloud-0.1.22.dist-info → llama_cloud-0.1.24.dist-info}/LICENSE +0 -0
llama_cloud/resources/pipelines/client.py CHANGED
@@ -17,6 +17,7 @@ from ...types.eval_execution_params import EvalExecutionParams
 from ...types.file_count_by_status_response import FileCountByStatusResponse
 from ...types.http_validation_error import HttpValidationError
 from ...types.input_message import InputMessage
+from ...types.json_type import JsonType
 from ...types.llama_parse_parameters import LlamaParseParameters
 from ...types.managed_ingestion_status_response import ManagedIngestionStatusResponse
 from ...types.metadata_filters import MetadataFilters
@@ -36,7 +37,6 @@ from ...types.preset_retrieval_params import PresetRetrievalParams
 from ...types.retrieval_mode import RetrievalMode
 from ...types.retrieve_results import RetrieveResults
 from ...types.text_node import TextNode
-from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue
 from .types.pipeline_update_embedding_config import PipelineUpdateEmbeddingConfig
 from .types.pipeline_update_transform_config import PipelineUpdateTransformConfig
 
@@ -63,6 +63,7 @@ class PipelinesClient:
         project_name: typing.Optional[str] = None,
         pipeline_name: typing.Optional[str] = None,
         pipeline_type: typing.Optional[PipelineType] = None,
+        organization_id: typing.Optional[str] = None,
     ) -> typing.List[Pipeline]:
         """
         Search for pipelines by various parameters.
@@ -75,6 +76,8 @@ class PipelinesClient:
            - pipeline_name: typing.Optional[str].
 
            - pipeline_type: typing.Optional[PipelineType].
+
+           - organization_id: typing.Optional[str].
        ---
        from llama_cloud import PipelineType
        from llama_cloud.client import LlamaCloud
@@ -95,6 +98,7 @@ class PipelinesClient:
                    "project_name": project_name,
                    "pipeline_name": pipeline_name,
                    "pipeline_type": pipeline_type,
+                    "organization_id": organization_id,
                }
            ),
            headers=self._client_wrapper.get_headers(),
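The new `organization_id` parameter is passed straight through as a query-string filter. A minimal usage sketch, assuming the enclosing method is `search_pipelines` (its name is not shown in this hunk) and using placeholder IDs:

```python
from llama_cloud import PipelineType
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_API_TOKEN")

# organization_id (new in 0.1.24) narrows results to a single organization.
pipelines = client.pipelines.search_pipelines(
    pipeline_type=PipelineType.MANAGED,
    organization_id="org_abc123",  # placeholder
)
for pipeline in pipelines:
    print(pipeline.id, pipeline.name)
```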
@@ -658,9 +662,7 @@ class PipelinesClient:
         file_id: str,
         pipeline_id: str,
         *,
-        custom_metadata: typing.Optional[
-            typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]
-        ] = OMIT,
+        custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]] = OMIT,
     ) -> PipelineFile:
         """
         Update a file for a pipeline.
@@ -670,7 +672,7 @@ class PipelinesClient:
 
            - pipeline_id: str.
 
-           - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]].
+           - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]].
        ---
        from llama_cloud.client import LlamaCloud
 
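Replacing the generated `PipelineFileUpdateCustomMetadataValue` wrapper with `JsonType` means `custom_metadata` now accepts plain JSON values. A sketch, assuming the enclosing method is `update_pipeline_file` and that `file_id` precedes `pipeline_id` as in the signature above; all IDs are placeholders:

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_API_TOKEN")

# Values may be any JSON type (str, int, list, None, ...), no wrapper needed.
updated = client.pipelines.update_pipeline_file(
    "file_123",      # file_id placeholder
    "pipeline_456",  # pipeline_id placeholder
    custom_metadata={
        "department": "finance",
        "page_count": 12,
        "tags": ["q3", "report"],
        "reviewed": None,
    },
)
```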
@@ -1037,9 +1039,12 @@ class PipelinesClient:
         rerank_top_n: typing.Optional[int] = OMIT,
         alpha: typing.Optional[float] = OMIT,
         search_filters: typing.Optional[MetadataFilters] = OMIT,
+        search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]] = OMIT,
         files_top_k: typing.Optional[int] = OMIT,
         retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
         retrieve_image_nodes: typing.Optional[bool] = OMIT,
+        retrieve_page_screenshot_nodes: typing.Optional[bool] = OMIT,
+        retrieve_page_figure_nodes: typing.Optional[bool] = OMIT,
         query: str,
         class_name: typing.Optional[str] = OMIT,
     ) -> RetrieveResults:
@@ -1067,12 +1072,18 @@ class PipelinesClient:
 
            - search_filters: typing.Optional[MetadataFilters].
 
+           - search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]].
+
            - files_top_k: typing.Optional[int].
 
            - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
 
            - retrieve_image_nodes: typing.Optional[bool]. Whether to retrieve image nodes.
 
+           - retrieve_page_screenshot_nodes: typing.Optional[bool]. Whether to retrieve page screenshot nodes.
+
+           - retrieve_page_figure_nodes: typing.Optional[bool]. Whether to retrieve page figure nodes.
+
            - query: str. The query to retrieve against.
 
            - class_name: typing.Optional[str].
@@ -1108,12 +1119,18 @@ class PipelinesClient:
             _request["alpha"] = alpha
         if search_filters is not OMIT:
             _request["search_filters"] = search_filters
+        if search_filters_inference_schema is not OMIT:
+            _request["search_filters_inference_schema"] = search_filters_inference_schema
         if files_top_k is not OMIT:
             _request["files_top_k"] = files_top_k
         if retrieval_mode is not OMIT:
             _request["retrieval_mode"] = retrieval_mode
         if retrieve_image_nodes is not OMIT:
             _request["retrieve_image_nodes"] = retrieve_image_nodes
+        if retrieve_page_screenshot_nodes is not OMIT:
+            _request["retrieve_page_screenshot_nodes"] = retrieve_page_screenshot_nodes
+        if retrieve_page_figure_nodes is not OMIT:
+            _request["retrieve_page_figure_nodes"] = retrieve_page_figure_nodes
         if class_name is not OMIT:
             _request["class_name"] = class_name
         _response = self._client_wrapper.httpx_client.request(
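The three new options are OMIT-guarded like the existing ones, so they are only serialized when explicitly set. A sketch of a retrieval call exercising them, assuming the enclosing method is `run_search` and that the inference schema is an ordinary JSON-schema dict (the exact shape the backend expects is not shown in this diff):

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_API_TOKEN")

results = client.pipelines.run_search(
    "pipeline_456",  # pipeline_id placeholder
    query="What does the Q3 revenue chart show?",
    retrieve_page_screenshot_nodes=True,  # new: whole-page screenshots
    retrieve_page_figure_nodes=True,      # new: individual page figures
    # new: a hint schema for LLM-inferred search filters (shape illustrative)
    search_filters_inference_schema={
        "type": "object",
        "properties": {"fiscal_year": {"type": "integer"}},
    },
)
```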
@@ -1681,6 +1698,7 @@ class AsyncPipelinesClient:
         project_name: typing.Optional[str] = None,
         pipeline_name: typing.Optional[str] = None,
         pipeline_type: typing.Optional[PipelineType] = None,
+        organization_id: typing.Optional[str] = None,
     ) -> typing.List[Pipeline]:
         """
         Search for pipelines by various parameters.
@@ -1693,6 +1711,8 @@ class AsyncPipelinesClient:
            - pipeline_name: typing.Optional[str].
 
            - pipeline_type: typing.Optional[PipelineType].
+
+           - organization_id: typing.Optional[str].
        ---
        from llama_cloud import PipelineType
        from llama_cloud.client import AsyncLlamaCloud
@@ -1713,6 +1733,7 @@ class AsyncPipelinesClient:
                    "project_name": project_name,
                    "pipeline_name": pipeline_name,
                    "pipeline_type": pipeline_type,
+                    "organization_id": organization_id,
                }
            ),
            headers=self._client_wrapper.get_headers(),
@@ -2276,9 +2297,7 @@ class AsyncPipelinesClient:
         file_id: str,
         pipeline_id: str,
         *,
-        custom_metadata: typing.Optional[
-            typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]
-        ] = OMIT,
+        custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]] = OMIT,
     ) -> PipelineFile:
         """
         Update a file for a pipeline.
@@ -2288,7 +2307,7 @@ class AsyncPipelinesClient:
 
            - pipeline_id: str.
 
-           - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]].
+           - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]].
        ---
        from llama_cloud.client import AsyncLlamaCloud
 
@@ -2657,9 +2676,12 @@ class AsyncPipelinesClient:
         rerank_top_n: typing.Optional[int] = OMIT,
         alpha: typing.Optional[float] = OMIT,
         search_filters: typing.Optional[MetadataFilters] = OMIT,
+        search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]] = OMIT,
         files_top_k: typing.Optional[int] = OMIT,
         retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
         retrieve_image_nodes: typing.Optional[bool] = OMIT,
+        retrieve_page_screenshot_nodes: typing.Optional[bool] = OMIT,
+        retrieve_page_figure_nodes: typing.Optional[bool] = OMIT,
         query: str,
         class_name: typing.Optional[str] = OMIT,
     ) -> RetrieveResults:
@@ -2687,12 +2709,18 @@ class AsyncPipelinesClient:
 
            - search_filters: typing.Optional[MetadataFilters].
 
+           - search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]].
+
            - files_top_k: typing.Optional[int].
 
            - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
 
            - retrieve_image_nodes: typing.Optional[bool]. Whether to retrieve image nodes.
 
+           - retrieve_page_screenshot_nodes: typing.Optional[bool]. Whether to retrieve page screenshot nodes.
+
+           - retrieve_page_figure_nodes: typing.Optional[bool]. Whether to retrieve page figure nodes.
+
            - query: str. The query to retrieve against.
 
            - class_name: typing.Optional[str].
@@ -2728,12 +2756,18 @@ class AsyncPipelinesClient:
             _request["alpha"] = alpha
         if search_filters is not OMIT:
             _request["search_filters"] = search_filters
+        if search_filters_inference_schema is not OMIT:
+            _request["search_filters_inference_schema"] = search_filters_inference_schema
         if files_top_k is not OMIT:
             _request["files_top_k"] = files_top_k
         if retrieval_mode is not OMIT:
             _request["retrieval_mode"] = retrieval_mode
         if retrieve_image_nodes is not OMIT:
             _request["retrieve_image_nodes"] = retrieve_image_nodes
+        if retrieve_page_screenshot_nodes is not OMIT:
+            _request["retrieve_page_screenshot_nodes"] = retrieve_page_screenshot_nodes
+        if retrieve_page_figure_nodes is not OMIT:
+            _request["retrieve_page_figure_nodes"] = retrieve_page_figure_nodes
         if class_name is not OMIT:
             _request["class_name"] = class_name
         _response = await self._client_wrapper.httpx_client.request(
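The async client mirrors these changes one-for-one; only the awaited request differs. The same retrieval sketch against `AsyncLlamaCloud`, under the same naming assumptions as above:

```python
import asyncio

from llama_cloud.client import AsyncLlamaCloud

async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_API_TOKEN")
    results = await client.pipelines.run_search(
        "pipeline_456",  # placeholder
        query="What does the Q3 revenue chart show?",
        retrieve_page_figure_nodes=True,
    )
    print(results)

asyncio.run(main())
```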
llama_cloud/resources/pipelines/types/__init__.py CHANGED
@@ -1,6 +1,5 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from .pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue
 from .pipeline_update_embedding_config import (
     PipelineUpdateEmbeddingConfig,
     PipelineUpdateEmbeddingConfig_AzureEmbedding,
@@ -14,7 +13,6 @@ from .pipeline_update_embedding_config import (
 from .pipeline_update_transform_config import PipelineUpdateTransformConfig
 
 __all__ = [
-    "PipelineFileUpdateCustomMetadataValue",
     "PipelineUpdateEmbeddingConfig",
     "PipelineUpdateEmbeddingConfig_AzureEmbedding",
     "PipelineUpdateEmbeddingConfig_BedrockEmbedding",
llama_cloud/types/__init__.py CHANGED
@@ -72,8 +72,6 @@ from .data_source import DataSource
 from .data_source_component import DataSourceComponent
 from .data_source_create import DataSourceCreate
 from .data_source_create_component import DataSourceCreateComponent
-from .data_source_create_custom_metadata_value import DataSourceCreateCustomMetadataValue
-from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
 from .data_source_update_dispatcher_config import DataSourceUpdateDispatcherConfig
 from .delete_params import DeleteParams
 from .document_block import DocumentBlock
@@ -106,35 +104,24 @@ from .embedding_model_config_update_embedding_config import (
 )
 from .eval_execution_params import EvalExecutionParams
 from .extract_agent import ExtractAgent
-from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
 from .extract_config import ExtractConfig
 from .extract_job import ExtractJob
 from .extract_job_create import ExtractJobCreate
 from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
-from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
 from .extract_mode import ExtractMode
 from .extract_models import ExtractModels
 from .extract_resultset import ExtractResultset
 from .extract_resultset_data import ExtractResultsetData
-from .extract_resultset_data_item_value import ExtractResultsetDataItemValue
-from .extract_resultset_data_zero_value import ExtractResultsetDataZeroValue
-from .extract_resultset_extraction_metadata_value import ExtractResultsetExtractionMetadataValue
 from .extract_run import ExtractRun
 from .extract_run_data import ExtractRunData
-from .extract_run_data_item_value import ExtractRunDataItemValue
-from .extract_run_data_schema_value import ExtractRunDataSchemaValue
-from .extract_run_data_zero_value import ExtractRunDataZeroValue
-from .extract_run_extraction_metadata_value import ExtractRunExtractionMetadataValue
 from .extract_schema_validate_response import ExtractSchemaValidateResponse
-from .extract_schema_validate_response_data_schema_value import ExtractSchemaValidateResponseDataSchemaValue
 from .extract_state import ExtractState
 from .extract_target import ExtractTarget
 from .fail_page_mode import FailPageMode
 from .file import File
 from .file_count_by_status_response import FileCountByStatusResponse
+from .file_id_presigned_url import FileIdPresignedUrl
 from .file_parse_public import FileParsePublic
-from .file_permission_info_value import FilePermissionInfoValue
-from .file_resource_info_value import FileResourceInfoValue
 from .filter_condition import FilterCondition
 from .filter_operator import FilterOperator
 from .free_credits_usage import FreeCreditsUsage
@@ -163,6 +150,7 @@ from .job_record_parameters import (
     JobRecordParameters_PipelineManagedIngestion,
 )
 from .job_record_with_usage_metrics import JobRecordWithUsageMetrics
+from .json_type import JsonType
 from .l_lama_parse_transform_config import LLamaParseTransformConfig
 from .legacy_parse_job_config import LegacyParseJobConfig
 from .llama_extract_settings import LlamaExtractSettings
@@ -176,9 +164,6 @@ from .llama_index_core_base_llms_types_chat_message_blocks_item import (
 )
 from .llama_parse_parameters import LlamaParseParameters
 from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
-from .llm_config_result import LlmConfigResult
-from .llm_config_result_llm_type import LlmConfigResultLlmType
-from .llm_configs_response import LlmConfigsResponse
 from .llm_model_data import LlmModelData
 from .llm_parameters import LlmParameters
 from .load_files_job_config import LoadFilesJobConfig
@@ -199,6 +184,7 @@ from .open_ai_embedding_config import OpenAiEmbeddingConfig
 from .organization import Organization
 from .organization_create import OrganizationCreate
 from .page_figure_metadata import PageFigureMetadata
+from .page_figure_node_with_score import PageFigureNodeWithScore
 from .page_screenshot_metadata import PageScreenshotMetadata
 from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
 from .page_segmentation_config import PageSegmentationConfig
@@ -239,7 +225,6 @@ from .pipeline_create_transform_config import PipelineCreateTransformConfig
 from .pipeline_data_source import PipelineDataSource
 from .pipeline_data_source_component import PipelineDataSourceComponent
 from .pipeline_data_source_create import PipelineDataSourceCreate
-from .pipeline_data_source_custom_metadata_value import PipelineDataSourceCustomMetadataValue
 from .pipeline_data_source_status import PipelineDataSourceStatus
 from .pipeline_deployment import PipelineDeployment
 from .pipeline_embedding_config import (
@@ -253,12 +238,7 @@ from .pipeline_embedding_config import (
     PipelineEmbeddingConfig_VertexaiEmbedding,
 )
 from .pipeline_file import PipelineFile
-from .pipeline_file_config_hash_value import PipelineFileConfigHashValue
 from .pipeline_file_create import PipelineFileCreate
-from .pipeline_file_create_custom_metadata_value import PipelineFileCreateCustomMetadataValue
-from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
-from .pipeline_file_permission_info_value import PipelineFilePermissionInfoValue
-from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
 from .pipeline_file_status import PipelineFileStatus
 from .pipeline_file_update_dispatcher_config import PipelineFileUpdateDispatcherConfig
 from .pipeline_file_updater_config import PipelineFileUpdaterConfig
@@ -410,8 +390,6 @@ __all__ = [
     "DataSourceComponent",
     "DataSourceCreate",
     "DataSourceCreateComponent",
-    "DataSourceCreateCustomMetadataValue",
-    "DataSourceCustomMetadataValue",
     "DataSourceUpdateDispatcherConfig",
     "DeleteParams",
     "DocumentBlock",
@@ -440,35 +418,24 @@ __all__ = [
     "EmbeddingModelConfigUpdateEmbeddingConfig_VertexaiEmbedding",
     "EvalExecutionParams",
     "ExtractAgent",
-    "ExtractAgentDataSchemaValue",
     "ExtractConfig",
     "ExtractJob",
     "ExtractJobCreate",
     "ExtractJobCreateDataSchemaOverride",
-    "ExtractJobCreateDataSchemaOverrideZeroValue",
     "ExtractMode",
     "ExtractModels",
     "ExtractResultset",
     "ExtractResultsetData",
-    "ExtractResultsetDataItemValue",
-    "ExtractResultsetDataZeroValue",
-    "ExtractResultsetExtractionMetadataValue",
     "ExtractRun",
     "ExtractRunData",
-    "ExtractRunDataItemValue",
-    "ExtractRunDataSchemaValue",
-    "ExtractRunDataZeroValue",
-    "ExtractRunExtractionMetadataValue",
     "ExtractSchemaValidateResponse",
-    "ExtractSchemaValidateResponseDataSchemaValue",
     "ExtractState",
     "ExtractTarget",
     "FailPageMode",
     "File",
     "FileCountByStatusResponse",
+    "FileIdPresignedUrl",
     "FileParsePublic",
-    "FilePermissionInfoValue",
-    "FileResourceInfoValue",
     "FilterCondition",
     "FilterOperator",
     "FreeCreditsUsage",
@@ -495,6 +462,7 @@ __all__ = [
     "JobRecordParameters_PipelineFileUpdater",
     "JobRecordParameters_PipelineManagedIngestion",
     "JobRecordWithUsageMetrics",
+    "JsonType",
     "LLamaParseTransformConfig",
     "LegacyParseJobConfig",
     "LlamaExtractSettings",
@@ -506,9 +474,6 @@ __all__ = [
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
     "LlamaParseParameters",
     "LlamaParseSupportedFileExtensions",
-    "LlmConfigResult",
-    "LlmConfigResultLlmType",
-    "LlmConfigsResponse",
     "LlmModelData",
     "LlmParameters",
     "LoadFilesJobConfig",
@@ -529,6 +494,7 @@ __all__ = [
     "Organization",
     "OrganizationCreate",
     "PageFigureMetadata",
+    "PageFigureNodeWithScore",
     "PageScreenshotMetadata",
     "PageScreenshotNodeWithScore",
     "PageSegmentationConfig",
@@ -567,7 +533,6 @@ __all__ = [
     "PipelineDataSource",
     "PipelineDataSourceComponent",
     "PipelineDataSourceCreate",
-    "PipelineDataSourceCustomMetadataValue",
     "PipelineDataSourceStatus",
     "PipelineDeployment",
     "PipelineEmbeddingConfig",
@@ -579,12 +544,7 @@ __all__ = [
     "PipelineEmbeddingConfig_OpenaiEmbedding",
     "PipelineEmbeddingConfig_VertexaiEmbedding",
     "PipelineFile",
-    "PipelineFileConfigHashValue",
     "PipelineFileCreate",
-    "PipelineFileCreateCustomMetadataValue",
-    "PipelineFileCustomMetadataValue",
-    "PipelineFilePermissionInfoValue",
-    "PipelineFileResourceInfoValue",
     "PipelineFileStatus",
     "PipelineFileUpdateDispatcherConfig",
     "PipelineFileUpdaterConfig",
llama_cloud/types/composite_retrieval_result.py CHANGED
@@ -5,6 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from .composite_retrieved_text_node_with_score import CompositeRetrievedTextNodeWithScore
+from .page_figure_node_with_score import PageFigureNodeWithScore
 from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
 
 try:
@@ -21,7 +22,10 @@ class CompositeRetrievalResult(pydantic.BaseModel):
         description="The retrieved nodes from the composite retrieval."
     )
     image_nodes: typing.Optional[typing.List[PageScreenshotNodeWithScore]] = pydantic.Field(
-        description="The image nodes retrieved by the pipeline for the given query."
+        description="The image nodes retrieved by the pipeline for the given query. Deprecated - will soon be replaced with 'page_screenshot_nodes'."
+    )
+    page_figure_nodes: typing.Optional[typing.List[PageFigureNodeWithScore]] = pydantic.Field(
+        description="The page figure nodes retrieved by the pipeline for the given query."
     )
 
     def json(self, **kwargs: typing.Any) -> str:
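For consumers of the model, the new field sits alongside the now-deprecated `image_nodes`. A field-access sketch using only the attributes defined above:

```python
from llama_cloud import CompositeRetrievalResult

def count_visual_hits(result: CompositeRetrievalResult) -> int:
    # page_figure_nodes is the new field; image_nodes still works but is
    # flagged as deprecated in favor of a future 'page_screenshot_nodes'.
    figures = result.page_figure_nodes or []
    screenshots = result.image_nodes or []
    return len(figures) + len(screenshots)
```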
llama_cloud/types/data_source.py CHANGED
@@ -6,7 +6,7 @@ import typing
 from ..core.datetime_utils import serialize_datetime
 from .configurable_data_source_names import ConfigurableDataSourceNames
 from .data_source_component import DataSourceComponent
-from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
+from .json_type import JsonType
 
 try:
     import pydantic
@@ -27,7 +27,7 @@ class DataSource(pydantic.BaseModel):
     updated_at: typing.Optional[dt.datetime]
     name: str = pydantic.Field(description="The name of the data source.")
     source_type: ConfigurableDataSourceNames
-    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCustomMetadataValue]]]
+    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
     component: DataSourceComponent = pydantic.Field(description="Component that implements the data source")
     version_metadata: typing.Optional[typing.Dict[str, typing.Any]]
     project_id: str
llama_cloud/types/data_source_create.py CHANGED
@@ -6,7 +6,7 @@ import typing
 from ..core.datetime_utils import serialize_datetime
 from .configurable_data_source_names import ConfigurableDataSourceNames
 from .data_source_create_component import DataSourceCreateComponent
-from .data_source_create_custom_metadata_value import DataSourceCreateCustomMetadataValue
+from .json_type import JsonType
 
 try:
     import pydantic
@@ -24,7 +24,7 @@ class DataSourceCreate(pydantic.BaseModel):
 
     name: str = pydantic.Field(description="The name of the data source.")
     source_type: ConfigurableDataSourceNames
-    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCreateCustomMetadataValue]]]
+    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
     component: DataSourceCreateComponent = pydantic.Field(description="Component that implements the data source")
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/extract_agent.py CHANGED
@@ -4,8 +4,8 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
-from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
 from .extract_config import ExtractConfig
+from .json_type import JsonType
 
 try:
     import pydantic
@@ -24,9 +24,7 @@ class ExtractAgent(pydantic.BaseModel):
     id: str = pydantic.Field(description="The id of the extraction agent.")
     name: str = pydantic.Field(description="The name of the extraction agent.")
     project_id: str = pydantic.Field(description="The ID of the project that the extraction agent belongs to.")
-    data_schema: typing.Dict[str, typing.Optional[ExtractAgentDataSchemaValue]] = pydantic.Field(
-        description="The schema of the data."
-    )
+    data_schema: typing.Dict[str, typing.Optional[JsonType]] = pydantic.Field(description="The schema of the data.")
     config: ExtractConfig = pydantic.Field(description="The configuration parameters for the extraction agent.")
     created_at: typing.Optional[dt.datetime]
     updated_at: typing.Optional[dt.datetime]
llama_cloud/types/extract_job_create_data_schema_override.py CHANGED
@@ -2,8 +2,6 @@
 
 import typing
 
-from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
+from .json_type import JsonType
 
-ExtractJobCreateDataSchemaOverride = typing.Union[
-    typing.Dict[str, typing.Optional[ExtractJobCreateDataSchemaOverrideZeroValue]], str
-]
+ExtractJobCreateDataSchemaOverride = typing.Union[typing.Dict[str, typing.Optional[JsonType]], str]
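With the alias now `Union[Dict[str, Optional[JsonType]], str]`, a schema override can be an ordinary JSON-schema dict or a pre-serialized string. A sketch; the schema content is illustrative:

```python
import typing

# Either branch of the union type-checks; dict values may be any JSON type.
dict_override: typing.Dict[str, typing.Any] = {
    "type": "object",
    "properties": {
        "invoice_number": {"type": "string"},
        "total": {"type": "number"},
    },
    "required": ["invoice_number"],
}
str_override: str = '{"type": "object", "properties": {}}'
```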
llama_cloud/types/extract_models.py CHANGED
@@ -7,27 +7,39 @@ T_Result = typing.TypeVar("T_Result")
 
 
 class ExtractModels(str, enum.Enum):
-    GPT_4_O = "gpt-4o"
-    GPT_4_O_MINI = "gpt-4o-mini"
     GPT_41 = "gpt-4.1"
     GPT_41_MINI = "gpt-4.1-mini"
+    GEMINI_20_FLASH = "gemini-2.0-flash"
     O_3_MINI = "o3-mini"
+    GEMINI_25_FLASH = "gemini-2.5-flash"
+    GEMINI_25_PRO = "gemini-2.5-pro"
+    GPT_4_O = "gpt-4o"
+    GPT_4_O_MINI = "gpt-4o-mini"
 
     def visit(
         self,
-        gpt_4_o: typing.Callable[[], T_Result],
-        gpt_4_o_mini: typing.Callable[[], T_Result],
         gpt_41: typing.Callable[[], T_Result],
         gpt_41_mini: typing.Callable[[], T_Result],
+        gemini_20_flash: typing.Callable[[], T_Result],
         o_3_mini: typing.Callable[[], T_Result],
+        gemini_25_flash: typing.Callable[[], T_Result],
+        gemini_25_pro: typing.Callable[[], T_Result],
+        gpt_4_o: typing.Callable[[], T_Result],
+        gpt_4_o_mini: typing.Callable[[], T_Result],
     ) -> T_Result:
-        if self is ExtractModels.GPT_4_O:
-            return gpt_4_o()
-        if self is ExtractModels.GPT_4_O_MINI:
-            return gpt_4_o_mini()
         if self is ExtractModels.GPT_41:
             return gpt_41()
         if self is ExtractModels.GPT_41_MINI:
             return gpt_41_mini()
+        if self is ExtractModels.GEMINI_20_FLASH:
+            return gemini_20_flash()
         if self is ExtractModels.O_3_MINI:
             return o_3_mini()
+        if self is ExtractModels.GEMINI_25_FLASH:
+            return gemini_25_flash()
+        if self is ExtractModels.GEMINI_25_PRO:
+            return gemini_25_pro()
+        if self is ExtractModels.GPT_4_O:
+            return gpt_4_o()
+        if self is ExtractModels.GPT_4_O_MINI:
+            return gpt_4_o_mini()
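Because `visit` takes one callable per member and dispatches by identity, existing callers must add handlers for the three new Gemini entries or the call will fail with a `TypeError`. A sketch mapping each model to an illustrative cost tier (the tiers are not from the package):

```python
from llama_cloud import ExtractModels

def cost_tier(model: ExtractModels) -> str:
    # One handler per member; visit() dispatches on enum identity.
    return model.visit(
        gpt_41=lambda: "standard",
        gpt_41_mini=lambda: "budget",
        gemini_20_flash=lambda: "budget",  # new in 0.1.24
        o_3_mini=lambda: "reasoning",
        gemini_25_flash=lambda: "budget",  # new in 0.1.24
        gemini_25_pro=lambda: "standard",  # new in 0.1.24
        gpt_4_o=lambda: "standard",
        gpt_4_o_mini=lambda: "budget",
    )

print(cost_tier(ExtractModels.GEMINI_25_PRO))  # standard
```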
llama_cloud/types/extract_resultset.py CHANGED
@@ -5,7 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from .extract_resultset_data import ExtractResultsetData
-from .extract_resultset_extraction_metadata_value import ExtractResultsetExtractionMetadataValue
+from .json_type import JsonType
 
 try:
     import pydantic
@@ -24,7 +24,7 @@ class ExtractResultset(pydantic.BaseModel):
     run_id: str = pydantic.Field(description="The id of the extraction run")
     extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
     data: typing.Optional[ExtractResultsetData] = pydantic.Field(description="The data extracted from the file")
-    extraction_metadata: typing.Dict[str, typing.Optional[ExtractResultsetExtractionMetadataValue]] = pydantic.Field(
+    extraction_metadata: typing.Dict[str, typing.Optional[JsonType]] = pydantic.Field(
         description="The metadata extracted from the file"
     )
 
llama_cloud/types/extract_resultset_data.py CHANGED
@@ -2,10 +2,8 @@
 
 import typing
 
-from .extract_resultset_data_item_value import ExtractResultsetDataItemValue
-from .extract_resultset_data_zero_value import ExtractResultsetDataZeroValue
+from .json_type import JsonType
 
 ExtractResultsetData = typing.Union[
-    typing.Dict[str, typing.Optional[ExtractResultsetDataZeroValue]],
-    typing.List[typing.Dict[str, typing.Optional[ExtractResultsetDataItemValue]]],
+    typing.Dict[str, typing.Optional[JsonType]], typing.List[typing.Dict[str, typing.Optional[JsonType]]]
 ]
llama_cloud/types/extract_run.py CHANGED
@@ -6,10 +6,9 @@ import typing
 from ..core.datetime_utils import serialize_datetime
 from .extract_config import ExtractConfig
 from .extract_run_data import ExtractRunData
-from .extract_run_data_schema_value import ExtractRunDataSchemaValue
-from .extract_run_extraction_metadata_value import ExtractRunExtractionMetadataValue
 from .extract_state import ExtractState
 from .file import File
+from .json_type import JsonType
 
 try:
     import pydantic
@@ -29,7 +28,7 @@ class ExtractRun(pydantic.BaseModel):
     created_at: typing.Optional[dt.datetime]
     updated_at: typing.Optional[dt.datetime]
     extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
-    data_schema: typing.Dict[str, typing.Optional[ExtractRunDataSchemaValue]] = pydantic.Field(
+    data_schema: typing.Dict[str, typing.Optional[JsonType]] = pydantic.Field(
         description="The schema used for extraction"
     )
     config: ExtractConfig = pydantic.Field(description="The config used for extraction")
@@ -38,7 +37,7 @@ class ExtractRun(pydantic.BaseModel):
     error: typing.Optional[str]
     job_id: typing.Optional[str]
     data: typing.Optional[ExtractRunData] = pydantic.Field(description="The data extracted from the file")
-    extraction_metadata: typing.Optional[typing.Dict[str, typing.Optional[ExtractRunExtractionMetadataValue]]]
+    extraction_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
     from_ui: bool = pydantic.Field(description="Whether this extraction run was triggered from the UI")
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/extract_run_data.py CHANGED
@@ -2,10 +2,8 @@
 
 import typing
 
-from .extract_run_data_item_value import ExtractRunDataItemValue
-from .extract_run_data_zero_value import ExtractRunDataZeroValue
+from .json_type import JsonType
 
 ExtractRunData = typing.Union[
-    typing.Dict[str, typing.Optional[ExtractRunDataZeroValue]],
-    typing.List[typing.Dict[str, typing.Optional[ExtractRunDataItemValue]]],
+    typing.Dict[str, typing.Optional[JsonType]], typing.List[typing.Dict[str, typing.Optional[JsonType]]]
 ]
llama_cloud/types/extract_schema_validate_response.py CHANGED
@@ -4,7 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
-from .extract_schema_validate_response_data_schema_value import ExtractSchemaValidateResponseDataSchemaValue
+from .json_type import JsonType
 
 try:
     import pydantic
@@ -16,7 +16,7 @@ except ImportError:
 
 
 class ExtractSchemaValidateResponse(pydantic.BaseModel):
-    data_schema: typing.Dict[str, typing.Optional[ExtractSchemaValidateResponseDataSchemaValue]]
+    data_schema: typing.Dict[str, typing.Optional[JsonType]]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/file.py CHANGED
@@ -4,8 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
-from .file_permission_info_value import FilePermissionInfoValue
-from .file_resource_info_value import FileResourceInfoValue
+from .json_type import JsonType
 
 try:
     import pydantic
@@ -30,8 +29,8 @@ class File(pydantic.BaseModel):
     file_type: typing.Optional[str]
     project_id: str = pydantic.Field(description="The ID of the project that the file belongs to")
     last_modified_at: typing.Optional[dt.datetime]
-    resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileResourceInfoValue]]]
-    permission_info: typing.Optional[typing.Dict[str, typing.Optional[FilePermissionInfoValue]]]
+    resource_info: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
+    permission_info: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
     data_source_id: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
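The same `JsonType` migration applies to `File.resource_info` and `File.permission_info`, which now deserialize as plain JSON objects. A field-access sketch; the `owner` key is illustrative, not a documented field:

```python
import typing

from llama_cloud import File

def file_owner(f: File) -> typing.Optional[str]:
    # permission_info is Optional[Dict[str, Optional[JsonType]]]; treat it
    # as an ordinary JSON object and guard the type at runtime.
    info = f.permission_info or {}
    owner = info.get("owner")
    return owner if isinstance(owner, str) else None
```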