llama-cloud 0.1.23__py3-none-any.whl → 0.1.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (88)
  1. llama_cloud/__init__.py +6 -70
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +1 -20
  4. llama_cloud/resources/data_sources/__init__.py +2 -2
  5. llama_cloud/resources/data_sources/client.py +5 -5
  6. llama_cloud/resources/data_sources/types/__init__.py +1 -2
  7. llama_cloud/resources/files/__init__.py +0 -3
  8. llama_cloud/resources/files/client.py +18 -19
  9. llama_cloud/resources/jobs/client.py +8 -0
  10. llama_cloud/resources/llama_extract/__init__.py +0 -8
  11. llama_cloud/resources/llama_extract/client.py +92 -24
  12. llama_cloud/resources/llama_extract/types/__init__.py +0 -8
  13. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py +2 -4
  14. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py +2 -4
  15. llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override.py +2 -4
  16. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py +2 -4
  17. llama_cloud/resources/organizations/client.py +14 -4
  18. llama_cloud/resources/parsing/client.py +8 -0
  19. llama_cloud/resources/pipelines/__init__.py +0 -4
  20. llama_cloud/resources/pipelines/client.py +29 -20
  21. llama_cloud/resources/pipelines/types/__init__.py +0 -4
  22. llama_cloud/types/__init__.py +6 -50
  23. llama_cloud/types/composite_retrieval_result.py +5 -1
  24. llama_cloud/types/data_source.py +2 -2
  25. llama_cloud/types/data_source_create.py +2 -2
  26. llama_cloud/types/extract_agent.py +2 -4
  27. llama_cloud/types/extract_job_create_data_schema_override.py +2 -4
  28. llama_cloud/types/extract_models.py +20 -8
  29. llama_cloud/types/extract_resultset.py +2 -2
  30. llama_cloud/types/extract_resultset_data.py +2 -4
  31. llama_cloud/types/extract_run.py +3 -4
  32. llama_cloud/types/extract_run_data.py +2 -4
  33. llama_cloud/types/extract_schema_validate_response.py +2 -2
  34. llama_cloud/types/file.py +3 -4
  35. llama_cloud/types/{llm_config_result.py → file_id_presigned_url.py} +9 -5
  36. llama_cloud/types/json_type.py +9 -0
  37. llama_cloud/types/legacy_parse_job_config.py +1 -0
  38. llama_cloud/types/llama_extract_settings.py +3 -1
  39. llama_cloud/types/llama_parse_parameters.py +1 -0
  40. llama_cloud/types/page_figure_metadata.py +1 -0
  41. llama_cloud/types/{llm_configs_response.py → page_figure_node_with_score.py} +9 -4
  42. llama_cloud/types/parse_job_config.py +1 -0
  43. llama_cloud/types/pipeline_data_source.py +2 -2
  44. llama_cloud/types/pipeline_file.py +5 -8
  45. llama_cloud/types/pipeline_file_create.py +2 -2
  46. llama_cloud/types/preset_retrieval_params.py +8 -6
  47. llama_cloud/types/retrieve_results.py +5 -1
  48. llama_cloud/types/supported_llm_model_names.py +12 -4
  49. llama_cloud/types/user_organization_delete.py +1 -0
  50. {llama_cloud-0.1.23.dist-info → llama_cloud-0.1.24.dist-info}/METADATA +1 -1
  51. {llama_cloud-0.1.23.dist-info → llama_cloud-0.1.24.dist-info}/RECORD +53 -87
  52. llama_cloud/resources/admin/__init__.py +0 -2
  53. llama_cloud/resources/admin/client.py +0 -78
  54. llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py +0 -7
  55. llama_cloud/resources/files/types/__init__.py +0 -7
  56. llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py +0 -7
  57. llama_cloud/resources/files/types/file_create_permission_info_value.py +0 -7
  58. llama_cloud/resources/files/types/file_create_resource_info_value.py +0 -5
  59. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_zero_value.py +0 -7
  60. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_zero_value.py +0 -7
  61. llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override_zero_value.py +0 -7
  62. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py +0 -7
  63. llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py +0 -7
  64. llama_cloud/resources/pipelines/types/retrieval_params_search_filters_inference_schema_value.py +0 -7
  65. llama_cloud/types/data_source_create_custom_metadata_value.py +0 -7
  66. llama_cloud/types/data_source_custom_metadata_value.py +0 -7
  67. llama_cloud/types/extract_agent_data_schema_value.py +0 -5
  68. llama_cloud/types/extract_job_create_data_schema_override_zero_value.py +0 -7
  69. llama_cloud/types/extract_resultset_data_item_value.py +0 -7
  70. llama_cloud/types/extract_resultset_data_zero_value.py +0 -7
  71. llama_cloud/types/extract_resultset_extraction_metadata_value.py +0 -7
  72. llama_cloud/types/extract_run_data_item_value.py +0 -5
  73. llama_cloud/types/extract_run_data_schema_value.py +0 -5
  74. llama_cloud/types/extract_run_data_zero_value.py +0 -5
  75. llama_cloud/types/extract_run_extraction_metadata_value.py +0 -7
  76. llama_cloud/types/extract_schema_validate_response_data_schema_value.py +0 -7
  77. llama_cloud/types/file_permission_info_value.py +0 -5
  78. llama_cloud/types/file_resource_info_value.py +0 -5
  79. llama_cloud/types/llm_config_result_llm_type.py +0 -33
  80. llama_cloud/types/pipeline_data_source_custom_metadata_value.py +0 -7
  81. llama_cloud/types/pipeline_file_config_hash_value.py +0 -5
  82. llama_cloud/types/pipeline_file_create_custom_metadata_value.py +0 -7
  83. llama_cloud/types/pipeline_file_custom_metadata_value.py +0 -7
  84. llama_cloud/types/pipeline_file_permission_info_value.py +0 -7
  85. llama_cloud/types/pipeline_file_resource_info_value.py +0 -7
  86. llama_cloud/types/preset_retrieval_params_search_filters_inference_schema_value.py +0 -7
  87. {llama_cloud-0.1.23.dist-info → llama_cloud-0.1.24.dist-info}/LICENSE +0 -0
  88. {llama_cloud-0.1.23.dist-info → llama_cloud-0.1.24.dist-info}/WHEEL +0 -0

llama_cloud/resources/pipelines/types/__init__.py CHANGED
@@ -1,6 +1,5 @@
  # This file was auto-generated by Fern from our API Definition.

- from .pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue
  from .pipeline_update_embedding_config import (
      PipelineUpdateEmbeddingConfig,
      PipelineUpdateEmbeddingConfig_AzureEmbedding,
@@ -12,10 +11,8 @@ from .pipeline_update_embedding_config import (
      PipelineUpdateEmbeddingConfig_VertexaiEmbedding,
  )
  from .pipeline_update_transform_config import PipelineUpdateTransformConfig
- from .retrieval_params_search_filters_inference_schema_value import RetrievalParamsSearchFiltersInferenceSchemaValue

  __all__ = [
-     "PipelineFileUpdateCustomMetadataValue",
      "PipelineUpdateEmbeddingConfig",
      "PipelineUpdateEmbeddingConfig_AzureEmbedding",
      "PipelineUpdateEmbeddingConfig_BedrockEmbedding",
@@ -25,5 +22,4 @@ __all__ = [
      "PipelineUpdateEmbeddingConfig_OpenaiEmbedding",
      "PipelineUpdateEmbeddingConfig_VertexaiEmbedding",
      "PipelineUpdateTransformConfig",
-     "RetrievalParamsSearchFiltersInferenceSchemaValue",
  ]

llama_cloud/types/__init__.py CHANGED
@@ -72,8 +72,6 @@ from .data_source import DataSource
  from .data_source_component import DataSourceComponent
  from .data_source_create import DataSourceCreate
  from .data_source_create_component import DataSourceCreateComponent
- from .data_source_create_custom_metadata_value import DataSourceCreateCustomMetadataValue
- from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
  from .data_source_update_dispatcher_config import DataSourceUpdateDispatcherConfig
  from .delete_params import DeleteParams
  from .document_block import DocumentBlock
@@ -106,35 +104,24 @@ from .embedding_model_config_update_embedding_config import (
  )
  from .eval_execution_params import EvalExecutionParams
  from .extract_agent import ExtractAgent
- from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
  from .extract_config import ExtractConfig
  from .extract_job import ExtractJob
  from .extract_job_create import ExtractJobCreate
  from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
- from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
  from .extract_mode import ExtractMode
  from .extract_models import ExtractModels
  from .extract_resultset import ExtractResultset
  from .extract_resultset_data import ExtractResultsetData
- from .extract_resultset_data_item_value import ExtractResultsetDataItemValue
- from .extract_resultset_data_zero_value import ExtractResultsetDataZeroValue
- from .extract_resultset_extraction_metadata_value import ExtractResultsetExtractionMetadataValue
  from .extract_run import ExtractRun
  from .extract_run_data import ExtractRunData
- from .extract_run_data_item_value import ExtractRunDataItemValue
- from .extract_run_data_schema_value import ExtractRunDataSchemaValue
- from .extract_run_data_zero_value import ExtractRunDataZeroValue
- from .extract_run_extraction_metadata_value import ExtractRunExtractionMetadataValue
  from .extract_schema_validate_response import ExtractSchemaValidateResponse
- from .extract_schema_validate_response_data_schema_value import ExtractSchemaValidateResponseDataSchemaValue
  from .extract_state import ExtractState
  from .extract_target import ExtractTarget
  from .fail_page_mode import FailPageMode
  from .file import File
  from .file_count_by_status_response import FileCountByStatusResponse
+ from .file_id_presigned_url import FileIdPresignedUrl
  from .file_parse_public import FileParsePublic
- from .file_permission_info_value import FilePermissionInfoValue
- from .file_resource_info_value import FileResourceInfoValue
  from .filter_condition import FilterCondition
  from .filter_operator import FilterOperator
  from .free_credits_usage import FreeCreditsUsage
@@ -163,6 +150,7 @@ from .job_record_parameters import (
      JobRecordParameters_PipelineManagedIngestion,
  )
  from .job_record_with_usage_metrics import JobRecordWithUsageMetrics
+ from .json_type import JsonType
  from .l_lama_parse_transform_config import LLamaParseTransformConfig
  from .legacy_parse_job_config import LegacyParseJobConfig
  from .llama_extract_settings import LlamaExtractSettings
@@ -176,9 +164,6 @@ from .llama_index_core_base_llms_types_chat_message_blocks_item import (
  )
  from .llama_parse_parameters import LlamaParseParameters
  from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
- from .llm_config_result import LlmConfigResult
- from .llm_config_result_llm_type import LlmConfigResultLlmType
- from .llm_configs_response import LlmConfigsResponse
  from .llm_model_data import LlmModelData
  from .llm_parameters import LlmParameters
  from .load_files_job_config import LoadFilesJobConfig
@@ -199,6 +184,7 @@ from .open_ai_embedding_config import OpenAiEmbeddingConfig
  from .organization import Organization
  from .organization_create import OrganizationCreate
  from .page_figure_metadata import PageFigureMetadata
+ from .page_figure_node_with_score import PageFigureNodeWithScore
  from .page_screenshot_metadata import PageScreenshotMetadata
  from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
  from .page_segmentation_config import PageSegmentationConfig
@@ -239,7 +225,6 @@ from .pipeline_create_transform_config import PipelineCreateTransformConfig
  from .pipeline_data_source import PipelineDataSource
  from .pipeline_data_source_component import PipelineDataSourceComponent
  from .pipeline_data_source_create import PipelineDataSourceCreate
- from .pipeline_data_source_custom_metadata_value import PipelineDataSourceCustomMetadataValue
  from .pipeline_data_source_status import PipelineDataSourceStatus
  from .pipeline_deployment import PipelineDeployment
  from .pipeline_embedding_config import (
@@ -253,12 +238,7 @@ from .pipeline_embedding_config import (
      PipelineEmbeddingConfig_VertexaiEmbedding,
  )
  from .pipeline_file import PipelineFile
- from .pipeline_file_config_hash_value import PipelineFileConfigHashValue
  from .pipeline_file_create import PipelineFileCreate
- from .pipeline_file_create_custom_metadata_value import PipelineFileCreateCustomMetadataValue
- from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
- from .pipeline_file_permission_info_value import PipelineFilePermissionInfoValue
- from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
  from .pipeline_file_status import PipelineFileStatus
  from .pipeline_file_update_dispatcher_config import PipelineFileUpdateDispatcherConfig
  from .pipeline_file_updater_config import PipelineFileUpdaterConfig
@@ -276,9 +256,6 @@ from .playground_session import PlaygroundSession
  from .pooling import Pooling
  from .preset_composite_retrieval_params import PresetCompositeRetrievalParams
  from .preset_retrieval_params import PresetRetrievalParams
- from .preset_retrieval_params_search_filters_inference_schema_value import (
-     PresetRetrievalParamsSearchFiltersInferenceSchemaValue,
- )
  from .presigned_url import PresignedUrl
  from .progress_event import ProgressEvent
  from .progress_event_status import ProgressEventStatus
@@ -413,8 +390,6 @@ __all__ = [
      "DataSourceComponent",
      "DataSourceCreate",
      "DataSourceCreateComponent",
-     "DataSourceCreateCustomMetadataValue",
-     "DataSourceCustomMetadataValue",
      "DataSourceUpdateDispatcherConfig",
      "DeleteParams",
      "DocumentBlock",
@@ -443,35 +418,24 @@ __all__ = [
      "EmbeddingModelConfigUpdateEmbeddingConfig_VertexaiEmbedding",
      "EvalExecutionParams",
      "ExtractAgent",
-     "ExtractAgentDataSchemaValue",
      "ExtractConfig",
      "ExtractJob",
      "ExtractJobCreate",
      "ExtractJobCreateDataSchemaOverride",
-     "ExtractJobCreateDataSchemaOverrideZeroValue",
      "ExtractMode",
      "ExtractModels",
      "ExtractResultset",
      "ExtractResultsetData",
-     "ExtractResultsetDataItemValue",
-     "ExtractResultsetDataZeroValue",
-     "ExtractResultsetExtractionMetadataValue",
      "ExtractRun",
      "ExtractRunData",
-     "ExtractRunDataItemValue",
-     "ExtractRunDataSchemaValue",
-     "ExtractRunDataZeroValue",
-     "ExtractRunExtractionMetadataValue",
      "ExtractSchemaValidateResponse",
-     "ExtractSchemaValidateResponseDataSchemaValue",
      "ExtractState",
      "ExtractTarget",
      "FailPageMode",
      "File",
      "FileCountByStatusResponse",
+     "FileIdPresignedUrl",
      "FileParsePublic",
-     "FilePermissionInfoValue",
-     "FileResourceInfoValue",
      "FilterCondition",
      "FilterOperator",
      "FreeCreditsUsage",
@@ -498,6 +462,7 @@ __all__ = [
      "JobRecordParameters_PipelineFileUpdater",
      "JobRecordParameters_PipelineManagedIngestion",
      "JobRecordWithUsageMetrics",
+     "JsonType",
      "LLamaParseTransformConfig",
      "LegacyParseJobConfig",
      "LlamaExtractSettings",
@@ -509,9 +474,6 @@ __all__ = [
      "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
      "LlamaParseParameters",
      "LlamaParseSupportedFileExtensions",
-     "LlmConfigResult",
-     "LlmConfigResultLlmType",
-     "LlmConfigsResponse",
      "LlmModelData",
      "LlmParameters",
      "LoadFilesJobConfig",
@@ -532,6 +494,7 @@ __all__ = [
      "Organization",
      "OrganizationCreate",
      "PageFigureMetadata",
+     "PageFigureNodeWithScore",
      "PageScreenshotMetadata",
      "PageScreenshotNodeWithScore",
      "PageSegmentationConfig",
@@ -570,7 +533,6 @@ __all__ = [
      "PipelineDataSource",
      "PipelineDataSourceComponent",
      "PipelineDataSourceCreate",
-     "PipelineDataSourceCustomMetadataValue",
      "PipelineDataSourceStatus",
      "PipelineDeployment",
      "PipelineEmbeddingConfig",
@@ -582,12 +544,7 @@ __all__ = [
      "PipelineEmbeddingConfig_OpenaiEmbedding",
      "PipelineEmbeddingConfig_VertexaiEmbedding",
      "PipelineFile",
-     "PipelineFileConfigHashValue",
      "PipelineFileCreate",
-     "PipelineFileCreateCustomMetadataValue",
-     "PipelineFileCustomMetadataValue",
-     "PipelineFilePermissionInfoValue",
-     "PipelineFileResourceInfoValue",
      "PipelineFileStatus",
      "PipelineFileUpdateDispatcherConfig",
      "PipelineFileUpdaterConfig",
@@ -603,7 +560,6 @@ __all__ = [
      "Pooling",
      "PresetCompositeRetrievalParams",
      "PresetRetrievalParams",
-     "PresetRetrievalParamsSearchFiltersInferenceSchemaValue",
      "PresignedUrl",
      "ProgressEvent",
      "ProgressEventStatus",

llama_cloud/types/composite_retrieval_result.py CHANGED
@@ -5,6 +5,7 @@ import typing

  from ..core.datetime_utils import serialize_datetime
  from .composite_retrieved_text_node_with_score import CompositeRetrievedTextNodeWithScore
+ from .page_figure_node_with_score import PageFigureNodeWithScore
  from .page_screenshot_node_with_score import PageScreenshotNodeWithScore

  try:
@@ -21,7 +22,10 @@ class CompositeRetrievalResult(pydantic.BaseModel):
          description="The retrieved nodes from the composite retrieval."
      )
      image_nodes: typing.Optional[typing.List[PageScreenshotNodeWithScore]] = pydantic.Field(
-         description="The image nodes retrieved by the pipeline for the given query."
+         description="The image nodes retrieved by the pipeline for the given query. Deprecated - will soon be replaced with 'page_screenshot_nodes'."
+     )
+     page_figure_nodes: typing.Optional[typing.List[PageFigureNodeWithScore]] = pydantic.Field(
+         description="The page figure nodes retrieved by the pipeline for the given query."
      )

      def json(self, **kwargs: typing.Any) -> str:
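
The model thus gains page_figure_nodes while image_nodes is documented as deprecated in favor of a future 'page_screenshot_nodes'. A minimal forward-compatible consumer, sketched against only the fields shown above:

import typing

def collect_visual_nodes(result) -> typing.List[typing.Any]:
    # Prefer the new field; keep reading the deprecated one until the
    # announced rename to 'page_screenshot_nodes' lands.
    figures = result.page_figure_nodes or []
    screenshots = result.image_nodes or []
    return [*figures, *screenshots]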

llama_cloud/types/data_source.py CHANGED
@@ -6,7 +6,7 @@
  from ..core.datetime_utils import serialize_datetime
  from .configurable_data_source_names import ConfigurableDataSourceNames
  from .data_source_component import DataSourceComponent
- from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
+ from .json_type import JsonType

  try:
      import pydantic
@@ -27,7 +27,7 @@ class DataSource(pydantic.BaseModel):
      updated_at: typing.Optional[dt.datetime]
      name: str = pydantic.Field(description="The name of the data source.")
      source_type: ConfigurableDataSourceNames
-     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCustomMetadataValue]]]
+     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
      component: DataSourceComponent = pydantic.Field(description="Component that implements the data source")
      version_metadata: typing.Optional[typing.Dict[str, typing.Any]]
      project_id: str
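
This hunk shows the pattern repeated across most files below: the 0.1.23 per-field wrapper aliases (here DataSourceCustomMetadataValue) are deleted outright, and every JSON-valued field now shares the recursive JsonType alias added later in this diff. For callers it is more an import change than a behavioral one; a hedged compatibility sketch for code that referenced the old name:

import typing

try:
    # 0.1.24+: one shared alias for every JSON-valued field.
    from llama_cloud import JsonType
    MetadataValue = typing.Optional[JsonType]
except ImportError:
    # 0.1.23 and earlier: per-field wrapper alias, removed in this release.
    from llama_cloud import DataSourceCustomMetadataValue as _Old
    MetadataValue = typing.Optional[_Old]

custom_metadata: typing.Dict[str, MetadataValue] = {"owner": "data-team", "tags": ["prod", "eu"]}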

llama_cloud/types/data_source_create.py CHANGED
@@ -6,7 +6,7 @@
  from ..core.datetime_utils import serialize_datetime
  from .configurable_data_source_names import ConfigurableDataSourceNames
  from .data_source_create_component import DataSourceCreateComponent
- from .data_source_create_custom_metadata_value import DataSourceCreateCustomMetadataValue
+ from .json_type import JsonType

  try:
      import pydantic
@@ -24,7 +24,7 @@ class DataSourceCreate(pydantic.BaseModel):

      name: str = pydantic.Field(description="The name of the data source.")
      source_type: ConfigurableDataSourceNames
-     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[DataSourceCreateCustomMetadataValue]]]
+     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
      component: DataSourceCreateComponent = pydantic.Field(description="Component that implements the data source")

      def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/extract_agent.py CHANGED
@@ -4,8 +4,8 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
- from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
  from .extract_config import ExtractConfig
+ from .json_type import JsonType

  try:
      import pydantic
@@ -24,9 +24,7 @@ class ExtractAgent(pydantic.BaseModel):
      id: str = pydantic.Field(description="The id of the extraction agent.")
      name: str = pydantic.Field(description="The name of the extraction agent.")
      project_id: str = pydantic.Field(description="The ID of the project that the extraction agent belongs to.")
-     data_schema: typing.Dict[str, typing.Optional[ExtractAgentDataSchemaValue]] = pydantic.Field(
-         description="The schema of the data."
-     )
+     data_schema: typing.Dict[str, typing.Optional[JsonType]] = pydantic.Field(description="The schema of the data.")
      config: ExtractConfig = pydantic.Field(description="The configuration parameters for the extraction agent.")
      created_at: typing.Optional[dt.datetime]
      updated_at: typing.Optional[dt.datetime]

llama_cloud/types/extract_job_create_data_schema_override.py CHANGED
@@ -2,8 +2,6 @@

  import typing

- from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
+ from .json_type import JsonType

- ExtractJobCreateDataSchemaOverride = typing.Union[
-     typing.Dict[str, typing.Optional[ExtractJobCreateDataSchemaOverrideZeroValue]], str
- ]
+ ExtractJobCreateDataSchemaOverride = typing.Union[typing.Dict[str, typing.Optional[JsonType]], str]
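
The alias keeps its two arms, an inline schema object or a plain string; only the dict's value type changed. For illustration (the JSON-schema-shaped payload here is a hypothetical example, not taken from the SDK docs):

import json
import typing

# Both forms satisfy ExtractJobCreateDataSchemaOverride in 0.1.24.
schema_as_dict: typing.Dict[str, typing.Any] = {
    "type": "object",
    "properties": {"invoice_total": {"type": "number"}},
}
schema_as_string: str = json.dumps(schema_as_dict)  # the str arm of the union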
@@ -7,27 +7,39 @@ T_Result = typing.TypeVar("T_Result")
7
7
 
8
8
 
9
9
  class ExtractModels(str, enum.Enum):
10
- GPT_4_O = "gpt-4o"
11
- GPT_4_O_MINI = "gpt-4o-mini"
12
10
  GPT_41 = "gpt-4.1"
13
11
  GPT_41_MINI = "gpt-4.1-mini"
12
+ GEMINI_20_FLASH = "gemini-2.0-flash"
14
13
  O_3_MINI = "o3-mini"
14
+ GEMINI_25_FLASH = "gemini-2.5-flash"
15
+ GEMINI_25_PRO = "gemini-2.5-pro"
16
+ GPT_4_O = "gpt-4o"
17
+ GPT_4_O_MINI = "gpt-4o-mini"
15
18
 
16
19
  def visit(
17
20
  self,
18
- gpt_4_o: typing.Callable[[], T_Result],
19
- gpt_4_o_mini: typing.Callable[[], T_Result],
20
21
  gpt_41: typing.Callable[[], T_Result],
21
22
  gpt_41_mini: typing.Callable[[], T_Result],
23
+ gemini_20_flash: typing.Callable[[], T_Result],
22
24
  o_3_mini: typing.Callable[[], T_Result],
25
+ gemini_25_flash: typing.Callable[[], T_Result],
26
+ gemini_25_pro: typing.Callable[[], T_Result],
27
+ gpt_4_o: typing.Callable[[], T_Result],
28
+ gpt_4_o_mini: typing.Callable[[], T_Result],
23
29
  ) -> T_Result:
24
- if self is ExtractModels.GPT_4_O:
25
- return gpt_4_o()
26
- if self is ExtractModels.GPT_4_O_MINI:
27
- return gpt_4_o_mini()
28
30
  if self is ExtractModels.GPT_41:
29
31
  return gpt_41()
30
32
  if self is ExtractModels.GPT_41_MINI:
31
33
  return gpt_41_mini()
34
+ if self is ExtractModels.GEMINI_20_FLASH:
35
+ return gemini_20_flash()
32
36
  if self is ExtractModels.O_3_MINI:
33
37
  return o_3_mini()
38
+ if self is ExtractModels.GEMINI_25_FLASH:
39
+ return gemini_25_flash()
40
+ if self is ExtractModels.GEMINI_25_PRO:
41
+ return gemini_25_pro()
42
+ if self is ExtractModels.GPT_4_O:
43
+ return gpt_4_o()
44
+ if self is ExtractModels.GPT_4_O_MINI:
45
+ return gpt_4_o_mini()
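
visit() dispatches through one callback per enum member, so the three new Gemini members and the reshuffled parameter order are a breaking change for positional callers written against 0.1.23. A sketch using keyword arguments, which survive the reordering:

from llama_cloud import ExtractModels

label = ExtractModels.GEMINI_25_PRO.visit(
    gpt_41=lambda: "gpt-4.1",
    gpt_41_mini=lambda: "gpt-4.1-mini",
    gemini_20_flash=lambda: "gemini-2.0-flash",
    o_3_mini=lambda: "o3-mini",
    gemini_25_flash=lambda: "gemini-2.5-flash",
    gemini_25_pro=lambda: "gemini-2.5-pro",
    gpt_4_o=lambda: "gpt-4o",
    gpt_4_o_mini=lambda: "gpt-4o-mini",
)
assert label == "gemini-2.5-pro"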

llama_cloud/types/extract_resultset.py CHANGED
@@ -5,7 +5,7 @@ import typing

  from ..core.datetime_utils import serialize_datetime
  from .extract_resultset_data import ExtractResultsetData
- from .extract_resultset_extraction_metadata_value import ExtractResultsetExtractionMetadataValue
+ from .json_type import JsonType

  try:
      import pydantic
@@ -24,7 +24,7 @@ class ExtractResultset(pydantic.BaseModel):
      run_id: str = pydantic.Field(description="The id of the extraction run")
      extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
      data: typing.Optional[ExtractResultsetData] = pydantic.Field(description="The data extracted from the file")
-     extraction_metadata: typing.Dict[str, typing.Optional[ExtractResultsetExtractionMetadataValue]] = pydantic.Field(
+     extraction_metadata: typing.Dict[str, typing.Optional[JsonType]] = pydantic.Field(
          description="The metadata extracted from the file"
      )

llama_cloud/types/extract_resultset_data.py CHANGED
@@ -2,10 +2,8 @@

  import typing

- from .extract_resultset_data_item_value import ExtractResultsetDataItemValue
- from .extract_resultset_data_zero_value import ExtractResultsetDataZeroValue
+ from .json_type import JsonType

  ExtractResultsetData = typing.Union[
-     typing.Dict[str, typing.Optional[ExtractResultsetDataZeroValue]],
-     typing.List[typing.Dict[str, typing.Optional[ExtractResultsetDataItemValue]]],
+     typing.Dict[str, typing.Optional[JsonType]], typing.List[typing.Dict[str, typing.Optional[JsonType]]]
  ]

llama_cloud/types/extract_run.py CHANGED
@@ -6,10 +6,9 @@ import typing

  from ..core.datetime_utils import serialize_datetime
  from .extract_config import ExtractConfig
  from .extract_run_data import ExtractRunData
- from .extract_run_data_schema_value import ExtractRunDataSchemaValue
- from .extract_run_extraction_metadata_value import ExtractRunExtractionMetadataValue
  from .extract_state import ExtractState
  from .file import File
+ from .json_type import JsonType

  try:
      import pydantic
@@ -29,7 +28,7 @@ class ExtractRun(pydantic.BaseModel):
      created_at: typing.Optional[dt.datetime]
      updated_at: typing.Optional[dt.datetime]
      extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
-     data_schema: typing.Dict[str, typing.Optional[ExtractRunDataSchemaValue]] = pydantic.Field(
+     data_schema: typing.Dict[str, typing.Optional[JsonType]] = pydantic.Field(
          description="The schema used for extraction"
      )
      config: ExtractConfig = pydantic.Field(description="The config used for extraction")
@@ -38,7 +37,7 @@ class ExtractRun(pydantic.BaseModel):
      error: typing.Optional[str]
      job_id: typing.Optional[str]
      data: typing.Optional[ExtractRunData] = pydantic.Field(description="The data extracted from the file")
-     extraction_metadata: typing.Optional[typing.Dict[str, typing.Optional[ExtractRunExtractionMetadataValue]]]
+     extraction_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
      from_ui: bool = pydantic.Field(description="Whether this extraction run was triggered from the UI")

      def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/extract_run_data.py CHANGED
@@ -2,10 +2,8 @@

  import typing

- from .extract_run_data_item_value import ExtractRunDataItemValue
- from .extract_run_data_zero_value import ExtractRunDataZeroValue
+ from .json_type import JsonType

  ExtractRunData = typing.Union[
-     typing.Dict[str, typing.Optional[ExtractRunDataZeroValue]],
-     typing.List[typing.Dict[str, typing.Optional[ExtractRunDataItemValue]]],
+     typing.Dict[str, typing.Optional[JsonType]], typing.List[typing.Dict[str, typing.Optional[JsonType]]]
  ]

llama_cloud/types/extract_schema_validate_response.py CHANGED
@@ -4,7 +4,7 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
- from .extract_schema_validate_response_data_schema_value import ExtractSchemaValidateResponseDataSchemaValue
+ from .json_type import JsonType

  try:
      import pydantic
@@ -16,7 +16,7 @@ except ImportError:


  class ExtractSchemaValidateResponse(pydantic.BaseModel):
-     data_schema: typing.Dict[str, typing.Optional[ExtractSchemaValidateResponseDataSchemaValue]]
+     data_schema: typing.Dict[str, typing.Optional[JsonType]]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

llama_cloud/types/file.py CHANGED
@@ -4,8 +4,7 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
- from .file_permission_info_value import FilePermissionInfoValue
- from .file_resource_info_value import FileResourceInfoValue
+ from .json_type import JsonType

  try:
      import pydantic
@@ -30,8 +29,8 @@ class File(pydantic.BaseModel):
      file_type: typing.Optional[str]
      project_id: str = pydantic.Field(description="The ID of the project that the file belongs to")
      last_modified_at: typing.Optional[dt.datetime]
-     resource_info: typing.Optional[typing.Dict[str, typing.Optional[FileResourceInfoValue]]]
-     permission_info: typing.Optional[typing.Dict[str, typing.Optional[FilePermissionInfoValue]]]
+     resource_info: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
+     permission_info: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
      data_source_id: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/{llm_config_result.py → file_id_presigned_url.py} RENAMED
@@ -4,7 +4,6 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
- from .llm_config_result_llm_type import LlmConfigResultLlmType

  try:
      import pydantic
@@ -15,10 +14,15 @@ except ImportError:
      import pydantic # type: ignore


- class LlmConfigResult(pydantic.BaseModel):
-     llm_type: LlmConfigResultLlmType
-     valid: bool
-     error_message: typing.Optional[str]
+ class FileIdPresignedUrl(pydantic.BaseModel):
+     """
+     Schema for a presigned URL with a file ID.
+     """
+
+     url: str = pydantic.Field(description="A presigned URL for IO operations against a private file")
+     expires_at: dt.datetime = pydantic.Field(description="The time at which the presigned URL expires")
+     form_fields: typing.Optional[typing.Dict[str, typing.Optional[str]]]
+     file_id: str = pydantic.Field(description="The ID of the file associated with the presigned URL")

      def json(self, **kwargs: typing.Any) -> str:

llama_cloud/types/json_type.py ADDED
@@ -0,0 +1,9 @@
+ # This file was auto-generated by Fern from our API Definition.
+
+ from __future__ import annotations
+
+ import typing
+
+ JsonType = typing.Union[
+     str, int, float, bool, typing.List[typing.Optional[JsonType]], typing.Dict[str, typing.Optional[JsonType]]
+ ]
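
Two details of the new alias are easy to miss: None is not a member of the union itself (nullability is layered on at each use site, hence the Optional[JsonType] spelling throughout this diff), and the self-references let values nest arbitrarily deep. For illustration, against the 0.1.24 package:

import typing

from llama_cloud import JsonType  # re-exported from the package root, per the __init__ hunk above

doc: typing.Dict[str, typing.Optional[JsonType]] = {
    "report": {"sections": [{"title": "Intro", "figures": [1, 2.5, None]}]},
    "approved": True,
    "missing": None,  # permitted by the Optional wrapper, not by JsonType itself
}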

llama_cloud/types/legacy_parse_job_config.py CHANGED
@@ -45,6 +45,7 @@ class LegacyParseJobConfig(pydantic.BaseModel):
      )
      invalidate_cache: bool = pydantic.Field(alias="invalidateCache", description="Whether to invalidate the cache.")
      output_pdf_of_document: typing.Optional[bool] = pydantic.Field(alias="outputPDFOfDocument")
+     outlined_table_extraction: typing.Optional[bool] = pydantic.Field(alias="outlinedTableExtraction")
      save_images: typing.Optional[bool] = pydantic.Field(alias="saveImages")
      gpt_4_o: typing.Optional[bool] = pydantic.Field(alias="gpt4o", description="Whether to use GPT4o.")
      open_aiapi_key: str = pydantic.Field(alias="openAIAPIKey", description="The OpenAI API key.")

llama_cloud/types/llama_extract_settings.py CHANGED
@@ -39,7 +39,9 @@ class LlamaExtractSettings(pydantic.BaseModel):
      extraction_agent_config: typing.Optional[typing.Dict[str, StructParseConf]] = pydantic.Field(
          description="The configuration for the extraction agent."
      )
-     use_multimodal_extraction: typing.Optional[bool]
+     use_pixel_extraction: typing.Optional[bool] = pydantic.Field(
+         description="Whether to use extraction over pixels for multimodal mode."
+     )
      llama_parse_params: typing.Optional[LlamaParseParameters] = pydantic.Field(
          description="LlamaParse related settings."
      )

llama_cloud/types/llama_parse_parameters.py CHANGED
@@ -31,6 +31,7 @@ class LlamaParseParameters(pydantic.BaseModel):
      disable_reconstruction: typing.Optional[bool]
      disable_image_extraction: typing.Optional[bool]
      invalidate_cache: typing.Optional[bool]
+     outlined_table_extraction: typing.Optional[bool]
      output_pdf_of_document: typing.Optional[bool]
      do_not_cache: typing.Optional[bool]
      fast_mode: typing.Optional[bool]
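
The same outlined_table_extraction switch appears in LegacyParseJobConfig above (camelCase alias outlinedTableExtraction) and in ParseJobConfig below. A minimal sketch of toggling it, assuming the model's remaining fields are likewise optional so a sparse construction is valid:

from llama_cloud import LlamaParseParameters

params = LlamaParseParameters(outlined_table_extraction=True)
# The generated json() passes exclude_unset=True, so only the toggled
# field is serialized onto the wire.
print(params.json())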

llama_cloud/types/page_figure_metadata.py CHANGED
@@ -21,6 +21,7 @@ class PageFigureMetadata(pydantic.BaseModel):
      figure_size: int = pydantic.Field(description="The size of the figure in bytes")
      is_likely_noise: typing.Optional[bool] = pydantic.Field(description="Whether the figure is likely to be noise")
      confidence: float = pydantic.Field(description="The confidence of the figure")
+     metadata: typing.Optional[typing.Dict[str, typing.Any]]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}

llama_cloud/types/{llm_configs_response.py → page_figure_node_with_score.py} RENAMED
@@ -4,7 +4,7 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
- from .llm_config_result import LlmConfigResult
+ from .page_figure_metadata import PageFigureMetadata

  try:
      import pydantic
@@ -15,9 +15,14 @@ except ImportError:
      import pydantic # type: ignore


- class LlmConfigsResponse(pydantic.BaseModel):
-     llm_configs: typing.List[LlmConfigResult]
-     last_validated_at: str
+ class PageFigureNodeWithScore(pydantic.BaseModel):
+     """
+     Page figure metadata with score
+     """
+
+     node: PageFigureMetadata
+     score: float = pydantic.Field(description="The score of the figure node")
+     class_name: typing.Optional[str]

      def json(self, **kwargs: typing.Any) -> str:
          kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
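
PageFigureNodeWithScore pairs a PageFigureMetadata node with a retrieval score, mirroring the existing PageScreenshotNodeWithScore. A sketch of ranking retrieved figures while honoring the is_likely_noise flag from PageFigureMetadata above (the threshold value is illustrative):

import typing

def top_figures(nodes: typing.List[typing.Any], min_score: float = 0.5) -> typing.List[typing.Any]:
    # Drop low-scoring and likely-noise figures, then rank by score.
    kept = [n for n in nodes if n.score >= min_score and not n.node.is_likely_noise]
    return sorted(kept, key=lambda n: n.score, reverse=True)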

llama_cloud/types/parse_job_config.py CHANGED
@@ -33,6 +33,7 @@ class ParseJobConfig(pydantic.BaseModel):
      disable_reconstruction: typing.Optional[bool]
      disable_image_extraction: typing.Optional[bool]
      invalidate_cache: typing.Optional[bool]
+     outlined_table_extraction: typing.Optional[bool]
      output_pdf_of_document: typing.Optional[bool]
      do_not_cache: typing.Optional[bool]
      fast_mode: typing.Optional[bool]

llama_cloud/types/pipeline_data_source.py CHANGED
@@ -5,8 +5,8 @@ import typing

  from ..core.datetime_utils import serialize_datetime
  from .configurable_data_source_names import ConfigurableDataSourceNames
+ from .json_type import JsonType
  from .pipeline_data_source_component import PipelineDataSourceComponent
- from .pipeline_data_source_custom_metadata_value import PipelineDataSourceCustomMetadataValue
  from .pipeline_data_source_status import PipelineDataSourceStatus

  try:
@@ -28,7 +28,7 @@ class PipelineDataSource(pydantic.BaseModel):
      updated_at: typing.Optional[dt.datetime]
      name: str = pydantic.Field(description="The name of the data source.")
      source_type: ConfigurableDataSourceNames
-     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineDataSourceCustomMetadataValue]]]
+     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
      component: PipelineDataSourceComponent = pydantic.Field(description="Component that implements the data source")
      version_metadata: typing.Optional[typing.Dict[str, typing.Any]]
      project_id: str

llama_cloud/types/pipeline_file.py CHANGED
@@ -4,10 +4,7 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
- from .pipeline_file_config_hash_value import PipelineFileConfigHashValue
- from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
- from .pipeline_file_permission_info_value import PipelineFilePermissionInfoValue
- from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
+ from .json_type import JsonType
  from .pipeline_file_status import PipelineFileStatus

  try:
@@ -33,13 +30,13 @@ class PipelineFile(pydantic.BaseModel):
      file_type: typing.Optional[str]
      project_id: str = pydantic.Field(description="The ID of the project that the file belongs to")
      last_modified_at: typing.Optional[dt.datetime]
-     resource_info: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileResourceInfoValue]]]
-     permission_info: typing.Optional[typing.Dict[str, typing.Optional[PipelineFilePermissionInfoValue]]]
+     resource_info: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
+     permission_info: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
      data_source_id: typing.Optional[str]
      file_id: typing.Optional[str]
      pipeline_id: str = pydantic.Field(description="The ID of the pipeline that the file is associated with")
-     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileCustomMetadataValue]]]
-     config_hash: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileConfigHashValue]]]
+     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
+     config_hash: typing.Optional[typing.Dict[str, typing.Optional[JsonType]]]
      indexed_page_count: typing.Optional[int]
      status: typing.Optional[PipelineFileStatus]
      status_updated_at: typing.Optional[dt.datetime]