llama-cloud 0.1.29__py3-none-any.whl → 0.1.31__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (42)
  1. llama_cloud/__init__.py +26 -16
  2. llama_cloud/client.py +0 -3
  3. llama_cloud/resources/__init__.py +0 -2
  4. llama_cloud/resources/beta/client.py +602 -0
  5. llama_cloud/resources/data_sources/types/data_source_update_component.py +0 -2
  6. llama_cloud/resources/parsing/client.py +56 -0
  7. llama_cloud/resources/pipelines/client.py +64 -0
  8. llama_cloud/types/__init__.py +26 -14
  9. llama_cloud/types/{model_configuration.py → agent_data.py} +8 -7
  10. llama_cloud/types/agent_deployment_summary.py +1 -1
  11. llama_cloud/types/{cloud_google_drive_data_source.py → aggregate_group.py} +8 -5
  12. llama_cloud/types/base_plan.py +3 -0
  13. llama_cloud/types/data_source_component.py +0 -2
  14. llama_cloud/types/data_source_create_component.py +0 -2
  15. llama_cloud/types/filter_operation.py +46 -0
  16. llama_cloud/types/filter_operation_eq.py +6 -0
  17. llama_cloud/types/filter_operation_gt.py +6 -0
  18. llama_cloud/types/filter_operation_gte.py +6 -0
  19. llama_cloud/types/filter_operation_includes_item.py +6 -0
  20. llama_cloud/types/filter_operation_lt.py +6 -0
  21. llama_cloud/types/filter_operation_lte.py +6 -0
  22. llama_cloud/types/input_message.py +2 -2
  23. llama_cloud/types/legacy_parse_job_config.py +13 -0
  24. llama_cloud/types/llama_extract_settings.py +3 -0
  25. llama_cloud/types/llama_index_core_base_llms_types_chat_message.py +2 -2
  26. llama_cloud/types/llama_parse_parameters.py +7 -0
  27. llama_cloud/types/{llama_index_core_base_llms_types_message_role.py → message_role.py} +9 -9
  28. llama_cloud/types/{text_content_block.py → paginated_response_agent_data.py} +5 -5
  29. llama_cloud/types/{message.py → paginated_response_aggregate_group.py} +5 -9
  30. llama_cloud/types/parse_job_config.py +7 -0
  31. llama_cloud/types/pipeline_data_source_component.py +0 -2
  32. llama_cloud/types/playground_session.py +2 -2
  33. llama_cloud/types/role.py +0 -1
  34. llama_cloud/types/{app_schema_chat_chat_message.py → src_app_schema_chat_chat_message.py} +3 -3
  35. llama_cloud/types/user_organization_role.py +0 -1
  36. {llama_cloud-0.1.29.dist-info → llama_cloud-0.1.31.dist-info}/METADATA +1 -1
  37. {llama_cloud-0.1.29.dist-info → llama_cloud-0.1.31.dist-info}/RECORD +39 -35
  38. llama_cloud/resources/responses/__init__.py +0 -2
  39. llama_cloud/resources/responses/client.py +0 -137
  40. llama_cloud/types/app_schema_responses_message_role.py +0 -33
  41. {llama_cloud-0.1.29.dist-info → llama_cloud-0.1.31.dist-info}/LICENSE +0 -0
  42. {llama_cloud-0.1.29.dist-info → llama_cloud-0.1.31.dist-info}/WHEEL +0 -0
llama_cloud/resources/parsing/client.py

@@ -233,6 +233,7 @@ class ParsingClient:
         language: typing.List[ParserLanguages],
         extract_layout: bool,
         max_pages: typing.Optional[int] = OMIT,
+        merge_tables_across_pages_in_markdown: bool,
         outlined_table_extraction: bool,
         output_pdf_of_document: bool,
         output_s_3_path_prefix: str,
@@ -283,6 +284,12 @@ class ParsingClient:
         parsing_instruction: str,
         fast_mode: bool,
         formatting_instruction: str,
+        hide_headers: bool,
+        hide_footers: bool,
+        page_header_prefix: str,
+        page_header_suffix: str,
+        page_footer_prefix: str,
+        page_footer_suffix: str,
     ) -> ParsingJob:
         """
         Parameters:
@@ -364,6 +371,8 @@ class ParsingClient:

         - max_pages: typing.Optional[int].

+        - merge_tables_across_pages_in_markdown: bool.
+
         - outlined_table_extraction: bool.

         - output_pdf_of_document: bool.
@@ -463,6 +472,18 @@ class ParsingClient:
         - fast_mode: bool.

         - formatting_instruction: str.
+
+        - hide_headers: bool.
+
+        - hide_footers: bool.
+
+        - page_header_prefix: str.
+
+        - page_header_suffix: str.
+
+        - page_footer_prefix: str.
+
+        - page_footer_suffix: str.
         """
         _request: typing.Dict[str, typing.Any] = {
             "adaptive_long_table": adaptive_long_table,
@@ -500,6 +521,7 @@ class ParsingClient:
             "invalidate_cache": invalidate_cache,
             "language": language,
             "extract_layout": extract_layout,
+            "merge_tables_across_pages_in_markdown": merge_tables_across_pages_in_markdown,
             "outlined_table_extraction": outlined_table_extraction,
             "output_pdf_of_document": output_pdf_of_document,
             "output_s3_path_prefix": output_s_3_path_prefix,
@@ -548,6 +570,12 @@ class ParsingClient:
             "parsing_instruction": parsing_instruction,
             "fast_mode": fast_mode,
             "formatting_instruction": formatting_instruction,
+            "hide_headers": hide_headers,
+            "hide_footers": hide_footers,
+            "page_header_prefix": page_header_prefix,
+            "page_header_suffix": page_header_suffix,
+            "page_footer_prefix": page_footer_prefix,
+            "page_footer_suffix": page_footer_suffix,
         }
         if file is not OMIT:
             _request["file"] = file
@@ -1365,6 +1393,7 @@ class AsyncParsingClient:
         language: typing.List[ParserLanguages],
         extract_layout: bool,
         max_pages: typing.Optional[int] = OMIT,
+        merge_tables_across_pages_in_markdown: bool,
         outlined_table_extraction: bool,
         output_pdf_of_document: bool,
         output_s_3_path_prefix: str,
@@ -1415,6 +1444,12 @@ class AsyncParsingClient:
         parsing_instruction: str,
         fast_mode: bool,
         formatting_instruction: str,
+        hide_headers: bool,
+        hide_footers: bool,
+        page_header_prefix: str,
+        page_header_suffix: str,
+        page_footer_prefix: str,
+        page_footer_suffix: str,
     ) -> ParsingJob:
         """
         Parameters:
@@ -1496,6 +1531,8 @@ class AsyncParsingClient:

         - max_pages: typing.Optional[int].

+        - merge_tables_across_pages_in_markdown: bool.
+
         - outlined_table_extraction: bool.

         - output_pdf_of_document: bool.
@@ -1595,6 +1632,18 @@ class AsyncParsingClient:
         - fast_mode: bool.

         - formatting_instruction: str.
+
+        - hide_headers: bool.
+
+        - hide_footers: bool.
+
+        - page_header_prefix: str.
+
+        - page_header_suffix: str.
+
+        - page_footer_prefix: str.
+
+        - page_footer_suffix: str.
         """
         _request: typing.Dict[str, typing.Any] = {
             "adaptive_long_table": adaptive_long_table,
@@ -1632,6 +1681,7 @@ class AsyncParsingClient:
             "invalidate_cache": invalidate_cache,
             "language": language,
             "extract_layout": extract_layout,
+            "merge_tables_across_pages_in_markdown": merge_tables_across_pages_in_markdown,
             "outlined_table_extraction": outlined_table_extraction,
             "output_pdf_of_document": output_pdf_of_document,
             "output_s3_path_prefix": output_s_3_path_prefix,
@@ -1680,6 +1730,12 @@ class AsyncParsingClient:
             "parsing_instruction": parsing_instruction,
             "fast_mode": fast_mode,
             "formatting_instruction": formatting_instruction,
+            "hide_headers": hide_headers,
+            "hide_footers": hide_footers,
+            "page_header_prefix": page_header_prefix,
+            "page_header_suffix": page_header_suffix,
+            "page_footer_prefix": page_footer_prefix,
+            "page_footer_suffix": page_footer_suffix,
         }
         if file is not OMIT:
             _request["file"] = file
llama_cloud/resources/pipelines/client.py

@@ -410,6 +410,38 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def force_delete_pipeline(self, pipeline_id: str) -> None:
+        """
+        Parameters:
+            - pipeline_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.pipelines.force_delete_pipeline(
+            pipeline_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/force-delete"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def copy_pipeline(self, pipeline_id: str) -> Pipeline:
         """
         Copy a pipeline by ID.
@@ -2049,6 +2081,38 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def force_delete_pipeline(self, pipeline_id: str) -> None:
+        """
+        Parameters:
+            - pipeline_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.pipelines.force_delete_pipeline(
+            pipeline_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/force-delete"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def copy_pipeline(self, pipeline_id: str) -> Pipeline:
         """
         Copy a pipeline by ID.
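
Both the sync and async pipeline clients gain force_delete_pipeline, a POST to api/v1/pipelines/{pipeline_id}/force-delete that returns None on any 2xx response. A small sketch with the error handling the generated code performs (a 422 becomes UnprocessableEntityError, any other non-2xx status raises ApiError); the exception import path is assumed from the usual Fern layout:

```python
# Sketch: calling the new force-delete endpoint and handling failures.
from llama_cloud.client import LlamaCloud
from llama_cloud.core.api_error import ApiError  # import path assumed

client = LlamaCloud(token="YOUR_TOKEN")

try:
    # Returns None on success.
    client.pipelines.force_delete_pipeline(pipeline_id="pipeline_id")
except ApiError as err:
    # A 422 surfaces as UnprocessableEntityError, a subclass of ApiError.
    print(f"force delete failed: {err.status_code}: {err.body}")
```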
llama_cloud/types/__init__.py

@@ -15,10 +15,10 @@ from .advanced_mode_transform_config_segmentation_config import (
     AdvancedModeTransformConfigSegmentationConfig_None,
     AdvancedModeTransformConfigSegmentationConfig_Page,
 )
+from .agent_data import AgentData
 from .agent_deployment_list import AgentDeploymentList
 from .agent_deployment_summary import AgentDeploymentSummary
-from .app_schema_chat_chat_message import AppSchemaChatChatMessage
-from .app_schema_responses_message_role import AppSchemaResponsesMessageRole
+from .aggregate_group import AggregateGroup
 from .audio_block import AudioBlock
 from .auto_transform_config import AutoTransformConfig
 from .azure_open_ai_embedding import AzureOpenAiEmbedding
@@ -46,7 +46,6 @@ from .cloud_box_data_source import CloudBoxDataSource
 from .cloud_confluence_data_source import CloudConfluenceDataSource
 from .cloud_document import CloudDocument
 from .cloud_document_create import CloudDocumentCreate
-from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
 from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_milvus_vector_store import CloudMilvusVectorStore
 from .cloud_mongo_db_atlas_vector_search import CloudMongoDbAtlasVectorSearch
@@ -143,6 +142,13 @@ from .file_parse_public import FileParsePublic
 from .file_permission_info_value import FilePermissionInfoValue
 from .file_resource_info_value import FileResourceInfoValue
 from .filter_condition import FilterCondition
+from .filter_operation import FilterOperation
+from .filter_operation_eq import FilterOperationEq
+from .filter_operation_gt import FilterOperationGt
+from .filter_operation_gte import FilterOperationGte
+from .filter_operation_includes_item import FilterOperationIncludesItem
+from .filter_operation_lt import FilterOperationLt
+from .filter_operation_lte import FilterOperationLte
 from .filter_operator import FilterOperator
 from .free_credits_usage import FreeCreditsUsage
 from .gemini_embedding import GeminiEmbedding
@@ -182,7 +188,6 @@ from .llama_index_core_base_llms_types_chat_message_blocks_item import (
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text,
 )
-from .llama_index_core_base_llms_types_message_role import LlamaIndexCoreBaseLlmsTypesMessageRole
 from .llama_parse_parameters import LlamaParseParameters
 from .llama_parse_parameters_priority import LlamaParseParametersPriority
 from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
@@ -191,13 +196,12 @@ from .llm_parameters import LlmParameters
 from .load_files_job_config import LoadFilesJobConfig
 from .managed_ingestion_status import ManagedIngestionStatus
 from .managed_ingestion_status_response import ManagedIngestionStatusResponse
-from .message import Message
 from .message_annotation import MessageAnnotation
+from .message_role import MessageRole
 from .metadata_filter import MetadataFilter
 from .metadata_filter_value import MetadataFilterValue
 from .metadata_filters import MetadataFilters
 from .metadata_filters_filters_item import MetadataFiltersFiltersItem
-from .model_configuration import ModelConfiguration
 from .node_relationship import NodeRelationship
 from .none_chunking_config import NoneChunkingConfig
 from .none_segmentation_config import NoneSegmentationConfig
@@ -216,6 +220,8 @@ from .paginated_jobs_history_with_metrics import PaginatedJobsHistoryWithMetrics
 from .paginated_list_cloud_documents_response import PaginatedListCloudDocumentsResponse
 from .paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
 from .paginated_report_response import PaginatedReportResponse
+from .paginated_response_agent_data import PaginatedResponseAgentData
+from .paginated_response_aggregate_group import PaginatedResponseAggregateGroup
 from .parse_job_config import ParseJobConfig
 from .parse_job_config_priority import ParseJobConfigPriority
 from .parse_plan_level import ParsePlanLevel
@@ -329,13 +335,13 @@ from .role import Role
 from .schema_relax_mode import SchemaRelaxMode
 from .semantic_chunking_config import SemanticChunkingConfig
 from .sentence_chunking_config import SentenceChunkingConfig
+from .src_app_schema_chat_chat_message import SrcAppSchemaChatChatMessage
 from .status_enum import StatusEnum
 from .struct_mode import StructMode
 from .struct_parse_conf import StructParseConf
 from .supported_llm_model import SupportedLlmModel
 from .supported_llm_model_names import SupportedLlmModelNames
 from .text_block import TextBlock
-from .text_content_block import TextContentBlock
 from .text_node import TextNode
 from .text_node_relationships_value import TextNodeRelationshipsValue
 from .text_node_with_score import TextNodeWithScore
@@ -369,10 +375,10 @@ __all__ = [
     "AdvancedModeTransformConfigSegmentationConfig_Element",
     "AdvancedModeTransformConfigSegmentationConfig_None",
     "AdvancedModeTransformConfigSegmentationConfig_Page",
+    "AgentData",
     "AgentDeploymentList",
     "AgentDeploymentSummary",
-    "AppSchemaChatChatMessage",
-    "AppSchemaResponsesMessageRole",
+    "AggregateGroup",
     "AudioBlock",
     "AutoTransformConfig",
     "AzureOpenAiEmbedding",
@@ -400,7 +406,6 @@ __all__ = [
     "CloudConfluenceDataSource",
     "CloudDocument",
     "CloudDocumentCreate",
-    "CloudGoogleDriveDataSource",
     "CloudJiraDataSource",
     "CloudMilvusVectorStore",
     "CloudMongoDbAtlasVectorSearch",
@@ -493,6 +498,13 @@ __all__ = [
     "FilePermissionInfoValue",
     "FileResourceInfoValue",
     "FilterCondition",
+    "FilterOperation",
+    "FilterOperationEq",
+    "FilterOperationGt",
+    "FilterOperationGte",
+    "FilterOperationIncludesItem",
+    "FilterOperationLt",
+    "FilterOperationLte",
     "FilterOperator",
     "FreeCreditsUsage",
     "GeminiEmbedding",
@@ -528,7 +540,6 @@ __all__ = [
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Document",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Image",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem_Text",
-    "LlamaIndexCoreBaseLlmsTypesMessageRole",
     "LlamaParseParameters",
     "LlamaParseParametersPriority",
     "LlamaParseSupportedFileExtensions",
@@ -537,13 +548,12 @@ __all__ = [
     "LoadFilesJobConfig",
     "ManagedIngestionStatus",
     "ManagedIngestionStatusResponse",
-    "Message",
     "MessageAnnotation",
+    "MessageRole",
     "MetadataFilter",
     "MetadataFilterValue",
     "MetadataFilters",
     "MetadataFiltersFiltersItem",
-    "ModelConfiguration",
     "NodeRelationship",
     "NoneChunkingConfig",
     "NoneSegmentationConfig",
@@ -562,6 +572,8 @@ __all__ = [
    "PaginatedListCloudDocumentsResponse",
     "PaginatedListPipelineFilesResponse",
     "PaginatedReportResponse",
+    "PaginatedResponseAgentData",
+    "PaginatedResponseAggregateGroup",
     "ParseJobConfig",
     "ParseJobConfigPriority",
     "ParsePlanLevel",
@@ -665,13 +677,13 @@ __all__ = [
     "SchemaRelaxMode",
     "SemanticChunkingConfig",
     "SentenceChunkingConfig",
+    "SrcAppSchemaChatChatMessage",
     "StatusEnum",
     "StructMode",
     "StructParseConf",
     "SupportedLlmModel",
     "SupportedLlmModelNames",
     "TextBlock",
-    "TextContentBlock",
     "TextNode",
     "TextNodeRelationshipsValue",
     "TextNodeWithScore",
llama_cloud/types/{model_configuration.py → agent_data.py}

@@ -4,7 +4,6 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
-from .supported_llm_model_names import SupportedLlmModelNames

 try:
     import pydantic
@@ -15,15 +14,17 @@ except ImportError:
     import pydantic  # type: ignore


-class ModelConfiguration(pydantic.BaseModel):
+class AgentData(pydantic.BaseModel):
     """
-    Configuration for the language model used in response generation.
+    API Result for a single agent data item
     """

-    model_name: typing.Optional[SupportedLlmModelNames] = pydantic.Field(
-        description="The name of the model to use for LLM completions."
-    )
-    temperature: typing.Optional[float] = pydantic.Field(description="The temperature to use for LLM completions.")
+    id: typing.Optional[str]
+    agent_slug: str
+    collection: typing.Optional[str]
+    data: typing.Dict[str, typing.Any]
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
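
AgentData, which takes over the file slot of the removed ModelConfiguration, is a thin pydantic wrapper around an arbitrary JSON payload keyed by agent_slug. Only agent_slug and data are required; under the pydantic v1 shim the Optional fields default to None. A sketch with invented values:

```python
from llama_cloud.types import AgentData

# Field names come from the hunk above; the sample values are invented.
record = AgentData.parse_obj(
    {
        "agent_slug": "invoice-extractor",
        "data": {"invoice_total": 1042.50, "currency": "EUR"},
    }
)
print(record.data["currency"])  # id/collection/timestamps default to None
```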
llama_cloud/types/agent_deployment_summary.py

@@ -17,7 +17,7 @@ except ImportError:
 class AgentDeploymentSummary(pydantic.BaseModel):
     id: str = pydantic.Field(description="Deployment ID. Prefixed with dpl-")
     project_id: str = pydantic.Field(description="Project ID")
-    app_slug: str = pydantic.Field(description="readable ID of the deployed app")
+    agent_slug: str = pydantic.Field(description="readable ID of the deployed app")
     thumbnail_url: typing.Optional[str]
     base_url: str = pydantic.Field(description="Base URL of the deployed app")
     display_name: str = pydantic.Field(description="Display name of the deployed app")
llama_cloud/types/{cloud_google_drive_data_source.py → aggregate_group.py}

@@ -14,11 +14,14 @@ except ImportError:
     import pydantic  # type: ignore


-class CloudGoogleDriveDataSource(pydantic.BaseModel):
-    supports_access_control: typing.Optional[bool]
-    folder_id: str = pydantic.Field(description="The ID of the Google Drive folder to read from.")
-    service_account_key: typing.Optional[typing.Dict[str, typing.Any]]
-    class_name: typing.Optional[str]
+class AggregateGroup(pydantic.BaseModel):
+    """
+    API Result for a single group in the aggregate response
+    """
+
+    group_key: typing.Dict[str, typing.Any]
+    count: typing.Optional[int]
+    first_item: typing.Optional[typing.Dict[str, typing.Any]]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
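
AggregateGroup models one bucket of an aggregate response: the grouping key, an optional bucket count, and an optional first item as a preview. For example (values invented):

```python
from llama_cloud.types import AggregateGroup

group = AggregateGroup.parse_obj(
    {
        "group_key": {"status": "paid"},        # required
        "count": 12,                            # optional
        "first_item": {"invoice_total": 99.0},  # optional preview
    }
)
print(group.group_key, group.count)
```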
llama_cloud/types/base_plan.py

@@ -35,6 +35,9 @@ class BasePlan(pydantic.BaseModel):
     is_payment_failed: typing.Optional[bool] = pydantic.Field(
         description="Whether the organization has a failed payment that requires support contact"
     )
+    failure_count: typing.Optional[int] = pydantic.Field(
+        description="The number of payment failures for this organization"
+    )

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/data_source_component.py

@@ -5,7 +5,6 @@ import typing
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from .cloud_box_data_source import CloudBoxDataSource
 from .cloud_confluence_data_source import CloudConfluenceDataSource
-from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
 from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_notion_page_data_source import CloudNotionPageDataSource
 from .cloud_one_drive_data_source import CloudOneDriveDataSource
@@ -17,7 +16,6 @@ DataSourceComponent = typing.Union[
     typing.Dict[str, typing.Any],
     CloudS3DataSource,
     CloudAzStorageBlobDataSource,
-    CloudGoogleDriveDataSource,
     CloudOneDriveDataSource,
     CloudSharepointDataSource,
     CloudSlackDataSource,

llama_cloud/types/data_source_create_component.py

@@ -5,7 +5,6 @@ import typing
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from .cloud_box_data_source import CloudBoxDataSource
 from .cloud_confluence_data_source import CloudConfluenceDataSource
-from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
 from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_notion_page_data_source import CloudNotionPageDataSource
 from .cloud_one_drive_data_source import CloudOneDriveDataSource
@@ -17,7 +16,6 @@ DataSourceCreateComponent = typing.Union[
     typing.Dict[str, typing.Any],
     CloudS3DataSource,
     CloudAzStorageBlobDataSource,
-    CloudGoogleDriveDataSource,
     CloudOneDriveDataSource,
     CloudSharepointDataSource,
     CloudSlackDataSource,
llama_cloud/types/filter_operation.py (new file)

@@ -0,0 +1,46 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .filter_operation_eq import FilterOperationEq
+from .filter_operation_gt import FilterOperationGt
+from .filter_operation_gte import FilterOperationGte
+from .filter_operation_includes_item import FilterOperationIncludesItem
+from .filter_operation_lt import FilterOperationLt
+from .filter_operation_lte import FilterOperationLte
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class FilterOperation(pydantic.BaseModel):
+    """
+    API request model for a filter comparison operation.
+    """
+
+    eq: typing.Optional[FilterOperationEq]
+    gt: typing.Optional[FilterOperationGt]
+    gte: typing.Optional[FilterOperationGte]
+    lt: typing.Optional[FilterOperationLt]
+    lte: typing.Optional[FilterOperationLte]
+    includes: typing.Optional[typing.List[typing.Optional[FilterOperationIncludesItem]]]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
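
FilterOperation composes per-field comparisons from the scalar aliases defined in the six stub files that follow (each a Union of float, int, str, and datetime). This diff does not show how a filter map is passed to the new beta endpoints, so the dict below is illustrative only:

```python
import datetime as dt

from llama_cloud.types import FilterOperation

# One operation per field; unset comparators are omitted on the wire
# because the model's json()/dict() default to exclude_unset=True.
filters = {
    "status": FilterOperation(eq="paid"),
    "total": FilterOperation(gte=100, lt=1000),          # range query
    "created_at": FilterOperation(gt=dt.datetime(2025, 1, 1)),
    "tags": FilterOperation(includes=["urgent", "q3"]),  # membership
}
print(filters["total"].json())
```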
llama_cloud/types/filter_operation_eq.py (new file)

@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+FilterOperationEq = typing.Union[float, int, str, dt.datetime]

llama_cloud/types/filter_operation_gt.py (new file)

@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+FilterOperationGt = typing.Union[float, int, str, dt.datetime]

llama_cloud/types/filter_operation_gte.py (new file)

@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+FilterOperationGte = typing.Union[float, int, str, dt.datetime]

llama_cloud/types/filter_operation_includes_item.py (new file)

@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+FilterOperationIncludesItem = typing.Union[float, int, str, dt.datetime]

llama_cloud/types/filter_operation_lt.py (new file)

@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+FilterOperationLt = typing.Union[float, int, str, dt.datetime]

llama_cloud/types/filter_operation_lte.py (new file)

@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+FilterOperationLte = typing.Union[float, int, str, dt.datetime]
llama_cloud/types/input_message.py

@@ -4,7 +4,7 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
-from .llama_index_core_base_llms_types_message_role import LlamaIndexCoreBaseLlmsTypesMessageRole
+from .message_role import MessageRole

 try:
     import pydantic
@@ -21,7 +21,7 @@ class InputMessage(pydantic.BaseModel):
     """

     id: typing.Optional[str] = pydantic.Field(description="ID of the message, if any. a UUID.")
-    role: LlamaIndexCoreBaseLlmsTypesMessageRole
+    role: MessageRole
     content: str
     data: typing.Optional[typing.Dict[str, typing.Any]]
     class_name: typing.Optional[str]
llama_cloud/types/legacy_parse_job_config.py

@@ -46,6 +46,9 @@ class LegacyParseJobConfig(pydantic.BaseModel):
     invalidate_cache: bool = pydantic.Field(alias="invalidateCache", description="Whether to invalidate the cache.")
     output_pdf_of_document: typing.Optional[bool] = pydantic.Field(alias="outputPDFOfDocument")
     outlined_table_extraction: typing.Optional[bool] = pydantic.Field(alias="outlinedTableExtraction")
+    merge_tables_across_pages_in_markdown: typing.Optional[bool] = pydantic.Field(
+        alias="mergeTablesAcrossPagesInMarkdown"
+    )
     save_images: typing.Optional[bool] = pydantic.Field(alias="saveImages")
     gpt_4_o: typing.Optional[bool] = pydantic.Field(alias="gpt4o", description="Whether to use GPT4o.")
     open_aiapi_key: str = pydantic.Field(alias="openAIAPIKey", description="The OpenAI API key.")
@@ -175,6 +178,16 @@ class LegacyParseJobConfig(pydantic.BaseModel):
     system_prompt: typing.Optional[str] = pydantic.Field(alias="systemPrompt")
     system_prompt_append: typing.Optional[str] = pydantic.Field(alias="systemPromptAppend")
     user_prompt: typing.Optional[str] = pydantic.Field(alias="userPrompt")
+    page_header_prefix: typing.Optional[str] = pydantic.Field(alias="pageHeaderPrefix")
+    page_header_suffix: typing.Optional[str] = pydantic.Field(alias="pageHeaderSuffix")
+    page_footer_prefix: typing.Optional[str] = pydantic.Field(alias="pageFooterPrefix")
+    page_footer_suffix: typing.Optional[str] = pydantic.Field(alias="pageFooterSuffix")
+    hide_headers: typing.Optional[bool] = pydantic.Field(
+        alias="hideHeaders", description="Whether to hide headers in the output."
+    )
+    hide_footers: typing.Optional[bool] = pydantic.Field(
+        alias="hideFooters", description="Whether to hide footers in the output."
+    )

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
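
The new LegacyParseJobConfig fields follow the file's existing convention: snake_case attributes with camelCase wire aliases. A self-contained toy model demonstrating just that pattern (it mirrors three of the new fields and is not the real LegacyParseJobConfig, which has many more required fields):

```python
import typing

try:
    import pydantic.v1 as pydantic  # pydantic v2 environments
except ImportError:
    import pydantic  # type: ignore  # pydantic v1 environments


class HeaderFooterDemo(pydantic.BaseModel):
    # Same alias pattern as the new fields above.
    hide_headers: typing.Optional[bool] = pydantic.Field(alias="hideHeaders")
    hide_footers: typing.Optional[bool] = pydantic.Field(alias="hideFooters")
    page_header_prefix: typing.Optional[str] = pydantic.Field(alias="pageHeaderPrefix")


cfg = HeaderFooterDemo.parse_obj({"hideHeaders": True, "pageHeaderPrefix": "<hdr>"})
print(cfg.hide_headers)         # snake_case attribute access -> True
print(cfg.json(by_alias=True))  # camelCase keys on the wire
```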
llama_cloud/types/llama_extract_settings.py

@@ -39,6 +39,9 @@ class LlamaExtractSettings(pydantic.BaseModel):
     extraction_agent_config: typing.Optional[typing.Dict[str, StructParseConf]] = pydantic.Field(
         description="The configuration for the extraction agent."
     )
+    use_multimodal_parsing: typing.Optional[bool] = pydantic.Field(
+        description="Whether to use experimental multimodal parsing."
+    )
     use_pixel_extraction: typing.Optional[bool] = pydantic.Field(
         description="Whether to use extraction over pixels for multimodal mode."
     )
llama_cloud/types/llama_index_core_base_llms_types_chat_message.py

@@ -5,7 +5,7 @@ import typing

 from ..core.datetime_utils import serialize_datetime
 from .llama_index_core_base_llms_types_chat_message_blocks_item import LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem
-from .llama_index_core_base_llms_types_message_role import LlamaIndexCoreBaseLlmsTypesMessageRole
+from .message_role import MessageRole

 try:
     import pydantic
@@ -21,7 +21,7 @@ class LlamaIndexCoreBaseLlmsTypesChatMessage(pydantic.BaseModel):
     Chat message.
     """

-    role: typing.Optional[LlamaIndexCoreBaseLlmsTypesMessageRole]
+    role: typing.Optional[MessageRole]
     additional_kwargs: typing.Optional[typing.Dict[str, typing.Any]]
     blocks: typing.Optional[typing.List[LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem]]