llama-cloud 0.1.41__py3-none-any.whl → 0.1.43__py3-none-any.whl

This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.

This version of llama-cloud has been flagged as potentially problematic.

Files changed (43):
  1. llama_cloud/__init__.py +19 -19
  2. llama_cloud/resources/__init__.py +6 -0
  3. llama_cloud/resources/alpha/client.py +14 -30
  4. llama_cloud/resources/beta/client.py +1045 -59
  5. llama_cloud/resources/jobs/client.py +0 -8
  6. llama_cloud/resources/llama_extract/__init__.py +6 -0
  7. llama_cloud/resources/llama_extract/client.py +825 -941
  8. llama_cloud/resources/llama_extract/types/__init__.py +6 -0
  9. llama_cloud/resources/organizations/client.py +18 -4
  10. llama_cloud/resources/parsing/client.py +56 -0
  11. llama_cloud/resources/pipelines/client.py +164 -0
  12. llama_cloud/types/__init__.py +16 -22
  13. llama_cloud/types/agent_data.py +1 -1
  14. llama_cloud/types/agent_deployment_summary.py +1 -2
  15. llama_cloud/types/{prompt_conf.py → api_key.py} +14 -9
  16. llama_cloud/types/{extract_job_create.py → api_key_query_response.py} +6 -14
  17. llama_cloud/types/api_key_type.py +17 -0
  18. llama_cloud/types/delete_response.py +35 -0
  19. llama_cloud/types/extract_config.py +1 -0
  20. llama_cloud/types/extract_models.py +4 -0
  21. llama_cloud/types/extracted_table.py +40 -0
  22. llama_cloud/types/legacy_parse_job_config.py +3 -0
  23. llama_cloud/types/llama_parse_parameters.py +7 -0
  24. llama_cloud/types/organization.py +1 -0
  25. llama_cloud/types/paginated_response_spreadsheet_job.py +34 -0
  26. llama_cloud/types/parse_job_config.py +7 -0
  27. llama_cloud/types/public_model_name.py +4 -0
  28. llama_cloud/types/quota_configuration_configuration_type.py +4 -0
  29. llama_cloud/types/spreadsheet_job.py +50 -0
  30. llama_cloud/types/spreadsheet_parsing_config.py +35 -0
  31. {llama_cloud-0.1.41.dist-info → llama_cloud-0.1.43.dist-info}/METADATA +1 -1
  32. {llama_cloud-0.1.41.dist-info → llama_cloud-0.1.43.dist-info}/RECORD +37 -37
  33. llama_cloud/types/chunk_mode.py +0 -29
  34. llama_cloud/types/llama_extract_settings.py +0 -67
  35. llama_cloud/types/multimodal_parse_resolution.py +0 -17
  36. llama_cloud/types/schema_relax_mode.py +0 -25
  37. llama_cloud/types/struct_mode.py +0 -33
  38. llama_cloud/types/struct_parse_conf.py +0 -63
  39. /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_data_schema_override.py +0 -0
  40. /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_data_schema_override_zero_value.py +0 -0
  41. /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_priority.py +0 -0
  42. {llama_cloud-0.1.41.dist-info → llama_cloud-0.1.43.dist-info}/LICENSE +0 -0
  43. {llama_cloud-0.1.41.dist-info → llama_cloud-0.1.43.dist-info}/WHEEL +0 -0
llama_cloud/resources/llama_extract/types/__init__.py

@@ -6,6 +6,9 @@ from .extract_agent_update_data_schema import ExtractAgentUpdateDataSchema
  from .extract_agent_update_data_schema_zero_value import ExtractAgentUpdateDataSchemaZeroValue
  from .extract_job_create_batch_data_schema_override import ExtractJobCreateBatchDataSchemaOverride
  from .extract_job_create_batch_data_schema_override_zero_value import ExtractJobCreateBatchDataSchemaOverrideZeroValue
+ from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
+ from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
+ from .extract_job_create_priority import ExtractJobCreatePriority
  from .extract_schema_validate_request_data_schema import ExtractSchemaValidateRequestDataSchema
  from .extract_schema_validate_request_data_schema_zero_value import ExtractSchemaValidateRequestDataSchemaZeroValue
  from .extract_stateless_request_data_schema import ExtractStatelessRequestDataSchema
@@ -18,6 +21,9 @@ __all__ = [
  "ExtractAgentUpdateDataSchemaZeroValue",
  "ExtractJobCreateBatchDataSchemaOverride",
  "ExtractJobCreateBatchDataSchemaOverrideZeroValue",
+ "ExtractJobCreateDataSchemaOverride",
+ "ExtractJobCreateDataSchemaOverrideZeroValue",
+ "ExtractJobCreatePriority",
  "ExtractSchemaValidateRequestDataSchema",
  "ExtractSchemaValidateRequestDataSchemaZeroValue",
  "ExtractStatelessRequestDataSchema",
llama_cloud/resources/organizations/client.py

@@ -229,7 +229,9 @@ class OrganizationsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def update_organization(self, organization_id: str, *, name: str) -> Organization:
+ def update_organization(
+ self, organization_id: str, *, name: str, feature_flags: typing.Optional[typing.Dict[str, typing.Any]] = OMIT
+ ) -> Organization:
  """
  Update an existing organization.

@@ -237,6 +239,8 @@
  - organization_id: str.

  - name: str. A name for the organization.
+
+ - feature_flags: typing.Optional[typing.Dict[str, typing.Any]].
  ---
  from llama_cloud.client import LlamaCloud

@@ -248,10 +252,13 @@
  name="string",
  )
  """
+ _request: typing.Dict[str, typing.Any] = {"name": name}
+ if feature_flags is not OMIT:
+ _request["feature_flags"] = feature_flags
  _response = self._client_wrapper.httpx_client.request(
  "PUT",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}"),
- json=jsonable_encoder({"name": name}),
+ json=jsonable_encoder(_request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
@@ -937,7 +944,9 @@ class AsyncOrganizationsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def update_organization(self, organization_id: str, *, name: str) -> Organization:
+ async def update_organization(
+ self, organization_id: str, *, name: str, feature_flags: typing.Optional[typing.Dict[str, typing.Any]] = OMIT
+ ) -> Organization:
  """
  Update an existing organization.

@@ -945,6 +954,8 @@
  - organization_id: str.

  - name: str. A name for the organization.
+
+ - feature_flags: typing.Optional[typing.Dict[str, typing.Any]].
  ---
  from llama_cloud.client import AsyncLlamaCloud

@@ -956,10 +967,13 @@
  name="string",
  )
  """
+ _request: typing.Dict[str, typing.Any] = {"name": name}
+ if feature_flags is not OMIT:
+ _request["feature_flags"] = feature_flags
  _response = await self._client_wrapper.httpx_client.request(
  "PUT",
  urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}"),
- json=jsonable_encoder({"name": name}),
+ json=jsonable_encoder(_request),
  headers=self._client_wrapper.get_headers(),
  timeout=60,
  )
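
Both the sync and async clients gain the same optional keyword. A minimal sketch of the new call, based only on the signature above; the token, ID, and flag name are placeholders, and because feature_flags defaults to OMIT the request body is unchanged when it is not passed:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")  # placeholder token
    org = client.organizations.update_organization(
        organization_id="org_123",             # placeholder ID
        name="acme",
        feature_flags={"beta_parsing": True},  # hypothetical flag name
    )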
llama_cloud/resources/parsing/client.py

@@ -228,6 +228,11 @@ class ParsingClient:
  high_res_ocr: bool,
  html_make_all_elements_visible: bool,
  layout_aware: bool,
+ specialized_chart_parsing_agentic: bool,
+ specialized_chart_parsing_plus: bool,
+ specialized_chart_parsing_efficient: bool,
+ specialized_image_parsing: bool,
+ precise_bounding_box: bool,
  html_remove_fixed_elements: bool,
  html_remove_navigation_elements: bool,
  http_proxy: str,
@@ -250,6 +255,8 @@
  preserve_very_small_text: bool,
  skip_diagonal_text: bool,
  spreadsheet_extract_sub_tables: bool,
+ spreadsheet_force_formula_computation: bool,
+ inline_images_in_markdown: bool,
  structured_output: bool,
  structured_output_json_schema: str,
  structured_output_json_schema_name: str,
@@ -360,6 +367,16 @@

  - layout_aware: bool.

+ - specialized_chart_parsing_agentic: bool.
+
+ - specialized_chart_parsing_plus: bool.
+
+ - specialized_chart_parsing_efficient: bool.
+
+ - specialized_image_parsing: bool.
+
+ - precise_bounding_box: bool.
+
  - html_remove_fixed_elements: bool.

  - html_remove_navigation_elements: bool.
@@ -404,6 +421,10 @@

  - spreadsheet_extract_sub_tables: bool.

+ - spreadsheet_force_formula_computation: bool.
+
+ - inline_images_in_markdown: bool.
+
  - structured_output: bool.

  - structured_output_json_schema: str.
@@ -526,6 +547,11 @@
  "high_res_ocr": high_res_ocr,
  "html_make_all_elements_visible": html_make_all_elements_visible,
  "layout_aware": layout_aware,
+ "specialized_chart_parsing_agentic": specialized_chart_parsing_agentic,
+ "specialized_chart_parsing_plus": specialized_chart_parsing_plus,
+ "specialized_chart_parsing_efficient": specialized_chart_parsing_efficient,
+ "specialized_image_parsing": specialized_image_parsing,
+ "precise_bounding_box": precise_bounding_box,
  "html_remove_fixed_elements": html_remove_fixed_elements,
  "html_remove_navigation_elements": html_remove_navigation_elements,
  "http_proxy": http_proxy,
@@ -547,6 +573,8 @@
  "preserve_very_small_text": preserve_very_small_text,
  "skip_diagonal_text": skip_diagonal_text,
  "spreadsheet_extract_sub_tables": spreadsheet_extract_sub_tables,
+ "spreadsheet_force_formula_computation": spreadsheet_force_formula_computation,
+ "inline_images_in_markdown": inline_images_in_markdown,
  "structured_output": structured_output,
  "structured_output_json_schema": structured_output_json_schema,
  "structured_output_json_schema_name": structured_output_json_schema_name,
@@ -1404,6 +1432,11 @@ class AsyncParsingClient:
  high_res_ocr: bool,
  html_make_all_elements_visible: bool,
  layout_aware: bool,
+ specialized_chart_parsing_agentic: bool,
+ specialized_chart_parsing_plus: bool,
+ specialized_chart_parsing_efficient: bool,
+ specialized_image_parsing: bool,
+ precise_bounding_box: bool,
  html_remove_fixed_elements: bool,
  html_remove_navigation_elements: bool,
  http_proxy: str,
@@ -1426,6 +1459,8 @@
  preserve_very_small_text: bool,
  skip_diagonal_text: bool,
  spreadsheet_extract_sub_tables: bool,
+ spreadsheet_force_formula_computation: bool,
+ inline_images_in_markdown: bool,
  structured_output: bool,
  structured_output_json_schema: str,
  structured_output_json_schema_name: str,
@@ -1536,6 +1571,16 @@

  - layout_aware: bool.

+ - specialized_chart_parsing_agentic: bool.
+
+ - specialized_chart_parsing_plus: bool.
+
+ - specialized_chart_parsing_efficient: bool.
+
+ - specialized_image_parsing: bool.
+
+ - precise_bounding_box: bool.
+
  - html_remove_fixed_elements: bool.

  - html_remove_navigation_elements: bool.
@@ -1580,6 +1625,10 @@

  - spreadsheet_extract_sub_tables: bool.

+ - spreadsheet_force_formula_computation: bool.
+
+ - inline_images_in_markdown: bool.
+
  - structured_output: bool.

  - structured_output_json_schema: str.
@@ -1702,6 +1751,11 @@
  "high_res_ocr": high_res_ocr,
  "html_make_all_elements_visible": html_make_all_elements_visible,
  "layout_aware": layout_aware,
+ "specialized_chart_parsing_agentic": specialized_chart_parsing_agentic,
+ "specialized_chart_parsing_plus": specialized_chart_parsing_plus,
+ "specialized_chart_parsing_efficient": specialized_chart_parsing_efficient,
+ "specialized_image_parsing": specialized_image_parsing,
+ "precise_bounding_box": precise_bounding_box,
  "html_remove_fixed_elements": html_remove_fixed_elements,
  "html_remove_navigation_elements": html_remove_navigation_elements,
  "http_proxy": http_proxy,
@@ -1723,6 +1777,8 @@
  "preserve_very_small_text": preserve_very_small_text,
  "skip_diagonal_text": skip_diagonal_text,
  "spreadsheet_extract_sub_tables": spreadsheet_extract_sub_tables,
+ "spreadsheet_force_formula_computation": spreadsheet_force_formula_computation,
+ "inline_images_in_markdown": inline_images_in_markdown,
  "structured_output": structured_output,
  "structured_output_json_schema": structured_output_json_schema,
  "structured_output_json_schema_name": structured_output_json_schema_name,
llama_cloud/resources/pipelines/client.py

@@ -1706,6 +1706,44 @@ class PipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def sync_pipeline_document(self, document_id: str, pipeline_id: str) -> typing.Any:
+ """
+ Sync a specific document for a pipeline.
+
+ Parameters:
+ - document_id: str.
+
+ - pipeline_id: str.
+ ---
+ from llama_cloud.client import LlamaCloud
+
+ client = LlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ client.pipelines.sync_pipeline_document(
+ document_id="string",
+ pipeline_id="string",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/sync",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  def list_pipeline_document_chunks(self, document_id: str, pipeline_id: str) -> typing.List[TextNode]:
  """
  Return a list of chunks for a pipeline document.
@@ -1744,6 +1782,50 @@
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ def force_sync_all_pipeline_documents(
+ self, pipeline_id: str, *, batch_size: typing.Optional[int] = None, only_failed: typing.Optional[bool] = None
+ ) -> None:
+ """
+ Force sync all documents in a pipeline by batching document ingestion jobs.
+
+ - Iterates all document refs for the pipeline
+ - Enqueues document ingestion jobs in batches of `batch_size`
+
+ Parameters:
+ - pipeline_id: str.
+
+ - batch_size: typing.Optional[int].
+
+ - only_failed: typing.Optional[bool]. Only sync retriable documents (failed/cancelled/not-started/stalled-in-progress)
+ ---
+ from llama_cloud.client import LlamaCloud
+
+ client = LlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ client.pipelines.force_sync_all_pipeline_documents(
+ pipeline_id="string",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents/force-sync-all"
+ ),
+ params=remove_none_from_dict({"batch_size": batch_size, "only_failed": only_failed}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+

  class AsyncPipelinesClient:
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -3397,6 +3479,44 @@ class AsyncPipelinesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

+ async def sync_pipeline_document(self, document_id: str, pipeline_id: str) -> typing.Any:
+ """
+ Sync a specific document for a pipeline.
+
+ Parameters:
+ - document_id: str.
+
+ - pipeline_id: str.
+ ---
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.pipelines.sync_pipeline_document(
+ document_id="string",
+ pipeline_id="string",
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/",
+ f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/sync",
+ ),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
  async def list_pipeline_document_chunks(self, document_id: str, pipeline_id: str) -> typing.List[TextNode]:
  """
  Return a list of chunks for a pipeline document.
@@ -3434,3 +3554,47 @@
  except JSONDecodeError:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def force_sync_all_pipeline_documents(
+ self, pipeline_id: str, *, batch_size: typing.Optional[int] = None, only_failed: typing.Optional[bool] = None
+ ) -> None:
+ """
+ Force sync all documents in a pipeline by batching document ingestion jobs.
+
+ - Iterates all document refs for the pipeline
+ - Enqueues document ingestion jobs in batches of `batch_size`
+
+ Parameters:
+ - pipeline_id: str.
+
+ - batch_size: typing.Optional[int].
+
+ - only_failed: typing.Optional[bool]. Only sync retriable documents (failed/cancelled/not-started/stalled-in-progress)
+ ---
+ from llama_cloud.client import AsyncLlamaCloud
+
+ client = AsyncLlamaCloud(
+ token="YOUR_TOKEN",
+ )
+ await client.pipelines.force_sync_all_pipeline_documents(
+ pipeline_id="string",
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "POST",
+ urllib.parse.urljoin(
+ f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents/force-sync-all"
+ ),
+ params=remove_none_from_dict({"batch_size": batch_size, "only_failed": only_failed}),
+ headers=self._client_wrapper.get_headers(),
+ timeout=60,
+ )
+ if 200 <= _response.status_code < 300:
+ return
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+ try:
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
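
Both new methods are shown in full, so usage follows directly from the signatures; the IDs and batch size below are placeholders:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")  # placeholder token

    # Re-ingest a single document in a pipeline.
    client.pipelines.sync_pipeline_document(
        document_id="doc_123",
        pipeline_id="pipe_456",
    )

    # Retry only failed/cancelled/not-started/stalled documents,
    # enqueued in ingestion batches of 100.
    client.pipelines.force_sync_all_pipeline_documents(
        pipeline_id="pipe_456",
        batch_size=100,
        only_failed=True,
    )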
llama_cloud/types/__init__.py

@@ -19,6 +19,9 @@ from .agent_data import AgentData
  from .agent_deployment_list import AgentDeploymentList
  from .agent_deployment_summary import AgentDeploymentSummary
  from .aggregate_group import AggregateGroup
+ from .api_key import ApiKey
+ from .api_key_query_response import ApiKeyQueryResponse
+ from .api_key_type import ApiKeyType
  from .auto_transform_config import AutoTransformConfig
  from .azure_open_ai_embedding import AzureOpenAiEmbedding
  from .azure_open_ai_embedding_config import AzureOpenAiEmbeddingConfig
@@ -39,7 +42,6 @@ from .chat_app import ChatApp
  from .chat_app_response import ChatAppResponse
  from .chat_data import ChatData
  from .chat_message import ChatMessage
- from .chunk_mode import ChunkMode
  from .classification_result import ClassificationResult
  from .classifier_rule import ClassifierRule
  from .classify_job import ClassifyJob
@@ -88,6 +90,7 @@ from .data_source_reader_version_metadata import DataSourceReaderVersionMetadata
  from .data_source_reader_version_metadata_reader_version import DataSourceReaderVersionMetadataReaderVersion
  from .data_source_update_dispatcher_config import DataSourceUpdateDispatcherConfig
  from .delete_params import DeleteParams
+ from .delete_response import DeleteResponse
  from .document_chunk_mode import DocumentChunkMode
  from .document_ingestion_job_params import DocumentIngestionJobParams
  from .element_segmentation_config import ElementSegmentationConfig
@@ -119,10 +122,6 @@ from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
  from .extract_config import ExtractConfig
  from .extract_config_priority import ExtractConfigPriority
  from .extract_job import ExtractJob
- from .extract_job_create import ExtractJobCreate
- from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
- from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
- from .extract_job_create_priority import ExtractJobCreatePriority
  from .extract_mode import ExtractMode
  from .extract_models import ExtractModels
  from .extract_resultset import ExtractResultset
@@ -142,6 +141,7 @@ from .extract_schema_validate_response import ExtractSchemaValidateResponse
  from .extract_schema_validate_response_data_schema_value import ExtractSchemaValidateResponseDataSchemaValue
  from .extract_state import ExtractState
  from .extract_target import ExtractTarget
+ from .extracted_table import ExtractedTable
  from .fail_page_mode import FailPageMode
  from .failure_handling_config import FailureHandlingConfig
  from .file import File
@@ -199,7 +199,6 @@ from .license_info_response import LicenseInfoResponse
  from .llama_extract_feature_availability import LlamaExtractFeatureAvailability
  from .llama_extract_mode_availability import LlamaExtractModeAvailability
  from .llama_extract_mode_availability_status import LlamaExtractModeAvailabilityStatus
- from .llama_extract_settings import LlamaExtractSettings
  from .llama_parse_parameters import LlamaParseParameters
  from .llama_parse_parameters_priority import LlamaParseParametersPriority
  from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
@@ -216,7 +215,6 @@ from .metadata_filter import MetadataFilter
  from .metadata_filter_value import MetadataFilterValue
  from .metadata_filters import MetadataFilters
  from .metadata_filters_filters_item import MetadataFiltersFiltersItem
- from .multimodal_parse_resolution import MultimodalParseResolution
  from .node_relationship import NodeRelationship
  from .none_chunking_config import NoneChunkingConfig
  from .none_segmentation_config import NoneSegmentationConfig
@@ -238,6 +236,7 @@ from .paginated_response_agent_data import PaginatedResponseAgentData
  from .paginated_response_aggregate_group import PaginatedResponseAggregateGroup
  from .paginated_response_classify_job import PaginatedResponseClassifyJob
  from .paginated_response_quota_configuration import PaginatedResponseQuotaConfiguration
+ from .paginated_response_spreadsheet_job import PaginatedResponseSpreadsheetJob
  from .parse_configuration import ParseConfiguration
  from .parse_configuration_create import ParseConfigurationCreate
  from .parse_configuration_filter import ParseConfigurationFilter
@@ -319,7 +318,6 @@ from .preset_retrieval_params_search_filters_inference_schema_value import (
  from .presigned_url import PresignedUrl
  from .project import Project
  from .project_create import ProjectCreate
- from .prompt_conf import PromptConf
  from .public_model_name import PublicModelName
  from .quota_configuration import QuotaConfiguration
  from .quota_configuration_configuration_type import QuotaConfigurationConfigurationType
@@ -339,14 +337,13 @@ from .retriever_pipeline import RetrieverPipeline
  from .role import Role
  from .schema_generation_availability import SchemaGenerationAvailability
  from .schema_generation_availability_status import SchemaGenerationAvailabilityStatus
- from .schema_relax_mode import SchemaRelaxMode
  from .semantic_chunking_config import SemanticChunkingConfig
  from .sentence_chunking_config import SentenceChunkingConfig
  from .sparse_model_config import SparseModelConfig
  from .sparse_model_type import SparseModelType
+ from .spreadsheet_job import SpreadsheetJob
+ from .spreadsheet_parsing_config import SpreadsheetParsingConfig
  from .status_enum import StatusEnum
- from .struct_mode import StructMode
- from .struct_parse_conf import StructParseConf
  from .supported_llm_model import SupportedLlmModel
  from .supported_llm_model_names import SupportedLlmModelNames
  from .text_node import TextNode
@@ -388,6 +385,9 @@ __all__ = [
  "AgentDeploymentList",
  "AgentDeploymentSummary",
  "AggregateGroup",
+ "ApiKey",
+ "ApiKeyQueryResponse",
+ "ApiKeyType",
  "AutoTransformConfig",
  "AzureOpenAiEmbedding",
  "AzureOpenAiEmbeddingConfig",
@@ -408,7 +408,6 @@ __all__ = [
  "ChatAppResponse",
  "ChatData",
  "ChatMessage",
- "ChunkMode",
  "ClassificationResult",
  "ClassifierRule",
  "ClassifyJob",
@@ -457,6 +456,7 @@ __all__ = [
  "DataSourceReaderVersionMetadataReaderVersion",
  "DataSourceUpdateDispatcherConfig",
  "DeleteParams",
+ "DeleteResponse",
  "DocumentChunkMode",
  "DocumentIngestionJobParams",
  "ElementSegmentationConfig",
@@ -484,10 +484,6 @@ __all__ = [
  "ExtractConfig",
  "ExtractConfigPriority",
  "ExtractJob",
- "ExtractJobCreate",
- "ExtractJobCreateDataSchemaOverride",
- "ExtractJobCreateDataSchemaOverrideZeroValue",
- "ExtractJobCreatePriority",
  "ExtractMode",
  "ExtractModels",
  "ExtractResultset",
@@ -507,6 +503,7 @@ __all__ = [
  "ExtractSchemaValidateResponseDataSchemaValue",
  "ExtractState",
  "ExtractTarget",
+ "ExtractedTable",
  "FailPageMode",
  "FailureHandlingConfig",
  "File",
@@ -562,7 +559,6 @@ __all__ = [
  "LlamaExtractFeatureAvailability",
  "LlamaExtractModeAvailability",
  "LlamaExtractModeAvailabilityStatus",
- "LlamaExtractSettings",
  "LlamaParseParameters",
  "LlamaParseParametersPriority",
  "LlamaParseSupportedFileExtensions",
@@ -579,7 +575,6 @@ __all__ = [
  "MetadataFilterValue",
  "MetadataFilters",
  "MetadataFiltersFiltersItem",
- "MultimodalParseResolution",
  "NodeRelationship",
  "NoneChunkingConfig",
  "NoneSegmentationConfig",
@@ -601,6 +596,7 @@ __all__ = [
  "PaginatedResponseAggregateGroup",
  "PaginatedResponseClassifyJob",
  "PaginatedResponseQuotaConfiguration",
+ "PaginatedResponseSpreadsheetJob",
  "ParseConfiguration",
  "ParseConfigurationCreate",
  "ParseConfigurationFilter",
@@ -674,7 +670,6 @@ __all__ = [
  "PresignedUrl",
  "Project",
  "ProjectCreate",
- "PromptConf",
  "PublicModelName",
  "QuotaConfiguration",
  "QuotaConfigurationConfigurationType",
@@ -694,14 +689,13 @@ __all__ = [
  "Role",
  "SchemaGenerationAvailability",
  "SchemaGenerationAvailabilityStatus",
- "SchemaRelaxMode",
  "SemanticChunkingConfig",
  "SentenceChunkingConfig",
  "SparseModelConfig",
  "SparseModelType",
+ "SpreadsheetJob",
+ "SpreadsheetParsingConfig",
  "StatusEnum",
- "StructMode",
- "StructParseConf",
  "SupportedLlmModel",
  "SupportedLlmModelNames",
  "TextNode",
llama_cloud/types/agent_data.py

@@ -20,7 +20,7 @@ class AgentData(pydantic.BaseModel):
  """

  id: typing.Optional[str]
- agent_slug: str
+ deployment_name: str
  collection: typing.Optional[str]
  data: typing.Dict[str, typing.Any]
  created_at: typing.Optional[dt.datetime]
llama_cloud/types/agent_deployment_summary.py

@@ -17,10 +17,9 @@ except ImportError:
  class AgentDeploymentSummary(pydantic.BaseModel):
  id: str = pydantic.Field(description="Deployment ID. Prefixed with dpl-")
  project_id: str = pydantic.Field(description="Project ID")
- agent_slug: str = pydantic.Field(description="readable ID of the deployed app")
+ deployment_name: str = pydantic.Field(description="Identifier of the deployed app")
  thumbnail_url: typing.Optional[str]
  base_url: str = pydantic.Field(description="Base URL of the deployed app")
- display_name: str = pydantic.Field(description="Display name of the deployed app")
  created_at: dt.datetime = pydantic.Field(description="Timestamp when the app deployment was created")
  updated_at: dt.datetime = pydantic.Field(description="Timestamp when the app deployment was last updated")
  api_key_id: typing.Optional[str]
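
The same rename lands in both models: agent_slug becomes deployment_name, and AgentDeploymentSummary additionally drops display_name with no replacement in this diff. A small migration sketch with placeholder values:

    from llama_cloud.types import AgentData

    # 0.1.41: AgentData(agent_slug="my-agent", data={...})
    record = AgentData(deployment_name="my-agent", data={"score": 0.9})

    # Readers of AgentDeploymentSummary.agent_slug or .display_name must
    # switch to .deployment_name.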
llama_cloud/types/{prompt_conf.py → api_key.py}

@@ -4,6 +4,7 @@ import datetime as dt
  import typing

  from ..core.datetime_utils import serialize_datetime
+ from .api_key_type import ApiKeyType

  try:
  import pydantic
@@ -14,15 +15,19 @@ except ImportError:
  import pydantic # type: ignore


- class PromptConf(pydantic.BaseModel):
- system_prompt: typing.Optional[str] = pydantic.Field(description="The system prompt to use for the extraction.")
- extraction_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for the extraction.")
- error_handling_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for error handling.")
- reasoning_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for reasoning.")
- cite_sources_prompt: typing.Optional[typing.Dict[str, str]] = pydantic.Field(
- description="The prompt to use for citing sources."
- )
- scratchpad_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for scratchpad.")
+ class ApiKey(pydantic.BaseModel):
+ """
+ Schema for an API Key.
+ """
+
+ id: str = pydantic.Field(description="Unique identifier")
+ created_at: typing.Optional[dt.datetime]
+ updated_at: typing.Optional[dt.datetime]
+ name: typing.Optional[str]
+ project_id: typing.Optional[str]
+ key_type: typing.Optional[ApiKeyType]
+ user_id: str
+ redacted_api_key: str

  def json(self, **kwargs: typing.Any) -> str:
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
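
The replacement model keeps the generated shape of the file it reuses: three required fields, the rest optional. A construction sketch with placeholder values, assuming the usual pydantic defaults for the Optional fields:

    from llama_cloud.types import ApiKey

    key = ApiKey(
        id="key_abc",                    # placeholder values throughout
        user_id="user_123",
        redacted_api_key="llx-****wxyz",
    )
    print(key.json())  # the model overrides json(), as shown above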