llama-cloud 0.1.41__py3-none-any.whl → 0.1.42__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of llama-cloud might be problematic.
- llama_cloud/__init__.py +6 -0
- llama_cloud/resources/alpha/client.py +14 -30
- llama_cloud/resources/beta/client.py +455 -24
- llama_cloud/resources/organizations/client.py +18 -4
- llama_cloud/resources/parsing/client.py +56 -0
- llama_cloud/resources/pipelines/client.py +164 -0
- llama_cloud/types/__init__.py +6 -0
- llama_cloud/types/agent_data.py +1 -1
- llama_cloud/types/agent_deployment_summary.py +1 -2
- llama_cloud/types/api_key.py +43 -0
- llama_cloud/types/api_key_query_response.py +38 -0
- llama_cloud/types/api_key_type.py +17 -0
- llama_cloud/types/legacy_parse_job_config.py +3 -0
- llama_cloud/types/llama_parse_parameters.py +7 -0
- llama_cloud/types/organization.py +1 -0
- llama_cloud/types/parse_job_config.py +7 -0
- llama_cloud/types/quota_configuration_configuration_type.py +4 -0
- {llama_cloud-0.1.41.dist-info → llama_cloud-0.1.42.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.41.dist-info → llama_cloud-0.1.42.dist-info}/RECORD +21 -18
- {llama_cloud-0.1.41.dist-info → llama_cloud-0.1.42.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.41.dist-info → llama_cloud-0.1.42.dist-info}/WHEEL +0 -0
llama_cloud/resources/parsing/client.py
CHANGED

@@ -228,6 +228,11 @@ class ParsingClient:
         high_res_ocr: bool,
         html_make_all_elements_visible: bool,
         layout_aware: bool,
+        specialized_chart_parsing_agentic: bool,
+        specialized_chart_parsing_plus: bool,
+        specialized_chart_parsing_efficient: bool,
+        specialized_image_parsing: bool,
+        precise_bounding_box: bool,
         html_remove_fixed_elements: bool,
         html_remove_navigation_elements: bool,
         http_proxy: str,
@@ -250,6 +255,8 @@ class ParsingClient:
         preserve_very_small_text: bool,
         skip_diagonal_text: bool,
         spreadsheet_extract_sub_tables: bool,
+        spreadsheet_force_formula_computation: bool,
+        inline_images_in_markdown: bool,
         structured_output: bool,
         structured_output_json_schema: str,
         structured_output_json_schema_name: str,
@@ -360,6 +367,16 @@ class ParsingClient:
 
         - layout_aware: bool.
 
+        - specialized_chart_parsing_agentic: bool.
+
+        - specialized_chart_parsing_plus: bool.
+
+        - specialized_chart_parsing_efficient: bool.
+
+        - specialized_image_parsing: bool.
+
+        - precise_bounding_box: bool.
+
         - html_remove_fixed_elements: bool.
 
         - html_remove_navigation_elements: bool.
@@ -404,6 +421,10 @@ class ParsingClient:
 
         - spreadsheet_extract_sub_tables: bool.
 
+        - spreadsheet_force_formula_computation: bool.
+
+        - inline_images_in_markdown: bool.
+
         - structured_output: bool.
 
         - structured_output_json_schema: str.
@@ -526,6 +547,11 @@ class ParsingClient:
                 "high_res_ocr": high_res_ocr,
                 "html_make_all_elements_visible": html_make_all_elements_visible,
                 "layout_aware": layout_aware,
+                "specialized_chart_parsing_agentic": specialized_chart_parsing_agentic,
+                "specialized_chart_parsing_plus": specialized_chart_parsing_plus,
+                "specialized_chart_parsing_efficient": specialized_chart_parsing_efficient,
+                "specialized_image_parsing": specialized_image_parsing,
+                "precise_bounding_box": precise_bounding_box,
                 "html_remove_fixed_elements": html_remove_fixed_elements,
                 "html_remove_navigation_elements": html_remove_navigation_elements,
                 "http_proxy": http_proxy,
@@ -547,6 +573,8 @@ class ParsingClient:
                 "preserve_very_small_text": preserve_very_small_text,
                 "skip_diagonal_text": skip_diagonal_text,
                 "spreadsheet_extract_sub_tables": spreadsheet_extract_sub_tables,
+                "spreadsheet_force_formula_computation": spreadsheet_force_formula_computation,
+                "inline_images_in_markdown": inline_images_in_markdown,
                 "structured_output": structured_output,
                 "structured_output_json_schema": structured_output_json_schema,
                 "structured_output_json_schema_name": structured_output_json_schema_name,
@@ -1404,6 +1432,11 @@ class AsyncParsingClient:
         high_res_ocr: bool,
         html_make_all_elements_visible: bool,
         layout_aware: bool,
+        specialized_chart_parsing_agentic: bool,
+        specialized_chart_parsing_plus: bool,
+        specialized_chart_parsing_efficient: bool,
+        specialized_image_parsing: bool,
+        precise_bounding_box: bool,
         html_remove_fixed_elements: bool,
         html_remove_navigation_elements: bool,
         http_proxy: str,
@@ -1426,6 +1459,8 @@ class AsyncParsingClient:
         preserve_very_small_text: bool,
         skip_diagonal_text: bool,
         spreadsheet_extract_sub_tables: bool,
+        spreadsheet_force_formula_computation: bool,
+        inline_images_in_markdown: bool,
         structured_output: bool,
         structured_output_json_schema: str,
         structured_output_json_schema_name: str,
@@ -1536,6 +1571,16 @@ class AsyncParsingClient:
 
         - layout_aware: bool.
 
+        - specialized_chart_parsing_agentic: bool.
+
+        - specialized_chart_parsing_plus: bool.
+
+        - specialized_chart_parsing_efficient: bool.
+
+        - specialized_image_parsing: bool.
+
+        - precise_bounding_box: bool.
+
         - html_remove_fixed_elements: bool.
 
         - html_remove_navigation_elements: bool.
@@ -1580,6 +1625,10 @@ class AsyncParsingClient:
 
         - spreadsheet_extract_sub_tables: bool.
 
+        - spreadsheet_force_formula_computation: bool.
+
+        - inline_images_in_markdown: bool.
+
         - structured_output: bool.
 
         - structured_output_json_schema: str.
@@ -1702,6 +1751,11 @@ class AsyncParsingClient:
                 "high_res_ocr": high_res_ocr,
                 "html_make_all_elements_visible": html_make_all_elements_visible,
                 "layout_aware": layout_aware,
+                "specialized_chart_parsing_agentic": specialized_chart_parsing_agentic,
+                "specialized_chart_parsing_plus": specialized_chart_parsing_plus,
+                "specialized_chart_parsing_efficient": specialized_chart_parsing_efficient,
+                "specialized_image_parsing": specialized_image_parsing,
+                "precise_bounding_box": precise_bounding_box,
                 "html_remove_fixed_elements": html_remove_fixed_elements,
                 "html_remove_navigation_elements": html_remove_navigation_elements,
                 "http_proxy": http_proxy,
@@ -1723,6 +1777,8 @@ class AsyncParsingClient:
                 "preserve_very_small_text": preserve_very_small_text,
                 "skip_diagonal_text": skip_diagonal_text,
                 "spreadsheet_extract_sub_tables": spreadsheet_extract_sub_tables,
+                "spreadsheet_force_formula_computation": spreadsheet_force_formula_computation,
+                "inline_images_in_markdown": inline_images_in_markdown,
                 "structured_output": structured_output,
                 "structured_output_json_schema": structured_output_json_schema,
                 "structured_output_json_schema_name": structured_output_json_schema_name,
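Taken together, the ParsingClient/AsyncParsingClient changes thread five new chart/image toggles (specialized_chart_parsing_agentic, specialized_chart_parsing_plus, specialized_chart_parsing_efficient, specialized_image_parsing, precise_bounding_box) and two output options (spreadsheet_force_formula_computation, inline_images_in_markdown) through the signature, the docstring, and the request body. A minimal sketch of a caller opting in — the method name upload_file and its file argument are assumptions (this diff shows only the keyword list), while the new keyword names are taken verbatim from the hunks above:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # `upload_file` and its file argument are assumed; the flags below are
    # exactly the parameters added in 0.1.42.
    with open("report.pdf", "rb") as f:
        job = client.parsing.upload_file(
            upload_file=f,
            specialized_chart_parsing_agentic=True,
            specialized_image_parsing=True,
            precise_bounding_box=True,
            spreadsheet_force_formula_computation=True,
            inline_images_in_markdown=True,
        )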
llama_cloud/resources/pipelines/client.py
CHANGED

@@ -1706,6 +1706,44 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def sync_pipeline_document(self, document_id: str, pipeline_id: str) -> typing.Any:
+        """
+        Sync a specific document for a pipeline.
+
+        Parameters:
+        - document_id: str.
+
+        - pipeline_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.pipelines.sync_pipeline_document(
+            document_id="string",
+            pipeline_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/sync",
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def list_pipeline_document_chunks(self, document_id: str, pipeline_id: str) -> typing.List[TextNode]:
         """
         Return a list of chunks for a pipeline document.
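The generated docstring shows only the happy path. Since the method raises UnprocessableEntityError on a 422 and ApiError for any other non-2xx response, a defensive caller might look like the sketch below; the ApiError import path and the assumption that UnprocessableEntityError subclasses it follow the usual Fern layout rather than anything shown in this diff:

    from llama_cloud.client import LlamaCloud
    from llama_cloud.core.api_error import ApiError  # assumed import path

    client = LlamaCloud(token="YOUR_TOKEN")

    try:
        result = client.pipelines.sync_pipeline_document(
            document_id="doc-123",  # placeholder IDs
            pipeline_id="pl-456",
        )
    except ApiError as err:
        # 422 validation failures arrive as UnprocessableEntityError; other
        # statuses fall through as ApiError with the response body attached.
        print(err.status_code, err.body)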
@@ -1744,6 +1782,50 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def force_sync_all_pipeline_documents(
+        self, pipeline_id: str, *, batch_size: typing.Optional[int] = None, only_failed: typing.Optional[bool] = None
+    ) -> None:
+        """
+        Force sync all documents in a pipeline by batching document ingestion jobs.
+
+        - Iterates all document refs for the pipeline
+        - Enqueues document ingestion jobs in batches of `batch_size`
+
+        Parameters:
+        - pipeline_id: str.
+
+        - batch_size: typing.Optional[int].
+
+        - only_failed: typing.Optional[bool]. Only sync retriable documents (failed/cancelled/not-started/stalled-in-progress)
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.pipelines.force_sync_all_pipeline_documents(
+            pipeline_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents/force-sync-all"
+            ),
+            params=remove_none_from_dict({"batch_size": batch_size, "only_failed": only_failed}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
 
 class AsyncPipelinesClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
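The docstring example passes only pipeline_id, but the two query parameters are where this endpoint earns its name: per the parameter docs above, only_failed restricts the resync to retriable documents, and batch_size controls how many ingestion jobs are enqueued per batch. Both are dropped from the query string by remove_none_from_dict when left as None. A sketch:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # Re-enqueue only failed/cancelled/not-started/stalled documents,
    # 50 ingestion jobs per batch.
    client.pipelines.force_sync_all_pipeline_documents(
        pipeline_id="pl-456",  # placeholder ID
        batch_size=50,
        only_failed=True,
    )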
@@ -3397,6 +3479,44 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    async def sync_pipeline_document(self, document_id: str, pipeline_id: str) -> typing.Any:
+        """
+        Sync a specific document for a pipeline.
+
+        Parameters:
+        - document_id: str.
+
+        - pipeline_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.pipelines.sync_pipeline_document(
+            document_id="string",
+            pipeline_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/sync",
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def list_pipeline_document_chunks(self, document_id: str, pipeline_id: str) -> typing.List[TextNode]:
         """
         Return a list of chunks for a pipeline document.
@@ -3434,3 +3554,47 @@ class AsyncPipelinesClient:
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def force_sync_all_pipeline_documents(
+        self, pipeline_id: str, *, batch_size: typing.Optional[int] = None, only_failed: typing.Optional[bool] = None
+    ) -> None:
+        """
+        Force sync all documents in a pipeline by batching document ingestion jobs.
+
+        - Iterates all document refs for the pipeline
+        - Enqueues document ingestion jobs in batches of `batch_size`
+
+        Parameters:
+        - pipeline_id: str.
+
+        - batch_size: typing.Optional[int].
+
+        - only_failed: typing.Optional[bool]. Only sync retriable documents (failed/cancelled/not-started/stalled-in-progress)
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.pipelines.force_sync_all_pipeline_documents(
+            pipeline_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents/force-sync-all"
+            ),
+            params=remove_none_from_dict({"batch_size": batch_size, "only_failed": only_failed}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
llama_cloud/types/__init__.py
CHANGED
@@ -19,6 +19,9 @@ from .agent_data import AgentData
 from .agent_deployment_list import AgentDeploymentList
 from .agent_deployment_summary import AgentDeploymentSummary
 from .aggregate_group import AggregateGroup
+from .api_key import ApiKey
+from .api_key_query_response import ApiKeyQueryResponse
+from .api_key_type import ApiKeyType
 from .auto_transform_config import AutoTransformConfig
 from .azure_open_ai_embedding import AzureOpenAiEmbedding
 from .azure_open_ai_embedding_config import AzureOpenAiEmbeddingConfig
@@ -388,6 +391,9 @@ __all__ = [
     "AgentDeploymentList",
     "AgentDeploymentSummary",
     "AggregateGroup",
+    "ApiKey",
+    "ApiKeyQueryResponse",
+    "ApiKeyType",
     "AutoTransformConfig",
     "AzureOpenAiEmbedding",
     "AzureOpenAiEmbeddingConfig",
llama_cloud/types/agent_deployment_summary.py
CHANGED
@@ -17,10 +17,9 @@ except ImportError:
 class AgentDeploymentSummary(pydantic.BaseModel):
     id: str = pydantic.Field(description="Deployment ID. Prefixed with dpl-")
     project_id: str = pydantic.Field(description="Project ID")
-
+    deployment_name: str = pydantic.Field(description="Identifier of the deployed app")
     thumbnail_url: typing.Optional[str]
     base_url: str = pydantic.Field(description="Base URL of the deployed app")
-    display_name: str = pydantic.Field(description="Display name of the deployed app")
     created_at: dt.datetime = pydantic.Field(description="Timestamp when the app deployment was created")
     updated_at: dt.datetime = pydantic.Field(description="Timestamp when the app deployment was last updated")
     api_key_id: typing.Optional[str]
llama_cloud/types/api_key.py
ADDED

@@ -0,0 +1,43 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .api_key_type import ApiKeyType
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ApiKey(pydantic.BaseModel):
+    """
+    Schema for an API Key.
+    """
+
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
+    name: typing.Optional[str]
+    project_id: typing.Optional[str]
+    key_type: typing.Optional[ApiKeyType]
+    user_id: str
+    redacted_api_key: str
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
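Like the other generated models, ApiKey is frozen and overrides json()/dict() to default to by_alias=True and exclude_unset=True, so fields that were never set stay out of the payload. A small sketch with made-up values:

    import datetime as dt

    from llama_cloud.types import ApiKey, ApiKeyType

    key = ApiKey(
        id="key_abc123",  # all values here are made up
        name="ci-runner",
        key_type=ApiKeyType.AGENT,
        user_id="user_9",
        redacted_api_key="llx-...xyz",
        created_at=dt.datetime(2024, 1, 1),
    )

    # exclude_unset=True drops never-assigned fields (updated_at, project_id),
    # mirroring what the API actually returned.
    print(key.json())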
llama_cloud/types/api_key_query_response.py
ADDED

@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .api_key import ApiKey
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ApiKeyQueryResponse(pydantic.BaseModel):
+    """
+    Response schema for paginated API key queries.
+    """
+
+    items: typing.List[ApiKey] = pydantic.Field(description="The list of items.")
+    next_page_token: typing.Optional[str]
+    total_size: typing.Optional[int]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
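ApiKeyQueryResponse is the paginated envelope: iterate items and follow next_page_token until it comes back empty. The client method that returns it is not part of this diff, so the sketch below parses a payload by hand:

    from llama_cloud.types import ApiKeyQueryResponse

    page = ApiKeyQueryResponse.parse_obj(
        {
            "items": [
                {
                    "id": "key_abc123",  # made-up record
                    "user_id": "user_9",
                    "redacted_api_key": "llx-...xyz",
                }
            ],
            "next_page_token": None,  # no further pages
            "total_size": 1,
        }
    )
    print(len(page.items), page.total_size)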
llama_cloud/types/api_key_type.py
ADDED

@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class ApiKeyType(str, enum.Enum):
+    USER = "user"
+    AGENT = "agent"
+
+    def visit(self, user: typing.Callable[[], T_Result], agent: typing.Callable[[], T_Result]) -> T_Result:
+        if self is ApiKeyType.USER:
+            return user()
+        if self is ApiKeyType.AGENT:
+            return agent()
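ApiKeyType ships Fern's exhaustive visit() helper, which takes one callable per variant instead of leaving callers to write if/else chains. A minimal sketch:

    from llama_cloud.types import ApiKeyType

    def describe(key_type: ApiKeyType) -> str:
        # visit() requires a handler for every variant, so a future variant
        # breaks call sites loudly instead of silently falling through.
        return key_type.visit(
            user=lambda: "issued to a human user",
            agent=lambda: "issued to an agent deployment",
        )

    print(describe(ApiKeyType.AGENT))  # -> issued to an agent deployment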
llama_cloud/types/legacy_parse_job_config.py
CHANGED

@@ -59,6 +59,9 @@ class LegacyParseJobConfig(pydantic.BaseModel):
         alias="doNotUnrollColumns", description="Whether to unroll columns."
     )
     spread_sheet_extract_sub_tables: typing.Optional[bool] = pydantic.Field(alias="spreadSheetExtractSubTables")
+    spread_sheet_force_formula_computation: typing.Optional[bool] = pydantic.Field(
+        alias="spreadSheetForceFormulaComputation"
+    )
     extract_layout: typing.Optional[bool] = pydantic.Field(alias="extractLayout")
     high_res_ocr: typing.Optional[bool] = pydantic.Field(alias="highResOcr")
     html_make_all_elements_visible: typing.Optional[bool] = pydantic.Field(alias="htmlMakeAllElementsVisible")
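LegacyParseJobConfig keeps the camelCase wire names through pydantic aliases, so the new snake_case attribute serializes as spreadSheetForceFormulaComputation. A sketch — it assumes the model's remaining fields are optional and that it carries the same by_alias/exclude_unset dict() override as the other models in this diff:

    from llama_cloud.types import LegacyParseJobConfig

    cfg = LegacyParseJobConfig.parse_obj(
        {"spreadSheetForceFormulaComputation": True}  # alias (wire) key
    )
    print(cfg.spread_sheet_force_formula_computation)  # True
    print(cfg.dict())  # {'spreadSheetForceFormulaComputation': True}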
llama_cloud/types/llama_parse_parameters.py
CHANGED

@@ -50,6 +50,11 @@ class LlamaParseParameters(pydantic.BaseModel):
     high_res_ocr: typing.Optional[bool]
     html_make_all_elements_visible: typing.Optional[bool]
     layout_aware: typing.Optional[bool]
+    specialized_chart_parsing_agentic: typing.Optional[bool]
+    specialized_chart_parsing_plus: typing.Optional[bool]
+    specialized_chart_parsing_efficient: typing.Optional[bool]
+    specialized_image_parsing: typing.Optional[bool]
+    precise_bounding_box: typing.Optional[bool]
     html_remove_navigation_elements: typing.Optional[bool]
     html_remove_fixed_elements: typing.Optional[bool]
     guess_xlsx_sheet_name: typing.Optional[bool]

@@ -99,6 +104,8 @@ class LlamaParseParameters(pydantic.BaseModel):
     complemental_formatting_instruction: typing.Optional[str]
     content_guideline_instruction: typing.Optional[str]
     spreadsheet_extract_sub_tables: typing.Optional[bool]
+    spreadsheet_force_formula_computation: typing.Optional[bool]
+    inline_images_in_markdown: typing.Optional[bool]
     job_timeout_in_seconds: typing.Optional[float]
     job_timeout_extra_time_per_page_in_seconds: typing.Optional[float]
     strict_mode_image_extraction: typing.Optional[bool]
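The same five chart/image flags and two spreadsheet/markdown flags added to the ParsingClient keyword lists also land on LlamaParseParameters, so stored parse configurations can opt in declaratively. A sketch — all of the new fields are Optional[bool], and it assumes (as the visible fields suggest) that the model's other fields are optional too:

    from llama_cloud.types import LlamaParseParameters

    params = LlamaParseParameters(
        specialized_chart_parsing_efficient=True,
        precise_bounding_box=True,
        inline_images_in_markdown=True,
    )
    # exclude_unset serialization keeps the payload to exactly what was set.
    print(params.json())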
llama_cloud/types/organization.py
CHANGED

@@ -28,6 +28,7 @@ class Organization(pydantic.BaseModel):
     parse_plan_level: typing.Optional[ParsePlanLevel] = pydantic.Field(
         description="Whether the organization is a Parse Premium customer."
     )
+    feature_flags: typing.Optional[typing.Dict[str, typing.Any]]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
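feature_flags is a free-form Optional mapping, so reading a flag is plain guarded dict access. In the sketch below the id/name fields and the flag name are invented purely for illustration:

    from llama_cloud.types import Organization

    org = Organization.parse_obj(
        {"id": "org_1", "name": "Acme", "feature_flags": {"new_parser_ui": True}}
    )

    flags = org.feature_flags or {}  # the field may be None
    if flags.get("new_parser_ui"):
        print("flag enabled for this organization")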
llama_cloud/types/parse_job_config.py
CHANGED

@@ -52,6 +52,11 @@ class ParseJobConfig(pydantic.BaseModel):
     high_res_ocr: typing.Optional[bool]
     html_make_all_elements_visible: typing.Optional[bool]
     layout_aware: typing.Optional[bool]
+    specialized_chart_parsing_agentic: typing.Optional[bool]
+    specialized_chart_parsing_plus: typing.Optional[bool]
+    specialized_chart_parsing_efficient: typing.Optional[bool]
+    specialized_image_parsing: typing.Optional[bool]
+    precise_bounding_box: typing.Optional[bool]
     html_remove_navigation_elements: typing.Optional[bool]
     html_remove_fixed_elements: typing.Optional[bool]
     guess_xlsx_sheet_name: typing.Optional[bool]

@@ -101,6 +106,8 @@ class ParseJobConfig(pydantic.BaseModel):
     complemental_formatting_instruction: typing.Optional[str]
     content_guideline_instruction: typing.Optional[str]
     spreadsheet_extract_sub_tables: typing.Optional[bool]
+    spreadsheet_force_formula_computation: typing.Optional[bool]
+    inline_images_in_markdown: typing.Optional[bool]
     job_timeout_in_seconds: typing.Optional[float]
     job_timeout_extra_time_per_page_in_seconds: typing.Optional[float]
     strict_mode_image_extraction: typing.Optional[bool]
llama_cloud/types/quota_configuration_configuration_type.py
CHANGED

@@ -15,6 +15,7 @@ class QuotaConfigurationConfigurationType(str, enum.Enum):
     RATE_LIMIT_PARSE_CONCURRENT_DEFAULT = "rate_limit_parse_concurrent_default"
     RATE_LIMIT_CONCURRENT_JOBS_IN_EXECUTION_DEFAULT = "rate_limit_concurrent_jobs_in_execution_default"
     RATE_LIMIT_CONCURRENT_JOBS_IN_EXECUTION_DOC_INGEST = "rate_limit_concurrent_jobs_in_execution_doc_ingest"
+    LIMIT_EMBEDDING_CHARACTER = "limit_embedding_character"
 
     def visit(
         self,

@@ -22,6 +23,7 @@ class QuotaConfigurationConfigurationType(str, enum.Enum):
         rate_limit_parse_concurrent_default: typing.Callable[[], T_Result],
         rate_limit_concurrent_jobs_in_execution_default: typing.Callable[[], T_Result],
         rate_limit_concurrent_jobs_in_execution_doc_ingest: typing.Callable[[], T_Result],
+        limit_embedding_character: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is QuotaConfigurationConfigurationType.RATE_LIMIT_PARSE_CONCURRENT_PREMIUM:
             return rate_limit_parse_concurrent_premium()

@@ -31,3 +33,5 @@ class QuotaConfigurationConfigurationType(str, enum.Enum):
             return rate_limit_concurrent_jobs_in_execution_default()
         if self is QuotaConfigurationConfigurationType.RATE_LIMIT_CONCURRENT_JOBS_IN_EXECUTION_DOC_INGEST:
             return rate_limit_concurrent_jobs_in_execution_doc_ingest()
+        if self is QuotaConfigurationConfigurationType.LIMIT_EMBEDDING_CHARACTER:
+            return limit_embedding_character()
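The enum gains one variant and visit() one required handler, so every existing visit() call site must add a limit_embedding_character callable when upgrading — that enforced exhaustiveness is the point of the pattern. Independent of visit(), the new member behaves like any str-valued enum; a tiny sketch (the import path is assumed to follow the package's usual type exports):

    from llama_cloud.types import QuotaConfigurationConfigurationType as QType

    # The new variant compares by identity like the rest of the enum;
    # its wire value is the string "limit_embedding_character".
    cfg = QType("limit_embedding_character")
    assert cfg is QType.LIMIT_EMBEDDING_CHARACTER
    print(cfg.value)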