llama-cloud 0.1.42__py3-none-any.whl → 0.1.43__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of llama-cloud has been flagged as potentially problematic.
- llama_cloud/__init__.py +13 -19
- llama_cloud/resources/__init__.py +6 -0
- llama_cloud/resources/beta/client.py +555 -0
- llama_cloud/resources/jobs/client.py +0 -8
- llama_cloud/resources/llama_extract/__init__.py +6 -0
- llama_cloud/resources/llama_extract/client.py +825 -941
- llama_cloud/resources/llama_extract/types/__init__.py +6 -0
- llama_cloud/types/__init__.py +10 -22
- llama_cloud/types/{prompt_conf.py → delete_response.py} +6 -9
- llama_cloud/types/extract_config.py +1 -0
- llama_cloud/types/extract_models.py +4 -0
- llama_cloud/types/{extract_job_create.py → extracted_table.py} +8 -14
- llama_cloud/types/paginated_response_spreadsheet_job.py +34 -0
- llama_cloud/types/public_model_name.py +4 -0
- llama_cloud/types/spreadsheet_job.py +50 -0
- llama_cloud/types/spreadsheet_parsing_config.py +35 -0
- {llama_cloud-0.1.42.dist-info → llama_cloud-0.1.43.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.42.dist-info → llama_cloud-0.1.43.dist-info}/RECORD +23 -26
- llama_cloud/types/chunk_mode.py +0 -29
- llama_cloud/types/llama_extract_settings.py +0 -67
- llama_cloud/types/multimodal_parse_resolution.py +0 -17
- llama_cloud/types/schema_relax_mode.py +0 -25
- llama_cloud/types/struct_mode.py +0 -33
- llama_cloud/types/struct_parse_conf.py +0 -63
- /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_data_schema_override.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_data_schema_override_zero_value.py +0 -0
- /llama_cloud/{types → resources/llama_extract/types}/extract_job_create_priority.py +0 -0
- {llama_cloud-0.1.42.dist-info → llama_cloud-0.1.43.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.42.dist-info → llama_cloud-0.1.43.dist-info}/WHEEL +0 -0
llama_cloud/resources/beta/client.py:

@@ -18,6 +18,7 @@ from ...types.api_key_type import ApiKeyType
 from ...types.batch import Batch
 from ...types.batch_paginated_list import BatchPaginatedList
 from ...types.batch_public_output import BatchPublicOutput
+from ...types.delete_response import DeleteResponse
 from ...types.file import File
 from ...types.file_create import FileCreate
 from ...types.file_filter import FileFilter
@@ -28,10 +29,14 @@ from ...types.llama_parse_parameters import LlamaParseParameters
 from ...types.paginated_response_agent_data import PaginatedResponseAgentData
 from ...types.paginated_response_aggregate_group import PaginatedResponseAggregateGroup
 from ...types.paginated_response_quota_configuration import PaginatedResponseQuotaConfiguration
+from ...types.paginated_response_spreadsheet_job import PaginatedResponseSpreadsheetJob
 from ...types.parse_configuration import ParseConfiguration
 from ...types.parse_configuration_create import ParseConfigurationCreate
 from ...types.parse_configuration_filter import ParseConfigurationFilter
 from ...types.parse_configuration_query_response import ParseConfigurationQueryResponse
+from ...types.presigned_url import PresignedUrl
+from ...types.spreadsheet_job import SpreadsheetJob
+from ...types.spreadsheet_parsing_config import SpreadsheetParsingConfig
 
 try:
     import pydantic
@@ -786,6 +791,61 @@ class BetaClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def delete_agent_data_by_query_api_v_1_beta_agent_data_delete_post(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        deployment_name: str,
+        collection: typing.Optional[str] = OMIT,
+        filter: typing.Optional[typing.Dict[str, typing.Optional[FilterOperation]]] = OMIT,
+    ) -> DeleteResponse:
+        """
+        Bulk delete agent data by query (deployment_name, collection, optional filters).
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - deployment_name: str. The agent deployment's name to delete data for
+
+            - collection: typing.Optional[str]. The logical agent data collection to delete from
+
+            - filter: typing.Optional[typing.Dict[str, typing.Optional[FilterOperation]]].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.beta.delete_agent_data_by_query_api_v_1_beta_agent_data_delete_post(
+            deployment_name="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"deployment_name": deployment_name}
+        if collection is not OMIT:
+            _request["collection"] = collection
+        if filter is not OMIT:
+            _request["filter"] = filter
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/agent-data/:delete"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(DeleteResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def list_quota_configurations(
         self,
         *,
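For readers of this diff, here is a short usage sketch of the new bulk-delete endpoint. It is illustrative only: the top-level FilterOperation re-export, its eq comparison field, and the "status"/"archived" values are assumptions, not shown in this diff.

from llama_cloud import FilterOperation  # top-level re-export assumed
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Delete every record in one logical collection of a deployment that matches the filter.
# FilterOperation(eq=...) and the "status"/"archived" field are illustrative assumptions.
deleted = client.beta.delete_agent_data_by_query_api_v_1_beta_agent_data_delete_post(
    deployment_name="my-agent-deployment",
    collection="documents",
    filter={"status": FilterOperation(eq="archived")},
)
print(deleted)  # DeleteResponse; its fields are not shown in this diff

Per the generated docstring, deployment_name is required while collection and filter only narrow the query further.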
@@ -1569,6 +1629,226 @@ class BetaClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def list_spreadsheet_jobs(
+        self,
+        *,
+        include_results: typing.Optional[bool] = None,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        page_size: typing.Optional[int] = None,
+        page_token: typing.Optional[str] = None,
+    ) -> PaginatedResponseSpreadsheetJob:
+        """
+        List spreadsheet parsing jobs.
+        Experimental: This endpoint is not yet ready for production use and is subject to change at any time.
+
+        Parameters:
+            - include_results: typing.Optional[bool].
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - page_size: typing.Optional[int].
+
+            - page_token: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.beta.list_spreadsheet_jobs()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/spreadsheet/jobs"),
+            params=remove_none_from_dict(
+                {
+                    "include_results": include_results,
+                    "project_id": project_id,
+                    "organization_id": organization_id,
+                    "page_size": page_size,
+                    "page_token": page_token,
+                }
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PaginatedResponseSpreadsheetJob, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def create_spreadsheet_job(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        file_id: str,
+        config: typing.Optional[SpreadsheetParsingConfig] = OMIT,
+    ) -> SpreadsheetJob:
+        """
+        Create a spreadsheet parsing job.
+        Experimental: This endpoint is not yet ready for production use and is subject to change at any time.
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - file_id: str. The ID of the file to parse
+
+            - config: typing.Optional[SpreadsheetParsingConfig]. Configuration for the parsing job
+        ---
+        from llama_cloud import SpreadsheetParsingConfig
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.beta.create_spreadsheet_job(
+            file_id="string",
+            config=SpreadsheetParsingConfig(),
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"file_id": file_id}
+        if config is not OMIT:
+            _request["config"] = config
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/spreadsheet/jobs"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(SpreadsheetJob, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def get_spreadsheet_job(
+        self,
+        spreadsheet_job_id: str,
+        *,
+        include_results: typing.Optional[bool] = None,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+    ) -> SpreadsheetJob:
+        """
+        Get a spreadsheet parsing job.
+
+        When include_results=True (default), the response will include extracted tables and results
+        if the job is complete, eliminating the need for a separate /results call.
+
+        Experimental: This endpoint is not yet ready for production use and is subject to change at any time.
+
+        Parameters:
+            - spreadsheet_job_id: str.
+
+            - include_results: typing.Optional[bool].
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.beta.get_spreadsheet_job(
+            spreadsheet_job_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/spreadsheet/jobs/{spreadsheet_job_id}"
+            ),
+            params=remove_none_from_dict(
+                {"include_results": include_results, "project_id": project_id, "organization_id": organization_id}
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(SpreadsheetJob, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def get_table_download_presigned_url(
+        self,
+        spreadsheet_job_id: str,
+        table_id: int,
+        *,
+        expires_at_seconds: typing.Optional[int] = None,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+    ) -> PresignedUrl:
+        """
+        Generate a presigned URL to download a specific extracted table.
+        Experimental: This endpoint is not yet ready for production use and is subject to change at any time.
+
+        Parameters:
+            - spreadsheet_job_id: str.
+
+            - table_id: int.
+
+            - expires_at_seconds: typing.Optional[int].
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.beta.get_table_download_presigned_url(
+            spreadsheet_job_id="string",
+            table_id=1,
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/beta/spreadsheet/jobs/{spreadsheet_job_id}/tables/{table_id}/result",
+            ),
+            params=remove_none_from_dict(
+                {"expires_at_seconds": expires_at_seconds, "project_id": project_id, "organization_id": organization_id}
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PresignedUrl, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
 
 class AsyncBetaClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
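The four methods above combine into a create → poll → download workflow. The sketch below is a non-authoritative outline: the SpreadsheetJob attributes (id, status and its terminal values), the PresignedUrl url field, and table_id=0 are assumptions made for illustration and are not shown in this diff.

import time

import httpx

from llama_cloud import SpreadsheetParsingConfig
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Kick off parsing of an already-uploaded file (file_id comes from the Files API).
job = client.beta.create_spreadsheet_job(
    file_id="YOUR_FILE_ID",
    config=SpreadsheetParsingConfig(),
)

# Poll until the job reaches a terminal state; the `status` attribute and its
# values ("SUCCESS", "ERROR") are assumptions, not confirmed by this diff.
while job.status not in ("SUCCESS", "ERROR"):
    time.sleep(5)
    job = client.beta.get_spreadsheet_job(spreadsheet_job_id=job.id, include_results=True)

# Download the first extracted table via a presigned URL; `table_id=0` is illustrative.
presigned = client.beta.get_table_download_presigned_url(
    spreadsheet_job_id=job.id,
    table_id=0,
    expires_at_seconds=3600,
)
content = httpx.get(presigned.url).content  # `url` field on PresignedUrl assumed

As the docstring notes, get_spreadsheet_job returns the extracted tables along with the job when include_results is left at its default and the job is complete, so no separate results call is needed.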
@@ -2311,6 +2591,61 @@ class AsyncBetaClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    async def delete_agent_data_by_query_api_v_1_beta_agent_data_delete_post(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        deployment_name: str,
+        collection: typing.Optional[str] = OMIT,
+        filter: typing.Optional[typing.Dict[str, typing.Optional[FilterOperation]]] = OMIT,
+    ) -> DeleteResponse:
+        """
+        Bulk delete agent data by query (deployment_name, collection, optional filters).
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - deployment_name: str. The agent deployment's name to delete data for
+
+            - collection: typing.Optional[str]. The logical agent data collection to delete from
+
+            - filter: typing.Optional[typing.Dict[str, typing.Optional[FilterOperation]]].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.beta.delete_agent_data_by_query_api_v_1_beta_agent_data_delete_post(
+            deployment_name="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"deployment_name": deployment_name}
+        if collection is not OMIT:
+            _request["collection"] = collection
+        if filter is not OMIT:
+            _request["filter"] = filter
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/agent-data/:delete"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(DeleteResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def list_quota_configurations(
         self,
         *,
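The async variant is invoked the same way under asyncio; a minimal sketch, with placeholder deployment and collection names:

import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def purge_collection() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    # Without a filter, the query is scoped only by deployment_name and collection.
    result = await client.beta.delete_agent_data_by_query_api_v_1_beta_agent_data_delete_post(
        deployment_name="my-agent-deployment",
        collection="scratch",
    )
    print(result)  # DeleteResponse; its field names are not shown in this diff


asyncio.run(purge_collection())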
@@ -3093,3 +3428,223 @@ class AsyncBetaClient:
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def list_spreadsheet_jobs(
+        self,
+        *,
+        include_results: typing.Optional[bool] = None,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        page_size: typing.Optional[int] = None,
+        page_token: typing.Optional[str] = None,
+    ) -> PaginatedResponseSpreadsheetJob:
+        """
+        List spreadsheet parsing jobs.
+        Experimental: This endpoint is not yet ready for production use and is subject to change at any time.
+
+        Parameters:
+            - include_results: typing.Optional[bool].
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - page_size: typing.Optional[int].
+
+            - page_token: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.beta.list_spreadsheet_jobs()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/spreadsheet/jobs"),
+            params=remove_none_from_dict(
+                {
+                    "include_results": include_results,
+                    "project_id": project_id,
+                    "organization_id": organization_id,
+                    "page_size": page_size,
+                    "page_token": page_token,
+                }
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PaginatedResponseSpreadsheetJob, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def create_spreadsheet_job(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        file_id: str,
+        config: typing.Optional[SpreadsheetParsingConfig] = OMIT,
+    ) -> SpreadsheetJob:
+        """
+        Create a spreadsheet parsing job.
+        Experimental: This endpoint is not yet ready for production use and is subject to change at any time.
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - file_id: str. The ID of the file to parse
+
+            - config: typing.Optional[SpreadsheetParsingConfig]. Configuration for the parsing job
+        ---
+        from llama_cloud import SpreadsheetParsingConfig
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.beta.create_spreadsheet_job(
+            file_id="string",
+            config=SpreadsheetParsingConfig(),
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"file_id": file_id}
+        if config is not OMIT:
+            _request["config"] = config
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/spreadsheet/jobs"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(SpreadsheetJob, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_spreadsheet_job(
+        self,
+        spreadsheet_job_id: str,
+        *,
+        include_results: typing.Optional[bool] = None,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+    ) -> SpreadsheetJob:
+        """
+        Get a spreadsheet parsing job.
+
+        When include_results=True (default), the response will include extracted tables and results
+        if the job is complete, eliminating the need for a separate /results call.
+
+        Experimental: This endpoint is not yet ready for production use and is subject to change at any time.
+
+        Parameters:
+            - spreadsheet_job_id: str.
+
+            - include_results: typing.Optional[bool].
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.beta.get_spreadsheet_job(
+            spreadsheet_job_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/spreadsheet/jobs/{spreadsheet_job_id}"
+            ),
+            params=remove_none_from_dict(
+                {"include_results": include_results, "project_id": project_id, "organization_id": organization_id}
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(SpreadsheetJob, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_table_download_presigned_url(
+        self,
+        spreadsheet_job_id: str,
+        table_id: int,
+        *,
+        expires_at_seconds: typing.Optional[int] = None,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+    ) -> PresignedUrl:
+        """
+        Generate a presigned URL to download a specific extracted table.
+        Experimental: This endpoint is not yet ready for production use and is subject to change at any time.
+
+        Parameters:
+            - spreadsheet_job_id: str.
+
+            - table_id: int.
+
+            - expires_at_seconds: typing.Optional[int].
+
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.beta.get_table_download_presigned_url(
+            spreadsheet_job_id="string",
+            table_id=1,
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/",
+                f"api/v1/beta/spreadsheet/jobs/{spreadsheet_job_id}/tables/{table_id}/result",
+            ),
+            params=remove_none_from_dict(
+                {"expires_at_seconds": expires_at_seconds, "project_id": project_id, "organization_id": organization_id}
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(PresignedUrl, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
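As with the sync client, the async spreadsheet endpoints support page_size/page_token pagination. Here is a sketch of walking all pages; the attribute names on PaginatedResponseSpreadsheetJob (jobs, next_page_token) are assumptions, since that model's fields are not part of this excerpt.

import asyncio
import typing

from llama_cloud.client import AsyncLlamaCloud


async def list_all_spreadsheet_jobs() -> typing.List[typing.Any]:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    jobs: typing.List[typing.Any] = []
    page_token: typing.Optional[str] = None
    while True:
        # `jobs` and `next_page_token` attribute names are illustrative assumptions;
        # check the generated PaginatedResponseSpreadsheetJob model for the real names.
        page = await client.beta.list_spreadsheet_jobs(
            include_results=False,
            page_size=50,
            page_token=page_token,
        )
        jobs.extend(page.jobs)
        page_token = page.next_page_token
        if not page_token:
            return jobs


asyncio.run(list_all_spreadsheet_jobs())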
llama_cloud/resources/jobs/client.py:

@@ -31,7 +31,6 @@ class JobsClient:
         limit: typing.Optional[int] = None,
         offset: typing.Optional[int] = None,
         sort: typing.Optional[str] = None,
-        include_usage_metrics: typing.Optional[bool] = None,
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
     ) -> PaginatedJobsHistoryWithMetrics:
@@ -51,8 +50,6 @@ class JobsClient:
 
             - sort: typing.Optional[str].
 
-            - include_usage_metrics: typing.Optional[bool]. Deprecated: This parameter is no longer supported as we've moved to usage v2. It will be removed in a future version.
-
             - project_id: typing.Optional[str].
 
             - organization_id: typing.Optional[str].
@@ -73,7 +70,6 @@ class JobsClient:
                 "limit": limit,
                 "offset": offset,
                 "sort": sort,
-                "include_usage_metrics": include_usage_metrics,
                 "project_id": project_id,
                 "organization_id": organization_id,
             }
@@ -103,7 +99,6 @@ class AsyncJobsClient:
         limit: typing.Optional[int] = None,
         offset: typing.Optional[int] = None,
         sort: typing.Optional[str] = None,
-        include_usage_metrics: typing.Optional[bool] = None,
         project_id: typing.Optional[str] = None,
         organization_id: typing.Optional[str] = None,
     ) -> PaginatedJobsHistoryWithMetrics:
@@ -123,8 +118,6 @@ class AsyncJobsClient:
 
             - sort: typing.Optional[str].
 
-            - include_usage_metrics: typing.Optional[bool]. Deprecated: This parameter is no longer supported as we've moved to usage v2. It will be removed in a future version.
-
             - project_id: typing.Optional[str].
 
             - organization_id: typing.Optional[str].
@@ -145,7 +138,6 @@ class AsyncJobsClient:
                 "limit": limit,
                 "offset": offset,
                 "sort": sort,
-                "include_usage_metrics": include_usage_metrics,
                 "project_id": project_id,
                 "organization_id": organization_id,
             }
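For SDK users this removal is a small breaking change: passing include_usage_metrics as a keyword now raises a TypeError, and the removed docstring points callers at usage v2 instead. A minimal before/after sketch, assuming the surrounding method is the jobs-history listing; its name and the client accessor are not visible in these hunks and are written as client.jobs.get_jobs purely for illustration.

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# 0.1.42 (deprecated keyword, removed in 0.1.43):
# client.jobs.get_jobs(limit=10, include_usage_metrics=True)

# 0.1.43: drop the keyword; the remaining pagination and sort parameters are unchanged.
history = client.jobs.get_jobs(limit=10, offset=0)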
llama_cloud/resources/llama_extract/__init__.py:

@@ -7,6 +7,9 @@ from .types import (
     ExtractAgentUpdateDataSchemaZeroValue,
     ExtractJobCreateBatchDataSchemaOverride,
     ExtractJobCreateBatchDataSchemaOverrideZeroValue,
+    ExtractJobCreateDataSchemaOverride,
+    ExtractJobCreateDataSchemaOverrideZeroValue,
+    ExtractJobCreatePriority,
     ExtractSchemaValidateRequestDataSchema,
     ExtractSchemaValidateRequestDataSchemaZeroValue,
     ExtractStatelessRequestDataSchema,
@@ -20,6 +23,9 @@ __all__ = [
     "ExtractAgentUpdateDataSchemaZeroValue",
     "ExtractJobCreateBatchDataSchemaOverride",
     "ExtractJobCreateBatchDataSchemaOverrideZeroValue",
+    "ExtractJobCreateDataSchemaOverride",
+    "ExtractJobCreateDataSchemaOverrideZeroValue",
+    "ExtractJobCreatePriority",
     "ExtractSchemaValidateRequestDataSchema",
     "ExtractSchemaValidateRequestDataSchemaZeroValue",
     "ExtractStatelessRequestDataSchema",
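Per the file moves in the summary above, the three ExtractJobCreate* aliases now live under llama_cloud/resources/llama_extract/types and are re-exported from the resource package. A sketch of the new import path; whether the old llama_cloud.types location still re-exports them is not visible in this excerpt.

# These names appear in the package's __all__ above, so the import resolves
# through llama_cloud/resources/llama_extract/__init__.py.
from llama_cloud.resources.llama_extract import (
    ExtractJobCreateDataSchemaOverride,
    ExtractJobCreateDataSchemaOverrideZeroValue,
    ExtractJobCreatePriority,
)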