llama-cloud 0.1.28__py3-none-any.whl → 0.1.30__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release — this version of llama-cloud might be problematic.
- llama_cloud/__init__.py +8 -2
- llama_cloud/client.py +3 -0
- llama_cloud/resources/__init__.py +2 -0
- llama_cloud/resources/admin/__init__.py +2 -0
- llama_cloud/resources/admin/client.py +88 -0
- llama_cloud/resources/data_sources/types/data_source_update_component.py +0 -2
- llama_cloud/resources/parsing/client.py +244 -0
- llama_cloud/types/__init__.py +6 -2
- llama_cloud/types/data_source_component.py +0 -2
- llama_cloud/types/data_source_create_component.py +0 -2
- llama_cloud/types/extract_job_create.py +2 -0
- llama_cloud/types/extract_models.py +4 -4
- llama_cloud/types/job_record.py +2 -0
- llama_cloud/types/legacy_parse_job_config.py +10 -0
- llama_cloud/types/{cloud_google_drive_data_source.py → license_info_response.py} +5 -5
- llama_cloud/types/llama_extract_settings.py +3 -0
- llama_cloud/types/llama_parse_parameters.py +6 -0
- llama_cloud/types/parse_job_config.py +6 -0
- llama_cloud/types/pipeline_data_source_component.py +0 -2
- llama_cloud/types/webhook_configuration.py +38 -0
- llama_cloud/types/webhook_configuration_webhook_events_item.py +37 -0
- {llama_cloud-0.1.28.dist-info → llama_cloud-0.1.30.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.28.dist-info → llama_cloud-0.1.30.dist-info}/RECORD +25 -21
- {llama_cloud-0.1.28.dist-info → llama_cloud-0.1.30.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.28.dist-info → llama_cloud-0.1.30.dist-info}/WHEEL +0 -0
llama_cloud/__init__.py
CHANGED
@@ -43,7 +43,6 @@ from .types import (
     CloudConfluenceDataSource,
     CloudDocument,
     CloudDocumentCreate,
-    CloudGoogleDriveDataSource,
     CloudJiraDataSource,
     CloudMilvusVectorStore,
     CloudMongoDbAtlasVectorSearch,
@@ -163,6 +162,7 @@ from .types import (
     JobRecordWithUsageMetrics,
     LLamaParseTransformConfig,
     LegacyParseJobConfig,
+    LicenseInfoResponse,
     LlamaExtractSettings,
     LlamaIndexCoreBaseLlmsTypesChatMessage,
     LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem,
@@ -332,6 +332,8 @@ from .types import (
     VertexAiEmbeddingConfig,
     VertexEmbeddingMode,
     VertexTextEmbedding,
+    WebhookConfiguration,
+    WebhookConfigurationWebhookEventsItem,
 )
 from .errors import UnprocessableEntityError
 from .resources import (
@@ -369,6 +371,7 @@ from .resources import (
     PipelineUpdateTransformConfig,
     RetrievalParamsSearchFiltersInferenceSchemaValue,
     UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction,
+    admin,
     beta,
     chat_apps,
     data_sinks,
@@ -432,7 +435,6 @@ __all__ = [
     "CloudConfluenceDataSource",
     "CloudDocument",
     "CloudDocumentCreate",
-    "CloudGoogleDriveDataSource",
     "CloudJiraDataSource",
     "CloudMilvusVectorStore",
     "CloudMongoDbAtlasVectorSearch",
@@ -574,6 +576,7 @@ __all__ = [
     "JobRecordWithUsageMetrics",
     "LLamaParseTransformConfig",
     "LegacyParseJobConfig",
+    "LicenseInfoResponse",
     "LlamaCloudEnvironment",
     "LlamaExtractSettings",
     "LlamaIndexCoreBaseLlmsTypesChatMessage",
@@ -757,6 +760,9 @@ __all__ = [
     "VertexAiEmbeddingConfig",
     "VertexEmbeddingMode",
     "VertexTextEmbedding",
+    "WebhookConfiguration",
+    "WebhookConfigurationWebhookEventsItem",
+    "admin",
     "beta",
     "chat_apps",
     "data_sinks",
llama_cloud/client.py
CHANGED
@@ -6,6 +6,7 @@ import httpx

 from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from .environment import LlamaCloudEnvironment
+from .resources.admin.client import AdminClient, AsyncAdminClient
 from .resources.beta.client import AsyncBetaClient, BetaClient
 from .resources.chat_apps.client import AsyncChatAppsClient, ChatAppsClient
 from .resources.data_sinks.client import AsyncDataSinksClient, DataSinksClient
@@ -54,6 +55,7 @@ class LlamaCloud:
         self.parsing = ParsingClient(client_wrapper=self._client_wrapper)
         self.chat_apps = ChatAppsClient(client_wrapper=self._client_wrapper)
         self.llama_apps = LlamaAppsClient(client_wrapper=self._client_wrapper)
+        self.admin = AdminClient(client_wrapper=self._client_wrapper)
         self.llama_extract = LlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = ReportsClient(client_wrapper=self._client_wrapper)
         self.beta = BetaClient(client_wrapper=self._client_wrapper)
@@ -88,6 +90,7 @@ class AsyncLlamaCloud:
         self.parsing = AsyncParsingClient(client_wrapper=self._client_wrapper)
         self.chat_apps = AsyncChatAppsClient(client_wrapper=self._client_wrapper)
         self.llama_apps = AsyncLlamaAppsClient(client_wrapper=self._client_wrapper)
+        self.admin = AsyncAdminClient(client_wrapper=self._client_wrapper)
         self.llama_extract = AsyncLlamaExtractClient(client_wrapper=self._client_wrapper)
         self.reports = AsyncReportsClient(client_wrapper=self._client_wrapper)
         self.beta = AsyncBetaClient(client_wrapper=self._client_wrapper)
llama_cloud/resources/__init__.py
CHANGED
@@ -1,6 +1,7 @@
 # This file was auto-generated by Fern from our API Definition.

 from . import (
+    admin,
     beta,
     chat_apps,
     data_sinks,
@@ -92,6 +93,7 @@ __all__ = [
     "PipelineUpdateTransformConfig",
     "RetrievalParamsSearchFiltersInferenceSchemaValue",
     "UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction",
+    "admin",
     "beta",
     "chat_apps",
     "data_sinks",
llama_cloud/resources/admin/client.py
@@ -0,0 +1,88 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+import urllib.parse
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.remove_none_from_dict import remove_none_from_dict
+from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.http_validation_error import HttpValidationError
+from ...types.license_info_response import LicenseInfoResponse
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class AdminClient:
+    def __init__(self, *, client_wrapper: SyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    def get_license_info(self, *, include_scopes: typing.Optional[bool] = None) -> LicenseInfoResponse:
+        """
+        Parameters:
+        - include_scopes: typing.Optional[bool]. Whether to include scopes in the response
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.admin.get_license_info()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/admin/license/info"),
+            params=remove_none_from_dict({"include_scopes": include_scopes}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(LicenseInfoResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncAdminClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def get_license_info(self, *, include_scopes: typing.Optional[bool] = None) -> LicenseInfoResponse:
+        """
+        Parameters:
+        - include_scopes: typing.Optional[bool]. Whether to include scopes in the response
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.admin.get_license_info()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/admin/license/info"),
+            params=remove_none_from_dict({"include_scopes": include_scopes}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(LicenseInfoResponse, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
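With the new admin resource wired into LlamaCloud and AsyncLlamaCloud in client.py above, the license endpoint is reachable directly from a configured client. A minimal sketch of the synchronous call, assuming only a valid API token (the token value below is a placeholder):

from llama_cloud.client import LlamaCloud

# include_scopes is the optional query parameter taken by the new endpoint.
client = LlamaCloud(token="YOUR_TOKEN")
license_info = client.admin.get_license_info(include_scopes=True)
print(license_info.status, license_info.expires_at, license_info.scopes)

The returned LicenseInfoResponse model (introduced later in this diff) carries status, expires_at, an optional scopes list, and an optional message.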
llama_cloud/resources/data_sources/types/data_source_update_component.py
CHANGED
@@ -5,7 +5,6 @@ import typing
 from ....types.cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from ....types.cloud_box_data_source import CloudBoxDataSource
 from ....types.cloud_confluence_data_source import CloudConfluenceDataSource
-from ....types.cloud_google_drive_data_source import CloudGoogleDriveDataSource
 from ....types.cloud_jira_data_source import CloudJiraDataSource
 from ....types.cloud_notion_page_data_source import CloudNotionPageDataSource
 from ....types.cloud_one_drive_data_source import CloudOneDriveDataSource
@@ -17,7 +16,6 @@ DataSourceUpdateComponent = typing.Union[
     typing.Dict[str, typing.Any],
     CloudS3DataSource,
     CloudAzStorageBlobDataSource,
-    CloudGoogleDriveDataSource,
     CloudOneDriveDataSource,
     CloudSharepointDataSource,
     CloudSlackDataSource,
llama_cloud/resources/parsing/client.py
CHANGED
@@ -283,6 +283,12 @@ class ParsingClient:
         parsing_instruction: str,
         fast_mode: bool,
         formatting_instruction: str,
+        hide_headers: bool,
+        hide_footers: bool,
+        page_header_prefix: str,
+        page_header_suffix: str,
+        page_footer_prefix: str,
+        page_footer_suffix: str,
     ) -> ParsingJob:
         """
         Parameters:
@@ -463,6 +469,18 @@ class ParsingClient:
        - fast_mode: bool.

        - formatting_instruction: str.
+
+        - hide_headers: bool.
+
+        - hide_footers: bool.
+
+        - page_header_prefix: str.
+
+        - page_header_suffix: str.
+
+        - page_footer_prefix: str.
+
+        - page_footer_suffix: str.
         """
         _request: typing.Dict[str, typing.Any] = {
             "adaptive_long_table": adaptive_long_table,
@@ -548,6 +566,12 @@
             "parsing_instruction": parsing_instruction,
             "fast_mode": fast_mode,
             "formatting_instruction": formatting_instruction,
+            "hide_headers": hide_headers,
+            "hide_footers": hide_footers,
+            "page_header_prefix": page_header_prefix,
+            "page_header_suffix": page_header_suffix,
+            "page_footer_prefix": page_footer_prefix,
+            "page_footer_suffix": page_footer_suffix,
         }
         if file is not OMIT:
             _request["file"] = file
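Taken together, the six new form fields give callers control over repeated page furniture during parsing. A rough sketch of just these options as they would be supplied to the upload call built above; the values are illustrative, and the prefix/suffix semantics are assumptions not spelled out in this diff (only hide_headers and hide_footers are documented elsewhere in this release as hiding headers/footers in the output):

# Illustrative values only; not a complete upload request.
new_parse_options = {
    "hide_headers": True,               # drop detected page headers from the output
    "hide_footers": True,               # drop detected page footers from the output
    "page_header_prefix": "<header>",   # assumed: text placed before each kept page header
    "page_header_suffix": "</header>",  # assumed: text placed after each kept page header
    "page_footer_prefix": "<footer>",   # assumed: text placed before each kept page footer
    "page_footer_suffix": "</footer>",  # assumed: text placed after each kept page footer
}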
@@ -709,6 +733,40 @@
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def get_job_raw_text_result_raw(self, job_id: str) -> typing.Any:
+        """
+        Get a job by id
+
+        Parameters:
+        - job_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.parsing.get_job_raw_text_result_raw(
+            job_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/raw/text"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def get_job_raw_text_result(self, job_id: str) -> typing.Any:
         """
         Get a job by id
@@ -725,6 +783,38 @@ class ParsingClient:
             job_id="string",
         )
         """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/pdf"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def get_job_raw_text_result_raw_pdf(self, job_id: str) -> typing.Any:
+        """
+        Get a job by id
+
+        Parameters:
+        - job_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.parsing.get_job_raw_text_result_raw_pdf(
+            job_id="string",
+        )
+        """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
@@ -835,6 +925,38 @@ class ParsingClient:
             job_id="string",
         )
         """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/xlsx"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def get_job_raw_xlsx_result_raw(self, job_id: str) -> typing.Any:
+        """
+        Get a job by id
+
+        Parameters:
+        - job_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.parsing.get_job_raw_xlsx_result_raw(
+            job_id="string",
+        )
+        """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
@@ -1317,6 +1439,12 @@ class AsyncParsingClient:
         parsing_instruction: str,
         fast_mode: bool,
         formatting_instruction: str,
+        hide_headers: bool,
+        hide_footers: bool,
+        page_header_prefix: str,
+        page_header_suffix: str,
+        page_footer_prefix: str,
+        page_footer_suffix: str,
     ) -> ParsingJob:
         """
         Parameters:
@@ -1497,6 +1625,18 @@ class AsyncParsingClient:
        - fast_mode: bool.

        - formatting_instruction: str.
+
+        - hide_headers: bool.
+
+        - hide_footers: bool.
+
+        - page_header_prefix: str.
+
+        - page_header_suffix: str.
+
+        - page_footer_prefix: str.
+
+        - page_footer_suffix: str.
         """
         _request: typing.Dict[str, typing.Any] = {
             "adaptive_long_table": adaptive_long_table,
@@ -1582,6 +1722,12 @@ class AsyncParsingClient:
             "parsing_instruction": parsing_instruction,
             "fast_mode": fast_mode,
             "formatting_instruction": formatting_instruction,
+            "hide_headers": hide_headers,
+            "hide_footers": hide_footers,
+            "page_header_prefix": page_header_prefix,
+            "page_header_suffix": page_header_suffix,
+            "page_footer_prefix": page_footer_prefix,
+            "page_footer_suffix": page_footer_suffix,
         }
         if file is not OMIT:
             _request["file"] = file
@@ -1745,6 +1891,40 @@
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def get_job_raw_text_result_raw(self, job_id: str) -> typing.Any:
+        """
+        Get a job by id
+
+        Parameters:
+        - job_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.parsing.get_job_raw_text_result_raw(
+            job_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/raw/text"
+            ),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def get_job_raw_text_result(self, job_id: str) -> typing.Any:
         """
         Get a job by id
@@ -1761,6 +1941,38 @@ class AsyncParsingClient:
             job_id="string",
         )
         """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/pdf"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_job_raw_text_result_raw_pdf(self, job_id: str) -> typing.Any:
+        """
+        Get a job by id
+
+        Parameters:
+        - job_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.parsing.get_job_raw_text_result_raw_pdf(
+            job_id="string",
+        )
+        """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
@@ -1871,6 +2083,38 @@ class AsyncParsingClient:
             job_id="string",
         )
         """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/xlsx"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def get_job_raw_xlsx_result_raw(self, job_id: str) -> typing.Any:
+        """
+        Get a job by id
+
+        Parameters:
+        - job_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.parsing.get_job_raw_xlsx_result_raw(
+            job_id="string",
+        )
+        """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(
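Both the synchronous and asynchronous parsing clients gain the same three raw-result helpers. A short sketch of the synchronous calls, mirroring the generated docstrings above (the job id is a placeholder):

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")
job_id = "string"  # placeholder job id, as in the docstrings

raw_text = client.parsing.get_job_raw_text_result_raw(job_id=job_id)
raw_pdf = client.parsing.get_job_raw_text_result_raw_pdf(job_id=job_id)
raw_xlsx = client.parsing.get_job_raw_xlsx_result_raw(job_id=job_id)

The async variants are identical apart from AsyncLlamaCloud and await.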
llama_cloud/types/__init__.py
CHANGED
@@ -46,7 +46,6 @@ from .cloud_box_data_source import CloudBoxDataSource
 from .cloud_confluence_data_source import CloudConfluenceDataSource
 from .cloud_document import CloudDocument
 from .cloud_document_create import CloudDocumentCreate
-from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
 from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_milvus_vector_store import CloudMilvusVectorStore
 from .cloud_mongo_db_atlas_vector_search import CloudMongoDbAtlasVectorSearch
@@ -172,6 +171,7 @@ from .job_record_parameters import (
 from .job_record_with_usage_metrics import JobRecordWithUsageMetrics
 from .l_lama_parse_transform_config import LLamaParseTransformConfig
 from .legacy_parse_job_config import LegacyParseJobConfig
+from .license_info_response import LicenseInfoResponse
 from .llama_extract_settings import LlamaExtractSettings
 from .llama_index_core_base_llms_types_chat_message import LlamaIndexCoreBaseLlmsTypesChatMessage
 from .llama_index_core_base_llms_types_chat_message_blocks_item import (
@@ -353,6 +353,8 @@ from .validation_error_loc_item import ValidationErrorLocItem
 from .vertex_ai_embedding_config import VertexAiEmbeddingConfig
 from .vertex_embedding_mode import VertexEmbeddingMode
 from .vertex_text_embedding import VertexTextEmbedding
+from .webhook_configuration import WebhookConfiguration
+from .webhook_configuration_webhook_events_item import WebhookConfigurationWebhookEventsItem

 __all__ = [
     "AdvancedModeTransformConfig",
@@ -397,7 +399,6 @@ __all__ = [
     "CloudConfluenceDataSource",
     "CloudDocument",
     "CloudDocumentCreate",
-    "CloudGoogleDriveDataSource",
     "CloudJiraDataSource",
     "CloudMilvusVectorStore",
     "CloudMongoDbAtlasVectorSearch",
@@ -517,6 +518,7 @@ __all__ = [
     "JobRecordWithUsageMetrics",
     "LLamaParseTransformConfig",
     "LegacyParseJobConfig",
+    "LicenseInfoResponse",
     "LlamaExtractSettings",
     "LlamaIndexCoreBaseLlmsTypesChatMessage",
     "LlamaIndexCoreBaseLlmsTypesChatMessageBlocksItem",
@@ -686,4 +688,6 @@ __all__ = [
     "VertexAiEmbeddingConfig",
     "VertexEmbeddingMode",
     "VertexTextEmbedding",
+    "WebhookConfiguration",
+    "WebhookConfigurationWebhookEventsItem",
 ]
llama_cloud/types/data_source_component.py
CHANGED
@@ -5,7 +5,6 @@ import typing
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from .cloud_box_data_source import CloudBoxDataSource
 from .cloud_confluence_data_source import CloudConfluenceDataSource
-from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
 from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_notion_page_data_source import CloudNotionPageDataSource
 from .cloud_one_drive_data_source import CloudOneDriveDataSource
@@ -17,7 +16,6 @@ DataSourceComponent = typing.Union[
     typing.Dict[str, typing.Any],
     CloudS3DataSource,
     CloudAzStorageBlobDataSource,
-    CloudGoogleDriveDataSource,
     CloudOneDriveDataSource,
     CloudSharepointDataSource,
     CloudSlackDataSource,
llama_cloud/types/data_source_create_component.py
CHANGED
@@ -5,7 +5,6 @@ import typing
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from .cloud_box_data_source import CloudBoxDataSource
 from .cloud_confluence_data_source import CloudConfluenceDataSource
-from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
 from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_notion_page_data_source import CloudNotionPageDataSource
 from .cloud_one_drive_data_source import CloudOneDriveDataSource
@@ -17,7 +16,6 @@ DataSourceCreateComponent = typing.Union[
     typing.Dict[str, typing.Any],
     CloudS3DataSource,
     CloudAzStorageBlobDataSource,
-    CloudGoogleDriveDataSource,
     CloudOneDriveDataSource,
     CloudSharepointDataSource,
     CloudSlackDataSource,
llama_cloud/types/extract_job_create.py
CHANGED
@@ -6,6 +6,7 @@ import typing
 from ..core.datetime_utils import serialize_datetime
 from .extract_config import ExtractConfig
 from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
+from .webhook_configuration import WebhookConfiguration

 try:
     import pydantic
@@ -21,6 +22,7 @@ class ExtractJobCreate(pydantic.BaseModel):
     Schema for creating an extraction job.
     """

+    webhook_configurations: typing.Optional[typing.List[WebhookConfiguration]]
     extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
     file_id: str = pydantic.Field(description="The id of the file")
     data_schema_override: typing.Optional[ExtractJobCreateDataSchemaOverride] = pydantic.Field(
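Because ExtractJobCreate now carries an optional list of WebhookConfiguration objects (the model is added later in this diff), webhook callbacks can be attached when the job payload is built. A minimal sketch with placeholder ids, importing from the module paths shown in this diff and assuming the model's remaining fields are optional:

from llama_cloud.types.extract_job_create import ExtractJobCreate
from llama_cloud.types.webhook_configuration import WebhookConfiguration

# Placeholder ids; webhook_configurations is the new optional field.
job = ExtractJobCreate(
    extraction_agent_id="agent-id-placeholder",
    file_id="file-id-placeholder",
    webhook_configurations=[
        WebhookConfiguration(webhook_url="https://example.com/hooks/extract"),
    ],
)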
llama_cloud/types/extract_models.py
CHANGED
@@ -14,7 +14,7 @@ class ExtractModels(str, enum.Enum):
     O_3_MINI = "o3-mini"
     GEMINI_25_FLASH = "gemini-2.5-flash"
     GEMINI_25_PRO = "gemini-2.5-pro"
-
+    GEMINI_25_FLASH_LITE_PREVIEW_0617 = "gemini-2.5-flash-lite-preview-06-17"
     GPT_4_O = "gpt-4o"
     GPT_4_O_MINI = "gpt-4o-mini"

@@ -27,7 +27,7 @@ class ExtractModels(str, enum.Enum):
         o_3_mini: typing.Callable[[], T_Result],
         gemini_25_flash: typing.Callable[[], T_Result],
         gemini_25_pro: typing.Callable[[], T_Result],
-
+        gemini_25_flash_lite_preview_0617: typing.Callable[[], T_Result],
         gpt_4_o: typing.Callable[[], T_Result],
         gpt_4_o_mini: typing.Callable[[], T_Result],
     ) -> T_Result:
@@ -45,8 +45,8 @@ class ExtractModels(str, enum.Enum):
             return gemini_25_flash()
         if self is ExtractModels.GEMINI_25_PRO:
             return gemini_25_pro()
-        if self is ExtractModels.
-            return
+        if self is ExtractModels.GEMINI_25_FLASH_LITE_PREVIEW_0617:
+            return gemini_25_flash_lite_preview_0617()
         if self is ExtractModels.GPT_4_O:
             return gpt_4_o()
         if self is ExtractModels.GPT_4_O_MINI:
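The updated member keeps the enum string-valued, so it can be compared or serialized like the existing model identifiers. A one-line check against the new value:

from llama_cloud.types.extract_models import ExtractModels

model = ExtractModels.GEMINI_25_FLASH_LITE_PREVIEW_0617
assert model.value == "gemini-2.5-flash-lite-preview-06-17"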
llama_cloud/types/job_record.py
CHANGED
@@ -7,6 +7,7 @@ from ..core.datetime_utils import serialize_datetime
 from .job_names import JobNames
 from .job_record_parameters import JobRecordParameters
 from .status_enum import StatusEnum
+from .webhook_configuration import WebhookConfiguration

 try:
     import pydantic
@@ -22,6 +23,7 @@ class JobRecord(pydantic.BaseModel):
     Schema for a job's metadata.
     """

+    webhook_configurations: typing.Optional[typing.List[WebhookConfiguration]]
     job_name: JobNames = pydantic.Field(description="The name of the job.")
     partitions: typing.Dict[str, str] = pydantic.Field(
         description="The partitions for this execution. Used for determining where to save job output."
llama_cloud/types/legacy_parse_job_config.py
CHANGED
@@ -175,6 +175,16 @@ class LegacyParseJobConfig(pydantic.BaseModel):
     system_prompt: typing.Optional[str] = pydantic.Field(alias="systemPrompt")
     system_prompt_append: typing.Optional[str] = pydantic.Field(alias="systemPromptAppend")
     user_prompt: typing.Optional[str] = pydantic.Field(alias="userPrompt")
+    page_header_prefix: typing.Optional[str] = pydantic.Field(alias="pageHeaderPrefix")
+    page_header_suffix: typing.Optional[str] = pydantic.Field(alias="pageHeaderSuffix")
+    page_footer_prefix: typing.Optional[str] = pydantic.Field(alias="pageFooterPrefix")
+    page_footer_suffix: typing.Optional[str] = pydantic.Field(alias="pageFooterSuffix")
+    hide_headers: typing.Optional[bool] = pydantic.Field(
+        alias="hideHeaders", description="Whether to hide headers in the output."
+    )
+    hide_footers: typing.Optional[bool] = pydantic.Field(
+        alias="hideFooters", description="Whether to hide footers in the output."
+    )

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/{cloud_google_drive_data_source.py → license_info_response.py}
@@ -14,11 +14,11 @@ except ImportError:
     import pydantic  # type: ignore


-class
-
-
-
-
+class LicenseInfoResponse(pydantic.BaseModel):
+    status: str = pydantic.Field(description="License validation status")
+    expires_at: dt.datetime = pydantic.Field(description="License expiration date")
+    scopes: typing.Optional[typing.List[str]]
+    message: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/llama_extract_settings.py
CHANGED
@@ -39,6 +39,9 @@ class LlamaExtractSettings(pydantic.BaseModel):
     extraction_agent_config: typing.Optional[typing.Dict[str, StructParseConf]] = pydantic.Field(
         description="The configuration for the extraction agent."
     )
+    use_multimodal_parsing: typing.Optional[bool] = pydantic.Field(
+        description="Whether to use experimental multimodal parsing."
+    )
     use_pixel_extraction: typing.Optional[bool] = pydantic.Field(
         description="Whether to use extraction over pixels for multimodal mode."
     )
llama_cloud/types/llama_parse_parameters.py
CHANGED
@@ -101,6 +101,12 @@ class LlamaParseParameters(pydantic.BaseModel):
     strict_mode_reconstruction: typing.Optional[bool]
     strict_mode_buggy_font: typing.Optional[bool]
     save_images: typing.Optional[bool]
+    hide_headers: typing.Optional[bool]
+    hide_footers: typing.Optional[bool]
+    page_header_prefix: typing.Optional[str]
+    page_header_suffix: typing.Optional[str]
+    page_footer_prefix: typing.Optional[str]
+    page_footer_suffix: typing.Optional[str]
     ignore_document_elements_for_layout_detection: typing.Optional[bool]
     output_tables_as_html: typing.Optional[bool] = pydantic.Field(alias="output_tables_as_HTML")
     internal_is_screenshot_job: typing.Optional[bool]
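The same header/footer controls surface here as optional fields on LlamaParseParameters, so they can be set when building parse parameters programmatically. A minimal sketch, assuming the model's other fields are optional as the surrounding declarations suggest (unset fields stay None):

from llama_cloud.types.llama_parse_parameters import LlamaParseParameters

params = LlamaParseParameters(
    hide_headers=True,
    hide_footers=True,
    page_header_prefix="[header] ",
    page_footer_suffix=" [/footer]",
)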
llama_cloud/types/parse_job_config.py
CHANGED
@@ -103,6 +103,12 @@ class ParseJobConfig(pydantic.BaseModel):
     strict_mode_reconstruction: typing.Optional[bool]
     strict_mode_buggy_font: typing.Optional[bool]
     save_images: typing.Optional[bool]
+    hide_headers: typing.Optional[bool]
+    hide_footers: typing.Optional[bool]
+    page_header_prefix: typing.Optional[str]
+    page_header_suffix: typing.Optional[str]
+    page_footer_prefix: typing.Optional[str]
+    page_footer_suffix: typing.Optional[str]
     ignore_document_elements_for_layout_detection: typing.Optional[bool]
     output_tables_as_html: typing.Optional[bool] = pydantic.Field(alias="output_tables_as_HTML")
     internal_is_screenshot_job: typing.Optional[bool]
llama_cloud/types/pipeline_data_source_component.py
CHANGED
@@ -5,7 +5,6 @@ import typing
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from .cloud_box_data_source import CloudBoxDataSource
 from .cloud_confluence_data_source import CloudConfluenceDataSource
-from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
 from .cloud_jira_data_source import CloudJiraDataSource
 from .cloud_notion_page_data_source import CloudNotionPageDataSource
 from .cloud_one_drive_data_source import CloudOneDriveDataSource
@@ -17,7 +16,6 @@ PipelineDataSourceComponent = typing.Union[
     typing.Dict[str, typing.Any],
     CloudS3DataSource,
     CloudAzStorageBlobDataSource,
-    CloudGoogleDriveDataSource,
     CloudOneDriveDataSource,
     CloudSharepointDataSource,
     CloudSlackDataSource,
llama_cloud/types/webhook_configuration.py
@@ -0,0 +1,38 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .webhook_configuration_webhook_events_item import WebhookConfigurationWebhookEventsItem
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class WebhookConfiguration(pydantic.BaseModel):
+    """
+    Allows the user to configure webhook options for notifications and callbacks.
+    """
+
+    webhook_url: typing.Optional[str]
+    webhook_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]]
+    webhook_events: typing.Optional[typing.List[WebhookConfigurationWebhookEventsItem]]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
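A minimal sketch of constructing the new model; the URL, header value, and event selection are placeholders:

from llama_cloud.types.webhook_configuration import WebhookConfiguration
from llama_cloud.types.webhook_configuration_webhook_events_item import (
    WebhookConfigurationWebhookEventsItem,
)

config = WebhookConfiguration(
    webhook_url="https://example.com/hooks/extract",
    webhook_headers={"X-Signature": "placeholder-secret"},
    webhook_events=[
        WebhookConfigurationWebhookEventsItem.EXTRACT_SUCCESS,
        WebhookConfigurationWebhookEventsItem.EXTRACT_ERROR,
    ],
)

All three fields are optional, and the model is frozen, so a configuration is immutable once built.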
llama_cloud/types/webhook_configuration_webhook_events_item.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class WebhookConfigurationWebhookEventsItem(str, enum.Enum):
+    EXTRACT_PENDING = "extract.pending"
+    EXTRACT_SUCCESS = "extract.success"
+    EXTRACT_ERROR = "extract.error"
+    EXTRACT_PARTIAL_SUCCESS = "extract.partial_success"
+    EXTRACT_CANCELLED = "extract.cancelled"
+    UNMAPPED_EVENT = "unmapped_event"
+
+    def visit(
+        self,
+        extract_pending: typing.Callable[[], T_Result],
+        extract_success: typing.Callable[[], T_Result],
+        extract_error: typing.Callable[[], T_Result],
+        extract_partial_success: typing.Callable[[], T_Result],
+        extract_cancelled: typing.Callable[[], T_Result],
+        unmapped_event: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is WebhookConfigurationWebhookEventsItem.EXTRACT_PENDING:
+            return extract_pending()
+        if self is WebhookConfigurationWebhookEventsItem.EXTRACT_SUCCESS:
+            return extract_success()
+        if self is WebhookConfigurationWebhookEventsItem.EXTRACT_ERROR:
+            return extract_error()
+        if self is WebhookConfigurationWebhookEventsItem.EXTRACT_PARTIAL_SUCCESS:
+            return extract_partial_success()
+        if self is WebhookConfigurationWebhookEventsItem.EXTRACT_CANCELLED:
+            return extract_cancelled()
+        if self is WebhookConfigurationWebhookEventsItem.UNMAPPED_EVENT:
+            return unmapped_event()
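The enum follows the generated exhaustive-visit pattern, which lets a webhook consumer branch on the delivered event string without manual if/else chains. A short sketch with illustrative handlers:

from llama_cloud.types.webhook_configuration_webhook_events_item import (
    WebhookConfigurationWebhookEventsItem,
)

event = WebhookConfigurationWebhookEventsItem("extract.success")
label = event.visit(
    extract_pending=lambda: "queued",
    extract_success=lambda: "done",
    extract_error=lambda: "failed",
    extract_partial_success=lambda: "partially done",
    extract_cancelled=lambda: "cancelled",
    unmapped_event=lambda: "unknown",
)
print(label)  # -> "done"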
{llama_cloud-0.1.28.dist-info → llama_cloud-0.1.30.dist-info}/RECORD
CHANGED
@@ -1,5 +1,5 @@
-llama_cloud/__init__.py,sha256=
-llama_cloud/client.py,sha256=
+llama_cloud/__init__.py,sha256=J8cweD7dD1ETW9wbr9TdNztTs_GP5amLcQ64J5AfFes,25215
+llama_cloud/client.py,sha256=ylV-19129KufjzRDCoH4yARObhdUxc9vLL4kV-7fIck,6132
 llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
 llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
 llama_cloud/core/client_wrapper.py,sha256=xmj0jCdQ0ySzbSqHUWOkpRRy069y74I_HuXkWltcsVM,1507
@@ -9,7 +9,9 @@ llama_cloud/core/remove_none_from_dict.py,sha256=8m91FC3YuVem0Gm9_sXhJ2tGvP33owJ
 llama_cloud/environment.py,sha256=feTjOebeFZMrBdnHat4RE5aHlpt-sJm4NhK4ntV1htI,167
 llama_cloud/errors/__init__.py,sha256=pbbVUFtB9LCocA1RMWMMF_RKjsy5YkOKX5BAuE49w6g,170
 llama_cloud/errors/unprocessable_entity_error.py,sha256=FvR7XPlV3Xx5nu8HNlmLhBRdk4so_gCHjYT5PyZe6sM,313
-llama_cloud/resources/__init__.py,sha256=
+llama_cloud/resources/__init__.py,sha256=n3hSlo3KQatoFhDLk7Vm_hB_5lzh70T0S2r3cSpDWec,4211
+llama_cloud/resources/admin/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
+llama_cloud/resources/admin/client.py,sha256=mzA_ezCjugKNmvWCMWEF0Z0k86ErACWov1VtPV1J2tU,3678
 llama_cloud/resources/beta/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/beta/client.py,sha256=mfqHAPWQEZwZM0LRYkia36EFdGrU2sZ_Y-MM1JU_0Yg,14966
 llama_cloud/resources/chat_apps/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -21,7 +23,7 @@ llama_cloud/resources/data_sinks/types/data_sink_update_component.py,sha256=EWbs
 llama_cloud/resources/data_sources/__init__.py,sha256=McURkcNBGHXH1hmRDRmZI1dRzJrekCTHZsgv03r2oZI,227
 llama_cloud/resources/data_sources/client.py,sha256=SZFm8bW5nkaXringdSnmxHqvVjKM7cNNOtqVXjgTKhc,21855
 llama_cloud/resources/data_sources/types/__init__.py,sha256=Cd5xEECTzXqQSfJALfJPSjudlSLeb3RENeJVi8vwPbM,303
-llama_cloud/resources/data_sources/types/data_source_update_component.py,sha256=
+llama_cloud/resources/data_sources/types/data_source_update_component.py,sha256=OjMWPLF9hKl1gUdi9d87uW7W3ITnscphTA1_NLc2PoE,1061
 llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py,sha256=3aFC-p8MSxjhOu2nFtqk0pixj6RqNqcFnbOYngUdZUk,215
 llama_cloud/resources/embedding_model_configs/__init__.py,sha256=cXDtKKq-gj7yjFjdQ5GrGyPs-T5tRV_0JjUMGlAbdUs,1115
 llama_cloud/resources/embedding_model_configs/client.py,sha256=2JDvZJtSger9QJ8luPct-2zvwjaJAR8VcKsTZ1wgYTE,17769
@@ -53,7 +55,7 @@ llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_s
 llama_cloud/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/organizations/client.py,sha256=CdrdNdB9R-bOsNqZ4Jbm1BzG1RafXMFjuCsrVYf2OrE,56567
 llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/parsing/client.py,sha256=
+llama_cloud/resources/parsing/client.py,sha256=tUA6jXDoUbbu4qM-VvoUDU6BFSOZTUSfD6lz7wfCqnA,87707
 llama_cloud/resources/pipelines/__init__.py,sha256=zyvVEOF_krvEZkCIj_kZoMKfhDqHo_R32a1mv9CriQc,1193
 llama_cloud/resources/pipelines/client.py,sha256=BcBqzTPu1LUsdimXvuaaKjUu6w5xjbL-ZBfWsO183Vk,132360
 llama_cloud/resources/pipelines/types/__init__.py,sha256=C68NQ5QzA0dFXf9oePFFGmV1vn96jcAp-QAznSgoRYQ,1375
@@ -71,7 +73,7 @@ llama_cloud/resources/responses/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_
 llama_cloud/resources/responses/client.py,sha256=ard4U9yZcD89pJ_hyYqeRDIfQYaX2WGl36OK7re8q3U,5481
 llama_cloud/resources/retrievers/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/retrievers/client.py,sha256=z2LhmA-cZVFzr9P6loeCZYnJbvSIk0QitFeVFp-IyZk,32126
-llama_cloud/types/__init__.py,sha256=
+llama_cloud/types/__init__.py,sha256=AN53ky-a7dob2L_8xPRUSyS75IGy5MZqT8-7k85ASIs,29974
 llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
 llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
 llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
@@ -106,7 +108,6 @@ llama_cloud/types/cloud_box_data_source.py,sha256=9bffCaKGvctSsk9OdTpzzP__O1NDpb
 llama_cloud/types/cloud_confluence_data_source.py,sha256=ok8BOv51SC4Ia9kX3DC8LuZjnP8hmdy-vqzOrTZek2A,1720
 llama_cloud/types/cloud_document.py,sha256=Rg_H8lcz2TzxEAIdU-m5mGpkM7s0j1Cn4JHkXYddmGs,1255
 llama_cloud/types/cloud_document_create.py,sha256=fQ1gZAtLCpr-a-sPbMez_5fK9JMU3uyp2tNvIzWNG3U,1278
-llama_cloud/types/cloud_google_drive_data_source.py,sha256=Gzr9vtw57Hl2hxa9qoWdIO6XO3DfSLvivJbABVQDJDQ,1219
 llama_cloud/types/cloud_jira_data_source.py,sha256=9R20k8Ne0Bl9X5dgSxpM_IGOFmC70Llz0pJ93rAKRvw,1458
 llama_cloud/types/cloud_milvus_vector_store.py,sha256=CHFTJSYPZKYPUU-jpB1MG8OwRvnPiT07o7cYCvQMZLA,1235
 llama_cloud/types/cloud_mongo_db_atlas_vector_search.py,sha256=CQ9euGBd3a72dvpTapRBhakme-fQbY2OaSoe0GDSHDo,1771
@@ -132,9 +133,9 @@ llama_cloud/types/data_sink_component.py,sha256=uvuxLY3MPDpv_bkT0y-tHSZVPRSHCkDB
 llama_cloud/types/data_sink_create.py,sha256=dAaFPCwZ5oX0Fbf7ij62dzSaYnrhj3EHmnLnYnw2KgI,1360
 llama_cloud/types/data_sink_create_component.py,sha256=8QfNKSTJV_sQ0nJxlpfh0fBkMTSnQD1DTJR8ZMYaesI,755
 llama_cloud/types/data_source.py,sha256=4_lTRToLO4u9LYK66VygCPycrZuyct_aiovlxG5H2sE,1768
-llama_cloud/types/data_source_component.py,sha256=
+llama_cloud/types/data_source_component.py,sha256=QBxAneOFe8crS0z-eFo3gd1siToQ4hYsLdfB4p3ZeVU,974
 llama_cloud/types/data_source_create.py,sha256=s0bAX_GUwiRdrL-PXS9ROrvq3xpmqbqzdMa6thqL2P4,1581
-llama_cloud/types/data_source_create_component.py,sha256
+llama_cloud/types/data_source_create_component.py,sha256=6dlkvut0gyy6JA_F4--xPHYOCHi14N6oooWOnOEugzE,980
 llama_cloud/types/data_source_create_custom_metadata_value.py,sha256=ejSsQNbszYQaUWFh9r9kQpHf88qbhuRv1SI9J_MOSC0,215
 llama_cloud/types/data_source_custom_metadata_value.py,sha256=pTZn5yjZYmuOhsLABFJOKZblZUkRqo1CqLAuP5tKji4,209
 llama_cloud/types/data_source_update_dispatcher_config.py,sha256=Sh6HhXfEV2Z6PYhkYQucs2MxyKVpL3UPV-I4cbf--bA,1242
@@ -155,11 +156,11 @@ llama_cloud/types/extract_agent_data_schema_value.py,sha256=UaDQ2KjajLDccW7F4NKd
 llama_cloud/types/extract_config.py,sha256=pYErVV6Lq4VteqO3Wxu4exCfiGnJ9_aqSuXiLuNI6JE,2194
 llama_cloud/types/extract_config_priority.py,sha256=btl5lxl25Ve6_lTbQzQyjOKle8XoY0r16lk3364c3uw,795
 llama_cloud/types/extract_job.py,sha256=Yx4fDdCdylAji2LPTwqflVpz1o9slpj9tTLS93-1tzU,1431
-llama_cloud/types/extract_job_create.py,sha256=
+llama_cloud/types/extract_job_create.py,sha256=yLtrh46fsK8Q2_hz8Ub3mvGriSn5BI2OjjwpWRy5YsA,1680
 llama_cloud/types/extract_job_create_data_schema_override.py,sha256=vuiJ2lGJjbXEnvFKzVnKyvgwhMXPg1Pb5GZne2DrB60,330
 llama_cloud/types/extract_job_create_data_schema_override_zero_value.py,sha256=HHEYxOSQXXyBYOiUQg_qwfQtXFj-OtThMwbUDBIgZU0,223
 llama_cloud/types/extract_mode.py,sha256=DwTMzDq3HHJop_fxQelHEE_k8UcdDz-W_v_Oj2WWXLk,931
-llama_cloud/types/extract_models.py,sha256=
+llama_cloud/types/extract_models.py,sha256=tx4NquIoJ4irXncqRUjnuE542nPu5jMuzy-ZaMdg3PI,1958
 llama_cloud/types/extract_resultset.py,sha256=Alje0YQJUiA_aKi0hQs7TAnhDmZuQ_yL9b6HCNYBFQg,1627
 llama_cloud/types/extract_resultset_data.py,sha256=v9Ae4SxLsvYPE9crko4N16lBjsxuZpz1yrUOhnaM_VY,427
 llama_cloud/types/extract_resultset_data_item_value.py,sha256=JwqgDIGW0irr8QWaSTIrl24FhGxTUDOXIbxoSdIjuxs,209
@@ -198,16 +199,17 @@ llama_cloud/types/ingestion_error_response.py,sha256=8u0cyT44dnpkNeUKemTvJMUqi_W
 llama_cloud/types/input_message.py,sha256=H7XMpGjkk7f9Fgz4YuuD9OBpNDR68lnP91LxCP1R-Vw,1433
 llama_cloud/types/job_name_mapping.py,sha256=2dQFQlVHoeSlkyEKSEJv0M3PzJf7hMvkuABj3vMY7ys,1617
 llama_cloud/types/job_names.py,sha256=WacongwoJygg_gCyYjPsOVv3cmVtRaX633JNgFxy-d8,3915
-llama_cloud/types/job_record.py,sha256=
+llama_cloud/types/job_record.py,sha256=Z6sF9AruZJo-kTRgNufAWS3WK1yaEqop6kox1GpBYy4,2219
 llama_cloud/types/job_record_parameters.py,sha256=Oqxp5y0owPfjLc_NR7AYE8P3zM2PJo36N9olbyNl7AA,3425
 llama_cloud/types/job_record_with_usage_metrics.py,sha256=iNV2do5TB_0e3PoOz_DJyAaM6Cn9G8KG-dGPGgEs5SY,1198
 llama_cloud/types/l_lama_parse_transform_config.py,sha256=YQRJZvKh1Ee2FUyW_N0nqYJoW599qBgH3JCH9SH6YLo,1249
-llama_cloud/types/legacy_parse_job_config.py,sha256=
-llama_cloud/types/
+llama_cloud/types/legacy_parse_job_config.py,sha256=5l1ZT0n2UTX5t45ePjZZ07RkQNUg5E6n0Xb1gz_CzxE,12522
+llama_cloud/types/license_info_response.py,sha256=fE9vcWO8k92SBqb_wOyBu_16C61s72utA-SifEi9iBc,1192
+llama_cloud/types/llama_extract_settings.py,sha256=Y60XxsxVHUtX-ZjC0tyNzsaDIj_ojxYC1iy2w4vti54,2532
 llama_cloud/types/llama_index_core_base_llms_types_chat_message.py,sha256=tF54vcCwjArHWozzC81bCZfI4gJBmhnx6s592VoQ5UM,1452
 llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py,sha256=-aL8fh-w2Xf4uQs_LHzb3q6LL_onLAcVzCR5yMI4qJw,1571
 llama_cloud/types/llama_index_core_base_llms_types_message_role.py,sha256=i8G2QGRrEUmb1P9BrKW3frfTOQ9RlJvMU0FMCRNpE5c,1602
-llama_cloud/types/llama_parse_parameters.py,sha256
+llama_cloud/types/llama_parse_parameters.py,sha256=SiSqreBFW5hGf7gVuXdITwW1ugxv03L5VpNQqoeI6Pk,6260
 llama_cloud/types/llama_parse_parameters_priority.py,sha256=EFRudtaID_s8rLKlfW8O8O9TDbpZdniIidK-xchhfRI,830
 llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=B_0N3f8Aq59W9FbsH50mGBUiyWTIXQjHFl739uAyaQw,11207
 llama_cloud/types/llm_model_data.py,sha256=6rrycqGwlK3LZ2S-WtgmeomithdLhDCgwBBZQ5KLaso,1300
@@ -240,7 +242,7 @@ llama_cloud/types/paginated_jobs_history_with_metrics.py,sha256=Bxy6N0x0FARJhgwN
 llama_cloud/types/paginated_list_cloud_documents_response.py,sha256=MsjS0SWlT0syELDck4x2sxxR3_NC1e6QTdepgVmK9aY,1341
 llama_cloud/types/paginated_list_pipeline_files_response.py,sha256=2TKR2oHSQRyLMqWz1qQBSIvz-ZJb8U_94367lwOJ2S4,1317
 llama_cloud/types/paginated_report_response.py,sha256=o79QhQi9r0HZZrhvRlA6WGjxtyPuxN0xONhwXSwxtcs,1104
-llama_cloud/types/parse_job_config.py,sha256=
+llama_cloud/types/parse_job_config.py,sha256=MuP202tVYpLxtHvobcCzMog348ACahqGdD4z1PHjd6o,6723
 llama_cloud/types/parse_job_config_priority.py,sha256=__-gVv1GzktVCYZVyl6zeDt0pAZwYl-mxM0xkIHPEro,800
 llama_cloud/types/parse_plan_level.py,sha256=GBkDS19qfHseBa17EXfuTPNT4GNv5alyPrWEvWji3GY,528
 llama_cloud/types/parser_languages.py,sha256=Ps3IlaSt6tyxEI657N3-vZL96r2puk8wsf31cWnO-SI,10840
@@ -262,7 +264,7 @@ llama_cloud/types/pipeline_create.py,sha256=PKchM5cxkidXVFv2qON0uVh5lv8aqsy5OrZv
 llama_cloud/types/pipeline_create_embedding_config.py,sha256=PQqmVBFUyZXYKKBmVQF2zPsGp1L6rje6g3RtXEcdfc8,2811
 llama_cloud/types/pipeline_create_transform_config.py,sha256=HP6tzLsw_pomK1Ye2PYCS_XDZK_TMgg22mz17_zYKFg,303
 llama_cloud/types/pipeline_data_source.py,sha256=g8coq6ohp09TtqzvB3_A8Nzery3J5knIfxGWzUtozmg,2381
-llama_cloud/types/pipeline_data_source_component.py,sha256=
+llama_cloud/types/pipeline_data_source_component.py,sha256=pcAIb6xuRJajDVBF_a4_2USPLtZ8ve-WQvSdKKQu50Q,982
 llama_cloud/types/pipeline_data_source_create.py,sha256=wMsymqB-YGyf3jdQr-N5ODVG6v0w68EMxGBNdQXeJe0,1178
 llama_cloud/types/pipeline_data_source_custom_metadata_value.py,sha256=8n3r60sxMx4_udW0yzJZxzyWeK6L3cc2-jLGZFW4EDs,217
 llama_cloud/types/pipeline_data_source_status.py,sha256=BD4xoftwp9lWC8EjJTnf3boIG_AyzjLPuP4qJxGhmcc,1039
@@ -349,7 +351,9 @@ llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPX
 llama_cloud/types/vertex_ai_embedding_config.py,sha256=DvQk2xMJFmo54MEXTzoM4KSADyhGm_ygmFyx6wIcQdw,1159
 llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2BaIMltDqGnIowU,1217
 llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
-llama_cloud
-llama_cloud
-llama_cloud-0.1.
-llama_cloud-0.1.
+llama_cloud/types/webhook_configuration.py,sha256=_Xm15whrWoKNBuCoO5y_NunA-ByhCAYK87LnC4W-Pzg,1350
+llama_cloud/types/webhook_configuration_webhook_events_item.py,sha256=LTfOwphnoYUQYwsHGTlCxoVU_PseIRAbmQJRBdyXnbg,1519
+llama_cloud-0.1.30.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+llama_cloud-0.1.30.dist-info/METADATA,sha256=uHG2_pSkr7dmrXGGSTKO11eIshFPn2ke4kEW2aq0Kgc,1194
+llama_cloud-0.1.30.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+llama_cloud-0.1.30.dist-info/RECORD,,
{llama_cloud-0.1.28.dist-info → llama_cloud-0.1.30.dist-info}/LICENSE
File without changes
{llama_cloud-0.1.28.dist-info → llama_cloud-0.1.30.dist-info}/WHEEL
File without changes