llama-cloud 0.0.6__py3-none-any.whl → 0.0.8__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Note: this release of llama-cloud has been flagged as potentially problematic.
- llama_cloud/__init__.py +18 -4
- llama_cloud/client.py +3 -0
- llama_cloud/resources/__init__.py +4 -1
- llama_cloud/resources/component_definitions/client.py +18 -18
- llama_cloud/resources/data_sinks/client.py +2 -2
- llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +2 -0
- llama_cloud/resources/data_sources/client.py +2 -2
- llama_cloud/resources/data_sources/types/data_source_update_component_one.py +4 -4
- llama_cloud/resources/evals/client.py +12 -12
- llama_cloud/resources/extraction/__init__.py +5 -0
- llama_cloud/resources/extraction/client.py +648 -0
- llama_cloud/resources/extraction/types/__init__.py +5 -0
- llama_cloud/resources/extraction/types/extraction_schema_update_data_schema_value.py +7 -0
- llama_cloud/resources/files/client.py +8 -8
- llama_cloud/resources/parsing/client.py +16 -0
- llama_cloud/resources/pipelines/client.py +156 -12
- llama_cloud/resources/projects/client.py +24 -24
- llama_cloud/types/__init__.py +14 -4
- llama_cloud/types/azure_open_ai_embedding.py +3 -0
- llama_cloud/types/{cloud_gcs_data_source.py → cloud_azure_ai_search_vector_store.py} +9 -7
- llama_cloud/types/{cloud_google_drive_data_source.py → cloud_notion_page_data_source.py} +4 -5
- llama_cloud/types/cloud_slack_data_source.py +42 -0
- llama_cloud/types/configurable_data_sink_names.py +4 -0
- llama_cloud/types/configurable_data_source_names.py +8 -8
- llama_cloud/types/data_sink_component_one.py +2 -0
- llama_cloud/types/data_sink_create_component_one.py +2 -0
- llama_cloud/types/data_source_component_one.py +4 -4
- llama_cloud/types/data_source_create_component_one.py +4 -4
- llama_cloud/types/eval_dataset_job_record.py +1 -1
- llama_cloud/types/extraction_result.py +42 -0
- llama_cloud/types/extraction_result_data_value.py +5 -0
- llama_cloud/types/extraction_schema.py +44 -0
- llama_cloud/types/extraction_schema_data_schema_value.py +7 -0
- llama_cloud/types/llama_parse_parameters.py +2 -0
- llama_cloud/types/llama_parse_supported_file_extensions.py +124 -0
- llama_cloud/types/pipeline.py +0 -4
- llama_cloud/types/pipeline_data_source_component_one.py +4 -4
- llama_cloud/types/text_node.py +1 -0
- {llama_cloud-0.0.6.dist-info → llama_cloud-0.0.8.dist-info}/METADATA +1 -2
- {llama_cloud-0.0.6.dist-info → llama_cloud-0.0.8.dist-info}/RECORD +42 -33
- {llama_cloud-0.0.6.dist-info → llama_cloud-0.0.8.dist-info}/WHEEL +1 -1
- {llama_cloud-0.0.6.dist-info → llama_cloud-0.0.8.dist-info}/LICENSE +0 -0
llama_cloud/resources/files/client.py
CHANGED

@@ -31,7 +31,7 @@ class FilesClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    def
+    def get_file(self, id: str, *, project_id: typing.Optional[str] = None) -> File:
         """
         Read File metadata objects.
 
@@ -45,7 +45,7 @@ class FilesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.files.
+        client.files.get_file(
             id="string",
         )
         """
@@ -101,7 +101,7 @@ class FilesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_files(self, *, project_id: typing.Optional[str] = None) -> typing.List[File]:
         """
         Read File metadata objects.
 
@@ -113,7 +113,7 @@ class FilesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.files.
+        client.files.list_files()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -293,7 +293,7 @@ class AsyncFilesClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    async def
+    async def get_file(self, id: str, *, project_id: typing.Optional[str] = None) -> File:
         """
         Read File metadata objects.
 
@@ -307,7 +307,7 @@ class AsyncFilesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.files.
+        await client.files.get_file(
             id="string",
         )
         """
@@ -363,7 +363,7 @@ class AsyncFilesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_files(self, *, project_id: typing.Optional[str] = None) -> typing.List[File]:
         """
         Read File metadata objects.
 
@@ -375,7 +375,7 @@ class AsyncFilesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.files.
+        await client.files.list_files()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
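Net effect of the files hunks: the read endpoints now have explicit names, get_file and list_files, on both the sync and async clients (the old names are truncated in this view). A minimal usage sketch against 0.0.8; the token is a placeholder, and the `id` attribute access is an assumption not shown in this diff:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# List file metadata, then fetch a single record back by ID.
# Assumes File exposes an `id` field; that model is not part of this diff.
files = client.files.list_files()
if files:
    print(client.files.get_file(id=files[0].id))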
llama_cloud/resources/parsing/client.py
CHANGED

@@ -111,6 +111,8 @@ class ParsingClient:
         gpt_4_o_api_key: str,
         do_not_unroll_columns: bool,
         page_separator: str,
+        bounding_box: str,
+        target_pages: str,
         file: typing.IO,
     ) -> ParsingJob:
         """
@@ -137,6 +139,10 @@ class ParsingClient:
 
         - page_separator: str.
 
+        - bounding_box: str.
+
+        - target_pages: str.
+
         - file: typing.IO.
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -154,6 +160,8 @@ class ParsingClient:
                     "gpt4o_api_key": gpt_4_o_api_key,
                     "do_not_unroll_columns": do_not_unroll_columns,
                     "page_separator": page_separator,
+                    "bounding_box": bounding_box,
+                    "target_pages": target_pages,
                 }
             ),
            files={"file": file},
@@ -576,6 +584,8 @@ class AsyncParsingClient:
         gpt_4_o_api_key: str,
         do_not_unroll_columns: bool,
         page_separator: str,
+        bounding_box: str,
+        target_pages: str,
         file: typing.IO,
     ) -> ParsingJob:
         """
@@ -602,6 +612,10 @@ class AsyncParsingClient:
 
         - page_separator: str.
 
+        - bounding_box: str.
+
+        - target_pages: str.
+
         - file: typing.IO.
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -619,6 +633,8 @@ class AsyncParsingClient:
                     "gpt4o_api_key": gpt_4_o_api_key,
                     "do_not_unroll_columns": do_not_unroll_columns,
                     "page_separator": page_separator,
+                    "bounding_box": bounding_box,
+                    "target_pages": target_pages,
                 }
             ),
            files={"file": file},
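The two new parsing options travel in the same multipart form as the existing ones; only the payload grows. A sketch of just the form dict the client now builds, with illustrative values (per LlamaParse conventions, target_pages is a comma-separated list of zero-based page indices and bounding_box gives top/right/bottom/left margins to clip; the enclosing method signature sits above these hunks and is not part of the diff):

# Illustrative values only; field names are taken verbatim from the hunk.
form_fields = {
    "page_separator": "\n---\n",
    "bounding_box": "0.1,0,0.1,0",  # top,right,bottom,left margins to clip
    "target_pages": "0,2,5",        # parse only pages 0, 2 and 5
}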
llama_cloud/resources/pipelines/client.py
CHANGED

@@ -644,25 +644,38 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_pipeline_files(
+        self,
+        pipeline_id: str,
+        *,
+        data_source_id: typing.Optional[str] = None,
+        only_manually_uploaded: typing.Optional[bool] = None,
+    ) -> typing.List[PipelineFile]:
         """
         Get files for a pipeline.
 
         Parameters:
         - pipeline_id: str.
+
+        - data_source_id: typing.Optional[str].
+
+        - only_manually_uploaded: typing.Optional[bool].
         ---
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.pipelines.
+        client.pipelines.list_pipeline_files(
             pipeline_id="string",
         )
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files"),
+            params=remove_none_from_dict(
+                {"data_source_id": data_source_id, "only_manually_uploaded": only_manually_uploaded}
+            ),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -837,7 +850,66 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def import_pipeline_metadata(self, pipeline_id: str, *, upload_file: typing.IO) -> typing.Dict[str, str]:
+        """
+        Import metadata for a pipeline.
+
+        Parameters:
+        - pipeline_id: str.
+
+        - upload_file: typing.IO.
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/metadata"),
+            data=jsonable_encoder({}),
+            files={"upload_file": upload_file},
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Dict[str, str], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def delete_pipeline_files_metadata(self, pipeline_id: str) -> None:
+        """
+        Delete metadata for all files in a pipeline.
+
+        Parameters:
+        - pipeline_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.pipelines.delete_pipeline_files_metadata(
+            pipeline_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/metadata"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def list_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
         """
         Get data sources for a pipeline.
 
@@ -849,7 +921,7 @@ class PipelinesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.pipelines.
+        client.pipelines.list_pipeline_data_sources(
             pipeline_id="string",
         )
         """
@@ -1064,7 +1136,7 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_pipeline_jobs(self, pipeline_id: str) -> typing.List[PipelineDeployment]:
         """
         Get jobs for a pipeline.
 
@@ -1076,7 +1148,7 @@ class PipelinesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.pipelines.
+        client.pipelines.list_pipeline_jobs(
             pipeline_id="string",
         )
         """
@@ -1968,25 +2040,38 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_pipeline_files(
+        self,
+        pipeline_id: str,
+        *,
+        data_source_id: typing.Optional[str] = None,
+        only_manually_uploaded: typing.Optional[bool] = None,
+    ) -> typing.List[PipelineFile]:
         """
         Get files for a pipeline.
 
         Parameters:
         - pipeline_id: str.
+
+        - data_source_id: typing.Optional[str].
+
+        - only_manually_uploaded: typing.Optional[bool].
         ---
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.pipelines.
+        await client.pipelines.list_pipeline_files(
             pipeline_id="string",
         )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files"),
+            params=remove_none_from_dict(
+                {"data_source_id": data_source_id, "only_manually_uploaded": only_manually_uploaded}
+            ),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
@@ -2161,7 +2246,66 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def import_pipeline_metadata(self, pipeline_id: str, *, upload_file: typing.IO) -> typing.Dict[str, str]:
+        """
+        Import metadata for a pipeline.
+
+        Parameters:
+        - pipeline_id: str.
+
+        - upload_file: typing.IO.
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/metadata"),
+            data=jsonable_encoder({}),
+            files={"upload_file": upload_file},
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Dict[str, str], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def delete_pipeline_files_metadata(self, pipeline_id: str) -> None:
+        """
+        Delete metadata for all files in a pipeline.
+
+        Parameters:
+        - pipeline_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.pipelines.delete_pipeline_files_metadata(
+            pipeline_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/metadata"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def list_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
         """
         Get data sources for a pipeline.
 
@@ -2173,7 +2317,7 @@ class AsyncPipelinesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.pipelines.
+        await client.pipelines.list_pipeline_data_sources(
             pipeline_id="string",
         )
         """
@@ -2388,7 +2532,7 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_pipeline_jobs(self, pipeline_id: str) -> typing.List[PipelineDeployment]:
         """
         Get jobs for a pipeline.
 
@@ -2400,7 +2544,7 @@ class AsyncPipelinesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.pipelines.
+        await client.pipelines.list_pipeline_jobs(
             pipeline_id="string",
         )
         """
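Taken together, the pipelines hunks add two query filters on file listing plus a metadata import/delete pair. A usage sketch with placeholder IDs; the expected upload format for import_pipeline_metadata is not visible in this diff, so JSON is an assumption:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# New in 0.0.8: narrow the file listing to manual uploads, or to one data source.
manual_files = client.pipelines.list_pipeline_files(
    pipeline_id="YOUR_PIPELINE_ID",
    only_manually_uploaded=True,
)

# Bulk-import per-file metadata from a local file, then clear it again.
# "metadata.json" and its format are assumptions, not part of the diff.
with open("metadata.json", "rb") as f:
    client.pipelines.import_pipeline_metadata(
        pipeline_id="YOUR_PIPELINE_ID", upload_file=f
    )
client.pipelines.delete_pipeline_files_metadata(pipeline_id="YOUR_PIPELINE_ID")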
llama_cloud/resources/projects/client.py
CHANGED

@@ -238,9 +238,9 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
         """
-
+        List eval datasets for a project.
 
         Parameters:
         - project_id: str.
@@ -250,7 +250,7 @@ class ProjectsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.
+        client.projects.list_datasets_for_project(
             project_id="string",
         )
         """
@@ -353,9 +353,9 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
         """
-
+        List local eval results for a project.
 
         Parameters:
         - project_id: str.
@@ -365,7 +365,7 @@ class ProjectsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.
+        client.projects.list_local_evals_for_project(
             project_id="string",
         )
         """
@@ -385,9 +385,9 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
         """
-
+        List local eval sets for a project.
 
         Parameters:
         - project_id: str.
@@ -397,7 +397,7 @@ class ProjectsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.
+        client.projects.list_local_eval_sets_for_project(
             project_id="string",
         )
         """
@@ -457,9 +457,9 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
         """
-
+        List PromptMixin prompt sets for a project.
 
         Parameters:
         - project_id: str.
@@ -469,7 +469,7 @@ class ProjectsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.
+        client.projects.list_promptmixin_prompts(
             project_id="string",
         )
         """
@@ -824,9 +824,9 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
         """
-
+        List eval datasets for a project.
 
         Parameters:
         - project_id: str.
@@ -836,7 +836,7 @@ class AsyncProjectsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.
+        await client.projects.list_datasets_for_project(
             project_id="string",
         )
         """
@@ -939,9 +939,9 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
         """
-
+        List local eval results for a project.
 
         Parameters:
         - project_id: str.
@@ -951,7 +951,7 @@ class AsyncProjectsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.
+        await client.projects.list_local_evals_for_project(
             project_id="string",
         )
         """
@@ -971,9 +971,9 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
         """
-
+        List local eval sets for a project.
 
         Parameters:
         - project_id: str.
@@ -983,7 +983,7 @@ class AsyncProjectsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.
+        await client.projects.list_local_eval_sets_for_project(
             project_id="string",
         )
         """
@@ -1043,9 +1043,9 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
         """
-
+        List PromptMixin prompt sets for a project.
 
        Parameters:
         - project_id: str.
@@ -1055,7 +1055,7 @@ class AsyncProjectsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.
+        await client.projects.list_promptmixin_prompts(
             project_id="string",
         )
         """
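The projects hunks are pure renames plus docstring fill-ins; the call shape is unchanged. For completeness, the four new names in one sweep (token and project ID are placeholders):

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")
project_id = "YOUR_PROJECT_ID"

datasets = client.projects.list_datasets_for_project(project_id)
local_evals = client.projects.list_local_evals_for_project(project_id)
local_eval_sets = client.projects.list_local_eval_sets_for_project(project_id)
prompts = client.projects.list_promptmixin_prompts(project_id)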
llama_cloud/types/__init__.py
CHANGED

@@ -6,17 +6,18 @@ from .base_prompt_template import BasePromptTemplate
 from .bedrock_embedding import BedrockEmbedding
 from .chat_message import ChatMessage
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
 from .cloud_chroma_vector_store import CloudChromaVectorStore
 from .cloud_document import CloudDocument
 from .cloud_document_create import CloudDocumentCreate
-from .cloud_gcs_data_source import CloudGcsDataSource
-from .cloud_google_drive_data_source import CloudGoogleDriveDataSource
+from .cloud_notion_page_data_source import CloudNotionPageDataSource
 from .cloud_one_drive_data_source import CloudOneDriveDataSource
 from .cloud_pinecone_vector_store import CloudPineconeVectorStore
 from .cloud_postgres_vector_store import CloudPostgresVectorStore
 from .cloud_qdrant_vector_store import CloudQdrantVectorStore
 from .cloud_s_3_data_source import CloudS3DataSource
 from .cloud_sharepoint_data_source import CloudSharepointDataSource
+from .cloud_slack_data_source import CloudSlackDataSource
 from .cloud_weaviate_vector_store import CloudWeaviateVectorStore
 from .code_splitter import CodeSplitter
 from .cohere_embedding import CohereEmbedding
@@ -52,6 +53,10 @@ from .eval_llm_model_data import EvalLlmModelData
 from .eval_question import EvalQuestion
 from .eval_question_create import EvalQuestionCreate
 from .eval_question_result import EvalQuestionResult
+from .extraction_result import ExtractionResult
+from .extraction_result_data_value import ExtractionResultDataValue
+from .extraction_schema import ExtractionSchema
+from .extraction_schema_data_schema_value import ExtractionSchemaDataSchemaValue
 from .file import File
 from .file_resource_info_value import FileResourceInfoValue
 from .filter_condition import FilterCondition
@@ -132,17 +137,18 @@ __all__ = [
     "BedrockEmbedding",
     "ChatMessage",
     "CloudAzStorageBlobDataSource",
+    "CloudAzureAiSearchVectorStore",
     "CloudChromaVectorStore",
     "CloudDocument",
     "CloudDocumentCreate",
-    "CloudGcsDataSource",
-    "CloudGoogleDriveDataSource",
+    "CloudNotionPageDataSource",
     "CloudOneDriveDataSource",
     "CloudPineconeVectorStore",
     "CloudPostgresVectorStore",
     "CloudQdrantVectorStore",
     "CloudS3DataSource",
     "CloudSharepointDataSource",
+    "CloudSlackDataSource",
     "CloudWeaviateVectorStore",
     "CodeSplitter",
     "CohereEmbedding",
@@ -178,6 +184,10 @@ __all__ = [
     "EvalQuestion",
     "EvalQuestionCreate",
     "EvalQuestionResult",
+    "ExtractionResult",
+    "ExtractionResultDataValue",
+    "ExtractionSchema",
+    "ExtractionSchemaDataSchemaValue",
     "File",
     "FileResourceInfoValue",
     "FilterCondition",
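Net effect on the public type surface: four extraction types plus the Slack data source, Notion page data source, and Azure AI Search vector store become importable, while the GCS and Google Drive data sources disappear. A quick smoke test against the types package (the root llama_cloud/__init__.py also changed by +18/-4 in this release, which suggests it re-exports the same names, but that file's diff is not shown here):

from llama_cloud.types import (
    CloudAzureAiSearchVectorStore,
    CloudNotionPageDataSource,
    CloudSlackDataSource,
    ExtractionResult,
    ExtractionSchema,
)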
llama_cloud/types/azure_open_ai_embedding.py
CHANGED

@@ -62,6 +62,9 @@ class AzureOpenAiEmbedding(pydantic.BaseModel):
     )
     azure_endpoint: typing.Optional[str] = pydantic.Field(description="The Azure endpoint to use.")
     azure_deployment: typing.Optional[str] = pydantic.Field(description="The Azure deployment to use.")
+    use_azure_ad: bool = pydantic.Field(
+        description="Indicates if Microsoft Entra ID (former Azure AD) is used for token authentication"
+    )
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
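use_azure_ad is declared as a plain bool with no visible default, so as written it looks required when constructing the model directly. A sketch only; any required fields outside this hunk are omitted, so the exact call may need more arguments against the real model:

from llama_cloud.types import AzureOpenAiEmbedding

embedding = AzureOpenAiEmbedding(
    azure_endpoint="https://example.openai.azure.com",  # placeholder
    azure_deployment="text-embedding-ada-002",          # placeholder
    use_azure_ad=False,  # new in 0.0.8; no default visible in the hunk
)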
llama_cloud/types/{cloud_gcs_data_source.py → cloud_azure_ai_search_vector_store.py}
RENAMED

@@ -14,16 +14,18 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class
+class CloudAzureAiSearchVectorStore(pydantic.BaseModel):
     """
-
+    Cloud Azure AI Search Vector Store.
     """
 
-
-
-
-
-
+    supports_nested_metadata_filters: typing.Optional[bool]
+    search_service_api_key: str
+    search_service_endpoint: str
+    search_service_api_version: typing.Optional[str]
+    index_name: typing.Optional[str]
+    filterable_metadata_field_keys: typing.Optional[typing.List[str]]
+    embedding_dimension: typing.Optional[int]
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
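Only the two search-service fields are typed as required, so the minimal construction is small. A sketch with placeholder credentials; the remaining Optional fields fall back to None:

from llama_cloud.types import CloudAzureAiSearchVectorStore

store = CloudAzureAiSearchVectorStore(
    search_service_api_key="YOUR_SEARCH_KEY",
    search_service_endpoint="https://your-service.search.windows.net",
    index_name="my-index",      # optional; illustrative value
    embedding_dimension=1536,   # optional; should match the embedding model used
)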