llama-cloud 0.1.14__py3-none-any.whl → 0.1.16__py3-none-any.whl
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release: this version of llama-cloud might be problematic.
- llama_cloud/__init__.py +8 -28
- llama_cloud/resources/evals/client.py +0 -643
- llama_cloud/resources/llama_extract/client.py +168 -6
- llama_cloud/resources/parsing/client.py +0 -8
- llama_cloud/resources/pipelines/client.py +10 -371
- llama_cloud/resources/projects/client.py +72 -923
- llama_cloud/resources/retrievers/client.py +124 -0
- llama_cloud/types/__init__.py +8 -28
- llama_cloud/types/chunk_mode.py +4 -0
- llama_cloud/types/extract_config.py +0 -3
- llama_cloud/types/{local_eval.py → extract_job_create_batch.py} +9 -14
- llama_cloud/types/extract_job_create_batch_data_schema_override.py +9 -0
- llama_cloud/types/extract_job_create_batch_data_schema_override_zero_value.py +7 -0
- llama_cloud/types/extract_mode.py +9 -1
- llama_cloud/types/llama_parse_parameters.py +0 -1
- llama_cloud/types/{local_eval_results.py → paginated_extract_runs_response.py} +7 -8
- llama_cloud/types/prompt_conf.py +1 -0
- llama_cloud/types/report_block.py +1 -0
- llama_cloud/types/struct_mode.py +4 -0
- llama_cloud/types/struct_parse_conf.py +6 -0
- llama_cloud/types/usage.py +2 -1
- {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/RECORD +25 -35
- llama_cloud/types/eval_dataset.py +0 -40
- llama_cloud/types/eval_dataset_job_params.py +0 -39
- llama_cloud/types/eval_dataset_job_record.py +0 -58
- llama_cloud/types/eval_execution_params_override.py +0 -37
- llama_cloud/types/eval_metric.py +0 -17
- llama_cloud/types/eval_question.py +0 -38
- llama_cloud/types/eval_question_create.py +0 -31
- llama_cloud/types/eval_question_result.py +0 -52
- llama_cloud/types/local_eval_sets.py +0 -33
- llama_cloud/types/metric_result.py +0 -33
- llama_cloud/types/prompt_mixin_prompts.py +0 -39
- llama_cloud/types/prompt_spec.py +0 -36
- {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/WHEEL +0 -0
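
Beyond the extraction additions shown below, the summary above also shows the eval surface being removed outright: llama_cloud/resources/evals/client.py and the eval_*, prompt_*, and local_eval* type modules are deleted, and __init__.py drops their re-exports. A minimal compatibility sketch, assuming the deleted modules exported type names matching their file names (EvalDataset, EvalQuestion) and that 0.1.14 re-exported them from the top-level package:

# Hypothetical guard for code that still imports the removed eval types;
# EvalDataset / EvalQuestion are assumed export names based on the deleted
# eval_dataset.py / eval_question.py modules listed above.
try:
    from llama_cloud import EvalDataset, EvalQuestion  # noqa: F401
    HAS_LEGACY_EVALS = True
except ImportError:
    HAS_LEGACY_EVALS = False

if not HAS_LEGACY_EVALS:
    print("llama-cloud >= 0.1.16: eval types and client.evals are gone; migrate off them")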
llama_cloud/resources/llama_extract/client.py

@@ -14,12 +14,14 @@ from ...types.extract_agent_create import ExtractAgentCreate
 from ...types.extract_agent_update import ExtractAgentUpdate
 from ...types.extract_job import ExtractJob
 from ...types.extract_job_create import ExtractJobCreate
+from ...types.extract_job_create_batch import ExtractJobCreateBatch
 from ...types.extract_resultset import ExtractResultset
 from ...types.extract_run import ExtractRun
 from ...types.extract_schema_validate_request import ExtractSchemaValidateRequest
 from ...types.extract_schema_validate_response import ExtractSchemaValidateResponse
 from ...types.http_validation_error import HttpValidationError
 from ...types.llama_extract_settings import LlamaExtractSettings
+from ...types.paginated_extract_runs_response import PaginatedExtractRunsResponse

 try:
     import pydantic
@@ -469,6 +471,50 @@ class LlamaExtractClient:
         raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def run_batch_jobs(self, *, request: ExtractJobCreateBatch) -> typing.List[ExtractJob]:
+        """
+        Parameters:
+            - request: ExtractJobCreateBatch.
+        ---
+        from llama_cloud import (
+            ExtractConfig,
+            ExtractJobCreateBatch,
+            ExtractMode,
+            ExtractTarget,
+        )
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.llama_extract.run_batch_jobs(
+            request=ExtractJobCreateBatch(
+                extraction_agent_id="string",
+                file_ids=[],
+                config_override=ExtractConfig(
+                    extraction_target=ExtractTarget.PER_DOC,
+                    extraction_mode=ExtractMode.FAST,
+                ),
+            ),
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/batch"),
+            json=jsonable_encoder(request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[ExtractJob], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def get_job_result(self, job_id: str) -> ExtractResultset:
         """
         Parameters:
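
The hunk above adds a synchronous run_batch_jobs method that POSTs an ExtractJobCreateBatch to api/v1/extraction/jobs/batch and returns a list of ExtractJob. A minimal end-to-end sketch pairing it with the existing get_job_result; the `id` attribute on ExtractJob and the placeholder ids are assumptions, and real code should wait for each job to finish before fetching results:

from llama_cloud import ExtractConfig, ExtractJobCreateBatch, ExtractMode, ExtractTarget
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Submit one batch covering several already-uploaded files.
jobs = client.llama_extract.run_batch_jobs(
    request=ExtractJobCreateBatch(
        extraction_agent_id="YOUR_AGENT_ID",  # placeholder
        file_ids=["FILE_ID_1", "FILE_ID_2"],  # placeholders
        config_override=ExtractConfig(
            extraction_target=ExtractTarget.PER_DOC,
            extraction_mode=ExtractMode.FAST,
        ),
    ),
)

# Fetch each job's result set; `job.id` is an assumed ExtractJob field
# (check llama_cloud/types/extract_job.py), and in practice you would poll
# until the job has completed before calling get_job_result.
for job in jobs:
    result = client.llama_extract.get_job_result(job_id=job.id)
    print(job.id, result)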
@@ -499,10 +545,16 @@ class LlamaExtractClient:
         raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def list_extract_runs(
+    def list_extract_runs(
+        self, *, extraction_agent_id: str, skip: typing.Optional[int] = None, limit: typing.Optional[int] = None
+    ) -> PaginatedExtractRunsResponse:
         """
         Parameters:
             - extraction_agent_id: str.
+
+            - skip: typing.Optional[int].
+
+            - limit: typing.Optional[int].
         ---
         from llama_cloud.client import LlamaCloud

@@ -516,12 +568,12 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/runs"),
-            params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
+            params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id, "skip": skip, "limit": limit}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(PaginatedExtractRunsResponse, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
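
With the two hunks above, list_extract_runs gains skip/limit query parameters and now returns a PaginatedExtractRunsResponse instead of a plain list. A pagination sketch; the `items` attribute on the response is an assumption (check llama_cloud/types/paginated_extract_runs_response.py for the actual field names):

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

skip, limit = 0, 100
all_runs = []
while True:
    page = client.llama_extract.list_extract_runs(
        extraction_agent_id="YOUR_AGENT_ID",  # placeholder
        skip=skip,
        limit=limit,
    )
    runs = page.items  # assumed field name on PaginatedExtractRunsResponse
    all_runs.extend(runs)
    if len(runs) < limit:
        break  # last page reached
    skip += limit

print(f"fetched {len(all_runs)} extraction runs")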
@@ -590,6 +642,36 @@ class LlamaExtractClient:
         raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def delete_extraction_run(self, run_id: str) -> typing.Any:
+        """
+        Parameters:
+            - run_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.llama_extract.delete_extraction_run(
+            run_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/{run_id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+

 class AsyncLlamaExtractClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
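
The hunk above adds delete_extraction_run, a DELETE against api/v1/extraction/runs/{run_id}. A cleanup sketch that pairs it with list_extract_runs; `page.items`, `run.id`, and the should_discard predicate are all assumptions introduced for illustration:

from llama_cloud.client import LlamaCloud


def should_discard(run) -> bool:
    # Hypothetical predicate: decide which runs to remove (e.g. by age or status).
    return False


client = LlamaCloud(token="YOUR_TOKEN")

page = client.llama_extract.list_extract_runs(
    extraction_agent_id="YOUR_AGENT_ID", skip=0, limit=50
)
for run in page.items:  # `items` and `run.id` are assumed field names
    if should_discard(run):
        client.llama_extract.delete_extraction_run(run_id=run.id)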
@@ -1029,6 +1111,50 @@ class AsyncLlamaExtractClient:
         raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def run_batch_jobs(self, *, request: ExtractJobCreateBatch) -> typing.List[ExtractJob]:
+        """
+        Parameters:
+            - request: ExtractJobCreateBatch.
+        ---
+        from llama_cloud import (
+            ExtractConfig,
+            ExtractJobCreateBatch,
+            ExtractMode,
+            ExtractTarget,
+        )
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.llama_extract.run_batch_jobs(
+            request=ExtractJobCreateBatch(
+                extraction_agent_id="string",
+                file_ids=[],
+                config_override=ExtractConfig(
+                    extraction_target=ExtractTarget.PER_DOC,
+                    extraction_mode=ExtractMode.FAST,
+                ),
+            ),
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/batch"),
+            json=jsonable_encoder(request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.List[ExtractJob], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def get_job_result(self, job_id: str) -> ExtractResultset:
         """
         Parameters:
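
The async client gains the same run_batch_jobs method. A sketch of submitting a batch and fetching the results concurrently with asyncio.gather; as above, `job.id` is an assumed ExtractJob field and real code should wait for jobs to complete first:

import asyncio

from llama_cloud import ExtractConfig, ExtractJobCreateBatch, ExtractMode, ExtractTarget
from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    jobs = await client.llama_extract.run_batch_jobs(
        request=ExtractJobCreateBatch(
            extraction_agent_id="YOUR_AGENT_ID",  # placeholder
            file_ids=["FILE_ID_1", "FILE_ID_2"],  # placeholders
            config_override=ExtractConfig(
                extraction_target=ExtractTarget.PER_DOC,
                extraction_mode=ExtractMode.FAST,
            ),
        ),
    )
    # Fetch all job results concurrently; `job.id` is an assumed ExtractJob field.
    results = await asyncio.gather(
        *(client.llama_extract.get_job_result(job_id=job.id) for job in jobs)
    )
    print(len(results), "results")


asyncio.run(main())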
@@ -1059,10 +1185,16 @@ class AsyncLlamaExtractClient:
         raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def list_extract_runs(
+    async def list_extract_runs(
+        self, *, extraction_agent_id: str, skip: typing.Optional[int] = None, limit: typing.Optional[int] = None
+    ) -> PaginatedExtractRunsResponse:
         """
         Parameters:
             - extraction_agent_id: str.
+
+            - skip: typing.Optional[int].
+
+            - limit: typing.Optional[int].
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -1076,12 +1208,12 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/runs"),
-            params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
+            params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id, "skip": skip, "limit": limit}),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(PaginatedExtractRunsResponse, _response.json())  # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
         try:
@@ -1149,3 +1281,33 @@ class AsyncLlamaExtractClient:
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def delete_extraction_run(self, run_id: str) -> typing.Any:
+        """
+        Parameters:
+            - run_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.llama_extract.delete_extraction_run(
+            run_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "DELETE",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/extraction/runs/{run_id}"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
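
Together with the async list_extract_runs and delete_extraction_run added above, runs can be pruned page by page. A sketch, again assuming an `items` field on PaginatedExtractRunsResponse and an `id` field on each run:

import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def prune_runs(agent_id: str) -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    limit = 100
    while True:
        # Always fetch from skip=0 because deleting shifts the remaining runs forward.
        page = await client.llama_extract.list_extract_runs(
            extraction_agent_id=agent_id, skip=0, limit=limit
        )
        runs = page.items  # assumed field name
        if not runs:
            break
        for run in runs:
            await client.llama_extract.delete_extraction_run(run_id=run.id)  # `id` assumed


asyncio.run(prune_runs("YOUR_AGENT_ID"))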
llama_cloud/resources/parsing/client.py

@@ -212,7 +212,6 @@ class ParsingClient:
         bbox_left: float,
         bbox_right: float,
         bbox_top: float,
-        compact_markdown_table: bool,
         disable_ocr: bool,
         disable_reconstruction: bool,
         disable_image_extraction: bool,
@@ -314,8 +313,6 @@ class ParsingClient:

             - bbox_top: float.

-            - compact_markdown_table: bool.
-
             - disable_ocr: bool.

             - disable_reconstruction: bool.
@@ -452,7 +449,6 @@ class ParsingClient:
                 "bbox_left": bbox_left,
                 "bbox_right": bbox_right,
                 "bbox_top": bbox_top,
-                "compact_markdown_table": compact_markdown_table,
                 "disable_ocr": disable_ocr,
                 "disable_reconstruction": disable_reconstruction,
                 "disable_image_extraction": disable_image_extraction,
@@ -1192,7 +1188,6 @@ class AsyncParsingClient:
         bbox_left: float,
         bbox_right: float,
         bbox_top: float,
-        compact_markdown_table: bool,
         disable_ocr: bool,
         disable_reconstruction: bool,
         disable_image_extraction: bool,
@@ -1294,8 +1289,6 @@ class AsyncParsingClient:

             - bbox_top: float.

-            - compact_markdown_table: bool.
-
             - disable_ocr: bool.

             - disable_reconstruction: bool.
@@ -1432,7 +1425,6 @@ class AsyncParsingClient:
                 "bbox_left": bbox_left,
                 "bbox_right": bbox_right,
                 "bbox_top": bbox_top,
-                "compact_markdown_table": compact_markdown_table,
                 "disable_ocr": disable_ocr,
                 "disable_reconstruction": disable_reconstruction,
                 "disable_image_extraction": disable_image_extraction,