llama-cloud 0.1.15__py3-none-any.whl → 0.1.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: the registry flags this release of llama-cloud as potentially problematic.
- llama_cloud/__init__.py +10 -32
- llama_cloud/environment.py +1 -1
- llama_cloud/resources/chat_apps/client.py +20 -0
- llama_cloud/resources/evals/client.py +0 -643
- llama_cloud/resources/llama_extract/client.py +98 -6
- llama_cloud/resources/parsing/client.py +8 -0
- llama_cloud/resources/pipelines/client.py +14 -375
- llama_cloud/resources/projects/client.py +72 -923
- llama_cloud/resources/retrievers/client.py +161 -4
- llama_cloud/types/__init__.py +10 -32
- llama_cloud/types/base_plan.py +3 -0
- llama_cloud/types/base_plan_name.py +12 -0
- llama_cloud/types/cloud_confluence_data_source.py +1 -0
- llama_cloud/types/extract_config.py +0 -3
- llama_cloud/types/extract_mode.py +13 -1
- llama_cloud/types/extract_run.py +1 -0
- llama_cloud/types/llama_extract_settings.py +1 -0
- llama_cloud/types/llama_parse_parameters.py +1 -0
- llama_cloud/types/parsing_mode.py +12 -0
- llama_cloud/types/pipeline_file.py +2 -1
- llama_cloud/types/pipeline_file_status.py +33 -0
- llama_cloud/types/plan_limits.py +1 -0
- llama_cloud/types/preset_composite_retrieval_params.py +4 -2
- llama_cloud/types/prompt_conf.py +1 -0
- llama_cloud/types/{eval_question_create.py → re_rank_config.py} +6 -2
- llama_cloud/types/re_ranker_type.py +41 -0
- llama_cloud/types/report_block.py +1 -0
- llama_cloud/types/struct_mode.py +4 -0
- llama_cloud/types/struct_parse_conf.py +6 -0
- llama_cloud/types/usage_and_plan.py +2 -2
- llama_cloud/types/{usage.py → usage_response.py} +3 -3
- llama_cloud/types/{usage_active_alerts_item.py → usage_response_active_alerts_item.py} +8 -4
- {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/RECORD +36 -47
- llama_cloud/types/eval_dataset.py +0 -40
- llama_cloud/types/eval_dataset_job_params.py +0 -39
- llama_cloud/types/eval_dataset_job_record.py +0 -58
- llama_cloud/types/eval_execution_params_override.py +0 -37
- llama_cloud/types/eval_metric.py +0 -17
- llama_cloud/types/eval_question.py +0 -38
- llama_cloud/types/eval_question_result.py +0 -52
- llama_cloud/types/local_eval.py +0 -47
- llama_cloud/types/local_eval_results.py +0 -40
- llama_cloud/types/local_eval_sets.py +0 -33
- llama_cloud/types/metric_result.py +0 -33
- llama_cloud/types/prompt_mixin_prompts.py +0 -39
- llama_cloud/types/prompt_spec.py +0 -36
- {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/WHEEL +0 -0
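The renames and removals in the file list above imply import-level changes for downstream code: the evals client and all eval types are gone, and `usage.py` becomes `usage_response.py`. A hedged sketch of the impact; the class names are inferred from the module names (for example `UsageResponse` from `usage_response.py`) and are assumptions, not confirmed by this diff:

```python
# Hedged sketch of the upgrade impact implied by the file list above.
# Class names are inferred from the module names and are assumptions.
try:
    from llama_cloud import EvalQuestion  # assumed export; eval types are removed in 0.1.17
except ImportError:
    EvalQuestion = None  # the evals client and eval types were dropped in this release

try:
    from llama_cloud import UsageResponse  # assumed 0.1.17 name (usage.py -> usage_response.py)
except ImportError:
    from llama_cloud import Usage as UsageResponse  # 0.1.15 fallback
```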
llama_cloud/resources/llama_extract/client.py:

@@ -336,9 +336,11 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def run_job(self, *, request: ExtractJobCreate) -> ExtractJob:
+    def run_job(self, *, from_ui: typing.Optional[bool] = None, request: ExtractJobCreate) -> ExtractJob:
         """
         Parameters:
+            - from_ui: typing.Optional[bool].
+
             - request: ExtractJobCreate.
         ---
         from llama_cloud import (
@@ -366,6 +368,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
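The `from_ui` flag is sent as a query parameter and dropped by `remove_none_from_dict` when it is `None`. A minimal usage sketch, assuming `ExtractJobCreate` takes `extraction_agent_id` and `file_id` (those field names are not shown in this diff):

```python
# Hedged sketch of run_job with the new from_ui flag in 0.1.17.
# The ExtractJobCreate field names below are assumptions for illustration.
from llama_cloud import ExtractJobCreate
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")
job = client.llama_extract.run_job(
    from_ui=False,  # new optional query parameter; None omits it from the query string
    request=ExtractJobCreate(extraction_agent_id="agent-id", file_id="file-id"),
)
print(job.id)
```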
@@ -411,10 +414,16 @@ class LlamaExtractClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)

     def run_job_test_user(
-        self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
+        self,
+        *,
+        from_ui: typing.Optional[bool] = None,
+        job_create: ExtractJobCreate,
+        extract_settings: typing.Optional[LlamaExtractSettings] = OMIT,
     ) -> ExtractJob:
         """
         Parameters:
+            - from_ui: typing.Optional[bool].
+
             - job_create: ExtractJobCreate.

             - extract_settings: typing.Optional[LlamaExtractSettings].
@@ -457,6 +466,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/test"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(_request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -471,9 +481,13 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def run_batch_jobs(self, *, request: ExtractJobCreateBatch) -> typing.List[ExtractJob]:
+    def run_batch_jobs(
+        self, *, from_ui: typing.Optional[bool] = None, request: ExtractJobCreateBatch
+    ) -> typing.List[ExtractJob]:
         """
         Parameters:
+            - from_ui: typing.Optional[bool].
+
             - request: ExtractJobCreateBatch.
         ---
         from llama_cloud import (
@@ -501,6 +515,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/batch"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
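The batch endpoint gets the same treatment. A sketch under the assumption that `ExtractJobCreateBatch` carries an `extraction_agent_id` and a list of `file_ids`; neither field is shown in this diff:

```python
# Hedged sketch of run_batch_jobs with the new from_ui flag.
# ExtractJobCreateBatch field names are assumptions; check the 0.1.17 types.
from llama_cloud import ExtractJobCreateBatch
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")
jobs = client.llama_extract.run_batch_jobs(
    from_ui=None,  # None is stripped by remove_none_from_dict, so no query parameter is sent
    request=ExtractJobCreateBatch(extraction_agent_id="agent-id", file_ids=["file-1", "file-2"]),
)
print(len(jobs))
```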
@@ -582,6 +597,37 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def get_latest_run_from_ui(self, *, extraction_agent_id: str) -> typing.Optional[ExtractRun]:
+        """
+        Parameters:
+            - extraction_agent_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.llama_extract.get_latest_run_from_ui(
+            extraction_agent_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/runs/latest-from-ui"),
+            params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Optional[ExtractRun], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def get_run_by_job_id(self, job_id: str) -> ExtractRun:
         """
         Parameters:
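Because the new endpoint returns `typing.Optional[ExtractRun]`, callers should expect `None` when no UI-initiated run exists yet for the agent. A short sketch based on the docstring example above; the agent id is a placeholder:

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")
run = client.llama_extract.get_latest_run_from_ui(extraction_agent_id="agent-id")
if run is None:
    print("no run has been started from the UI for this extraction agent")
else:
    print(run)  # ExtractRun model; inspect its fields as needed
```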
@@ -976,9 +1022,11 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def run_job(self, *, request: ExtractJobCreate) -> ExtractJob:
+    async def run_job(self, *, from_ui: typing.Optional[bool] = None, request: ExtractJobCreate) -> ExtractJob:
         """
         Parameters:
+            - from_ui: typing.Optional[bool].
+
             - request: ExtractJobCreate.
         ---
         from llama_cloud import (
@@ -1006,6 +1054,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1051,10 +1100,16 @@ class AsyncLlamaExtractClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)

     async def run_job_test_user(
-        self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
+        self,
+        *,
+        from_ui: typing.Optional[bool] = None,
+        job_create: ExtractJobCreate,
+        extract_settings: typing.Optional[LlamaExtractSettings] = OMIT,
     ) -> ExtractJob:
         """
         Parameters:
+            - from_ui: typing.Optional[bool].
+
             - job_create: ExtractJobCreate.

             - extract_settings: typing.Optional[LlamaExtractSettings].
@@ -1097,6 +1152,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/test"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(_request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1111,9 +1167,13 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def run_batch_jobs(self, *, request: ExtractJobCreateBatch) -> typing.List[ExtractJob]:
+    async def run_batch_jobs(
+        self, *, from_ui: typing.Optional[bool] = None, request: ExtractJobCreateBatch
+    ) -> typing.List[ExtractJob]:
         """
         Parameters:
+            - from_ui: typing.Optional[bool].
+
             - request: ExtractJobCreateBatch.
         ---
         from llama_cloud import (
@@ -1141,6 +1201,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/batch"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1222,6 +1283,37 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def get_latest_run_from_ui(self, *, extraction_agent_id: str) -> typing.Optional[ExtractRun]:
+        """
+        Parameters:
+            - extraction_agent_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.llama_extract.get_latest_run_from_ui(
+            extraction_agent_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/runs/latest-from-ui"),
+            params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Optional[ExtractRun], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def get_run_by_job_id(self, job_id: str) -> ExtractRun:
         """
         Parameters:
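The async client mirrors the sync changes one for one. A minimal sketch of driving it from a script, lifted from the docstring example in the hunk above and wrapped in `asyncio.run()`:

```python
import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    # Same new endpoint as the sync client; returns None if no UI-initiated run exists.
    run = await client.llama_extract.get_latest_run_from_ui(extraction_agent_id="agent-id")
    print(run)


asyncio.run(main())
```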
llama_cloud/resources/parsing/client.py:

@@ -248,6 +248,7 @@ class ParsingClient:
         vendor_multimodal_model_name: str,
         model: str,
         webhook_url: str,
+        preset: str,
         parse_mode: typing.Optional[ParsingMode] = OMIT,
         system_prompt: str,
         system_prompt_append: str,
@@ -385,6 +386,8 @@ class ParsingClient:

             - webhook_url: str.

+            - preset: str.
+
             - parse_mode: typing.Optional[ParsingMode].

             - system_prompt: str.
@@ -484,6 +487,7 @@ class ParsingClient:
                 "vendor_multimodal_model_name": vendor_multimodal_model_name,
                 "model": model,
                 "webhook_url": webhook_url,
+                "preset": preset,
                 "system_prompt": system_prompt,
                 "system_prompt_append": system_prompt_append,
                 "user_prompt": user_prompt,
@@ -1224,6 +1228,7 @@ class AsyncParsingClient:
         vendor_multimodal_model_name: str,
         model: str,
         webhook_url: str,
+        preset: str,
         parse_mode: typing.Optional[ParsingMode] = OMIT,
         system_prompt: str,
         system_prompt_append: str,
@@ -1361,6 +1366,8 @@ class AsyncParsingClient:

             - webhook_url: str.

+            - preset: str.
+
             - parse_mode: typing.Optional[ParsingMode].

             - system_prompt: str.
@@ -1460,6 +1467,7 @@ class AsyncParsingClient:
                 "vendor_multimodal_model_name": vendor_multimodal_model_name,
                 "model": model,
                 "webhook_url": webhook_url,
+                "preset": preset,
                 "system_prompt": system_prompt,
                 "system_prompt_append": system_prompt_append,
                 "user_prompt": user_prompt,