llama-cloud 0.1.16__py3-none-any.whl → 0.1.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of llama-cloud has been flagged as potentially problematic.

Files changed (32)
  1. llama_cloud/__init__.py +10 -6
  2. llama_cloud/environment.py +1 -1
  3. llama_cloud/resources/chat_apps/client.py +24 -4
  4. llama_cloud/resources/llama_extract/client.py +98 -10
  5. llama_cloud/resources/parsing/client.py +16 -65
  6. llama_cloud/resources/pipelines/client.py +6 -6
  7. llama_cloud/resources/projects/client.py +70 -0
  8. llama_cloud/resources/retrievers/client.py +41 -8
  9. llama_cloud/types/__init__.py +10 -6
  10. llama_cloud/types/base_plan.py +3 -0
  11. llama_cloud/types/base_plan_name.py +12 -0
  12. llama_cloud/types/cloud_confluence_data_source.py +1 -0
  13. llama_cloud/types/extract_job_create.py +2 -1
  14. llama_cloud/types/extract_mode.py +8 -4
  15. llama_cloud/types/extract_run.py +1 -0
  16. llama_cloud/types/llama_extract_settings.py +3 -2
  17. llama_cloud/types/llama_parse_parameters.py +4 -2
  18. llama_cloud/types/parsing_mode.py +12 -0
  19. llama_cloud/types/pipeline_file.py +2 -1
  20. llama_cloud/types/pipeline_file_status.py +33 -0
  21. llama_cloud/types/plan_limits.py +1 -0
  22. llama_cloud/types/preset_composite_retrieval_params.py +4 -2
  23. llama_cloud/types/{parsing_usage.py → re_rank_config.py} +6 -3
  24. llama_cloud/types/re_ranker_type.py +41 -0
  25. llama_cloud/types/supported_llm_model_names.py +0 -12
  26. llama_cloud/types/usage_and_plan.py +2 -2
  27. llama_cloud/types/{usage.py → usage_response.py} +3 -3
  28. llama_cloud/types/{usage_active_alerts_item.py → usage_response_active_alerts_item.py} +8 -4
  29. {llama_cloud-0.1.16.dist-info → llama_cloud-0.1.18.dist-info}/METADATA +1 -1
  30. {llama_cloud-0.1.16.dist-info → llama_cloud-0.1.18.dist-info}/RECORD +32 -30
  31. {llama_cloud-0.1.16.dist-info → llama_cloud-0.1.18.dist-info}/LICENSE +0 -0
  32. {llama_cloud-0.1.16.dist-info → llama_cloud-0.1.18.dist-info}/WHEEL +0 -0
llama_cloud/__init__.py CHANGED
@@ -199,7 +199,6 @@ from .types import (
     ParsingJobStructuredResult,
     ParsingJobTextResult,
     ParsingMode,
-    ParsingUsage,
     PartitionNames,
     Permission,
     Pipeline,
@@ -234,6 +233,7 @@ from .types import (
     PipelineFileCustomMetadataValue,
     PipelineFilePermissionInfoValue,
     PipelineFileResourceInfoValue,
+    PipelineFileStatus,
     PipelineTransformConfig,
     PipelineTransformConfig_Advanced,
     PipelineTransformConfig_Auto,
@@ -250,6 +250,8 @@ from .types import (
     ProjectCreate,
     PromptConf,
     PydanticProgramMode,
+    ReRankConfig,
+    ReRankerType,
     RecurringCreditGrant,
     RelatedNodeInfo,
     RelatedNodeInfoNodeType,
@@ -293,10 +295,10 @@ from .types import (
     TokenChunkingConfig,
     TokenTextSplitter,
     TransformationCategoryNames,
-    Usage,
-    UsageActiveAlertsItem,
     UsageAndPlan,
     UsageMetricResponse,
+    UsageResponse,
+    UsageResponseActiveAlertsItem,
     UserJobRecord,
     UserOrganization,
     UserOrganizationCreate,
@@ -567,7 +569,6 @@ __all__ = [
     "ParsingJobStructuredResult",
     "ParsingJobTextResult",
     "ParsingMode",
-    "ParsingUsage",
     "PartitionNames",
     "Permission",
     "Pipeline",
@@ -602,6 +603,7 @@ __all__ = [
     "PipelineFileCustomMetadataValue",
     "PipelineFilePermissionInfoValue",
     "PipelineFileResourceInfoValue",
+    "PipelineFileStatus",
     "PipelineFileUpdateCustomMetadataValue",
     "PipelineTransformConfig",
     "PipelineTransformConfig_Advanced",
@@ -628,6 +630,8 @@ __all__ = [
     "ProjectCreate",
     "PromptConf",
     "PydanticProgramMode",
+    "ReRankConfig",
+    "ReRankerType",
     "RecurringCreditGrant",
     "RelatedNodeInfo",
     "RelatedNodeInfoNodeType",
@@ -673,10 +677,10 @@ __all__ = [
     "TransformationCategoryNames",
     "UnprocessableEntityError",
     "UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction",
-    "Usage",
-    "UsageActiveAlertsItem",
     "UsageAndPlan",
     "UsageMetricResponse",
+    "UsageResponse",
+    "UsageResponseActiveAlertsItem",
     "UserJobRecord",
     "UserOrganization",
     "UserOrganizationCreate",
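
For downstream code, the export changes above reduce to a one-line import fix: ParsingUsage is removed outright, and the usage types carry new names. A minimal migration sketch:

# Before (0.1.16):
#   from llama_cloud import ParsingUsage, Usage, UsageActiveAlertsItem
# After (0.1.18): ParsingUsage has no replacement export in this diff;
# the remaining usage types were renamed as shown in the hunks above.
from llama_cloud import UsageResponse, UsageResponseActiveAlertsItem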
llama_cloud/environment.py CHANGED
@@ -4,4 +4,4 @@ import enum


 class LlamaCloudEnvironment(enum.Enum):
-    DEFAULT = "https://api.cloud.llamaindex.ai/"
+    DEFAULT = "https://api.cloud.llamaindex.ai"
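
The dropped trailing slash matters because the generated clients build URLs with urllib.parse.urljoin against f"{base_url}/" (visible in the request code throughout this diff), so the old value produced a doubled slash in every path. A quick stdlib-only check:

from urllib.parse import urljoin

# Old DEFAULT, after the client appends its own "/":
print(urljoin("https://api.cloud.llamaindex.ai/" + "/", "api/v1/jobs"))
# https://api.cloud.llamaindex.ai//api/v1/jobs

# New DEFAULT:
print(urljoin("https://api.cloud.llamaindex.ai" + "/", "api/v1/jobs"))
# https://api.cloud.llamaindex.ai/api/v1/jobs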
llama_cloud/resources/chat_apps/client.py CHANGED
@@ -95,6 +95,8 @@ class ChatAppsClient:
     CompositeRetrievalMode,
     LlmParameters,
     PresetCompositeRetrievalParams,
+    ReRankConfig,
+    ReRankerType,
     SupportedLlmModelNames,
 )
 from llama_cloud.client import LlamaCloud
@@ -106,10 +108,13 @@ class ChatAppsClient:
     name="string",
     retriever_id="string",
     llm_config=LlmParameters(
-        model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+        model_name=SupportedLlmModelNames.GPT_4_O,
     ),
     retrieval_config=PresetCompositeRetrievalParams(
         mode=CompositeRetrievalMode.ROUTING,
+        rerank_config=ReRankConfig(
+            type=ReRankerType.SYSTEM_DEFAULT,
+        ),
     ),
 )
 """
@@ -200,6 +205,8 @@ class ChatAppsClient:
     CompositeRetrievalMode,
     LlmParameters,
     PresetCompositeRetrievalParams,
+    ReRankConfig,
+    ReRankerType,
     SupportedLlmModelNames,
 )
 from llama_cloud.client import LlamaCloud
@@ -210,10 +217,13 @@ class ChatAppsClient:
 client.chat_apps.update_chat_app(
     id="string",
     llm_config=LlmParameters(
-        model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+        model_name=SupportedLlmModelNames.GPT_4_O,
     ),
     retrieval_config=PresetCompositeRetrievalParams(
         mode=CompositeRetrievalMode.ROUTING,
+        rerank_config=ReRankConfig(
+            type=ReRankerType.SYSTEM_DEFAULT,
+        ),
     ),
 )
 """
@@ -378,6 +388,8 @@ class AsyncChatAppsClient:
     CompositeRetrievalMode,
     LlmParameters,
     PresetCompositeRetrievalParams,
+    ReRankConfig,
+    ReRankerType,
     SupportedLlmModelNames,
 )
 from llama_cloud.client import AsyncLlamaCloud
@@ -389,10 +401,13 @@ class AsyncChatAppsClient:
     name="string",
     retriever_id="string",
     llm_config=LlmParameters(
-        model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+        model_name=SupportedLlmModelNames.GPT_4_O,
     ),
     retrieval_config=PresetCompositeRetrievalParams(
         mode=CompositeRetrievalMode.ROUTING,
+        rerank_config=ReRankConfig(
+            type=ReRankerType.SYSTEM_DEFAULT,
+        ),
     ),
 )
 """
@@ -483,6 +498,8 @@ class AsyncChatAppsClient:
     CompositeRetrievalMode,
     LlmParameters,
     PresetCompositeRetrievalParams,
+    ReRankConfig,
+    ReRankerType,
     SupportedLlmModelNames,
 )
 from llama_cloud.client import AsyncLlamaCloud
@@ -493,10 +510,13 @@ class AsyncChatAppsClient:
 await client.chat_apps.update_chat_app(
     id="string",
     llm_config=LlmParameters(
-        model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+        model_name=SupportedLlmModelNames.GPT_4_O,
     ),
     retrieval_config=PresetCompositeRetrievalParams(
         mode=CompositeRetrievalMode.ROUTING,
+        rerank_config=ReRankConfig(
+            type=ReRankerType.SYSTEM_DEFAULT,
+        ),
     ),
 )
 """
llama_cloud/resources/llama_extract/client.py CHANGED
@@ -336,9 +336,11 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def run_job(self, *, request: ExtractJobCreate) -> ExtractJob:
+    def run_job(self, *, from_ui: typing.Optional[bool] = None, request: ExtractJobCreate) -> ExtractJob:
         """
         Parameters:
+        - from_ui: typing.Optional[bool].
+
         - request: ExtractJobCreate.
         ---
         from llama_cloud import (
@@ -355,7 +357,6 @@ class LlamaExtractClient:
         client.llama_extract.run_job(
             request=ExtractJobCreate(
                 extraction_agent_id="string",
-                file_id="string",
                 config_override=ExtractConfig(
                     extraction_target=ExtractTarget.PER_DOC,
                     extraction_mode=ExtractMode.FAST,
@@ -366,6 +367,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -411,10 +413,16 @@ class LlamaExtractClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)

     def run_job_test_user(
-        self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
+        self,
+        *,
+        from_ui: typing.Optional[bool] = None,
+        job_create: ExtractJobCreate,
+        extract_settings: typing.Optional[LlamaExtractSettings] = OMIT,
     ) -> ExtractJob:
         """
         Parameters:
+        - from_ui: typing.Optional[bool].
+
         - job_create: ExtractJobCreate.

         - extract_settings: typing.Optional[LlamaExtractSettings].
@@ -437,7 +445,6 @@ class LlamaExtractClient:
         client.llama_extract.run_job_test_user(
             job_create=ExtractJobCreate(
                 extraction_agent_id="string",
-                file_id="string",
                 config_override=ExtractConfig(
                     extraction_target=ExtractTarget.PER_DOC,
                     extraction_mode=ExtractMode.FAST,
@@ -457,6 +464,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/test"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(_request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -471,9 +479,13 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def run_batch_jobs(self, *, request: ExtractJobCreateBatch) -> typing.List[ExtractJob]:
+    def run_batch_jobs(
+        self, *, from_ui: typing.Optional[bool] = None, request: ExtractJobCreateBatch
+    ) -> typing.List[ExtractJob]:
         """
         Parameters:
+        - from_ui: typing.Optional[bool].
+
         - request: ExtractJobCreateBatch.
         ---
         from llama_cloud import (
@@ -501,6 +513,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/batch"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -582,6 +595,37 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def get_latest_run_from_ui(self, *, extraction_agent_id: str) -> typing.Optional[ExtractRun]:
+        """
+        Parameters:
+        - extraction_agent_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.llama_extract.get_latest_run_from_ui(
+            extraction_agent_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/runs/latest-from-ui"),
+            params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Optional[ExtractRun], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def get_run_by_job_id(self, job_id: str) -> ExtractRun:
         """
         Parameters:
@@ -976,9 +1020,11 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def run_job(self, *, request: ExtractJobCreate) -> ExtractJob:
+    async def run_job(self, *, from_ui: typing.Optional[bool] = None, request: ExtractJobCreate) -> ExtractJob:
         """
         Parameters:
+        - from_ui: typing.Optional[bool].
+
         - request: ExtractJobCreate.
         ---
         from llama_cloud import (
@@ -995,7 +1041,6 @@ class AsyncLlamaExtractClient:
         await client.llama_extract.run_job(
             request=ExtractJobCreate(
                 extraction_agent_id="string",
-                file_id="string",
                 config_override=ExtractConfig(
                     extraction_target=ExtractTarget.PER_DOC,
                     extraction_mode=ExtractMode.FAST,
@@ -1006,6 +1051,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1051,10 +1097,16 @@ class AsyncLlamaExtractClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)

     async def run_job_test_user(
-        self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
+        self,
+        *,
+        from_ui: typing.Optional[bool] = None,
+        job_create: ExtractJobCreate,
+        extract_settings: typing.Optional[LlamaExtractSettings] = OMIT,
     ) -> ExtractJob:
         """
         Parameters:
+        - from_ui: typing.Optional[bool].
+
         - job_create: ExtractJobCreate.

         - extract_settings: typing.Optional[LlamaExtractSettings].
@@ -1077,7 +1129,6 @@ class AsyncLlamaExtractClient:
         await client.llama_extract.run_job_test_user(
             job_create=ExtractJobCreate(
                 extraction_agent_id="string",
-                file_id="string",
                 config_override=ExtractConfig(
                     extraction_target=ExtractTarget.PER_DOC,
                     extraction_mode=ExtractMode.FAST,
@@ -1097,6 +1148,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/test"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(_request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1111,9 +1163,13 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def run_batch_jobs(self, *, request: ExtractJobCreateBatch) -> typing.List[ExtractJob]:
+    async def run_batch_jobs(
+        self, *, from_ui: typing.Optional[bool] = None, request: ExtractJobCreateBatch
+    ) -> typing.List[ExtractJob]:
         """
         Parameters:
+        - from_ui: typing.Optional[bool].
+
         - request: ExtractJobCreateBatch.
         ---
         from llama_cloud import (
@@ -1141,6 +1197,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/batch"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1222,6 +1279,37 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def get_latest_run_from_ui(self, *, extraction_agent_id: str) -> typing.Optional[ExtractRun]:
+        """
+        Parameters:
+        - extraction_agent_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.llama_extract.get_latest_run_from_ui(
+            extraction_agent_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/runs/latest-from-ui"),
+            params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Optional[ExtractRun], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def get_run_by_job_id(self, job_id: str) -> ExtractRun:
         """
         Parameters:
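
Putting the extraction changes together: from_ui defaults to None and remove_none_from_dict strips it from the query string, so existing callers are unaffected; the new runs/latest-from-ui endpoint may return None. A sketch based on the docstrings above (the updated examples no longer pass file_id, which suggests it became optional on ExtractJobCreate; treat that as an inference, not a guarantee):

from llama_cloud import ExtractJobCreate
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# from_ui is a new optional query parameter; omitting it keeps 0.1.16 behavior.
job = client.llama_extract.run_job(
    from_ui=False,
    request=ExtractJobCreate(extraction_agent_id="string"),
)

# New in 0.1.18: the most recent run triggered from the UI, or None.
latest = client.llama_extract.get_latest_run_from_ui(
    extraction_agent_id="string",
)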
llama_cloud/resources/parsing/client.py CHANGED
@@ -19,7 +19,6 @@ from ...types.parsing_job_markdown_result import ParsingJobMarkdownResult
 from ...types.parsing_job_structured_result import ParsingJobStructuredResult
 from ...types.parsing_job_text_result import ParsingJobTextResult
 from ...types.parsing_mode import ParsingMode
-from ...types.parsing_usage import ParsingUsage
 from ...types.presigned_url import PresignedUrl

 try:
@@ -212,6 +211,7 @@ class ParsingClient:
         bbox_left: float,
         bbox_right: float,
         bbox_top: float,
+        compact_markdown_table: bool,
         disable_ocr: bool,
         disable_reconstruction: bool,
         disable_image_extraction: bool,
@@ -248,6 +248,7 @@ class ParsingClient:
         vendor_multimodal_model_name: str,
         model: str,
         webhook_url: str,
+        preset: str,
         parse_mode: typing.Optional[ParsingMode] = OMIT,
         system_prompt: str,
         system_prompt_append: str,
@@ -313,6 +314,8 @@ class ParsingClient:

         - bbox_top: float.

+        - compact_markdown_table: bool.
+
         - disable_ocr: bool.

         - disable_reconstruction: bool.
@@ -385,6 +388,8 @@ class ParsingClient:

         - webhook_url: str.

+        - preset: str.
+
         - parse_mode: typing.Optional[ParsingMode].

         - system_prompt: str.
@@ -449,6 +454,7 @@ class ParsingClient:
         "bbox_left": bbox_left,
         "bbox_right": bbox_right,
         "bbox_top": bbox_top,
+        "compact_markdown_table": compact_markdown_table,
         "disable_ocr": disable_ocr,
         "disable_reconstruction": disable_reconstruction,
         "disable_image_extraction": disable_image_extraction,
@@ -484,6 +490,7 @@ class ParsingClient:
         "vendor_multimodal_model_name": vendor_multimodal_model_name,
         "model": model,
         "webhook_url": webhook_url,
+        "preset": preset,
         "system_prompt": system_prompt,
         "system_prompt_append": system_prompt_append,
         "user_prompt": user_prompt,
@@ -532,38 +539,6 @@ class ParsingClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def usage(self, *, organization_id: typing.Optional[str] = None) -> ParsingUsage:
-        """
-        DEPRECATED: use either /organizations/{organization_id}/usage or /projects/{project_id}/usage instead
-        Get parsing usage for user
-
-        Parameters:
-        - organization_id: typing.Optional[str].
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.parsing.usage()
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/usage"),
-            params=remove_none_from_dict({"organization_id": organization_id}),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ParsingUsage, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
     def get_job(self, job_id: str) -> ParsingJob:
         """
         Get a job by id
@@ -1188,6 +1163,7 @@ class AsyncParsingClient:
         bbox_left: float,
         bbox_right: float,
         bbox_top: float,
+        compact_markdown_table: bool,
         disable_ocr: bool,
         disable_reconstruction: bool,
         disable_image_extraction: bool,
@@ -1224,6 +1200,7 @@ class AsyncParsingClient:
         vendor_multimodal_model_name: str,
         model: str,
         webhook_url: str,
+        preset: str,
         parse_mode: typing.Optional[ParsingMode] = OMIT,
         system_prompt: str,
         system_prompt_append: str,
@@ -1289,6 +1266,8 @@ class AsyncParsingClient:

         - bbox_top: float.

+        - compact_markdown_table: bool.
+
         - disable_ocr: bool.

         - disable_reconstruction: bool.
@@ -1361,6 +1340,8 @@ class AsyncParsingClient:

         - webhook_url: str.

+        - preset: str.
+
         - parse_mode: typing.Optional[ParsingMode].

         - system_prompt: str.
@@ -1425,6 +1406,7 @@ class AsyncParsingClient:
         "bbox_left": bbox_left,
         "bbox_right": bbox_right,
         "bbox_top": bbox_top,
+        "compact_markdown_table": compact_markdown_table,
         "disable_ocr": disable_ocr,
         "disable_reconstruction": disable_reconstruction,
         "disable_image_extraction": disable_image_extraction,
@@ -1460,6 +1442,7 @@ class AsyncParsingClient:
         "vendor_multimodal_model_name": vendor_multimodal_model_name,
         "model": model,
         "webhook_url": webhook_url,
+        "preset": preset,
         "system_prompt": system_prompt,
         "system_prompt_append": system_prompt_append,
         "user_prompt": user_prompt,
@@ -1508,38 +1491,6 @@ class AsyncParsingClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def usage(self, *, organization_id: typing.Optional[str] = None) -> ParsingUsage:
-        """
-        DEPRECATED: use either /organizations/{organization_id}/usage or /projects/{project_id}/usage instead
-        Get parsing usage for user
-
-        Parameters:
-        - organization_id: typing.Optional[str].
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.parsing.usage()
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/usage"),
-            params=remove_none_from_dict({"organization_id": organization_id}),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ParsingUsage, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
     async def get_job(self, job_id: str) -> ParsingJob:
         """
         Get a job by id
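
The removed usage() method was already marked deprecated in its own docstring, which points callers at the organization- and project-scoped usage endpoints. This diff does not show the SDK wrappers for those endpoints (the 70 lines added to projects/client.py are likely related), so the sketch below calls the replacement REST path directly; the api/v1 prefix and the PROJECT_ID placeholder are assumptions, not confirmed by this diff:

import httpx

# Hypothetical direct call: path taken from the removed docstring's guidance.
resp = httpx.get(
    "https://api.cloud.llamaindex.ai/api/v1/projects/PROJECT_ID/usage",
    headers={"Authorization": "Bearer YOUR_TOKEN"},
    timeout=60,
)
resp.raise_for_status()
print(resp.json())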
llama_cloud/resources/pipelines/client.py CHANGED
@@ -465,7 +465,7 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def add_files_to_pipeline(
+    def add_files_to_pipeline_api(
         self, pipeline_id: str, *, request: typing.List[PipelineFileCreate]
     ) -> typing.List[PipelineFile]:
         """
@@ -481,7 +481,7 @@ class PipelinesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.pipelines.add_files_to_pipeline(
+        client.pipelines.add_files_to_pipeline_api(
             pipeline_id="string",
             request=[],
         )
@@ -1218,7 +1218,7 @@ class PipelinesClient:
             retrieval_mode=RetrievalMode.CHUNKS,
         ),
         llm_parameters=LlmParameters(
-            model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+            model_name=SupportedLlmModelNames.GPT_4_O,
         ),
     ),
 )
@@ -2020,7 +2020,7 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def add_files_to_pipeline(
+    async def add_files_to_pipeline_api(
         self, pipeline_id: str, *, request: typing.List[PipelineFileCreate]
     ) -> typing.List[PipelineFile]:
         """
@@ -2036,7 +2036,7 @@ class AsyncPipelinesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.pipelines.add_files_to_pipeline(
+        await client.pipelines.add_files_to_pipeline_api(
             pipeline_id="string",
             request=[],
         )
@@ -2775,7 +2775,7 @@ class AsyncPipelinesClient:
             retrieval_mode=RetrievalMode.CHUNKS,
         ),
         llm_parameters=LlmParameters(
-            model_name=SupportedLlmModelNames.GPT_3_5_TURBO,
+            model_name=SupportedLlmModelNames.GPT_4_O,
        ),
     ),
 )
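
The add_files_to_pipeline rename is a breaking change for call sites, but the signature is otherwise untouched, so the migration is mechanical:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Before (0.1.16):
#   client.pipelines.add_files_to_pipeline(pipeline_id="string", request=[])
# After (0.1.18): same arguments, new name.
client.pipelines.add_files_to_pipeline_api(
    pipeline_id="string",
    request=[],
)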