llama-cloud 0.1.16__py3-none-any.whl → 0.1.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (29)
  1. llama_cloud/__init__.py +10 -4
  2. llama_cloud/environment.py +1 -1
  3. llama_cloud/resources/chat_apps/client.py +20 -0
  4. llama_cloud/resources/llama_extract/client.py +98 -6
  5. llama_cloud/resources/parsing/client.py +8 -0
  6. llama_cloud/resources/pipelines/client.py +4 -4
  7. llama_cloud/resources/retrievers/client.py +41 -8
  8. llama_cloud/types/__init__.py +10 -4
  9. llama_cloud/types/base_plan.py +3 -0
  10. llama_cloud/types/base_plan_name.py +12 -0
  11. llama_cloud/types/cloud_confluence_data_source.py +1 -0
  12. llama_cloud/types/extract_mode.py +8 -4
  13. llama_cloud/types/extract_run.py +1 -0
  14. llama_cloud/types/llama_extract_settings.py +1 -0
  15. llama_cloud/types/llama_parse_parameters.py +1 -0
  16. llama_cloud/types/parsing_mode.py +12 -0
  17. llama_cloud/types/pipeline_file.py +2 -1
  18. llama_cloud/types/pipeline_file_status.py +33 -0
  19. llama_cloud/types/plan_limits.py +1 -0
  20. llama_cloud/types/preset_composite_retrieval_params.py +4 -2
  21. llama_cloud/types/re_rank_config.py +35 -0
  22. llama_cloud/types/re_ranker_type.py +41 -0
  23. llama_cloud/types/usage_and_plan.py +2 -2
  24. llama_cloud/types/{usage.py → usage_response.py} +3 -3
  25. llama_cloud/types/{usage_active_alerts_item.py → usage_response_active_alerts_item.py} +8 -4
  26. {llama_cloud-0.1.16.dist-info → llama_cloud-0.1.17.dist-info}/METADATA +1 -1
  27. {llama_cloud-0.1.16.dist-info → llama_cloud-0.1.17.dist-info}/RECORD +29 -26
  28. {llama_cloud-0.1.16.dist-info → llama_cloud-0.1.17.dist-info}/LICENSE +0 -0
  29. {llama_cloud-0.1.16.dist-info → llama_cloud-0.1.17.dist-info}/WHEEL +0 -0
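
In summary, 0.1.17 adds composite-retrieval rerank configuration (ReRankConfig / ReRankerType) to the retrievers and chat-apps clients, a from_ui flag and a latest-run-from-ui endpoint to LlamaExtract, a preset parsing parameter plus three new ParsingMode values, a typed PipelineFileStatus, v1 plan names and new billing fields, and an ExtractMode.BALANCED mode. It renames Usage to UsageResponse (with UsageActiveAlertsItem following suit) and add_files_to_pipeline to add_files_to_pipeline_api, and drops the trailing slash from the default API base URL.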
llama_cloud/__init__.py CHANGED

@@ -234,6 +234,7 @@ from .types import (
     PipelineFileCustomMetadataValue,
     PipelineFilePermissionInfoValue,
     PipelineFileResourceInfoValue,
+    PipelineFileStatus,
     PipelineTransformConfig,
     PipelineTransformConfig_Advanced,
     PipelineTransformConfig_Auto,
@@ -250,6 +251,8 @@ from .types import (
     ProjectCreate,
     PromptConf,
     PydanticProgramMode,
+    ReRankConfig,
+    ReRankerType,
     RecurringCreditGrant,
     RelatedNodeInfo,
     RelatedNodeInfoNodeType,
@@ -293,10 +296,10 @@ from .types import (
     TokenChunkingConfig,
     TokenTextSplitter,
     TransformationCategoryNames,
-    Usage,
-    UsageActiveAlertsItem,
     UsageAndPlan,
     UsageMetricResponse,
+    UsageResponse,
+    UsageResponseActiveAlertsItem,
     UserJobRecord,
     UserOrganization,
     UserOrganizationCreate,
@@ -602,6 +605,7 @@ __all__ = [
     "PipelineFileCustomMetadataValue",
     "PipelineFilePermissionInfoValue",
     "PipelineFileResourceInfoValue",
+    "PipelineFileStatus",
     "PipelineFileUpdateCustomMetadataValue",
     "PipelineTransformConfig",
     "PipelineTransformConfig_Advanced",
@@ -628,6 +632,8 @@ __all__ = [
     "ProjectCreate",
     "PromptConf",
     "PydanticProgramMode",
+    "ReRankConfig",
+    "ReRankerType",
     "RecurringCreditGrant",
     "RelatedNodeInfo",
     "RelatedNodeInfoNodeType",
@@ -673,10 +679,10 @@ __all__ = [
     "TransformationCategoryNames",
     "UnprocessableEntityError",
     "UpdateReportPlanApiV1ReportsReportIdPlanPatchRequestAction",
-    "Usage",
-    "UsageActiveAlertsItem",
     "UsageAndPlan",
     "UsageMetricResponse",
+    "UsageResponse",
+    "UsageResponseActiveAlertsItem",
     "UserJobRecord",
     "UserOrganization",
     "UserOrganizationCreate",
llama_cloud/environment.py CHANGED

@@ -4,4 +4,4 @@ import enum


 class LlamaCloudEnvironment(enum.Enum):
-    DEFAULT = "https://api.cloud.llamaindex.ai/"
+    DEFAULT = "https://api.cloud.llamaindex.ai"
llama_cloud/resources/chat_apps/client.py CHANGED

@@ -95,6 +95,8 @@ class ChatAppsClient:
            CompositeRetrievalMode,
            LlmParameters,
            PresetCompositeRetrievalParams,
+           ReRankConfig,
+           ReRankerType,
            SupportedLlmModelNames,
        )
        from llama_cloud.client import LlamaCloud
@@ -110,6 +112,9 @@ class ChatAppsClient:
            ),
            retrieval_config=PresetCompositeRetrievalParams(
                mode=CompositeRetrievalMode.ROUTING,
+               rerank_config=ReRankConfig(
+                   type=ReRankerType.SYSTEM_DEFAULT,
+               ),
            ),
        )
        """
@@ -200,6 +205,8 @@ class ChatAppsClient:
            CompositeRetrievalMode,
            LlmParameters,
            PresetCompositeRetrievalParams,
+           ReRankConfig,
+           ReRankerType,
            SupportedLlmModelNames,
        )
        from llama_cloud.client import LlamaCloud
@@ -214,6 +221,9 @@ class ChatAppsClient:
            ),
            retrieval_config=PresetCompositeRetrievalParams(
                mode=CompositeRetrievalMode.ROUTING,
+               rerank_config=ReRankConfig(
+                   type=ReRankerType.SYSTEM_DEFAULT,
+               ),
            ),
        )
        """
@@ -378,6 +388,8 @@ class AsyncChatAppsClient:
            CompositeRetrievalMode,
            LlmParameters,
            PresetCompositeRetrievalParams,
+           ReRankConfig,
+           ReRankerType,
            SupportedLlmModelNames,
        )
        from llama_cloud.client import AsyncLlamaCloud
@@ -393,6 +405,9 @@ class AsyncChatAppsClient:
            ),
            retrieval_config=PresetCompositeRetrievalParams(
                mode=CompositeRetrievalMode.ROUTING,
+               rerank_config=ReRankConfig(
+                   type=ReRankerType.SYSTEM_DEFAULT,
+               ),
            ),
        )
        """
@@ -483,6 +498,8 @@ class AsyncChatAppsClient:
            CompositeRetrievalMode,
            LlmParameters,
            PresetCompositeRetrievalParams,
+           ReRankConfig,
+           ReRankerType,
            SupportedLlmModelNames,
        )
        from llama_cloud.client import AsyncLlamaCloud
@@ -497,6 +514,9 @@ class AsyncChatAppsClient:
            ),
            retrieval_config=PresetCompositeRetrievalParams(
                mode=CompositeRetrievalMode.ROUTING,
+               rerank_config=ReRankConfig(
+                   type=ReRankerType.SYSTEM_DEFAULT,
+               ),
            ),
        )
        """
llama_cloud/resources/llama_extract/client.py CHANGED

@@ -336,9 +336,11 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def run_job(self, *, request: ExtractJobCreate) -> ExtractJob:
+    def run_job(self, *, from_ui: typing.Optional[bool] = None, request: ExtractJobCreate) -> ExtractJob:
         """
         Parameters:
+            - from_ui: typing.Optional[bool].
+
             - request: ExtractJobCreate.
         ---
         from llama_cloud import (
@@ -366,6 +368,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -411,10 +414,16 @@ class LlamaExtractClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)

     def run_job_test_user(
-        self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
+        self,
+        *,
+        from_ui: typing.Optional[bool] = None,
+        job_create: ExtractJobCreate,
+        extract_settings: typing.Optional[LlamaExtractSettings] = OMIT,
     ) -> ExtractJob:
         """
         Parameters:
+            - from_ui: typing.Optional[bool].
+
             - job_create: ExtractJobCreate.

             - extract_settings: typing.Optional[LlamaExtractSettings].
@@ -457,6 +466,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/test"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(_request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -471,9 +481,13 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def run_batch_jobs(self, *, request: ExtractJobCreateBatch) -> typing.List[ExtractJob]:
+    def run_batch_jobs(
+        self, *, from_ui: typing.Optional[bool] = None, request: ExtractJobCreateBatch
+    ) -> typing.List[ExtractJob]:
         """
         Parameters:
+            - from_ui: typing.Optional[bool].
+
             - request: ExtractJobCreateBatch.
         ---
         from llama_cloud import (
@@ -501,6 +515,7 @@ class LlamaExtractClient:
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/batch"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -582,6 +597,37 @@ class LlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def get_latest_run_from_ui(self, *, extraction_agent_id: str) -> typing.Optional[ExtractRun]:
+        """
+        Parameters:
+            - extraction_agent_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.llama_extract.get_latest_run_from_ui(
+            extraction_agent_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/runs/latest-from-ui"),
+            params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Optional[ExtractRun], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def get_run_by_job_id(self, job_id: str) -> ExtractRun:
         """
         Parameters:
@@ -976,9 +1022,11 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def run_job(self, *, request: ExtractJobCreate) -> ExtractJob:
+    async def run_job(self, *, from_ui: typing.Optional[bool] = None, request: ExtractJobCreate) -> ExtractJob:
         """
         Parameters:
+            - from_ui: typing.Optional[bool].
+
             - request: ExtractJobCreate.
         ---
         from llama_cloud import (
@@ -1006,6 +1054,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1051,10 +1100,16 @@ class AsyncLlamaExtractClient:
         raise ApiError(status_code=_response.status_code, body=_response_json)

     async def run_job_test_user(
-        self, *, job_create: ExtractJobCreate, extract_settings: typing.Optional[LlamaExtractSettings] = OMIT
+        self,
+        *,
+        from_ui: typing.Optional[bool] = None,
+        job_create: ExtractJobCreate,
+        extract_settings: typing.Optional[LlamaExtractSettings] = OMIT,
     ) -> ExtractJob:
         """
         Parameters:
+            - from_ui: typing.Optional[bool].
+
             - job_create: ExtractJobCreate.

             - extract_settings: typing.Optional[LlamaExtractSettings].
@@ -1097,6 +1152,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/test"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(_request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1111,9 +1167,13 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def run_batch_jobs(self, *, request: ExtractJobCreateBatch) -> typing.List[ExtractJob]:
+    async def run_batch_jobs(
+        self, *, from_ui: typing.Optional[bool] = None, request: ExtractJobCreateBatch
+    ) -> typing.List[ExtractJob]:
         """
         Parameters:
+            - from_ui: typing.Optional[bool].
+
             - request: ExtractJobCreateBatch.
         ---
         from llama_cloud import (
@@ -1141,6 +1201,7 @@ class AsyncLlamaExtractClient:
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/jobs/batch"),
+            params=remove_none_from_dict({"from_ui": from_ui}),
             json=jsonable_encoder(request),
             headers=self._client_wrapper.get_headers(),
             timeout=60,
@@ -1222,6 +1283,37 @@ class AsyncLlamaExtractClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def get_latest_run_from_ui(self, *, extraction_agent_id: str) -> typing.Optional[ExtractRun]:
+        """
+        Parameters:
+            - extraction_agent_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.llama_extract.get_latest_run_from_ui(
+            extraction_agent_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/runs/latest-from-ui"),
+            params=remove_none_from_dict({"extraction_agent_id": extraction_agent_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Optional[ExtractRun], _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def get_run_by_job_id(self, job_id: str) -> ExtractRun:
         """
         Parameters:
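
Taken together, the new LlamaExtract surface looks like this on the sync client; a sketch based on the signatures above, with placeholder ids (the ExtractJobCreate fields shown are assumptions, check the model for the exact ones):

    from llama_cloud import ExtractJobCreate
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")

    # from_ui is an optional query parameter; None is stripped out by
    # remove_none_from_dict, so omitting it leaves the request unchanged.
    job = client.llama_extract.run_job(
        from_ui=False,
        request=ExtractJobCreate(extraction_agent_id="string", file_id="string"),  # fields assumed
    )

    # New endpoint: the most recent UI-triggered run for an agent, or None
    # when there is none (note the typing.Optional[ExtractRun] return type).
    latest = client.llama_extract.get_latest_run_from_ui(
        extraction_agent_id="string",
    )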
llama_cloud/resources/parsing/client.py CHANGED

@@ -248,6 +248,7 @@ class ParsingClient:
         vendor_multimodal_model_name: str,
         model: str,
         webhook_url: str,
+        preset: str,
         parse_mode: typing.Optional[ParsingMode] = OMIT,
         system_prompt: str,
         system_prompt_append: str,
@@ -385,6 +386,8 @@ class ParsingClient:

             - webhook_url: str.

+            - preset: str.
+
             - parse_mode: typing.Optional[ParsingMode].

             - system_prompt: str.
@@ -484,6 +487,7 @@ class ParsingClient:
             "vendor_multimodal_model_name": vendor_multimodal_model_name,
             "model": model,
             "webhook_url": webhook_url,
+            "preset": preset,
             "system_prompt": system_prompt,
             "system_prompt_append": system_prompt_append,
             "user_prompt": user_prompt,
@@ -1224,6 +1228,7 @@ class AsyncParsingClient:
         vendor_multimodal_model_name: str,
         model: str,
         webhook_url: str,
+        preset: str,
         parse_mode: typing.Optional[ParsingMode] = OMIT,
         system_prompt: str,
         system_prompt_append: str,
@@ -1361,6 +1366,8 @@ class AsyncParsingClient:

             - webhook_url: str.

+            - preset: str.
+
             - parse_mode: typing.Optional[ParsingMode].

             - system_prompt: str.
@@ -1460,6 +1467,7 @@ class AsyncParsingClient:
             "vendor_multimodal_model_name": vendor_multimodal_model_name,
             "model": model,
             "webhook_url": webhook_url,
+            "preset": preset,
             "system_prompt": system_prompt,
             "system_prompt_append": system_prompt_append,
             "user_prompt": user_prompt,
llama_cloud/resources/pipelines/client.py CHANGED

@@ -465,7 +465,7 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def add_files_to_pipeline(
+    def add_files_to_pipeline_api(
         self, pipeline_id: str, *, request: typing.List[PipelineFileCreate]
     ) -> typing.List[PipelineFile]:
         """
@@ -481,7 +481,7 @@ class PipelinesClient:
        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
-       client.pipelines.add_files_to_pipeline(
+       client.pipelines.add_files_to_pipeline_api(
            pipeline_id="string",
            request=[],
        )
@@ -2020,7 +2020,7 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def add_files_to_pipeline(
+    async def add_files_to_pipeline_api(
         self, pipeline_id: str, *, request: typing.List[PipelineFileCreate]
     ) -> typing.List[PipelineFile]:
         """
@@ -2036,7 +2036,7 @@ class AsyncPipelinesClient:
        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
-       await client.pipelines.add_files_to_pipeline(
+       await client.pipelines.add_files_to_pipeline_api(
            pipeline_id="string",
            request=[],
        )
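
The rename from add_files_to_pipeline to add_files_to_pipeline_api is a breaking change for existing callers; per the updated docstring example, a migrated call site looks like:

    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    client.pipelines.add_files_to_pipeline_api(  # was: add_files_to_pipeline
        pipeline_id="string",
        request=[],
    )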
llama_cloud/resources/retrievers/client.py CHANGED

@@ -12,6 +12,7 @@ from ...errors.unprocessable_entity_error import UnprocessableEntityError
 from ...types.composite_retrieval_mode import CompositeRetrievalMode
 from ...types.composite_retrieval_result import CompositeRetrievalResult
 from ...types.http_validation_error import HttpValidationError
+from ...types.re_rank_config import ReRankConfig
 from ...types.retriever import Retriever
 from ...types.retriever_create import RetrieverCreate
 from ...types.retriever_pipeline import RetrieverPipeline
@@ -296,6 +297,7 @@ class RetrieversClient:
         *,
         mode: typing.Optional[CompositeRetrievalMode] = OMIT,
         rerank_top_n: typing.Optional[int] = OMIT,
+        rerank_config: typing.Optional[ReRankConfig] = OMIT,
         query: str,
     ) -> CompositeRetrievalResult:
         """
@@ -306,11 +308,13 @@ class RetrieversClient:

             - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.

-            - rerank_top_n: typing.Optional[int]. The number of nodes to retrieve after reranking over retrieved nodes from all retrieval tools.
+            - rerank_top_n: typing.Optional[int].
+
+            - rerank_config: typing.Optional[ReRankConfig]. The rerank configuration for composite retrieval.

             - query: str. The query to retrieve against.
         ---
-        from llama_cloud import CompositeRetrievalMode
+        from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
@@ -319,6 +323,9 @@ class RetrieversClient:
         client.retrievers.retrieve(
             retriever_id="string",
             mode=CompositeRetrievalMode.ROUTING,
+            rerank_config=ReRankConfig(
+                type=ReRankerType.SYSTEM_DEFAULT,
+            ),
             query="string",
         )
         """
@@ -327,6 +334,8 @@ class RetrieversClient:
             _request["mode"] = mode
         if rerank_top_n is not OMIT:
             _request["rerank_top_n"] = rerank_top_n
+        if rerank_config is not OMIT:
+            _request["rerank_config"] = rerank_config
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(
@@ -353,6 +362,7 @@ class RetrieversClient:
         organization_id: typing.Optional[str] = None,
         mode: typing.Optional[CompositeRetrievalMode] = OMIT,
         rerank_top_n: typing.Optional[int] = OMIT,
+        rerank_config: typing.Optional[ReRankConfig] = OMIT,
         query: str,
         pipelines: typing.Optional[typing.List[RetrieverPipeline]] = OMIT,
     ) -> CompositeRetrievalResult:
@@ -366,13 +376,15 @@ class RetrieversClient:

             - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.

-            - rerank_top_n: typing.Optional[int]. The number of nodes to retrieve after reranking over retrieved nodes from all retrieval tools.
+            - rerank_top_n: typing.Optional[int].
+
+            - rerank_config: typing.Optional[ReRankConfig]. The rerank configuration for composite retrieval.

             - query: str. The query to retrieve against.

             - pipelines: typing.Optional[typing.List[RetrieverPipeline]]. The pipelines to use for retrieval.
         ---
-        from llama_cloud import CompositeRetrievalMode
+        from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
@@ -380,6 +392,9 @@ class RetrieversClient:
         )
         client.retrievers.direct_retrieve(
             mode=CompositeRetrievalMode.ROUTING,
+            rerank_config=ReRankConfig(
+                type=ReRankerType.SYSTEM_DEFAULT,
+            ),
             query="string",
         )
         """
@@ -388,6 +403,8 @@ class RetrieversClient:
             _request["mode"] = mode
         if rerank_top_n is not OMIT:
             _request["rerank_top_n"] = rerank_top_n
+        if rerank_config is not OMIT:
+            _request["rerank_config"] = rerank_config
         if pipelines is not OMIT:
             _request["pipelines"] = pipelines
         _response = self._client_wrapper.httpx_client.request(
@@ -677,6 +694,7 @@ class AsyncRetrieversClient:
         *,
         mode: typing.Optional[CompositeRetrievalMode] = OMIT,
         rerank_top_n: typing.Optional[int] = OMIT,
+        rerank_config: typing.Optional[ReRankConfig] = OMIT,
         query: str,
     ) -> CompositeRetrievalResult:
         """
@@ -687,11 +705,13 @@ class AsyncRetrieversClient:

             - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.

-            - rerank_top_n: typing.Optional[int]. The number of nodes to retrieve after reranking over retrieved nodes from all retrieval tools.
+            - rerank_top_n: typing.Optional[int].
+
+            - rerank_config: typing.Optional[ReRankConfig]. The rerank configuration for composite retrieval.

             - query: str. The query to retrieve against.
         ---
-        from llama_cloud import CompositeRetrievalMode
+        from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
@@ -700,6 +720,9 @@ class AsyncRetrieversClient:
         await client.retrievers.retrieve(
             retriever_id="string",
             mode=CompositeRetrievalMode.ROUTING,
+            rerank_config=ReRankConfig(
+                type=ReRankerType.SYSTEM_DEFAULT,
+            ),
             query="string",
         )
         """
@@ -708,6 +731,8 @@ class AsyncRetrieversClient:
             _request["mode"] = mode
         if rerank_top_n is not OMIT:
             _request["rerank_top_n"] = rerank_top_n
+        if rerank_config is not OMIT:
+            _request["rerank_config"] = rerank_config
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(
@@ -734,6 +759,7 @@ class AsyncRetrieversClient:
         organization_id: typing.Optional[str] = None,
         mode: typing.Optional[CompositeRetrievalMode] = OMIT,
         rerank_top_n: typing.Optional[int] = OMIT,
+        rerank_config: typing.Optional[ReRankConfig] = OMIT,
         query: str,
         pipelines: typing.Optional[typing.List[RetrieverPipeline]] = OMIT,
     ) -> CompositeRetrievalResult:
@@ -747,13 +773,15 @@ class AsyncRetrieversClient:

             - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.

-            - rerank_top_n: typing.Optional[int]. The number of nodes to retrieve after reranking over retrieved nodes from all retrieval tools.
+            - rerank_top_n: typing.Optional[int].
+
+            - rerank_config: typing.Optional[ReRankConfig]. The rerank configuration for composite retrieval.

             - query: str. The query to retrieve against.

             - pipelines: typing.Optional[typing.List[RetrieverPipeline]]. The pipelines to use for retrieval.
         ---
-        from llama_cloud import CompositeRetrievalMode
+        from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
@@ -761,6 +789,9 @@ class AsyncRetrieversClient:
         )
         await client.retrievers.direct_retrieve(
             mode=CompositeRetrievalMode.ROUTING,
+            rerank_config=ReRankConfig(
+                type=ReRankerType.SYSTEM_DEFAULT,
+            ),
             query="string",
         )
         """
@@ -769,6 +800,8 @@ class AsyncRetrieversClient:
             _request["mode"] = mode
         if rerank_top_n is not OMIT:
             _request["rerank_top_n"] = rerank_top_n
+        if rerank_config is not OMIT:
+            _request["rerank_config"] = rerank_config
         if pipelines is not OMIT:
             _request["pipelines"] = pipelines
         _response = await self._client_wrapper.httpx_client.request(
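
Since ReRankConfig carries both the reranker type and its own top_n, the old top-level rerank_top_n (kept, but its description moved to ReRankConfig.top_n) can migrate into the config; a sketch:

    from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    result = client.retrievers.direct_retrieve(
        mode=CompositeRetrievalMode.ROUTING,
        rerank_config=ReRankConfig(
            type=ReRankerType.SYSTEM_DEFAULT,  # or LLM, COHERE, BEDROCK, SCORE, DISABLED
            top_n=5,  # replaces the old top-level rerank_top_n
        ),
        query="string",
    )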
llama_cloud/types/__init__.py CHANGED

@@ -247,6 +247,7 @@ from .pipeline_file_create_custom_metadata_value import PipelineFileCreateCustom
 from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
 from .pipeline_file_permission_info_value import PipelineFilePermissionInfoValue
 from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
+from .pipeline_file_status import PipelineFileStatus
 from .pipeline_transform_config import (
     PipelineTransformConfig,
     PipelineTransformConfig_Advanced,
@@ -265,6 +266,8 @@ from .project import Project
 from .project_create import ProjectCreate
 from .prompt_conf import PromptConf
 from .pydantic_program_mode import PydanticProgramMode
+from .re_rank_config import ReRankConfig
+from .re_ranker_type import ReRankerType
 from .recurring_credit_grant import RecurringCreditGrant
 from .related_node_info import RelatedNodeInfo
 from .related_node_info_node_type import RelatedNodeInfoNodeType
@@ -310,10 +313,10 @@ from .text_node_with_score import TextNodeWithScore
 from .token_chunking_config import TokenChunkingConfig
 from .token_text_splitter import TokenTextSplitter
 from .transformation_category_names import TransformationCategoryNames
-from .usage import Usage
-from .usage_active_alerts_item import UsageActiveAlertsItem
 from .usage_and_plan import UsageAndPlan
 from .usage_metric_response import UsageMetricResponse
+from .usage_response import UsageResponse
+from .usage_response_active_alerts_item import UsageResponseActiveAlertsItem
 from .user_job_record import UserJobRecord
 from .user_organization import UserOrganization
 from .user_organization_create import UserOrganizationCreate
@@ -559,6 +562,7 @@ __all__ = [
     "PipelineFileCustomMetadataValue",
     "PipelineFilePermissionInfoValue",
     "PipelineFileResourceInfoValue",
+    "PipelineFileStatus",
     "PipelineTransformConfig",
     "PipelineTransformConfig_Advanced",
     "PipelineTransformConfig_Auto",
@@ -575,6 +579,8 @@ __all__ = [
     "ProjectCreate",
     "PromptConf",
     "PydanticProgramMode",
+    "ReRankConfig",
+    "ReRankerType",
     "RecurringCreditGrant",
     "RelatedNodeInfo",
     "RelatedNodeInfoNodeType",
@@ -618,10 +624,10 @@ __all__ = [
     "TokenChunkingConfig",
     "TokenTextSplitter",
     "TransformationCategoryNames",
-    "Usage",
-    "UsageActiveAlertsItem",
     "UsageAndPlan",
     "UsageMetricResponse",
+    "UsageResponse",
+    "UsageResponseActiveAlertsItem",
     "UserJobRecord",
     "UserOrganization",
     "UserOrganizationCreate",
llama_cloud/types/base_plan.py CHANGED

@@ -32,6 +32,9 @@ class BasePlan(pydantic.BaseModel):
     starting_on: typing.Optional[dt.datetime]
     ending_before: typing.Optional[dt.datetime]
     current_billing_period: typing.Optional[BillingPeriod]
+    is_payment_failed: typing.Optional[bool] = pydantic.Field(
+        description="Whether the organization has a failed payment that requires support contact"
+    )

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/base_plan_name.py CHANGED

@@ -15,6 +15,9 @@ class BasePlanName(str, enum.Enum):
     PRO = "pro"
     ENTERPRISE_CONTRACT = "enterprise_contract"
     ENTERPRISE_POC = "enterprise_poc"
+    FREE_V_1 = "free_v1"
+    STARTER_V_1 = "starter_v1"
+    PRO_V_1 = "pro_v1"

     def visit(
         self,
@@ -26,6 +29,9 @@ class BasePlanName(str, enum.Enum):
         pro: typing.Callable[[], T_Result],
         enterprise_contract: typing.Callable[[], T_Result],
         enterprise_poc: typing.Callable[[], T_Result],
+        free_v_1: typing.Callable[[], T_Result],
+        starter_v_1: typing.Callable[[], T_Result],
+        pro_v_1: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is BasePlanName.FREE:
             return free()
@@ -43,3 +49,9 @@ class BasePlanName(str, enum.Enum):
             return enterprise_contract()
         if self is BasePlanName.ENTERPRISE_POC:
             return enterprise_poc()
+        if self is BasePlanName.FREE_V_1:
+            return free_v_1()
+        if self is BasePlanName.STARTER_V_1:
+            return starter_v_1()
+        if self is BasePlanName.PRO_V_1:
+            return pro_v_1()
llama_cloud/types/cloud_confluence_data_source.py CHANGED

@@ -26,6 +26,7 @@ class CloudConfluenceDataSource(pydantic.BaseModel):
     page_ids: typing.Optional[str]
     cql: typing.Optional[str]
     label: typing.Optional[str]
+    index_restricted_pages: typing.Optional[bool] = pydantic.Field(description="Whether to index restricted pages.")
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
@@ -8,18 +8,22 @@ T_Result = typing.TypeVar("T_Result")
8
8
 
9
9
  class ExtractMode(str, enum.Enum):
10
10
  FAST = "FAST"
11
- ACCURATE = "ACCURATE"
11
+ BALANCED = "BALANCED"
12
12
  MULTIMODAL = "MULTIMODAL"
13
+ ACCURATE = "ACCURATE"
13
14
 
14
15
  def visit(
15
16
  self,
16
17
  fast: typing.Callable[[], T_Result],
17
- accurate: typing.Callable[[], T_Result],
18
+ balanced: typing.Callable[[], T_Result],
18
19
  multimodal: typing.Callable[[], T_Result],
20
+ accurate: typing.Callable[[], T_Result],
19
21
  ) -> T_Result:
20
22
  if self is ExtractMode.FAST:
21
23
  return fast()
22
- if self is ExtractMode.ACCURATE:
23
- return accurate()
24
+ if self is ExtractMode.BALANCED:
25
+ return balanced()
24
26
  if self is ExtractMode.MULTIMODAL:
25
27
  return multimodal()
28
+ if self is ExtractMode.ACCURATE:
29
+ return accurate()
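
BALANCED is new and ACCURATE survives the reordering, but the change still breaks callers of visit(), which dispatches exhaustively over keyword handlers; a sketch of an updated call site:

    from llama_cloud import ExtractMode

    label = ExtractMode.BALANCED.visit(
        fast=lambda: "FAST",
        balanced=lambda: "BALANCED",  # new required handler in 0.1.17
        multimodal=lambda: "MULTIMODAL",
        accurate=lambda: "ACCURATE",
    )
    assert label == "BALANCED"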
llama_cloud/types/extract_run.py CHANGED

@@ -39,6 +39,7 @@ class ExtractRun(pydantic.BaseModel):
     job_id: typing.Optional[str]
     data: typing.Optional[ExtractRunData] = pydantic.Field(description="The data extracted from the file")
     extraction_metadata: typing.Optional[typing.Dict[str, typing.Optional[ExtractRunExtractionMetadataValue]]]
+    from_ui: bool = pydantic.Field(description="Whether this extraction run was triggered from the UI")

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/llama_extract_settings.py CHANGED

@@ -39,6 +39,7 @@ class LlamaExtractSettings(pydantic.BaseModel):
     extraction_agent_config: typing.Optional[typing.Dict[str, StructParseConf]] = pydantic.Field(
         description="The configuration for the extraction agent."
     )
+    use_multimodal_extraction: typing.Optional[bool]
     llama_parse_params: typing.Optional[LlamaParseParameters] = pydantic.Field(
         description="LlamaParse related settings."
     )
llama_cloud/types/llama_parse_parameters.py CHANGED

@@ -56,6 +56,7 @@ class LlamaParseParameters(pydantic.BaseModel):
     page_prefix: typing.Optional[str]
     page_suffix: typing.Optional[str]
     webhook_url: typing.Optional[str]
+    preset: typing.Optional[str]
     take_screenshot: typing.Optional[bool]
     is_formatting_instruction: typing.Optional[bool]
     premium_mode: typing.Optional[bool]
llama_cloud/types/parsing_mode.py CHANGED

@@ -15,7 +15,10 @@ class ParsingMode(str, enum.Enum):
     PARSE_PAGE_WITH_LLM = "parse_page_with_llm"
     PARSE_PAGE_WITH_LVM = "parse_page_with_lvm"
     PARSE_PAGE_WITH_AGENT = "parse_page_with_agent"
+    PARSE_PAGE_WITH_LAYOUT_AGENT = "parse_page_with_layout_agent"
     PARSE_DOCUMENT_WITH_LLM = "parse_document_with_llm"
+    PARSE_DOCUMENT_WITH_LVM = "parse_document_with_lvm"
+    PARSE_DOCUMENT_WITH_AGENT = "parse_document_with_agent"

     def visit(
         self,
@@ -23,7 +26,10 @@ class ParsingMode(str, enum.Enum):
         parse_page_with_llm: typing.Callable[[], T_Result],
         parse_page_with_lvm: typing.Callable[[], T_Result],
         parse_page_with_agent: typing.Callable[[], T_Result],
+        parse_page_with_layout_agent: typing.Callable[[], T_Result],
         parse_document_with_llm: typing.Callable[[], T_Result],
+        parse_document_with_lvm: typing.Callable[[], T_Result],
+        parse_document_with_agent: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is ParsingMode.PARSE_PAGE_WITHOUT_LLM:
             return parse_page_without_llm()
@@ -33,5 +39,11 @@ class ParsingMode(str, enum.Enum):
             return parse_page_with_lvm()
         if self is ParsingMode.PARSE_PAGE_WITH_AGENT:
             return parse_page_with_agent()
+        if self is ParsingMode.PARSE_PAGE_WITH_LAYOUT_AGENT:
+            return parse_page_with_layout_agent()
         if self is ParsingMode.PARSE_DOCUMENT_WITH_LLM:
             return parse_document_with_llm()
+        if self is ParsingMode.PARSE_DOCUMENT_WITH_LVM:
+            return parse_document_with_lvm()
+        if self is ParsingMode.PARSE_DOCUMENT_WITH_AGENT:
+            return parse_document_with_agent()
llama_cloud/types/pipeline_file.py CHANGED

@@ -8,6 +8,7 @@ from .pipeline_file_config_hash_value import PipelineFileConfigHashValue
 from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
 from .pipeline_file_permission_info_value import PipelineFilePermissionInfoValue
 from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
+from .pipeline_file_status import PipelineFileStatus

 try:
     import pydantic
@@ -40,7 +41,7 @@ class PipelineFile(pydantic.BaseModel):
     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileCustomMetadataValue]]]
     config_hash: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileConfigHashValue]]]
     indexed_page_count: typing.Optional[int]
-    status: typing.Optional[str]
+    status: typing.Optional[PipelineFileStatus]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_file_status.py ADDED

@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class PipelineFileStatus(str, enum.Enum):
+    NOT_STARTED = "NOT_STARTED"
+    IN_PROGRESS = "IN_PROGRESS"
+    SUCCESS = "SUCCESS"
+    ERROR = "ERROR"
+    CANCELLED = "CANCELLED"
+
+    def visit(
+        self,
+        not_started: typing.Callable[[], T_Result],
+        in_progress: typing.Callable[[], T_Result],
+        success: typing.Callable[[], T_Result],
+        error: typing.Callable[[], T_Result],
+        cancelled: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is PipelineFileStatus.NOT_STARTED:
+            return not_started()
+        if self is PipelineFileStatus.IN_PROGRESS:
+            return in_progress()
+        if self is PipelineFileStatus.SUCCESS:
+            return success()
+        if self is PipelineFileStatus.ERROR:
+            return error()
+        if self is PipelineFileStatus.CANCELLED:
+            return cancelled()
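
PipelineFile.status tightens from a bare str to this enum. Because PipelineFileStatus subclasses str, comparisons against the old string values keep working, and visit() gives exhaustive handling; a sketch:

    from llama_cloud import PipelineFileStatus

    status = PipelineFileStatus.IN_PROGRESS
    assert status == "IN_PROGRESS"  # str subclass: old string comparisons still hold

    message = status.visit(
        not_started=lambda: "queued",
        in_progress=lambda: "indexing",
        success=lambda: "done",
        error=lambda: "failed",
        cancelled=lambda: "cancelled",
    )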
llama_cloud/types/plan_limits.py CHANGED

@@ -18,6 +18,7 @@ class PlanLimits(pydantic.BaseModel):
     allow_pay_as_you_go: bool = pydantic.Field(description="Whether usage is allowed after credit grants are exhausted")
     subscription_cost_usd: int
     max_monthly_invoice_total_usd: typing.Optional[int]
+    spending_soft_alerts_usd_cents: typing.Optional[typing.List[int]]
     max_concurrent_parse_jobs_premium: typing.Optional[int]
     max_concurrent_parse_jobs_other: typing.Optional[int]
     max_extraction_agents: typing.Optional[int]
llama_cloud/types/preset_composite_retrieval_params.py CHANGED

@@ -5,6 +5,7 @@ import typing

 from ..core.datetime_utils import serialize_datetime
 from .composite_retrieval_mode import CompositeRetrievalMode
+from .re_rank_config import ReRankConfig

 try:
     import pydantic
@@ -17,8 +18,9 @@ except ImportError:

 class PresetCompositeRetrievalParams(pydantic.BaseModel):
     mode: typing.Optional[CompositeRetrievalMode] = pydantic.Field(description="The mode of composite retrieval.")
-    rerank_top_n: typing.Optional[int] = pydantic.Field(
-        description="The number of nodes to retrieve after reranking over retrieved nodes from all retrieval tools."
+    rerank_top_n: typing.Optional[int]
+    rerank_config: typing.Optional[ReRankConfig] = pydantic.Field(
+        description="The rerank configuration for composite retrieval."
     )

     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/re_rank_config.py ADDED

@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .re_ranker_type import ReRankerType
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ReRankConfig(pydantic.BaseModel):
+    top_n: typing.Optional[int] = pydantic.Field(
+        description="The number of nodes to retrieve after reranking over retrieved nodes from all retrieval tools."
+    )
+    type: typing.Optional[ReRankerType] = pydantic.Field(description="The type of reranker to use.")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
llama_cloud/types/re_ranker_type.py ADDED

@@ -0,0 +1,41 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class ReRankerType(str, enum.Enum):
+    """
+    Enum for the reranker type.
+    """
+
+    SYSTEM_DEFAULT = "system_default"
+    LLM = "llm"
+    COHERE = "cohere"
+    BEDROCK = "bedrock"
+    SCORE = "score"
+    DISABLED = "disabled"
+
+    def visit(
+        self,
+        system_default: typing.Callable[[], T_Result],
+        llm: typing.Callable[[], T_Result],
+        cohere: typing.Callable[[], T_Result],
+        bedrock: typing.Callable[[], T_Result],
+        score: typing.Callable[[], T_Result],
+        disabled: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is ReRankerType.SYSTEM_DEFAULT:
+            return system_default()
+        if self is ReRankerType.LLM:
+            return llm()
+        if self is ReRankerType.COHERE:
+            return cohere()
+        if self is ReRankerType.BEDROCK:
+            return bedrock()
+        if self is ReRankerType.SCORE:
+            return score()
+        if self is ReRankerType.DISABLED:
+            return disabled()
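
Both new models serialize with by_alias/exclude_unset defaults (see the json()/dict() overrides above), so only fields you explicitly set go on the wire; a sketch, assuming pydantic's default JSON formatting:

    from llama_cloud import ReRankConfig, ReRankerType

    config = ReRankConfig(type=ReRankerType.DISABLED)
    print(config.json())  # {"type": "disabled"}  (top_n is omitted rather than sent as null)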
llama_cloud/types/usage_and_plan.py CHANGED

@@ -5,7 +5,7 @@ import typing

 from ..core.datetime_utils import serialize_datetime
 from .base_plan import BasePlan
-from .usage import Usage
+from .usage_response import UsageResponse

 try:
     import pydantic
@@ -18,7 +18,7 @@ except ImportError:

 class UsageAndPlan(pydantic.BaseModel):
     plan: BasePlan
-    usage: Usage
+    usage: UsageResponse

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/{usage.py → usage_response.py} RENAMED

@@ -5,7 +5,7 @@ import typing

 from ..core.datetime_utils import serialize_datetime
 from .free_credits_usage import FreeCreditsUsage
-from .usage_active_alerts_item import UsageActiveAlertsItem
+from .usage_response_active_alerts_item import UsageResponseActiveAlertsItem

 try:
     import pydantic
@@ -16,7 +16,7 @@ except ImportError:
     import pydantic  # type: ignore


-class Usage(pydantic.BaseModel):
+class UsageResponse(pydantic.BaseModel):
     """
     Response model
     """
@@ -25,7 +25,7 @@ class Usage(pydantic.BaseModel):
     total_users: typing.Optional[int]
     total_indexes: typing.Optional[int]
     total_indexed_pages: typing.Optional[int]
-    active_alerts: typing.Optional[typing.List[UsageActiveAlertsItem]]
+    active_alerts: typing.Optional[typing.List[UsageResponseActiveAlertsItem]]
     current_invoice_total_usd_cents: typing.Optional[int]
     total_extraction_agents: typing.Optional[int]
llama_cloud/types/{usage_active_alerts_item.py → usage_response_active_alerts_item.py} RENAMED

@@ -6,20 +6,24 @@ import typing
 T_Result = typing.TypeVar("T_Result")


-class UsageActiveAlertsItem(str, enum.Enum):
+class UsageResponseActiveAlertsItem(str, enum.Enum):
     PLAN_SPEND_LIMIT_EXCEEDED = "plan_spend_limit_exceeded"
+    PLAN_SPEND_LIMIT_SOFT_ALERT = "plan_spend_limit_soft_alert"
     CONFIGURED_SPEND_LIMIT_EXCEEDED = "configured_spend_limit_exceeded"
     FREE_CREDITS_EXHAUSTED = "free_credits_exhausted"

     def visit(
         self,
         plan_spend_limit_exceeded: typing.Callable[[], T_Result],
+        plan_spend_limit_soft_alert: typing.Callable[[], T_Result],
         configured_spend_limit_exceeded: typing.Callable[[], T_Result],
         free_credits_exhausted: typing.Callable[[], T_Result],
     ) -> T_Result:
-        if self is UsageActiveAlertsItem.PLAN_SPEND_LIMIT_EXCEEDED:
+        if self is UsageResponseActiveAlertsItem.PLAN_SPEND_LIMIT_EXCEEDED:
             return plan_spend_limit_exceeded()
-        if self is UsageActiveAlertsItem.CONFIGURED_SPEND_LIMIT_EXCEEDED:
+        if self is UsageResponseActiveAlertsItem.PLAN_SPEND_LIMIT_SOFT_ALERT:
+            return plan_spend_limit_soft_alert()
+        if self is UsageResponseActiveAlertsItem.CONFIGURED_SPEND_LIMIT_EXCEEDED:
             return configured_spend_limit_exceeded()
-        if self is UsageActiveAlertsItem.FREE_CREDITS_EXHAUSTED:
+        if self is UsageResponseActiveAlertsItem.FREE_CREDITS_EXHAUSTED:
             return free_credits_exhausted()
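
The rename cascades through UsageAndPlan.usage and the alert enum, and is breaking for direct imports; a sketch of a migrated call site using the new soft-alert value:

    from llama_cloud import UsageResponse, UsageResponseActiveAlertsItem  # were Usage, UsageActiveAlertsItem

    def has_soft_alert(usage: UsageResponse) -> bool:
        alerts = usage.active_alerts or []
        return UsageResponseActiveAlertsItem.PLAN_SPEND_LIMIT_SOFT_ALERT in alerts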
{llama_cloud-0.1.16.dist-info → llama_cloud-0.1.17.dist-info}/METADATA RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llama-cloud
-Version: 0.1.16
+Version: 0.1.17
 Summary:
 License: MIT
 Author: Logan Markewich
{llama_cloud-0.1.16.dist-info → llama_cloud-0.1.17.dist-info}/RECORD RENAMED

@@ -1,4 +1,4 @@
-llama_cloud/__init__.py,sha256=GsERaXUabzoc0F4eXn1nzIVnb9iuBaEMCgSyfYJ2TMQ,22569
+llama_cloud/__init__.py,sha256=g_a0ws6UELyKPRknXkioQRO8cW7WeK82-QGFi2gQBjI,22727
 llama_cloud/client.py,sha256=0fK6iRBCA77eSs0zFrYQj-zD0BLy6Dr2Ss0ETJ4WaOY,5555
 llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
 llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
@@ -6,12 +6,12 @@ llama_cloud/core/client_wrapper.py,sha256=xmj0jCdQ0ySzbSqHUWOkpRRy069y74I_HuXkWl
 llama_cloud/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
 llama_cloud/core/jsonable_encoder.py,sha256=OewL6HcVqdSMCYDWwN0tsh7BZasBeOJZytrAxkH977k,3891
 llama_cloud/core/remove_none_from_dict.py,sha256=8m91FC3YuVem0Gm9_sXhJ2tGvP33owJJdrqCLEdowGw,330
-llama_cloud/environment.py,sha256=q4q-uY5WgcSlzfHwEANOqFQPu0lstqvMnVOsSfifMKo,168
+llama_cloud/environment.py,sha256=feTjOebeFZMrBdnHat4RE5aHlpt-sJm4NhK4ntV1htI,167
 llama_cloud/errors/__init__.py,sha256=pbbVUFtB9LCocA1RMWMMF_RKjsy5YkOKX5BAuE49w6g,170
 llama_cloud/errors/unprocessable_entity_error.py,sha256=FvR7XPlV3Xx5nu8HNlmLhBRdk4so_gCHjYT5PyZe6sM,313
 llama_cloud/resources/__init__.py,sha256=h2kWef5KlC8qpr-1MJyIoFVCsNBidRUUUWztnsr9AHs,3298
 llama_cloud/resources/chat_apps/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/chat_apps/client.py,sha256=olEESaQB-fmQTIi1zfheTTUpt3iBw5-lTYVmiIX9u_s,22943
+llama_cloud/resources/chat_apps/client.py,sha256=r3URXWvgb_rGpiKbHLJXkE2OlOYb1g4LrWgzQeK3ivM,23619
 llama_cloud/resources/component_definitions/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/component_definitions/client.py,sha256=YYfoXNa1qim2OdD5y4N5mvoBZKtrCuXS560mtqH_-1c,7569
 llama_cloud/resources/data_sinks/__init__.py,sha256=ZHUjn3HbKhq_7QS1q74r2m5RGKF5lxcvF2P6pGvpcis,147
@@ -38,13 +38,13 @@ llama_cloud/resources/files/types/file_create_resource_info_value.py,sha256=R7Y-
 llama_cloud/resources/jobs/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/jobs/client.py,sha256=mN9uOzys9aZkhOJkApUy0yhfNeK8X09xQxT34ZPptNY,5386
 llama_cloud/resources/llama_extract/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/llama_extract/client.py,sha256=xEezIrVQcLW7bTle3gNi2bXVDm3trjXsUJpJtChXHVo,53044
+llama_cloud/resources/llama_extract/client.py,sha256=ZFYdW0Rw06daAe2f-jiiHYydltYL3yYm6_LALKcHZ-4,56798
 llama_cloud/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/organizations/client.py,sha256=OGSVpkfY5wu8-22IFWVmtbYSDiy0-KqA3Lc1E_jNHvg,55889
 llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/parsing/client.py,sha256=cdEEqjb5pRvb-Vq9VXjgh1107emTzYh5VP-Uu4aV3XI,74026
+llama_cloud/resources/parsing/client.py,sha256=dUeURj_Hr3T8jZZUXiwubp-ooL9MSHFxOKNrn8X6HWA,74184
 llama_cloud/resources/pipelines/__init__.py,sha256=Mx7p3jDZRLMltsfywSufam_4AnHvmAfsxtMHVI72e-8,1083
-llama_cloud/resources/pipelines/client.py,sha256=My_TCezdFHfzPmzSzD25DIKNO88XUrQGeFmwOQ-Z0Gk,125055
+llama_cloud/resources/pipelines/client.py,sha256=Irq4P4tZT3RyFZ66xIaYnQsEFtEpfjts3uVq6JZ2Vew,125071
 llama_cloud/resources/pipelines/types/__init__.py,sha256=jjaMc0V3K1HZLMYZ6WT4ydMtBCVy-oF5koqTCovbDws,1202
 llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py,sha256=trI48WLxPcAqV9207Q6-3cj1nl4EGlZpw7En56ZsPgg,217
 llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py,sha256=c8FF64fDrBMX_2RX4uY3CjbNc0Ss_AUJ4Eqs-KeV4Wc,2874
@@ -56,8 +56,8 @@ llama_cloud/resources/reports/client.py,sha256=kHjtXVVc1Xi3T1GyBvSW5K4mTdr6xQwZA
 llama_cloud/resources/reports/types/__init__.py,sha256=LfwDYrI4RcQu-o42iAe7HkcwHww2YU90lOonBPTmZIk,291
 llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py,sha256=Qh-MSeRvDBfNb5hoLELivv1pLtrYVf52WVoP7G8V34A,807
 llama_cloud/resources/retrievers/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/retrievers/client.py,sha256=fmRVQjMaSaytaU1NMvE_vosyrbkdY93kGi2VKAGcb4U,30245
-llama_cloud/types/__init__.py,sha256=AHJ1ew2Q4Y-b1dj2WHJDv9mSH7b--pfw2FrCgoIeC6I,27769
+llama_cloud/resources/retrievers/client.py,sha256=T7fu41wXAYUTGh23ZWlKPM4e8zH7mg5MDa8F1GxNYwQ,31502
+llama_cloud/types/__init__.py,sha256=Jk2CpRyCPl-Ww9FvOsnN20xsPszmwvLgezBNm8e0dyo,28020
 llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
 llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
 llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
@@ -65,9 +65,9 @@ llama_cloud/types/app_schema_chat_chat_message.py,sha256=4Mplkc7PczuxKL7Gga3aj8Q
 llama_cloud/types/auto_transform_config.py,sha256=HVeHZM75DMRznScqLTfrMwcZwIdyWPuaEYbPewnHqwc,1168
 llama_cloud/types/azure_open_ai_embedding.py,sha256=MeDqZoPYFN7Nv_imY9cfqDU9SPlEyAY4HcQZ4PF5X3g,2264
 llama_cloud/types/azure_open_ai_embedding_config.py,sha256=o1zZhzcGElH3SeixFErrm7P_WFHQ6LvrLem_nKJWunw,1170
-llama_cloud/types/base_plan.py,sha256=JThRn0N9swW7cVmfvAaJDqTNAUTqlYAHJFZhfTxHe0c,1743
+llama_cloud/types/base_plan.py,sha256=5DZi20EOciTc5okLAxQDqyGylsW-DflTy14dcvQb2fQ,1910
 llama_cloud/types/base_plan_metronome_plan_type.py,sha256=I3g_dVoWWztbmpWpYmseDqQSbwtlLUl2vS01tfgMjEA,499
-llama_cloud/types/base_plan_name.py,sha256=a0GU8mJiloUQttaN5vmFpAx3UmwWabI0lmYts0cMSsw,1478
+llama_cloud/types/base_plan_name.py,sha256=keHQaw9YV9ghsWnGfnHrLtB4qNz0v4TWX4_MoO3flRM,1926
 llama_cloud/types/base_plan_plan_frequency.py,sha256=idUZlDaSdMrMZ2lQ1ytBWM4QyduIZu6Gt2eLU0LVqH4,684
 llama_cloud/types/base_prompt_template.py,sha256=Cw3887tnytHZ5bJBSlniyU9k5ASidv9VYR86--IbNqo,1248
 llama_cloud/types/bedrock_embedding.py,sha256=qrUoVW9Q2DLg-3nBRfGsZqUWGszfzc6ZHR8LJiXTZk4,1908
@@ -83,7 +83,7 @@ llama_cloud/types/chunk_mode.py,sha256=J4vqAQfQG6PWsIv1Fe_99nVsAfDbv_P81_KVsJ9Ak
 llama_cloud/types/cloud_az_storage_blob_data_source.py,sha256=NT4cYsD1M868_bSJxKM9cvTMtjQtQxKloE4vRv8_lwg,1534
 llama_cloud/types/cloud_azure_ai_search_vector_store.py,sha256=9GTaft7BaKsR9RJQp5dlpbslXUlTMA1AcDdKV1ApfqI,1513
 llama_cloud/types/cloud_box_data_source.py,sha256=9bffCaKGvctSsk9OdTpzzP__O1NDpb9wdvKY2uwjpwY,1470
-llama_cloud/types/cloud_confluence_data_source.py,sha256=bl4LUbY3RdTkYRbzSHBTG1zKy1fJ9faNACthSe_6Gvw,1486
+llama_cloud/types/cloud_confluence_data_source.py,sha256=QsZbWPbRYxRoaCOGFzvLC_31QMSDQHaLYy3dgpMnQrM,1603
 llama_cloud/types/cloud_document.py,sha256=Rg_H8lcz2TzxEAIdU-m5mGpkM7s0j1Cn4JHkXYddmGs,1255
 llama_cloud/types/cloud_document_create.py,sha256=fQ1gZAtLCpr-a-sPbMez_5fK9JMU3uyp2tNvIzWNG3U,1278
 llama_cloud/types/cloud_google_drive_data_source.py,sha256=jf5k7SY8scR-8_X27ShYSh1vOiFteqIH6cNcG7xZGLE,1304
@@ -148,13 +148,13 @@ llama_cloud/types/extract_job_create_batch_data_schema_override.py,sha256=GykJ1B
 llama_cloud/types/extract_job_create_batch_data_schema_override_zero_value.py,sha256=7zXOgTYUwVAeyYeqWvX69m-7mhvK0V9cBRvgqVSd0X0,228
 llama_cloud/types/extract_job_create_data_schema_override.py,sha256=vuiJ2lGJjbXEnvFKzVnKyvgwhMXPg1Pb5GZne2DrB60,330
 llama_cloud/types/extract_job_create_data_schema_override_zero_value.py,sha256=HHEYxOSQXXyBYOiUQg_qwfQtXFj-OtThMwbUDBIgZU0,223
-llama_cloud/types/extract_mode.py,sha256=mMkEugv91d-kcWLGUlr7Nm62p0eSlXeqfMAKw7u7wXI,644
+llama_cloud/types/extract_mode.py,sha256=uTyzzzeO5lo4idGRQQSnJc9pKSkuKFdXaFRyH0dKd_Q,790
 llama_cloud/types/extract_resultset.py,sha256=Alje0YQJUiA_aKi0hQs7TAnhDmZuQ_yL9b6HCNYBFQg,1627
 llama_cloud/types/extract_resultset_data.py,sha256=v9Ae4SxLsvYPE9crko4N16lBjsxuZpz1yrUOhnaM_VY,427
 llama_cloud/types/extract_resultset_data_item_value.py,sha256=JwqgDIGW0irr8QWaSTIrl24FhGxTUDOXIbxoSdIjuxs,209
 llama_cloud/types/extract_resultset_data_zero_value.py,sha256=-tqgtp3hwIr2NhuC28wVWqQDgFFGYPfRdzneMtNzoBU,209
 llama_cloud/types/extract_resultset_extraction_metadata_value.py,sha256=LEFcxgBCY35Tw93RIU8aEcyJYcLuhPp5-_G5XP07-xw,219
-llama_cloud/types/extract_run.py,sha256=VrVw5kYwPFARyWO5hgcWLgpUu3cmaZlmeGNXM826EI4,2264
+llama_cloud/types/extract_run.py,sha256=wjjt1kwMLouLq8CyA0RTnEzNIiWfPAw10mgPwXHNAV8,2368
 llama_cloud/types/extract_run_data.py,sha256=Y24NhSSXSHDOI3qtETs9Iln5y3p5kCl4LB5F_RIoUj4,385
 llama_cloud/types/extract_run_data_item_value.py,sha256=jbR5Yo3bGwHw72OJJ1l5NGTngE-rC2Jxd5b6BrNKzOc,197
 llama_cloud/types/extract_run_data_schema_value.py,sha256=C4uNdNQHBrkribgmR6nxOQpRo1eydYJ78a0lm7B-e4o,199
@@ -186,10 +186,10 @@ llama_cloud/types/job_name_mapping.py,sha256=2dQFQlVHoeSlkyEKSEJv0M3PzJf7hMvkuAB
 llama_cloud/types/job_names.py,sha256=ZapQT__pLI14SagjGi8AsEwWY949hBoplQemMgb_Aoc,4098
 llama_cloud/types/job_record.py,sha256=r2WzLQXSOFogNMN2rl10rAlYI9OTCmVn06QaZXxa0rQ,2058
 llama_cloud/types/job_record_with_usage_metrics.py,sha256=iNV2do5TB_0e3PoOz_DJyAaM6Cn9G8KG-dGPGgEs5SY,1198
-llama_cloud/types/llama_extract_settings.py,sha256=Yh9Ah9W0X4l-znjYm4oNIh8-LCBc99JEQmGU87bUzWs,2225
+llama_cloud/types/llama_extract_settings.py,sha256=HtuUbO_2GbypKl9IglpzmR22N2MPnjXVScSk2VcMx14,2278
 llama_cloud/types/llama_index_core_base_llms_types_chat_message.py,sha256=NelHo-T-ebVMhRKsqE_xV8AJW4c7o6lS0uEQnPsmTwg,1365
 llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py,sha256=tTglUqrSUaVc2Wsi4uIt5MU-80_oxZzTnhf8ziilVGY,874
-llama_cloud/types/llama_parse_parameters.py,sha256=TMKaebSDi_6B4qsalE2zyYCJirj_HW_x5MhSIimGPJ8,5234
+llama_cloud/types/llama_parse_parameters.py,sha256=S3ynbHglFjqejskks6NyDtPQu_3ni19QelEkJjt7-HU,5267
 llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=B_0N3f8Aq59W9FbsH50mGBUiyWTIXQjHFl739uAyaQw,11207
 llama_cloud/types/llm.py,sha256=7iIItVPjURp4u5xxJDAFIefUdhUKwIuA245WXilJPXE,2234
 llama_cloud/types/llm_model_data.py,sha256=6rrycqGwlK3LZ2S-WtgmeomithdLhDCgwBBZQ5KLaso,1300
@@ -231,7 +231,7 @@ llama_cloud/types/parsing_job_json_result.py,sha256=BA3_u-ChHpE5wm08WmOvgPUsMsCl
 llama_cloud/types/parsing_job_markdown_result.py,sha256=gPIUO0JwtKwvSHcRYEr995DNl7VN3EaaSaj4aPHCP4o,1077
 llama_cloud/types/parsing_job_structured_result.py,sha256=w_Z4DOHjwUPmffjc4qJiGYbniWTpkjpVcD4irL1dDj0,1017
 llama_cloud/types/parsing_job_text_result.py,sha256=TP-7IRTWZLAZz7NYLkzi4PsGnaRJuPTt40p56Mk6Rhw,1065
-llama_cloud/types/parsing_mode.py,sha256=ppsF_Mia1FF26Zk3sZBwERxuqMbhvVDuVoR2kOsKJdE,1340
+llama_cloud/types/parsing_mode.py,sha256=s89EhQB3N9yH9a5EtuB8tDcrHLe2KJTM6e0Do-iU7FE,2038
 llama_cloud/types/parsing_usage.py,sha256=JLlozu-vIkcRKqWaOVJ9Z2TrY7peJRTzOpYjOThGKGQ,1012
 llama_cloud/types/partition_names.py,sha256=zZZn-sn59gwch2fa7fGMwFWUEuu5Dfen3ZqKtcPnBEM,1877
 llama_cloud/types/permission.py,sha256=LjhZdo0oLvk7ZVIF1d6Qja--AKH5Ri0naUhuJvZS6Ng,1345
@@ -246,19 +246,20 @@ llama_cloud/types/pipeline_data_source_create.py,sha256=wMsymqB-YGyf3jdQr-N5ODVG
 llama_cloud/types/pipeline_data_source_custom_metadata_value.py,sha256=8n3r60sxMx4_udW0yzJZxzyWeK6L3cc2-jLGZFW4EDs,217
 llama_cloud/types/pipeline_deployment.py,sha256=eVBrz032aPb2cqtIIVYT5MTHQvBNm89XazoNrRWVugo,1356
 llama_cloud/types/pipeline_embedding_config.py,sha256=mpeJ6bOMvRUO12VTYbcHmgJ3ssHNKAUQMrF06j2t7Lc,2721
-llama_cloud/types/pipeline_file.py,sha256=PUfKFTXEsrIMmXlzguEeETmo0yjDk-cA0iofNiT-hlw,2452
+llama_cloud/types/pipeline_file.py,sha256=_y0O_I7xr4ydXJb4__qyD8OlgTxS2I4RXdKCgV7qXGc,2520
 llama_cloud/types/pipeline_file_config_hash_value.py,sha256=4lvLnDpzNAHdiMkGJTTNDTu3p3H7Nxw5MR1Mzte7-_M,201
 llama_cloud/types/pipeline_file_create.py,sha256=yoMIzWED0ktKerE48kgzInBa3d0aNGO5JjTtDTDAn4A,1310
 llama_cloud/types/pipeline_file_create_custom_metadata_value.py,sha256=olVj5yhQFx1QqWO1Wv9d6AtL-YyYO9_OYtOfcD2ZeGY,217
 llama_cloud/types/pipeline_file_custom_metadata_value.py,sha256=ClFphYDNlHxeyLF5BWxIUhs2rooS0Xtqxr_Ae8dn8zE,211
 llama_cloud/types/pipeline_file_permission_info_value.py,sha256=a9yfg5n9po0-4ljGx8DtJoeLBwWFpaEk9ZQUN195BXg,211
 llama_cloud/types/pipeline_file_resource_info_value.py,sha256=s3uFGQNwlUEr-X4TJZkW_kMBvX3h1sXRJoYlJRvHSDc,209
+llama_cloud/types/pipeline_file_status.py,sha256=7AJOlwqZVcsk6aPF6Q-x7UzjdzdBj4FeXAZ4m35Bb5M,1003
 llama_cloud/types/pipeline_transform_config.py,sha256=zMr-ePLKGjbaScxbAHaSwYBL7rrNibVlnn0cbgElDfU,824
 llama_cloud/types/pipeline_type.py,sha256=tTqrhxHP5xd7W2dQGD0e5FOv886nwJssyaVlXpWrtRo,551
-llama_cloud/types/plan_limits.py,sha256=Hqo2IyFoI4BSYtaYF4JYIGqYA0xIdpSqc3Ht_cycJTc,1986
+llama_cloud/types/plan_limits.py,sha256=WAbDbRl8gsQxvhmuVB0YT8mry-0uKg6c66uivyppdQU,2056
 llama_cloud/types/playground_session.py,sha256=F8u2KZL2YaOrsT-o1n4zbhyPxSsoduc3ZCzQB8AecFA,1858
 llama_cloud/types/pooling.py,sha256=5Fr6c8rx9SDWwWzEvD78suob2d79ktodUtLUAUHMbP8,651
-llama_cloud/types/preset_composite_retrieval_params.py,sha256=8msstaAZUMs0ziQSEq4RJXNuQFztSDtDfyJHr632ubQ,1321
+llama_cloud/types/preset_composite_retrieval_params.py,sha256=yEf1pk4Wz5J6SxgB8elklwuyVDCRSZqfWC6x3hJUS4Q,1366
 llama_cloud/types/preset_retrieval_params.py,sha256=gEkjXr4202ebLtPL6pYX5hj5NSwANpAdhZbEHCbE2RA,1782
 llama_cloud/types/presigned_url.py,sha256=-DOQo7XKvUsl-9Gz7fX6VOHdQLzGH2XRau24ASvG92E,1275
 llama_cloud/types/progress_event.py,sha256=Bk73A8geTVaq0ze5pMnbkAmx7FSOHQIixYCpCas_dcY,1684
@@ -267,6 +268,8 @@ llama_cloud/types/project.py,sha256=4NNh_ZAjEkoWl5st6b1jsPVf_SYKtUTB6rS1701G4IQ,
 llama_cloud/types/project_create.py,sha256=GxGmsXGJM-cHrvPFLktEkj9JtNsSdFae7-HPZFB4er0,1014
 llama_cloud/types/prompt_conf.py,sha256=4vAKt0Gce9ALRb_-FE0QbRiFM1Rc9OQAADggwBwgauE,1402
 llama_cloud/types/pydantic_program_mode.py,sha256=QfvpqR7TqyNuOxo78Sr58VOu7KDSBrHJM4XXBB0F5z0,1202
+llama_cloud/types/re_rank_config.py,sha256=mxRWwrC5BLg3DP1yEyRwW2lIpv5BuXZfTy8f4RbcOp0,1262
+llama_cloud/types/re_ranker_type.py,sha256=qYItMEHrf80ePBp7gNGBSL67mkTIsqco92WJaJiYweo,1123
 llama_cloud/types/recurring_credit_grant.py,sha256=19qI3p5k1mQ1Qoo-gCQU02Aa42XpEsmwxPF1F88F-Yg,1517
 llama_cloud/types/related_node_info.py,sha256=frQg_RqrSBc62ooJ4QOF5QRKymHcNot5WVFAB_g1sMg,1216
 llama_cloud/types/related_node_info_node_type.py,sha256=lH95d8G-EnKCllV_igJsBfYt49y162PoNxWtrCo_Kgk,173
@@ -307,10 +310,10 @@ llama_cloud/types/text_node_with_score.py,sha256=k-KYWO_mgJBvO6xUfOD5W6v1Ku9E586
 llama_cloud/types/token_chunking_config.py,sha256=XNvnTsNd--YOMQ_Ad8hoqhYgQftqkBHKVn6i7nJnMqs,1067
 llama_cloud/types/token_text_splitter.py,sha256=iTT3x9yO021v757B2r-0Z-WFQiIESLqEJUCmUUwPQ_o,1899
 llama_cloud/types/transformation_category_names.py,sha256=Wb7NBB0f-tEtfEZQis-iKy71SUKmmHFcXf6XLn6g0XU,545
-llama_cloud/types/usage.py,sha256=LT4Jr4kiQiug2A_cNmGSxWV7UZmseYzxACXo7dTQIBQ,1466
-llama_cloud/types/usage_active_alerts_item.py,sha256=YZkSH_Vd3hu5f-Nv0LKKj9slVTa3GsOcbSPhttKcVqQ,964
-llama_cloud/types/usage_and_plan.py,sha256=DsQVkOkh6yiDY9FsGR34DcTocj53loO2lU55P45XnWY,1040
+llama_cloud/types/usage_and_plan.py,sha256=bclc7TE7CTBu7RLiTHG426dziyj--I8m5NVu86I2AV4,1065
 llama_cloud/types/usage_metric_response.py,sha256=ukvtNZLeLacv-5F0-GQ5wTBZOPUPEjAeurgYPc4s7nA,1047
+llama_cloud/types/usage_response.py,sha256=o0u15PGNQmOOie4kJFfc4Rw0jKGLckBJdH0NCAfT8_k,1499
+llama_cloud/types/usage_response_active_alerts_item.py,sha256=5EgU7go_CPe2Bmio12MwDoJaMnaMW0XjFNvVks0BhQY,1255
 llama_cloud/types/user_job_record.py,sha256=mJHdokJsemXJOwM2l7fsW3X0SlwSNcy7yHbcXZHh3I4,1098
 llama_cloud/types/user_organization.py,sha256=Ydel7grMnKiPMWJmSWhCFCm3v_n286Gk36ANtDLNLd4,1770
 llama_cloud/types/user_organization_create.py,sha256=Zj57s9xuYVnLW2p8i4j2QORL-G1y7Ab3avXE1baERQY,1189
@@ -321,7 +324,7 @@ llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPX
 llama_cloud/types/vertex_ai_embedding_config.py,sha256=DvQk2xMJFmo54MEXTzoM4KSADyhGm_ygmFyx6wIcQdw,1159
 llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2BaIMltDqGnIowU,1217
 llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
-llama_cloud-0.1.16.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
-llama_cloud-0.1.16.dist-info/METADATA,sha256=nCSIO_-vJxp4O2kbNl74lwlihxhu62Bg3eI7yjC8tu4,902
-llama_cloud-0.1.16.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-llama_cloud-0.1.16.dist-info/RECORD,,
+llama_cloud-0.1.17.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+llama_cloud-0.1.17.dist-info/METADATA,sha256=lH_iTxlsKdlvdppMsvFpJtyl5vFWAsT2NVppajCSBEs,902
+llama_cloud-0.1.17.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+llama_cloud-0.1.17.dist-info/RECORD,,