llama-cloud 0.1.16__py3-none-any.whl → 0.1.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of llama-cloud might be problematic.
- llama_cloud/__init__.py +10 -6
- llama_cloud/environment.py +1 -1
- llama_cloud/resources/chat_apps/client.py +24 -4
- llama_cloud/resources/llama_extract/client.py +98 -10
- llama_cloud/resources/parsing/client.py +16 -65
- llama_cloud/resources/pipelines/client.py +6 -6
- llama_cloud/resources/projects/client.py +70 -0
- llama_cloud/resources/retrievers/client.py +41 -8
- llama_cloud/types/__init__.py +10 -6
- llama_cloud/types/base_plan.py +3 -0
- llama_cloud/types/base_plan_name.py +12 -0
- llama_cloud/types/cloud_confluence_data_source.py +1 -0
- llama_cloud/types/extract_job_create.py +2 -1
- llama_cloud/types/extract_mode.py +8 -4
- llama_cloud/types/extract_run.py +1 -0
- llama_cloud/types/llama_extract_settings.py +3 -2
- llama_cloud/types/llama_parse_parameters.py +4 -2
- llama_cloud/types/parsing_mode.py +12 -0
- llama_cloud/types/pipeline_file.py +2 -1
- llama_cloud/types/pipeline_file_status.py +33 -0
- llama_cloud/types/plan_limits.py +1 -0
- llama_cloud/types/preset_composite_retrieval_params.py +4 -2
- llama_cloud/types/{parsing_usage.py → re_rank_config.py} +6 -3
- llama_cloud/types/re_ranker_type.py +41 -0
- llama_cloud/types/supported_llm_model_names.py +0 -12
- llama_cloud/types/usage_and_plan.py +2 -2
- llama_cloud/types/{usage.py → usage_response.py} +3 -3
- llama_cloud/types/{usage_active_alerts_item.py → usage_response_active_alerts_item.py} +8 -4
- {llama_cloud-0.1.16.dist-info → llama_cloud-0.1.18.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.16.dist-info → llama_cloud-0.1.18.dist-info}/RECORD +32 -30
- {llama_cloud-0.1.16.dist-info → llama_cloud-0.1.18.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.16.dist-info → llama_cloud-0.1.18.dist-info}/WHEEL +0 -0
llama_cloud/resources/projects/client.py
CHANGED

```diff
@@ -250,6 +250,41 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def get_current_project(
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> Project:
+        """
+        Get the current project.
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.projects.get_current_project()
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects/current"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(Project, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def get_project_usage(
         self,
         project_id: typing.Optional[str],
```
```diff
@@ -522,6 +557,41 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    async def get_current_project(
+        self, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
+    ) -> Project:
+        """
+        Get the current project.
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.projects.get_current_project()
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/projects/current"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(Project, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def get_project_usage(
         self,
         project_id: typing.Optional[str],
```
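Net effect of the two hunks above: both clients gain a `GET api/v1/projects/current` call. A minimal usage sketch; the token is a placeholder, and the `id`/`name` field access assumes the usual `Project` model shape, which is not shown in this diff:

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Both filters are optional; with no arguments the server resolves
# the current project for the token.
project = client.projects.get_current_project()
print(project.id, project.name)  # assumed Project fields
```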
llama_cloud/resources/retrievers/client.py
CHANGED

```diff
@@ -12,6 +12,7 @@ from ...errors.unprocessable_entity_error import UnprocessableEntityError
 from ...types.composite_retrieval_mode import CompositeRetrievalMode
 from ...types.composite_retrieval_result import CompositeRetrievalResult
 from ...types.http_validation_error import HttpValidationError
+from ...types.re_rank_config import ReRankConfig
 from ...types.retriever import Retriever
 from ...types.retriever_create import RetrieverCreate
 from ...types.retriever_pipeline import RetrieverPipeline
@@ -296,6 +297,7 @@ class RetrieversClient:
         *,
         mode: typing.Optional[CompositeRetrievalMode] = OMIT,
         rerank_top_n: typing.Optional[int] = OMIT,
+        rerank_config: typing.Optional[ReRankConfig] = OMIT,
         query: str,
     ) -> CompositeRetrievalResult:
         """
@@ -306,11 +308,13 @@ class RetrieversClient:
 
             - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.
 
-            - rerank_top_n: typing.Optional[int].
+            - rerank_top_n: typing.Optional[int].
+
+            - rerank_config: typing.Optional[ReRankConfig]. The rerank configuration for composite retrieval.
 
             - query: str. The query to retrieve against.
         ---
-        from llama_cloud import CompositeRetrievalMode
+        from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
@@ -319,6 +323,9 @@ class RetrieversClient:
         client.retrievers.retrieve(
             retriever_id="string",
             mode=CompositeRetrievalMode.ROUTING,
+            rerank_config=ReRankConfig(
+                type=ReRankerType.SYSTEM_DEFAULT,
+            ),
             query="string",
         )
         """
@@ -327,6 +334,8 @@ class RetrieversClient:
             _request["mode"] = mode
         if rerank_top_n is not OMIT:
             _request["rerank_top_n"] = rerank_top_n
+        if rerank_config is not OMIT:
+            _request["rerank_config"] = rerank_config
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(
@@ -353,6 +362,7 @@ class RetrieversClient:
         organization_id: typing.Optional[str] = None,
         mode: typing.Optional[CompositeRetrievalMode] = OMIT,
         rerank_top_n: typing.Optional[int] = OMIT,
+        rerank_config: typing.Optional[ReRankConfig] = OMIT,
         query: str,
         pipelines: typing.Optional[typing.List[RetrieverPipeline]] = OMIT,
     ) -> CompositeRetrievalResult:
@@ -366,13 +376,15 @@ class RetrieversClient:
 
             - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.
 
-            - rerank_top_n: typing.Optional[int].
+            - rerank_top_n: typing.Optional[int].
+
+            - rerank_config: typing.Optional[ReRankConfig]. The rerank configuration for composite retrieval.
 
             - query: str. The query to retrieve against.
 
             - pipelines: typing.Optional[typing.List[RetrieverPipeline]]. The pipelines to use for retrieval.
         ---
-        from llama_cloud import CompositeRetrievalMode
+        from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
@@ -380,6 +392,9 @@ class RetrieversClient:
         )
         client.retrievers.direct_retrieve(
             mode=CompositeRetrievalMode.ROUTING,
+            rerank_config=ReRankConfig(
+                type=ReRankerType.SYSTEM_DEFAULT,
+            ),
             query="string",
         )
         """
@@ -388,6 +403,8 @@ class RetrieversClient:
             _request["mode"] = mode
         if rerank_top_n is not OMIT:
             _request["rerank_top_n"] = rerank_top_n
+        if rerank_config is not OMIT:
+            _request["rerank_config"] = rerank_config
         if pipelines is not OMIT:
             _request["pipelines"] = pipelines
         _response = self._client_wrapper.httpx_client.request(
```
```diff
@@ -677,6 +694,7 @@ class AsyncRetrieversClient:
         *,
         mode: typing.Optional[CompositeRetrievalMode] = OMIT,
         rerank_top_n: typing.Optional[int] = OMIT,
+        rerank_config: typing.Optional[ReRankConfig] = OMIT,
         query: str,
     ) -> CompositeRetrievalResult:
         """
@@ -687,11 +705,13 @@ class AsyncRetrieversClient:
 
             - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.
 
-            - rerank_top_n: typing.Optional[int].
+            - rerank_top_n: typing.Optional[int].
+
+            - rerank_config: typing.Optional[ReRankConfig]. The rerank configuration for composite retrieval.
 
             - query: str. The query to retrieve against.
         ---
-        from llama_cloud import CompositeRetrievalMode
+        from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
@@ -700,6 +720,9 @@ class AsyncRetrieversClient:
         await client.retrievers.retrieve(
             retriever_id="string",
             mode=CompositeRetrievalMode.ROUTING,
+            rerank_config=ReRankConfig(
+                type=ReRankerType.SYSTEM_DEFAULT,
+            ),
             query="string",
         )
         """
@@ -708,6 +731,8 @@ class AsyncRetrieversClient:
             _request["mode"] = mode
         if rerank_top_n is not OMIT:
             _request["rerank_top_n"] = rerank_top_n
+        if rerank_config is not OMIT:
+            _request["rerank_config"] = rerank_config
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(
@@ -734,6 +759,7 @@ class AsyncRetrieversClient:
         organization_id: typing.Optional[str] = None,
         mode: typing.Optional[CompositeRetrievalMode] = OMIT,
         rerank_top_n: typing.Optional[int] = OMIT,
+        rerank_config: typing.Optional[ReRankConfig] = OMIT,
         query: str,
         pipelines: typing.Optional[typing.List[RetrieverPipeline]] = OMIT,
     ) -> CompositeRetrievalResult:
@@ -747,13 +773,15 @@ class AsyncRetrieversClient:
 
             - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.
 
-            - rerank_top_n: typing.Optional[int].
+            - rerank_top_n: typing.Optional[int].
+
+            - rerank_config: typing.Optional[ReRankConfig]. The rerank configuration for composite retrieval.
 
             - query: str. The query to retrieve against.
 
             - pipelines: typing.Optional[typing.List[RetrieverPipeline]]. The pipelines to use for retrieval.
         ---
-        from llama_cloud import CompositeRetrievalMode
+        from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
@@ -761,6 +789,9 @@ class AsyncRetrieversClient:
         )
         await client.retrievers.direct_retrieve(
             mode=CompositeRetrievalMode.ROUTING,
+            rerank_config=ReRankConfig(
+                type=ReRankerType.SYSTEM_DEFAULT,
+            ),
             query="string",
         )
         """
@@ -769,6 +800,8 @@ class AsyncRetrieversClient:
             _request["mode"] = mode
         if rerank_top_n is not OMIT:
             _request["rerank_top_n"] = rerank_top_n
+        if rerank_config is not OMIT:
+            _request["rerank_config"] = rerank_config
         if pipelines is not OMIT:
             _request["pipelines"] = pipelines
         _response = await self._client_wrapper.httpx_client.request(
```
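Across all four retrieval methods the pattern is the same: `rerank_config` is a new optional knob next to the older `rerank_top_n`, serialized into the request body only when supplied. A hedged sketch of a call using both fields of the new config (the retriever id and query are placeholders):

```python
from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

result = client.retrievers.retrieve(
    retriever_id="retriever-id",  # placeholder
    mode=CompositeRetrievalMode.ROUTING,
    rerank_config=ReRankConfig(
        top_n=5,  # nodes kept after reranking across all retrieval tools
        type=ReRankerType.SYSTEM_DEFAULT,
    ),
    query="example query",
)
```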
llama_cloud/types/__init__.py
CHANGED
```diff
@@ -208,7 +208,6 @@ from .parsing_job_markdown_result import ParsingJobMarkdownResult
 from .parsing_job_structured_result import ParsingJobStructuredResult
 from .parsing_job_text_result import ParsingJobTextResult
 from .parsing_mode import ParsingMode
-from .parsing_usage import ParsingUsage
 from .partition_names import PartitionNames
 from .permission import Permission
 from .pipeline import Pipeline
@@ -247,6 +246,7 @@ from .pipeline_file_create_custom_metadata_value import PipelineFileCreateCustomMetadataValue
 from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
 from .pipeline_file_permission_info_value import PipelineFilePermissionInfoValue
 from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
+from .pipeline_file_status import PipelineFileStatus
 from .pipeline_transform_config import (
     PipelineTransformConfig,
     PipelineTransformConfig_Advanced,
@@ -265,6 +265,8 @@ from .project import Project
 from .project_create import ProjectCreate
 from .prompt_conf import PromptConf
 from .pydantic_program_mode import PydanticProgramMode
+from .re_rank_config import ReRankConfig
+from .re_ranker_type import ReRankerType
 from .recurring_credit_grant import RecurringCreditGrant
 from .related_node_info import RelatedNodeInfo
 from .related_node_info_node_type import RelatedNodeInfoNodeType
@@ -310,10 +312,10 @@ from .text_node_with_score import TextNodeWithScore
 from .token_chunking_config import TokenChunkingConfig
 from .token_text_splitter import TokenTextSplitter
 from .transformation_category_names import TransformationCategoryNames
-from .usage import Usage
-from .usage_active_alerts_item import UsageActiveAlertsItem
 from .usage_and_plan import UsageAndPlan
 from .usage_metric_response import UsageMetricResponse
+from .usage_response import UsageResponse
+from .usage_response_active_alerts_item import UsageResponseActiveAlertsItem
 from .user_job_record import UserJobRecord
 from .user_organization import UserOrganization
 from .user_organization_create import UserOrganizationCreate
@@ -524,7 +526,6 @@ __all__ = [
     "ParsingJobStructuredResult",
     "ParsingJobTextResult",
     "ParsingMode",
-    "ParsingUsage",
     "PartitionNames",
     "Permission",
     "Pipeline",
@@ -559,6 +560,7 @@ __all__ = [
     "PipelineFileCustomMetadataValue",
     "PipelineFilePermissionInfoValue",
     "PipelineFileResourceInfoValue",
+    "PipelineFileStatus",
     "PipelineTransformConfig",
     "PipelineTransformConfig_Advanced",
     "PipelineTransformConfig_Auto",
@@ -575,6 +577,8 @@ __all__ = [
     "ProjectCreate",
     "PromptConf",
     "PydanticProgramMode",
+    "ReRankConfig",
+    "ReRankerType",
     "RecurringCreditGrant",
     "RelatedNodeInfo",
     "RelatedNodeInfoNodeType",
@@ -618,10 +622,10 @@ __all__ = [
     "TokenChunkingConfig",
     "TokenTextSplitter",
     "TransformationCategoryNames",
-    "Usage",
-    "UsageActiveAlertsItem",
    "UsageAndPlan",
     "UsageMetricResponse",
+    "UsageResponse",
+    "UsageResponseActiveAlertsItem",
     "UserJobRecord",
     "UserOrganization",
     "UserOrganizationCreate",
```
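For downstream code, the usage models were renamed along with their modules (per the rename entries in the file list above), and `ParsingUsage` is gone entirely; its module was repurposed as `re_rank_config.py`. A migration sketch:

```python
# 0.1.16 exposed these as Usage and UsageActiveAlertsItem;
# in 0.1.18 the equivalent models live under the new names:
from llama_cloud import UsageResponse, UsageResponseActiveAlertsItem
```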
llama_cloud/types/base_plan.py
CHANGED
```diff
@@ -32,6 +32,9 @@ class BasePlan(pydantic.BaseModel):
     starting_on: typing.Optional[dt.datetime]
     ending_before: typing.Optional[dt.datetime]
     current_billing_period: typing.Optional[BillingPeriod]
+    is_payment_failed: typing.Optional[bool] = pydantic.Field(
+        description="Whether the organization has a failed payment that requires support contact"
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
```
llama_cloud/types/base_plan_name.py
CHANGED

```diff
@@ -15,6 +15,9 @@ class BasePlanName(str, enum.Enum):
     PRO = "pro"
     ENTERPRISE_CONTRACT = "enterprise_contract"
     ENTERPRISE_POC = "enterprise_poc"
+    FREE_V_1 = "free_v1"
+    STARTER_V_1 = "starter_v1"
+    PRO_V_1 = "pro_v1"
 
     def visit(
         self,
@@ -26,6 +29,9 @@ class BasePlanName(str, enum.Enum):
         pro: typing.Callable[[], T_Result],
         enterprise_contract: typing.Callable[[], T_Result],
         enterprise_poc: typing.Callable[[], T_Result],
+        free_v_1: typing.Callable[[], T_Result],
+        starter_v_1: typing.Callable[[], T_Result],
+        pro_v_1: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is BasePlanName.FREE:
             return free()
@@ -43,3 +49,9 @@ class BasePlanName(str, enum.Enum):
             return enterprise_contract()
         if self is BasePlanName.ENTERPRISE_POC:
             return enterprise_poc()
+        if self is BasePlanName.FREE_V_1:
+            return free_v_1()
+        if self is BasePlanName.STARTER_V_1:
+            return starter_v_1()
+        if self is BasePlanName.PRO_V_1:
+            return pro_v_1()
```
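Because `visit` takes one callback per enum member, the three new plan names are a breaking change for any caller of `BasePlanName.visit`. A sketch of an updated call site; the `starter` callback is assumed from the plan tiers, since the full 0.1.16 signature is not visible in these hunks:

```python
from llama_cloud import BasePlanName

def plan_label(plan: BasePlanName) -> str:
    # One callback per member; a missing one raises TypeError at call time.
    return plan.visit(
        free=lambda: "Free",
        starter=lambda: "Starter",  # assumed pre-existing member, not shown in this diff
        pro=lambda: "Pro",
        enterprise_contract=lambda: "Enterprise (contract)",
        enterprise_poc=lambda: "Enterprise (PoC)",
        free_v_1=lambda: "Free v1",
        starter_v_1=lambda: "Starter v1",
        pro_v_1=lambda: "Pro v1",
    )
```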
llama_cloud/types/cloud_confluence_data_source.py
CHANGED

```diff
@@ -26,6 +26,7 @@ class CloudConfluenceDataSource(pydantic.BaseModel):
     page_ids: typing.Optional[str]
     cql: typing.Optional[str]
     label: typing.Optional[str]
+    index_restricted_pages: typing.Optional[bool] = pydantic.Field(description="Whether to index restricted pages.")
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
```
llama_cloud/types/extract_job_create.py
CHANGED

```diff
@@ -22,7 +22,8 @@ class ExtractJobCreate(pydantic.BaseModel):
     """
 
     extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
-    file_id: str
+    file_id: typing.Optional[str]
+    file: typing.Optional[str]
     data_schema_override: typing.Optional[ExtractJobCreateDataSchemaOverride] = pydantic.Field(
         description="The data schema to override the extraction agent's data schema with"
     )
```
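`file_id` changing from required to optional, alongside the new optional `file` field, suggests a job can now be created from either a previously uploaded file id or inline file content; which combinations the server actually accepts is not stated in this diff. The previously supported path still looks like this:

```python
from llama_cloud import ExtractJobCreate

job = ExtractJobCreate(
    extraction_agent_id="agent-id",  # placeholder
    file_id="file-id",               # placeholder; optional as of 0.1.18
)
```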
llama_cloud/types/extract_mode.py
CHANGED

```diff
@@ -8,18 +8,22 @@ T_Result = typing.TypeVar("T_Result")
 
 class ExtractMode(str, enum.Enum):
     FAST = "FAST"
-
+    BALANCED = "BALANCED"
     MULTIMODAL = "MULTIMODAL"
+    ACCURATE = "ACCURATE"
 
     def visit(
         self,
         fast: typing.Callable[[], T_Result],
-
+        balanced: typing.Callable[[], T_Result],
         multimodal: typing.Callable[[], T_Result],
+        accurate: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is ExtractMode.FAST:
             return fast()
-        if self is ExtractMode.
-            return
+        if self is ExtractMode.BALANCED:
+            return balanced()
         if self is ExtractMode.MULTIMODAL:
             return multimodal()
+        if self is ExtractMode.ACCURATE:
+            return accurate()
```

(The removed lines above are truncated in this diff view; the original content did not survive extraction.)
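`ExtractMode` is a `str` enum, so the new `BALANCED` and `ACCURATE` values round-trip cleanly between API payloads and Python:

```python
from llama_cloud import ExtractMode

mode = ExtractMode("BALANCED")       # construct from the wire value
assert mode is ExtractMode.BALANCED
assert mode == "BALANCED"            # str subclass: equal to its raw string
```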
llama_cloud/types/extract_run.py
CHANGED
```diff
@@ -39,6 +39,7 @@ class ExtractRun(pydantic.BaseModel):
     job_id: typing.Optional[str]
     data: typing.Optional[ExtractRunData] = pydantic.Field(description="The data extracted from the file")
     extraction_metadata: typing.Optional[typing.Dict[str, typing.Optional[ExtractRunExtractionMetadataValue]]]
+    from_ui: bool = pydantic.Field(description="Whether this extraction run was triggered from the UI")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
```
llama_cloud/types/llama_extract_settings.py
CHANGED

```diff
@@ -26,8 +26,8 @@ class LlamaExtractSettings(pydantic.BaseModel):
     max_file_size: typing.Optional[int] = pydantic.Field(
         description="The maximum file size (in bytes) allowed for the document."
     )
-
-        description="The maximum
+    max_file_size_ui: typing.Optional[int] = pydantic.Field(
+        description="The maximum file size (in bytes) allowed for the document."
     )
     max_pages: typing.Optional[int] = pydantic.Field(
         description="The maximum number of pages allowed for the document."
@@ -39,6 +39,7 @@ class LlamaExtractSettings(pydantic.BaseModel):
     extraction_agent_config: typing.Optional[typing.Dict[str, StructParseConf]] = pydantic.Field(
         description="The configuration for the extraction agent."
     )
+    use_multimodal_extraction: typing.Optional[bool]
     llama_parse_params: typing.Optional[LlamaParseParameters] = pydantic.Field(
         description="LlamaParse related settings."
     )
```

(The removed lines in the first hunk are truncated in this diff view.)
llama_cloud/types/llama_parse_parameters.py
CHANGED

```diff
@@ -26,6 +26,7 @@ class LlamaParseParameters(pydantic.BaseModel):
     disable_ocr: typing.Optional[bool]
     annotate_links: typing.Optional[bool]
     adaptive_long_table: typing.Optional[bool]
+    compact_markdown_table: typing.Optional[bool]
     disable_reconstruction: typing.Optional[bool]
     disable_image_extraction: typing.Optional[bool]
     invalidate_cache: typing.Optional[bool]
@@ -56,13 +57,14 @@ class LlamaParseParameters(pydantic.BaseModel):
     page_prefix: typing.Optional[str]
     page_suffix: typing.Optional[str]
     webhook_url: typing.Optional[str]
+    preset: typing.Optional[str]
     take_screenshot: typing.Optional[bool]
     is_formatting_instruction: typing.Optional[bool]
     premium_mode: typing.Optional[bool]
     continuous_mode: typing.Optional[bool]
-
+    input_s_3_path: typing.Optional[str] = pydantic.Field(alias="input_s3_path")
     input_s_3_region: typing.Optional[str] = pydantic.Field(alias="input_s3_region")
-
+    output_s_3_path_prefix: typing.Optional[str] = pydantic.Field(alias="output_s3_path_prefix")
     output_s_3_region: typing.Optional[str] = pydantic.Field(alias="output_s3_region")
     project_id: typing.Optional[str]
     azure_openai_deployment_name: typing.Optional[str]
```

(The removed lines in the second hunk are truncated in this diff view.)
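The S3 fields pair awkward generated attribute names (`input_s_3_path`) with pydantic aliases, so the wire keys remain `input_s3_path` and friends. A sketch, assuming `LlamaParseParameters` is re-exported at the package top level:

```python
from llama_cloud import LlamaParseParameters

params = LlamaParseParameters(input_s3_path="s3://bucket/input/")  # populate via alias
# The model's json() passes by_alias=True, so the serialized key is
# "input_s3_path", not the Python attribute name "input_s_3_path".
print(params.json(exclude_none=True))
```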
llama_cloud/types/parsing_mode.py
CHANGED

```diff
@@ -15,7 +15,10 @@ class ParsingMode(str, enum.Enum):
     PARSE_PAGE_WITH_LLM = "parse_page_with_llm"
     PARSE_PAGE_WITH_LVM = "parse_page_with_lvm"
     PARSE_PAGE_WITH_AGENT = "parse_page_with_agent"
+    PARSE_PAGE_WITH_LAYOUT_AGENT = "parse_page_with_layout_agent"
     PARSE_DOCUMENT_WITH_LLM = "parse_document_with_llm"
+    PARSE_DOCUMENT_WITH_LVM = "parse_document_with_lvm"
+    PARSE_DOCUMENT_WITH_AGENT = "parse_document_with_agent"
 
     def visit(
         self,
@@ -23,7 +26,10 @@ class ParsingMode(str, enum.Enum):
         parse_page_with_llm: typing.Callable[[], T_Result],
         parse_page_with_lvm: typing.Callable[[], T_Result],
         parse_page_with_agent: typing.Callable[[], T_Result],
+        parse_page_with_layout_agent: typing.Callable[[], T_Result],
         parse_document_with_llm: typing.Callable[[], T_Result],
+        parse_document_with_lvm: typing.Callable[[], T_Result],
+        parse_document_with_agent: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is ParsingMode.PARSE_PAGE_WITHOUT_LLM:
             return parse_page_without_llm()
@@ -33,5 +39,11 @@ class ParsingMode(str, enum.Enum):
             return parse_page_with_lvm()
         if self is ParsingMode.PARSE_PAGE_WITH_AGENT:
             return parse_page_with_agent()
+        if self is ParsingMode.PARSE_PAGE_WITH_LAYOUT_AGENT:
+            return parse_page_with_layout_agent()
         if self is ParsingMode.PARSE_DOCUMENT_WITH_LLM:
             return parse_document_with_llm()
+        if self is ParsingMode.PARSE_DOCUMENT_WITH_LVM:
+            return parse_document_with_lvm()
+        if self is ParsingMode.PARSE_DOCUMENT_WITH_AGENT:
+            return parse_document_with_agent()
```
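Like the other Fern enums here, `ParsingMode` values are plain strings on the wire; the new document-level modes follow the existing naming scheme:

```python
from llama_cloud import ParsingMode

mode = ParsingMode.PARSE_DOCUMENT_WITH_AGENT
assert mode.value == "parse_document_with_agent"
# Lookup by wire value returns the singleton member.
assert ParsingMode("parse_page_with_layout_agent") is ParsingMode.PARSE_PAGE_WITH_LAYOUT_AGENT
```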
llama_cloud/types/pipeline_file.py
CHANGED

```diff
@@ -8,6 +8,7 @@ from .pipeline_file_config_hash_value import PipelineFileConfigHashValue
 from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
 from .pipeline_file_permission_info_value import PipelineFilePermissionInfoValue
 from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
+from .pipeline_file_status import PipelineFileStatus
 
 try:
     import pydantic
@@ -40,7 +41,7 @@ class PipelineFile(pydantic.BaseModel):
     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileCustomMetadataValue]]]
     config_hash: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileConfigHashValue]]]
     indexed_page_count: typing.Optional[int]
-    status: typing.Optional[
+    status: typing.Optional[PipelineFileStatus]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
```

(The removed `status` line is truncated in this diff view, so its previous annotation is unclear.)
llama_cloud/types/pipeline_file_status.py
ADDED

```diff
@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class PipelineFileStatus(str, enum.Enum):
+    NOT_STARTED = "NOT_STARTED"
+    IN_PROGRESS = "IN_PROGRESS"
+    SUCCESS = "SUCCESS"
+    ERROR = "ERROR"
+    CANCELLED = "CANCELLED"
+
+    def visit(
+        self,
+        not_started: typing.Callable[[], T_Result],
+        in_progress: typing.Callable[[], T_Result],
+        success: typing.Callable[[], T_Result],
+        error: typing.Callable[[], T_Result],
+        cancelled: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is PipelineFileStatus.NOT_STARTED:
+            return not_started()
+        if self is PipelineFileStatus.IN_PROGRESS:
+            return in_progress()
+        if self is PipelineFileStatus.SUCCESS:
+            return success()
+        if self is PipelineFileStatus.ERROR:
+            return error()
+        if self is PipelineFileStatus.CANCELLED:
+            return cancelled()
```
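With `PipelineFile.status` now typed as `PipelineFileStatus`, callers can match on ingestion state directly. A sketch; treating SUCCESS/ERROR/CANCELLED as terminal states is an inference from the member names, not documented in this diff:

```python
from llama_cloud import PipelineFile, PipelineFileStatus

def is_done(f: PipelineFile) -> bool:
    # status is Optional; None (no reported state) is treated as not done.
    return f.status in (
        PipelineFileStatus.SUCCESS,
        PipelineFileStatus.ERROR,
        PipelineFileStatus.CANCELLED,
    )
```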
llama_cloud/types/plan_limits.py
CHANGED
```diff
@@ -18,6 +18,7 @@ class PlanLimits(pydantic.BaseModel):
     allow_pay_as_you_go: bool = pydantic.Field(description="Whether usage is allowed after credit grants are exhausted")
     subscription_cost_usd: int
     max_monthly_invoice_total_usd: typing.Optional[int]
+    spending_soft_alerts_usd_cents: typing.Optional[typing.List[int]]
     max_concurrent_parse_jobs_premium: typing.Optional[int]
     max_concurrent_parse_jobs_other: typing.Optional[int]
     max_extraction_agents: typing.Optional[int]
```
llama_cloud/types/preset_composite_retrieval_params.py
CHANGED

```diff
@@ -5,6 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from .composite_retrieval_mode import CompositeRetrievalMode
+from .re_rank_config import ReRankConfig
 
 try:
     import pydantic
@@ -17,8 +18,9 @@ except ImportError:
 
 class PresetCompositeRetrievalParams(pydantic.BaseModel):
     mode: typing.Optional[CompositeRetrievalMode] = pydantic.Field(description="The mode of composite retrieval.")
-    rerank_top_n: typing.Optional[int]
-
+    rerank_top_n: typing.Optional[int]
+    rerank_config: typing.Optional[ReRankConfig] = pydantic.Field(
+        description="The rerank configuration for composite retrieval."
     )
 
     def json(self, **kwargs: typing.Any) -> str:
```
llama_cloud/types/{parsing_usage.py → re_rank_config.py}
RENAMED

```diff
@@ -4,6 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .re_ranker_type import ReRankerType
 
 try:
     import pydantic
@@ -14,9 +15,11 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class
-
-
+class ReRankConfig(pydantic.BaseModel):
+    top_n: typing.Optional[int] = pydantic.Field(
+        description="The number of nodes to retrieve after reranking over retrieved nodes from all retrieval tools."
+    )
+    type: typing.Optional[ReRankerType] = pydantic.Field(description="The type of reranker to use.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
```

(The removed class definition is truncated in this diff view.)
llama_cloud/types/re_ranker_type.py
ADDED

```diff
@@ -0,0 +1,41 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class ReRankerType(str, enum.Enum):
+    """
+    Enum for the reranker type.
+    """
+
+    SYSTEM_DEFAULT = "system_default"
+    LLM = "llm"
+    COHERE = "cohere"
+    BEDROCK = "bedrock"
+    SCORE = "score"
+    DISABLED = "disabled"
+
+    def visit(
+        self,
+        system_default: typing.Callable[[], T_Result],
+        llm: typing.Callable[[], T_Result],
+        cohere: typing.Callable[[], T_Result],
+        bedrock: typing.Callable[[], T_Result],
+        score: typing.Callable[[], T_Result],
+        disabled: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is ReRankerType.SYSTEM_DEFAULT:
+            return system_default()
+        if self is ReRankerType.LLM:
+            return llm()
+        if self is ReRankerType.COHERE:
+            return cohere()
+        if self is ReRankerType.BEDROCK:
+            return bedrock()
+        if self is ReRankerType.SCORE:
+            return score()
+        if self is ReRankerType.DISABLED:
+            return disabled()
```