llama-cloud 0.1.15__py3-none-any.whl → 0.1.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud has been flagged as possibly problematic.

Files changed (49)
  1. llama_cloud/__init__.py +10 -32
  2. llama_cloud/environment.py +1 -1
  3. llama_cloud/resources/chat_apps/client.py +20 -0
  4. llama_cloud/resources/evals/client.py +0 -643
  5. llama_cloud/resources/llama_extract/client.py +98 -6
  6. llama_cloud/resources/parsing/client.py +8 -0
  7. llama_cloud/resources/pipelines/client.py +14 -375
  8. llama_cloud/resources/projects/client.py +72 -923
  9. llama_cloud/resources/retrievers/client.py +161 -4
  10. llama_cloud/types/__init__.py +10 -32
  11. llama_cloud/types/base_plan.py +3 -0
  12. llama_cloud/types/base_plan_name.py +12 -0
  13. llama_cloud/types/cloud_confluence_data_source.py +1 -0
  14. llama_cloud/types/extract_config.py +0 -3
  15. llama_cloud/types/extract_mode.py +13 -1
  16. llama_cloud/types/extract_run.py +1 -0
  17. llama_cloud/types/llama_extract_settings.py +1 -0
  18. llama_cloud/types/llama_parse_parameters.py +1 -0
  19. llama_cloud/types/parsing_mode.py +12 -0
  20. llama_cloud/types/pipeline_file.py +2 -1
  21. llama_cloud/types/pipeline_file_status.py +33 -0
  22. llama_cloud/types/plan_limits.py +1 -0
  23. llama_cloud/types/preset_composite_retrieval_params.py +4 -2
  24. llama_cloud/types/prompt_conf.py +1 -0
  25. llama_cloud/types/{eval_question_create.py → re_rank_config.py} +6 -2
  26. llama_cloud/types/re_ranker_type.py +41 -0
  27. llama_cloud/types/report_block.py +1 -0
  28. llama_cloud/types/struct_mode.py +4 -0
  29. llama_cloud/types/struct_parse_conf.py +6 -0
  30. llama_cloud/types/usage_and_plan.py +2 -2
  31. llama_cloud/types/{usage.py → usage_response.py} +3 -3
  32. llama_cloud/types/{usage_active_alerts_item.py → usage_response_active_alerts_item.py} +8 -4
  33. {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/METADATA +1 -1
  34. {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/RECORD +36 -47
  35. llama_cloud/types/eval_dataset.py +0 -40
  36. llama_cloud/types/eval_dataset_job_params.py +0 -39
  37. llama_cloud/types/eval_dataset_job_record.py +0 -58
  38. llama_cloud/types/eval_execution_params_override.py +0 -37
  39. llama_cloud/types/eval_metric.py +0 -17
  40. llama_cloud/types/eval_question.py +0 -38
  41. llama_cloud/types/eval_question_result.py +0 -52
  42. llama_cloud/types/local_eval.py +0 -47
  43. llama_cloud/types/local_eval_results.py +0 -40
  44. llama_cloud/types/local_eval_sets.py +0 -33
  45. llama_cloud/types/metric_result.py +0 -33
  46. llama_cloud/types/prompt_mixin_prompts.py +0 -39
  47. llama_cloud/types/prompt_spec.py +0 -36
  48. {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/LICENSE +0 -0
  49. {llama_cloud-0.1.15.dist-info → llama_cloud-0.1.17.dist-info}/WHEEL +0 -0
llama_cloud/resources/retrievers/client.py

@@ -12,6 +12,7 @@ from ...errors.unprocessable_entity_error import UnprocessableEntityError
 from ...types.composite_retrieval_mode import CompositeRetrievalMode
 from ...types.composite_retrieval_result import CompositeRetrievalResult
 from ...types.http_validation_error import HttpValidationError
+from ...types.re_rank_config import ReRankConfig
 from ...types.retriever import Retriever
 from ...types.retriever_create import RetrieverCreate
 from ...types.retriever_pipeline import RetrieverPipeline
@@ -296,6 +297,7 @@ class RetrieversClient:
         *,
         mode: typing.Optional[CompositeRetrievalMode] = OMIT,
         rerank_top_n: typing.Optional[int] = OMIT,
+        rerank_config: typing.Optional[ReRankConfig] = OMIT,
         query: str,
     ) -> CompositeRetrievalResult:
         """
@@ -306,11 +308,13 @@
 
         - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.
 
-        - rerank_top_n: typing.Optional[int]. The number of nodes to retrieve after reranking over retrieved nodes from all retrieval tools.
+        - rerank_top_n: typing.Optional[int].
+
+        - rerank_config: typing.Optional[ReRankConfig]. The rerank configuration for composite retrieval.
 
         - query: str. The query to retrieve against.
         ---
-        from llama_cloud import CompositeRetrievalMode
+        from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType
         from llama_cloud.client import LlamaCloud
 
         client = LlamaCloud(
@@ -319,6 +323,9 @@
         client.retrievers.retrieve(
             retriever_id="string",
             mode=CompositeRetrievalMode.ROUTING,
+            rerank_config=ReRankConfig(
+                type=ReRankerType.SYSTEM_DEFAULT,
+            ),
             query="string",
         )
         """
@@ -327,6 +334,8 @@
             _request["mode"] = mode
         if rerank_top_n is not OMIT:
             _request["rerank_top_n"] = rerank_top_n
+        if rerank_config is not OMIT:
+            _request["rerank_config"] = rerank_config
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(
@@ -346,6 +355,76 @@
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
+    def direct_retrieve(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        mode: typing.Optional[CompositeRetrievalMode] = OMIT,
+        rerank_top_n: typing.Optional[int] = OMIT,
+        rerank_config: typing.Optional[ReRankConfig] = OMIT,
+        query: str,
+        pipelines: typing.Optional[typing.List[RetrieverPipeline]] = OMIT,
+    ) -> CompositeRetrievalResult:
+        """
+        Retrieve data using specified pipelines without creating a persistent retriever.
+
+        Parameters:
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
+
+        - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.
+
+        - rerank_top_n: typing.Optional[int].
+
+        - rerank_config: typing.Optional[ReRankConfig]. The rerank configuration for composite retrieval.
+
+        - query: str. The query to retrieve against.
+
+        - pipelines: typing.Optional[typing.List[RetrieverPipeline]]. The pipelines to use for retrieval.
+        ---
+        from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.retrievers.direct_retrieve(
+            mode=CompositeRetrievalMode.ROUTING,
+            rerank_config=ReRankConfig(
+                type=ReRankerType.SYSTEM_DEFAULT,
+            ),
+            query="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"query": query}
+        if mode is not OMIT:
+            _request["mode"] = mode
+        if rerank_top_n is not OMIT:
+            _request["rerank_top_n"] = rerank_top_n
+        if rerank_config is not OMIT:
+            _request["rerank_config"] = rerank_config
+        if pipelines is not OMIT:
+            _request["pipelines"] = pipelines
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/retrievers/retrieve"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(CompositeRetrievalResult, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
 
 class AsyncRetrieversClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -615,6 +694,7 @@ class AsyncRetrieversClient:
         *,
         mode: typing.Optional[CompositeRetrievalMode] = OMIT,
         rerank_top_n: typing.Optional[int] = OMIT,
+        rerank_config: typing.Optional[ReRankConfig] = OMIT,
        query: str,
     ) -> CompositeRetrievalResult:
         """
@@ -625,11 +705,13 @@ class AsyncRetrieversClient:
 
         - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.
 
-        - rerank_top_n: typing.Optional[int]. The number of nodes to retrieve after reranking over retrieved nodes from all retrieval tools.
+        - rerank_top_n: typing.Optional[int].
+
+        - rerank_config: typing.Optional[ReRankConfig]. The rerank configuration for composite retrieval.
 
         - query: str. The query to retrieve against.
         ---
-        from llama_cloud import CompositeRetrievalMode
+        from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType
         from llama_cloud.client import AsyncLlamaCloud
 
         client = AsyncLlamaCloud(
@@ -638,6 +720,9 @@ class AsyncRetrieversClient:
         await client.retrievers.retrieve(
             retriever_id="string",
             mode=CompositeRetrievalMode.ROUTING,
+            rerank_config=ReRankConfig(
+                type=ReRankerType.SYSTEM_DEFAULT,
+            ),
             query="string",
         )
         """
@@ -646,6 +731,8 @@ class AsyncRetrieversClient:
             _request["mode"] = mode
         if rerank_top_n is not OMIT:
             _request["rerank_top_n"] = rerank_top_n
+        if rerank_config is not OMIT:
+            _request["rerank_config"] = rerank_config
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(
@@ -664,3 +751,73 @@
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def direct_retrieve(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        mode: typing.Optional[CompositeRetrievalMode] = OMIT,
+        rerank_top_n: typing.Optional[int] = OMIT,
+        rerank_config: typing.Optional[ReRankConfig] = OMIT,
+        query: str,
+        pipelines: typing.Optional[typing.List[RetrieverPipeline]] = OMIT,
+    ) -> CompositeRetrievalResult:
+        """
+        Retrieve data using specified pipelines without creating a persistent retriever.
+
+        Parameters:
+        - project_id: typing.Optional[str].
+
+        - organization_id: typing.Optional[str].
+
+        - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.
+
+        - rerank_top_n: typing.Optional[int].
+
+        - rerank_config: typing.Optional[ReRankConfig]. The rerank configuration for composite retrieval.
+
+        - query: str. The query to retrieve against.
+
+        - pipelines: typing.Optional[typing.List[RetrieverPipeline]]. The pipelines to use for retrieval.
+        ---
+        from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.retrievers.direct_retrieve(
+            mode=CompositeRetrievalMode.ROUTING,
+            rerank_config=ReRankConfig(
+                type=ReRankerType.SYSTEM_DEFAULT,
+            ),
+            query="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"query": query}
+        if mode is not OMIT:
+            _request["mode"] = mode
+        if rerank_top_n is not OMIT:
+            _request["rerank_top_n"] = rerank_top_n
+        if rerank_config is not OMIT:
+            _request["rerank_config"] = rerank_config
+        if pipelines is not OMIT:
+            _request["pipelines"] = pipelines
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/retrievers/retrieve"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(CompositeRetrievalResult, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
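
Taken together, the retriever hunks do two things: retrieve gains an optional rerank_config, and the new direct_retrieve method posts to the same api/v1/retrievers/retrieve route with an inline pipeline list instead of a stored retriever id. A minimal sketch of the ad-hoc path, assuming RetrieverPipeline accepts name and pipeline_id fields (its shape is not shown in this diff):

from llama_cloud import CompositeRetrievalMode, ReRankConfig, ReRankerType, RetrieverPipeline
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Query two existing pipelines ad hoc; no persistent retriever is created.
result = client.retrievers.direct_retrieve(
    query="What changed between releases?",
    mode=CompositeRetrievalMode.ROUTING,
    rerank_config=ReRankConfig(type=ReRankerType.SYSTEM_DEFAULT, top_n=5),
    pipelines=[
        RetrieverPipeline(name="docs", pipeline_id="..."),  # placeholder ids
        RetrieverPipeline(name="faq", pipeline_id="..."),
    ],
)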
llama_cloud/types/__init__.py

@@ -103,15 +103,7 @@ from .embedding_model_config_update_embedding_config import (
     EmbeddingModelConfigUpdateEmbeddingConfig_OpenaiEmbedding,
     EmbeddingModelConfigUpdateEmbeddingConfig_VertexaiEmbedding,
 )
-from .eval_dataset import EvalDataset
-from .eval_dataset_job_params import EvalDatasetJobParams
-from .eval_dataset_job_record import EvalDatasetJobRecord
 from .eval_execution_params import EvalExecutionParams
-from .eval_execution_params_override import EvalExecutionParamsOverride
-from .eval_metric import EvalMetric
-from .eval_question import EvalQuestion
-from .eval_question_create import EvalQuestionCreate
-from .eval_question_result import EvalQuestionResult
 from .extract_agent import ExtractAgent
 from .extract_agent_create import ExtractAgentCreate
 from .extract_agent_create_data_schema import ExtractAgentCreateDataSchema
@@ -178,9 +170,6 @@ from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtens
 from .llm import Llm
 from .llm_model_data import LlmModelData
 from .llm_parameters import LlmParameters
-from .local_eval import LocalEval
-from .local_eval_results import LocalEvalResults
-from .local_eval_sets import LocalEvalSets
 from .managed_ingestion_status import ManagedIngestionStatus
 from .managed_ingestion_status_response import ManagedIngestionStatusResponse
 from .markdown_element_node_parser import MarkdownElementNodeParser
@@ -191,7 +180,6 @@ from .metadata_filter import MetadataFilter
 from .metadata_filter_value import MetadataFilterValue
 from .metadata_filters import MetadataFilters
 from .metadata_filters_filters_item import MetadataFiltersFiltersItem
-from .metric_result import MetricResult
 from .node_parser import NodeParser
 from .node_relationship import NodeRelationship
 from .none_chunking_config import NoneChunkingConfig
@@ -259,6 +247,7 @@ from .pipeline_file_create_custom_metadata_value import PipelineFileCreateCustom
 from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
 from .pipeline_file_permission_info_value import PipelineFilePermissionInfoValue
 from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
+from .pipeline_file_status import PipelineFileStatus
 from .pipeline_transform_config import (
     PipelineTransformConfig,
     PipelineTransformConfig_Advanced,
@@ -276,9 +265,9 @@ from .progress_event_status import ProgressEventStatus
 from .project import Project
 from .project_create import ProjectCreate
 from .prompt_conf import PromptConf
-from .prompt_mixin_prompts import PromptMixinPrompts
-from .prompt_spec import PromptSpec
 from .pydantic_program_mode import PydanticProgramMode
+from .re_rank_config import ReRankConfig
+from .re_ranker_type import ReRankerType
 from .recurring_credit_grant import RecurringCreditGrant
 from .related_node_info import RelatedNodeInfo
 from .related_node_info_node_type import RelatedNodeInfoNodeType
@@ -324,10 +313,10 @@ from .text_node_with_score import TextNodeWithScore
 from .token_chunking_config import TokenChunkingConfig
 from .token_text_splitter import TokenTextSplitter
 from .transformation_category_names import TransformationCategoryNames
-from .usage import Usage
-from .usage_active_alerts_item import UsageActiveAlertsItem
 from .usage_and_plan import UsageAndPlan
 from .usage_metric_response import UsageMetricResponse
+from .usage_response import UsageResponse
+from .usage_response_active_alerts_item import UsageResponseActiveAlertsItem
 from .user_job_record import UserJobRecord
 from .user_organization import UserOrganization
 from .user_organization_create import UserOrganizationCreate
@@ -435,15 +424,7 @@ __all__ = [
     "EmbeddingModelConfigUpdateEmbeddingConfig_HuggingfaceApiEmbedding",
     "EmbeddingModelConfigUpdateEmbeddingConfig_OpenaiEmbedding",
     "EmbeddingModelConfigUpdateEmbeddingConfig_VertexaiEmbedding",
-    "EvalDataset",
-    "EvalDatasetJobParams",
-    "EvalDatasetJobRecord",
     "EvalExecutionParams",
-    "EvalExecutionParamsOverride",
-    "EvalMetric",
-    "EvalQuestion",
-    "EvalQuestionCreate",
-    "EvalQuestionResult",
     "ExtractAgent",
     "ExtractAgentCreate",
     "ExtractAgentCreateDataSchema",
@@ -508,9 +489,6 @@ __all__ = [
     "Llm",
     "LlmModelData",
     "LlmParameters",
-    "LocalEval",
-    "LocalEvalResults",
-    "LocalEvalSets",
     "ManagedIngestionStatus",
     "ManagedIngestionStatusResponse",
     "MarkdownElementNodeParser",
@@ -521,7 +499,6 @@ __all__ = [
     "MetadataFilterValue",
     "MetadataFilters",
     "MetadataFiltersFiltersItem",
-    "MetricResult",
     "NodeParser",
     "NodeRelationship",
     "NoneChunkingConfig",
@@ -585,6 +562,7 @@ __all__ = [
     "PipelineFileCustomMetadataValue",
     "PipelineFilePermissionInfoValue",
     "PipelineFileResourceInfoValue",
+    "PipelineFileStatus",
     "PipelineTransformConfig",
     "PipelineTransformConfig_Advanced",
     "PipelineTransformConfig_Auto",
@@ -600,9 +578,9 @@ __all__ = [
     "Project",
     "ProjectCreate",
     "PromptConf",
-    "PromptMixinPrompts",
-    "PromptSpec",
     "PydanticProgramMode",
+    "ReRankConfig",
+    "ReRankerType",
     "RecurringCreditGrant",
     "RelatedNodeInfo",
     "RelatedNodeInfoNodeType",
@@ -646,10 +624,10 @@ __all__ = [
     "TokenChunkingConfig",
     "TokenTextSplitter",
     "TransformationCategoryNames",
-    "Usage",
-    "UsageActiveAlertsItem",
     "UsageAndPlan",
     "UsageMetricResponse",
+    "UsageResponse",
+    "UsageResponseActiveAlertsItem",
     "UserJobRecord",
     "UserOrganization",
     "UserOrganizationCreate",
llama_cloud/types/base_plan.py

@@ -32,6 +32,9 @@ class BasePlan(pydantic.BaseModel):
     starting_on: typing.Optional[dt.datetime]
     ending_before: typing.Optional[dt.datetime]
     current_billing_period: typing.Optional[BillingPeriod]
+    is_payment_failed: typing.Optional[bool] = pydantic.Field(
+        description="Whether the organization has a failed payment that requires support contact"
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/base_plan_name.py

@@ -15,6 +15,9 @@ class BasePlanName(str, enum.Enum):
     PRO = "pro"
     ENTERPRISE_CONTRACT = "enterprise_contract"
     ENTERPRISE_POC = "enterprise_poc"
+    FREE_V_1 = "free_v1"
+    STARTER_V_1 = "starter_v1"
+    PRO_V_1 = "pro_v1"
 
     def visit(
         self,
@@ -26,6 +29,9 @@ class BasePlanName(str, enum.Enum):
         pro: typing.Callable[[], T_Result],
         enterprise_contract: typing.Callable[[], T_Result],
         enterprise_poc: typing.Callable[[], T_Result],
+        free_v_1: typing.Callable[[], T_Result],
+        starter_v_1: typing.Callable[[], T_Result],
+        pro_v_1: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is BasePlanName.FREE:
             return free()
@@ -43,3 +49,9 @@ class BasePlanName(str, enum.Enum):
             return enterprise_contract()
         if self is BasePlanName.ENTERPRISE_POC:
             return enterprise_poc()
+        if self is BasePlanName.FREE_V_1:
+            return free_v_1()
+        if self is BasePlanName.STARTER_V_1:
+            return starter_v_1()
+        if self is BasePlanName.PRO_V_1:
+            return pro_v_1()
llama_cloud/types/cloud_confluence_data_source.py

@@ -26,6 +26,7 @@ class CloudConfluenceDataSource(pydantic.BaseModel):
     page_ids: typing.Optional[str]
     cql: typing.Optional[str]
     label: typing.Optional[str]
+    index_restricted_pages: typing.Optional[bool] = pydantic.Field(description="Whether to index restricted pages.")
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/extract_config.py

@@ -23,9 +23,6 @@ class ExtractConfig(pydantic.BaseModel):
 
     extraction_target: typing.Optional[ExtractTarget] = pydantic.Field(description="The extraction target specified.")
     extraction_mode: typing.Optional[ExtractMode] = pydantic.Field(description="The extraction mode specified.")
-    handle_missing: typing.Optional[bool] = pydantic.Field(
-        description="Whether to handle missing fields in the schema."
-    )
     system_prompt: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/extract_mode.py

@@ -8,10 +8,22 @@ T_Result = typing.TypeVar("T_Result")
 
 class ExtractMode(str, enum.Enum):
     FAST = "FAST"
+    BALANCED = "BALANCED"
+    MULTIMODAL = "MULTIMODAL"
     ACCURATE = "ACCURATE"
 
-    def visit(self, fast: typing.Callable[[], T_Result], accurate: typing.Callable[[], T_Result]) -> T_Result:
+    def visit(
+        self,
+        fast: typing.Callable[[], T_Result],
+        balanced: typing.Callable[[], T_Result],
+        multimodal: typing.Callable[[], T_Result],
+        accurate: typing.Callable[[], T_Result],
+    ) -> T_Result:
         if self is ExtractMode.FAST:
             return fast()
+        if self is ExtractMode.BALANCED:
+            return balanced()
+        if self is ExtractMode.MULTIMODAL:
+            return multimodal()
         if self is ExtractMode.ACCURATE:
             return accurate()
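
Because visit() is exhaustive, the two new modes are a breaking change for callers of ExtractMode.visit, which must now pass balanced and multimodal callbacks. A sketch, assuming ExtractConfig is exported from the package root like the other generated types:

from llama_cloud import ExtractConfig, ExtractMode

# Note: handle_missing was removed from ExtractConfig in this same release.
config = ExtractConfig(extraction_mode=ExtractMode.BALANCED)

label = ExtractMode.BALANCED.visit(
    fast=lambda: "fast",
    balanced=lambda: "balanced",      # new, now required
    multimodal=lambda: "multimodal",  # new, now required
    accurate=lambda: "accurate",
)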
llama_cloud/types/extract_run.py

@@ -39,6 +39,7 @@ class ExtractRun(pydantic.BaseModel):
     job_id: typing.Optional[str]
     data: typing.Optional[ExtractRunData] = pydantic.Field(description="The data extracted from the file")
     extraction_metadata: typing.Optional[typing.Dict[str, typing.Optional[ExtractRunExtractionMetadataValue]]]
+    from_ui: bool = pydantic.Field(description="Whether this extraction run was triggered from the UI")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/llama_extract_settings.py

@@ -39,6 +39,7 @@ class LlamaExtractSettings(pydantic.BaseModel):
     extraction_agent_config: typing.Optional[typing.Dict[str, StructParseConf]] = pydantic.Field(
         description="The configuration for the extraction agent."
     )
+    use_multimodal_extraction: typing.Optional[bool]
     llama_parse_params: typing.Optional[LlamaParseParameters] = pydantic.Field(
         description="LlamaParse related settings."
     )
llama_cloud/types/llama_parse_parameters.py

@@ -56,6 +56,7 @@ class LlamaParseParameters(pydantic.BaseModel):
     page_prefix: typing.Optional[str]
     page_suffix: typing.Optional[str]
     webhook_url: typing.Optional[str]
+    preset: typing.Optional[str]
     take_screenshot: typing.Optional[bool]
     is_formatting_instruction: typing.Optional[bool]
     premium_mode: typing.Optional[bool]
llama_cloud/types/parsing_mode.py

@@ -15,7 +15,10 @@ class ParsingMode(str, enum.Enum):
     PARSE_PAGE_WITH_LLM = "parse_page_with_llm"
     PARSE_PAGE_WITH_LVM = "parse_page_with_lvm"
     PARSE_PAGE_WITH_AGENT = "parse_page_with_agent"
+    PARSE_PAGE_WITH_LAYOUT_AGENT = "parse_page_with_layout_agent"
     PARSE_DOCUMENT_WITH_LLM = "parse_document_with_llm"
+    PARSE_DOCUMENT_WITH_LVM = "parse_document_with_lvm"
+    PARSE_DOCUMENT_WITH_AGENT = "parse_document_with_agent"
 
     def visit(
         self,
@@ -23,7 +26,10 @@ class ParsingMode(str, enum.Enum):
         parse_page_with_llm: typing.Callable[[], T_Result],
         parse_page_with_lvm: typing.Callable[[], T_Result],
         parse_page_with_agent: typing.Callable[[], T_Result],
+        parse_page_with_layout_agent: typing.Callable[[], T_Result],
         parse_document_with_llm: typing.Callable[[], T_Result],
+        parse_document_with_lvm: typing.Callable[[], T_Result],
+        parse_document_with_agent: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is ParsingMode.PARSE_PAGE_WITHOUT_LLM:
             return parse_page_without_llm()
@@ -33,5 +39,11 @@ class ParsingMode(str, enum.Enum):
             return parse_page_with_lvm()
         if self is ParsingMode.PARSE_PAGE_WITH_AGENT:
             return parse_page_with_agent()
+        if self is ParsingMode.PARSE_PAGE_WITH_LAYOUT_AGENT:
+            return parse_page_with_layout_agent()
         if self is ParsingMode.PARSE_DOCUMENT_WITH_LLM:
             return parse_document_with_llm()
+        if self is ParsingMode.PARSE_DOCUMENT_WITH_LVM:
+            return parse_document_with_lvm()
+        if self is ParsingMode.PARSE_DOCUMENT_WITH_AGENT:
+            return parse_document_with_agent()
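
The same caveat applies here: ParsingMode.visit now requires three more callbacks, so existing exhaustive matches raise a TypeError until updated. A sketch (the labels are illustrative only):

from llama_cloud import ParsingMode

label = ParsingMode.PARSE_PAGE_WITH_LAYOUT_AGENT.visit(
    parse_page_without_llm=lambda: "page, no LLM",
    parse_page_with_llm=lambda: "page, LLM",
    parse_page_with_lvm=lambda: "page, LVM",
    parse_page_with_agent=lambda: "page, agent",
    parse_page_with_layout_agent=lambda: "page, layout agent",  # new
    parse_document_with_llm=lambda: "document, LLM",
    parse_document_with_lvm=lambda: "document, LVM",            # new
    parse_document_with_agent=lambda: "document, agent",        # new
)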
llama_cloud/types/pipeline_file.py

@@ -8,6 +8,7 @@ from .pipeline_file_config_hash_value import PipelineFileConfigHashValue
 from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
 from .pipeline_file_permission_info_value import PipelineFilePermissionInfoValue
 from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
+from .pipeline_file_status import PipelineFileStatus
 
 try:
     import pydantic
@@ -40,7 +41,7 @@ class PipelineFile(pydantic.BaseModel):
     custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileCustomMetadataValue]]]
     config_hash: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileConfigHashValue]]]
     indexed_page_count: typing.Optional[int]
-    status: typing.Optional[str]
+    status: typing.Optional[PipelineFileStatus]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_file_status.py (new file)

@@ -0,0 +1,33 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class PipelineFileStatus(str, enum.Enum):
+    NOT_STARTED = "NOT_STARTED"
+    IN_PROGRESS = "IN_PROGRESS"
+    SUCCESS = "SUCCESS"
+    ERROR = "ERROR"
+    CANCELLED = "CANCELLED"
+
+    def visit(
+        self,
+        not_started: typing.Callable[[], T_Result],
+        in_progress: typing.Callable[[], T_Result],
+        success: typing.Callable[[], T_Result],
+        error: typing.Callable[[], T_Result],
+        cancelled: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is PipelineFileStatus.NOT_STARTED:
+            return not_started()
+        if self is PipelineFileStatus.IN_PROGRESS:
+            return in_progress()
+        if self is PipelineFileStatus.SUCCESS:
+            return success()
+        if self is PipelineFileStatus.ERROR:
+            return error()
+        if self is PipelineFileStatus.CANCELLED:
+            return cancelled()
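
With PipelineFile.status retyped from str to this enum, status handling can be made explicit; because PipelineFileStatus subclasses str, existing comparisons such as status == "ERROR" keep working. A small sketch, with pf standing in for any PipelineFile instance:

from llama_cloud import PipelineFileStatus

def is_terminal(status: PipelineFileStatus) -> bool:
    # SUCCESS, ERROR and CANCELLED are terminal; the other two are in flight.
    return status.visit(
        not_started=lambda: False,
        in_progress=lambda: False,
        success=lambda: True,
        error=lambda: True,
        cancelled=lambda: True,
    )

if pf.status is not None and is_terminal(pf.status):
    print(f"ingestion finished with status {pf.status.value}")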
llama_cloud/types/plan_limits.py

@@ -18,6 +18,7 @@ class PlanLimits(pydantic.BaseModel):
     allow_pay_as_you_go: bool = pydantic.Field(description="Whether usage is allowed after credit grants are exhausted")
     subscription_cost_usd: int
     max_monthly_invoice_total_usd: typing.Optional[int]
+    spending_soft_alerts_usd_cents: typing.Optional[typing.List[int]]
     max_concurrent_parse_jobs_premium: typing.Optional[int]
     max_concurrent_parse_jobs_other: typing.Optional[int]
     max_extraction_agents: typing.Optional[int]
llama_cloud/types/preset_composite_retrieval_params.py

@@ -5,6 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from .composite_retrieval_mode import CompositeRetrievalMode
+from .re_rank_config import ReRankConfig
 
 try:
     import pydantic
@@ -17,8 +18,9 @@ except ImportError:
 
 class PresetCompositeRetrievalParams(pydantic.BaseModel):
     mode: typing.Optional[CompositeRetrievalMode] = pydantic.Field(description="The mode of composite retrieval.")
-    rerank_top_n: typing.Optional[int] = pydantic.Field(
-        description="The number of nodes to retrieve after reranking over retrieved nodes from all retrieval tools."
+    rerank_top_n: typing.Optional[int]
+    rerank_config: typing.Optional[ReRankConfig] = pydantic.Field(
+        description="The rerank configuration for composite retrieval."
     )
 
     def json(self, **kwargs: typing.Any) -> str:
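
The flat rerank_top_n field stays on the model but loses its description; the documented home for top_n is now ReRankConfig (see the renamed file below). A sketch of the new shape, assuming PresetCompositeRetrievalParams is exported from the package root:

from llama_cloud import CompositeRetrievalMode, PresetCompositeRetrievalParams, ReRankConfig

params = PresetCompositeRetrievalParams(
    mode=CompositeRetrievalMode.ROUTING,
    rerank_config=ReRankConfig(top_n=5),  # replaces the bare rerank_top_n
)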
llama_cloud/types/prompt_conf.py

@@ -18,6 +18,7 @@ class PromptConf(pydantic.BaseModel):
     system_prompt: typing.Optional[str] = pydantic.Field(description="The system prompt to use for the extraction.")
     extraction_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for the extraction.")
     error_handling_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for error handling.")
+    reasoning_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for reasoning.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/eval_question_create.py → llama_cloud/types/re_rank_config.py (renamed)

@@ -4,6 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .re_ranker_type import ReRankerType
 
 try:
     import pydantic
@@ -14,8 +15,11 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class EvalQuestionCreate(pydantic.BaseModel):
-    content: str = pydantic.Field(description="The content of the question.")
+class ReRankConfig(pydantic.BaseModel):
+    top_n: typing.Optional[int] = pydantic.Field(
+        description="The number of nodes to retrieve after reranking over retrieved nodes from all retrieval tools."
+    )
+    type: typing.Optional[ReRankerType] = pydantic.Field(description="The type of reranker to use.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/re_ranker_type.py (new file)

@@ -0,0 +1,41 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class ReRankerType(str, enum.Enum):
+    """
+    Enum for the reranker type.
+    """
+
+    SYSTEM_DEFAULT = "system_default"
+    LLM = "llm"
+    COHERE = "cohere"
+    BEDROCK = "bedrock"
+    SCORE = "score"
+    DISABLED = "disabled"
+
+    def visit(
+        self,
+        system_default: typing.Callable[[], T_Result],
+        llm: typing.Callable[[], T_Result],
+        cohere: typing.Callable[[], T_Result],
+        bedrock: typing.Callable[[], T_Result],
+        score: typing.Callable[[], T_Result],
+        disabled: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is ReRankerType.SYSTEM_DEFAULT:
+            return system_default()
+        if self is ReRankerType.LLM:
+            return llm()
+        if self is ReRankerType.COHERE:
+            return cohere()
+        if self is ReRankerType.BEDROCK:
+            return bedrock()
+        if self is ReRankerType.SCORE:
+            return score()
+        if self is ReRankerType.DISABLED:
+            return disabled()
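
One practical consequence of the enum: reranking can now be selected or switched off per request rather than only tuned. A final sketch against the updated retrieve method, with the retriever id elided:

from llama_cloud import ReRankConfig, ReRankerType
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# DISABLED presumably turns reranking off; SYSTEM_DEFAULT defers to the server.
result = client.retrievers.retrieve(
    retriever_id="...",  # an existing retriever id
    query="quarterly revenue",
    rerank_config=ReRankConfig(type=ReRankerType.DISABLED),
)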