llama-cloud 0.1.14__py3-none-any.whl → 0.1.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of llama-cloud has been flagged as potentially problematic.

Files changed (37)
  1. llama_cloud/__init__.py +8 -28
  2. llama_cloud/resources/evals/client.py +0 -643
  3. llama_cloud/resources/llama_extract/client.py +168 -6
  4. llama_cloud/resources/parsing/client.py +0 -8
  5. llama_cloud/resources/pipelines/client.py +10 -371
  6. llama_cloud/resources/projects/client.py +72 -923
  7. llama_cloud/resources/retrievers/client.py +124 -0
  8. llama_cloud/types/__init__.py +8 -28
  9. llama_cloud/types/chunk_mode.py +4 -0
  10. llama_cloud/types/extract_config.py +0 -3
  11. llama_cloud/types/{local_eval.py → extract_job_create_batch.py} +9 -14
  12. llama_cloud/types/extract_job_create_batch_data_schema_override.py +9 -0
  13. llama_cloud/types/extract_job_create_batch_data_schema_override_zero_value.py +7 -0
  14. llama_cloud/types/extract_mode.py +9 -1
  15. llama_cloud/types/llama_parse_parameters.py +0 -1
  16. llama_cloud/types/{local_eval_results.py → paginated_extract_runs_response.py} +7 -8
  17. llama_cloud/types/prompt_conf.py +1 -0
  18. llama_cloud/types/report_block.py +1 -0
  19. llama_cloud/types/struct_mode.py +4 -0
  20. llama_cloud/types/struct_parse_conf.py +6 -0
  21. llama_cloud/types/usage.py +2 -1
  22. {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/METADATA +1 -1
  23. {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/RECORD +25 -35
  24. llama_cloud/types/eval_dataset.py +0 -40
  25. llama_cloud/types/eval_dataset_job_params.py +0 -39
  26. llama_cloud/types/eval_dataset_job_record.py +0 -58
  27. llama_cloud/types/eval_execution_params_override.py +0 -37
  28. llama_cloud/types/eval_metric.py +0 -17
  29. llama_cloud/types/eval_question.py +0 -38
  30. llama_cloud/types/eval_question_create.py +0 -31
  31. llama_cloud/types/eval_question_result.py +0 -52
  32. llama_cloud/types/local_eval_sets.py +0 -33
  33. llama_cloud/types/metric_result.py +0 -33
  34. llama_cloud/types/prompt_mixin_prompts.py +0 -39
  35. llama_cloud/types/prompt_spec.py +0 -36
  36. {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/LICENSE +0 -0
  37. {llama_cloud-0.1.14.dist-info → llama_cloud-0.1.16.dist-info}/WHEEL +0 -0
@@ -346,6 +346,68 @@ class RetrieversClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def direct_retrieve(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        mode: typing.Optional[CompositeRetrievalMode] = OMIT,
+        rerank_top_n: typing.Optional[int] = OMIT,
+        query: str,
+        pipelines: typing.Optional[typing.List[RetrieverPipeline]] = OMIT,
+    ) -> CompositeRetrievalResult:
+        """
+        Retrieve data using specified pipelines without creating a persistent retriever.
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.
+
+            - rerank_top_n: typing.Optional[int]. The number of nodes to retrieve after reranking over retrieved nodes from all retrieval tools.
+
+            - query: str. The query to retrieve against.
+
+            - pipelines: typing.Optional[typing.List[RetrieverPipeline]]. The pipelines to use for retrieval.
+        ---
+        from llama_cloud import CompositeRetrievalMode
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.retrievers.direct_retrieve(
+            mode=CompositeRetrievalMode.ROUTING,
+            query="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"query": query}
+        if mode is not OMIT:
+            _request["mode"] = mode
+        if rerank_top_n is not OMIT:
+            _request["rerank_top_n"] = rerank_top_n
+        if pipelines is not OMIT:
+            _request["pipelines"] = pipelines
+        _response = self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/retrievers/retrieve"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(CompositeRetrievalResult, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+

 class AsyncRetrieversClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -664,3 +726,65 @@ class AsyncRetrieversClient:
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def direct_retrieve(
+        self,
+        *,
+        project_id: typing.Optional[str] = None,
+        organization_id: typing.Optional[str] = None,
+        mode: typing.Optional[CompositeRetrievalMode] = OMIT,
+        rerank_top_n: typing.Optional[int] = OMIT,
+        query: str,
+        pipelines: typing.Optional[typing.List[RetrieverPipeline]] = OMIT,
+    ) -> CompositeRetrievalResult:
+        """
+        Retrieve data using specified pipelines without creating a persistent retriever.
+
+        Parameters:
+            - project_id: typing.Optional[str].
+
+            - organization_id: typing.Optional[str].
+
+            - mode: typing.Optional[CompositeRetrievalMode]. The mode of composite retrieval.
+
+            - rerank_top_n: typing.Optional[int]. The number of nodes to retrieve after reranking over retrieved nodes from all retrieval tools.
+
+            - query: str. The query to retrieve against.
+
+            - pipelines: typing.Optional[typing.List[RetrieverPipeline]]. The pipelines to use for retrieval.
+        ---
+        from llama_cloud import CompositeRetrievalMode
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.retrievers.direct_retrieve(
+            mode=CompositeRetrievalMode.ROUTING,
+            query="string",
+        )
+        """
+        _request: typing.Dict[str, typing.Any] = {"query": query}
+        if mode is not OMIT:
+            _request["mode"] = mode
+        if rerank_top_n is not OMIT:
+            _request["rerank_top_n"] = rerank_top_n
+        if pipelines is not OMIT:
+            _request["pipelines"] = pipelines
+        _response = await self._client_wrapper.httpx_client.request(
+            "POST",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/retrievers/retrieve"),
+            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
+            json=jsonable_encoder(_request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(CompositeRetrievalResult, _response.json())  # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
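
Taken together, the two hunks above add a `direct_retrieve` method to both the sync and async retriever clients: it POSTs to `api/v1/retrievers/retrieve` and runs a composite retrieval over ad-hoc pipelines without first persisting a retriever. A minimal sketch of the sync call, using only the arguments documented in the diff (token and project id are placeholders):

```python
# Sketch only: exercises the new direct_retrieve endpoint added in 0.1.16.
# YOUR_TOKEN / YOUR_PROJECT_ID are placeholders.
from llama_cloud import CompositeRetrievalMode
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

result = client.retrievers.direct_retrieve(
    query="What changed in the latest quarterly report?",
    mode=CompositeRetrievalMode.ROUTING,
    rerank_top_n=5,                   # nodes kept after reranking
    project_id="YOUR_PROJECT_ID",     # optional; sent as a query parameter
)
print(result)  # a CompositeRetrievalResult
```
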
@@ -103,15 +103,7 @@ from .embedding_model_config_update_embedding_config import (
     EmbeddingModelConfigUpdateEmbeddingConfig_OpenaiEmbedding,
     EmbeddingModelConfigUpdateEmbeddingConfig_VertexaiEmbedding,
 )
-from .eval_dataset import EvalDataset
-from .eval_dataset_job_params import EvalDatasetJobParams
-from .eval_dataset_job_record import EvalDatasetJobRecord
 from .eval_execution_params import EvalExecutionParams
-from .eval_execution_params_override import EvalExecutionParamsOverride
-from .eval_metric import EvalMetric
-from .eval_question import EvalQuestion
-from .eval_question_create import EvalQuestionCreate
-from .eval_question_result import EvalQuestionResult
 from .extract_agent import ExtractAgent
 from .extract_agent_create import ExtractAgentCreate
 from .extract_agent_create_data_schema import ExtractAgentCreateDataSchema
@@ -123,6 +115,9 @@ from .extract_agent_update_data_schema_zero_value import ExtractAgentUpdateDataS
 from .extract_config import ExtractConfig
 from .extract_job import ExtractJob
 from .extract_job_create import ExtractJobCreate
+from .extract_job_create_batch import ExtractJobCreateBatch
+from .extract_job_create_batch_data_schema_override import ExtractJobCreateBatchDataSchemaOverride
+from .extract_job_create_batch_data_schema_override_zero_value import ExtractJobCreateBatchDataSchemaOverrideZeroValue
 from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
 from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
 from .extract_mode import ExtractMode
@@ -175,9 +170,6 @@ from .llama_parse_supported_file_extensions import LlamaParseSupportedFileExtens
 from .llm import Llm
 from .llm_model_data import LlmModelData
 from .llm_parameters import LlmParameters
-from .local_eval import LocalEval
-from .local_eval_results import LocalEvalResults
-from .local_eval_sets import LocalEvalSets
 from .managed_ingestion_status import ManagedIngestionStatus
 from .managed_ingestion_status_response import ManagedIngestionStatusResponse
 from .markdown_element_node_parser import MarkdownElementNodeParser
@@ -188,7 +180,6 @@ from .metadata_filter import MetadataFilter
 from .metadata_filter_value import MetadataFilterValue
 from .metadata_filters import MetadataFilters
 from .metadata_filters_filters_item import MetadataFiltersFiltersItem
-from .metric_result import MetricResult
 from .node_parser import NodeParser
 from .node_relationship import NodeRelationship
 from .none_chunking_config import NoneChunkingConfig
@@ -203,6 +194,7 @@ from .page_screenshot_metadata import PageScreenshotMetadata
 from .page_screenshot_node_with_score import PageScreenshotNodeWithScore
 from .page_segmentation_config import PageSegmentationConfig
 from .page_splitter_node_parser import PageSplitterNodeParser
+from .paginated_extract_runs_response import PaginatedExtractRunsResponse
 from .paginated_jobs_history_with_metrics import PaginatedJobsHistoryWithMetrics
 from .paginated_list_cloud_documents_response import PaginatedListCloudDocumentsResponse
 from .paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
@@ -272,8 +264,6 @@ from .progress_event_status import ProgressEventStatus
 from .project import Project
 from .project_create import ProjectCreate
 from .prompt_conf import PromptConf
-from .prompt_mixin_prompts import PromptMixinPrompts
-from .prompt_spec import PromptSpec
 from .pydantic_program_mode import PydanticProgramMode
 from .recurring_credit_grant import RecurringCreditGrant
 from .related_node_info import RelatedNodeInfo
@@ -431,15 +421,7 @@ __all__ = [
     "EmbeddingModelConfigUpdateEmbeddingConfig_HuggingfaceApiEmbedding",
     "EmbeddingModelConfigUpdateEmbeddingConfig_OpenaiEmbedding",
     "EmbeddingModelConfigUpdateEmbeddingConfig_VertexaiEmbedding",
-    "EvalDataset",
-    "EvalDatasetJobParams",
-    "EvalDatasetJobRecord",
     "EvalExecutionParams",
-    "EvalExecutionParamsOverride",
-    "EvalMetric",
-    "EvalQuestion",
-    "EvalQuestionCreate",
-    "EvalQuestionResult",
     "ExtractAgent",
     "ExtractAgentCreate",
     "ExtractAgentCreateDataSchema",
@@ -451,6 +433,9 @@ __all__ = [
     "ExtractConfig",
     "ExtractJob",
     "ExtractJobCreate",
+    "ExtractJobCreateBatch",
+    "ExtractJobCreateBatchDataSchemaOverride",
+    "ExtractJobCreateBatchDataSchemaOverrideZeroValue",
     "ExtractJobCreateDataSchemaOverride",
     "ExtractJobCreateDataSchemaOverrideZeroValue",
     "ExtractMode",
@@ -501,9 +486,6 @@ __all__ = [
     "Llm",
     "LlmModelData",
     "LlmParameters",
-    "LocalEval",
-    "LocalEvalResults",
-    "LocalEvalSets",
     "ManagedIngestionStatus",
     "ManagedIngestionStatusResponse",
     "MarkdownElementNodeParser",
@@ -514,7 +496,6 @@ __all__ = [
     "MetadataFilterValue",
     "MetadataFilters",
     "MetadataFiltersFiltersItem",
-    "MetricResult",
     "NodeParser",
     "NodeRelationship",
     "NoneChunkingConfig",
@@ -529,6 +510,7 @@ __all__ = [
     "PageScreenshotNodeWithScore",
     "PageSegmentationConfig",
     "PageSplitterNodeParser",
+    "PaginatedExtractRunsResponse",
     "PaginatedJobsHistoryWithMetrics",
     "PaginatedListCloudDocumentsResponse",
     "PaginatedListPipelineFilesResponse",
@@ -592,8 +574,6 @@ __all__ = [
     "Project",
     "ProjectCreate",
     "PromptConf",
-    "PromptMixinPrompts",
-    "PromptSpec",
     "PydanticProgramMode",
     "RecurringCreditGrant",
     "RelatedNodeInfo",
@@ -10,12 +10,14 @@ class ChunkMode(str, enum.Enum):
     PAGE = "PAGE"
     DOCUMENT = "DOCUMENT"
     SECTION = "SECTION"
+    GROUPED_PAGES = "GROUPED_PAGES"

     def visit(
         self,
         page: typing.Callable[[], T_Result],
         document: typing.Callable[[], T_Result],
         section: typing.Callable[[], T_Result],
+        grouped_pages: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is ChunkMode.PAGE:
             return page()
@@ -23,3 +25,5 @@ class ChunkMode(str, enum.Enum):
             return document()
         if self is ChunkMode.SECTION:
             return section()
+        if self is ChunkMode.GROUPED_PAGES:
+            return grouped_pages()
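
The two `ChunkMode` hunks add a `GROUPED_PAGES` member and thread it through the generated `visit` helper. Because `visit` expects one callback per enum member, existing callers of `visit` must supply a `grouped_pages` handler after upgrading. A small sketch:

```python
# Sketch: exhaustive handling of ChunkMode via the generated visit() helper.
from llama_cloud import ChunkMode

def describe(mode: ChunkMode) -> str:
    return mode.visit(
        page=lambda: "one chunk per page",
        document=lambda: "one chunk per document",
        section=lambda: "one chunk per section",
        grouped_pages=lambda: "one chunk per group of pages",  # new in 0.1.16
    )

print(describe(ChunkMode.GROUPED_PAGES))
```
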
@@ -23,9 +23,6 @@ class ExtractConfig(pydantic.BaseModel):

     extraction_target: typing.Optional[ExtractTarget] = pydantic.Field(description="The extraction target specified.")
     extraction_mode: typing.Optional[ExtractMode] = pydantic.Field(description="The extraction mode specified.")
-    handle_missing: typing.Optional[bool] = pydantic.Field(
-        description="Whether to handle missing fields in the schema."
-    )
     system_prompt: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
@@ -4,6 +4,8 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
+from .extract_config import ExtractConfig
+from .extract_job_create_batch_data_schema_override import ExtractJobCreateBatchDataSchemaOverride

 try:
     import pydantic
@@ -14,24 +16,17 @@ except ImportError:
     import pydantic  # type: ignore


-class LocalEval(pydantic.BaseModel):
+class ExtractJobCreateBatch(pydantic.BaseModel):
     """
-    Evaluation result, EvaluationResult from llama_index.
-
-    Output of an BaseEvaluator.
+    Schema for creating extraction jobs in batch.
     """

-    query: typing.Optional[str]
-    contexts: typing.Optional[typing.List[str]]
-    response: typing.Optional[str]
-    passing: typing.Optional[bool]
-    feedback: typing.Optional[str]
-    score: typing.Optional[float]
-    pairwise_source: typing.Optional[str]
-    invalid_result: typing.Optional[bool] = pydantic.Field(
-        description="Whether the evaluation result is an invalid one."
+    extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
+    file_ids: typing.List[str] = pydantic.Field(description="The ids of the files")
+    data_schema_override: typing.Optional[ExtractJobCreateBatchDataSchemaOverride] = pydantic.Field(
+        description="The data schema to override the extraction agent's data schema with"
     )
-    invalid_reason: typing.Optional[str]
+    config_override: typing.Optional[ExtractConfig]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
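
This rename replaces the old `LocalEval` model with `ExtractJobCreateBatch`, the request body for creating extraction jobs in batch. A hedged sketch of constructing it (the agent and file ids are placeholders, and the diff does not show which client method consumes the model):

```python
# Sketch: building the new batch-job request body added in 0.1.16.
from llama_cloud import ExtractConfig, ExtractJobCreateBatch, ExtractMode

batch = ExtractJobCreateBatch(
    extraction_agent_id="YOUR_EXTRACTION_AGENT_ID",
    file_ids=["file-1", "file-2", "file-3"],
    # Optional per-batch overrides of the agent's schema and config:
    data_schema_override={"invoice_total": "number", "vendor": "string"},
    config_override=ExtractConfig(extraction_mode=ExtractMode.MULTIMODAL),
)
print(batch.json())
```
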
@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .extract_job_create_batch_data_schema_override_zero_value import ExtractJobCreateBatchDataSchemaOverrideZeroValue
+
+ExtractJobCreateBatchDataSchemaOverride = typing.Union[
+    typing.Dict[str, typing.Optional[ExtractJobCreateBatchDataSchemaOverrideZeroValue]], str
+]
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ExtractJobCreateBatchDataSchemaOverrideZeroValue = typing.Union[
+    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+]
@@ -9,9 +9,17 @@ T_Result = typing.TypeVar("T_Result")
 class ExtractMode(str, enum.Enum):
     FAST = "FAST"
     ACCURATE = "ACCURATE"
+    MULTIMODAL = "MULTIMODAL"

-    def visit(self, fast: typing.Callable[[], T_Result], accurate: typing.Callable[[], T_Result]) -> T_Result:
+    def visit(
+        self,
+        fast: typing.Callable[[], T_Result],
+        accurate: typing.Callable[[], T_Result],
+        multimodal: typing.Callable[[], T_Result],
+    ) -> T_Result:
         if self is ExtractMode.FAST:
             return fast()
         if self is ExtractMode.ACCURATE:
             return accurate()
+        if self is ExtractMode.MULTIMODAL:
+            return multimodal()
@@ -26,7 +26,6 @@ class LlamaParseParameters(pydantic.BaseModel):
     disable_ocr: typing.Optional[bool]
     annotate_links: typing.Optional[bool]
     adaptive_long_table: typing.Optional[bool]
-    compact_markdown_table: typing.Optional[bool]
     disable_reconstruction: typing.Optional[bool]
     disable_image_extraction: typing.Optional[bool]
     invalidate_cache: typing.Optional[bool]
@@ -4,7 +4,7 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
-from .local_eval import LocalEval
+from .extract_run import ExtractRun

 try:
     import pydantic
@@ -15,16 +15,15 @@ except ImportError:
     import pydantic  # type: ignore


-class LocalEvalResults(pydantic.BaseModel):
+class PaginatedExtractRunsResponse(pydantic.BaseModel):
     """
-    Schema for the result of a local evaluation.
+    Schema for paginated extraction runs response.
     """

-    project_id: str = pydantic.Field(description="The ID of the project.")
-    eval_set_id: typing.Optional[str]
-    app_name: str = pydantic.Field(description="The name of the app.")
-    eval_name: str = pydantic.Field(description="The name of the eval.")
-    result: LocalEval = pydantic.Field(description="The eval results.")
+    items: typing.List[ExtractRun] = pydantic.Field(description="The list of extraction runs")
+    total: int = pydantic.Field(description="The total number of extraction runs")
+    skip: int = pydantic.Field(description="The number of extraction runs skipped")
+    limit: int = pydantic.Field(description="The maximum number of extraction runs returned")

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
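
`LocalEvalResults` likewise becomes `PaginatedExtractRunsResponse`, a standard skip/limit page of `ExtractRun` items. A sketch of paging with it; `fetch_page` is a hypothetical stand-in for whichever client call returns this model, since that method is not part of this diff:

```python
# Sketch: draining all pages of extraction runs using the new response model.
import typing

from llama_cloud import PaginatedExtractRunsResponse

def iter_extract_runs(
    fetch_page: typing.Callable[[int, int], PaginatedExtractRunsResponse],  # hypothetical (skip, limit) -> page
    page_size: int = 50,
) -> typing.Iterator[typing.Any]:
    skip = 0
    while True:
        page = fetch_page(skip, page_size)  # carries items, total, skip, limit
        yield from page.items
        skip += len(page.items)
        if not page.items or skip >= page.total:
            break
```
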
@@ -18,6 +18,7 @@ class PromptConf(pydantic.BaseModel):
     system_prompt: typing.Optional[str] = pydantic.Field(description="The system prompt to use for the extraction.")
     extraction_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for the extraction.")
     error_handling_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for error handling.")
+    reasoning_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for reasoning.")

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -18,6 +18,7 @@ except ImportError:
 class ReportBlock(pydantic.BaseModel):
     idx: int = pydantic.Field(description="The index of the block")
     template: str = pydantic.Field(description="The content of the block")
+    requires_human_review: typing.Optional[bool] = pydantic.Field(description="Whether the block requires human review")
     sources: typing.Optional[typing.List[TextNodeWithScore]] = pydantic.Field(description="The sources for the block")

     def json(self, **kwargs: typing.Any) -> str:
@@ -10,6 +10,7 @@ class StructMode(str, enum.Enum):
     STRUCT_PARSE = "STRUCT_PARSE"
     JSON_MODE = "JSON_MODE"
     FUNC_CALL = "FUNC_CALL"
+    STRUCT_RELAXED = "STRUCT_RELAXED"
     UNSTRUCTURED = "UNSTRUCTURED"

     def visit(
@@ -17,6 +18,7 @@ class StructMode(str, enum.Enum):
         struct_parse: typing.Callable[[], T_Result],
         json_mode: typing.Callable[[], T_Result],
         func_call: typing.Callable[[], T_Result],
+        struct_relaxed: typing.Callable[[], T_Result],
         unstructured: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is StructMode.STRUCT_PARSE:
@@ -25,5 +27,7 @@ class StructMode(str, enum.Enum):
             return json_mode()
         if self is StructMode.FUNC_CALL:
             return func_call()
+        if self is StructMode.STRUCT_RELAXED:
+            return struct_relaxed()
         if self is StructMode.UNSTRUCTURED:
             return unstructured()
@@ -32,6 +32,12 @@ class StructParseConf(pydantic.BaseModel):
     struct_mode: typing.Optional[StructMode] = pydantic.Field(
         description="The struct mode to use for the structured parsing."
     )
+    handle_missing: typing.Optional[bool] = pydantic.Field(
+        description="Whether to handle missing fields in the schema."
+    )
+    use_reasoning: typing.Optional[bool] = pydantic.Field(
+        description="Whether to use reasoning for the structured parsing."
+    )
     prompt_conf: typing.Optional[PromptConf] = pydantic.Field(
         description="The prompt configuration for the structured parsing."
     )
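
The `handle_missing` flag removed from `ExtractConfig` earlier in this diff reappears here on `StructParseConf`, alongside the new `use_reasoning` flag, the `STRUCT_RELAXED` struct mode, and `PromptConf.reasoning_prompt`. A sketch combining them, assuming these types are importable from the package root like the other types in this diff (values are illustrative only):

```python
# Sketch: wiring the 0.1.16 structured-parsing options together.
from llama_cloud import PromptConf, StructMode, StructParseConf

conf = StructParseConf(
    struct_mode=StructMode.STRUCT_RELAXED,  # new mode in this release
    handle_missing=True,                    # tolerate fields missing from the schema
    use_reasoning=True,                     # enable the reasoning pass
    prompt_conf=PromptConf(
        system_prompt="Extract the requested fields from the document.",
        reasoning_prompt="Explain briefly why each value was chosen.",  # new field
    ),
)
print(conf.json())
```
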
@@ -18,7 +18,7 @@ except ImportError:

 class Usage(pydantic.BaseModel):
     """
-    Response model; use UsageSubmission for tracking
+    Response model
     """

     active_free_credits_usage: typing.Optional[typing.List[FreeCreditsUsage]]
@@ -27,6 +27,7 @@ class Usage(pydantic.BaseModel):
     total_indexed_pages: typing.Optional[int]
     active_alerts: typing.Optional[typing.List[UsageActiveAlertsItem]]
     current_invoice_total_usd_cents: typing.Optional[int]
+    total_extraction_agents: typing.Optional[int]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llama-cloud
-Version: 0.1.14
+Version: 0.1.16
 Summary:
 License: MIT
 Author: Logan Markewich