llama-cloud 0.1.9__py3-none-any.whl → 0.1.11__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of llama-cloud might be problematic.

Files changed (46)
  1. llama_cloud/__init__.py +34 -8
  2. llama_cloud/resources/__init__.py +14 -3
  3. llama_cloud/resources/chat_apps/client.py +99 -133
  4. llama_cloud/resources/llama_extract/__init__.py +16 -2
  5. llama_cloud/resources/llama_extract/client.py +328 -122
  6. llama_cloud/resources/llama_extract/types/__init__.py +14 -3
  7. llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py +9 -0
  8. llama_cloud/resources/llama_extract/types/{extract_agent_create_data_schema_value.py → extract_agent_create_data_schema_zero_value.py} +1 -1
  9. llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py +9 -0
  10. llama_cloud/resources/llama_extract/types/{extract_agent_update_data_schema_value.py → extract_agent_update_data_schema_zero_value.py} +1 -1
  11. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py +9 -0
  12. llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py +7 -0
  13. llama_cloud/resources/organizations/client.py +8 -12
  14. llama_cloud/resources/parsing/client.py +244 -0
  15. llama_cloud/resources/reports/client.py +30 -26
  16. llama_cloud/resources/retrievers/client.py +16 -4
  17. llama_cloud/types/__init__.py +22 -4
  18. llama_cloud/types/chat_app.py +11 -9
  19. llama_cloud/types/chat_app_response.py +12 -10
  20. llama_cloud/types/cloud_mongo_db_atlas_vector_search.py +1 -0
  21. llama_cloud/types/edit_suggestion.py +3 -4
  22. llama_cloud/types/edit_suggestion_blocks_item.py +8 -0
  23. llama_cloud/types/extract_config.py +2 -0
  24. llama_cloud/types/extract_job_create.py +4 -2
  25. llama_cloud/types/extract_job_create_data_schema_override.py +9 -0
  26. llama_cloud/types/{extract_job_create_data_schema_override_value.py → extract_job_create_data_schema_override_zero_value.py} +1 -1
  27. llama_cloud/types/extract_mode.py +7 -7
  28. llama_cloud/types/extract_run.py +2 -2
  29. llama_cloud/types/extract_run_data.py +11 -0
  30. llama_cloud/types/extract_run_data_item_value.py +5 -0
  31. llama_cloud/types/extract_run_data_zero_value.py +5 -0
  32. llama_cloud/types/extract_schema_validate_response.py +32 -0
  33. llama_cloud/types/extract_schema_validate_response_data_schema_value.py +7 -0
  34. llama_cloud/types/extract_target.py +17 -0
  35. llama_cloud/types/llama_extract_settings.py +8 -1
  36. llama_cloud/types/llama_parse_parameters.py +9 -0
  37. llama_cloud/types/plan.py +4 -0
  38. llama_cloud/types/preset_composite_retrieval_params.py +35 -0
  39. llama_cloud/types/report_file_info.py +37 -0
  40. llama_cloud/types/report_metadata.py +2 -1
  41. llama_cloud/types/supported_llm_model_names.py +28 -4
  42. {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.11.dist-info}/METADATA +1 -1
  43. {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.11.dist-info}/RECORD +45 -32
  44. llama_cloud/types/extract_run_data_value.py +0 -5
  45. {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.11.dist-info}/LICENSE +0 -0
  46. {llama_cloud-0.1.9.dist-info → llama_cloud-0.1.11.dist-info}/WHEEL +0 -0
llama_cloud/types/__init__.py CHANGED

@@ -73,6 +73,7 @@ from .data_source_create_custom_metadata_value import DataSourceCreateCustomMeta
 from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
 from .data_source_definition import DataSourceDefinition
 from .edit_suggestion import EditSuggestion
+from .edit_suggestion_blocks_item import EditSuggestionBlocksItem
 from .element_segmentation_config import ElementSegmentationConfig
 from .embedding_model_config import EmbeddingModelConfig
 from .embedding_model_config_embedding_config import (
@@ -110,7 +111,8 @@ from .extract_agent_data_schema_value import ExtractAgentDataSchemaValue
 from .extract_config import ExtractConfig
 from .extract_job import ExtractJob
 from .extract_job_create import ExtractJobCreate
-from .extract_job_create_data_schema_override_value import ExtractJobCreateDataSchemaOverrideValue
+from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride
+from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
 from .extract_mode import ExtractMode
 from .extract_resultset import ExtractResultset
 from .extract_resultset_data import ExtractResultsetData
@@ -118,10 +120,15 @@ from .extract_resultset_data_item_value import ExtractResultsetDataItemValue
 from .extract_resultset_data_zero_value import ExtractResultsetDataZeroValue
 from .extract_resultset_extraction_metadata_value import ExtractResultsetExtractionMetadataValue
 from .extract_run import ExtractRun
+from .extract_run_data import ExtractRunData
+from .extract_run_data_item_value import ExtractRunDataItemValue
 from .extract_run_data_schema_value import ExtractRunDataSchemaValue
-from .extract_run_data_value import ExtractRunDataValue
+from .extract_run_data_zero_value import ExtractRunDataZeroValue
 from .extract_run_extraction_metadata_value import ExtractRunExtractionMetadataValue
+from .extract_schema_validate_response import ExtractSchemaValidateResponse
+from .extract_schema_validate_response_data_schema_value import ExtractSchemaValidateResponseDataSchemaValue
 from .extract_state import ExtractState
+from .extract_target import ExtractTarget
 from .file import File
 from .file_permission_info_value import FilePermissionInfoValue
 from .file_resource_info_value import FileResourceInfoValue
@@ -240,6 +247,7 @@ from .pipeline_type import PipelineType
 from .plan import Plan
 from .playground_session import PlaygroundSession
 from .pooling import Pooling
+from .preset_composite_retrieval_params import PresetCompositeRetrievalParams
 from .preset_retrieval_params import PresetRetrievalParams
 from .presigned_url import PresignedUrl
 from .progress_event import ProgressEvent
@@ -263,6 +271,7 @@ from .report_event_item_event_data import (
     ReportEventItemEventData_ReportStateUpdate,
 )
 from .report_event_type import ReportEventType
+from .report_file_info import ReportFileInfo
 from .report_metadata import ReportMetadata
 from .report_plan import ReportPlan
 from .report_plan_block import ReportPlanBlock
@@ -373,6 +382,7 @@ __all__ = [
     "DataSourceCustomMetadataValue",
     "DataSourceDefinition",
     "EditSuggestion",
+    "EditSuggestionBlocksItem",
     "ElementSegmentationConfig",
     "EmbeddingModelConfig",
     "EmbeddingModelConfigEmbeddingConfig",
@@ -406,7 +416,8 @@ __all__ = [
     "ExtractConfig",
     "ExtractJob",
     "ExtractJobCreate",
-    "ExtractJobCreateDataSchemaOverrideValue",
+    "ExtractJobCreateDataSchemaOverride",
+    "ExtractJobCreateDataSchemaOverrideZeroValue",
     "ExtractMode",
     "ExtractResultset",
     "ExtractResultsetData",
@@ -414,10 +425,15 @@ __all__ = [
     "ExtractResultsetDataZeroValue",
     "ExtractResultsetExtractionMetadataValue",
     "ExtractRun",
+    "ExtractRunData",
+    "ExtractRunDataItemValue",
     "ExtractRunDataSchemaValue",
-    "ExtractRunDataValue",
+    "ExtractRunDataZeroValue",
     "ExtractRunExtractionMetadataValue",
+    "ExtractSchemaValidateResponse",
+    "ExtractSchemaValidateResponseDataSchemaValue",
     "ExtractState",
+    "ExtractTarget",
     "File",
     "FilePermissionInfoValue",
     "FileResourceInfoValue",
@@ -528,6 +544,7 @@ __all__ = [
     "Plan",
     "PlaygroundSession",
     "Pooling",
+    "PresetCompositeRetrievalParams",
     "PresetRetrievalParams",
     "PresignedUrl",
     "ProgressEvent",
@@ -549,6 +566,7 @@ __all__ = [
     "ReportEventItemEventData_ReportBlockUpdate",
     "ReportEventItemEventData_ReportStateUpdate",
     "ReportEventType",
+    "ReportFileInfo",
     "ReportMetadata",
     "ReportPlan",
     "ReportPlanBlock",
llama_cloud/types/chat_app.py CHANGED

@@ -5,7 +5,7 @@ import typing

 from ..core.datetime_utils import serialize_datetime
 from .llm_parameters import LlmParameters
-from .preset_retrieval_params import PresetRetrievalParams
+from .preset_composite_retrieval_params import PresetCompositeRetrievalParams

 try:
     import pydantic
@@ -21,14 +21,16 @@ class ChatApp(pydantic.BaseModel):
     Schema for a chat app
     """

-    id: str
-    name: str
-    pipeline_id: str
-    project_id: str
-    llm_config: LlmParameters
-    retrieval_config: PresetRetrievalParams
-    created_at: dt.datetime
-    updated_at: dt.datetime
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
+    name: str = pydantic.Field(description="Name of the chat app")
+    retriever_id: str = pydantic.Field(description="ID of the retriever to use for the chat app")
+    llm_config: LlmParameters = pydantic.Field(description="Configuration for the LLM model to use for the chat app")
+    retrieval_config: PresetCompositeRetrievalParams = pydantic.Field(
+        description="Configuration for the retrieval model to use for the chat app"
+    )
+    project_id: str = pydantic.Field(description="ID of the project the chat app belongs to")

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/chat_app_response.py CHANGED

@@ -5,7 +5,7 @@ import typing

 from ..core.datetime_utils import serialize_datetime
 from .llm_parameters import LlmParameters
-from .preset_retrieval_params import PresetRetrievalParams
+from .preset_composite_retrieval_params import PresetCompositeRetrievalParams

 try:
     import pydantic
@@ -17,15 +17,17 @@ except ImportError:


 class ChatAppResponse(pydantic.BaseModel):
-    id: str
-    name: str
-    pipeline_id: str
-    project_id: str
-    llm_config: LlmParameters
-    retrieval_config: PresetRetrievalParams
-    created_at: dt.datetime
-    updated_at: dt.datetime
-    pipeline_name: str
+    id: str = pydantic.Field(description="Unique identifier")
+    created_at: typing.Optional[dt.datetime]
+    updated_at: typing.Optional[dt.datetime]
+    name: str = pydantic.Field(description="Name of the chat app")
+    retriever_id: str = pydantic.Field(description="ID of the retriever to use for the chat app")
+    llm_config: LlmParameters = pydantic.Field(description="Configuration for the LLM model to use for the chat app")
+    retrieval_config: PresetCompositeRetrievalParams = pydantic.Field(
+        description="Configuration for the retrieval model to use for the chat app"
+    )
+    project_id: str = pydantic.Field(description="ID of the project the chat app belongs to")
+    retriever_name: str

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
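
Both chat app models drop pipeline_id/pipeline_name in favor of retriever_id/retriever_name, and retrieval_config changes type from PresetRetrievalParams to PresetCompositeRetrievalParams, so callers reading the old attributes will break. A minimal sketch of the updated field access, assuming ChatAppResponse remains exported as before:

    # Sketch: reading the retriever fields that replace the pipeline ones.
    from llama_cloud.types import ChatAppResponse

    def summarize(app: ChatAppResponse) -> str:
        # 0.1.9 code would have read app.pipeline_id / app.pipeline_name here.
        return f"{app.name}: retriever={app.retriever_id} ({app.retriever_name})"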
llama_cloud/types/cloud_mongo_db_atlas_vector_search.py CHANGED

@@ -35,6 +35,7 @@ class CloudMongoDbAtlasVectorSearch(pydantic.BaseModel):
     collection_name: str
     vector_index_name: typing.Optional[str]
     fulltext_index_name: typing.Optional[str]
+    embedding_dimension: typing.Optional[int]
     class_name: typing.Optional[str]

     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/edit_suggestion.py CHANGED

@@ -4,7 +4,7 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
-from .report_block import ReportBlock
+from .edit_suggestion_blocks_item import EditSuggestionBlocksItem

 try:
     import pydantic
@@ -21,9 +21,8 @@ class EditSuggestion(pydantic.BaseModel):
     """

     justification: str
-    start_line: int
-    end_line: int
-    blocks: typing.List[ReportBlock]
+    blocks: typing.List[EditSuggestionBlocksItem]
+    removed_indices: typing.Optional[typing.List[int]]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/edit_suggestion_blocks_item.py ADDED

@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .report_block import ReportBlock
+from .report_plan_block import ReportPlanBlock
+
+EditSuggestionBlocksItem = typing.Union[ReportBlock, ReportPlanBlock]
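
Since EditSuggestion.blocks items are now a union of ReportBlock and ReportPlanBlock, consumers should branch on the concrete type; a minimal sketch, assuming both block models are exported from llama_cloud.types:

    # Sketch: dispatching on the EditSuggestionBlocksItem union.
    from llama_cloud.types import ReportBlock, ReportPlanBlock

    def block_kind(block) -> str:
        if isinstance(block, ReportBlock):
            return "report block"
        if isinstance(block, ReportPlanBlock):
            return "plan block"
        raise TypeError(f"unexpected block type: {type(block)!r}")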
llama_cloud/types/extract_config.py CHANGED

@@ -5,6 +5,7 @@ import typing

 from ..core.datetime_utils import serialize_datetime
 from .extract_mode import ExtractMode
+from .extract_target import ExtractTarget

 try:
     import pydantic
@@ -20,6 +21,7 @@ class ExtractConfig(pydantic.BaseModel):
     Additional parameters for the extraction agent.
     """

+    extraction_target: typing.Optional[ExtractTarget] = pydantic.Field(description="The extraction target specified.")
     extraction_mode: typing.Optional[ExtractMode] = pydantic.Field(description="The extraction mode specified.")
     handle_missing: typing.Optional[bool] = pydantic.Field(
         description="Whether to handle missing fields in the schema."
llama_cloud/types/extract_job_create.py CHANGED

@@ -5,7 +5,7 @@ import typing

 from ..core.datetime_utils import serialize_datetime
 from .extract_config import ExtractConfig
-from .extract_job_create_data_schema_override_value import ExtractJobCreateDataSchemaOverrideValue
+from .extract_job_create_data_schema_override import ExtractJobCreateDataSchemaOverride

 try:
     import pydantic
@@ -23,7 +23,9 @@ class ExtractJobCreate(pydantic.BaseModel):

     extraction_agent_id: str = pydantic.Field(description="The id of the extraction agent")
     file_id: str = pydantic.Field(description="The id of the file")
-    data_schema_override: typing.Optional[typing.Dict[str, typing.Optional[ExtractJobCreateDataSchemaOverrideValue]]]
+    data_schema_override: typing.Optional[ExtractJobCreateDataSchemaOverride] = pydantic.Field(
+        description="The data schema to override the extraction agent's data schema with"
+    )
     config_override: typing.Optional[ExtractConfig]

     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/extract_job_create_data_schema_override.py ADDED

@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .extract_job_create_data_schema_override_zero_value import ExtractJobCreateDataSchemaOverrideZeroValue
+
+ExtractJobCreateDataSchemaOverride = typing.Union[
+    typing.Dict[str, typing.Optional[ExtractJobCreateDataSchemaOverrideZeroValue]], str
+]
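
With this union, data_schema_override on ExtractJobCreate accepts either a schema mapping or a raw string; a hedged sketch with made-up IDs and schema:

    # Sketch: both shapes the new override union accepts.
    from llama_cloud.types import ExtractJobCreate

    as_mapping = ExtractJobCreate(
        extraction_agent_id="agent-123",
        file_id="file-456",
        data_schema_override={"type": "object", "properties": {"total": {"type": "number"}}},
    )
    as_string = ExtractJobCreate(
        extraction_agent_id="agent-123",
        file_id="file-456",
        data_schema_override='{"type": "object"}',
    )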
llama_cloud/types/{extract_job_create_data_schema_override_value.py → extract_job_create_data_schema_override_zero_value.py} RENAMED

@@ -2,6 +2,6 @@

 import typing

-ExtractJobCreateDataSchemaOverrideValue = typing.Union[
+ExtractJobCreateDataSchemaOverrideZeroValue = typing.Union[
     typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
 ]
llama_cloud/types/extract_mode.py CHANGED

@@ -7,11 +7,11 @@ T_Result = typing.TypeVar("T_Result")


 class ExtractMode(str, enum.Enum):
-    PER_DOC = "PER_DOC"
-    PER_PAGE = "PER_PAGE"
+    FAST = "FAST"
+    ACCURATE = "ACCURATE"

-    def visit(self, per_doc: typing.Callable[[], T_Result], per_page: typing.Callable[[], T_Result]) -> T_Result:
-        if self is ExtractMode.PER_DOC:
-            return per_doc()
-        if self is ExtractMode.PER_PAGE:
-            return per_page()
+    def visit(self, fast: typing.Callable[[], T_Result], accurate: typing.Callable[[], T_Result]) -> T_Result:
+        if self is ExtractMode.FAST:
+            return fast()
+        if self is ExtractMode.ACCURATE:
+            return accurate()
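
This is a breaking change: the document/page granularity values move to the new ExtractTarget enum (added below), and ExtractMode now selects a speed/quality profile. The visit() dispatch keys change accordingly; a small sketch:

    # Sketch: exhaustive dispatch over the renamed mode values.
    from llama_cloud.types import ExtractMode

    label = ExtractMode.ACCURATE.visit(
        fast=lambda: "optimized for speed",
        accurate=lambda: "optimized for quality",
    )
    assert label == "optimized for quality"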
llama_cloud/types/extract_run.py CHANGED

@@ -5,8 +5,8 @@ import typing

 from ..core.datetime_utils import serialize_datetime
 from .extract_config import ExtractConfig
+from .extract_run_data import ExtractRunData
 from .extract_run_data_schema_value import ExtractRunDataSchemaValue
-from .extract_run_data_value import ExtractRunDataValue
 from .extract_run_extraction_metadata_value import ExtractRunExtractionMetadataValue
 from .extract_state import ExtractState
 from .file import File
@@ -37,7 +37,7 @@ class ExtractRun(pydantic.BaseModel):
     status: ExtractState = pydantic.Field(description="The status of the extraction run")
     error: typing.Optional[str]
     job_id: typing.Optional[str]
-    data: typing.Optional[typing.Dict[str, typing.Optional[ExtractRunDataValue]]]
+    data: typing.Optional[ExtractRunData] = pydantic.Field(description="The data extracted from the file")
     extraction_metadata: typing.Optional[typing.Dict[str, typing.Optional[ExtractRunExtractionMetadataValue]]]

     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/extract_run_data.py ADDED

@@ -0,0 +1,11 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from .extract_run_data_item_value import ExtractRunDataItemValue
+from .extract_run_data_zero_value import ExtractRunDataZeroValue
+
+ExtractRunData = typing.Union[
+    typing.Dict[str, typing.Optional[ExtractRunDataZeroValue]],
+    typing.List[typing.Dict[str, typing.Optional[ExtractRunDataItemValue]]],
+]
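
ExtractRun.data can now be a single mapping or a list of mappings (presumably one per item when extracting at page granularity), so downstream code should normalize; a sketch under that assumption:

    # Sketch: normalizing the ExtractRunData union to a list of records.
    import typing

    def as_records(data) -> typing.List[dict]:
        if data is None:          # ExtractRun.data is Optional
            return []
        if isinstance(data, dict):
            return [data]         # single extraction result
        return list(data)         # list of per-item mappings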
llama_cloud/types/extract_run_data_item_value.py ADDED

@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ExtractRunDataItemValue = typing.Union[typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool]
llama_cloud/types/extract_run_data_zero_value.py ADDED

@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ExtractRunDataZeroValue = typing.Union[typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool]
llama_cloud/types/extract_schema_validate_response.py ADDED

@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .extract_schema_validate_response_data_schema_value import ExtractSchemaValidateResponseDataSchemaValue
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ExtractSchemaValidateResponse(pydantic.BaseModel):
+    data_schema: typing.Dict[str, typing.Optional[ExtractSchemaValidateResponseDataSchemaValue]]
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
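
The response wraps the validated schema as a plain mapping; a small sketch with an invented schema:

    # Sketch: reading a validated schema back from the new response model.
    from llama_cloud.types import ExtractSchemaValidateResponse

    resp = ExtractSchemaValidateResponse.parse_obj(
        {"data_schema": {"type": "object", "properties": {"total": {"type": "number"}}}}
    )
    assert resp.data_schema["type"] == "object"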
llama_cloud/types/extract_schema_validate_response_data_schema_value.py ADDED

@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ExtractSchemaValidateResponseDataSchemaValue = typing.Union[
+    typing.Dict[str, typing.Any], typing.List[typing.Any], str, int, float, bool
+]
llama_cloud/types/extract_target.py ADDED

@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class ExtractTarget(str, enum.Enum):
+    PER_DOC = "PER_DOC"
+    PER_PAGE = "PER_PAGE"
+
+    def visit(self, per_doc: typing.Callable[[], T_Result], per_page: typing.Callable[[], T_Result]) -> T_Result:
+        if self is ExtractTarget.PER_DOC:
+            return per_doc()
+        if self is ExtractTarget.PER_PAGE:
+            return per_page()
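
Together with the ExtractMode change above, granularity and quality are now configured separately on ExtractConfig; a migration sketch, assuming the remaining config fields keep their defaults:

    # 0.1.9:  ExtractConfig(extraction_mode=ExtractMode.PER_PAGE)
    # 0.1.11: granularity lives on extraction_target, quality on extraction_mode.
    from llama_cloud.types import ExtractConfig, ExtractMode, ExtractTarget

    config = ExtractConfig(
        extraction_target=ExtractTarget.PER_PAGE,
        extraction_mode=ExtractMode.ACCURATE,
    )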
llama_cloud/types/llama_extract_settings.py CHANGED

@@ -4,6 +4,7 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
+from .llama_parse_parameters import LlamaParseParameters

 try:
     import pydantic
@@ -25,11 +26,17 @@ class LlamaExtractSettings(pydantic.BaseModel):
     max_file_size: typing.Optional[int] = pydantic.Field(
         description="The maximum file size (in bytes) allowed for the document."
     )
-    max_num_pages: typing.Optional[int] = pydantic.Field(
+    max_tokens: typing.Optional[int] = pydantic.Field(
+        description="The maximum number of tokens allowed for the document."
+    )
+    max_pages: typing.Optional[int] = pydantic.Field(
         description="The maximum number of pages allowed for the document."
     )
     extraction_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for the extraction.")
     error_handling_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for error handling.")
+    llama_parse_params: typing.Optional[LlamaParseParameters] = pydantic.Field(
+        description="LlamaParse related settings."
+    )

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/llama_parse_parameters.py CHANGED

@@ -82,6 +82,15 @@ class LlamaParseParameters(pydantic.BaseModel):
     complemental_formatting_instruction: typing.Optional[str]
     content_guideline_instruction: typing.Optional[str]
     spreadsheet_extract_sub_tables: typing.Optional[bool]
+    job_timeout_in_seconds: typing.Optional[float]
+    job_timeout_extra_time_per_page_in_seconds: typing.Optional[float]
+    strict_mode_image_extraction: typing.Optional[bool]
+    strict_mode_image_ocr: typing.Optional[bool]
+    strict_mode_reconstruction: typing.Optional[bool]
+    strict_mode_buggy_font: typing.Optional[bool]
+    ignore_document_elements_for_layout_detection: typing.Optional[bool]
+    output_tables_as_html: typing.Optional[bool] = pydantic.Field(alias="output_tables_as_HTML")
+    internal_is_screenshot_job: typing.Optional[bool]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
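
On the settings side, max_num_pages is renamed to max_pages, a max_tokens cap is added, and parse-time behavior becomes tunable through the nested llama_parse_params; a sketch with invented limit values:

    # Sketch: the renamed and new settings fields (limit values are made up).
    from llama_cloud.types import LlamaExtractSettings, LlamaParseParameters

    settings = LlamaExtractSettings(
        max_pages=50,        # formerly max_num_pages
        max_tokens=100_000,
        llama_parse_params=LlamaParseParameters(
            job_timeout_in_seconds=600.0,
            strict_mode_image_extraction=True,
        ),
    )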
llama_cloud/types/plan.py CHANGED
@@ -15,6 +15,8 @@ except ImportError:


 class Plan(pydantic.BaseModel):
+    id: str = pydantic.Field(description="The ID of the plan")
+    name: typing.Optional[str]
     total_users: typing.Optional[int]
     total_indexes: typing.Optional[int]
     total_indexed_pages: typing.Optional[int]
@@ -25,6 +27,8 @@ class Plan(pydantic.BaseModel):
     allowed_external_index: typing.Optional[bool] = pydantic.Field(
         description="If is allowed to use external data sources or sinks in indexes"
     )
+    starting_on: typing.Optional[dt.datetime]
+    ending_before: typing.Optional[dt.datetime]

     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/preset_composite_retrieval_params.py ADDED

@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .composite_retrieval_mode import CompositeRetrievalMode
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PresetCompositeRetrievalParams(pydantic.BaseModel):
+    mode: typing.Optional[CompositeRetrievalMode] = pydantic.Field(description="The mode of composite retrieval.")
+    rerank_top_n: typing.Optional[int] = pydantic.Field(
+        description="The number of nodes to retrieve after reranking over retrieved nodes from all retrieval tools."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
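
Both fields are optional and fall back to server defaults when unset; a minimal sketch pairing it with the chat app models above:

    # Sketch: composite retrieval config with only rerank_top_n pinned.
    from llama_cloud.types import PresetCompositeRetrievalParams

    retrieval_config = PresetCompositeRetrievalParams(rerank_top_n=6)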
llama_cloud/types/report_file_info.py ADDED

@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ReportFileInfo(pydantic.BaseModel):
+    """
+    Information about a file stored in S3
+    """
+
+    original_name: str = pydantic.Field(description="Original filename uploaded by user")
+    s_3_path: str = pydantic.Field(alias="s3_path", description="Path to file in S3")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        allow_population_by_field_name = True
+        json_encoders = {dt.datetime: serialize_datetime}
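
Because the model sets allow_population_by_field_name, it accepts both the wire alias s3_path and the generated field name s_3_path; a small sketch with an invented path:

    # Sketch: both spellings populate the same field.
    from llama_cloud.types import ReportFileInfo

    from_wire = ReportFileInfo.parse_obj(
        {"original_name": "q3-report.pdf", "s3_path": "reports/q3-report.pdf"}
    )
    from_python = ReportFileInfo(original_name="q3-report.pdf", s_3_path="reports/q3-report.pdf")
    assert from_wire == from_python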
llama_cloud/types/report_metadata.py CHANGED

@@ -4,6 +4,7 @@ import datetime as dt
 import typing

 from ..core.datetime_utils import serialize_datetime
+from .report_file_info import ReportFileInfo
 from .report_state import ReportState

 try:
@@ -25,7 +26,7 @@ class ReportMetadata(pydantic.BaseModel):
     report_metadata: typing.Dict[str, typing.Any] = pydantic.Field(description="The metadata for the report")
     state: ReportState = pydantic.Field(description="The state of the report")
     input_files: typing.Optional[typing.List[str]]
-    template_file: typing.Optional[str]
+    template_file: typing.Optional[ReportFileInfo]
     template_text: typing.Optional[str]
     template_instructions: typing.Optional[str]
llama_cloud/types/supported_llm_model_names.py CHANGED

@@ -12,8 +12,14 @@ class SupportedLlmModelNames(str, enum.Enum):
     GPT_4_TURBO = "GPT_4_TURBO"
     GPT_4_O = "GPT_4O"
     GPT_4_O_MINI = "GPT_4O_MINI"
-    AZURE_OPENAI = "AZURE_OPENAI"
+    AZURE_OPENAI_GPT_3_5_TURBO = "AZURE_OPENAI_GPT_3_5_TURBO"
+    AZURE_OPENAI_GPT_4_O = "AZURE_OPENAI_GPT_4O"
+    AZURE_OPENAI_GPT_4_O_MINI = "AZURE_OPENAI_GPT_4O_MINI"
+    AZURE_OPENAI_GPT_4 = "AZURE_OPENAI_GPT_4"
+    AZURE_OPENAI_TEXT_EMBEDDING_3_LARGE = "AZURE_OPENAI_TEXT_EMBEDDING_3_LARGE"
     CLAUDE_3_5_SONNET = "CLAUDE_3_5_SONNET"
+    BEDROCK_CLAUDE_3_5_SONNET = "BEDROCK_CLAUDE_3_5_SONNET"
+    VERTEX_AI_CLAUDE_3_5_SONNET = "VERTEX_AI_CLAUDE_3_5_SONNET"

     def visit(
         self,
@@ -22,8 +28,14 @@ class SupportedLlmModelNames(str, enum.Enum):
         gpt_4_turbo: typing.Callable[[], T_Result],
         gpt_4_o: typing.Callable[[], T_Result],
         gpt_4_o_mini: typing.Callable[[], T_Result],
-        azure_openai: typing.Callable[[], T_Result],
+        azure_openai_gpt_3_5_turbo: typing.Callable[[], T_Result],
+        azure_openai_gpt_4_o: typing.Callable[[], T_Result],
+        azure_openai_gpt_4_o_mini: typing.Callable[[], T_Result],
+        azure_openai_gpt_4: typing.Callable[[], T_Result],
+        azure_openai_text_embedding_3_large: typing.Callable[[], T_Result],
         claude_3_5_sonnet: typing.Callable[[], T_Result],
+        bedrock_claude_3_5_sonnet: typing.Callable[[], T_Result],
+        vertex_ai_claude_3_5_sonnet: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is SupportedLlmModelNames.GPT_3_5_TURBO:
             return gpt_3_5_turbo()
@@ -35,7 +47,19 @@ class SupportedLlmModelNames(str, enum.Enum):
             return gpt_4_o()
         if self is SupportedLlmModelNames.GPT_4_O_MINI:
             return gpt_4_o_mini()
-        if self is SupportedLlmModelNames.AZURE_OPENAI:
-            return azure_openai()
+        if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_3_5_TURBO:
+            return azure_openai_gpt_3_5_turbo()
+        if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_4_O:
+            return azure_openai_gpt_4_o()
+        if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_4_O_MINI:
+            return azure_openai_gpt_4_o_mini()
+        if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_4:
+            return azure_openai_gpt_4()
+        if self is SupportedLlmModelNames.AZURE_OPENAI_TEXT_EMBEDDING_3_LARGE:
+            return azure_openai_text_embedding_3_large()
         if self is SupportedLlmModelNames.CLAUDE_3_5_SONNET:
             return claude_3_5_sonnet()
+        if self is SupportedLlmModelNames.BEDROCK_CLAUDE_3_5_SONNET:
+            return bedrock_claude_3_5_sonnet()
+        if self is SupportedLlmModelNames.VERTEX_AI_CLAUDE_3_5_SONNET:
+            return vertex_ai_claude_3_5_sonnet()
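
Note the mismatch between member names and wire values for the 4o variants (AZURE_OPENAI_GPT_4_O serializes as "AZURE_OPENAI_GPT_4O"). Since the enum subclasses str, members round-trip through their wire values; a small sketch:

    # Sketch: str-enum round-tripping for one of the new Azure members.
    from llama_cloud.types import SupportedLlmModelNames

    model = SupportedLlmModelNames("AZURE_OPENAI_GPT_4O")
    assert model is SupportedLlmModelNames.AZURE_OPENAI_GPT_4_O
    assert model == "AZURE_OPENAI_GPT_4O"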
{llama_cloud-0.1.9.dist-info → llama_cloud-0.1.11.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llama-cloud
-Version: 0.1.9
+Version: 0.1.11
 Summary:
 License: MIT
 Author: Logan Markewich