llama-cloud 0.1.11__py3-none-any.whl → 0.1.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic.


This version of llama-cloud might be problematic; consult the package registry's advisory page for more details.

@@ -0,0 +1,25 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+ import typing
5
+
6
+ T_Result = typing.TypeVar("T_Result")
7
+
8
+
9
+ class ChunkMode(str, enum.Enum):
10
+ PAGE = "PAGE"
11
+ DOCUMENT = "DOCUMENT"
12
+ SECTION = "SECTION"
13
+
14
+ def visit(
15
+ self,
16
+ page: typing.Callable[[], T_Result],
17
+ document: typing.Callable[[], T_Result],
18
+ section: typing.Callable[[], T_Result],
19
+ ) -> T_Result:
20
+ if self is ChunkMode.PAGE:
21
+ return page()
22
+ if self is ChunkMode.DOCUMENT:
23
+ return document()
24
+ if self is ChunkMode.SECTION:
25
+ return section()
@@ -6,7 +6,6 @@ import typing
6
6
  import typing_extensions
7
7
 
8
8
  from ..core.datetime_utils import serialize_datetime
9
- from .base import Base
10
9
  from .eval_dataset_job_params import EvalDatasetJobParams
11
10
  from .status_enum import StatusEnum
12
11
 
@@ -43,7 +42,7 @@ class EvalDatasetJobRecord(pydantic.BaseModel):
43
42
  started_at: typing.Optional[dt.datetime]
44
43
  ended_at: typing.Optional[dt.datetime]
45
44
  updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
46
- data: typing.Optional[Base]
45
+ data: typing.Optional[typing.Any]
47
46
 
48
47
  def json(self, **kwargs: typing.Any) -> str:
49
48
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -4,7 +4,6 @@ import datetime as dt
4
4
  import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
- from .base import Base
8
7
  from .job_names import JobNames
9
8
  from .status_enum import StatusEnum
10
9
 
@@ -26,7 +25,7 @@ class JobRecord(pydantic.BaseModel):
26
25
  partitions: typing.Dict[str, str] = pydantic.Field(
27
26
  description="The partitions for this execution. Used for determining where to save job output."
28
27
  )
29
- parameters: typing.Optional[Base]
28
+ parameters: typing.Optional[typing.Any]
30
29
  session_id: typing.Optional[str]
31
30
  correlation_id: typing.Optional[str]
32
31
  parent_job_execution_id: typing.Optional[str]
@@ -41,7 +40,7 @@ class JobRecord(pydantic.BaseModel):
41
40
  started_at: typing.Optional[dt.datetime]
42
41
  ended_at: typing.Optional[dt.datetime]
43
42
  updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
44
- data: typing.Optional[Base]
43
+ data: typing.Optional[typing.Any]
45
44
 
46
45
  def json(self, **kwargs: typing.Any) -> str:
47
46
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -4,7 +4,9 @@ import datetime as dt
4
4
  import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
+ from .chunk_mode import ChunkMode
7
8
  from .llama_parse_parameters import LlamaParseParameters
9
+ from .struct_parse_conf import StructParseConf
8
10
 
9
11
  try:
10
12
  import pydantic
@@ -21,8 +23,6 @@ class LlamaExtractSettings(pydantic.BaseModel):
21
23
  are exposed to the user.
22
24
  """
23
25
 
24
- model: typing.Optional[str] = pydantic.Field(description="The model to use for the extraction.")
25
- temperature: typing.Optional[float] = pydantic.Field(description="The temperature to use for the extraction.")
26
26
  max_file_size: typing.Optional[int] = pydantic.Field(
27
27
  description="The maximum file size (in bytes) allowed for the document."
28
28
  )
@@ -32,8 +32,13 @@ class LlamaExtractSettings(pydantic.BaseModel):
32
32
  max_pages: typing.Optional[int] = pydantic.Field(
33
33
  description="The maximum number of pages allowed for the document."
34
34
  )
35
- extraction_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for the extraction.")
36
- error_handling_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for error handling.")
35
+ chunk_mode: typing.Optional[ChunkMode] = pydantic.Field(description="The mode to use for chunking the document.")
36
+ max_chunk_size: typing.Optional[int] = pydantic.Field(
37
+ description="The maximum size of the chunks (in tokens) to use for chunking the document."
38
+ )
39
+ extraction_agent_config: typing.Optional[typing.Dict[str, StructParseConf]] = pydantic.Field(
40
+ description="The configuration for the extraction agent."
41
+ )
37
42
  llama_parse_params: typing.Optional[LlamaParseParameters] = pydantic.Field(
38
43
  description="LlamaParse related settings."
39
44
  )
@@ -5,6 +5,7 @@ import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
7
  from .parser_languages import ParserLanguages
8
+ from .parsing_mode import ParsingMode
8
9
 
9
10
  try:
10
11
  import pydantic
@@ -31,6 +32,7 @@ class LlamaParseParameters(pydantic.BaseModel):
31
32
  do_not_cache: typing.Optional[bool]
32
33
  fast_mode: typing.Optional[bool]
33
34
  skip_diagonal_text: typing.Optional[bool]
35
+ preserve_layout_alignment_across_pages: typing.Optional[bool]
34
36
  gpt_4_o_mode: typing.Optional[bool] = pydantic.Field(alias="gpt4o_mode")
35
37
  gpt_4_o_api_key: typing.Optional[str] = pydantic.Field(alias="gpt4o_api_key")
36
38
  do_not_unroll_columns: typing.Optional[bool]
@@ -91,6 +93,10 @@ class LlamaParseParameters(pydantic.BaseModel):
91
93
  ignore_document_elements_for_layout_detection: typing.Optional[bool]
92
94
  output_tables_as_html: typing.Optional[bool] = pydantic.Field(alias="output_tables_as_HTML")
93
95
  internal_is_screenshot_job: typing.Optional[bool]
96
+ parse_mode: typing.Optional[ParsingMode]
97
+ system_prompt: typing.Optional[str]
98
+ system_prompt_append: typing.Optional[str]
99
+ user_prompt: typing.Optional[str]
94
100
 
95
101
  def json(self, **kwargs: typing.Any) -> str:
96
102
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -22,6 +22,7 @@ class LlmParameters(pydantic.BaseModel):
22
22
  system_prompt: typing.Optional[str]
23
23
  temperature: typing.Optional[float]
24
24
  use_chain_of_thought_reasoning: typing.Optional[bool]
25
+ use_citation: typing.Optional[bool]
25
26
  class_name: typing.Optional[str]
26
27
 
27
28
  def json(self, **kwargs: typing.Any) -> str:
@@ -18,6 +18,7 @@ class PageScreenshotMetadata(pydantic.BaseModel):
18
18
  page_index: int = pydantic.Field(description="The index of the page for which the screenshot is taken (0-indexed)")
19
19
  file_id: str = pydantic.Field(description="The ID of the file that the page screenshot was taken from")
20
20
  image_size: int = pydantic.Field(description="The size of the image in bytes")
21
+ metadata: typing.Optional[typing.Dict[str, typing.Any]]
21
22
 
22
23
  def json(self, **kwargs: typing.Any) -> str:
23
24
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -0,0 +1,37 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+ import typing
5
+
6
+ T_Result = typing.TypeVar("T_Result")
7
+
8
+
9
+ class ParsingMode(str, enum.Enum):
10
+ """
11
+ Enum for representing the mode of parsing to be used
12
+ """
13
+
14
+ PARSE_PAGE_WITHOUT_LLM = "parse_page_without_llm"
15
+ PARSE_PAGE_WITH_LLM = "parse_page_with_llm"
16
+ PARSE_PAGE_WITH_LVM = "parse_page_with_lvm"
17
+ PARSE_PAGE_WITH_AGENT = "parse_page_with_agent"
18
+ PARSE_DOCUMENT_WITH_LLM = "parse_document_with_llm"
19
+
20
+ def visit(
21
+ self,
22
+ parse_page_without_llm: typing.Callable[[], T_Result],
23
+ parse_page_with_llm: typing.Callable[[], T_Result],
24
+ parse_page_with_lvm: typing.Callable[[], T_Result],
25
+ parse_page_with_agent: typing.Callable[[], T_Result],
26
+ parse_document_with_llm: typing.Callable[[], T_Result],
27
+ ) -> T_Result:
28
+ if self is ParsingMode.PARSE_PAGE_WITHOUT_LLM:
29
+ return parse_page_without_llm()
30
+ if self is ParsingMode.PARSE_PAGE_WITH_LLM:
31
+ return parse_page_with_llm()
32
+ if self is ParsingMode.PARSE_PAGE_WITH_LVM:
33
+ return parse_page_with_lvm()
34
+ if self is ParsingMode.PARSE_PAGE_WITH_AGENT:
35
+ return parse_page_with_agent()
36
+ if self is ParsingMode.PARSE_DOCUMENT_WITH_LLM:
37
+ return parse_document_with_llm()
@@ -14,13 +14,10 @@ except ImportError:
14
14
  import pydantic # type: ignore
15
15
 
16
16
 
17
- class ReportFileInfo(pydantic.BaseModel):
18
- """
19
- Information about a file stored in S3
20
- """
21
-
22
- original_name: str = pydantic.Field(description="Original filename uploaded by user")
23
- s_3_path: str = pydantic.Field(alias="s3_path", description="Path to file in S3")
17
+ class PromptConf(pydantic.BaseModel):
18
+ system_prompt: typing.Optional[str] = pydantic.Field(description="The system prompt to use for the extraction.")
19
+ extraction_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for the extraction.")
20
+ error_handling_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for error handling.")
24
21
 
25
22
  def json(self, **kwargs: typing.Any) -> str:
26
23
  kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -33,5 +30,4 @@ class ReportFileInfo(pydantic.BaseModel):
33
30
  class Config:
34
31
  frozen = True
35
32
  smart_union = True
36
- allow_population_by_field_name = True
37
33
  json_encoders = {dt.datetime: serialize_datetime}
@@ -4,7 +4,6 @@ import datetime as dt
4
4
  import typing
5
5
 
6
6
  from ..core.datetime_utils import serialize_datetime
7
- from .report_file_info import ReportFileInfo
8
7
  from .report_state import ReportState
9
8
 
10
9
  try:
@@ -26,7 +25,7 @@ class ReportMetadata(pydantic.BaseModel):
26
25
  report_metadata: typing.Dict[str, typing.Any] = pydantic.Field(description="The metadata for the report")
27
26
  state: ReportState = pydantic.Field(description="The state of the report")
28
27
  input_files: typing.Optional[typing.List[str]]
29
- template_file: typing.Optional[ReportFileInfo]
28
+ template_file: typing.Optional[str]
30
29
  template_text: typing.Optional[str]
31
30
  template_instructions: typing.Optional[str]
32
31
 
@@ -0,0 +1,25 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+ import typing
5
+
6
+ T_Result = typing.TypeVar("T_Result")
7
+
8
+
9
+ class SchemaRelaxMode(str, enum.Enum):
10
+ FULL = "FULL"
11
+ TOP_LEVEL = "TOP_LEVEL"
12
+ LEAF = "LEAF"
13
+
14
+ def visit(
15
+ self,
16
+ full: typing.Callable[[], T_Result],
17
+ top_level: typing.Callable[[], T_Result],
18
+ leaf: typing.Callable[[], T_Result],
19
+ ) -> T_Result:
20
+ if self is SchemaRelaxMode.FULL:
21
+ return full()
22
+ if self is SchemaRelaxMode.TOP_LEVEL:
23
+ return top_level()
24
+ if self is SchemaRelaxMode.LEAF:
25
+ return leaf()
@@ -0,0 +1,29 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import enum
4
+ import typing
5
+
6
+ T_Result = typing.TypeVar("T_Result")
7
+
8
+
9
+ class StructMode(str, enum.Enum):
10
+ STRUCT_PARSE = "STRUCT_PARSE"
11
+ JSON_MODE = "JSON_MODE"
12
+ FUNC_CALL = "FUNC_CALL"
13
+ UNSTRUCTURED = "UNSTRUCTURED"
14
+
15
+ def visit(
16
+ self,
17
+ struct_parse: typing.Callable[[], T_Result],
18
+ json_mode: typing.Callable[[], T_Result],
19
+ func_call: typing.Callable[[], T_Result],
20
+ unstructured: typing.Callable[[], T_Result],
21
+ ) -> T_Result:
22
+ if self is StructMode.STRUCT_PARSE:
23
+ return struct_parse()
24
+ if self is StructMode.JSON_MODE:
25
+ return json_mode()
26
+ if self is StructMode.FUNC_CALL:
27
+ return func_call()
28
+ if self is StructMode.UNSTRUCTURED:
29
+ return unstructured()
@@ -0,0 +1,50 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import datetime as dt
4
+ import typing
5
+
6
+ from ..core.datetime_utils import serialize_datetime
7
+ from .prompt_conf import PromptConf
8
+ from .schema_relax_mode import SchemaRelaxMode
9
+ from .struct_mode import StructMode
10
+
11
+ try:
12
+ import pydantic
13
+ if pydantic.__version__.startswith("1."):
14
+ raise ImportError
15
+ import pydantic.v1 as pydantic # type: ignore
16
+ except ImportError:
17
+ import pydantic # type: ignore
18
+
19
+
20
+ class StructParseConf(pydantic.BaseModel):
21
+ """
22
+ Configuration for the structured parsing agent.
23
+ """
24
+
25
+ model: typing.Optional[str] = pydantic.Field(description="The model to use for the structured parsing.")
26
+ temperature: typing.Optional[float] = pydantic.Field(
27
+ description="The temperature to use for the structured parsing."
28
+ )
29
+ relaxation_mode: typing.Optional[SchemaRelaxMode] = pydantic.Field(
30
+ description="The relaxation mode to use for the structured parsing."
31
+ )
32
+ struct_mode: typing.Optional[StructMode] = pydantic.Field(
33
+ description="The struct mode to use for the structured parsing."
34
+ )
35
+ prompt_conf: typing.Optional[PromptConf] = pydantic.Field(
36
+ description="The prompt configuration for the structured parsing."
37
+ )
38
+
39
+ def json(self, **kwargs: typing.Any) -> str:
40
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
41
+ return super().json(**kwargs_with_defaults)
42
+
43
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
44
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
45
+ return super().dict(**kwargs_with_defaults)
46
+
47
+ class Config:
48
+ frozen = True
49
+ smart_union = True
50
+ json_encoders = {dt.datetime: serialize_datetime}
@@ -16,7 +16,6 @@ class SupportedLlmModelNames(str, enum.Enum):
16
16
  AZURE_OPENAI_GPT_4_O = "AZURE_OPENAI_GPT_4O"
17
17
  AZURE_OPENAI_GPT_4_O_MINI = "AZURE_OPENAI_GPT_4O_MINI"
18
18
  AZURE_OPENAI_GPT_4 = "AZURE_OPENAI_GPT_4"
19
- AZURE_OPENAI_TEXT_EMBEDDING_3_LARGE = "AZURE_OPENAI_TEXT_EMBEDDING_3_LARGE"
20
19
  CLAUDE_3_5_SONNET = "CLAUDE_3_5_SONNET"
21
20
  BEDROCK_CLAUDE_3_5_SONNET = "BEDROCK_CLAUDE_3_5_SONNET"
22
21
  VERTEX_AI_CLAUDE_3_5_SONNET = "VERTEX_AI_CLAUDE_3_5_SONNET"
@@ -32,7 +31,6 @@ class SupportedLlmModelNames(str, enum.Enum):
32
31
  azure_openai_gpt_4_o: typing.Callable[[], T_Result],
33
32
  azure_openai_gpt_4_o_mini: typing.Callable[[], T_Result],
34
33
  azure_openai_gpt_4: typing.Callable[[], T_Result],
35
- azure_openai_text_embedding_3_large: typing.Callable[[], T_Result],
36
34
  claude_3_5_sonnet: typing.Callable[[], T_Result],
37
35
  bedrock_claude_3_5_sonnet: typing.Callable[[], T_Result],
38
36
  vertex_ai_claude_3_5_sonnet: typing.Callable[[], T_Result],
@@ -55,8 +53,6 @@ class SupportedLlmModelNames(str, enum.Enum):
55
53
  return azure_openai_gpt_4_o_mini()
56
54
  if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_4:
57
55
  return azure_openai_gpt_4()
58
- if self is SupportedLlmModelNames.AZURE_OPENAI_TEXT_EMBEDDING_3_LARGE:
59
- return azure_openai_text_embedding_3_large()
60
56
  if self is SupportedLlmModelNames.CLAUDE_3_5_SONNET:
61
57
  return claude_3_5_sonnet()
62
58
  if self is SupportedLlmModelNames.BEDROCK_CLAUDE_3_5_SONNET:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: llama-cloud
3
- Version: 0.1.11
3
+ Version: 0.1.13
4
4
  Summary:
5
5
  License: MIT
6
6
  Author: Logan Markewich
@@ -12,9 +12,7 @@ Classifier: Programming Language :: Python :: 3.8
12
12
  Classifier: Programming Language :: Python :: 3.9
13
13
  Classifier: Programming Language :: Python :: 3.10
14
14
  Classifier: Programming Language :: Python :: 3.11
15
- Classifier: Programming Language :: Python :: 3.12
16
- Classifier: Programming Language :: Python :: 3.13
17
- Requires-Dist: certifi (>=2024.7.4,<2025.0.0)
15
+ Requires-Dist: certifi (>=2024.7.4)
18
16
  Requires-Dist: httpx (>=0.20.0)
19
17
  Requires-Dist: pydantic (>=1.10)
20
18
  Description-Content-Type: text/markdown
@@ -1,4 +1,4 @@
1
- llama_cloud/__init__.py,sha256=kEjAyoqnkOJBRpU4xpdUH0e2g_XmkUwObp6YQFz9oAo,22037
1
+ llama_cloud/__init__.py,sha256=Gj7v4ih6fClpdnFDcbyebawJILfPIlnr4PrS2JhhEWA,22197
2
2
  llama_cloud/client.py,sha256=0fK6iRBCA77eSs0zFrYQj-zD0BLy6Dr2Ss0ETJ4WaOY,5555
3
3
  llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
4
4
  llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
@@ -30,7 +30,7 @@ llama_cloud/resources/embedding_model_configs/types/embedding_model_config_creat
30
30
  llama_cloud/resources/evals/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
31
31
  llama_cloud/resources/evals/client.py,sha256=JyPHP9MsJ-15XHUVu-UjCcINo2IDPr2OageAqLBGlmw,27578
32
32
  llama_cloud/resources/files/__init__.py,sha256=3B0SNM8EE6PddD5LpxYllci9vflEXy1xjPzhEEd-OUk,293
33
- llama_cloud/resources/files/client.py,sha256=BAwJabD4B7y17oNzi-tRVXtl5QtspX9-JqWWQ66Lzfc,49320
33
+ llama_cloud/resources/files/client.py,sha256=7VmhrE5fbftB6p6QUQUkGM5FO48obF73keq86vGFyhE,49676
34
34
  llama_cloud/resources/files/types/__init__.py,sha256=EPYENAwkjBWv1MLf8s7R5-RO-cxZ_8NPrqfR4ZoR7jY,418
35
35
  llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py,sha256=Wc8wFgujOO5pZvbbh2TMMzpa37GKZd14GYNJ9bdq7BE,214
36
36
  llama_cloud/resources/files/types/file_create_permission_info_value.py,sha256=KPCFuEaa8NiB85A5MfdXRAQ0poAUTl7Feg6BTfmdWas,209
@@ -38,7 +38,7 @@ llama_cloud/resources/files/types/file_create_resource_info_value.py,sha256=R7Y-
38
38
  llama_cloud/resources/jobs/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
39
39
  llama_cloud/resources/jobs/client.py,sha256=mN9uOzys9aZkhOJkApUy0yhfNeK8X09xQxT34ZPptNY,5386
40
40
  llama_cloud/resources/llama_extract/__init__.py,sha256=MgOA61chV7LogriUoyswOT627LaVt3UIb-imM3BvHdQ,617
41
- llama_cloud/resources/llama_extract/client.py,sha256=TGaJ8SkHteKnjxPFYxoFJaj7hClU-xldrwZDLlfKnhE,54954
41
+ llama_cloud/resources/llama_extract/client.py,sha256=SLUG01nPtdLhAXn0nMYUqkYSVrRtPQgq155fPn82KiM,55654
42
42
  llama_cloud/resources/llama_extract/types/__init__.py,sha256=yY34YD-MI4SnSbyJY5JwCGBBfqRr0dNh2zibRUt8mt4,895
43
43
  llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py,sha256=zB31hJQ8hKaIsPkfTWiX5hqsPVFMyyeWEDZ_Aq237jo,305
44
44
  llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_zero_value.py,sha256=xoyXH3f0Y5beMWBxmtXSz6QoB_df_-0QBsYdjBhZnGw,217
@@ -49,7 +49,7 @@ llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_s
49
49
  llama_cloud/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
50
50
  llama_cloud/resources/organizations/client.py,sha256=VRqPsWYEksvysYgKIOGnfhXjC3aaf9OHK6fHsS-XHqk,55460
51
51
  llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
52
- llama_cloud/resources/parsing/client.py,sha256=8WHscm_0WzNpewtjN2m01DiX3UdV4PlDlRlrEEwxjQs,72070
52
+ llama_cloud/resources/parsing/client.py,sha256=wOGNhHFrdqkJ8xSOhUFnlmS2ZahDeKP1kUWSo9WCHbY,73610
53
53
  llama_cloud/resources/pipelines/__init__.py,sha256=Mx7p3jDZRLMltsfywSufam_4AnHvmAfsxtMHVI72e-8,1083
54
54
  llama_cloud/resources/pipelines/client.py,sha256=MORoQkrH6-8-utV41zrXjFW2BegDsa_6pJhJvFH4OMQ,134251
55
55
  llama_cloud/resources/pipelines/types/__init__.py,sha256=jjaMc0V3K1HZLMYZ6WT4ydMtBCVy-oF5koqTCovbDws,1202
@@ -59,12 +59,12 @@ llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py,sha256
59
59
  llama_cloud/resources/projects/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
60
60
  llama_cloud/resources/projects/client.py,sha256=B1A68C_rm7pfI6_fq9Xm1zuHdt9O8mLk1ZVvIt0iFb4,55882
61
61
  llama_cloud/resources/reports/__init__.py,sha256=cruYbQ1bIuJbRpkfaQY7ajUEslffjd7KzvzMzbtPH94,217
62
- llama_cloud/resources/reports/client.py,sha256=-Gr3GgfEACSXoQ6knz8Vm0p6Rl6Ej3YosRfqCv74T5U,45908
62
+ llama_cloud/resources/reports/client.py,sha256=Ubf1xfaVK8PaDaO22cJdjlxpPkdSoMZr9zRDKrA2T0s,46432
63
63
  llama_cloud/resources/reports/types/__init__.py,sha256=LfwDYrI4RcQu-o42iAe7HkcwHww2YU90lOonBPTmZIk,291
64
64
  llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py,sha256=Qh-MSeRvDBfNb5hoLELivv1pLtrYVf52WVoP7G8V34A,807
65
65
  llama_cloud/resources/retrievers/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
66
66
  llama_cloud/resources/retrievers/client.py,sha256=ASDdqnwXX4qj0sCAkWO7RKFnQ1oiLzBLIQ2bwqnMOKs,24905
67
- llama_cloud/types/__init__.py,sha256=if6omz4lReynuFoo6gdR4-jULBhuWEre7JDNSlxRTJw,26111
67
+ llama_cloud/types/__init__.py,sha256=34m42Eg8gOU0pfNnnjeEapXKArCpibp7Typwd70sRXI,26365
68
68
  llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
69
69
  llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
70
70
  llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
@@ -72,7 +72,6 @@ llama_cloud/types/app_schema_chat_chat_message.py,sha256=4Mplkc7PczuxKL7Gga3aj8Q
72
72
  llama_cloud/types/auto_transform_config.py,sha256=HVeHZM75DMRznScqLTfrMwcZwIdyWPuaEYbPewnHqwc,1168
73
73
  llama_cloud/types/azure_open_ai_embedding.py,sha256=MeDqZoPYFN7Nv_imY9cfqDU9SPlEyAY4HcQZ4PF5X3g,2264
74
74
  llama_cloud/types/azure_open_ai_embedding_config.py,sha256=o1zZhzcGElH3SeixFErrm7P_WFHQ6LvrLem_nKJWunw,1170
75
- llama_cloud/types/base.py,sha256=cn_Zn61yLMDCMm1iZTPvKILSRlqRzOqZtSYyYBY5dIE,938
76
75
  llama_cloud/types/base_prompt_template.py,sha256=Cw3887tnytHZ5bJBSlniyU9k5ASidv9VYR86--IbNqo,1248
77
76
  llama_cloud/types/bedrock_embedding.py,sha256=qrUoVW9Q2DLg-3nBRfGsZqUWGszfzc6ZHR8LJiXTZk4,1908
78
77
  llama_cloud/types/bedrock_embedding_config.py,sha256=32dMhoA2cLx1jeogDnCl9WPVb83Hn99nAALnt5BM208,1147
@@ -82,6 +81,7 @@ llama_cloud/types/character_splitter.py,sha256=Jm6ie7c9JmMqIqLfAN-96sYvNUaIyLzCP
82
81
  llama_cloud/types/chat_app.py,sha256=fLuzYkXLq51C_Y23hoLwfmG-OiT7jlyHt2JGe6-f1IA,1795
83
82
  llama_cloud/types/chat_app_response.py,sha256=WSKr1KI9_pGTSstr3I53kZ8qb3y87Q4ulh8fR0C7sSU,1784
84
83
  llama_cloud/types/chat_data.py,sha256=ZYqVtjXF6qPGajU4IWZu3InpU54TXJwBFiqxBepylP0,1197
84
+ llama_cloud/types/chunk_mode.py,sha256=7FIsCfJqZyek1cwRykSgRY24gA0Qo9kMGdJDFjabb9c,621
85
85
  llama_cloud/types/cloud_az_storage_blob_data_source.py,sha256=NT4cYsD1M868_bSJxKM9cvTMtjQtQxKloE4vRv8_lwg,1534
86
86
  llama_cloud/types/cloud_azure_ai_search_vector_store.py,sha256=9GTaft7BaKsR9RJQp5dlpbslXUlTMA1AcDdKV1ApfqI,1513
87
87
  llama_cloud/types/cloud_box_data_source.py,sha256=9bffCaKGvctSsk9OdTpzzP__O1NDpb9wdvKY2uwjpwY,1470
@@ -134,7 +134,7 @@ llama_cloud/types/embedding_model_config_update.py,sha256=BiA1KbFT-TSvy5OEyChd0d
134
134
  llama_cloud/types/embedding_model_config_update_embedding_config.py,sha256=mrXFxzb9GRaH4UUnOe_05-uYUuiTgDDCRadAMbPmGgc,2991
135
135
  llama_cloud/types/eval_dataset.py,sha256=FIP4uHqUXg0LxGPaq-LmW2aTcEdQk-i5AYLbGqsQSV0,1310
136
136
  llama_cloud/types/eval_dataset_job_params.py,sha256=vcXLJWO581uigNvGAurPDgMeEFtQURWucLF5pemdeS0,1343
137
- llama_cloud/types/eval_dataset_job_record.py,sha256=lcX6iTJDngUsuvVeiqDgWDMieuIRJFRewKqiAGgwWAw,2185
137
+ llama_cloud/types/eval_dataset_job_record.py,sha256=vBDz7xezpE8AB6Kw7sZLYxgMcv0dxUWVC01_fI2QuUU,2168
138
138
  llama_cloud/types/eval_execution_params.py,sha256=ntVaJh5SMZMPL4QLUiihVjUlg2SKbrezvbMKGlrF66Q,1369
139
139
  llama_cloud/types/eval_execution_params_override.py,sha256=ihEFbMRYmFJ5mWmFW24JjV6D0qqeDM4p829mSxMGtOQ,1195
140
140
  llama_cloud/types/eval_metric.py,sha256=vhO_teMLiyzBdzKpOBW8Bm9qCw2h6m3unp2XotB7pDQ,499
@@ -181,16 +181,16 @@ llama_cloud/types/input_message.py,sha256=Ym6-tX6CMWKuHfxRtyM2y16kqSS3BzHged9rFR
181
181
  llama_cloud/types/interval_usage_and_plan.py,sha256=qRZs0MsbJ_X53YfgNujKnJjRYhMn2Bn6bjBUKKUZnZc,1161
182
182
  llama_cloud/types/job_name_mapping.py,sha256=2dQFQlVHoeSlkyEKSEJv0M3PzJf7hMvkuABj3vMY7ys,1617
183
183
  llama_cloud/types/job_names.py,sha256=ZapQT__pLI14SagjGi8AsEwWY949hBoplQemMgb_Aoc,4098
184
- llama_cloud/types/job_record.py,sha256=-tp6w7dyd5KZMMynxSrL5W5YoJSdqTRWolx_f0_Hbh0,2069
184
+ llama_cloud/types/job_record.py,sha256=r2WzLQXSOFogNMN2rl10rAlYI9OTCmVn06QaZXxa0rQ,2058
185
185
  llama_cloud/types/job_record_with_usage_metrics.py,sha256=iNV2do5TB_0e3PoOz_DJyAaM6Cn9G8KG-dGPGgEs5SY,1198
186
- llama_cloud/types/llama_extract_settings.py,sha256=X7dbrvMKGLIu6TmS6goev_jsBTS1rp_a8nZIKrjvA9U,2140
186
+ llama_cloud/types/llama_extract_settings.py,sha256=Yh9Ah9W0X4l-znjYm4oNIh8-LCBc99JEQmGU87bUzWs,2225
187
187
  llama_cloud/types/llama_index_core_base_llms_types_chat_message.py,sha256=NelHo-T-ebVMhRKsqE_xV8AJW4c7o6lS0uEQnPsmTwg,1365
188
188
  llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py,sha256=tTglUqrSUaVc2Wsi4uIt5MU-80_oxZzTnhf8ziilVGY,874
189
- llama_cloud/types/llama_parse_parameters.py,sha256=mUJwdL73Ngcy2O64xKk1lWHbnWnQpwAEjTWScM_piuc,4881
189
+ llama_cloud/types/llama_parse_parameters.py,sha256=EQsbxUsMOpmG3RgkZ151dPlwuI7puKw1CA4YIknst4U,5155
190
190
  llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=B_0N3f8Aq59W9FbsH50mGBUiyWTIXQjHFl739uAyaQw,11207
191
191
  llama_cloud/types/llm.py,sha256=7iIItVPjURp4u5xxJDAFIefUdhUKwIuA245WXilJPXE,2234
192
192
  llama_cloud/types/llm_model_data.py,sha256=6rrycqGwlK3LZ2S-WtgmeomithdLhDCgwBBZQ5KLaso,1300
193
- llama_cloud/types/llm_parameters.py,sha256=taefXZiK4IsuPHcWXxfEd36OefUqAJVuMG2e9vjnyUA,1337
193
+ llama_cloud/types/llm_parameters.py,sha256=RTKYt09lm9a1MlnBfYuTP2x_Ww4byUNNc1TqIel5O1Y,1377
194
194
  llama_cloud/types/local_eval.py,sha256=aJ8jRG0b5EL9cLjx281bzAzPw7Ar004Jfp6mBmyjuTA,1491
195
195
  llama_cloud/types/local_eval_results.py,sha256=YfK6AhfD0gr5apQBfrfzrTHDXvrk7ynAUUjNSKu9NVk,1380
196
196
  llama_cloud/types/local_eval_sets.py,sha256=XJSSriwRvkma889pPiBQrpRakKejKOX3tWPu1TGb1ug,1181
@@ -215,7 +215,7 @@ llama_cloud/types/open_ai_embedding_config.py,sha256=Mquc0JrtCo8lVYA2WW7q0ZikS3H
215
215
  llama_cloud/types/organization.py,sha256=p8mYRqSsGxw17AmdW8x8nP7P1UbdpYkwr51WTIjTVLw,1467
216
216
  llama_cloud/types/organization_create.py,sha256=hUXRwArIx_0D_lilpL7z-B0oJJ5yEX8Sbu2xqfH_9so,1086
217
217
  llama_cloud/types/page_figure_metadata.py,sha256=iIg6_f2SwJg6UcQo9X4MoSm_ygxnIBmFjS2LuUsI6qE,1528
218
- llama_cloud/types/page_screenshot_metadata.py,sha256=dXwWNDS7670xvIIuB1C_gLlsvAzQH4BRR3jLOojRvGs,1268
218
+ llama_cloud/types/page_screenshot_metadata.py,sha256=lobrq0AsOr8sDwMgA9ytop8lRmRFvJW2oiql3yLvbjM,1328
219
219
  llama_cloud/types/page_screenshot_node_with_score.py,sha256=EdqoXbmARCz1DV14E2saCPshIeII709uM4cLwxw_mkM,1232
220
220
  llama_cloud/types/page_segmentation_config.py,sha256=VH8uuxnubnJak1gSpS64OoMueHidhsDB-2eq2tVHbag,998
221
221
  llama_cloud/types/page_splitter_node_parser.py,sha256=rQgS1CDk18UKA0r9OPvjdtM570jzFArdLCTxYAtZny8,1424
@@ -230,6 +230,7 @@ llama_cloud/types/parsing_job_json_result.py,sha256=BA3_u-ChHpE5wm08WmOvgPUsMsCl
230
230
  llama_cloud/types/parsing_job_markdown_result.py,sha256=gPIUO0JwtKwvSHcRYEr995DNl7VN3EaaSaj4aPHCP4o,1077
231
231
  llama_cloud/types/parsing_job_structured_result.py,sha256=w_Z4DOHjwUPmffjc4qJiGYbniWTpkjpVcD4irL1dDj0,1017
232
232
  llama_cloud/types/parsing_job_text_result.py,sha256=TP-7IRTWZLAZz7NYLkzi4PsGnaRJuPTt40p56Mk6Rhw,1065
233
+ llama_cloud/types/parsing_mode.py,sha256=ppsF_Mia1FF26Zk3sZBwERxuqMbhvVDuVoR2kOsKJdE,1340
233
234
  llama_cloud/types/parsing_usage.py,sha256=JLlozu-vIkcRKqWaOVJ9Z2TrY7peJRTzOpYjOThGKGQ,1012
234
235
  llama_cloud/types/partition_names.py,sha256=zZZn-sn59gwch2fa7fGMwFWUEuu5Dfen3ZqKtcPnBEM,1877
235
236
  llama_cloud/types/permission.py,sha256=LjhZdo0oLvk7ZVIF1d6Qja--AKH5Ri0naUhuJvZS6Ng,1345
@@ -263,6 +264,7 @@ llama_cloud/types/progress_event.py,sha256=Bk73A8geTVaq0ze5pMnbkAmx7FSOHQIixYCpC
263
264
  llama_cloud/types/progress_event_status.py,sha256=yb4RAXwOKU6Bi7iyYy-3lwhF6_mLz0ZFyGjxIdaByoE,893
264
265
  llama_cloud/types/project.py,sha256=4NNh_ZAjEkoWl5st6b1jsPVf_SYKtUTB6rS1701G4IQ,1441
265
266
  llama_cloud/types/project_create.py,sha256=GxGmsXGJM-cHrvPFLktEkj9JtNsSdFae7-HPZFB4er0,1014
267
+ llama_cloud/types/prompt_conf.py,sha256=B3G9kdx1Md5fsx2ix4NYz5emvKi2GisYOOp9RozCPCU,1294
266
268
  llama_cloud/types/prompt_mixin_prompts.py,sha256=_ipiIFWmWSuaJ5VFI5rXa_C7lHaIL3Yv5izh7__xTxI,1323
267
269
  llama_cloud/types/prompt_spec.py,sha256=tPJTIzN9pYmiZD-HcPHFuhh4n1ak9FI5f7xFNV31djQ,1410
268
270
  llama_cloud/types/pydantic_program_mode.py,sha256=QfvpqR7TqyNuOxo78Sr58VOu7KDSBrHJM4XXBB0F5z0,1202
@@ -275,8 +277,7 @@ llama_cloud/types/report_create_response.py,sha256=tmnVkyAMVf0HNQy186DFVV1oZQzYG
275
277
  llama_cloud/types/report_event_item.py,sha256=_-0wgI96Ama2qKqUODTmI_fEcrnW5eAAjL1AoFEr4cQ,1451
276
278
  llama_cloud/types/report_event_item_event_data.py,sha256=_v_2wZVGuNgXpitYNcKlA9hJVMLECOKf8A-pUuLron8,1171
277
279
  llama_cloud/types/report_event_type.py,sha256=cPqKDVI8STX5BLndiGEovV4baa2it5fbfvcbiKyxAY8,1230
278
- llama_cloud/types/report_file_info.py,sha256=-EQgQAvfFvVcuOBM_inhfdWD-757mfsDrPjwwii-MHk,1230
279
- llama_cloud/types/report_metadata.py,sha256=TEUaRqy2ULtyFu3lFbea2X1SnYa5QMXZNJYerAQS1wM,1607
280
+ llama_cloud/types/report_metadata.py,sha256=cKB8wfToixuy8QEBNKzVTBznES9x4PU42DGnyiym5lc,1551
280
281
  llama_cloud/types/report_plan.py,sha256=UvtYQaSNUTWbmC-rP0c57rbGpDRPUQgou0c2r96FVUo,1332
281
282
  llama_cloud/types/report_plan_block.py,sha256=YlZ4fp4J3rduNKUknm0LfpHES_pgtQGFA9ZzErHoR40,1320
282
283
  llama_cloud/types/report_query.py,sha256=IwZNM37fgwD2CrHkQ3LtdKwUCyL2r4SrZc0xwfaTa_I,1216
@@ -290,12 +291,15 @@ llama_cloud/types/retriever.py,sha256=ZItPsorL8x1XjtJT49ZodaMqU8h2GfwlB4U4cgnfZk
290
291
  llama_cloud/types/retriever_create.py,sha256=WyUR9DRzu3Q9tzKEeXCdQuzCY6WKi9ADJkZea9rqvxU,1286
291
292
  llama_cloud/types/retriever_pipeline.py,sha256=F1pZDxg8JdQXRHE6ciFezd7a-Wv5bHplPcGDED-J4b0,1330
292
293
  llama_cloud/types/role.py,sha256=SCi2TyFbc68RJuNB-OdcP8ut03Uv5zPZk84QMmf17w8,1384
294
+ llama_cloud/types/schema_relax_mode.py,sha256=v4or6dYTvWvBBNtEd2ZSaUAb1706I0Zuh-Xztm-zx_0,635
293
295
  llama_cloud/types/semantic_chunking_config.py,sha256=dFDniTVWpRc7UcmVFvljUoyL5Ztd-l-YrHII7U-yM-k,1053
294
296
  llama_cloud/types/sentence_chunking_config.py,sha256=NA9xidK5ICxJPkEMQZWNcsV0Hw9Co_bzRWeYe4uSh9I,1116
295
297
  llama_cloud/types/sentence_splitter.py,sha256=GbC3KE20Nd85uzO4bqJttjqJhQ_1co2gKnSQxzfOAiM,2140
296
298
  llama_cloud/types/status_enum.py,sha256=cUBIlys89E8PUzmVqqawu7qTDF0aRqBwiijOmRDPvx0,1018
299
+ llama_cloud/types/struct_mode.py,sha256=AjYmpXTEYlMNNac6cNjEGYQBJwKJERw2ERdjGKgrX3o,845
300
+ llama_cloud/types/struct_parse_conf.py,sha256=bD0gZzN6tR8VO9s81KPwTffLQDnLLAAcNrnknii_178,1825
297
301
  llama_cloud/types/supported_llm_model.py,sha256=0v-g01LyZB7TeN0zwAeSJejRoT95SVaXOJhNz7boJwM,1461
298
- llama_cloud/types/supported_llm_model_names.py,sha256=LbYwfxt0dx6LekdcP2VYElybUmmpsCNonnVg53FjLrA,2949
302
+ llama_cloud/types/supported_llm_model_names.py,sha256=dEhmwGQVG-dmuGGbTWBAYadr-g5u3kiVz308CLWuSqw,2657
299
303
  llama_cloud/types/text_block.py,sha256=X154sQkSyposXuRcEWNp_tWcDQ-AI6q_-MfJUN5exP8,958
300
304
  llama_cloud/types/text_node.py,sha256=Tq3QmuKC5cIHvC9wAtvhsXl1g2sACs2yJwQ0Uko8GSU,2846
301
305
  llama_cloud/types/text_node_relationships_value.py,sha256=qmXURTk1Xg7ZDzRSSV1uDEel0AXRLohND5ioezibHY0,217
@@ -315,7 +319,7 @@ llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPX
315
319
  llama_cloud/types/vertex_ai_embedding_config.py,sha256=DvQk2xMJFmo54MEXTzoM4KSADyhGm_ygmFyx6wIcQdw,1159
316
320
  llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2BaIMltDqGnIowU,1217
317
321
  llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
318
- llama_cloud-0.1.11.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
319
- llama_cloud-0.1.11.dist-info/METADATA,sha256=OtYtZpSxKTx7kbaQz2ZBgkJ_8I-pkX9tUJJ6GBq2KAA,912
320
- llama_cloud-0.1.11.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
321
- llama_cloud-0.1.11.dist-info/RECORD,,
322
+ llama_cloud-0.1.13.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
323
+ llama_cloud-0.1.13.dist-info/METADATA,sha256=z7rMCV38oE2gVWpA1bfFemHvMaNa-JuQp8U_f3FYRVo,800
324
+ llama_cloud-0.1.13.dist-info/WHEEL,sha256=d2fvjOD7sXsVzChCqf0Ty0JbHKBaLYwDbGQDwQTnJ50,88
325
+ llama_cloud-0.1.13.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: poetry-core 1.9.1
2
+ Generator: poetry-core 1.7.0
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
llama_cloud/types/base.py DELETED
@@ -1,29 +0,0 @@
1
- # This file was auto-generated by Fern from our API Definition.
2
-
3
- import datetime as dt
4
- import typing
5
-
6
- from ..core.datetime_utils import serialize_datetime
7
-
8
- try:
9
- import pydantic
10
- if pydantic.__version__.startswith("1."):
11
- raise ImportError
12
- import pydantic.v1 as pydantic # type: ignore
13
- except ImportError:
14
- import pydantic # type: ignore
15
-
16
-
17
- class Base(pydantic.BaseModel):
18
- def json(self, **kwargs: typing.Any) -> str:
19
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
20
- return super().json(**kwargs_with_defaults)
21
-
22
- def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
23
- kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
24
- return super().dict(**kwargs_with_defaults)
25
-
26
- class Config:
27
- frozen = True
28
- smart_union = True
29
- json_encoders = {dt.datetime: serialize_datetime}