llama-cloud 0.1.10__py3-none-any.whl → 0.1.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: the registry has flagged this version of llama-cloud as potentially problematic.
- llama_cloud/__init__.py +14 -2
- llama_cloud/resources/files/client.py +12 -2
- llama_cloud/resources/llama_extract/client.py +64 -20
- llama_cloud/resources/parsing/client.py +180 -0
- llama_cloud/resources/reports/client.py +22 -4
- llama_cloud/types/__init__.py +14 -2
- llama_cloud/types/chunk_mode.py +25 -0
- llama_cloud/types/edit_suggestion.py +3 -4
- llama_cloud/types/edit_suggestion_blocks_item.py +8 -0
- llama_cloud/types/extract_config.py +2 -0
- llama_cloud/types/extract_mode.py +7 -7
- llama_cloud/types/extract_target.py +17 -0
- llama_cloud/types/llama_extract_settings.py +13 -5
- llama_cloud/types/llama_parse_parameters.py +1 -0
- llama_cloud/types/{report_file_info.py → prompt_conf.py} +4 -8
- llama_cloud/types/report_metadata.py +1 -2
- llama_cloud/types/schema_relax_mode.py +25 -0
- llama_cloud/types/struct_mode.py +29 -0
- llama_cloud/types/struct_parse_conf.py +50 -0
- llama_cloud/types/supported_llm_model_names.py +24 -4
- {llama_cloud-0.1.10.dist-info → llama_cloud-0.1.12.dist-info}/METADATA +2 -3
- {llama_cloud-0.1.10.dist-info → llama_cloud-0.1.12.dist-info}/RECORD +24 -18
- {llama_cloud-0.1.10.dist-info → llama_cloud-0.1.12.dist-info}/WHEEL +1 -1
- {llama_cloud-0.1.10.dist-info → llama_cloud-0.1.12.dist-info}/LICENSE +0 -0
llama_cloud/types/__init__.py
CHANGED
@@ -29,6 +29,7 @@ from .character_splitter import CharacterSplitter
 from .chat_app import ChatApp
 from .chat_app_response import ChatAppResponse
 from .chat_data import ChatData
+from .chunk_mode import ChunkMode
 from .cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
 from .cloud_azure_ai_search_vector_store import CloudAzureAiSearchVectorStore
 from .cloud_box_data_source import CloudBoxDataSource
@@ -73,6 +74,7 @@ from .data_source_create_custom_metadata_value import DataSourceCreateCustomMeta
 from .data_source_custom_metadata_value import DataSourceCustomMetadataValue
 from .data_source_definition import DataSourceDefinition
 from .edit_suggestion import EditSuggestion
+from .edit_suggestion_blocks_item import EditSuggestionBlocksItem
 from .element_segmentation_config import ElementSegmentationConfig
 from .embedding_model_config import EmbeddingModelConfig
 from .embedding_model_config_embedding_config import (
@@ -127,6 +129,7 @@ from .extract_run_extraction_metadata_value import ExtractRunExtractionMetadataV
 from .extract_schema_validate_response import ExtractSchemaValidateResponse
 from .extract_schema_validate_response_data_schema_value import ExtractSchemaValidateResponseDataSchemaValue
 from .extract_state import ExtractState
+from .extract_target import ExtractTarget
 from .file import File
 from .file_permission_info_value import FilePermissionInfoValue
 from .file_resource_info_value import FileResourceInfoValue
@@ -252,6 +255,7 @@ from .progress_event import ProgressEvent
 from .progress_event_status import ProgressEventStatus
 from .project import Project
 from .project_create import ProjectCreate
+from .prompt_conf import PromptConf
 from .prompt_mixin_prompts import PromptMixinPrompts
 from .prompt_spec import PromptSpec
 from .pydantic_program_mode import PydanticProgramMode
@@ -269,7 +273,6 @@ from .report_event_item_event_data import (
     ReportEventItemEventData_ReportStateUpdate,
 )
 from .report_event_type import ReportEventType
-from .report_file_info import ReportFileInfo
 from .report_metadata import ReportMetadata
 from .report_plan import ReportPlan
 from .report_plan_block import ReportPlanBlock
@@ -284,10 +287,13 @@ from .retriever import Retriever
 from .retriever_create import RetrieverCreate
 from .retriever_pipeline import RetrieverPipeline
 from .role import Role
+from .schema_relax_mode import SchemaRelaxMode
 from .semantic_chunking_config import SemanticChunkingConfig
 from .sentence_chunking_config import SentenceChunkingConfig
 from .sentence_splitter import SentenceSplitter
 from .status_enum import StatusEnum
+from .struct_mode import StructMode
+from .struct_parse_conf import StructParseConf
 from .supported_llm_model import SupportedLlmModel
 from .supported_llm_model_names import SupportedLlmModelNames
 from .text_block import TextBlock
@@ -336,6 +342,7 @@ __all__ = [
     "ChatApp",
     "ChatAppResponse",
     "ChatData",
+    "ChunkMode",
     "CloudAzStorageBlobDataSource",
     "CloudAzureAiSearchVectorStore",
     "CloudBoxDataSource",
@@ -380,6 +387,7 @@ __all__ = [
     "DataSourceCustomMetadataValue",
     "DataSourceDefinition",
     "EditSuggestion",
+    "EditSuggestionBlocksItem",
     "ElementSegmentationConfig",
     "EmbeddingModelConfig",
     "EmbeddingModelConfigEmbeddingConfig",
@@ -430,6 +438,7 @@ __all__ = [
     "ExtractSchemaValidateResponse",
     "ExtractSchemaValidateResponseDataSchemaValue",
     "ExtractState",
+    "ExtractTarget",
     "File",
     "FilePermissionInfoValue",
     "FileResourceInfoValue",
@@ -547,6 +556,7 @@ __all__ = [
     "ProgressEventStatus",
     "Project",
     "ProjectCreate",
+    "PromptConf",
     "PromptMixinPrompts",
     "PromptSpec",
     "PydanticProgramMode",
@@ -562,7 +572,6 @@ __all__ = [
     "ReportEventItemEventData_ReportBlockUpdate",
     "ReportEventItemEventData_ReportStateUpdate",
     "ReportEventType",
-    "ReportFileInfo",
     "ReportMetadata",
     "ReportPlan",
     "ReportPlanBlock",
@@ -577,10 +586,13 @@ __all__ = [
     "RetrieverCreate",
     "RetrieverPipeline",
     "Role",
+    "SchemaRelaxMode",
     "SemanticChunkingConfig",
     "SentenceChunkingConfig",
     "SentenceSplitter",
     "StatusEnum",
+    "StructMode",
+    "StructParseConf",
     "SupportedLlmModel",
     "SupportedLlmModelNames",
     "TextBlock",
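Usage note (not part of the diff): every name added above is exported from llama_cloud.types, and the matching +14/-2 change to llama_cloud/__init__.py indicates the same names are re-exported at the package top level. A minimal import sketch, assuming llama-cloud 0.1.12 is installed:

from llama_cloud.types import (
    ChunkMode,
    EditSuggestionBlocksItem,
    ExtractTarget,
    PromptConf,
    SchemaRelaxMode,
    StructMode,
    StructParseConf,
)

# ReportFileInfo was dropped from the public exports in this release,
# so `from llama_cloud.types import ReportFileInfo` now raises ImportError.
print(ChunkMode.PAGE.value, ExtractTarget.PER_DOC.value, StructMode.JSON_MODE.value)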
llama_cloud/types/chunk_mode.py
ADDED
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class ChunkMode(str, enum.Enum):
+    PAGE = "PAGE"
+    DOCUMENT = "DOCUMENT"
+    SECTION = "SECTION"
+
+    def visit(
+        self,
+        page: typing.Callable[[], T_Result],
+        document: typing.Callable[[], T_Result],
+        section: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is ChunkMode.PAGE:
+            return page()
+        if self is ChunkMode.DOCUMENT:
+            return document()
+        if self is ChunkMode.SECTION:
+            return section()
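Usage note (not part of the diff): ChunkMode follows the same pattern as the package's other Fern-generated enums: a str-valued enum plus a visit() method that dispatches to one callback per member. A minimal sketch:

from llama_cloud.types import ChunkMode

mode = ChunkMode.SECTION

# visit() invokes exactly one of the callbacks, chosen by the enum member.
label = mode.visit(
    page=lambda: "per-page chunks",
    document=lambda: "whole-document chunks",
    section=lambda: "per-section chunks",
)
print(label)        # per-section chunks
print(mode.value)   # SECTION -- a str subclass, so it serializes as a plain string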
llama_cloud/types/edit_suggestion.py
CHANGED
@@ -4,7 +4,7 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
-from .
+from .edit_suggestion_blocks_item import EditSuggestionBlocksItem
 
 try:
     import pydantic
@@ -21,9 +21,8 @@ class EditSuggestion(pydantic.BaseModel):
     """
 
     justification: str
-
-
-    blocks: typing.List[ReportBlock]
+    blocks: typing.List[EditSuggestionBlocksItem]
+    removed_indices: typing.Optional[typing.List[int]]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/extract_config.py
CHANGED
@@ -5,6 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from .extract_mode import ExtractMode
+from .extract_target import ExtractTarget
 
 try:
     import pydantic
@@ -20,6 +21,7 @@ class ExtractConfig(pydantic.BaseModel):
     Additional parameters for the extraction agent.
     """
 
+    extraction_target: typing.Optional[ExtractTarget] = pydantic.Field(description="The extraction target specified.")
     extraction_mode: typing.Optional[ExtractMode] = pydantic.Field(description="The extraction mode specified.")
     handle_missing: typing.Optional[bool] = pydantic.Field(
         description="Whether to handle missing fields in the schema."
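Usage note (not part of the diff): the new extraction_target field sits next to extraction_mode on ExtractConfig. A minimal construction sketch, assuming the ExtractConfig fields outside this hunk are optional as well:

from llama_cloud.types import ExtractConfig, ExtractMode, ExtractTarget

config = ExtractConfig(
    extraction_target=ExtractTarget.PER_PAGE,  # or ExtractTarget.PER_DOC
    extraction_mode=ExtractMode.ACCURATE,      # or ExtractMode.FAST
)

# The generated dict()/json() default to exclude_unset=True, so only the
# fields set above appear in the serialized payload.
print(config.dict())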
llama_cloud/types/extract_mode.py
CHANGED
@@ -7,11 +7,11 @@ T_Result = typing.TypeVar("T_Result")
 
 
 class ExtractMode(str, enum.Enum):
-
-
+    FAST = "FAST"
+    ACCURATE = "ACCURATE"
 
-    def visit(self,
-        if self is ExtractMode.
-            return
-        if self is ExtractMode.
-            return
+    def visit(self, fast: typing.Callable[[], T_Result], accurate: typing.Callable[[], T_Result]) -> T_Result:
+        if self is ExtractMode.FAST:
+            return fast()
+        if self is ExtractMode.ACCURATE:
+            return accurate()
llama_cloud/types/extract_target.py
ADDED
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class ExtractTarget(str, enum.Enum):
+    PER_DOC = "PER_DOC"
+    PER_PAGE = "PER_PAGE"
+
+    def visit(self, per_doc: typing.Callable[[], T_Result], per_page: typing.Callable[[], T_Result]) -> T_Result:
+        if self is ExtractTarget.PER_DOC:
+            return per_doc()
+        if self is ExtractTarget.PER_PAGE:
+            return per_page()
llama_cloud/types/llama_extract_settings.py
CHANGED
@@ -4,7 +4,9 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
+from .chunk_mode import ChunkMode
 from .llama_parse_parameters import LlamaParseParameters
+from .struct_parse_conf import StructParseConf
 
 try:
     import pydantic
@@ -21,16 +23,22 @@ class LlamaExtractSettings(pydantic.BaseModel):
     are exposed to the user.
     """
 
-    model: typing.Optional[str] = pydantic.Field(description="The model to use for the extraction.")
-    temperature: typing.Optional[float] = pydantic.Field(description="The temperature to use for the extraction.")
     max_file_size: typing.Optional[int] = pydantic.Field(
         description="The maximum file size (in bytes) allowed for the document."
     )
-
+    max_tokens: typing.Optional[int] = pydantic.Field(
+        description="The maximum number of tokens allowed for the document."
+    )
+    max_pages: typing.Optional[int] = pydantic.Field(
         description="The maximum number of pages allowed for the document."
     )
-
-
+    chunk_mode: typing.Optional[ChunkMode] = pydantic.Field(description="The mode to use for chunking the document.")
+    max_chunk_size: typing.Optional[int] = pydantic.Field(
+        description="The maximum size of the chunks (in tokens) to use for chunking the document."
+    )
+    extraction_agent_config: typing.Optional[typing.Dict[str, StructParseConf]] = pydantic.Field(
+        description="The configuration for the extraction agent."
+    )
     llama_parse_params: typing.Optional[LlamaParseParameters] = pydantic.Field(
         description="LlamaParse related settings."
     )
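Usage note (not part of the diff): the new chunking and per-agent settings compose with the existing limits. A minimal sketch, assuming the fields not shown in this hunk are also optional; the "default" key below is purely illustrative, since the schema only requires Dict[str, StructParseConf]:

from llama_cloud.types import (
    ChunkMode,
    LlamaExtractSettings,
    PromptConf,
    SchemaRelaxMode,
    StructMode,
    StructParseConf,
)

settings = LlamaExtractSettings(
    max_pages=200,
    max_tokens=120_000,
    chunk_mode=ChunkMode.SECTION,
    max_chunk_size=4_000,  # measured in tokens, per the field description
    extraction_agent_config={
        "default": StructParseConf(  # hypothetical key name, for illustration only
            struct_mode=StructMode.FUNC_CALL,
            relaxation_mode=SchemaRelaxMode.LEAF,
            prompt_conf=PromptConf(system_prompt="Extract the requested fields verbatim."),
        )
    },
)
print(settings.json())

The model and temperature fields removed above have counterparts on StructParseConf, which suggests that model selection is now configured per extraction agent rather than globally.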
llama_cloud/types/llama_parse_parameters.py
CHANGED
@@ -90,6 +90,7 @@ class LlamaParseParameters(pydantic.BaseModel):
     strict_mode_buggy_font: typing.Optional[bool]
     ignore_document_elements_for_layout_detection: typing.Optional[bool]
     output_tables_as_html: typing.Optional[bool] = pydantic.Field(alias="output_tables_as_HTML")
+    internal_is_screenshot_job: typing.Optional[bool]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/{report_file_info.py → prompt_conf.py}
RENAMED
@@ -14,13 +14,10 @@ except ImportError:
     import pydantic  # type: ignore
 
 
-class ReportFileInfo(pydantic.BaseModel):
-    ""
-
-    ""
-
-    original_name: str = pydantic.Field(description="Original filename uploaded by user")
-    s_3_path: str = pydantic.Field(alias="s3_path", description="Path to file in S3")
+class PromptConf(pydantic.BaseModel):
+    system_prompt: typing.Optional[str] = pydantic.Field(description="The system prompt to use for the extraction.")
+    extraction_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for the extraction.")
+    error_handling_prompt: typing.Optional[str] = pydantic.Field(description="The prompt to use for error handling.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
@@ -33,5 +30,4 @@ class ReportFileInfo(pydantic.BaseModel):
     class Config:
         frozen = True
         smart_union = True
-        allow_population_by_field_name = True
         json_encoders = {dt.datetime: serialize_datetime}
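Usage note (not part of the diff): PromptConf, which replaces ReportFileInfo at this module path, is a small pydantic model with three optional prompt overrides. A minimal sketch showing the serialization behavior of the generated json():

from llama_cloud.types import PromptConf

conf = PromptConf(system_prompt="You are a careful data-entry assistant.")

# json()/dict() pass by_alias=True and exclude_unset=True, so the two
# prompts left unset are omitted from the output.
print(conf.json())  # {"system_prompt": "You are a careful data-entry assistant."}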
llama_cloud/types/report_metadata.py
CHANGED
@@ -4,7 +4,6 @@ import datetime as dt
 import typing
 
 from ..core.datetime_utils import serialize_datetime
-from .report_file_info import ReportFileInfo
 from .report_state import ReportState
 
 try:
@@ -26,7 +25,7 @@ class ReportMetadata(pydantic.BaseModel):
     report_metadata: typing.Dict[str, typing.Any] = pydantic.Field(description="The metadata for the report")
     state: ReportState = pydantic.Field(description="The state of the report")
     input_files: typing.Optional[typing.List[str]]
-    template_file: typing.Optional[
+    template_file: typing.Optional[str]
     template_text: typing.Optional[str]
     template_instructions: typing.Optional[str]
 
llama_cloud/types/schema_relax_mode.py
ADDED
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class SchemaRelaxMode(str, enum.Enum):
+    FULL = "FULL"
+    TOP_LEVEL = "TOP_LEVEL"
+    LEAF = "LEAF"
+
+    def visit(
+        self,
+        full: typing.Callable[[], T_Result],
+        top_level: typing.Callable[[], T_Result],
+        leaf: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is SchemaRelaxMode.FULL:
+            return full()
+        if self is SchemaRelaxMode.TOP_LEVEL:
+            return top_level()
+        if self is SchemaRelaxMode.LEAF:
+            return leaf()
llama_cloud/types/struct_mode.py
ADDED
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import enum
+import typing
+
+T_Result = typing.TypeVar("T_Result")
+
+
+class StructMode(str, enum.Enum):
+    STRUCT_PARSE = "STRUCT_PARSE"
+    JSON_MODE = "JSON_MODE"
+    FUNC_CALL = "FUNC_CALL"
+    UNSTRUCTURED = "UNSTRUCTURED"
+
+    def visit(
+        self,
+        struct_parse: typing.Callable[[], T_Result],
+        json_mode: typing.Callable[[], T_Result],
+        func_call: typing.Callable[[], T_Result],
+        unstructured: typing.Callable[[], T_Result],
+    ) -> T_Result:
+        if self is StructMode.STRUCT_PARSE:
+            return struct_parse()
+        if self is StructMode.JSON_MODE:
+            return json_mode()
+        if self is StructMode.FUNC_CALL:
+            return func_call()
+        if self is StructMode.UNSTRUCTURED:
+            return unstructured()
llama_cloud/types/struct_parse_conf.py
ADDED
@@ -0,0 +1,50 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .prompt_conf import PromptConf
+from .schema_relax_mode import SchemaRelaxMode
+from .struct_mode import StructMode
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class StructParseConf(pydantic.BaseModel):
+    """
+    Configuration for the structured parsing agent.
+    """
+
+    model: typing.Optional[str] = pydantic.Field(description="The model to use for the structured parsing.")
+    temperature: typing.Optional[float] = pydantic.Field(
+        description="The temperature to use for the structured parsing."
+    )
+    relaxation_mode: typing.Optional[SchemaRelaxMode] = pydantic.Field(
+        description="The relaxation mode to use for the structured parsing."
+    )
+    struct_mode: typing.Optional[StructMode] = pydantic.Field(
+        description="The struct mode to use for the structured parsing."
+    )
+    prompt_conf: typing.Optional[PromptConf] = pydantic.Field(
+        description="The prompt configuration for the structured parsing."
+    )
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
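Usage note (not part of the diff): like the other generated models, StructParseConf sets Config.frozen = True, so instances are immutable and hashable once built, which suits their role as values inside the extraction_agent_config mapping. A small sketch:

from llama_cloud.types import SchemaRelaxMode, StructMode, StructParseConf

conf = StructParseConf(
    struct_mode=StructMode.JSON_MODE,
    relaxation_mode=SchemaRelaxMode.FULL,
)

try:
    conf.struct_mode = StructMode.FUNC_CALL  # rejected: frozen models disallow mutation
except TypeError as exc:
    print(f"immutable: {exc}")

# Frozen pydantic models are hashable, so identical configs deduplicate in a set.
print(len({conf, conf}))  # 1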
llama_cloud/types/supported_llm_model_names.py
CHANGED
@@ -12,8 +12,13 @@ class SupportedLlmModelNames(str, enum.Enum):
     GPT_4_TURBO = "GPT_4_TURBO"
     GPT_4_O = "GPT_4O"
     GPT_4_O_MINI = "GPT_4O_MINI"
-
+    AZURE_OPENAI_GPT_3_5_TURBO = "AZURE_OPENAI_GPT_3_5_TURBO"
+    AZURE_OPENAI_GPT_4_O = "AZURE_OPENAI_GPT_4O"
+    AZURE_OPENAI_GPT_4_O_MINI = "AZURE_OPENAI_GPT_4O_MINI"
+    AZURE_OPENAI_GPT_4 = "AZURE_OPENAI_GPT_4"
     CLAUDE_3_5_SONNET = "CLAUDE_3_5_SONNET"
+    BEDROCK_CLAUDE_3_5_SONNET = "BEDROCK_CLAUDE_3_5_SONNET"
+    VERTEX_AI_CLAUDE_3_5_SONNET = "VERTEX_AI_CLAUDE_3_5_SONNET"
 
     def visit(
         self,
@@ -22,8 +27,13 @@ class SupportedLlmModelNames(str, enum.Enum):
         gpt_4_turbo: typing.Callable[[], T_Result],
         gpt_4_o: typing.Callable[[], T_Result],
         gpt_4_o_mini: typing.Callable[[], T_Result],
-
+        azure_openai_gpt_3_5_turbo: typing.Callable[[], T_Result],
+        azure_openai_gpt_4_o: typing.Callable[[], T_Result],
+        azure_openai_gpt_4_o_mini: typing.Callable[[], T_Result],
+        azure_openai_gpt_4: typing.Callable[[], T_Result],
         claude_3_5_sonnet: typing.Callable[[], T_Result],
+        bedrock_claude_3_5_sonnet: typing.Callable[[], T_Result],
+        vertex_ai_claude_3_5_sonnet: typing.Callable[[], T_Result],
     ) -> T_Result:
         if self is SupportedLlmModelNames.GPT_3_5_TURBO:
             return gpt_3_5_turbo()
@@ -35,7 +45,17 @@ class SupportedLlmModelNames(str, enum.Enum):
             return gpt_4_o()
         if self is SupportedLlmModelNames.GPT_4_O_MINI:
             return gpt_4_o_mini()
-        if self is SupportedLlmModelNames.
-            return
+        if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_3_5_TURBO:
+            return azure_openai_gpt_3_5_turbo()
+        if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_4_O:
+            return azure_openai_gpt_4_o()
+        if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_4_O_MINI:
+            return azure_openai_gpt_4_o_mini()
+        if self is SupportedLlmModelNames.AZURE_OPENAI_GPT_4:
+            return azure_openai_gpt_4()
         if self is SupportedLlmModelNames.CLAUDE_3_5_SONNET:
             return claude_3_5_sonnet()
+        if self is SupportedLlmModelNames.BEDROCK_CLAUDE_3_5_SONNET:
+            return bedrock_claude_3_5_sonnet()
+        if self is SupportedLlmModelNames.VERTEX_AI_CLAUDE_3_5_SONNET:
+            return vertex_ai_claude_3_5_sonnet()
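Usage note (not part of the diff): the new members are plain string-valued enum entries, so they round-trip directly from API payloads; callers that dispatch through visit() must add keyword arguments for every new member, since the method takes one callback per entry. A minimal sketch of the value round-trip:

from llama_cloud.types import SupportedLlmModelNames

m = SupportedLlmModelNames.AZURE_OPENAI_GPT_4_O
print(m.value)                                              # AZURE_OPENAI_GPT_4O
print(m is SupportedLlmModelNames("AZURE_OPENAI_GPT_4O"))   # True: lookup by API value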
{llama_cloud-0.1.10.dist-info → llama_cloud-0.1.12.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: llama-cloud
-Version: 0.1.10
+Version: 0.1.12
 Summary:
 License: MIT
 Author: Logan Markewich
@@ -13,8 +13,7 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-
-Requires-Dist: certifi (>=2024.7.4,<2025.0.0)
+Requires-Dist: certifi (>=2024.7.4)
 Requires-Dist: httpx (>=0.20.0)
 Requires-Dist: pydantic (>=1.10)
 Description-Content-Type: text/markdown
{llama_cloud-0.1.10.dist-info → llama_cloud-0.1.12.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-llama_cloud/__init__.py,sha256=
+llama_cloud/__init__.py,sha256=X4Q_rgTHOWZ4unIpgBwbg6xxbHL7C_NDfae-yHhM12k,22183
 llama_cloud/client.py,sha256=0fK6iRBCA77eSs0zFrYQj-zD0BLy6Dr2Ss0ETJ4WaOY,5555
 llama_cloud/core/__init__.py,sha256=QJS3CJ2TYP2E1Tge0CS6Z7r8LTNzJHQVX1hD3558eP0,519
 llama_cloud/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
@@ -30,7 +30,7 @@ llama_cloud/resources/embedding_model_configs/types/embedding_model_config_creat
 llama_cloud/resources/evals/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/evals/client.py,sha256=JyPHP9MsJ-15XHUVu-UjCcINo2IDPr2OageAqLBGlmw,27578
 llama_cloud/resources/files/__init__.py,sha256=3B0SNM8EE6PddD5LpxYllci9vflEXy1xjPzhEEd-OUk,293
-llama_cloud/resources/files/client.py,sha256=
+llama_cloud/resources/files/client.py,sha256=7VmhrE5fbftB6p6QUQUkGM5FO48obF73keq86vGFyhE,49676
 llama_cloud/resources/files/types/__init__.py,sha256=EPYENAwkjBWv1MLf8s7R5-RO-cxZ_8NPrqfR4ZoR7jY,418
 llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py,sha256=Wc8wFgujOO5pZvbbh2TMMzpa37GKZd14GYNJ9bdq7BE,214
 llama_cloud/resources/files/types/file_create_permission_info_value.py,sha256=KPCFuEaa8NiB85A5MfdXRAQ0poAUTl7Feg6BTfmdWas,209
@@ -38,7 +38,7 @@ llama_cloud/resources/files/types/file_create_resource_info_value.py,sha256=R7Y-
 llama_cloud/resources/jobs/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/jobs/client.py,sha256=mN9uOzys9aZkhOJkApUy0yhfNeK8X09xQxT34ZPptNY,5386
 llama_cloud/resources/llama_extract/__init__.py,sha256=MgOA61chV7LogriUoyswOT627LaVt3UIb-imM3BvHdQ,617
-llama_cloud/resources/llama_extract/client.py,sha256=
+llama_cloud/resources/llama_extract/client.py,sha256=V8rmOzmyL3oRdvYbHQFZE77dFvUzzjsnEA1ep2_zLuM,55218
 llama_cloud/resources/llama_extract/types/__init__.py,sha256=yY34YD-MI4SnSbyJY5JwCGBBfqRr0dNh2zibRUt8mt4,895
 llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py,sha256=zB31hJQ8hKaIsPkfTWiX5hqsPVFMyyeWEDZ_Aq237jo,305
 llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_zero_value.py,sha256=xoyXH3f0Y5beMWBxmtXSz6QoB_df_-0QBsYdjBhZnGw,217
@@ -49,7 +49,7 @@ llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_s
 llama_cloud/resources/organizations/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/organizations/client.py,sha256=VRqPsWYEksvysYgKIOGnfhXjC3aaf9OHK6fHsS-XHqk,55460
 llama_cloud/resources/parsing/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
-llama_cloud/resources/parsing/client.py,sha256=
+llama_cloud/resources/parsing/client.py,sha256=8WHscm_0WzNpewtjN2m01DiX3UdV4PlDlRlrEEwxjQs,72070
 llama_cloud/resources/pipelines/__init__.py,sha256=Mx7p3jDZRLMltsfywSufam_4AnHvmAfsxtMHVI72e-8,1083
 llama_cloud/resources/pipelines/client.py,sha256=MORoQkrH6-8-utV41zrXjFW2BegDsa_6pJhJvFH4OMQ,134251
 llama_cloud/resources/pipelines/types/__init__.py,sha256=jjaMc0V3K1HZLMYZ6WT4ydMtBCVy-oF5koqTCovbDws,1202
@@ -59,12 +59,12 @@ llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py,sha256
 llama_cloud/resources/projects/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/projects/client.py,sha256=B1A68C_rm7pfI6_fq9Xm1zuHdt9O8mLk1ZVvIt0iFb4,55882
 llama_cloud/resources/reports/__init__.py,sha256=cruYbQ1bIuJbRpkfaQY7ajUEslffjd7KzvzMzbtPH94,217
-llama_cloud/resources/reports/client.py,sha256
+llama_cloud/resources/reports/client.py,sha256=Ubf1xfaVK8PaDaO22cJdjlxpPkdSoMZr9zRDKrA2T0s,46432
 llama_cloud/resources/reports/types/__init__.py,sha256=LfwDYrI4RcQu-o42iAe7HkcwHww2YU90lOonBPTmZIk,291
 llama_cloud/resources/reports/types/update_report_plan_api_v_1_reports_report_id_plan_patch_request_action.py,sha256=Qh-MSeRvDBfNb5hoLELivv1pLtrYVf52WVoP7G8V34A,807
 llama_cloud/resources/retrievers/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 llama_cloud/resources/retrievers/client.py,sha256=ASDdqnwXX4qj0sCAkWO7RKFnQ1oiLzBLIQ2bwqnMOKs,24905
-llama_cloud/types/__init__.py,sha256=
+llama_cloud/types/__init__.py,sha256=X2X5A34cSJO0B2zEHZxvsSyv0SLKlCxkPcNCG4J6hxo,26343
 llama_cloud/types/advanced_mode_transform_config.py,sha256=4xCXye0_cPmVS1F8aNTx81sIaEPjQH9kiCCAIoqUzlI,1502
 llama_cloud/types/advanced_mode_transform_config_chunking_config.py,sha256=wYbJnWLpeQDfhmDZz-wJfYzD1iGT5Jcxb9ga3mzUuvk,1983
 llama_cloud/types/advanced_mode_transform_config_segmentation_config.py,sha256=anNGq0F5-IlbIW3kpC8OilzLJnUq5tdIcWHnRnmlYsg,1303
@@ -82,6 +82,7 @@ llama_cloud/types/character_splitter.py,sha256=Jm6ie7c9JmMqIqLfAN-96sYvNUaIyLzCP
 llama_cloud/types/chat_app.py,sha256=fLuzYkXLq51C_Y23hoLwfmG-OiT7jlyHt2JGe6-f1IA,1795
 llama_cloud/types/chat_app_response.py,sha256=WSKr1KI9_pGTSstr3I53kZ8qb3y87Q4ulh8fR0C7sSU,1784
 llama_cloud/types/chat_data.py,sha256=ZYqVtjXF6qPGajU4IWZu3InpU54TXJwBFiqxBepylP0,1197
+llama_cloud/types/chunk_mode.py,sha256=7FIsCfJqZyek1cwRykSgRY24gA0Qo9kMGdJDFjabb9c,621
 llama_cloud/types/cloud_az_storage_blob_data_source.py,sha256=NT4cYsD1M868_bSJxKM9cvTMtjQtQxKloE4vRv8_lwg,1534
 llama_cloud/types/cloud_azure_ai_search_vector_store.py,sha256=9GTaft7BaKsR9RJQp5dlpbslXUlTMA1AcDdKV1ApfqI,1513
 llama_cloud/types/cloud_box_data_source.py,sha256=9bffCaKGvctSsk9OdTpzzP__O1NDpb9wdvKY2uwjpwY,1470
@@ -125,7 +126,8 @@ llama_cloud/types/data_source_create_component.py,sha256=-P4FGv9Xg951n-77_bb-2_C
 llama_cloud/types/data_source_create_custom_metadata_value.py,sha256=ejSsQNbszYQaUWFh9r9kQpHf88qbhuRv1SI9J_MOSC0,215
 llama_cloud/types/data_source_custom_metadata_value.py,sha256=pTZn5yjZYmuOhsLABFJOKZblZUkRqo1CqLAuP5tKji4,209
 llama_cloud/types/data_source_definition.py,sha256=HlSlTxzYcQJOSo_2OSroAE8vAr-otDvTNBSEkA54vL8,1575
-llama_cloud/types/edit_suggestion.py,sha256=
+llama_cloud/types/edit_suggestion.py,sha256=uzXSZYJiU3FaNN-TvEd3EXdaXvjQIe7Mf4kntKkyB2I,1202
+llama_cloud/types/edit_suggestion_blocks_item.py,sha256=ojTk4lh0IHmrWP5wLPTIlsc2jAUDoHvdjJ5sm2uMut0,236
 llama_cloud/types/element_segmentation_config.py,sha256=QOBk8YFrgK0I2m3caqV5bpYaGXbk0fMSjZ4hUPZXZDI,959
 llama_cloud/types/embedding_model_config.py,sha256=6-o0vsAX89eHQdCAG5sI317Aivr4Tvs6ycg9TqNgybo,1525
 llama_cloud/types/embedding_model_config_embedding_config.py,sha256=9rmfeiJYhBPmSJCXp-qxkOAd9WPwL5Hks7jIKd8XCPM,2901
@@ -142,12 +144,12 @@ llama_cloud/types/eval_question_create.py,sha256=oOwxkE5gPj8RAwgr3uuTHfTvLSXmYkk
 llama_cloud/types/eval_question_result.py,sha256=Y4RFXnA4YJTlzM6_NtLOi0rt6hRZoQbToiVJqm41ArY,2168
 llama_cloud/types/extract_agent.py,sha256=T98IOueut4M52Qm7hqcUOcWFFDhZ-ye0OFdXgfFGtS4,1763
 llama_cloud/types/extract_agent_data_schema_value.py,sha256=UaDQ2KjajLDccW7F4NKdfpefeTJrr1hl0c95WRETYkM,201
-llama_cloud/types/extract_config.py,sha256=
+llama_cloud/types/extract_config.py,sha256=s0f8Yzfuzl0P_xV91SNj0Cbp77I_FMXCxL5lEJyXR6I,1505
 llama_cloud/types/extract_job.py,sha256=Yx4fDdCdylAji2LPTwqflVpz1o9slpj9tTLS93-1tzU,1431
 llama_cloud/types/extract_job_create.py,sha256=UK1mBIKyflo7e6m1MxMN95pLscj67jH_yvs8EvmBXqU,1545
 llama_cloud/types/extract_job_create_data_schema_override.py,sha256=vuiJ2lGJjbXEnvFKzVnKyvgwhMXPg1Pb5GZne2DrB60,330
 llama_cloud/types/extract_job_create_data_schema_override_zero_value.py,sha256=HHEYxOSQXXyBYOiUQg_qwfQtXFj-OtThMwbUDBIgZU0,223
-llama_cloud/types/extract_mode.py,sha256=
+llama_cloud/types/extract_mode.py,sha256=Xu8TvYHXYs-EcELV0hXbkcPuMyK1BLBQPKIBuHeUSnY,457
 llama_cloud/types/extract_resultset.py,sha256=Alje0YQJUiA_aKi0hQs7TAnhDmZuQ_yL9b6HCNYBFQg,1627
 llama_cloud/types/extract_resultset_data.py,sha256=v9Ae4SxLsvYPE9crko4N16lBjsxuZpz1yrUOhnaM_VY,427
 llama_cloud/types/extract_resultset_data_item_value.py,sha256=JwqgDIGW0irr8QWaSTIrl24FhGxTUDOXIbxoSdIjuxs,209
@@ -162,6 +164,7 @@ llama_cloud/types/extract_run_extraction_metadata_value.py,sha256=tBbPk7mkNWvjej
 llama_cloud/types/extract_schema_validate_response.py,sha256=EVSeXsljZC-gIpdXr16khI4kbZbc3jU-7rKVp5F_SQk,1170
 llama_cloud/types/extract_schema_validate_response_data_schema_value.py,sha256=lX9RbBHcmBRagA-K7x1he8EEmmNCiAs-tHumGfPvFVQ,224
 llama_cloud/types/extract_state.py,sha256=TNeVAXXKZaiM2srlbQlzRSn4_TDpR4xyT_yQhJUxFvk,775
+llama_cloud/types/extract_target.py,sha256=Gt-FNqblzcjdfq1hxsqEjWWu-HNLXdKy4w98nog52Ms,478
 llama_cloud/types/file.py,sha256=rQXitPRKOYw91nK5qOZ0vpOmIx_MCpRb0g78d9dQs6w,1822
 llama_cloud/types/file_permission_info_value.py,sha256=RyQlNbhvIKS87Ywu7XUaw5jDToZX64M9Wqzu1U_q2Us,197
 llama_cloud/types/file_resource_info_value.py,sha256=g6T6ELeLK9jgcvX6r-EuAl_4JkwnyqdS0RRoabMReSU,195
@@ -181,10 +184,10 @@ llama_cloud/types/job_name_mapping.py,sha256=2dQFQlVHoeSlkyEKSEJv0M3PzJf7hMvkuAB
 llama_cloud/types/job_names.py,sha256=ZapQT__pLI14SagjGi8AsEwWY949hBoplQemMgb_Aoc,4098
 llama_cloud/types/job_record.py,sha256=-tp6w7dyd5KZMMynxSrL5W5YoJSdqTRWolx_f0_Hbh0,2069
 llama_cloud/types/job_record_with_usage_metrics.py,sha256=iNV2do5TB_0e3PoOz_DJyAaM6Cn9G8KG-dGPGgEs5SY,1198
-llama_cloud/types/llama_extract_settings.py,sha256=
+llama_cloud/types/llama_extract_settings.py,sha256=Yh9Ah9W0X4l-znjYm4oNIh8-LCBc99JEQmGU87bUzWs,2225
 llama_cloud/types/llama_index_core_base_llms_types_chat_message.py,sha256=NelHo-T-ebVMhRKsqE_xV8AJW4c7o6lS0uEQnPsmTwg,1365
 llama_cloud/types/llama_index_core_base_llms_types_chat_message_blocks_item.py,sha256=tTglUqrSUaVc2Wsi4uIt5MU-80_oxZzTnhf8ziilVGY,874
-llama_cloud/types/llama_parse_parameters.py,sha256=
+llama_cloud/types/llama_parse_parameters.py,sha256=mUJwdL73Ngcy2O64xKk1lWHbnWnQpwAEjTWScM_piuc,4881
 llama_cloud/types/llama_parse_supported_file_extensions.py,sha256=B_0N3f8Aq59W9FbsH50mGBUiyWTIXQjHFl739uAyaQw,11207
 llama_cloud/types/llm.py,sha256=7iIItVPjURp4u5xxJDAFIefUdhUKwIuA245WXilJPXE,2234
 llama_cloud/types/llm_model_data.py,sha256=6rrycqGwlK3LZ2S-WtgmeomithdLhDCgwBBZQ5KLaso,1300
@@ -261,6 +264,7 @@ llama_cloud/types/progress_event.py,sha256=Bk73A8geTVaq0ze5pMnbkAmx7FSOHQIixYCpC
 llama_cloud/types/progress_event_status.py,sha256=yb4RAXwOKU6Bi7iyYy-3lwhF6_mLz0ZFyGjxIdaByoE,893
 llama_cloud/types/project.py,sha256=4NNh_ZAjEkoWl5st6b1jsPVf_SYKtUTB6rS1701G4IQ,1441
 llama_cloud/types/project_create.py,sha256=GxGmsXGJM-cHrvPFLktEkj9JtNsSdFae7-HPZFB4er0,1014
+llama_cloud/types/prompt_conf.py,sha256=B3G9kdx1Md5fsx2ix4NYz5emvKi2GisYOOp9RozCPCU,1294
 llama_cloud/types/prompt_mixin_prompts.py,sha256=_ipiIFWmWSuaJ5VFI5rXa_C7lHaIL3Yv5izh7__xTxI,1323
 llama_cloud/types/prompt_spec.py,sha256=tPJTIzN9pYmiZD-HcPHFuhh4n1ak9FI5f7xFNV31djQ,1410
 llama_cloud/types/pydantic_program_mode.py,sha256=QfvpqR7TqyNuOxo78Sr58VOu7KDSBrHJM4XXBB0F5z0,1202
@@ -273,8 +277,7 @@ llama_cloud/types/report_create_response.py,sha256=tmnVkyAMVf0HNQy186DFVV1oZQzYG
 llama_cloud/types/report_event_item.py,sha256=_-0wgI96Ama2qKqUODTmI_fEcrnW5eAAjL1AoFEr4cQ,1451
 llama_cloud/types/report_event_item_event_data.py,sha256=_v_2wZVGuNgXpitYNcKlA9hJVMLECOKf8A-pUuLron8,1171
 llama_cloud/types/report_event_type.py,sha256=cPqKDVI8STX5BLndiGEovV4baa2it5fbfvcbiKyxAY8,1230
-llama_cloud/types/
-llama_cloud/types/report_metadata.py,sha256=TEUaRqy2ULtyFu3lFbea2X1SnYa5QMXZNJYerAQS1wM,1607
+llama_cloud/types/report_metadata.py,sha256=cKB8wfToixuy8QEBNKzVTBznES9x4PU42DGnyiym5lc,1551
 llama_cloud/types/report_plan.py,sha256=UvtYQaSNUTWbmC-rP0c57rbGpDRPUQgou0c2r96FVUo,1332
 llama_cloud/types/report_plan_block.py,sha256=YlZ4fp4J3rduNKUknm0LfpHES_pgtQGFA9ZzErHoR40,1320
 llama_cloud/types/report_query.py,sha256=IwZNM37fgwD2CrHkQ3LtdKwUCyL2r4SrZc0xwfaTa_I,1216
@@ -288,12 +291,15 @@ llama_cloud/types/retriever.py,sha256=ZItPsorL8x1XjtJT49ZodaMqU8h2GfwlB4U4cgnfZk
 llama_cloud/types/retriever_create.py,sha256=WyUR9DRzu3Q9tzKEeXCdQuzCY6WKi9ADJkZea9rqvxU,1286
 llama_cloud/types/retriever_pipeline.py,sha256=F1pZDxg8JdQXRHE6ciFezd7a-Wv5bHplPcGDED-J4b0,1330
 llama_cloud/types/role.py,sha256=SCi2TyFbc68RJuNB-OdcP8ut03Uv5zPZk84QMmf17w8,1384
+llama_cloud/types/schema_relax_mode.py,sha256=v4or6dYTvWvBBNtEd2ZSaUAb1706I0Zuh-Xztm-zx_0,635
 llama_cloud/types/semantic_chunking_config.py,sha256=dFDniTVWpRc7UcmVFvljUoyL5Ztd-l-YrHII7U-yM-k,1053
 llama_cloud/types/sentence_chunking_config.py,sha256=NA9xidK5ICxJPkEMQZWNcsV0Hw9Co_bzRWeYe4uSh9I,1116
 llama_cloud/types/sentence_splitter.py,sha256=GbC3KE20Nd85uzO4bqJttjqJhQ_1co2gKnSQxzfOAiM,2140
 llama_cloud/types/status_enum.py,sha256=cUBIlys89E8PUzmVqqawu7qTDF0aRqBwiijOmRDPvx0,1018
+llama_cloud/types/struct_mode.py,sha256=AjYmpXTEYlMNNac6cNjEGYQBJwKJERw2ERdjGKgrX3o,845
+llama_cloud/types/struct_parse_conf.py,sha256=bD0gZzN6tR8VO9s81KPwTffLQDnLLAAcNrnknii_178,1825
 llama_cloud/types/supported_llm_model.py,sha256=0v-g01LyZB7TeN0zwAeSJejRoT95SVaXOJhNz7boJwM,1461
-llama_cloud/types/supported_llm_model_names.py,sha256=
+llama_cloud/types/supported_llm_model_names.py,sha256=dEhmwGQVG-dmuGGbTWBAYadr-g5u3kiVz308CLWuSqw,2657
 llama_cloud/types/text_block.py,sha256=X154sQkSyposXuRcEWNp_tWcDQ-AI6q_-MfJUN5exP8,958
 llama_cloud/types/text_node.py,sha256=Tq3QmuKC5cIHvC9wAtvhsXl1g2sACs2yJwQ0Uko8GSU,2846
 llama_cloud/types/text_node_relationships_value.py,sha256=qmXURTk1Xg7ZDzRSSV1uDEel0AXRLohND5ioezibHY0,217
@@ -313,7 +319,7 @@ llama_cloud/types/validation_error_loc_item.py,sha256=LAtjCHIllWRBFXvAZ5QZpp7CPX
 llama_cloud/types/vertex_ai_embedding_config.py,sha256=DvQk2xMJFmo54MEXTzoM4KSADyhGm_ygmFyx6wIcQdw,1159
 llama_cloud/types/vertex_embedding_mode.py,sha256=yY23FjuWU_DkXjBb3JoKV4SCMqel2BaIMltDqGnIowU,1217
 llama_cloud/types/vertex_text_embedding.py,sha256=-C4fNCYfFl36ATdBMGFVPpiHIKxjk0KB1ERA2Ec20aU,1932
-llama_cloud-0.1.
-llama_cloud-0.1.
-llama_cloud-0.1.
-llama_cloud-0.1.
+llama_cloud-0.1.12.dist-info/LICENSE,sha256=_iNqtPcw1Ue7dZKwOwgPtbegMUkWVy15hC7bffAdNmY,1067
+llama_cloud-0.1.12.dist-info/METADATA,sha256=cq17QpgehHf9h_wM1gBd636awXVxMkUkCgxlcHtiDQ4,851
+llama_cloud-0.1.12.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+llama_cloud-0.1.12.dist-info/RECORD,,
{llama_cloud-0.1.10.dist-info → llama_cloud-0.1.12.dist-info}/LICENSE
File without changes