llama-cloud 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_cloud/__init__.py +76 -10
- llama_cloud/client.py +3 -0
- llama_cloud/environment.py +1 -1
- llama_cloud/resources/__init__.py +23 -1
- llama_cloud/resources/data_sinks/client.py +26 -20
- llama_cloud/resources/data_sources/client.py +16 -16
- llama_cloud/resources/embedding_model_configs/__init__.py +23 -0
- llama_cloud/resources/embedding_model_configs/client.py +416 -0
- llama_cloud/resources/embedding_model_configs/types/__init__.py +23 -0
- llama_cloud/resources/embedding_model_configs/types/embedding_model_config_create_embedding_config.py +89 -0
- llama_cloud/resources/evals/client.py +36 -26
- llama_cloud/resources/extraction/client.py +32 -32
- llama_cloud/resources/files/__init__.py +2 -2
- llama_cloud/resources/files/client.py +310 -54
- llama_cloud/resources/files/types/__init__.py +3 -1
- llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py +7 -0
- llama_cloud/resources/files/types/file_create_permission_info_value.py +7 -0
- llama_cloud/resources/organizations/client.py +125 -56
- llama_cloud/resources/parsing/client.py +652 -264
- llama_cloud/resources/pipelines/client.py +617 -310
- llama_cloud/resources/projects/client.py +341 -136
- llama_cloud/types/__init__.py +58 -10
- llama_cloud/types/azure_open_ai_embedding.py +12 -6
- llama_cloud/types/base_prompt_template.py +6 -2
- llama_cloud/types/bedrock_embedding.py +12 -6
- llama_cloud/types/character_splitter.py +4 -2
- llama_cloud/types/chat_message.py +1 -1
- llama_cloud/types/cloud_az_storage_blob_data_source.py +16 -7
- llama_cloud/types/cloud_box_data_source.py +13 -6
- llama_cloud/types/cloud_confluence_data_source.py +7 -6
- llama_cloud/types/cloud_document.py +3 -1
- llama_cloud/types/cloud_document_create.py +3 -1
- llama_cloud/types/cloud_google_drive_data_source.py +1 -0
- llama_cloud/types/cloud_jira_data_source.py +7 -4
- llama_cloud/types/cloud_notion_page_data_source.py +3 -2
- llama_cloud/types/cloud_one_drive_data_source.py +6 -2
- llama_cloud/types/cloud_postgres_vector_store.py +1 -1
- llama_cloud/types/cloud_s_3_data_source.py +9 -4
- llama_cloud/types/cloud_sharepoint_data_source.py +9 -5
- llama_cloud/types/cloud_slack_data_source.py +7 -6
- llama_cloud/types/code_splitter.py +1 -1
- llama_cloud/types/cohere_embedding.py +7 -3
- llama_cloud/types/data_sink.py +4 -4
- llama_cloud/types/data_sink_create.py +1 -1
- llama_cloud/types/data_source.py +7 -5
- llama_cloud/types/data_source_create.py +4 -2
- llama_cloud/types/embedding_model_config.py +43 -0
- llama_cloud/types/embedding_model_config_embedding_config.py +89 -0
- llama_cloud/types/embedding_model_config_update.py +35 -0
- llama_cloud/types/embedding_model_config_update_embedding_config.py +89 -0
- llama_cloud/types/eval_dataset.py +2 -2
- llama_cloud/types/eval_dataset_job_record.py +13 -7
- llama_cloud/types/eval_execution_params_override.py +6 -2
- llama_cloud/types/eval_question.py +2 -2
- llama_cloud/types/extraction_result.py +2 -2
- llama_cloud/types/extraction_schema.py +5 -3
- llama_cloud/types/file.py +15 -7
- llama_cloud/types/file_permission_info_value.py +5 -0
- llama_cloud/types/filter_operator.py +2 -2
- llama_cloud/types/gemini_embedding.py +10 -6
- llama_cloud/types/hugging_face_inference_api_embedding.py +27 -11
- llama_cloud/types/input_message.py +3 -1
- llama_cloud/types/interval_usage_and_plan.py +36 -0
- llama_cloud/types/job_name_mapping.py +4 -0
- llama_cloud/types/llama_parse_parameters.py +21 -0
- llama_cloud/types/llm.py +4 -2
- llama_cloud/types/llm_parameters.py +5 -2
- llama_cloud/types/local_eval.py +10 -8
- llama_cloud/types/local_eval_results.py +1 -1
- llama_cloud/types/managed_ingestion_status_response.py +5 -3
- llama_cloud/types/markdown_element_node_parser.py +5 -3
- llama_cloud/types/markdown_node_parser.py +3 -2
- llama_cloud/types/metadata_filter.py +2 -2
- llama_cloud/types/metric_result.py +3 -3
- llama_cloud/types/node_parser.py +1 -1
- llama_cloud/types/open_ai_embedding.py +12 -6
- llama_cloud/types/organization.py +2 -2
- llama_cloud/types/page_splitter_node_parser.py +2 -2
- llama_cloud/types/paginated_list_pipeline_files_response.py +35 -0
- llama_cloud/types/parsing_job_structured_result.py +32 -0
- llama_cloud/types/permission.py +3 -3
- llama_cloud/types/pipeline.py +17 -6
- llama_cloud/types/pipeline_configuration_hashes.py +3 -3
- llama_cloud/types/pipeline_create.py +15 -4
- llama_cloud/types/pipeline_data_source.py +13 -7
- llama_cloud/types/pipeline_data_source_create.py +3 -1
- llama_cloud/types/pipeline_deployment.py +4 -4
- llama_cloud/types/pipeline_file.py +25 -10
- llama_cloud/types/pipeline_file_create.py +3 -1
- llama_cloud/types/pipeline_file_permission_info_value.py +7 -0
- llama_cloud/types/plan.py +40 -0
- llama_cloud/types/playground_session.py +2 -2
- llama_cloud/types/preset_retrieval_params.py +14 -7
- llama_cloud/types/presigned_url.py +3 -1
- llama_cloud/types/project.py +2 -2
- llama_cloud/types/prompt_mixin_prompts.py +1 -1
- llama_cloud/types/prompt_spec.py +4 -2
- llama_cloud/types/role.py +3 -3
- llama_cloud/types/sentence_splitter.py +4 -2
- llama_cloud/types/text_node.py +3 -3
- llama_cloud/types/{hugging_face_inference_api_embedding_token.py → token.py} +1 -1
- llama_cloud/types/token_text_splitter.py +1 -1
- llama_cloud/types/usage.py +41 -0
- llama_cloud/types/user_organization.py +9 -5
- llama_cloud/types/user_organization_create.py +4 -4
- llama_cloud/types/user_organization_delete.py +2 -2
- llama_cloud/types/user_organization_role.py +2 -2
- llama_cloud/types/value.py +5 -0
- llama_cloud/types/vertex_text_embedding.py +9 -5
- {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/METADATA +1 -1
- {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/RECORD +113 -99
- llama_cloud/types/data_sink_component.py +0 -20
- llama_cloud/types/data_source_component.py +0 -28
- llama_cloud/types/metadata_filter_value.py +0 -5
- llama_cloud/types/pipeline_data_source_component.py +0 -28
- {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/LICENSE +0 -0
- {llama_cloud-0.1.4.dist-info → llama_cloud-0.1.6.dist-info}/WHEEL +0 -0
llama_cloud/types/llama_parse_parameters.py
CHANGED

@@ -25,15 +25,24 @@ class LlamaParseParameters(pydantic.BaseModel):
     disable_ocr: typing.Optional[bool]
     annotate_links: typing.Optional[bool]
     disable_reconstruction: typing.Optional[bool]
+    disable_image_extraction: typing.Optional[bool]
     invalidate_cache: typing.Optional[bool]
+    output_pdf_of_document: typing.Optional[bool]
     do_not_cache: typing.Optional[bool]
     fast_mode: typing.Optional[bool]
     skip_diagonal_text: typing.Optional[bool]
     gpt_4_o_mode: typing.Optional[bool] = pydantic.Field(alias="gpt4o_mode")
     gpt_4_o_api_key: typing.Optional[str] = pydantic.Field(alias="gpt4o_api_key")
     do_not_unroll_columns: typing.Optional[bool]
+    html_make_all_elements_visible: typing.Optional[bool]
+    html_remove_fixed_elements: typing.Optional[bool]
+    guess_xlsx_sheet_name: typing.Optional[bool]
     page_separator: typing.Optional[str]
     bounding_box: typing.Optional[str]
+    bbox_top: typing.Optional[float]
+    bbox_right: typing.Optional[float]
+    bbox_bottom: typing.Optional[float]
+    bbox_left: typing.Optional[float]
     target_pages: typing.Optional[str]
     use_vendor_multimodal_model: typing.Optional[bool]
     vendor_multimodal_model_name: typing.Optional[str]
@@ -47,10 +56,22 @@ class LlamaParseParameters(pydantic.BaseModel):
     continuous_mode: typing.Optional[bool]
     s_3_input_path: typing.Optional[str] = pydantic.Field(alias="s3_input_path")
     s_3_output_path_prefix: typing.Optional[str] = pydantic.Field(alias="s3_output_path_prefix")
+    project_id: typing.Optional[str]
     azure_openai_deployment_name: typing.Optional[str]
     azure_openai_endpoint: typing.Optional[str]
     azure_openai_api_version: typing.Optional[str]
     azure_openai_key: typing.Optional[str]
+    input_url: typing.Optional[str]
+    http_proxy: typing.Optional[str]
+    auto_mode: typing.Optional[bool]
+    auto_mode_trigger_on_regexp_in_page: typing.Optional[str]
+    auto_mode_trigger_on_text_in_page: typing.Optional[str]
+    auto_mode_trigger_on_table_in_page: typing.Optional[bool]
+    auto_mode_trigger_on_image_in_page: typing.Optional[bool]
+    structured_output: typing.Optional[bool]
+    structured_output_json_schema: typing.Optional[str]
+    structured_output_json_schema_name: typing.Optional[str]
+    max_pages: typing.Optional[int]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
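Net effect of the two hunks above: LlamaParseParameters grows from basic OCR/caching toggles to cover HTML handling, bounding-box cropping, auto-mode triggers, and structured output. A minimal sketch of how the new fields compose; the field names come from the diff, while defaults and server-side semantics (e.g. the bbox units) are assumptions:

from llama_cloud import LlamaParseParameters

params = LlamaParseParameters(
    # auto mode: re-parse a page in a higher-fidelity mode when a trigger fires
    auto_mode=True,
    auto_mode_trigger_on_table_in_page=True,
    auto_mode_trigger_on_regexp_in_page=r"Invoice\s+Total",
    # crop pages before parsing (assumed to be fractions of the page size)
    bbox_top=0.05,
    bbox_bottom=0.05,
    # ask for output conforming to a JSON schema
    structured_output=True,
    structured_output_json_schema='{"type": "object", "properties": {"total": {"type": "number"}}}',
    max_pages=25,
)
print(params.json())  # by_alias/exclude_unset defaults keep the payload minimal
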
llama_cloud/types/llm.py
CHANGED
@@ -34,7 +34,7 @@ class Llm(pydantic.BaseModel):
     """
 
     callback_manager: typing.Optional[typing.Any]
-    system_prompt: typing.Optional[str]
+    system_prompt: typing.Optional[str] = pydantic.Field(description="System prompt for LLM calls.")
     messages_to_prompt: typing.Optional[str] = pydantic.Field(
         description="Function to convert a list of messages to an LLM prompt."
     )
@@ -43,7 +43,9 @@ class Llm(pydantic.BaseModel):
     )
     output_parser: typing.Optional[typing.Any]
     pydantic_program_mode: typing.Optional[PydanticProgramMode]
-    query_wrapper_prompt: typing.Optional[BasePromptTemplate]
+    query_wrapper_prompt: typing.Optional[BasePromptTemplate] = pydantic.Field(
+        description="Query wrapper prompt for LLM calls."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/llm_parameters.py
CHANGED

@@ -19,8 +19,11 @@ class LlmParameters(pydantic.BaseModel):
     model_name: typing.Optional[SupportedLlmModelNames] = pydantic.Field(
         description="The name of the model to use for LLM completions."
     )
-    system_prompt: typing.Optional[str]
-    temperature: typing.Optional[float]
+    system_prompt: typing.Optional[str] = pydantic.Field(description="The system prompt to use for the completion.")
+    temperature: typing.Optional[float] = pydantic.Field(description="The temperature value for the model.")
+    use_chain_of_thought_reasoning: typing.Optional[bool] = pydantic.Field(
+        description="Whether to use chain of thought reasoning."
+    )
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
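The new use_chain_of_thought_reasoning flag sits next to the now-documented prompt and temperature fields. A small sketch (all fields are optional, so only what you set is serialized):

from llama_cloud import LlmParameters

retrieval_llm = LlmParameters(
    system_prompt="Answer strictly from the retrieved context.",
    temperature=0.1,
    use_chain_of_thought_reasoning=True,  # new in 0.1.6
)
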
llama_cloud/types/local_eval.py
CHANGED
@@ -21,17 +21,19 @@ class LocalEval(pydantic.BaseModel):
     Output of an BaseEvaluator.
     """
 
-    query: typing.Optional[str]
-    contexts: typing.Optional[typing.List[str]]
-    response: typing.Optional[str]
-    passing: typing.Optional[bool]
-    feedback: typing.Optional[str]
-    score: typing.Optional[float]
-    pairwise_source: typing.Optional[str]
+    query: typing.Optional[str] = pydantic.Field(description="Query string")
+    contexts: typing.Optional[typing.List[str]] = pydantic.Field(description="Context strings")
+    response: typing.Optional[str] = pydantic.Field(description="Response string")
+    passing: typing.Optional[bool] = pydantic.Field(description="Binary evaluation result (passing or not)")
+    feedback: typing.Optional[str] = pydantic.Field(description="Feedback or reasoning for the response")
+    score: typing.Optional[float] = pydantic.Field(description="Score for the response")
+    pairwise_source: typing.Optional[str] = pydantic.Field(
+        description="Used only for pairwise and specifies whether it is from original order of presented answers or flipped order"
+    )
     invalid_result: typing.Optional[bool] = pydantic.Field(
         description="Whether the evaluation result is an invalid one."
     )
-    invalid_reason: typing.Optional[str]
+    invalid_reason: typing.Optional[str] = pydantic.Field(description="Reason for invalid evaluation.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/local_eval_results.py
CHANGED

@@ -21,7 +21,7 @@ class LocalEvalResults(pydantic.BaseModel):
     """
 
     project_id: str = pydantic.Field(description="The ID of the project.")
-    eval_set_id: typing.Optional[str]
+    eval_set_id: typing.Optional[str] = pydantic.Field(description="The ID of the local eval result set.")
     app_name: str = pydantic.Field(description="The name of the app.")
     eval_name: str = pydantic.Field(description="The name of the eval.")
     result: LocalEval = pydantic.Field(description="The eval results.")
llama_cloud/types/managed_ingestion_status_response.py
CHANGED

@@ -17,10 +17,12 @@ except ImportError:
 
 
 class ManagedIngestionStatusResponse(pydantic.BaseModel):
-    job_id: typing.Optional[str]
-    deployment_date: typing.Optional[dt.datetime]
+    job_id: typing.Optional[str] = pydantic.Field(description="ID of the latest job.")
+    deployment_date: typing.Optional[dt.datetime] = pydantic.Field(description="Date of the deployment.")
     status: ManagedIngestionStatus = pydantic.Field(description="Status of the ingestion.")
-    error: typing.Optional[typing.List[IngestionErrorResponse]]
+    error: typing.Optional[typing.List[IngestionErrorResponse]] = pydantic.Field(
+        description="List of errors that occurred during ingestion."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
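With job_id, deployment_date, and error now documented, a polling loop over any endpoint returning this model is straightforward. A sketch; the SUCCESS/ERROR members of ManagedIngestionStatus are assumed:

import time

from llama_cloud import ManagedIngestionStatus

def wait_for_ingestion(fetch_status, poll_seconds: float = 5.0):
    # fetch_status: zero-arg callable returning a ManagedIngestionStatusResponse
    while True:
        resp = fetch_status()
        if resp.status == ManagedIngestionStatus.SUCCESS:
            return resp
        if resp.status == ManagedIngestionStatus.ERROR:
            # error holds the per-document IngestionErrorResponse entries
            raise RuntimeError(f"job {resp.job_id} failed: {resp.error}")
        time.sleep(poll_seconds)
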
llama_cloud/types/markdown_element_node_parser.py
CHANGED

@@ -29,12 +29,14 @@ class MarkdownElementNodeParser(pydantic.BaseModel):
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
     callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
-    llm: typing.Optional[Llm]
+    id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
+    llm: typing.Optional[Llm] = pydantic.Field(description="LLM model to use for summarization.")
     summary_query_str: typing.Optional[str] = pydantic.Field(description="Query string to use for summarization.")
     num_workers: typing.Optional[int] = pydantic.Field(description="Num of workers for async jobs.")
     show_progress: typing.Optional[bool] = pydantic.Field(description="Whether to show progress.")
-    nested_node_parser: typing.Optional[NodeParser]
+    nested_node_parser: typing.Optional[NodeParser] = pydantic.Field(
+        description="Other types of node parsers to handle some types of nodes."
+    )
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/markdown_node_parser.py
CHANGED

@@ -18,7 +18,8 @@ class MarkdownNodeParser(pydantic.BaseModel):
     """
     Markdown node parser.
 
-    Splits a document into Nodes using
+    Splits a document into Nodes using Markdown header-based splitting logic.
+    Each node contains its text content and the path of headers leading to it.
 
     Args:
         include_metadata (bool): whether to include metadata in nodes
@@ -30,7 +31,7 @@ class MarkdownNodeParser(pydantic.BaseModel):
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
     callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
+    id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/metadata_filter.py
CHANGED

@@ -5,7 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from .filter_operator import FilterOperator
-from .metadata_filter_value import MetadataFilterValue
+from .value import Value
 
 try:
     import pydantic
@@ -27,7 +27,7 @@ class MetadataFilter(pydantic.BaseModel):
     """
 
     key: str
-    value: typing.Optional[MetadataFilterValue]
+    value: typing.Optional[Value]
     operator: typing.Optional[FilterOperator]
 
     def json(self, **kwargs: typing.Any) -> str:
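The filter value is now typed through the shared Value alias added in llama_cloud/types/value.py, replacing the per-model metadata_filter_value.py deleted in this release. Construction is unchanged; a sketch, assuming FilterOperator.EQ exists as in prior releases:

from llama_cloud import FilterOperator, MetadataFilter

pdf_only = MetadataFilter(
    key="file_type",
    value="pdf",  # Value is a small union alias (see value.py); a plain string is accepted
    operator=FilterOperator.EQ,
)
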
llama_cloud/types/metric_result.py
CHANGED

@@ -15,9 +15,9 @@ except ImportError:
 
 
 class MetricResult(pydantic.BaseModel):
-    passing: typing.Optional[bool]
-    score: typing.Optional[float]
-    feedback: typing.Optional[str]
+    passing: typing.Optional[bool] = pydantic.Field(description="Whether the metric passed or not.")
+    score: typing.Optional[float] = pydantic.Field(description="The score for the metric.")
+    feedback: typing.Optional[str] = pydantic.Field(description="The reasoning for the metric.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/node_parser.py
CHANGED
@@ -24,7 +24,7 @@ class NodeParser(pydantic.BaseModel):
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
     callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
+    id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/open_ai_embedding.py
CHANGED

@@ -17,20 +17,26 @@ except ImportError:
 class OpenAiEmbedding(pydantic.BaseModel):
     model_name: typing.Optional[str] = pydantic.Field(description="The name of the OpenAI embedding model.")
     embed_batch_size: typing.Optional[int] = pydantic.Field(description="The batch size for embedding calls.")
-    num_workers: typing.Optional[int]
+    num_workers: typing.Optional[int] = pydantic.Field(
+        description="The number of workers to use for async embedding calls."
+    )
     additional_kwargs: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
         description="Additional kwargs for the OpenAI API."
     )
-    api_key: typing.Optional[str]
-    api_base: typing.Optional[str]
-    api_version: typing.Optional[str]
+    api_key: typing.Optional[str] = pydantic.Field(description="The OpenAI API key.")
+    api_base: typing.Optional[str] = pydantic.Field(description="The base URL for OpenAI API.")
+    api_version: typing.Optional[str] = pydantic.Field(description="The version for OpenAI API.")
     max_retries: typing.Optional[int] = pydantic.Field(description="Maximum number of retries.")
     timeout: typing.Optional[float] = pydantic.Field(description="Timeout for each request.")
-    default_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]]
+    default_headers: typing.Optional[typing.Dict[str, typing.Optional[str]]] = pydantic.Field(
+        description="The default headers for API requests."
+    )
     reuse_client: typing.Optional[bool] = pydantic.Field(
         description="Reuse the OpenAI client between requests. When doing anything with large volumes of async API calls, setting this to false can improve stability."
     )
-    dimensions: typing.Optional[int]
+    dimensions: typing.Optional[int] = pydantic.Field(
+        description="The number of dimensions on the output embedding vectors. Works only with v3 embedding models."
+    )
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
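dimensions is the notable addition here; per its new description it applies only to v3 embedding models. A sketch:

from llama_cloud import OpenAiEmbedding

embedding = OpenAiEmbedding(
    model_name="text-embedding-3-small",
    dimensions=256,  # v3 models only, per the field description
    num_workers=4,   # parallelism for async embedding calls
)
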
llama_cloud/types/organization.py
CHANGED

@@ -20,8 +20,8 @@ class Organization(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: str = pydantic.Field(description="A name for the organization.")
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/page_splitter_node_parser.py
CHANGED

@@ -24,8 +24,8 @@ class PageSplitterNodeParser(pydantic.BaseModel):
     )
     include_prev_next_rel: typing.Optional[bool] = pydantic.Field(description="Include prev/next node relationships.")
     callback_manager: typing.Optional[typing.Any]
-    id_func: typing.Optional[str]
-    page_separator: typing.Optional[str]
+    id_func: typing.Optional[str] = pydantic.Field(description="Function to generate node IDs.")
+    page_separator: typing.Optional[str] = pydantic.Field(description="Separator to split text into pages.")
     class_name: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/paginated_list_pipeline_files_response.py
ADDED

@@ -0,0 +1,35 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from .pipeline_file import PipelineFile
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class PaginatedListPipelineFilesResponse(pydantic.BaseModel):
+    files: typing.List[PipelineFile] = pydantic.Field(description="The files to list")
+    limit: int = pydantic.Field(description="The limit of the files")
+    offset: int = pydantic.Field(description="The offset of the files")
+    total_count: int = pydantic.Field(description="The total number of files")
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
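This model backs the paginated file listing added to the pipelines client (pipelines/client.py in the file list above). A paging sketch; the client method name is an assumption, only the response shape comes from this file:

def iter_pipeline_files(client, pipeline_id: str, page_size: int = 100):
    """Yield every PipelineFile by walking limit/offset pages."""
    offset = 0
    while True:
        page = client.pipelines.list_pipeline_files_2(  # hypothetical endpoint name
            pipeline_id, limit=page_size, offset=offset
        )
        yield from page.files
        offset += len(page.files)
        if not page.files or offset >= page.total_count:
            break
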
llama_cloud/types/parsing_job_structured_result.py
ADDED

@@ -0,0 +1,32 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+
+try:
+    import pydantic
+    if pydantic.__version__.startswith("1."):
+        raise ImportError
+    import pydantic.v1 as pydantic  # type: ignore
+except ImportError:
+    import pydantic  # type: ignore
+
+
+class ParsingJobStructuredResult(pydantic.BaseModel):
+    structured: typing.Any
+    job_metadata: typing.Any
+
+    def json(self, **kwargs: typing.Any) -> str:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().json(**kwargs_with_defaults)
+
+    def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+        kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+        return super().dict(**kwargs_with_defaults)
+
+    class Config:
+        frozen = True
+        smart_union = True
+        json_encoders = {dt.datetime: serialize_datetime}
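This pairs with the structured_output* fields added to LlamaParseParameters: both structured and job_metadata are typing.Any, i.e. plain parsed JSON. A retrieval sketch; the client method name is an assumption:

import json

def print_structured_result(client, job_id: str) -> None:
    result = client.parsing.get_job_structured_result(job_id)  # assumed method name
    # structured is untyped JSON matching the schema you supplied at parse time
    print(json.dumps(result.structured, indent=2))
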
llama_cloud/types/permission.py
CHANGED
@@ -20,10 +20,10 @@ class Permission(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: str = pydantic.Field(description="A name for the permission.")
-    description: typing.Optional[str]
+    description: typing.Optional[str] = pydantic.Field(description="A description for the permission.")
     access: bool = pydantic.Field(description="Whether the permission is granted or not.")
 
     def json(self, **kwargs: typing.Any) -> str:
llama_cloud/types/pipeline.py
CHANGED
@@ -29,19 +29,26 @@ class Pipeline(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: str
     project_id: str
+    embedding_model_config_id: typing.Optional[str] = pydantic.Field(
+        description="The ID of the EmbeddingModelConfig this pipeline is using."
+    )
     pipeline_type: typing.Optional[PipelineType] = pydantic.Field(
         description="Type of pipeline. Either PLAYGROUND or MANAGED."
     )
-    managed_pipeline_id: typing.Optional[str]
+    managed_pipeline_id: typing.Optional[str] = pydantic.Field(
+        description="The ID of the ManagedPipeline this playground pipeline is linked to."
+    )
     embedding_config: PipelineEmbeddingConfig
     configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = pydantic.Field(
         description="Deprecated don't use it, List of configured transformations."
     )
-    config_hash: typing.Optional[PipelineConfigurationHashes]
+    config_hash: typing.Optional[PipelineConfigurationHashes] = pydantic.Field(
+        description="Hashes for the configuration of the pipeline."
+    )
     transform_config: typing.Optional[PipelineTransformConfig] = pydantic.Field(
         description="Configuration for the transformation."
     )
@@ -51,8 +58,12 @@ class Pipeline(pydantic.BaseModel):
     eval_parameters: typing.Optional[EvalExecutionParams] = pydantic.Field(
         description="Eval parameters for the pipeline."
     )
-    llama_parse_parameters: typing.Optional[LlamaParseParameters]
-    data_sink: typing.Optional[DataSink]
+    llama_parse_parameters: typing.Optional[LlamaParseParameters] = pydantic.Field(
+        description="Settings that can be configured for how to use LlamaParse to parse files within a LlamaCloud pipeline."
+    )
+    data_sink: typing.Optional[DataSink] = pydantic.Field(
+        description="The data sink for the pipeline. If None, the pipeline will use the fully managed data sink."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_configuration_hashes.py
CHANGED

@@ -19,9 +19,9 @@ class PipelineConfigurationHashes(pydantic.BaseModel):
     Hashes for the configuration of a pipeline.
     """
 
-    embedding_config_hash: typing.Optional[str]
-    parsing_config_hash: typing.Optional[str]
-    transform_config_hash: typing.Optional[str]
+    embedding_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the embedding config.")
+    parsing_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the llama parse parameters.")
+    transform_config_hash: typing.Optional[str] = pydantic.Field(description="Hash of the transform config.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_create.py
CHANGED

@@ -31,9 +31,18 @@ class PipelineCreate(pydantic.BaseModel):
     transform_config: typing.Optional[PipelineCreateTransformConfig] = pydantic.Field(
         description="Configuration for the transformation."
     )
-    configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]]
-    data_sink_id: typing.Optional[str]
-    data_sink: typing.Optional[DataSinkCreate]
+    configured_transformations: typing.Optional[typing.List[ConfiguredTransformationItem]] = pydantic.Field(
+        description="Deprecated, use embedding_config or transform_config instead. configured transformations for the pipeline."
+    )
+    data_sink_id: typing.Optional[str] = pydantic.Field(
+        description="Data sink ID. When provided instead of data_sink, the data sink will be looked up by ID."
+    )
+    embedding_model_config_id: typing.Optional[str] = pydantic.Field(
+        description="Embedding model config ID. When provided instead of embedding_config, the embedding model config will be looked up by ID."
+    )
+    data_sink: typing.Optional[DataSinkCreate] = pydantic.Field(
+        description="Data sink. When provided instead of data_sink_id, the data sink will be created."
+    )
     preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = pydantic.Field(
         description="Preset retrieval parameters for the pipeline."
     )
@@ -45,7 +54,9 @@ class PipelineCreate(pydantic.BaseModel):
     pipeline_type: typing.Optional[PipelineType] = pydantic.Field(
         description="Type of pipeline. Either PLAYGROUND or MANAGED."
    )
-    managed_pipeline_id: typing.Optional[str]
+    managed_pipeline_id: typing.Optional[str] = pydantic.Field(
+        description="The ID of the ManagedPipeline this playground pipeline is linked to."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
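PipelineCreate now documents by-reference IDs alongside the inline objects: data_sink_id versus data_sink, plus the new embedding_model_config_id tied to the embedding_model_configs resource added in this release. A sketch of the by-ID form (the IDs are placeholders):

from llama_cloud import PipelineCreate

request = PipelineCreate(
    name="docs-index",
    embedding_model_config_id="emc_123",  # reuse a stored embedding model config
    data_sink_id="sink_456",              # look up an existing data sink by ID
)
print(request.json())  # exclude_unset keeps omitted fields out of the payload
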
llama_cloud/types/pipeline_data_source.py
CHANGED

@@ -5,7 +5,7 @@ import typing
 
 from ..core.datetime_utils import serialize_datetime
 from .configurable_data_source_names import ConfigurableDataSourceNames
-from .pipeline_data_source_component import PipelineDataSourceComponent
+from .data_source_create_component import DataSourceCreateComponent
 from .pipeline_data_source_custom_metadata_value import PipelineDataSourceCustomMetadataValue
 
 try:
@@ -23,18 +23,24 @@ class PipelineDataSource(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: str = pydantic.Field(description="The name of the data source.")
     source_type: ConfigurableDataSourceNames
-    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineDataSourceCustomMetadataValue]]]
-    component: PipelineDataSourceComponent
+    custom_metadata: typing.Optional[
+        typing.Dict[str, typing.Optional[PipelineDataSourceCustomMetadataValue]]
+    ] = pydantic.Field(description="Custom metadata that will be present on all data loaded from the data source")
+    component: DataSourceCreateComponent = pydantic.Field(description="Component that implements the data source")
     project_id: str
     data_source_id: str = pydantic.Field(description="The ID of the data source.")
     pipeline_id: str = pydantic.Field(description="The ID of the pipeline.")
     last_synced_at: dt.datetime = pydantic.Field(description="The last time the data source was automatically synced.")
-    sync_interval: typing.Optional[float]
-    sync_schedule_set_by: typing.Optional[str]
+    sync_interval: typing.Optional[float] = pydantic.Field(
+        description="The interval at which the data source should be synced."
+    )
+    sync_schedule_set_by: typing.Optional[str] = pydantic.Field(
+        description="The id of the user who set the sync schedule."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_data_source_create.py
CHANGED

@@ -20,7 +20,9 @@ class PipelineDataSourceCreate(pydantic.BaseModel):
     """
 
     data_source_id: str = pydantic.Field(description="The ID of the data source.")
-    sync_interval: typing.Optional[float]
+    sync_interval: typing.Optional[float] = pydantic.Field(
+        description="The interval at which the data source should be synced."
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_deployment.py
CHANGED

@@ -17,11 +17,11 @@ except ImportError:
 
 class PipelineDeployment(pydantic.BaseModel):
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     status: ManagedIngestionStatus = pydantic.Field(description="Status of the pipeline deployment.")
-    started_at: typing.Optional[dt.datetime]
-    ended_at: typing.Optional[dt.datetime]
+    started_at: typing.Optional[dt.datetime] = pydantic.Field(description="Time the pipeline deployment started.")
+    ended_at: typing.Optional[dt.datetime] = pydantic.Field(description="Time the pipeline deployment finished.")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_file.py
CHANGED

@@ -6,6 +6,7 @@ import typing
 from ..core.datetime_utils import serialize_datetime
 from .pipeline_file_config_hash_value import PipelineFileConfigHashValue
 from .pipeline_file_custom_metadata_value import PipelineFileCustomMetadataValue
+from .pipeline_file_permission_info_value import PipelineFilePermissionInfoValue
 from .pipeline_file_resource_info_value import PipelineFileResourceInfoValue
 
 try:
@@ -23,19 +24,33 @@ class PipelineFile(pydantic.BaseModel):
     """
 
     id: str = pydantic.Field(description="Unique identifier")
-    created_at: typing.Optional[dt.datetime]
-    updated_at: typing.Optional[dt.datetime]
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(description="Creation datetime")
+    updated_at: typing.Optional[dt.datetime] = pydantic.Field(description="Update datetime")
     name: typing.Optional[str]
-    file_size: typing.Optional[int]
-    file_type: typing.Optional[str]
+    file_size: typing.Optional[int] = pydantic.Field(description="Size of the file in bytes")
+    file_type: typing.Optional[str] = pydantic.Field(description="File type (e.g. pdf, docx, etc.)")
     project_id: str = pydantic.Field(description="The ID of the project that the file belongs to")
-    last_modified_at: typing.Optional[dt.datetime]
-    resource_info: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileResourceInfoValue]]]
-    data_source_id: typing.Optional[str]
-    file_id: typing.Optional[str]
+    last_modified_at: typing.Optional[dt.datetime] = pydantic.Field(description="The last modified time of the file")
+    resource_info: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileResourceInfoValue]]] = pydantic.Field(
+        description="Resource information for the file"
+    )
+    permission_info: typing.Optional[
+        typing.Dict[str, typing.Optional[PipelineFilePermissionInfoValue]]
+    ] = pydantic.Field(description="Permission information for the file")
+    data_source_id: typing.Optional[str] = pydantic.Field(
+        description="The ID of the data source that the file belongs to"
+    )
+    file_id: typing.Optional[str] = pydantic.Field(description="The ID of the file")
     pipeline_id: str = pydantic.Field(description="The ID of the pipeline that the file is associated with")
-    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileCustomMetadataValue]]]
-    config_hash: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileConfigHashValue]]]
+    custom_metadata: typing.Optional[
+        typing.Dict[str, typing.Optional[PipelineFileCustomMetadataValue]]
+    ] = pydantic.Field(description="Custom metadata for the file")
+    config_hash: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileConfigHashValue]]] = pydantic.Field(
+        description="Hashes for the configuration of the pipeline."
+    )
+    indexed_page_count: typing.Optional[int] = pydantic.Field(
+        description="The number of pages that have been indexed for this file"
+    )
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
llama_cloud/types/pipeline_file_create.py
CHANGED

@@ -21,7 +21,9 @@ class PipelineFileCreate(pydantic.BaseModel):
     """
 
     file_id: str = pydantic.Field(description="The ID of the file")
-    custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileCreateCustomMetadataValue]]]
+    custom_metadata: typing.Optional[
+        typing.Dict[str, typing.Optional[PipelineFileCreateCustomMetadataValue]]
+    ] = pydantic.Field(description="Custom metadata for the file")
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
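Finally, PipelineFileCreate's custom_metadata is now documented as a per-file metadata dict. A sketch of attaching a file with metadata; the ID is a placeholder, and the PipelineFileCreateCustomMetadataValue union is assumed to accept plain scalars:

from llama_cloud import PipelineFileCreate

request = PipelineFileCreate(
    file_id="file_789",
    custom_metadata={"department": "finance", "fiscal_year": 2024},  # scalar values assumed valid
)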