llama-cloud 0.0.6.tar.gz → 0.0.7.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/PKG-INFO +1 -1
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/component_definitions/client.py +18 -18
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/data_sinks/client.py +2 -2
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/data_sources/client.py +2 -2
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/evals/client.py +12 -12
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/files/client.py +8 -8
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/parsing/client.py +16 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/pipelines/client.py +12 -12
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/projects/client.py +24 -24
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/llama_parse_parameters.py +2 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/llama_parse_supported_file_extensions.py +124 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pipeline.py +0 -4
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/pyproject.toml +1 -1
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/LICENSE +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/README.md +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/client.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/core/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/core/api_error.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/core/client_wrapper.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/core/datetime_utils.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/core/jsonable_encoder.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/core/remove_none_from_dict.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/environment.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/errors/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/errors/unprocessable_entity_error.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/component_definitions/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/data_sinks/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/data_sinks/types/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/data_sinks/types/data_sink_update_component.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/data_sources/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/data_sources/types/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/data_sources/types/data_source_update_component.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/data_sources/types/data_source_update_component_one.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/evals/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/files/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/files/types/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/files/types/file_create_resource_info_value.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/parsing/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/pipelines/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/pipelines/types/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/projects/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/__init__.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/azure_open_ai_embedding.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/base.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/base_prompt_template.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/bedrock_embedding.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/chat_message.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/cloud_az_storage_blob_data_source.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/cloud_chroma_vector_store.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/cloud_document.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/cloud_document_create.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/cloud_gcs_data_source.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/cloud_google_drive_data_source.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/cloud_one_drive_data_source.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/cloud_pinecone_vector_store.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/cloud_postgres_vector_store.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/cloud_qdrant_vector_store.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/cloud_s_3_data_source.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/cloud_sharepoint_data_source.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/cloud_weaviate_vector_store.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/code_splitter.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/cohere_embedding.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/configurable_data_sink_names.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/configurable_data_source_names.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/configurable_transformation_definition.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/configurable_transformation_names.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/configured_transformation_item.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/configured_transformation_item_component.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/configured_transformation_item_component_one.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_sink.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_sink_component.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_sink_component_one.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_sink_create.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_sink_create_component.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_sink_create_component_one.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_sink_definition.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_source.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_source_component.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_source_component_one.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_source_create.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_source_create_component.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_source_create_component_one.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_source_create_custom_metadata_value.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_source_custom_metadata_value.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/data_source_definition.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/eval_dataset.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/eval_dataset_job_params.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/eval_dataset_job_record.py +1 -1
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/eval_execution_params.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/eval_execution_params_override.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/eval_llm_model_data.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/eval_question.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/eval_question_create.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/eval_question_result.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/file.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/file_resource_info_value.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/filter_condition.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/filter_operator.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/gemini_embedding.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/html_node_parser.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/http_validation_error.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/hugging_face_inference_api_embedding.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/hugging_face_inference_api_embedding_token.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/json_node_parser.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/llm.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/local_eval.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/local_eval_results.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/local_eval_sets.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/managed_ingestion_status.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/managed_ingestion_status_response.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/markdown_element_node_parser.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/markdown_node_parser.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/message_role.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/metadata_filter.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/metadata_filter_value.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/metadata_filters.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/metadata_filters_filters_item.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/metric_result.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/node_parser.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/object_type.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/open_ai_embedding.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/parser_languages.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/parsing_history_item.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/parsing_job.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/parsing_job_json_result.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/parsing_job_markdown_result.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/parsing_job_text_result.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/parsing_usage.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pipeline_create.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pipeline_data_source.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pipeline_data_source_component.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pipeline_data_source_component_one.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pipeline_data_source_create.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pipeline_data_source_custom_metadata_value.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pipeline_deployment.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pipeline_file.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pipeline_file_create.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pipeline_file_create_custom_metadata_value.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pipeline_file_custom_metadata_value.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pipeline_file_resource_info_value.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pipeline_type.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pooling.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/preset_retrieval_params.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/presigned_url.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/project.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/project_create.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/prompt_mixin_prompts.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/prompt_spec.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/pydantic_program_mode.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/related_node_info.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/retrieve_results.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/sentence_splitter.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/simple_file_node_parser.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/status_enum.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/supported_eval_llm_model.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/supported_eval_llm_model_names.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/text_node.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/text_node_relationships_value.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/text_node_with_score.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/token_text_splitter.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/transformation_category_names.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/validation_error.py +0 -0
- {llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/validation_error_loc_item.py +0 -0
{llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/component_definitions/client.py
RENAMED
```diff
@@ -23,9 +23,9 @@ class ComponentDefinitionsClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    def
+    def list_transformation_definitions(self) -> typing.List[ConfigurableTransformationDefinition]:
         """
-
+        List transformation component definitions.
 
         ---
         from llama_cloud.client import LlamaCloud
@@ -33,7 +33,7 @@ class ComponentDefinitionsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.component_definitions.
+        client.component_definitions.list_transformation_definitions()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -51,9 +51,9 @@ class ComponentDefinitionsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_data_source_definitions(self) -> typing.List[DataSourceDefinition]:
         """
-
+        List data source component definitions.
 
         ---
         from llama_cloud.client import LlamaCloud
@@ -61,7 +61,7 @@ class ComponentDefinitionsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.component_definitions.
+        client.component_definitions.list_data_source_definitions()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -77,9 +77,9 @@ class ComponentDefinitionsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_data_sink_definitions(self) -> typing.List[DataSinkDefinition]:
         """
-
+        List data sink component definitions.
 
         ---
         from llama_cloud.client import LlamaCloud
@@ -87,7 +87,7 @@ class ComponentDefinitionsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.component_definitions.
+        client.component_definitions.list_data_sink_definitions()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -108,9 +108,9 @@ class AsyncComponentDefinitionsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    async def
+    async def list_transformation_definitions(self) -> typing.List[ConfigurableTransformationDefinition]:
         """
-
+        List transformation component definitions.
 
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -118,7 +118,7 @@ class AsyncComponentDefinitionsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.component_definitions.
+        await client.component_definitions.list_transformation_definitions()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
@@ -136,9 +136,9 @@ class AsyncComponentDefinitionsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_data_source_definitions(self) -> typing.List[DataSourceDefinition]:
         """
-
+        List data source component definitions.
 
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -146,7 +146,7 @@ class AsyncComponentDefinitionsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.component_definitions.
+        await client.component_definitions.list_data_source_definitions()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
@@ -162,9 +162,9 @@ class AsyncComponentDefinitionsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_data_sink_definitions(self) -> typing.List[DataSinkDefinition]:
         """
-
+        List data sink component definitions.
 
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -172,7 +172,7 @@ class AsyncComponentDefinitionsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.component_definitions.
+        await client.component_definitions.list_data_sink_definitions()
        """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
```
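Taken together, this file's changes give the three component-definition listing methods readable names and one-line docstrings (the removed names are elided in the extracted diff). A minimal usage sketch, using only names confirmed by the hunks above:

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# The three renamed listing endpoints from the diff above.
transformations = client.component_definitions.list_transformation_definitions()
data_sources = client.component_definitions.list_data_source_definitions()
data_sinks = client.component_definitions.list_data_sink_definitions()
```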
{llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/data_sinks/client.py
RENAMED
```diff
@@ -33,7 +33,7 @@ class DataSinksClient:
 
     def list_data_sinks(self, *, project_id: typing.Optional[str] = None) -> typing.List[DataSink]:
         """
-
+        List data sinks for a given project.
         If project_id is not provided, uses the default project.
 
         Parameters:
@@ -268,7 +268,7 @@ class AsyncDataSinksClient:
 
     async def list_data_sinks(self, *, project_id: typing.Optional[str] = None) -> typing.List[DataSink]:
         """
-
+        List data sinks for a given project.
         If project_id is not provided, uses the default project.
 
         Parameters:
```
{llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/data_sources/client.py
RENAMED
```diff
@@ -34,7 +34,7 @@ class DataSourcesClient:
 
     def list_data_sources(self, *, project_id: typing.Optional[str] = None) -> typing.List[DataSource]:
         """
-
+        List data sources for a given project.
         If project_id is not provided, uses the default project.
 
         Parameters:
@@ -274,7 +274,7 @@ class AsyncDataSourcesClient:
 
     async def list_data_sources(self, *, project_id: typing.Optional[str] = None) -> typing.List[DataSource]:
         """
-
+        List data sources for a given project.
         If project_id is not provided, uses the default project.
 
         Parameters:
```
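The data-sink and data-source clients only gain docstring summaries; signatures are unchanged. For reference, a sketch of the two listing calls (project_id is optional per the signatures above):

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# project_id is optional; when omitted, the default project is used
# (per the new docstrings above). "string" is a placeholder id.
sinks = client.data_sinks.list_data_sinks()
sources = client.data_sources.list_data_sources(project_id="string")
```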
{llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/evals/client.py
RENAMED
```diff
@@ -130,9 +130,9 @@ class EvalsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_questions(self, dataset_id: str) -> typing.List[EvalQuestion]:
         """
-
+        List questions for a dataset.
 
         Parameters:
         - dataset_id: str.
@@ -142,7 +142,7 @@ class EvalsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.evals.
+        client.evals.list_questions(
             dataset_id="string",
         )
         """
@@ -348,9 +348,9 @@ class EvalsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_supported_models(self) -> typing.List[SupportedEvalLlmModel]:
         """
-
+        List supported models.
 
         ---
         from llama_cloud.client import LlamaCloud
@@ -358,7 +358,7 @@ class EvalsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.evals.
+        client.evals.list_supported_models()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -481,9 +481,9 @@ class AsyncEvalsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_questions(self, dataset_id: str) -> typing.List[EvalQuestion]:
         """
-
+        List questions for a dataset.
 
         Parameters:
         - dataset_id: str.
@@ -493,7 +493,7 @@ class AsyncEvalsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.evals.
+        await client.evals.list_questions(
             dataset_id="string",
         )
         """
@@ -699,9 +699,9 @@ class AsyncEvalsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_supported_models(self) -> typing.List[SupportedEvalLlmModel]:
         """
-
+        List supported models.
 
         ---
         from llama_cloud.client import AsyncLlamaCloud
@@ -709,7 +709,7 @@ class AsyncEvalsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.evals.
+        await client.evals.list_supported_models()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
```
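A short sketch of the renamed eval endpoints, with names taken from the hunks above:

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# "string" is a placeholder dataset id, as in the docstring examples.
questions = client.evals.list_questions(dataset_id="string")
models = client.evals.list_supported_models()
```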
{llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/files/client.py
RENAMED
```diff
@@ -31,7 +31,7 @@ class FilesClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    def
+    def get_file(self, id: str, *, project_id: typing.Optional[str] = None) -> File:
         """
         Read File metadata objects.
 
@@ -45,7 +45,7 @@ class FilesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.files.
+        client.files.get_file(
             id="string",
         )
         """
@@ -101,7 +101,7 @@ class FilesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_files(self, *, project_id: typing.Optional[str] = None) -> typing.List[File]:
         """
         Read File metadata objects.
 
@@ -113,7 +113,7 @@ class FilesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.files.
+        client.files.list_files()
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -293,7 +293,7 @@ class AsyncFilesClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
         self._client_wrapper = client_wrapper
 
-    async def
+    async def get_file(self, id: str, *, project_id: typing.Optional[str] = None) -> File:
         """
         Read File metadata objects.
 
@@ -307,7 +307,7 @@ class AsyncFilesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.files.
+        await client.files.get_file(
             id="string",
         )
         """
@@ -363,7 +363,7 @@ class AsyncFilesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_files(self, *, project_id: typing.Optional[str] = None) -> typing.List[File]:
         """
         Read File metadata objects.
 
@@ -375,7 +375,7 @@ class AsyncFilesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.files.
+        await client.files.list_files()
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
```
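The renamed file endpoints, sketched with placeholder values:

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Both endpoints read File metadata; id and project_id are placeholders.
one_file = client.files.get_file(id="string")
all_files = client.files.list_files(project_id="string")
```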
{llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/parsing/client.py
RENAMED
```diff
@@ -111,6 +111,8 @@ class ParsingClient:
         gpt_4_o_api_key: str,
         do_not_unroll_columns: bool,
         page_separator: str,
+        bounding_box: str,
+        target_pages: str,
         file: typing.IO,
     ) -> ParsingJob:
         """
@@ -137,6 +139,10 @@ class ParsingClient:
 
         - page_separator: str.
 
+        - bounding_box: str.
+
+        - target_pages: str.
+
         - file: typing.IO.
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -154,6 +160,8 @@ class ParsingClient:
                     "gpt4o_api_key": gpt_4_o_api_key,
                     "do_not_unroll_columns": do_not_unroll_columns,
                     "page_separator": page_separator,
+                    "bounding_box": bounding_box,
+                    "target_pages": target_pages,
                 }
             ),
             files={"file": file},
@@ -576,6 +584,8 @@ class AsyncParsingClient:
         gpt_4_o_api_key: str,
         do_not_unroll_columns: bool,
         page_separator: str,
+        bounding_box: str,
+        target_pages: str,
         file: typing.IO,
     ) -> ParsingJob:
         """
@@ -602,6 +612,10 @@ class AsyncParsingClient:
 
         - page_separator: str.
 
+        - bounding_box: str.
+
+        - target_pages: str.
+
         - file: typing.IO.
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -619,6 +633,8 @@ class AsyncParsingClient:
                     "gpt4o_api_key": gpt_4_o_api_key,
                     "do_not_unroll_columns": do_not_unroll_columns,
                     "page_separator": page_separator,
+                    "bounding_box": bounding_box,
+                    "target_pages": target_pages,
                 }
             ),
             files={"file": file},
```
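The parsing upload method (its name is not visible in this extract) now accepts two extra multipart fields, matching the LlamaParseParameters change further down. A minimal sketch of just the new form fields; both value formats are assumptions, not stated in the diff:

```python
# Only the two new multipart fields added in 0.0.7 are shown; the rest of the
# request body is unchanged from 0.0.6.
new_parse_fields = {
    "bounding_box": "0.1,0,0.1,0",  # assumed "top,right,bottom,left" crop margins
    "target_pages": "0,1,2",        # assumed comma-separated page indices
}
```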
{llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/pipelines/client.py
RENAMED
```diff
@@ -644,7 +644,7 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_pipeline_files(self, pipeline_id: str) -> typing.List[PipelineFile]:
         """
         Get files for a pipeline.
 
@@ -656,7 +656,7 @@ class PipelinesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.pipelines.
+        client.pipelines.list_pipeline_files(
             pipeline_id="string",
         )
         """
@@ -837,7 +837,7 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
         """
         Get data sources for a pipeline.
 
@@ -849,7 +849,7 @@ class PipelinesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.pipelines.
+        client.pipelines.list_pipeline_data_sources(
             pipeline_id="string",
         )
         """
@@ -1064,7 +1064,7 @@ class PipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_pipeline_jobs(self, pipeline_id: str) -> typing.List[PipelineDeployment]:
         """
         Get jobs for a pipeline.
 
@@ -1076,7 +1076,7 @@ class PipelinesClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
        )
-        client.pipelines.
+        client.pipelines.list_pipeline_jobs(
             pipeline_id="string",
         )
         """
@@ -1968,7 +1968,7 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_pipeline_files(self, pipeline_id: str) -> typing.List[PipelineFile]:
         """
         Get files for a pipeline.
 
@@ -1980,7 +1980,7 @@ class AsyncPipelinesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.pipelines.
+        await client.pipelines.list_pipeline_files(
             pipeline_id="string",
         )
         """
@@ -2161,7 +2161,7 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
         """
         Get data sources for a pipeline.
 
@@ -2173,7 +2173,7 @@ class AsyncPipelinesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.pipelines.
+        await client.pipelines.list_pipeline_data_sources(
             pipeline_id="string",
         )
         """
@@ -2388,7 +2388,7 @@ class AsyncPipelinesClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_pipeline_jobs(self, pipeline_id: str) -> typing.List[PipelineDeployment]:
         """
         Get jobs for a pipeline.
 
@@ -2400,7 +2400,7 @@ class AsyncPipelinesClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.pipelines.
+        await client.pipelines.list_pipeline_jobs(
             pipeline_id="string",
         )
         """
```
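A sketch of the three renamed pipeline listing calls (sync client shown; the async client mirrors it with await):

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# pipeline_id values are placeholders, as in the docstring examples.
files = client.pipelines.list_pipeline_files(pipeline_id="string")
data_sources = client.pipelines.list_pipeline_data_sources(pipeline_id="string")
jobs = client.pipelines.list_pipeline_jobs(pipeline_id="string")
```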
{llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/resources/projects/client.py
RENAMED
```diff
@@ -238,9 +238,9 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
         """
-
+        List eval datasets for a project.
 
         Parameters:
         - project_id: str.
@@ -250,7 +250,7 @@ class ProjectsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.
+        client.projects.list_datasets_for_project(
             project_id="string",
         )
         """
@@ -353,9 +353,9 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
         """
-
+        List local eval results for a project.
 
         Parameters:
         - project_id: str.
@@ -365,7 +365,7 @@ class ProjectsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.
+        client.projects.list_local_evals_for_project(
             project_id="string",
         )
         """
@@ -385,9 +385,9 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
         """
-
+        List local eval sets for a project.
 
         Parameters:
         - project_id: str.
@@ -397,7 +397,7 @@ class ProjectsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.
+        client.projects.list_local_eval_sets_for_project(
             project_id="string",
         )
         """
@@ -457,9 +457,9 @@ class ProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    def
+    def list_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
         """
-
+        List PromptMixin prompt sets for a project.
 
         Parameters:
         - project_id: str.
@@ -469,7 +469,7 @@ class ProjectsClient:
         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.projects.
+        client.projects.list_promptmixin_prompts(
             project_id="string",
         )
         """
@@ -824,9 +824,9 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_datasets_for_project(self, project_id: str) -> typing.List[EvalDataset]:
         """
-
+        List eval datasets for a project.
 
         Parameters:
         - project_id: str.
@@ -836,7 +836,7 @@ class AsyncProjectsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.
+        await client.projects.list_datasets_for_project(
             project_id="string",
         )
         """
@@ -939,9 +939,9 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_local_evals_for_project(self, project_id: str) -> typing.List[LocalEvalResults]:
         """
-
+        List local eval results for a project.
 
         Parameters:
         - project_id: str.
@@ -951,7 +951,7 @@ class AsyncProjectsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.
+        await client.projects.list_local_evals_for_project(
             project_id="string",
         )
         """
@@ -971,9 +971,9 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_local_eval_sets_for_project(self, project_id: str) -> typing.List[LocalEvalSets]:
         """
-
+        List local eval sets for a project.
 
         Parameters:
         - project_id: str.
@@ -983,7 +983,7 @@ class AsyncProjectsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.
+        await client.projects.list_local_eval_sets_for_project(
             project_id="string",
         )
         """
@@ -1043,9 +1043,9 @@ class AsyncProjectsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
 
-    async def
+    async def list_promptmixin_prompts(self, project_id: str) -> typing.List[PromptMixinPrompts]:
         """
-
+        List PromptMixin prompt sets for a project.
 
         Parameters:
         - project_id: str.
@@ -1055,7 +1055,7 @@ class AsyncProjectsClient:
         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.projects.
+        await client.projects.list_promptmixin_prompts(
             project_id="string",
         )
         """
```
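A sketch of the four renamed project listing calls, using only names confirmed by the hunks above:

```python
from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

project_id = "string"  # placeholder
datasets = client.projects.list_datasets_for_project(project_id=project_id)
local_evals = client.projects.list_local_evals_for_project(project_id=project_id)
eval_sets = client.projects.list_local_eval_sets_for_project(project_id=project_id)
prompts = client.projects.list_promptmixin_prompts(project_id=project_id)
```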
{llama_cloud-0.0.6 → llama_cloud-0.0.7}/llama_cloud/types/llama_parse_parameters.py
RENAMED
```diff
@@ -31,6 +31,8 @@ class LlamaParseParameters(pydantic.BaseModel):
     gpt_4_o_api_key: typing.Optional[str] = pydantic.Field(alias="gpt4o_api_key")
     do_not_unroll_columns: typing.Optional[bool]
     page_separator: typing.Optional[str]
+    bounding_box: typing.Optional[str]
+    target_pages: typing.Optional[str]
 
     def json(self, **kwargs: typing.Any) -> str:
         kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
```
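Since both new model fields are Optional[str], existing callers that omit them are unaffected. A hedged construction sketch (the import path follows the file layout in the listing above; the example values are assumptions about the expected format):

```python
from llama_cloud.types.llama_parse_parameters import LlamaParseParameters

# Both new fields default to None, so this is a backwards-compatible change.
params = LlamaParseParameters(
    page_separator="\n---\n",
    bounding_box="0.1,0,0.1,0",  # assumed "top,right,bottom,left" crop margins
    target_pages="0,1,2",        # assumed comma-separated page indices
)
print(params.json())  # serializes by alias, excluding unset fields (per the diff)
```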