llama-cloud 0.1.41__py3-none-any.whl → 1.0.0b4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_cloud/__init__.py +101 -816
- llama_cloud/_base_client.py +2124 -0
- llama_cloud/_client.py +795 -0
- llama_cloud/_compat.py +219 -0
- llama_cloud/_constants.py +14 -0
- llama_cloud/_exceptions.py +108 -0
- llama_cloud/_files.py +127 -0
- llama_cloud/_models.py +872 -0
- llama_cloud/_polling.py +182 -0
- llama_cloud/_qs.py +150 -0
- llama_cloud/_resource.py +43 -0
- llama_cloud/_response.py +832 -0
- llama_cloud/_streaming.py +333 -0
- llama_cloud/_types.py +270 -0
- llama_cloud/_utils/__init__.py +64 -0
- llama_cloud/_utils/_compat.py +45 -0
- llama_cloud/_utils/_datetime_parse.py +136 -0
- llama_cloud/_utils/_logs.py +25 -0
- llama_cloud/_utils/_proxy.py +65 -0
- llama_cloud/_utils/_reflection.py +42 -0
- llama_cloud/_utils/_resources_proxy.py +24 -0
- llama_cloud/_utils/_streams.py +12 -0
- llama_cloud/_utils/_sync.py +58 -0
- llama_cloud/_utils/_transform.py +457 -0
- llama_cloud/_utils/_typing.py +156 -0
- llama_cloud/_utils/_utils.py +421 -0
- llama_cloud/_version.py +4 -0
- llama_cloud/lib/__init__.py +0 -0
- llama_cloud/lib/index/__init__.py +13 -0
- llama_cloud/lib/index/api_utils.py +300 -0
- llama_cloud/lib/index/base.py +1041 -0
- llama_cloud/lib/index/composite_retriever.py +272 -0
- llama_cloud/lib/index/retriever.py +233 -0
- llama_cloud/pagination.py +465 -0
- llama_cloud/py.typed +0 -0
- llama_cloud/resources/__init__.py +136 -107
- llama_cloud/resources/beta/__init__.py +102 -1
- llama_cloud/resources/beta/agent_data.py +1041 -0
- llama_cloud/resources/beta/batch/__init__.py +33 -0
- llama_cloud/resources/beta/batch/batch.py +664 -0
- llama_cloud/resources/beta/batch/job_items.py +348 -0
- llama_cloud/resources/beta/beta.py +262 -0
- llama_cloud/resources/beta/directories/__init__.py +33 -0
- llama_cloud/resources/beta/directories/directories.py +719 -0
- llama_cloud/resources/beta/directories/files.py +913 -0
- llama_cloud/resources/beta/parse_configurations.py +743 -0
- llama_cloud/resources/beta/sheets.py +1130 -0
- llama_cloud/resources/beta/split.py +917 -0
- llama_cloud/resources/classifier/__init__.py +32 -1
- llama_cloud/resources/classifier/classifier.py +588 -0
- llama_cloud/resources/classifier/jobs.py +563 -0
- llama_cloud/resources/data_sinks.py +579 -0
- llama_cloud/resources/data_sources.py +651 -0
- llama_cloud/resources/extraction/__init__.py +61 -0
- llama_cloud/resources/extraction/extraction.py +609 -0
- llama_cloud/resources/extraction/extraction_agents/__init__.py +33 -0
- llama_cloud/resources/extraction/extraction_agents/extraction_agents.py +633 -0
- llama_cloud/resources/extraction/extraction_agents/schema.py +308 -0
- llama_cloud/resources/extraction/jobs.py +1106 -0
- llama_cloud/resources/extraction/runs.py +498 -0
- llama_cloud/resources/files.py +784 -0
- llama_cloud/resources/parsing.py +1296 -0
- llama_cloud/resources/pipelines/__init__.py +98 -24
- llama_cloud/resources/pipelines/data_sources.py +529 -0
- llama_cloud/resources/pipelines/documents.py +810 -0
- llama_cloud/resources/pipelines/files.py +682 -0
- llama_cloud/resources/pipelines/images.py +513 -0
- llama_cloud/resources/pipelines/metadata.py +265 -0
- llama_cloud/resources/pipelines/pipelines.py +1525 -0
- llama_cloud/resources/pipelines/sync.py +243 -0
- llama_cloud/resources/projects.py +276 -0
- llama_cloud/resources/retrievers/__init__.py +32 -1
- llama_cloud/resources/retrievers/retriever.py +238 -0
- llama_cloud/resources/retrievers/retrievers.py +920 -0
- llama_cloud/types/__init__.py +171 -721
- llama_cloud/types/advanced_mode_transform_config.py +102 -38
- llama_cloud/types/advanced_mode_transform_config_param.py +102 -0
- llama_cloud/types/auto_transform_config.py +11 -25
- llama_cloud/types/auto_transform_config_param.py +17 -0
- llama_cloud/types/azure_openai_embedding.py +62 -0
- llama_cloud/types/azure_openai_embedding_config.py +17 -0
- llama_cloud/types/azure_openai_embedding_config_param.py +17 -0
- llama_cloud/types/azure_openai_embedding_param.py +61 -0
- llama_cloud/types/b_box.py +37 -0
- llama_cloud/types/bedrock_embedding.py +49 -46
- llama_cloud/types/bedrock_embedding_config.py +10 -27
- llama_cloud/types/bedrock_embedding_config_param.py +17 -0
- llama_cloud/types/bedrock_embedding_param.py +48 -0
- llama_cloud/types/beta/__init__.py +59 -0
- llama_cloud/types/beta/agent_data.py +26 -0
- llama_cloud/types/beta/agent_data_agent_data_params.py +20 -0
- llama_cloud/types/beta/agent_data_aggregate_params.py +79 -0
- llama_cloud/types/beta/agent_data_aggregate_response.py +17 -0
- llama_cloud/types/beta/agent_data_delete_by_query_params.py +43 -0
- llama_cloud/types/beta/agent_data_delete_by_query_response.py +11 -0
- llama_cloud/types/beta/agent_data_delete_params.py +14 -0
- llama_cloud/types/beta/agent_data_delete_response.py +8 -0
- llama_cloud/types/beta/agent_data_get_params.py +14 -0
- llama_cloud/types/beta/agent_data_search_params.py +69 -0
- llama_cloud/types/beta/agent_data_update_params.py +16 -0
- llama_cloud/types/beta/batch/__init__.py +12 -0
- llama_cloud/types/beta/batch/job_item_get_processing_results_params.py +17 -0
- llama_cloud/types/beta/batch/job_item_get_processing_results_response.py +409 -0
- llama_cloud/types/beta/batch/job_item_list_params.py +23 -0
- llama_cloud/types/beta/batch/job_item_list_response.py +42 -0
- llama_cloud/types/beta/batch_cancel_params.py +21 -0
- llama_cloud/types/beta/batch_cancel_response.py +23 -0
- llama_cloud/types/beta/batch_create_params.py +399 -0
- llama_cloud/types/beta/batch_create_response.py +63 -0
- llama_cloud/types/beta/batch_get_status_params.py +14 -0
- llama_cloud/types/beta/batch_get_status_response.py +73 -0
- llama_cloud/types/beta/batch_list_params.py +29 -0
- llama_cloud/types/beta/batch_list_response.py +63 -0
- llama_cloud/types/beta/directories/__init__.py +15 -0
- llama_cloud/types/beta/directories/file_add_params.py +26 -0
- llama_cloud/types/beta/directories/file_add_response.py +42 -0
- llama_cloud/types/beta/directories/file_delete_params.py +16 -0
- llama_cloud/types/beta/directories/file_get_params.py +16 -0
- llama_cloud/types/beta/directories/file_get_response.py +42 -0
- llama_cloud/types/beta/directories/file_list_params.py +28 -0
- llama_cloud/types/beta/directories/file_list_response.py +42 -0
- llama_cloud/types/beta/directories/file_update_params.py +27 -0
- llama_cloud/types/beta/directories/file_update_response.py +42 -0
- llama_cloud/types/beta/directories/file_upload_params.py +24 -0
- llama_cloud/types/beta/directories/file_upload_response.py +42 -0
- llama_cloud/types/beta/directory_create_params.py +23 -0
- llama_cloud/types/beta/directory_create_response.py +36 -0
- llama_cloud/types/beta/directory_delete_params.py +14 -0
- llama_cloud/types/beta/directory_get_params.py +14 -0
- llama_cloud/types/beta/directory_get_response.py +36 -0
- llama_cloud/types/beta/directory_list_params.py +24 -0
- llama_cloud/types/beta/directory_list_response.py +36 -0
- llama_cloud/types/beta/directory_update_params.py +20 -0
- llama_cloud/types/beta/directory_update_response.py +36 -0
- llama_cloud/types/beta/parse_configuration.py +40 -0
- llama_cloud/types/beta/parse_configuration_create_params.py +34 -0
- llama_cloud/types/beta/parse_configuration_delete_params.py +14 -0
- llama_cloud/types/beta/parse_configuration_get_params.py +14 -0
- llama_cloud/types/beta/parse_configuration_list_params.py +24 -0
- llama_cloud/types/beta/parse_configuration_query_response.py +28 -0
- llama_cloud/types/beta/parse_configuration_update_params.py +22 -0
- llama_cloud/types/beta/sheet_create_params.py +22 -0
- llama_cloud/types/beta/sheet_delete_job_params.py +14 -0
- llama_cloud/types/beta/sheet_get_params.py +16 -0
- llama_cloud/types/beta/sheet_get_result_table_params.py +20 -0
- llama_cloud/types/beta/sheet_list_params.py +20 -0
- llama_cloud/types/beta/sheets_job.py +88 -0
- llama_cloud/types/beta/sheets_parsing_config.py +49 -0
- llama_cloud/types/beta/sheets_parsing_config_param.py +51 -0
- llama_cloud/types/beta/split_category.py +17 -0
- llama_cloud/types/beta/split_category_param.py +18 -0
- llama_cloud/types/beta/split_create_params.py +36 -0
- llama_cloud/types/beta/split_create_response.py +48 -0
- llama_cloud/types/beta/split_document_input.py +15 -0
- llama_cloud/types/beta/split_document_input_param.py +17 -0
- llama_cloud/types/beta/split_get_params.py +14 -0
- llama_cloud/types/beta/split_get_response.py +48 -0
- llama_cloud/types/beta/split_list_params.py +18 -0
- llama_cloud/types/beta/split_list_response.py +48 -0
- llama_cloud/types/beta/split_result_response.py +15 -0
- llama_cloud/types/beta/split_segment_response.py +20 -0
- llama_cloud/types/classifier/__init__.py +15 -0
- llama_cloud/types/classifier/classifier_rule.py +25 -0
- llama_cloud/types/classifier/classifier_rule_param.py +27 -0
- llama_cloud/types/classifier/classify_job.py +51 -0
- llama_cloud/types/classifier/classify_job_param.py +53 -0
- llama_cloud/types/classifier/classify_parsing_configuration.py +21 -0
- llama_cloud/types/classifier/classify_parsing_configuration_param.py +23 -0
- llama_cloud/types/classifier/job_create_params.py +30 -0
- llama_cloud/types/classifier/job_get_params.py +14 -0
- llama_cloud/types/classifier/job_get_results_params.py +14 -0
- llama_cloud/types/classifier/job_get_results_response.py +66 -0
- llama_cloud/types/classifier/job_list_params.py +18 -0
- llama_cloud/types/cohere_embedding.py +37 -40
- llama_cloud/types/cohere_embedding_config.py +10 -27
- llama_cloud/types/cohere_embedding_config_param.py +17 -0
- llama_cloud/types/cohere_embedding_param.py +36 -0
- llama_cloud/types/composite_retrieval_mode.py +4 -18
- llama_cloud/types/composite_retrieval_result.py +52 -37
- llama_cloud/types/data_sink.py +46 -39
- llama_cloud/types/data_sink_create_param.py +41 -0
- llama_cloud/types/data_sink_create_params.py +44 -0
- llama_cloud/types/data_sink_list_params.py +14 -0
- llama_cloud/types/data_sink_list_response.py +10 -0
- llama_cloud/types/data_sink_update_params.py +40 -0
- llama_cloud/types/data_source.py +67 -39
- llama_cloud/types/data_source_create_params.py +65 -0
- llama_cloud/types/data_source_list_params.py +14 -0
- llama_cloud/types/data_source_list_response.py +10 -0
- llama_cloud/types/data_source_reader_version_metadata.py +8 -27
- llama_cloud/types/data_source_update_params.py +61 -0
- llama_cloud/types/extraction/__init__.py +25 -0
- llama_cloud/types/extraction/extract_agent.py +41 -0
- llama_cloud/types/extraction/extract_config.py +118 -0
- llama_cloud/types/extraction/extract_config_param.py +118 -0
- llama_cloud/types/extraction/extract_job.py +32 -0
- llama_cloud/types/extraction/extract_run.py +64 -0
- llama_cloud/types/extraction/extraction_agent_create_params.py +25 -0
- llama_cloud/types/extraction/extraction_agent_list_params.py +17 -0
- llama_cloud/types/extraction/extraction_agent_list_response.py +10 -0
- llama_cloud/types/extraction/extraction_agent_update_params.py +18 -0
- llama_cloud/types/extraction/extraction_agents/__init__.py +8 -0
- llama_cloud/types/extraction/extraction_agents/schema_generate_schema_params.py +23 -0
- llama_cloud/types/extraction/extraction_agents/schema_generate_schema_response.py +14 -0
- llama_cloud/types/extraction/extraction_agents/schema_validate_schema_params.py +12 -0
- llama_cloud/types/extraction/extraction_agents/schema_validate_schema_response.py +13 -0
- llama_cloud/types/extraction/job_create_params.py +38 -0
- llama_cloud/types/extraction/job_file_params.py +29 -0
- llama_cloud/types/extraction/job_get_result_params.py +14 -0
- llama_cloud/types/extraction/job_get_result_response.py +27 -0
- llama_cloud/types/extraction/job_list_params.py +11 -0
- llama_cloud/types/extraction/job_list_response.py +10 -0
- llama_cloud/types/extraction/run_delete_params.py +14 -0
- llama_cloud/types/extraction/run_get_by_job_params.py +14 -0
- llama_cloud/types/extraction/run_get_params.py +14 -0
- llama_cloud/types/extraction/run_list_params.py +15 -0
- llama_cloud/types/extraction/webhook_configuration.py +43 -0
- llama_cloud/types/extraction/webhook_configuration_param.py +43 -0
- llama_cloud/types/extraction_run_params.py +45 -0
- llama_cloud/types/fail_page_mode.py +4 -26
- llama_cloud/types/file.py +48 -40
- llama_cloud/types/file_create_params.py +28 -0
- llama_cloud/types/file_create_response.py +38 -0
- llama_cloud/types/file_delete_params.py +14 -0
- llama_cloud/types/file_get_params.py +16 -0
- llama_cloud/types/file_list_params.py +40 -0
- llama_cloud/types/file_list_response.py +38 -0
- llama_cloud/types/file_query_params.py +61 -0
- llama_cloud/types/file_query_response.py +47 -27
- llama_cloud/types/gemini_embedding.py +40 -39
- llama_cloud/types/gemini_embedding_config.py +10 -27
- llama_cloud/types/gemini_embedding_config_param.py +17 -0
- llama_cloud/types/gemini_embedding_param.py +39 -0
- llama_cloud/types/hugging_face_inference_api_embedding.py +62 -46
- llama_cloud/types/hugging_face_inference_api_embedding_config.py +11 -28
- llama_cloud/types/hugging_face_inference_api_embedding_config_param.py +17 -0
- llama_cloud/types/hugging_face_inference_api_embedding_param.py +60 -0
- llama_cloud/types/list_item.py +48 -0
- llama_cloud/types/llama_parse_parameters.py +251 -130
- llama_cloud/types/llama_parse_parameters_param.py +261 -0
- llama_cloud/types/llama_parse_supported_file_extensions.py +84 -310
- llama_cloud/types/managed_ingestion_status_response.py +39 -37
- llama_cloud/types/message_role.py +4 -46
- llama_cloud/types/metadata_filters.py +45 -29
- llama_cloud/types/metadata_filters_param.py +58 -0
- llama_cloud/types/openai_embedding.py +56 -0
- llama_cloud/types/openai_embedding_config.py +17 -0
- llama_cloud/types/openai_embedding_config_param.py +17 -0
- llama_cloud/types/openai_embedding_param.py +55 -0
- llama_cloud/types/page_figure_node_with_score.py +32 -29
- llama_cloud/types/page_screenshot_node_with_score.py +23 -29
- llama_cloud/types/parsing_create_params.py +586 -0
- llama_cloud/types/parsing_create_response.py +33 -0
- llama_cloud/types/parsing_get_params.py +27 -0
- llama_cloud/types/parsing_get_response.py +364 -0
- llama_cloud/types/parsing_languages.py +94 -0
- llama_cloud/types/parsing_list_params.py +23 -0
- llama_cloud/types/parsing_list_response.py +33 -0
- llama_cloud/types/parsing_mode.py +13 -46
- llama_cloud/types/parsing_upload_file_params.py +14 -0
- llama_cloud/types/parsing_upload_file_response.py +33 -0
- llama_cloud/types/pipeline.py +180 -62
- llama_cloud/types/pipeline_create_params.py +95 -0
- llama_cloud/types/pipeline_get_status_params.py +12 -0
- llama_cloud/types/pipeline_list_params.py +23 -0
- llama_cloud/types/pipeline_list_response.py +12 -0
- llama_cloud/types/pipeline_metadata_config.py +9 -30
- llama_cloud/types/pipeline_metadata_config_param.py +17 -0
- llama_cloud/types/pipeline_retrieve_params.py +74 -0
- llama_cloud/types/pipeline_retrieve_response.py +63 -0
- llama_cloud/types/pipeline_type.py +4 -18
- llama_cloud/types/pipeline_update_params.py +90 -0
- llama_cloud/types/pipeline_upsert_params.py +95 -0
- llama_cloud/types/pipelines/__init__.py +38 -0
- llama_cloud/types/pipelines/cloud_document.py +29 -0
- llama_cloud/types/pipelines/cloud_document_create_param.py +30 -0
- llama_cloud/types/pipelines/data_source_get_data_sources_response.py +10 -0
- llama_cloud/types/pipelines/data_source_sync_params.py +16 -0
- llama_cloud/types/pipelines/data_source_update_data_sources_params.py +25 -0
- llama_cloud/types/pipelines/data_source_update_data_sources_response.py +10 -0
- llama_cloud/types/pipelines/data_source_update_params.py +15 -0
- llama_cloud/types/pipelines/document_create_params.py +14 -0
- llama_cloud/types/pipelines/document_create_response.py +10 -0
- llama_cloud/types/pipelines/document_get_chunks_response.py +10 -0
- llama_cloud/types/pipelines/document_list_params.py +22 -0
- llama_cloud/types/pipelines/document_upsert_params.py +14 -0
- llama_cloud/types/pipelines/document_upsert_response.py +10 -0
- llama_cloud/types/pipelines/file_create_params.py +22 -0
- llama_cloud/types/pipelines/file_create_response.py +10 -0
- llama_cloud/types/pipelines/file_get_status_counts_params.py +14 -0
- llama_cloud/types/pipelines/file_get_status_counts_response.py +24 -0
- llama_cloud/types/pipelines/file_list_params.py +22 -0
- llama_cloud/types/pipelines/file_update_params.py +15 -0
- llama_cloud/types/pipelines/image_get_page_figure_params.py +18 -0
- llama_cloud/types/pipelines/image_get_page_screenshot_params.py +16 -0
- llama_cloud/types/pipelines/image_list_page_figures_params.py +14 -0
- llama_cloud/types/pipelines/image_list_page_figures_response.py +34 -0
- llama_cloud/types/pipelines/image_list_page_screenshots_params.py +14 -0
- llama_cloud/types/pipelines/image_list_page_screenshots_response.py +25 -0
- llama_cloud/types/pipelines/metadata_create_params.py +13 -0
- llama_cloud/types/pipelines/metadata_create_response.py +8 -0
- llama_cloud/types/pipelines/pipeline_data_source.py +96 -0
- llama_cloud/types/pipelines/pipeline_file.py +70 -0
- llama_cloud/types/pipelines/text_node.py +89 -0
- llama_cloud/types/preset_retrieval_params.py +61 -49
- llama_cloud/types/preset_retrieval_params_param.py +71 -0
- llama_cloud/types/presigned_url.py +13 -29
- llama_cloud/types/project.py +24 -36
- llama_cloud/types/project_get_params.py +12 -0
- llama_cloud/types/project_list_params.py +14 -0
- llama_cloud/types/project_list_response.py +10 -0
- llama_cloud/types/re_rank_config_param.py +18 -0
- llama_cloud/types/retrieval_mode.py +4 -26
- llama_cloud/types/retriever.py +31 -38
- llama_cloud/types/retriever_create_params.py +26 -0
- llama_cloud/types/retriever_get_params.py +14 -0
- llama_cloud/types/retriever_list_params.py +16 -0
- llama_cloud/types/retriever_list_response.py +12 -0
- llama_cloud/types/retriever_pipeline.py +26 -34
- llama_cloud/types/retriever_pipeline_param.py +28 -0
- llama_cloud/types/retriever_search_params.py +38 -0
- llama_cloud/types/retriever_update_params.py +19 -0
- llama_cloud/types/retriever_upsert_params.py +26 -0
- llama_cloud/types/retrievers/__init__.py +5 -0
- llama_cloud/types/retrievers/retriever_search_params.py +32 -0
- llama_cloud/types/shared/__init__.py +21 -0
- llama_cloud/types/shared/cloud_astra_db_vector_store.py +39 -0
- llama_cloud/types/shared/cloud_az_storage_blob_data_source.py +34 -0
- llama_cloud/types/shared/cloud_azure_ai_search_vector_store.py +30 -0
- llama_cloud/types/shared/cloud_box_data_source.py +31 -0
- llama_cloud/types/shared/cloud_confluence_data_source.py +53 -0
- llama_cloud/types/shared/cloud_jira_data_source.py +30 -0
- llama_cloud/types/shared/cloud_jira_data_source_v2.py +49 -0
- llama_cloud/types/shared/cloud_milvus_vector_store.py +21 -0
- llama_cloud/types/shared/cloud_mongodb_atlas_vector_search.py +36 -0
- llama_cloud/types/shared/cloud_notion_page_data_source.py +19 -0
- llama_cloud/types/shared/cloud_one_drive_data_source.py +32 -0
- llama_cloud/types/shared/cloud_pinecone_vector_store.py +32 -0
- llama_cloud/types/shared/cloud_postgres_vector_store.py +35 -0
- llama_cloud/types/shared/cloud_qdrant_vector_store.py +35 -0
- llama_cloud/types/shared/cloud_s3_data_source.py +28 -0
- llama_cloud/types/shared/cloud_sharepoint_data_source.py +55 -0
- llama_cloud/types/shared/cloud_slack_data_source.py +31 -0
- llama_cloud/types/shared/failure_handling_config.py +16 -0
- llama_cloud/types/shared/pg_vector_hnsw_settings.py +27 -0
- llama_cloud/types/shared_params/__init__.py +21 -0
- llama_cloud/types/shared_params/cloud_astra_db_vector_store.py +42 -0
- llama_cloud/types/shared_params/cloud_az_storage_blob_data_source.py +41 -0
- llama_cloud/types/shared_params/cloud_azure_ai_search_vector_store.py +34 -0
- llama_cloud/types/shared_params/cloud_box_data_source.py +40 -0
- llama_cloud/types/shared_params/cloud_confluence_data_source.py +58 -0
- llama_cloud/types/shared_params/cloud_jira_data_source.py +34 -0
- llama_cloud/types/shared_params/cloud_jira_data_source_v2.py +54 -0
- llama_cloud/types/shared_params/cloud_milvus_vector_store.py +24 -0
- llama_cloud/types/shared_params/cloud_mongodb_atlas_vector_search.py +39 -0
- llama_cloud/types/shared_params/cloud_notion_page_data_source.py +23 -0
- llama_cloud/types/shared_params/cloud_one_drive_data_source.py +37 -0
- llama_cloud/types/shared_params/cloud_pinecone_vector_store.py +35 -0
- llama_cloud/types/shared_params/cloud_postgres_vector_store.py +39 -0
- llama_cloud/types/shared_params/cloud_qdrant_vector_store.py +37 -0
- llama_cloud/types/shared_params/cloud_s3_data_source.py +32 -0
- llama_cloud/types/shared_params/cloud_sharepoint_data_source.py +60 -0
- llama_cloud/types/shared_params/cloud_slack_data_source.py +35 -0
- llama_cloud/types/shared_params/failure_handling_config.py +16 -0
- llama_cloud/types/shared_params/pg_vector_hnsw_settings.py +26 -0
- llama_cloud/types/sparse_model_config.py +16 -30
- llama_cloud/types/sparse_model_config_param.py +25 -0
- llama_cloud/types/status_enum.py +4 -34
- llama_cloud/types/vertex_ai_embedding_config.py +10 -27
- llama_cloud/types/vertex_ai_embedding_config_param.py +17 -0
- llama_cloud/types/vertex_text_embedding.py +47 -45
- llama_cloud/types/vertex_text_embedding_param.py +45 -0
- llama_cloud-1.0.0b4.dist-info/METADATA +546 -0
- llama_cloud-1.0.0b4.dist-info/RECORD +376 -0
- {llama_cloud-0.1.41.dist-info → llama_cloud-1.0.0b4.dist-info}/WHEEL +1 -1
- llama_cloud-1.0.0b4.dist-info/licenses/LICENSE +7 -0
- llama_cloud/client.py +0 -108
- llama_cloud/core/__init__.py +0 -17
- llama_cloud/core/api_error.py +0 -15
- llama_cloud/core/client_wrapper.py +0 -51
- llama_cloud/core/datetime_utils.py +0 -28
- llama_cloud/core/jsonable_encoder.py +0 -106
- llama_cloud/core/remove_none_from_dict.py +0 -11
- llama_cloud/environment.py +0 -7
- llama_cloud/errors/__init__.py +0 -5
- llama_cloud/errors/unprocessable_entity_error.py +0 -9
- llama_cloud/resources/admin/__init__.py +0 -2
- llama_cloud/resources/admin/client.py +0 -196
- llama_cloud/resources/agent_deployments/__init__.py +0 -2
- llama_cloud/resources/agent_deployments/client.py +0 -160
- llama_cloud/resources/alpha/__init__.py +0 -2
- llama_cloud/resources/alpha/client.py +0 -112
- llama_cloud/resources/beta/client.py +0 -2664
- llama_cloud/resources/chat_apps/__init__.py +0 -2
- llama_cloud/resources/chat_apps/client.py +0 -616
- llama_cloud/resources/classifier/client.py +0 -444
- llama_cloud/resources/data_sinks/__init__.py +0 -5
- llama_cloud/resources/data_sinks/client.py +0 -535
- llama_cloud/resources/data_sinks/types/__init__.py +0 -5
- llama_cloud/resources/data_sinks/types/data_sink_update_component.py +0 -22
- llama_cloud/resources/data_sources/__init__.py +0 -5
- llama_cloud/resources/data_sources/client.py +0 -548
- llama_cloud/resources/data_sources/types/__init__.py +0 -6
- llama_cloud/resources/data_sources/types/data_source_update_component.py +0 -28
- llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py +0 -7
- llama_cloud/resources/embedding_model_configs/__init__.py +0 -23
- llama_cloud/resources/embedding_model_configs/client.py +0 -420
- llama_cloud/resources/embedding_model_configs/types/__init__.py +0 -23
- llama_cloud/resources/embedding_model_configs/types/embedding_model_config_create_embedding_config.py +0 -89
- llama_cloud/resources/evals/__init__.py +0 -2
- llama_cloud/resources/evals/client.py +0 -85
- llama_cloud/resources/files/__init__.py +0 -5
- llama_cloud/resources/files/client.py +0 -1454
- llama_cloud/resources/files/types/__init__.py +0 -5
- llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py +0 -7
- llama_cloud/resources/jobs/__init__.py +0 -2
- llama_cloud/resources/jobs/client.py +0 -164
- llama_cloud/resources/llama_extract/__init__.py +0 -27
- llama_cloud/resources/llama_extract/client.py +0 -2082
- llama_cloud/resources/llama_extract/types/__init__.py +0 -25
- llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py +0 -9
- llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_zero_value.py +0 -7
- llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py +0 -9
- llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_zero_value.py +0 -7
- llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override.py +0 -9
- llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override_zero_value.py +0 -7
- llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py +0 -9
- llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py +0 -7
- llama_cloud/resources/llama_extract/types/extract_stateless_request_data_schema.py +0 -9
- llama_cloud/resources/llama_extract/types/extract_stateless_request_data_schema_zero_value.py +0 -7
- llama_cloud/resources/organizations/__init__.py +0 -2
- llama_cloud/resources/organizations/client.py +0 -1448
- llama_cloud/resources/parsing/__init__.py +0 -2
- llama_cloud/resources/parsing/client.py +0 -2392
- llama_cloud/resources/pipelines/client.py +0 -3436
- llama_cloud/resources/pipelines/types/__init__.py +0 -29
- llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py +0 -7
- llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py +0 -89
- llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py +0 -8
- llama_cloud/resources/pipelines/types/retrieval_params_search_filters_inference_schema_value.py +0 -7
- llama_cloud/resources/projects/__init__.py +0 -2
- llama_cloud/resources/projects/client.py +0 -636
- llama_cloud/resources/retrievers/client.py +0 -837
- llama_cloud/resources/users/__init__.py +0 -2
- llama_cloud/resources/users/client.py +0 -155
- llama_cloud/types/advanced_mode_transform_config_chunking_config.py +0 -67
- llama_cloud/types/advanced_mode_transform_config_segmentation_config.py +0 -45
- llama_cloud/types/agent_data.py +0 -40
- llama_cloud/types/agent_deployment_list.py +0 -32
- llama_cloud/types/agent_deployment_summary.py +0 -39
- llama_cloud/types/aggregate_group.py +0 -37
- llama_cloud/types/azure_open_ai_embedding.py +0 -49
- llama_cloud/types/azure_open_ai_embedding_config.py +0 -34
- llama_cloud/types/base_plan.py +0 -53
- llama_cloud/types/base_plan_metronome_plan_type.py +0 -17
- llama_cloud/types/base_plan_name.py +0 -57
- llama_cloud/types/base_plan_plan_frequency.py +0 -25
- llama_cloud/types/batch.py +0 -47
- llama_cloud/types/batch_item.py +0 -40
- llama_cloud/types/batch_paginated_list.py +0 -35
- llama_cloud/types/batch_public_output.py +0 -36
- llama_cloud/types/billing_period.py +0 -32
- llama_cloud/types/box_auth_mechanism.py +0 -17
- llama_cloud/types/character_chunking_config.py +0 -32
- llama_cloud/types/chat_app.py +0 -46
- llama_cloud/types/chat_app_response.py +0 -43
- llama_cloud/types/chat_data.py +0 -35
- llama_cloud/types/chat_message.py +0 -43
- llama_cloud/types/chunk_mode.py +0 -29
- llama_cloud/types/classification_result.py +0 -39
- llama_cloud/types/classifier_rule.py +0 -43
- llama_cloud/types/classify_job.py +0 -47
- llama_cloud/types/classify_job_results.py +0 -38
- llama_cloud/types/classify_parsing_configuration.py +0 -38
- llama_cloud/types/cloud_astra_db_vector_store.py +0 -51
- llama_cloud/types/cloud_az_storage_blob_data_source.py +0 -41
- llama_cloud/types/cloud_azure_ai_search_vector_store.py +0 -45
- llama_cloud/types/cloud_box_data_source.py +0 -42
- llama_cloud/types/cloud_confluence_data_source.py +0 -59
- llama_cloud/types/cloud_document.py +0 -40
- llama_cloud/types/cloud_document_create.py +0 -40
- llama_cloud/types/cloud_jira_data_source.py +0 -42
- llama_cloud/types/cloud_jira_data_source_v_2.py +0 -52
- llama_cloud/types/cloud_jira_data_source_v_2_api_version.py +0 -21
- llama_cloud/types/cloud_milvus_vector_store.py +0 -40
- llama_cloud/types/cloud_mongo_db_atlas_vector_search.py +0 -52
- llama_cloud/types/cloud_notion_page_data_source.py +0 -35
- llama_cloud/types/cloud_one_drive_data_source.py +0 -39
- llama_cloud/types/cloud_pinecone_vector_store.py +0 -49
- llama_cloud/types/cloud_postgres_vector_store.py +0 -44
- llama_cloud/types/cloud_qdrant_vector_store.py +0 -51
- llama_cloud/types/cloud_s_3_data_source.py +0 -39
- llama_cloud/types/cloud_sharepoint_data_source.py +0 -42
- llama_cloud/types/cloud_slack_data_source.py +0 -39
- llama_cloud/types/composite_retrieved_text_node.py +0 -42
- llama_cloud/types/composite_retrieved_text_node_with_score.py +0 -34
- llama_cloud/types/configurable_data_sink_names.py +0 -41
- llama_cloud/types/configurable_data_source_names.py +0 -57
- llama_cloud/types/credit_type.py +0 -32
- llama_cloud/types/data_sink_component.py +0 -22
- llama_cloud/types/data_sink_create.py +0 -39
- llama_cloud/types/data_sink_create_component.py +0 -22
- llama_cloud/types/data_source_component.py +0 -28
- llama_cloud/types/data_source_create.py +0 -41
- llama_cloud/types/data_source_create_component.py +0 -28
- llama_cloud/types/data_source_create_custom_metadata_value.py +0 -7
- llama_cloud/types/data_source_custom_metadata_value.py +0 -7
- llama_cloud/types/data_source_reader_version_metadata_reader_version.py +0 -25
- llama_cloud/types/data_source_update_dispatcher_config.py +0 -38
- llama_cloud/types/delete_params.py +0 -39
- llama_cloud/types/document_chunk_mode.py +0 -17
- llama_cloud/types/document_ingestion_job_params.py +0 -43
- llama_cloud/types/element_segmentation_config.py +0 -29
- llama_cloud/types/embedding_model_config.py +0 -43
- llama_cloud/types/embedding_model_config_embedding_config.py +0 -89
- llama_cloud/types/embedding_model_config_update.py +0 -33
- llama_cloud/types/embedding_model_config_update_embedding_config.py +0 -89
- llama_cloud/types/eval_execution_params.py +0 -41
- llama_cloud/types/extract_agent.py +0 -48
- llama_cloud/types/extract_agent_data_schema_value.py +0 -5
- llama_cloud/types/extract_config.py +0 -66
- llama_cloud/types/extract_config_priority.py +0 -29
- llama_cloud/types/extract_job.py +0 -38
- llama_cloud/types/extract_job_create.py +0 -46
- llama_cloud/types/extract_job_create_data_schema_override.py +0 -9
- llama_cloud/types/extract_job_create_data_schema_override_zero_value.py +0 -7
- llama_cloud/types/extract_job_create_priority.py +0 -29
- llama_cloud/types/extract_mode.py +0 -29
- llama_cloud/types/extract_models.py +0 -53
- llama_cloud/types/extract_resultset.py +0 -42
- llama_cloud/types/extract_resultset_data.py +0 -11
- llama_cloud/types/extract_resultset_data_item_value.py +0 -7
- llama_cloud/types/extract_resultset_data_zero_value.py +0 -7
- llama_cloud/types/extract_resultset_extraction_metadata_value.py +0 -7
- llama_cloud/types/extract_run.py +0 -55
- llama_cloud/types/extract_run_data.py +0 -11
- llama_cloud/types/extract_run_data_item_value.py +0 -5
- llama_cloud/types/extract_run_data_schema_value.py +0 -5
- llama_cloud/types/extract_run_data_zero_value.py +0 -5
- llama_cloud/types/extract_run_extraction_metadata_value.py +0 -7
- llama_cloud/types/extract_schema_generate_response.py +0 -38
- llama_cloud/types/extract_schema_generate_response_data_schema_value.py +0 -7
- llama_cloud/types/extract_schema_validate_response.py +0 -32
- llama_cloud/types/extract_schema_validate_response_data_schema_value.py +0 -7
- llama_cloud/types/extract_state.py +0 -29
- llama_cloud/types/extract_target.py +0 -17
- llama_cloud/types/failure_handling_config.py +0 -37
- llama_cloud/types/file_classification.py +0 -41
- llama_cloud/types/file_count_by_status_response.py +0 -37
- llama_cloud/types/file_create.py +0 -41
- llama_cloud/types/file_create_permission_info_value.py +0 -7
- llama_cloud/types/file_create_resource_info_value.py +0 -5
- llama_cloud/types/file_data.py +0 -36
- llama_cloud/types/file_filter.py +0 -40
- llama_cloud/types/file_id_presigned_url.py +0 -38
- llama_cloud/types/file_parse_public.py +0 -36
- llama_cloud/types/file_permission_info_value.py +0 -5
- llama_cloud/types/file_resource_info_value.py +0 -5
- llama_cloud/types/file_store_info_response.py +0 -34
- llama_cloud/types/file_store_info_response_status.py +0 -25
- llama_cloud/types/filter_condition.py +0 -29
- llama_cloud/types/filter_operation.py +0 -46
- llama_cloud/types/filter_operation_eq.py +0 -6
- llama_cloud/types/filter_operation_gt.py +0 -6
- llama_cloud/types/filter_operation_gte.py +0 -6
- llama_cloud/types/filter_operation_includes_item.py +0 -6
- llama_cloud/types/filter_operation_lt.py +0 -6
- llama_cloud/types/filter_operation_lte.py +0 -6
- llama_cloud/types/filter_operator.py +0 -73
- llama_cloud/types/free_credits_usage.py +0 -34
- llama_cloud/types/http_validation_error.py +0 -32
- llama_cloud/types/hugging_face_inference_api_embedding_token.py +0 -5
- llama_cloud/types/ingestion_error_response.py +0 -34
- llama_cloud/types/input_message.py +0 -40
- llama_cloud/types/job_name_mapping.py +0 -49
- llama_cloud/types/job_names.py +0 -81
- llama_cloud/types/job_record.py +0 -58
- llama_cloud/types/job_record_parameters.py +0 -111
- llama_cloud/types/job_record_with_usage_metrics.py +0 -36
- llama_cloud/types/l_lama_parse_transform_config.py +0 -37
- llama_cloud/types/legacy_parse_job_config.py +0 -207
- llama_cloud/types/license_info_response.py +0 -34
- llama_cloud/types/llama_extract_feature_availability.py +0 -34
- llama_cloud/types/llama_extract_mode_availability.py +0 -38
- llama_cloud/types/llama_extract_mode_availability_status.py +0 -17
- llama_cloud/types/llama_extract_settings.py +0 -67
- llama_cloud/types/llama_parse_parameters_priority.py +0 -29
- llama_cloud/types/llm_model_data.py +0 -38
- llama_cloud/types/llm_parameters.py +0 -39
- llama_cloud/types/load_files_job_config.py +0 -35
- llama_cloud/types/managed_ingestion_status.py +0 -41
- llama_cloud/types/managed_open_ai_embedding.py +0 -36
- llama_cloud/types/managed_open_ai_embedding_config.py +0 -34
- llama_cloud/types/message_annotation.py +0 -33
- llama_cloud/types/metadata_filter.py +0 -44
- llama_cloud/types/metadata_filter_value.py +0 -5
- llama_cloud/types/metadata_filters_filters_item.py +0 -8
- llama_cloud/types/multimodal_parse_resolution.py +0 -17
- llama_cloud/types/node_relationship.py +0 -44
- llama_cloud/types/none_chunking_config.py +0 -29
- llama_cloud/types/none_segmentation_config.py +0 -29
- llama_cloud/types/object_type.py +0 -33
- llama_cloud/types/open_ai_embedding.py +0 -47
- llama_cloud/types/open_ai_embedding_config.py +0 -34
- llama_cloud/types/organization.py +0 -43
- llama_cloud/types/organization_create.py +0 -35
- llama_cloud/types/page_figure_metadata.py +0 -37
- llama_cloud/types/page_screenshot_metadata.py +0 -34
- llama_cloud/types/page_segmentation_config.py +0 -31
- llama_cloud/types/paginated_extract_runs_response.py +0 -39
- llama_cloud/types/paginated_jobs_history_with_metrics.py +0 -35
- llama_cloud/types/paginated_list_cloud_documents_response.py +0 -35
- llama_cloud/types/paginated_list_pipeline_files_response.py +0 -35
- llama_cloud/types/paginated_response_agent_data.py +0 -34
- llama_cloud/types/paginated_response_aggregate_group.py +0 -34
- llama_cloud/types/paginated_response_classify_job.py +0 -34
- llama_cloud/types/paginated_response_quota_configuration.py +0 -36
- llama_cloud/types/parse_configuration.py +0 -44
- llama_cloud/types/parse_configuration_create.py +0 -41
- llama_cloud/types/parse_configuration_filter.py +0 -40
- llama_cloud/types/parse_configuration_query_response.py +0 -38
- llama_cloud/types/parse_job_config.py +0 -149
- llama_cloud/types/parse_job_config_priority.py +0 -29
- llama_cloud/types/parse_plan_level.py +0 -21
- llama_cloud/types/parser_languages.py +0 -361
- llama_cloud/types/parsing_history_item.py +0 -39
- llama_cloud/types/parsing_job.py +0 -35
- llama_cloud/types/parsing_job_json_result.py +0 -32
- llama_cloud/types/parsing_job_markdown_result.py +0 -32
- llama_cloud/types/parsing_job_structured_result.py +0 -32
- llama_cloud/types/parsing_job_text_result.py +0 -32
- llama_cloud/types/partition_names.py +0 -45
- llama_cloud/types/permission.py +0 -40
- llama_cloud/types/pg_vector_distance_method.py +0 -43
- llama_cloud/types/pg_vector_hnsw_settings.py +0 -45
- llama_cloud/types/pg_vector_vector_type.py +0 -35
- llama_cloud/types/pipeline_configuration_hashes.py +0 -37
- llama_cloud/types/pipeline_create.py +0 -65
- llama_cloud/types/pipeline_create_embedding_config.py +0 -89
- llama_cloud/types/pipeline_create_transform_config.py +0 -8
- llama_cloud/types/pipeline_data_source.py +0 -55
- llama_cloud/types/pipeline_data_source_component.py +0 -28
- llama_cloud/types/pipeline_data_source_create.py +0 -36
- llama_cloud/types/pipeline_data_source_custom_metadata_value.py +0 -7
- llama_cloud/types/pipeline_data_source_status.py +0 -33
- llama_cloud/types/pipeline_deployment.py +0 -37
- llama_cloud/types/pipeline_embedding_config.py +0 -100
- llama_cloud/types/pipeline_file.py +0 -58
- llama_cloud/types/pipeline_file_config_hash_value.py +0 -5
- llama_cloud/types/pipeline_file_create.py +0 -37
- llama_cloud/types/pipeline_file_create_custom_metadata_value.py +0 -7
- llama_cloud/types/pipeline_file_custom_metadata_value.py +0 -7
- llama_cloud/types/pipeline_file_permission_info_value.py +0 -7
- llama_cloud/types/pipeline_file_resource_info_value.py +0 -7
- llama_cloud/types/pipeline_file_status.py +0 -33
- llama_cloud/types/pipeline_file_update_dispatcher_config.py +0 -38
- llama_cloud/types/pipeline_file_updater_config.py +0 -44
- llama_cloud/types/pipeline_managed_ingestion_job_params.py +0 -37
- llama_cloud/types/pipeline_status.py +0 -17
- llama_cloud/types/pipeline_transform_config.py +0 -31
- llama_cloud/types/plan_limits.py +0 -53
- llama_cloud/types/playground_session.py +0 -51
- llama_cloud/types/pooling.py +0 -29
- llama_cloud/types/preset_composite_retrieval_params.py +0 -37
- llama_cloud/types/preset_retrieval_params_search_filters_inference_schema_value.py +0 -7
- llama_cloud/types/project_create.py +0 -35
- llama_cloud/types/prompt_conf.py +0 -38
- llama_cloud/types/public_model_name.py +0 -97
- llama_cloud/types/quota_configuration.py +0 -53
- llama_cloud/types/quota_configuration_configuration_type.py +0 -33
- llama_cloud/types/quota_configuration_status.py +0 -21
- llama_cloud/types/quota_rate_limit_configuration_value.py +0 -38
- llama_cloud/types/quota_rate_limit_configuration_value_denominator_units.py +0 -29
- llama_cloud/types/re_rank_config.py +0 -35
- llama_cloud/types/re_ranker_type.py +0 -41
- llama_cloud/types/recurring_credit_grant.py +0 -44
- llama_cloud/types/related_node_info.py +0 -36
- llama_cloud/types/related_node_info_node_type.py +0 -7
- llama_cloud/types/retrieve_results.py +0 -56
- llama_cloud/types/retriever_create.py +0 -37
- llama_cloud/types/role.py +0 -40
- llama_cloud/types/schema_generation_availability.py +0 -33
- llama_cloud/types/schema_generation_availability_status.py +0 -17
- llama_cloud/types/schema_relax_mode.py +0 -25
- llama_cloud/types/semantic_chunking_config.py +0 -32
- llama_cloud/types/sentence_chunking_config.py +0 -34
- llama_cloud/types/sparse_model_type.py +0 -33
- llama_cloud/types/struct_mode.py +0 -33
- llama_cloud/types/struct_parse_conf.py +0 -63
- llama_cloud/types/supported_llm_model.py +0 -40
- llama_cloud/types/supported_llm_model_names.py +0 -69
- llama_cloud/types/text_node.py +0 -67
- llama_cloud/types/text_node_relationships_value.py +0 -7
- llama_cloud/types/text_node_with_score.py +0 -39
- llama_cloud/types/token_chunking_config.py +0 -33
- llama_cloud/types/update_user_response.py +0 -33
- llama_cloud/types/usage_and_plan.py +0 -34
- llama_cloud/types/usage_metric_response.py +0 -34
- llama_cloud/types/usage_response.py +0 -43
- llama_cloud/types/usage_response_active_alerts_item.py +0 -37
- llama_cloud/types/user_job_record.py +0 -32
- llama_cloud/types/user_organization.py +0 -47
- llama_cloud/types/user_organization_create.py +0 -38
- llama_cloud/types/user_organization_delete.py +0 -37
- llama_cloud/types/user_organization_role.py +0 -42
- llama_cloud/types/user_summary.py +0 -38
- llama_cloud/types/validation_error.py +0 -34
- llama_cloud/types/validation_error_loc_item.py +0 -5
- llama_cloud/types/vertex_embedding_mode.py +0 -38
- llama_cloud/types/webhook_configuration.py +0 -39
- llama_cloud/types/webhook_configuration_webhook_events_item.py +0 -57
- llama_cloud-0.1.41.dist-info/LICENSE +0 -21
- llama_cloud-0.1.41.dist-info/METADATA +0 -106
- llama_cloud-0.1.41.dist-info/RECORD +0 -385
|
@@ -1,2664 +0,0 @@
|
|
|
1
|
-
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
-
|
|
3
|
-
import typing
|
|
4
|
-
import urllib.parse
|
|
5
|
-
from json.decoder import JSONDecodeError
|
|
6
|
-
|
|
7
|
-
import typing_extensions
|
|
8
|
-
|
|
9
|
-
from ...core.api_error import ApiError
|
|
10
|
-
from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
|
|
11
|
-
from ...core.jsonable_encoder import jsonable_encoder
|
|
12
|
-
from ...core.remove_none_from_dict import remove_none_from_dict
|
|
13
|
-
from ...errors.unprocessable_entity_error import UnprocessableEntityError
|
|
14
|
-
from ...types.agent_data import AgentData
|
|
15
|
-
from ...types.batch import Batch
|
|
16
|
-
from ...types.batch_paginated_list import BatchPaginatedList
|
|
17
|
-
from ...types.batch_public_output import BatchPublicOutput
|
|
18
|
-
from ...types.file import File
|
|
19
|
-
from ...types.file_create import FileCreate
|
|
20
|
-
from ...types.file_filter import FileFilter
|
|
21
|
-
from ...types.file_query_response import FileQueryResponse
|
|
22
|
-
from ...types.filter_operation import FilterOperation
|
|
23
|
-
from ...types.http_validation_error import HttpValidationError
|
|
24
|
-
from ...types.llama_parse_parameters import LlamaParseParameters
|
|
25
|
-
from ...types.paginated_response_agent_data import PaginatedResponseAgentData
|
|
26
|
-
from ...types.paginated_response_aggregate_group import PaginatedResponseAggregateGroup
|
|
27
|
-
from ...types.paginated_response_quota_configuration import PaginatedResponseQuotaConfiguration
|
|
28
|
-
from ...types.parse_configuration import ParseConfiguration
|
|
29
|
-
from ...types.parse_configuration_create import ParseConfigurationCreate
|
|
30
|
-
from ...types.parse_configuration_filter import ParseConfigurationFilter
|
|
31
|
-
from ...types.parse_configuration_query_response import ParseConfigurationQueryResponse
|
|
32
|
-
|
|
33
|
-
try:
|
|
34
|
-
import pydantic
|
|
35
|
-
if pydantic.__version__.startswith("1."):
|
|
36
|
-
raise ImportError
|
|
37
|
-
import pydantic.v1 as pydantic # type: ignore
|
|
38
|
-
except ImportError:
|
|
39
|
-
import pydantic # type: ignore
|
|
40
|
-
|
|
41
|
-
# this is used as the default value for optional parameters
|
|
42
|
-
OMIT = typing.cast(typing.Any, ...)
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
class BetaClient:
|
|
46
|
-
def __init__(self, *, client_wrapper: SyncClientWrapper):
|
|
47
|
-
self._client_wrapper = client_wrapper
|
|
48
|
-
|
|
49
|
-
def list_batches(
|
|
50
|
-
self,
|
|
51
|
-
*,
|
|
52
|
-
limit: typing.Optional[int] = None,
|
|
53
|
-
offset: typing.Optional[int] = None,
|
|
54
|
-
project_id: typing.Optional[str] = None,
|
|
55
|
-
organization_id: typing.Optional[str] = None,
|
|
56
|
-
) -> BatchPaginatedList:
|
|
57
|
-
"""
|
|
58
|
-
Parameters:
|
|
59
|
-
- limit: typing.Optional[int].
|
|
60
|
-
|
|
61
|
-
- offset: typing.Optional[int].
|
|
62
|
-
|
|
63
|
-
- project_id: typing.Optional[str].
|
|
64
|
-
|
|
65
|
-
- organization_id: typing.Optional[str].
|
|
66
|
-
---
|
|
67
|
-
from llama_cloud.client import LlamaCloud
|
|
68
|
-
|
|
69
|
-
client = LlamaCloud(
|
|
70
|
-
token="YOUR_TOKEN",
|
|
71
|
-
)
|
|
72
|
-
client.beta.list_batches()
|
|
73
|
-
"""
|
|
74
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
75
|
-
"GET",
|
|
76
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/batches"),
|
|
77
|
-
params=remove_none_from_dict(
|
|
78
|
-
{"limit": limit, "offset": offset, "project_id": project_id, "organization_id": organization_id}
|
|
79
|
-
),
|
|
80
|
-
headers=self._client_wrapper.get_headers(),
|
|
81
|
-
timeout=60,
|
|
82
|
-
)
|
|
83
|
-
if 200 <= _response.status_code < 300:
|
|
84
|
-
return pydantic.parse_obj_as(BatchPaginatedList, _response.json()) # type: ignore
|
|
85
|
-
if _response.status_code == 422:
|
|
86
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
87
|
-
try:
|
|
88
|
-
_response_json = _response.json()
|
|
89
|
-
except JSONDecodeError:
|
|
90
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
91
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
92
|
-
|
|
93
|
-
def create_batch(
|
|
94
|
-
self,
|
|
95
|
-
*,
|
|
96
|
-
organization_id: typing.Optional[str] = None,
|
|
97
|
-
project_id: typing.Optional[str] = None,
|
|
98
|
-
tool: str,
|
|
99
|
-
tool_data: typing.Optional[LlamaParseParameters] = OMIT,
|
|
100
|
-
input_type: str,
|
|
101
|
-
input_id: str,
|
|
102
|
-
output_type: typing.Optional[str] = OMIT,
|
|
103
|
-
output_id: typing.Optional[str] = OMIT,
|
|
104
|
-
batch_create_project_id: str,
|
|
105
|
-
external_id: str,
|
|
106
|
-
completion_window: typing.Optional[int] = OMIT,
|
|
107
|
-
) -> Batch:
|
|
108
|
-
"""
|
|
109
|
-
Parameters:
|
|
110
|
-
- organization_id: typing.Optional[str].
|
|
111
|
-
|
|
112
|
-
- project_id: typing.Optional[str].
|
|
113
|
-
|
|
114
|
-
- tool: str. The tool to be used for all requests in the batch.
|
|
115
|
-
|
|
116
|
-
- tool_data: typing.Optional[LlamaParseParameters].
|
|
117
|
-
|
|
118
|
-
- input_type: str. The type of input file. Currently only 'datasource' is supported.
|
|
119
|
-
|
|
120
|
-
- input_id: str. The ID of the input file for the batch.
|
|
121
|
-
|
|
122
|
-
- output_type: typing.Optional[str].
|
|
123
|
-
|
|
124
|
-
- output_id: typing.Optional[str].
|
|
125
|
-
|
|
126
|
-
- batch_create_project_id: str. The ID of the project to which the batch belongs
|
|
127
|
-
|
|
128
|
-
- external_id: str. A developer-provided ID for the batch. This ID will be returned in the response.
|
|
129
|
-
|
|
130
|
-
- completion_window: typing.Optional[int]. The time frame within which the batch should be processed. Currently only 24h is supported.
|
|
131
|
-
---
|
|
132
|
-
from llama_cloud import (
|
|
133
|
-
FailPageMode,
|
|
134
|
-
LlamaParseParameters,
|
|
135
|
-
LlamaParseParametersPriority,
|
|
136
|
-
ParsingMode,
|
|
137
|
-
)
|
|
138
|
-
from llama_cloud.client import LlamaCloud
|
|
139
|
-
|
|
140
|
-
client = LlamaCloud(
|
|
141
|
-
token="YOUR_TOKEN",
|
|
142
|
-
)
|
|
143
|
-
client.beta.create_batch(
|
|
144
|
-
tool="string",
|
|
145
|
-
tool_data=LlamaParseParameters(
|
|
146
|
-
priority=LlamaParseParametersPriority.LOW,
|
|
147
|
-
parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
|
|
148
|
-
replace_failed_page_mode=FailPageMode.RAW_TEXT,
|
|
149
|
-
),
|
|
150
|
-
input_type="string",
|
|
151
|
-
input_id="string",
|
|
152
|
-
batch_create_project_id="string",
|
|
153
|
-
external_id="string",
|
|
154
|
-
)
|
|
155
|
-
"""
|
|
156
|
-
_request: typing.Dict[str, typing.Any] = {
|
|
157
|
-
"tool": tool,
|
|
158
|
-
"input_type": input_type,
|
|
159
|
-
"input_id": input_id,
|
|
160
|
-
"project_id": batch_create_project_id,
|
|
161
|
-
"external_id": external_id,
|
|
162
|
-
}
|
|
163
|
-
if tool_data is not OMIT:
|
|
164
|
-
_request["tool_data"] = tool_data
|
|
165
|
-
if output_type is not OMIT:
|
|
166
|
-
_request["output_type"] = output_type
|
|
167
|
-
if output_id is not OMIT:
|
|
168
|
-
_request["output_id"] = output_id
|
|
169
|
-
if completion_window is not OMIT:
|
|
170
|
-
_request["completion_window"] = completion_window
|
|
171
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
172
|
-
"POST",
|
|
173
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/batches"),
|
|
174
|
-
params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
|
|
175
|
-
json=jsonable_encoder(_request),
|
|
176
|
-
headers=self._client_wrapper.get_headers(),
|
|
177
|
-
timeout=60,
|
|
178
|
-
)
|
|
179
|
-
if 200 <= _response.status_code < 300:
|
|
180
|
-
return pydantic.parse_obj_as(Batch, _response.json()) # type: ignore
|
|
181
|
-
if _response.status_code == 422:
|
|
182
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
183
|
-
try:
|
|
184
|
-
_response_json = _response.json()
|
|
185
|
-
except JSONDecodeError:
|
|
186
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
187
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
188
|
-
|
|
189
|
-
def get_batch(self, batch_id: str, *, organization_id: typing.Optional[str] = None) -> BatchPublicOutput:
|
|
190
|
-
"""
|
|
191
|
-
Parameters:
|
|
192
|
-
- batch_id: str.
|
|
193
|
-
|
|
194
|
-
- organization_id: typing.Optional[str].
|
|
195
|
-
---
|
|
196
|
-
from llama_cloud.client import LlamaCloud
|
|
197
|
-
|
|
198
|
-
client = LlamaCloud(
|
|
199
|
-
token="YOUR_TOKEN",
|
|
200
|
-
)
|
|
201
|
-
client.beta.get_batch(
|
|
202
|
-
batch_id="string",
|
|
203
|
-
)
|
|
204
|
-
"""
|
|
205
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
206
|
-
"GET",
|
|
207
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/batches/{batch_id}"),
|
|
208
|
-
params=remove_none_from_dict({"organization_id": organization_id}),
|
|
209
|
-
headers=self._client_wrapper.get_headers(),
|
|
210
|
-
timeout=60,
|
|
211
|
-
)
|
|
212
|
-
if 200 <= _response.status_code < 300:
|
|
213
|
-
return pydantic.parse_obj_as(BatchPublicOutput, _response.json()) # type: ignore
|
|
214
|
-
if _response.status_code == 422:
|
|
215
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
216
|
-
try:
|
|
217
|
-
_response_json = _response.json()
|
|
218
|
-
except JSONDecodeError:
|
|
219
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
220
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
221
|
-
|
|
222
|
-
def get_agent_data(
|
|
223
|
-
self, item_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
|
|
224
|
-
) -> AgentData:
|
|
225
|
-
"""
|
|
226
|
-
Get agent data by ID.
|
|
227
|
-
|
|
228
|
-
Parameters:
|
|
229
|
-
- item_id: str.
|
|
230
|
-
|
|
231
|
-
- project_id: typing.Optional[str].
|
|
232
|
-
|
|
233
|
-
- organization_id: typing.Optional[str].
|
|
234
|
-
---
|
|
235
|
-
from llama_cloud.client import LlamaCloud
|
|
236
|
-
|
|
237
|
-
client = LlamaCloud(
|
|
238
|
-
token="YOUR_TOKEN",
|
|
239
|
-
)
|
|
240
|
-
client.beta.get_agent_data(
|
|
241
|
-
item_id="string",
|
|
242
|
-
)
|
|
243
|
-
"""
|
|
244
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
245
|
-
"GET",
|
|
246
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/agent-data/{item_id}"),
|
|
247
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
248
|
-
headers=self._client_wrapper.get_headers(),
|
|
249
|
-
timeout=60,
|
|
250
|
-
)
|
|
251
|
-
if 200 <= _response.status_code < 300:
|
|
252
|
-
return pydantic.parse_obj_as(AgentData, _response.json()) # type: ignore
|
|
253
|
-
if _response.status_code == 422:
|
|
254
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
255
|
-
try:
|
|
256
|
-
_response_json = _response.json()
|
|
257
|
-
except JSONDecodeError:
|
|
258
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
259
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
260
|
-
|
|
261
|
-
def update_agent_data(
|
|
262
|
-
self,
|
|
263
|
-
item_id: str,
|
|
264
|
-
*,
|
|
265
|
-
project_id: typing.Optional[str] = None,
|
|
266
|
-
organization_id: typing.Optional[str] = None,
|
|
267
|
-
data: typing.Dict[str, typing.Any],
|
|
268
|
-
) -> AgentData:
|
|
269
|
-
"""
|
|
270
|
-
Update agent data by ID (overwrites).
|
|
271
|
-
|
|
272
|
-
Parameters:
|
|
273
|
-
- item_id: str.
|
|
274
|
-
|
|
275
|
-
- project_id: typing.Optional[str].
|
|
276
|
-
|
|
277
|
-
- organization_id: typing.Optional[str].
|
|
278
|
-
|
|
279
|
-
- data: typing.Dict[str, typing.Any].
|
|
280
|
-
---
|
|
281
|
-
from llama_cloud.client import LlamaCloud
|
|
282
|
-
|
|
283
|
-
client = LlamaCloud(
|
|
284
|
-
token="YOUR_TOKEN",
|
|
285
|
-
)
|
|
286
|
-
client.beta.update_agent_data(
|
|
287
|
-
item_id="string",
|
|
288
|
-
data={"string": {}},
|
|
289
|
-
)
|
|
290
|
-
"""
|
|
291
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
292
|
-
"PUT",
|
|
293
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/agent-data/{item_id}"),
|
|
294
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
295
|
-
json=jsonable_encoder({"data": data}),
|
|
296
|
-
headers=self._client_wrapper.get_headers(),
|
|
297
|
-
timeout=60,
|
|
298
|
-
)
|
|
299
|
-
if 200 <= _response.status_code < 300:
|
|
300
|
-
return pydantic.parse_obj_as(AgentData, _response.json()) # type: ignore
|
|
301
|
-
if _response.status_code == 422:
|
|
302
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
303
|
-
try:
|
|
304
|
-
_response_json = _response.json()
|
|
305
|
-
except JSONDecodeError:
|
|
306
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
307
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
308
|
-
|
|
309
|
-
def delete_agent_data(
|
|
310
|
-
self, item_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
|
|
311
|
-
) -> typing.Dict[str, str]:
|
|
312
|
-
"""
|
|
313
|
-
Delete agent data by ID.
|
|
314
|
-
|
|
315
|
-
Parameters:
|
|
316
|
-
- item_id: str.
|
|
317
|
-
|
|
318
|
-
- project_id: typing.Optional[str].
|
|
319
|
-
|
|
320
|
-
- organization_id: typing.Optional[str].
|
|
321
|
-
---
|
|
322
|
-
from llama_cloud.client import LlamaCloud
|
|
323
|
-
|
|
324
|
-
client = LlamaCloud(
|
|
325
|
-
token="YOUR_TOKEN",
|
|
326
|
-
)
|
|
327
|
-
client.beta.delete_agent_data(
|
|
328
|
-
item_id="string",
|
|
329
|
-
)
|
|
330
|
-
"""
|
|
331
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
332
|
-
"DELETE",
|
|
333
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/agent-data/{item_id}"),
|
|
334
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
335
|
-
headers=self._client_wrapper.get_headers(),
|
|
336
|
-
timeout=60,
|
|
337
|
-
)
|
|
338
|
-
if 200 <= _response.status_code < 300:
|
|
339
|
-
return pydantic.parse_obj_as(typing.Dict[str, str], _response.json()) # type: ignore
|
|
340
|
-
if _response.status_code == 422:
|
|
341
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
342
|
-
try:
|
|
343
|
-
_response_json = _response.json()
|
|
344
|
-
except JSONDecodeError:
|
|
345
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
346
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
347
|
-
|
|
348
|
-
def create_agent_data(
|
|
349
|
-
self,
|
|
350
|
-
*,
|
|
351
|
-
project_id: typing.Optional[str] = None,
|
|
352
|
-
organization_id: typing.Optional[str] = None,
|
|
353
|
-
agent_slug: str,
|
|
354
|
-
collection: typing.Optional[str] = OMIT,
|
|
355
|
-
data: typing.Dict[str, typing.Any],
|
|
356
|
-
) -> AgentData:
|
|
357
|
-
"""
|
|
358
|
-
Create new agent data.
|
|
359
|
-
|
|
360
|
-
Parameters:
|
|
361
|
-
- project_id: typing.Optional[str].
|
|
362
|
-
|
|
363
|
-
- organization_id: typing.Optional[str].
|
|
364
|
-
|
|
365
|
-
- agent_slug: str.
|
|
366
|
-
|
|
367
|
-
- collection: typing.Optional[str].
|
|
368
|
-
|
|
369
|
-
- data: typing.Dict[str, typing.Any].
|
|
370
|
-
---
|
|
371
|
-
from llama_cloud.client import LlamaCloud
|
|
372
|
-
|
|
373
|
-
client = LlamaCloud(
|
|
374
|
-
token="YOUR_TOKEN",
|
|
375
|
-
)
|
|
376
|
-
client.beta.create_agent_data(
|
|
377
|
-
agent_slug="string",
|
|
378
|
-
data={"string": {}},
|
|
379
|
-
)
|
|
380
|
-
"""
|
|
381
|
-
_request: typing.Dict[str, typing.Any] = {"agent_slug": agent_slug, "data": data}
|
|
382
|
-
if collection is not OMIT:
|
|
383
|
-
_request["collection"] = collection
|
|
384
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
385
|
-
"POST",
|
|
386
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/agent-data"),
|
|
387
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
388
|
-
json=jsonable_encoder(_request),
|
|
389
|
-
headers=self._client_wrapper.get_headers(),
|
|
390
|
-
timeout=60,
|
|
391
|
-
)
|
|
392
|
-
if 200 <= _response.status_code < 300:
|
|
393
|
-
return pydantic.parse_obj_as(AgentData, _response.json()) # type: ignore
|
|
394
|
-
if _response.status_code == 422:
|
|
395
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
396
|
-
try:
|
|
397
|
-
_response_json = _response.json()
|
|
398
|
-
except JSONDecodeError:
|
|
399
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
400
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
401
|
-
|
|
402
|
-
def search_agent_data_api_v_1_beta_agent_data_search_post(
|
|
403
|
-
self,
|
|
404
|
-
*,
|
|
405
|
-
project_id: typing.Optional[str] = None,
|
|
406
|
-
organization_id: typing.Optional[str] = None,
|
|
407
|
-
page_size: typing.Optional[int] = OMIT,
|
|
408
|
-
page_token: typing.Optional[str] = OMIT,
|
|
409
|
-
filter: typing.Optional[typing.Dict[str, typing.Optional[FilterOperation]]] = OMIT,
|
|
410
|
-
order_by: typing.Optional[str] = OMIT,
|
|
411
|
-
agent_slug: str,
|
|
412
|
-
collection: typing.Optional[str] = OMIT,
|
|
413
|
-
include_total: typing.Optional[bool] = OMIT,
|
|
414
|
-
offset: typing.Optional[int] = OMIT,
|
|
415
|
-
) -> PaginatedResponseAgentData:
|
|
416
|
-
"""
|
|
417
|
-
Search agent data with filtering, sorting, and pagination.
|
|
418
|
-
|
|
419
|
-
Parameters:
|
|
420
|
-
- project_id: typing.Optional[str].
|
|
421
|
-
|
|
422
|
-
- organization_id: typing.Optional[str].
|
|
423
|
-
|
|
424
|
-
- page_size: typing.Optional[int].
|
|
425
|
-
|
|
426
|
-
- page_token: typing.Optional[str].
|
|
427
|
-
|
|
428
|
-
- filter: typing.Optional[typing.Dict[str, typing.Optional[FilterOperation]]].
|
|
429
|
-
|
|
430
|
-
- order_by: typing.Optional[str].
|
|
431
|
-
|
|
432
|
-
- agent_slug: str. The agent deployment's agent_slug to search within
|
|
433
|
-
|
|
434
|
-
- collection: typing.Optional[str]. The logical agent data collection to search within
|
|
435
|
-
|
|
436
|
-
- include_total: typing.Optional[bool]. Whether to include the total number of items in the response
|
|
437
|
-
|
|
438
|
-
- offset: typing.Optional[int].
|
|
439
|
-
---
|
|
440
|
-
from llama_cloud.client import LlamaCloud
|
|
441
|
-
|
|
442
|
-
client = LlamaCloud(
|
|
443
|
-
token="YOUR_TOKEN",
|
|
444
|
-
)
|
|
445
|
-
client.beta.search_agent_data_api_v_1_beta_agent_data_search_post(
|
|
446
|
-
agent_slug="string",
|
|
447
|
-
)
|
|
448
|
-
"""
|
|
449
|
-
_request: typing.Dict[str, typing.Any] = {"agent_slug": agent_slug}
|
|
450
|
-
if page_size is not OMIT:
|
|
451
|
-
_request["page_size"] = page_size
|
|
452
|
-
if page_token is not OMIT:
|
|
453
|
-
_request["page_token"] = page_token
|
|
454
|
-
if filter is not OMIT:
|
|
455
|
-
_request["filter"] = filter
|
|
456
|
-
if order_by is not OMIT:
|
|
457
|
-
_request["order_by"] = order_by
|
|
458
|
-
if collection is not OMIT:
|
|
459
|
-
_request["collection"] = collection
|
|
460
|
-
if include_total is not OMIT:
|
|
461
|
-
_request["include_total"] = include_total
|
|
462
|
-
if offset is not OMIT:
|
|
463
|
-
_request["offset"] = offset
|
|
464
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
465
|
-
"POST",
|
|
466
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/agent-data/:search"),
|
|
467
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
468
|
-
json=jsonable_encoder(_request),
|
|
469
|
-
headers=self._client_wrapper.get_headers(),
|
|
470
|
-
timeout=60,
|
|
471
|
-
)
|
|
472
|
-
if 200 <= _response.status_code < 300:
|
|
473
|
-
return pydantic.parse_obj_as(PaginatedResponseAgentData, _response.json()) # type: ignore
|
|
474
|
-
if _response.status_code == 422:
|
|
475
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
476
|
-
try:
|
|
477
|
-
_response_json = _response.json()
|
|
478
|
-
except JSONDecodeError:
|
|
479
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
480
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
481
|
-
|
|
482
|
-
def aggregate_agent_data_api_v_1_beta_agent_data_aggregate_post(
    self,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
    page_size: typing.Optional[int] = OMIT,
    page_token: typing.Optional[str] = OMIT,
    filter: typing.Optional[typing.Dict[str, typing.Optional[FilterOperation]]] = OMIT,
    order_by: typing.Optional[str] = OMIT,
    agent_slug: str,
    collection: typing.Optional[str] = OMIT,
    group_by: typing.Optional[typing.List[str]] = OMIT,
    count: typing.Optional[bool] = OMIT,
    first: typing.Optional[bool] = OMIT,
    offset: typing.Optional[int] = OMIT,
) -> PaginatedResponseAggregateGroup:
    """Aggregate agent data, with optional grouping, counting and first-item retrieval.

    Args:
        project_id: Optional project scope, sent as a query parameter.
        organization_id: Optional organization scope, sent as a query parameter.
        page_size: Maximum number of groups per page.
        page_token: Opaque token for fetching the next page.
        filter: Field-name -> FilterOperation mapping applied server-side.
        order_by: Sort expression for the aggregated groups.
        agent_slug: The agent deployment's agent_slug to aggregate data for (required).
        collection: The logical agent data collection to aggregate data for.
        group_by: Field names to group by.
        count: When true, include per-group counts.
        first: When true, include the first item of each group.
        offset: Number of groups to skip.

    Returns:
        A paginated response of aggregate groups.

    Raises:
        UnprocessableEntityError: On an HTTP 422 validation failure.
        ApiError: On any other non-2xx response.
    """
    payload: typing.Dict[str, typing.Any] = {"agent_slug": agent_slug}
    # OMIT is a sentinel meaning "leave this field out of the request body entirely".
    for field_name, field_value in (
        ("page_size", page_size),
        ("page_token", page_token),
        ("filter", filter),
        ("order_by", order_by),
        ("collection", collection),
        ("group_by", group_by),
        ("count", count),
        ("first", first),
        ("offset", offset),
    ):
        if field_value is not OMIT:
            payload[field_name] = field_value
    endpoint = urllib.parse.urljoin(
        f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/agent-data/:aggregate"
    )
    resp = self._client_wrapper.httpx_client.request(
        "POST",
        endpoint,
        params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
        json=jsonable_encoder(payload),
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if resp.status_code // 100 == 2:
        return pydantic.parse_obj_as(PaginatedResponseAggregateGroup, resp.json())  # type: ignore
    if resp.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, resp.json()))  # type: ignore
    try:
        error_body = resp.json()
    except JSONDecodeError:
        # Body was not JSON; surface the raw text instead.
        raise ApiError(status_code=resp.status_code, body=resp.text)
    raise ApiError(status_code=resp.status_code, body=error_body)
|
|
571
|
-
|
|
572
|
-
def list_quota_configurations(
    self,
    *,
    source_type: typing_extensions.Literal["organization"],
    source_id: str,
    page: typing.Optional[int] = None,
    page_size: typing.Optional[int] = None,
) -> PaginatedResponseQuotaConfiguration:
    """Retrieve a paginated list of quota configurations with optional filtering.

    Args:
        source_type: Kind of quota source; currently only "organization".
        source_id: Identifier of the quota source.
        page: Page number to fetch.
        page_size: Number of items per page.

    Returns:
        A paginated response of quota configurations.

    Raises:
        UnprocessableEntityError: On an HTTP 422 validation failure.
        ApiError: On any other non-2xx response.
    """
    endpoint = urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/quota-management")
    # None-valued params are dropped so the server sees only what was provided.
    query = remove_none_from_dict(
        {"source_type": source_type, "source_id": source_id, "page": page, "page_size": page_size}
    )
    resp = self._client_wrapper.httpx_client.request(
        "GET",
        endpoint,
        params=query,
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if resp.status_code // 100 == 2:
        return pydantic.parse_obj_as(PaginatedResponseQuotaConfiguration, resp.json())  # type: ignore
    if resp.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, resp.json()))  # type: ignore
    try:
        error_body = resp.json()
    except JSONDecodeError:
        raise ApiError(status_code=resp.status_code, body=resp.text)
    raise ApiError(status_code=resp.status_code, body=error_body)
|
|
620
|
-
|
|
621
|
-
def create_file(
    self,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
    request: FileCreate,
) -> File:
    """Create a new file record in the project.

    Args:
        project_id: Optional project scope, sent as a query parameter.
        organization_id: Optional organization scope, sent as a query parameter.
        request: The file creation payload.

    Returns:
        The created file.

    Raises:
        UnprocessableEntityError: On an HTTP 422 validation failure.
        ApiError: On any other non-2xx response.
    """
    endpoint = urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/files")
    resp = self._client_wrapper.httpx_client.request(
        "POST",
        endpoint,
        params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
        json=jsonable_encoder(request),
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if resp.status_code // 100 == 2:
        return pydantic.parse_obj_as(File, resp.json())  # type: ignore
    if resp.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, resp.json()))  # type: ignore
    try:
        error_body = resp.json()
    except JSONDecodeError:
        raise ApiError(status_code=resp.status_code, body=resp.text)
    raise ApiError(status_code=resp.status_code, body=error_body)
|
|
675
|
-
|
|
676
|
-
def upsert_file(
    self,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
    request: FileCreate,
) -> File:
    """Create a file, or update it if one with the same identity already exists.

    Args:
        project_id: Optional project scope, sent as a query parameter.
        organization_id: Optional organization scope, sent as a query parameter.
        request: The file creation/update payload.

    Returns:
        The upserted file.

    Raises:
        UnprocessableEntityError: On an HTTP 422 validation failure.
        ApiError: On any other non-2xx response.
    """
    endpoint = urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/files")
    # PUT (rather than POST) signals upsert semantics to the server.
    resp = self._client_wrapper.httpx_client.request(
        "PUT",
        endpoint,
        params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
        json=jsonable_encoder(request),
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if resp.status_code // 100 == 2:
        return pydantic.parse_obj_as(File, resp.json())  # type: ignore
    if resp.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, resp.json()))  # type: ignore
    try:
        error_body = resp.json()
    except JSONDecodeError:
        raise ApiError(status_code=resp.status_code, body=resp.text)
    raise ApiError(status_code=resp.status_code, body=error_body)
|
|
730
|
-
|
|
731
|
-
def query_files(
    self,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
    page_size: typing.Optional[int] = OMIT,
    page_token: typing.Optional[str] = OMIT,
    filter: typing.Optional[FileFilter] = OMIT,
    order_by: typing.Optional[str] = OMIT,
) -> FileQueryResponse:
    """Query files with flexible filtering and pagination.

    Args:
        project_id: Optional project scope, sent as a query parameter.
        organization_id: Optional organization scope, sent as a query parameter.
        page_size: Maximum number of files per page.
        page_token: Opaque token for fetching the next page.
        filter: Server-side file filter.
        order_by: Sort expression for the result set.

    Returns:
        A paginated response of files.

    Raises:
        UnprocessableEntityError: On an HTTP 422 validation failure.
        ApiError: On any other non-2xx response.
    """
    # Only fields explicitly set by the caller (anything other than OMIT) go in the body.
    payload: typing.Dict[str, typing.Any] = {
        key: value
        for key, value in (
            ("page_size", page_size),
            ("page_token", page_token),
            ("filter", filter),
            ("order_by", order_by),
        )
        if value is not OMIT
    }
    endpoint = urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/files/query")
    resp = self._client_wrapper.httpx_client.request(
        "POST",
        endpoint,
        params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
        json=jsonable_encoder(payload),
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if resp.status_code // 100 == 2:
        return pydantic.parse_obj_as(FileQueryResponse, resp.json())  # type: ignore
    if resp.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, resp.json()))  # type: ignore
    try:
        error_body = resp.json()
    except JSONDecodeError:
        raise ApiError(status_code=resp.status_code, body=resp.text)
    raise ApiError(status_code=resp.status_code, body=error_body)
|
|
801
|
-
|
|
802
|
-
def delete_file(
    self, file_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
) -> None:
    """Delete a single file from the project.

    Args:
        file_id: The ID of the file to delete.
        project_id: Optional project scope, sent as a query parameter.
        organization_id: Optional organization scope, sent as a query parameter.

    Returns:
        None; the server answers 204 No Content on success.

    Raises:
        UnprocessableEntityError: On an HTTP 422 validation failure.
        ApiError: On any other non-2xx response.
    """
    endpoint = urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/files/{file_id}")
    resp = self._client_wrapper.httpx_client.request(
        "DELETE",
        endpoint,
        params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if resp.status_code // 100 == 2:
        # Nothing to parse on success.
        return
    if resp.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, resp.json()))  # type: ignore
    try:
        error_body = resp.json()
    except JSONDecodeError:
        raise ApiError(status_code=resp.status_code, body=resp.text)
    raise ApiError(status_code=resp.status_code, body=error_body)
|
|
848
|
-
|
|
849
|
-
def list_parse_configurations(
    self,
    *,
    page_size: typing.Optional[int] = None,
    page_token: typing.Optional[str] = None,
    name: typing.Optional[str] = None,
    creator: typing.Optional[str] = None,
    version: typing.Optional[str] = None,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
) -> ParseConfigurationQueryResponse:
    """List parse configurations for the current project.

    Args:
        page_size: Number of items per page.
        page_token: Opaque token for fetching the next page.
        name: Filter by configuration name.
        creator: Filter by creator.
        version: Filter by version.
        project_id: Optional project scope, sent as a query parameter.
        organization_id: Optional organization scope, sent as a query parameter.

    Returns:
        A paginated response of parse configurations.

    Raises:
        UnprocessableEntityError: On an HTTP 422 validation failure.
        ApiError: On any other non-2xx response.
    """
    endpoint = urllib.parse.urljoin(
        f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/parse-configurations"
    )
    # None-valued filters are dropped from the query string.
    query = remove_none_from_dict(
        {
            "page_size": page_size,
            "page_token": page_token,
            "name": name,
            "creator": creator,
            "version": version,
            "project_id": project_id,
            "organization_id": organization_id,
        }
    )
    resp = self._client_wrapper.httpx_client.request(
        "GET",
        endpoint,
        params=query,
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if resp.status_code // 100 == 2:
        return pydantic.parse_obj_as(ParseConfigurationQueryResponse, resp.json())  # type: ignore
    if resp.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, resp.json()))  # type: ignore
    try:
        error_body = resp.json()
    except JSONDecodeError:
        raise ApiError(status_code=resp.status_code, body=resp.text)
    raise ApiError(status_code=resp.status_code, body=error_body)
|
|
924
|
-
|
|
925
|
-
def create_parse_configuration(
    self,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
    request: ParseConfigurationCreate,
) -> ParseConfiguration:
    """Create a new parse configuration.

    Args:
        project_id: Optional project scope, sent as a query parameter.
        organization_id: Optional organization scope, sent as a query parameter.
        request: The parse-configuration creation payload.

    Returns:
        The created parse configuration.

    Raises:
        UnprocessableEntityError: On an HTTP 422 validation failure.
        ApiError: On any other non-2xx response.
    """
    endpoint = urllib.parse.urljoin(
        f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/parse-configurations"
    )
    resp = self._client_wrapper.httpx_client.request(
        "POST",
        endpoint,
        params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
        json=jsonable_encoder(request),
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if resp.status_code // 100 == 2:
        return pydantic.parse_obj_as(ParseConfiguration, resp.json())  # type: ignore
    if resp.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, resp.json()))  # type: ignore
    try:
        error_body = resp.json()
    except JSONDecodeError:
        raise ApiError(status_code=resp.status_code, body=resp.text)
    raise ApiError(status_code=resp.status_code, body=error_body)
|
|
992
|
-
|
|
993
|
-
def upsert_parse_configuration(
    self,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
    request: ParseConfigurationCreate,
) -> ParseConfiguration:
    """Create a parse configuration, or update the existing one with the same name.

    Args:
        project_id: Optional project scope, sent as a query parameter.
        organization_id: Optional organization scope, sent as a query parameter.
        request: The parse-configuration creation/update payload.

    Returns:
        The created or updated parse configuration.

    Raises:
        UnprocessableEntityError: On an HTTP 422 validation failure.
        ApiError: On any other non-2xx response.
    """
    endpoint = urllib.parse.urljoin(
        f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/parse-configurations"
    )
    # PUT (rather than POST) signals upsert-by-name semantics to the server.
    resp = self._client_wrapper.httpx_client.request(
        "PUT",
        endpoint,
        params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
        json=jsonable_encoder(request),
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if resp.status_code // 100 == 2:
        return pydantic.parse_obj_as(ParseConfiguration, resp.json())  # type: ignore
    if resp.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, resp.json()))  # type: ignore
    try:
        error_body = resp.json()
    except JSONDecodeError:
        raise ApiError(status_code=resp.status_code, body=resp.text)
    raise ApiError(status_code=resp.status_code, body=error_body)
|
|
1060
|
-
|
|
1061
|
-
def get_parse_configuration(
    self, config_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
) -> ParseConfiguration:
    """Fetch a parse configuration by its ID.

    Args:
        config_id: The ID of the parse configuration.
        project_id: Optional project scope, sent as a query parameter.
        organization_id: Optional organization scope, sent as a query parameter.

    Returns:
        The parse configuration.

    Raises:
        UnprocessableEntityError: On an HTTP 422 validation failure.
        ApiError: On any other non-2xx response.
    """
    endpoint = urllib.parse.urljoin(
        f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/parse-configurations/{config_id}"
    )
    resp = self._client_wrapper.httpx_client.request(
        "GET",
        endpoint,
        params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if resp.status_code // 100 == 2:
        return pydantic.parse_obj_as(ParseConfiguration, resp.json())  # type: ignore
    if resp.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, resp.json()))  # type: ignore
    try:
        error_body = resp.json()
    except JSONDecodeError:
        raise ApiError(status_code=resp.status_code, body=resp.text)
    raise ApiError(status_code=resp.status_code, body=error_body)
|
|
1110
|
-
|
|
1111
|
-
def update_parse_configuration(
    self,
    config_id: str,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
    parameters: typing.Optional[LlamaParseParameters] = OMIT,
) -> ParseConfiguration:
    """Update an existing parse configuration.

    Args:
        config_id: The ID of the parse configuration to update.
        project_id: Optional project scope, sent as a query parameter.
        organization_id: Optional organization scope, sent as a query parameter.
        parameters: New parse parameters; omitted from the request body when left as OMIT.

    Returns:
        The updated parse configuration.

    Raises:
        UnprocessableEntityError: On an HTTP 422 validation failure.
        ApiError: On any other non-2xx response.
    """
    payload: typing.Dict[str, typing.Any] = {}
    if parameters is not OMIT:
        payload["parameters"] = parameters
    endpoint = urllib.parse.urljoin(
        f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/parse-configurations/{config_id}"
    )
    resp = self._client_wrapper.httpx_client.request(
        "PUT",
        endpoint,
        params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
        json=jsonable_encoder(payload),
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if resp.status_code // 100 == 2:
        return pydantic.parse_obj_as(ParseConfiguration, resp.json())  # type: ignore
    if resp.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, resp.json()))  # type: ignore
    try:
        error_body = resp.json()
    except JSONDecodeError:
        raise ApiError(status_code=resp.status_code, body=resp.text)
    raise ApiError(status_code=resp.status_code, body=error_body)
|
|
1183
|
-
|
|
1184
|
-
def delete_parse_configuration(
    self, config_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
) -> None:
    """Delete a parse configuration.

    Args:
        config_id: The ID of the parse configuration to delete.
        project_id: Optional project scope, sent as a query parameter.
        organization_id: Optional organization scope, sent as a query parameter.

    Returns:
        None on success.

    Raises:
        UnprocessableEntityError: On an HTTP 422 validation failure.
        ApiError: On any other non-2xx response.
    """
    endpoint = urllib.parse.urljoin(
        f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/parse-configurations/{config_id}"
    )
    resp = self._client_wrapper.httpx_client.request(
        "DELETE",
        endpoint,
        params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if resp.status_code // 100 == 2:
        # Nothing to parse on success.
        return
    if resp.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, resp.json()))  # type: ignore
    try:
        error_body = resp.json()
    except JSONDecodeError:
        raise ApiError(status_code=resp.status_code, body=resp.text)
    raise ApiError(status_code=resp.status_code, body=error_body)
|
|
1230
|
-
|
|
1231
|
-
def query_parse_configurations(
    self,
    *,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
    page_size: typing.Optional[int] = OMIT,
    page_token: typing.Optional[str] = OMIT,
    filter: typing.Optional[ParseConfigurationFilter] = OMIT,
    order_by: typing.Optional[str] = OMIT,
) -> ParseConfigurationQueryResponse:
    """Query parse configurations with filtering and pagination.

    Args:
        project_id: Optional project scope, sent as a query parameter.
        organization_id: Optional organization scope, sent as a query parameter.
        page_size: Maximum number of configurations per page.
        page_token: Opaque token for fetching the next page.
        filter: Server-side parse-configuration filter.
        order_by: Sort expression for the result set.

    Returns:
        A paginated response of parse configurations.

    Raises:
        UnprocessableEntityError: On an HTTP 422 validation failure.
        ApiError: On any other non-2xx response.
    """
    # Only fields explicitly set by the caller (anything other than OMIT) go in the body.
    payload: typing.Dict[str, typing.Any] = {
        key: value
        for key, value in (
            ("page_size", page_size),
            ("page_token", page_token),
            ("filter", filter),
            ("order_by", order_by),
        )
        if value is not OMIT
    }
    endpoint = urllib.parse.urljoin(
        f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/parse-configurations/query"
    )
    resp = self._client_wrapper.httpx_client.request(
        "POST",
        endpoint,
        params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
        json=jsonable_encoder(payload),
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if resp.status_code // 100 == 2:
        return pydantic.parse_obj_as(ParseConfigurationQueryResponse, resp.json())  # type: ignore
    if resp.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, resp.json()))  # type: ignore
    try:
        error_body = resp.json()
    except JSONDecodeError:
        raise ApiError(status_code=resp.status_code, body=resp.text)
    raise ApiError(status_code=resp.status_code, body=error_body)
|
|
1302
|
-
|
|
1303
|
-
def get_latest_parse_configuration(
    self,
    *,
    creator: typing.Optional[str] = None,
    project_id: typing.Optional[str] = None,
    organization_id: typing.Optional[str] = None,
) -> typing.Optional[ParseConfiguration]:
    """Fetch the latest parse configuration for the current project.

    Args:
        creator: Optional creator filter.
        project_id: Optional project scope, sent as a query parameter.
        organization_id: Optional organization scope, sent as a query parameter.

    Returns:
        The latest parse configuration, or None if the server reports none exists.

    Raises:
        UnprocessableEntityError: On an HTTP 422 validation failure.
        ApiError: On any other non-2xx response.
    """
    endpoint = urllib.parse.urljoin(
        f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/parse-configurations/latest"
    )
    query = remove_none_from_dict(
        {"creator": creator, "project_id": project_id, "organization_id": organization_id}
    )
    resp = self._client_wrapper.httpx_client.request(
        "GET",
        endpoint,
        params=query,
        headers=self._client_wrapper.get_headers(),
        timeout=60,
    )
    if resp.status_code // 100 == 2:
        return pydantic.parse_obj_as(typing.Optional[ParseConfiguration], resp.json())  # type: ignore
    if resp.status_code == 422:
        raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, resp.json()))  # type: ignore
    try:
        error_body = resp.json()
    except JSONDecodeError:
        raise ApiError(status_code=resp.status_code, body=resp.text)
    raise ApiError(status_code=resp.status_code, body=error_body)
|
|
1354
|
-
|
|
1355
|
-
|
|
1356
|
-
class AsyncBetaClient:
|
|
1357
|
-
def __init__(self, *, client_wrapper: AsyncClientWrapper):
|
|
1358
|
-
self._client_wrapper = client_wrapper
|
|
1359
|
-
|
|
1360
|
-
async def list_batches(
|
|
1361
|
-
self,
|
|
1362
|
-
*,
|
|
1363
|
-
limit: typing.Optional[int] = None,
|
|
1364
|
-
offset: typing.Optional[int] = None,
|
|
1365
|
-
project_id: typing.Optional[str] = None,
|
|
1366
|
-
organization_id: typing.Optional[str] = None,
|
|
1367
|
-
) -> BatchPaginatedList:
|
|
1368
|
-
"""
|
|
1369
|
-
Parameters:
|
|
1370
|
-
- limit: typing.Optional[int].
|
|
1371
|
-
|
|
1372
|
-
- offset: typing.Optional[int].
|
|
1373
|
-
|
|
1374
|
-
- project_id: typing.Optional[str].
|
|
1375
|
-
|
|
1376
|
-
- organization_id: typing.Optional[str].
|
|
1377
|
-
---
|
|
1378
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
1379
|
-
|
|
1380
|
-
client = AsyncLlamaCloud(
|
|
1381
|
-
token="YOUR_TOKEN",
|
|
1382
|
-
)
|
|
1383
|
-
await client.beta.list_batches()
|
|
1384
|
-
"""
|
|
1385
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
1386
|
-
"GET",
|
|
1387
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/batches"),
|
|
1388
|
-
params=remove_none_from_dict(
|
|
1389
|
-
{"limit": limit, "offset": offset, "project_id": project_id, "organization_id": organization_id}
|
|
1390
|
-
),
|
|
1391
|
-
headers=self._client_wrapper.get_headers(),
|
|
1392
|
-
timeout=60,
|
|
1393
|
-
)
|
|
1394
|
-
if 200 <= _response.status_code < 300:
|
|
1395
|
-
return pydantic.parse_obj_as(BatchPaginatedList, _response.json()) # type: ignore
|
|
1396
|
-
if _response.status_code == 422:
|
|
1397
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
1398
|
-
try:
|
|
1399
|
-
_response_json = _response.json()
|
|
1400
|
-
except JSONDecodeError:
|
|
1401
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1402
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1403
|
-
|
|
1404
|
-
async def create_batch(
|
|
1405
|
-
self,
|
|
1406
|
-
*,
|
|
1407
|
-
organization_id: typing.Optional[str] = None,
|
|
1408
|
-
project_id: typing.Optional[str] = None,
|
|
1409
|
-
tool: str,
|
|
1410
|
-
tool_data: typing.Optional[LlamaParseParameters] = OMIT,
|
|
1411
|
-
input_type: str,
|
|
1412
|
-
input_id: str,
|
|
1413
|
-
output_type: typing.Optional[str] = OMIT,
|
|
1414
|
-
output_id: typing.Optional[str] = OMIT,
|
|
1415
|
-
batch_create_project_id: str,
|
|
1416
|
-
external_id: str,
|
|
1417
|
-
completion_window: typing.Optional[int] = OMIT,
|
|
1418
|
-
) -> Batch:
|
|
1419
|
-
"""
|
|
1420
|
-
Parameters:
|
|
1421
|
-
- organization_id: typing.Optional[str].
|
|
1422
|
-
|
|
1423
|
-
- project_id: typing.Optional[str].
|
|
1424
|
-
|
|
1425
|
-
- tool: str. The tool to be used for all requests in the batch.
|
|
1426
|
-
|
|
1427
|
-
- tool_data: typing.Optional[LlamaParseParameters].
|
|
1428
|
-
|
|
1429
|
-
- input_type: str. The type of input file. Currently only 'datasource' is supported.
|
|
1430
|
-
|
|
1431
|
-
- input_id: str. The ID of the input file for the batch.
|
|
1432
|
-
|
|
1433
|
-
- output_type: typing.Optional[str].
|
|
1434
|
-
|
|
1435
|
-
- output_id: typing.Optional[str].
|
|
1436
|
-
|
|
1437
|
-
- batch_create_project_id: str. The ID of the project to which the batch belongs
|
|
1438
|
-
|
|
1439
|
-
- external_id: str. A developer-provided ID for the batch. This ID will be returned in the response.
|
|
1440
|
-
|
|
1441
|
-
- completion_window: typing.Optional[int]. The time frame within which the batch should be processed. Currently only 24h is supported.
|
|
1442
|
-
---
|
|
1443
|
-
from llama_cloud import (
|
|
1444
|
-
FailPageMode,
|
|
1445
|
-
LlamaParseParameters,
|
|
1446
|
-
LlamaParseParametersPriority,
|
|
1447
|
-
ParsingMode,
|
|
1448
|
-
)
|
|
1449
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
1450
|
-
|
|
1451
|
-
client = AsyncLlamaCloud(
|
|
1452
|
-
token="YOUR_TOKEN",
|
|
1453
|
-
)
|
|
1454
|
-
await client.beta.create_batch(
|
|
1455
|
-
tool="string",
|
|
1456
|
-
tool_data=LlamaParseParameters(
|
|
1457
|
-
priority=LlamaParseParametersPriority.LOW,
|
|
1458
|
-
parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
|
|
1459
|
-
replace_failed_page_mode=FailPageMode.RAW_TEXT,
|
|
1460
|
-
),
|
|
1461
|
-
input_type="string",
|
|
1462
|
-
input_id="string",
|
|
1463
|
-
batch_create_project_id="string",
|
|
1464
|
-
external_id="string",
|
|
1465
|
-
)
|
|
1466
|
-
"""
|
|
1467
|
-
_request: typing.Dict[str, typing.Any] = {
|
|
1468
|
-
"tool": tool,
|
|
1469
|
-
"input_type": input_type,
|
|
1470
|
-
"input_id": input_id,
|
|
1471
|
-
"project_id": batch_create_project_id,
|
|
1472
|
-
"external_id": external_id,
|
|
1473
|
-
}
|
|
1474
|
-
if tool_data is not OMIT:
|
|
1475
|
-
_request["tool_data"] = tool_data
|
|
1476
|
-
if output_type is not OMIT:
|
|
1477
|
-
_request["output_type"] = output_type
|
|
1478
|
-
if output_id is not OMIT:
|
|
1479
|
-
_request["output_id"] = output_id
|
|
1480
|
-
if completion_window is not OMIT:
|
|
1481
|
-
_request["completion_window"] = completion_window
|
|
1482
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
1483
|
-
"POST",
|
|
1484
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/batches"),
|
|
1485
|
-
params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
|
|
1486
|
-
json=jsonable_encoder(_request),
|
|
1487
|
-
headers=self._client_wrapper.get_headers(),
|
|
1488
|
-
timeout=60,
|
|
1489
|
-
)
|
|
1490
|
-
if 200 <= _response.status_code < 300:
|
|
1491
|
-
return pydantic.parse_obj_as(Batch, _response.json()) # type: ignore
|
|
1492
|
-
if _response.status_code == 422:
|
|
1493
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
1494
|
-
try:
|
|
1495
|
-
_response_json = _response.json()
|
|
1496
|
-
except JSONDecodeError:
|
|
1497
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1498
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1499
|
-
|
|
1500
|
-
async def get_batch(self, batch_id: str, *, organization_id: typing.Optional[str] = None) -> BatchPublicOutput:
|
|
1501
|
-
"""
|
|
1502
|
-
Parameters:
|
|
1503
|
-
- batch_id: str.
|
|
1504
|
-
|
|
1505
|
-
- organization_id: typing.Optional[str].
|
|
1506
|
-
---
|
|
1507
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
1508
|
-
|
|
1509
|
-
client = AsyncLlamaCloud(
|
|
1510
|
-
token="YOUR_TOKEN",
|
|
1511
|
-
)
|
|
1512
|
-
await client.beta.get_batch(
|
|
1513
|
-
batch_id="string",
|
|
1514
|
-
)
|
|
1515
|
-
"""
|
|
1516
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
1517
|
-
"GET",
|
|
1518
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/batches/{batch_id}"),
|
|
1519
|
-
params=remove_none_from_dict({"organization_id": organization_id}),
|
|
1520
|
-
headers=self._client_wrapper.get_headers(),
|
|
1521
|
-
timeout=60,
|
|
1522
|
-
)
|
|
1523
|
-
if 200 <= _response.status_code < 300:
|
|
1524
|
-
return pydantic.parse_obj_as(BatchPublicOutput, _response.json()) # type: ignore
|
|
1525
|
-
if _response.status_code == 422:
|
|
1526
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
1527
|
-
try:
|
|
1528
|
-
_response_json = _response.json()
|
|
1529
|
-
except JSONDecodeError:
|
|
1530
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1531
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1532
|
-
|
|
1533
|
-
async def get_agent_data(
|
|
1534
|
-
self, item_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
|
|
1535
|
-
) -> AgentData:
|
|
1536
|
-
"""
|
|
1537
|
-
Get agent data by ID.
|
|
1538
|
-
|
|
1539
|
-
Parameters:
|
|
1540
|
-
- item_id: str.
|
|
1541
|
-
|
|
1542
|
-
- project_id: typing.Optional[str].
|
|
1543
|
-
|
|
1544
|
-
- organization_id: typing.Optional[str].
|
|
1545
|
-
---
|
|
1546
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
1547
|
-
|
|
1548
|
-
client = AsyncLlamaCloud(
|
|
1549
|
-
token="YOUR_TOKEN",
|
|
1550
|
-
)
|
|
1551
|
-
await client.beta.get_agent_data(
|
|
1552
|
-
item_id="string",
|
|
1553
|
-
)
|
|
1554
|
-
"""
|
|
1555
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
1556
|
-
"GET",
|
|
1557
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/agent-data/{item_id}"),
|
|
1558
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
1559
|
-
headers=self._client_wrapper.get_headers(),
|
|
1560
|
-
timeout=60,
|
|
1561
|
-
)
|
|
1562
|
-
if 200 <= _response.status_code < 300:
|
|
1563
|
-
return pydantic.parse_obj_as(AgentData, _response.json()) # type: ignore
|
|
1564
|
-
if _response.status_code == 422:
|
|
1565
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
1566
|
-
try:
|
|
1567
|
-
_response_json = _response.json()
|
|
1568
|
-
except JSONDecodeError:
|
|
1569
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1570
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1571
|
-
|
|
1572
|
-
async def update_agent_data(
|
|
1573
|
-
self,
|
|
1574
|
-
item_id: str,
|
|
1575
|
-
*,
|
|
1576
|
-
project_id: typing.Optional[str] = None,
|
|
1577
|
-
organization_id: typing.Optional[str] = None,
|
|
1578
|
-
data: typing.Dict[str, typing.Any],
|
|
1579
|
-
) -> AgentData:
|
|
1580
|
-
"""
|
|
1581
|
-
Update agent data by ID (overwrites).
|
|
1582
|
-
|
|
1583
|
-
Parameters:
|
|
1584
|
-
- item_id: str.
|
|
1585
|
-
|
|
1586
|
-
- project_id: typing.Optional[str].
|
|
1587
|
-
|
|
1588
|
-
- organization_id: typing.Optional[str].
|
|
1589
|
-
|
|
1590
|
-
- data: typing.Dict[str, typing.Any].
|
|
1591
|
-
---
|
|
1592
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
1593
|
-
|
|
1594
|
-
client = AsyncLlamaCloud(
|
|
1595
|
-
token="YOUR_TOKEN",
|
|
1596
|
-
)
|
|
1597
|
-
await client.beta.update_agent_data(
|
|
1598
|
-
item_id="string",
|
|
1599
|
-
data={"string": {}},
|
|
1600
|
-
)
|
|
1601
|
-
"""
|
|
1602
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
1603
|
-
"PUT",
|
|
1604
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/agent-data/{item_id}"),
|
|
1605
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
1606
|
-
json=jsonable_encoder({"data": data}),
|
|
1607
|
-
headers=self._client_wrapper.get_headers(),
|
|
1608
|
-
timeout=60,
|
|
1609
|
-
)
|
|
1610
|
-
if 200 <= _response.status_code < 300:
|
|
1611
|
-
return pydantic.parse_obj_as(AgentData, _response.json()) # type: ignore
|
|
1612
|
-
if _response.status_code == 422:
|
|
1613
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
1614
|
-
try:
|
|
1615
|
-
_response_json = _response.json()
|
|
1616
|
-
except JSONDecodeError:
|
|
1617
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1618
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1619
|
-
|
|
1620
|
-
async def delete_agent_data(
|
|
1621
|
-
self, item_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
|
|
1622
|
-
) -> typing.Dict[str, str]:
|
|
1623
|
-
"""
|
|
1624
|
-
Delete agent data by ID.
|
|
1625
|
-
|
|
1626
|
-
Parameters:
|
|
1627
|
-
- item_id: str.
|
|
1628
|
-
|
|
1629
|
-
- project_id: typing.Optional[str].
|
|
1630
|
-
|
|
1631
|
-
- organization_id: typing.Optional[str].
|
|
1632
|
-
---
|
|
1633
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
1634
|
-
|
|
1635
|
-
client = AsyncLlamaCloud(
|
|
1636
|
-
token="YOUR_TOKEN",
|
|
1637
|
-
)
|
|
1638
|
-
await client.beta.delete_agent_data(
|
|
1639
|
-
item_id="string",
|
|
1640
|
-
)
|
|
1641
|
-
"""
|
|
1642
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
1643
|
-
"DELETE",
|
|
1644
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/agent-data/{item_id}"),
|
|
1645
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
1646
|
-
headers=self._client_wrapper.get_headers(),
|
|
1647
|
-
timeout=60,
|
|
1648
|
-
)
|
|
1649
|
-
if 200 <= _response.status_code < 300:
|
|
1650
|
-
return pydantic.parse_obj_as(typing.Dict[str, str], _response.json()) # type: ignore
|
|
1651
|
-
if _response.status_code == 422:
|
|
1652
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
1653
|
-
try:
|
|
1654
|
-
_response_json = _response.json()
|
|
1655
|
-
except JSONDecodeError:
|
|
1656
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1657
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1658
|
-
|
|
1659
|
-
async def create_agent_data(
|
|
1660
|
-
self,
|
|
1661
|
-
*,
|
|
1662
|
-
project_id: typing.Optional[str] = None,
|
|
1663
|
-
organization_id: typing.Optional[str] = None,
|
|
1664
|
-
agent_slug: str,
|
|
1665
|
-
collection: typing.Optional[str] = OMIT,
|
|
1666
|
-
data: typing.Dict[str, typing.Any],
|
|
1667
|
-
) -> AgentData:
|
|
1668
|
-
"""
|
|
1669
|
-
Create new agent data.
|
|
1670
|
-
|
|
1671
|
-
Parameters:
|
|
1672
|
-
- project_id: typing.Optional[str].
|
|
1673
|
-
|
|
1674
|
-
- organization_id: typing.Optional[str].
|
|
1675
|
-
|
|
1676
|
-
- agent_slug: str.
|
|
1677
|
-
|
|
1678
|
-
- collection: typing.Optional[str].
|
|
1679
|
-
|
|
1680
|
-
- data: typing.Dict[str, typing.Any].
|
|
1681
|
-
---
|
|
1682
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
1683
|
-
|
|
1684
|
-
client = AsyncLlamaCloud(
|
|
1685
|
-
token="YOUR_TOKEN",
|
|
1686
|
-
)
|
|
1687
|
-
await client.beta.create_agent_data(
|
|
1688
|
-
agent_slug="string",
|
|
1689
|
-
data={"string": {}},
|
|
1690
|
-
)
|
|
1691
|
-
"""
|
|
1692
|
-
_request: typing.Dict[str, typing.Any] = {"agent_slug": agent_slug, "data": data}
|
|
1693
|
-
if collection is not OMIT:
|
|
1694
|
-
_request["collection"] = collection
|
|
1695
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
1696
|
-
"POST",
|
|
1697
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/agent-data"),
|
|
1698
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
1699
|
-
json=jsonable_encoder(_request),
|
|
1700
|
-
headers=self._client_wrapper.get_headers(),
|
|
1701
|
-
timeout=60,
|
|
1702
|
-
)
|
|
1703
|
-
if 200 <= _response.status_code < 300:
|
|
1704
|
-
return pydantic.parse_obj_as(AgentData, _response.json()) # type: ignore
|
|
1705
|
-
if _response.status_code == 422:
|
|
1706
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
1707
|
-
try:
|
|
1708
|
-
_response_json = _response.json()
|
|
1709
|
-
except JSONDecodeError:
|
|
1710
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1711
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1712
|
-
|
|
1713
|
-
async def search_agent_data_api_v_1_beta_agent_data_search_post(
|
|
1714
|
-
self,
|
|
1715
|
-
*,
|
|
1716
|
-
project_id: typing.Optional[str] = None,
|
|
1717
|
-
organization_id: typing.Optional[str] = None,
|
|
1718
|
-
page_size: typing.Optional[int] = OMIT,
|
|
1719
|
-
page_token: typing.Optional[str] = OMIT,
|
|
1720
|
-
filter: typing.Optional[typing.Dict[str, typing.Optional[FilterOperation]]] = OMIT,
|
|
1721
|
-
order_by: typing.Optional[str] = OMIT,
|
|
1722
|
-
agent_slug: str,
|
|
1723
|
-
collection: typing.Optional[str] = OMIT,
|
|
1724
|
-
include_total: typing.Optional[bool] = OMIT,
|
|
1725
|
-
offset: typing.Optional[int] = OMIT,
|
|
1726
|
-
) -> PaginatedResponseAgentData:
|
|
1727
|
-
"""
|
|
1728
|
-
Search agent data with filtering, sorting, and pagination.
|
|
1729
|
-
|
|
1730
|
-
Parameters:
|
|
1731
|
-
- project_id: typing.Optional[str].
|
|
1732
|
-
|
|
1733
|
-
- organization_id: typing.Optional[str].
|
|
1734
|
-
|
|
1735
|
-
- page_size: typing.Optional[int].
|
|
1736
|
-
|
|
1737
|
-
- page_token: typing.Optional[str].
|
|
1738
|
-
|
|
1739
|
-
- filter: typing.Optional[typing.Dict[str, typing.Optional[FilterOperation]]].
|
|
1740
|
-
|
|
1741
|
-
- order_by: typing.Optional[str].
|
|
1742
|
-
|
|
1743
|
-
- agent_slug: str. The agent deployment's agent_slug to search within
|
|
1744
|
-
|
|
1745
|
-
- collection: typing.Optional[str]. The logical agent data collection to search within
|
|
1746
|
-
|
|
1747
|
-
- include_total: typing.Optional[bool]. Whether to include the total number of items in the response
|
|
1748
|
-
|
|
1749
|
-
- offset: typing.Optional[int].
|
|
1750
|
-
---
|
|
1751
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
1752
|
-
|
|
1753
|
-
client = AsyncLlamaCloud(
|
|
1754
|
-
token="YOUR_TOKEN",
|
|
1755
|
-
)
|
|
1756
|
-
await client.beta.search_agent_data_api_v_1_beta_agent_data_search_post(
|
|
1757
|
-
agent_slug="string",
|
|
1758
|
-
)
|
|
1759
|
-
"""
|
|
1760
|
-
_request: typing.Dict[str, typing.Any] = {"agent_slug": agent_slug}
|
|
1761
|
-
if page_size is not OMIT:
|
|
1762
|
-
_request["page_size"] = page_size
|
|
1763
|
-
if page_token is not OMIT:
|
|
1764
|
-
_request["page_token"] = page_token
|
|
1765
|
-
if filter is not OMIT:
|
|
1766
|
-
_request["filter"] = filter
|
|
1767
|
-
if order_by is not OMIT:
|
|
1768
|
-
_request["order_by"] = order_by
|
|
1769
|
-
if collection is not OMIT:
|
|
1770
|
-
_request["collection"] = collection
|
|
1771
|
-
if include_total is not OMIT:
|
|
1772
|
-
_request["include_total"] = include_total
|
|
1773
|
-
if offset is not OMIT:
|
|
1774
|
-
_request["offset"] = offset
|
|
1775
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
1776
|
-
"POST",
|
|
1777
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/agent-data/:search"),
|
|
1778
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
1779
|
-
json=jsonable_encoder(_request),
|
|
1780
|
-
headers=self._client_wrapper.get_headers(),
|
|
1781
|
-
timeout=60,
|
|
1782
|
-
)
|
|
1783
|
-
if 200 <= _response.status_code < 300:
|
|
1784
|
-
return pydantic.parse_obj_as(PaginatedResponseAgentData, _response.json()) # type: ignore
|
|
1785
|
-
if _response.status_code == 422:
|
|
1786
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
1787
|
-
try:
|
|
1788
|
-
_response_json = _response.json()
|
|
1789
|
-
except JSONDecodeError:
|
|
1790
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1791
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1792
|
-
|
|
1793
|
-
async def aggregate_agent_data_api_v_1_beta_agent_data_aggregate_post(
|
|
1794
|
-
self,
|
|
1795
|
-
*,
|
|
1796
|
-
project_id: typing.Optional[str] = None,
|
|
1797
|
-
organization_id: typing.Optional[str] = None,
|
|
1798
|
-
page_size: typing.Optional[int] = OMIT,
|
|
1799
|
-
page_token: typing.Optional[str] = OMIT,
|
|
1800
|
-
filter: typing.Optional[typing.Dict[str, typing.Optional[FilterOperation]]] = OMIT,
|
|
1801
|
-
order_by: typing.Optional[str] = OMIT,
|
|
1802
|
-
agent_slug: str,
|
|
1803
|
-
collection: typing.Optional[str] = OMIT,
|
|
1804
|
-
group_by: typing.Optional[typing.List[str]] = OMIT,
|
|
1805
|
-
count: typing.Optional[bool] = OMIT,
|
|
1806
|
-
first: typing.Optional[bool] = OMIT,
|
|
1807
|
-
offset: typing.Optional[int] = OMIT,
|
|
1808
|
-
) -> PaginatedResponseAggregateGroup:
|
|
1809
|
-
"""
|
|
1810
|
-
Aggregate agent data with grouping and optional counting/first item retrieval.
|
|
1811
|
-
|
|
1812
|
-
Parameters:
|
|
1813
|
-
- project_id: typing.Optional[str].
|
|
1814
|
-
|
|
1815
|
-
- organization_id: typing.Optional[str].
|
|
1816
|
-
|
|
1817
|
-
- page_size: typing.Optional[int].
|
|
1818
|
-
|
|
1819
|
-
- page_token: typing.Optional[str].
|
|
1820
|
-
|
|
1821
|
-
- filter: typing.Optional[typing.Dict[str, typing.Optional[FilterOperation]]].
|
|
1822
|
-
|
|
1823
|
-
- order_by: typing.Optional[str].
|
|
1824
|
-
|
|
1825
|
-
- agent_slug: str. The agent deployment's agent_slug to aggregate data for
|
|
1826
|
-
|
|
1827
|
-
- collection: typing.Optional[str]. The logical agent data collection to aggregate data for
|
|
1828
|
-
|
|
1829
|
-
- group_by: typing.Optional[typing.List[str]].
|
|
1830
|
-
|
|
1831
|
-
- count: typing.Optional[bool].
|
|
1832
|
-
|
|
1833
|
-
- first: typing.Optional[bool].
|
|
1834
|
-
|
|
1835
|
-
- offset: typing.Optional[int].
|
|
1836
|
-
---
|
|
1837
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
1838
|
-
|
|
1839
|
-
client = AsyncLlamaCloud(
|
|
1840
|
-
token="YOUR_TOKEN",
|
|
1841
|
-
)
|
|
1842
|
-
await client.beta.aggregate_agent_data_api_v_1_beta_agent_data_aggregate_post(
|
|
1843
|
-
agent_slug="string",
|
|
1844
|
-
)
|
|
1845
|
-
"""
|
|
1846
|
-
_request: typing.Dict[str, typing.Any] = {"agent_slug": agent_slug}
|
|
1847
|
-
if page_size is not OMIT:
|
|
1848
|
-
_request["page_size"] = page_size
|
|
1849
|
-
if page_token is not OMIT:
|
|
1850
|
-
_request["page_token"] = page_token
|
|
1851
|
-
if filter is not OMIT:
|
|
1852
|
-
_request["filter"] = filter
|
|
1853
|
-
if order_by is not OMIT:
|
|
1854
|
-
_request["order_by"] = order_by
|
|
1855
|
-
if collection is not OMIT:
|
|
1856
|
-
_request["collection"] = collection
|
|
1857
|
-
if group_by is not OMIT:
|
|
1858
|
-
_request["group_by"] = group_by
|
|
1859
|
-
if count is not OMIT:
|
|
1860
|
-
_request["count"] = count
|
|
1861
|
-
if first is not OMIT:
|
|
1862
|
-
_request["first"] = first
|
|
1863
|
-
if offset is not OMIT:
|
|
1864
|
-
_request["offset"] = offset
|
|
1865
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
1866
|
-
"POST",
|
|
1867
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/agent-data/:aggregate"),
|
|
1868
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
1869
|
-
json=jsonable_encoder(_request),
|
|
1870
|
-
headers=self._client_wrapper.get_headers(),
|
|
1871
|
-
timeout=60,
|
|
1872
|
-
)
|
|
1873
|
-
if 200 <= _response.status_code < 300:
|
|
1874
|
-
return pydantic.parse_obj_as(PaginatedResponseAggregateGroup, _response.json()) # type: ignore
|
|
1875
|
-
if _response.status_code == 422:
|
|
1876
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
1877
|
-
try:
|
|
1878
|
-
_response_json = _response.json()
|
|
1879
|
-
except JSONDecodeError:
|
|
1880
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1881
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1882
|
-
|
|
1883
|
-
async def list_quota_configurations(
|
|
1884
|
-
self,
|
|
1885
|
-
*,
|
|
1886
|
-
source_type: typing_extensions.Literal["organization"],
|
|
1887
|
-
source_id: str,
|
|
1888
|
-
page: typing.Optional[int] = None,
|
|
1889
|
-
page_size: typing.Optional[int] = None,
|
|
1890
|
-
) -> PaginatedResponseQuotaConfiguration:
|
|
1891
|
-
"""
|
|
1892
|
-
Retrieve a paginated list of quota configurations with optional filtering.
|
|
1893
|
-
|
|
1894
|
-
Parameters:
|
|
1895
|
-
- source_type: typing_extensions.Literal["organization"].
|
|
1896
|
-
|
|
1897
|
-
- source_id: str.
|
|
1898
|
-
|
|
1899
|
-
- page: typing.Optional[int].
|
|
1900
|
-
|
|
1901
|
-
- page_size: typing.Optional[int].
|
|
1902
|
-
---
|
|
1903
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
1904
|
-
|
|
1905
|
-
client = AsyncLlamaCloud(
|
|
1906
|
-
token="YOUR_TOKEN",
|
|
1907
|
-
)
|
|
1908
|
-
await client.beta.list_quota_configurations(
|
|
1909
|
-
source_type="organization",
|
|
1910
|
-
source_id="string",
|
|
1911
|
-
)
|
|
1912
|
-
"""
|
|
1913
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
1914
|
-
"GET",
|
|
1915
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/quota-management"),
|
|
1916
|
-
params=remove_none_from_dict(
|
|
1917
|
-
{"source_type": source_type, "source_id": source_id, "page": page, "page_size": page_size}
|
|
1918
|
-
),
|
|
1919
|
-
headers=self._client_wrapper.get_headers(),
|
|
1920
|
-
timeout=60,
|
|
1921
|
-
)
|
|
1922
|
-
if 200 <= _response.status_code < 300:
|
|
1923
|
-
return pydantic.parse_obj_as(PaginatedResponseQuotaConfiguration, _response.json()) # type: ignore
|
|
1924
|
-
if _response.status_code == 422:
|
|
1925
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
1926
|
-
try:
|
|
1927
|
-
_response_json = _response.json()
|
|
1928
|
-
except JSONDecodeError:
|
|
1929
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1930
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1931
|
-
|
|
1932
|
-
async def create_file(
|
|
1933
|
-
self,
|
|
1934
|
-
*,
|
|
1935
|
-
project_id: typing.Optional[str] = None,
|
|
1936
|
-
organization_id: typing.Optional[str] = None,
|
|
1937
|
-
request: FileCreate,
|
|
1938
|
-
) -> File:
|
|
1939
|
-
"""
|
|
1940
|
-
Create a new file in the project.
|
|
1941
|
-
|
|
1942
|
-
Args:
|
|
1943
|
-
file_create: File creation data
|
|
1944
|
-
project: Validated project from dependency
|
|
1945
|
-
db: Database session
|
|
1946
|
-
|
|
1947
|
-
Returns:
|
|
1948
|
-
The created file
|
|
1949
|
-
|
|
1950
|
-
Parameters:
|
|
1951
|
-
- project_id: typing.Optional[str].
|
|
1952
|
-
|
|
1953
|
-
- organization_id: typing.Optional[str].
|
|
1954
|
-
|
|
1955
|
-
- request: FileCreate.
|
|
1956
|
-
---
|
|
1957
|
-
from llama_cloud import FileCreate
|
|
1958
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
1959
|
-
|
|
1960
|
-
client = AsyncLlamaCloud(
|
|
1961
|
-
token="YOUR_TOKEN",
|
|
1962
|
-
)
|
|
1963
|
-
await client.beta.create_file(
|
|
1964
|
-
request=FileCreate(
|
|
1965
|
-
name="string",
|
|
1966
|
-
),
|
|
1967
|
-
)
|
|
1968
|
-
"""
|
|
1969
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
1970
|
-
"POST",
|
|
1971
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/files"),
|
|
1972
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
1973
|
-
json=jsonable_encoder(request),
|
|
1974
|
-
headers=self._client_wrapper.get_headers(),
|
|
1975
|
-
timeout=60,
|
|
1976
|
-
)
|
|
1977
|
-
if 200 <= _response.status_code < 300:
|
|
1978
|
-
return pydantic.parse_obj_as(File, _response.json()) # type: ignore
|
|
1979
|
-
if _response.status_code == 422:
|
|
1980
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
1981
|
-
try:
|
|
1982
|
-
_response_json = _response.json()
|
|
1983
|
-
except JSONDecodeError:
|
|
1984
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1985
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1986
|
-
|
|
1987
|
-
async def upsert_file(
|
|
1988
|
-
self,
|
|
1989
|
-
*,
|
|
1990
|
-
project_id: typing.Optional[str] = None,
|
|
1991
|
-
organization_id: typing.Optional[str] = None,
|
|
1992
|
-
request: FileCreate,
|
|
1993
|
-
) -> File:
|
|
1994
|
-
"""
|
|
1995
|
-
Upsert a file (create or update if exists) in the project.
|
|
1996
|
-
|
|
1997
|
-
Args:
|
|
1998
|
-
file_create: File creation/update data
|
|
1999
|
-
project: Validated project from dependency
|
|
2000
|
-
db: Database session
|
|
2001
|
-
|
|
2002
|
-
Returns:
|
|
2003
|
-
The upserted file
|
|
2004
|
-
|
|
2005
|
-
Parameters:
|
|
2006
|
-
- project_id: typing.Optional[str].
|
|
2007
|
-
|
|
2008
|
-
- organization_id: typing.Optional[str].
|
|
2009
|
-
|
|
2010
|
-
- request: FileCreate.
|
|
2011
|
-
---
|
|
2012
|
-
from llama_cloud import FileCreate
|
|
2013
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
2014
|
-
|
|
2015
|
-
client = AsyncLlamaCloud(
|
|
2016
|
-
token="YOUR_TOKEN",
|
|
2017
|
-
)
|
|
2018
|
-
await client.beta.upsert_file(
|
|
2019
|
-
request=FileCreate(
|
|
2020
|
-
name="string",
|
|
2021
|
-
),
|
|
2022
|
-
)
|
|
2023
|
-
"""
|
|
2024
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
2025
|
-
"PUT",
|
|
2026
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/files"),
|
|
2027
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
2028
|
-
json=jsonable_encoder(request),
|
|
2029
|
-
headers=self._client_wrapper.get_headers(),
|
|
2030
|
-
timeout=60,
|
|
2031
|
-
)
|
|
2032
|
-
if 200 <= _response.status_code < 300:
|
|
2033
|
-
return pydantic.parse_obj_as(File, _response.json()) # type: ignore
|
|
2034
|
-
if _response.status_code == 422:
|
|
2035
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
2036
|
-
try:
|
|
2037
|
-
_response_json = _response.json()
|
|
2038
|
-
except JSONDecodeError:
|
|
2039
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2040
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2041
|
-
|
|
2042
|
-
async def query_files(
|
|
2043
|
-
self,
|
|
2044
|
-
*,
|
|
2045
|
-
project_id: typing.Optional[str] = None,
|
|
2046
|
-
organization_id: typing.Optional[str] = None,
|
|
2047
|
-
page_size: typing.Optional[int] = OMIT,
|
|
2048
|
-
page_token: typing.Optional[str] = OMIT,
|
|
2049
|
-
filter: typing.Optional[FileFilter] = OMIT,
|
|
2050
|
-
order_by: typing.Optional[str] = OMIT,
|
|
2051
|
-
) -> FileQueryResponse:
|
|
2052
|
-
"""
|
|
2053
|
-
Query files with flexible filtering and pagination.
|
|
2054
|
-
|
|
2055
|
-
Args:
|
|
2056
|
-
request: The query request with filters and pagination
|
|
2057
|
-
project: Validated project from dependency
|
|
2058
|
-
db: Database session
|
|
2059
|
-
|
|
2060
|
-
Returns:
|
|
2061
|
-
Paginated response with files
|
|
2062
|
-
|
|
2063
|
-
Parameters:
|
|
2064
|
-
- project_id: typing.Optional[str].
|
|
2065
|
-
|
|
2066
|
-
- organization_id: typing.Optional[str].
|
|
2067
|
-
|
|
2068
|
-
- page_size: typing.Optional[int].
|
|
2069
|
-
|
|
2070
|
-
- page_token: typing.Optional[str].
|
|
2071
|
-
|
|
2072
|
-
- filter: typing.Optional[FileFilter].
|
|
2073
|
-
|
|
2074
|
-
- order_by: typing.Optional[str].
|
|
2075
|
-
---
|
|
2076
|
-
from llama_cloud import FileFilter
|
|
2077
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
2078
|
-
|
|
2079
|
-
client = AsyncLlamaCloud(
|
|
2080
|
-
token="YOUR_TOKEN",
|
|
2081
|
-
)
|
|
2082
|
-
await client.beta.query_files(
|
|
2083
|
-
filter=FileFilter(),
|
|
2084
|
-
)
|
|
2085
|
-
"""
|
|
2086
|
-
_request: typing.Dict[str, typing.Any] = {}
|
|
2087
|
-
if page_size is not OMIT:
|
|
2088
|
-
_request["page_size"] = page_size
|
|
2089
|
-
if page_token is not OMIT:
|
|
2090
|
-
_request["page_token"] = page_token
|
|
2091
|
-
if filter is not OMIT:
|
|
2092
|
-
_request["filter"] = filter
|
|
2093
|
-
if order_by is not OMIT:
|
|
2094
|
-
_request["order_by"] = order_by
|
|
2095
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
2096
|
-
"POST",
|
|
2097
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/files/query"),
|
|
2098
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
2099
|
-
json=jsonable_encoder(_request),
|
|
2100
|
-
headers=self._client_wrapper.get_headers(),
|
|
2101
|
-
timeout=60,
|
|
2102
|
-
)
|
|
2103
|
-
if 200 <= _response.status_code < 300:
|
|
2104
|
-
return pydantic.parse_obj_as(FileQueryResponse, _response.json()) # type: ignore
|
|
2105
|
-
if _response.status_code == 422:
|
|
2106
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
2107
|
-
try:
|
|
2108
|
-
_response_json = _response.json()
|
|
2109
|
-
except JSONDecodeError:
|
|
2110
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2111
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2112
|
-
|
|
2113
|
-
async def delete_file(
|
|
2114
|
-
self, file_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
|
|
2115
|
-
) -> None:
|
|
2116
|
-
"""
|
|
2117
|
-
Delete a single file from the project.
|
|
2118
|
-
|
|
2119
|
-
Args:
|
|
2120
|
-
file_id: The ID of the file to delete
|
|
2121
|
-
project: Validated project from dependency
|
|
2122
|
-
db: Database session
|
|
2123
|
-
|
|
2124
|
-
Returns:
|
|
2125
|
-
None (204 No Content on success)
|
|
2126
|
-
|
|
2127
|
-
Parameters:
|
|
2128
|
-
- file_id: str.
|
|
2129
|
-
|
|
2130
|
-
- project_id: typing.Optional[str].
|
|
2131
|
-
|
|
2132
|
-
- organization_id: typing.Optional[str].
|
|
2133
|
-
---
|
|
2134
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
2135
|
-
|
|
2136
|
-
client = AsyncLlamaCloud(
|
|
2137
|
-
token="YOUR_TOKEN",
|
|
2138
|
-
)
|
|
2139
|
-
await client.beta.delete_file(
|
|
2140
|
-
file_id="string",
|
|
2141
|
-
)
|
|
2142
|
-
"""
|
|
2143
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
2144
|
-
"DELETE",
|
|
2145
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/files/{file_id}"),
|
|
2146
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
2147
|
-
headers=self._client_wrapper.get_headers(),
|
|
2148
|
-
timeout=60,
|
|
2149
|
-
)
|
|
2150
|
-
if 200 <= _response.status_code < 300:
|
|
2151
|
-
return
|
|
2152
|
-
if _response.status_code == 422:
|
|
2153
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
2154
|
-
try:
|
|
2155
|
-
_response_json = _response.json()
|
|
2156
|
-
except JSONDecodeError:
|
|
2157
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2158
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2159
|
-
|
|
2160
|
-
async def list_parse_configurations(
|
|
2161
|
-
self,
|
|
2162
|
-
*,
|
|
2163
|
-
page_size: typing.Optional[int] = None,
|
|
2164
|
-
page_token: typing.Optional[str] = None,
|
|
2165
|
-
name: typing.Optional[str] = None,
|
|
2166
|
-
creator: typing.Optional[str] = None,
|
|
2167
|
-
version: typing.Optional[str] = None,
|
|
2168
|
-
project_id: typing.Optional[str] = None,
|
|
2169
|
-
organization_id: typing.Optional[str] = None,
|
|
2170
|
-
) -> ParseConfigurationQueryResponse:
|
|
2171
|
-
"""
|
|
2172
|
-
List parse configurations for the current project.
|
|
2173
|
-
|
|
2174
|
-
Args:
|
|
2175
|
-
project: Validated project from dependency
|
|
2176
|
-
user: Current user
|
|
2177
|
-
db: Database session
|
|
2178
|
-
page_size: Number of items per page
|
|
2179
|
-
page_token: Token for pagination
|
|
2180
|
-
name: Filter by configuration name
|
|
2181
|
-
creator: Filter by creator
|
|
2182
|
-
version: Filter by version
|
|
2183
|
-
|
|
2184
|
-
Returns:
|
|
2185
|
-
Paginated response with parse configurations
|
|
2186
|
-
|
|
2187
|
-
Parameters:
|
|
2188
|
-
- page_size: typing.Optional[int].
|
|
2189
|
-
|
|
2190
|
-
- page_token: typing.Optional[str].
|
|
2191
|
-
|
|
2192
|
-
- name: typing.Optional[str].
|
|
2193
|
-
|
|
2194
|
-
- creator: typing.Optional[str].
|
|
2195
|
-
|
|
2196
|
-
- version: typing.Optional[str].
|
|
2197
|
-
|
|
2198
|
-
- project_id: typing.Optional[str].
|
|
2199
|
-
|
|
2200
|
-
- organization_id: typing.Optional[str].
|
|
2201
|
-
---
|
|
2202
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
2203
|
-
|
|
2204
|
-
client = AsyncLlamaCloud(
|
|
2205
|
-
token="YOUR_TOKEN",
|
|
2206
|
-
)
|
|
2207
|
-
await client.beta.list_parse_configurations()
|
|
2208
|
-
"""
|
|
2209
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
2210
|
-
"GET",
|
|
2211
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/parse-configurations"),
|
|
2212
|
-
params=remove_none_from_dict(
|
|
2213
|
-
{
|
|
2214
|
-
"page_size": page_size,
|
|
2215
|
-
"page_token": page_token,
|
|
2216
|
-
"name": name,
|
|
2217
|
-
"creator": creator,
|
|
2218
|
-
"version": version,
|
|
2219
|
-
"project_id": project_id,
|
|
2220
|
-
"organization_id": organization_id,
|
|
2221
|
-
}
|
|
2222
|
-
),
|
|
2223
|
-
headers=self._client_wrapper.get_headers(),
|
|
2224
|
-
timeout=60,
|
|
2225
|
-
)
|
|
2226
|
-
if 200 <= _response.status_code < 300:
|
|
2227
|
-
return pydantic.parse_obj_as(ParseConfigurationQueryResponse, _response.json()) # type: ignore
|
|
2228
|
-
if _response.status_code == 422:
|
|
2229
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
2230
|
-
try:
|
|
2231
|
-
_response_json = _response.json()
|
|
2232
|
-
except JSONDecodeError:
|
|
2233
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2234
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2235
|
-
|
|
2236
|
-
async def create_parse_configuration(
|
|
2237
|
-
self,
|
|
2238
|
-
*,
|
|
2239
|
-
project_id: typing.Optional[str] = None,
|
|
2240
|
-
organization_id: typing.Optional[str] = None,
|
|
2241
|
-
request: ParseConfigurationCreate,
|
|
2242
|
-
) -> ParseConfiguration:
|
|
2243
|
-
"""
|
|
2244
|
-
Create a new parse configuration.
|
|
2245
|
-
|
|
2246
|
-
Args:
|
|
2247
|
-
config_create: Parse configuration creation data
|
|
2248
|
-
project: Validated project from dependency
|
|
2249
|
-
user: Current user
|
|
2250
|
-
db: Database session
|
|
2251
|
-
|
|
2252
|
-
Returns:
|
|
2253
|
-
The created parse configuration
|
|
2254
|
-
|
|
2255
|
-
Parameters:
|
|
2256
|
-
- project_id: typing.Optional[str].
|
|
2257
|
-
|
|
2258
|
-
- organization_id: typing.Optional[str].
|
|
2259
|
-
|
|
2260
|
-
- request: ParseConfigurationCreate.
|
|
2261
|
-
---
|
|
2262
|
-
from llama_cloud import (
|
|
2263
|
-
FailPageMode,
|
|
2264
|
-
LlamaParseParameters,
|
|
2265
|
-
LlamaParseParametersPriority,
|
|
2266
|
-
ParseConfigurationCreate,
|
|
2267
|
-
ParsingMode,
|
|
2268
|
-
)
|
|
2269
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
2270
|
-
|
|
2271
|
-
client = AsyncLlamaCloud(
|
|
2272
|
-
token="YOUR_TOKEN",
|
|
2273
|
-
)
|
|
2274
|
-
await client.beta.create_parse_configuration(
|
|
2275
|
-
request=ParseConfigurationCreate(
|
|
2276
|
-
name="string",
|
|
2277
|
-
version="string",
|
|
2278
|
-
parameters=LlamaParseParameters(
|
|
2279
|
-
priority=LlamaParseParametersPriority.LOW,
|
|
2280
|
-
parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
|
|
2281
|
-
replace_failed_page_mode=FailPageMode.RAW_TEXT,
|
|
2282
|
-
),
|
|
2283
|
-
),
|
|
2284
|
-
)
|
|
2285
|
-
"""
|
|
2286
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
2287
|
-
"POST",
|
|
2288
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/parse-configurations"),
|
|
2289
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
2290
|
-
json=jsonable_encoder(request),
|
|
2291
|
-
headers=self._client_wrapper.get_headers(),
|
|
2292
|
-
timeout=60,
|
|
2293
|
-
)
|
|
2294
|
-
if 200 <= _response.status_code < 300:
|
|
2295
|
-
return pydantic.parse_obj_as(ParseConfiguration, _response.json()) # type: ignore
|
|
2296
|
-
if _response.status_code == 422:
|
|
2297
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
2298
|
-
try:
|
|
2299
|
-
_response_json = _response.json()
|
|
2300
|
-
except JSONDecodeError:
|
|
2301
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2302
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2303
|
-
|
|
2304
|
-
async def upsert_parse_configuration(
|
|
2305
|
-
self,
|
|
2306
|
-
*,
|
|
2307
|
-
project_id: typing.Optional[str] = None,
|
|
2308
|
-
organization_id: typing.Optional[str] = None,
|
|
2309
|
-
request: ParseConfigurationCreate,
|
|
2310
|
-
) -> ParseConfiguration:
|
|
2311
|
-
"""
|
|
2312
|
-
Create or update a parse configuration by name.
|
|
2313
|
-
|
|
2314
|
-
Args:
|
|
2315
|
-
config_create: Parse configuration creation data
|
|
2316
|
-
project: Validated project from dependency
|
|
2317
|
-
user: Current user
|
|
2318
|
-
db: Database session
|
|
2319
|
-
|
|
2320
|
-
Returns:
|
|
2321
|
-
The created or updated parse configuration
|
|
2322
|
-
|
|
2323
|
-
Parameters:
|
|
2324
|
-
- project_id: typing.Optional[str].
|
|
2325
|
-
|
|
2326
|
-
- organization_id: typing.Optional[str].
|
|
2327
|
-
|
|
2328
|
-
- request: ParseConfigurationCreate.
|
|
2329
|
-
---
|
|
2330
|
-
from llama_cloud import (
|
|
2331
|
-
FailPageMode,
|
|
2332
|
-
LlamaParseParameters,
|
|
2333
|
-
LlamaParseParametersPriority,
|
|
2334
|
-
ParseConfigurationCreate,
|
|
2335
|
-
ParsingMode,
|
|
2336
|
-
)
|
|
2337
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
2338
|
-
|
|
2339
|
-
client = AsyncLlamaCloud(
|
|
2340
|
-
token="YOUR_TOKEN",
|
|
2341
|
-
)
|
|
2342
|
-
await client.beta.upsert_parse_configuration(
|
|
2343
|
-
request=ParseConfigurationCreate(
|
|
2344
|
-
name="string",
|
|
2345
|
-
version="string",
|
|
2346
|
-
parameters=LlamaParseParameters(
|
|
2347
|
-
priority=LlamaParseParametersPriority.LOW,
|
|
2348
|
-
parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
|
|
2349
|
-
replace_failed_page_mode=FailPageMode.RAW_TEXT,
|
|
2350
|
-
),
|
|
2351
|
-
),
|
|
2352
|
-
)
|
|
2353
|
-
"""
|
|
2354
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
2355
|
-
"PUT",
|
|
2356
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/parse-configurations"),
|
|
2357
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
2358
|
-
json=jsonable_encoder(request),
|
|
2359
|
-
headers=self._client_wrapper.get_headers(),
|
|
2360
|
-
timeout=60,
|
|
2361
|
-
)
|
|
2362
|
-
if 200 <= _response.status_code < 300:
|
|
2363
|
-
return pydantic.parse_obj_as(ParseConfiguration, _response.json()) # type: ignore
|
|
2364
|
-
if _response.status_code == 422:
|
|
2365
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
2366
|
-
try:
|
|
2367
|
-
_response_json = _response.json()
|
|
2368
|
-
except JSONDecodeError:
|
|
2369
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2370
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2371
|
-
|
|
2372
|
-
async def get_parse_configuration(
|
|
2373
|
-
self, config_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
|
|
2374
|
-
) -> ParseConfiguration:
|
|
2375
|
-
"""
|
|
2376
|
-
Get a parse configuration by ID.
|
|
2377
|
-
|
|
2378
|
-
Args:
|
|
2379
|
-
config_id: The ID of the parse configuration
|
|
2380
|
-
project: Validated project from dependency
|
|
2381
|
-
user: Current user
|
|
2382
|
-
db: Database session
|
|
2383
|
-
|
|
2384
|
-
Returns:
|
|
2385
|
-
The parse configuration
|
|
2386
|
-
|
|
2387
|
-
Parameters:
|
|
2388
|
-
- config_id: str.
|
|
2389
|
-
|
|
2390
|
-
- project_id: typing.Optional[str].
|
|
2391
|
-
|
|
2392
|
-
- organization_id: typing.Optional[str].
|
|
2393
|
-
---
|
|
2394
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
2395
|
-
|
|
2396
|
-
client = AsyncLlamaCloud(
|
|
2397
|
-
token="YOUR_TOKEN",
|
|
2398
|
-
)
|
|
2399
|
-
await client.beta.get_parse_configuration(
|
|
2400
|
-
config_id="string",
|
|
2401
|
-
)
|
|
2402
|
-
"""
|
|
2403
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
2404
|
-
"GET",
|
|
2405
|
-
urllib.parse.urljoin(
|
|
2406
|
-
f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/parse-configurations/{config_id}"
|
|
2407
|
-
),
|
|
2408
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
2409
|
-
headers=self._client_wrapper.get_headers(),
|
|
2410
|
-
timeout=60,
|
|
2411
|
-
)
|
|
2412
|
-
if 200 <= _response.status_code < 300:
|
|
2413
|
-
return pydantic.parse_obj_as(ParseConfiguration, _response.json()) # type: ignore
|
|
2414
|
-
if _response.status_code == 422:
|
|
2415
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
2416
|
-
try:
|
|
2417
|
-
_response_json = _response.json()
|
|
2418
|
-
except JSONDecodeError:
|
|
2419
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2420
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2421
|
-
|
|
2422
|
-
async def update_parse_configuration(
|
|
2423
|
-
self,
|
|
2424
|
-
config_id: str,
|
|
2425
|
-
*,
|
|
2426
|
-
project_id: typing.Optional[str] = None,
|
|
2427
|
-
organization_id: typing.Optional[str] = None,
|
|
2428
|
-
parameters: typing.Optional[LlamaParseParameters] = OMIT,
|
|
2429
|
-
) -> ParseConfiguration:
|
|
2430
|
-
"""
|
|
2431
|
-
Update a parse configuration.
|
|
2432
|
-
|
|
2433
|
-
Args:
|
|
2434
|
-
config_id: The ID of the parse configuration to update
|
|
2435
|
-
config_update: Update data
|
|
2436
|
-
project: Validated project from dependency
|
|
2437
|
-
user: Current user
|
|
2438
|
-
db: Database session
|
|
2439
|
-
|
|
2440
|
-
Returns:
|
|
2441
|
-
The updated parse configuration
|
|
2442
|
-
|
|
2443
|
-
Parameters:
|
|
2444
|
-
- config_id: str.
|
|
2445
|
-
|
|
2446
|
-
- project_id: typing.Optional[str].
|
|
2447
|
-
|
|
2448
|
-
- organization_id: typing.Optional[str].
|
|
2449
|
-
|
|
2450
|
-
- parameters: typing.Optional[LlamaParseParameters].
|
|
2451
|
-
---
|
|
2452
|
-
from llama_cloud import (
|
|
2453
|
-
FailPageMode,
|
|
2454
|
-
LlamaParseParameters,
|
|
2455
|
-
LlamaParseParametersPriority,
|
|
2456
|
-
ParsingMode,
|
|
2457
|
-
)
|
|
2458
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
2459
|
-
|
|
2460
|
-
client = AsyncLlamaCloud(
|
|
2461
|
-
token="YOUR_TOKEN",
|
|
2462
|
-
)
|
|
2463
|
-
await client.beta.update_parse_configuration(
|
|
2464
|
-
config_id="string",
|
|
2465
|
-
parameters=LlamaParseParameters(
|
|
2466
|
-
priority=LlamaParseParametersPriority.LOW,
|
|
2467
|
-
parse_mode=ParsingMode.PARSE_PAGE_WITHOUT_LLM,
|
|
2468
|
-
replace_failed_page_mode=FailPageMode.RAW_TEXT,
|
|
2469
|
-
),
|
|
2470
|
-
)
|
|
2471
|
-
"""
|
|
2472
|
-
_request: typing.Dict[str, typing.Any] = {}
|
|
2473
|
-
if parameters is not OMIT:
|
|
2474
|
-
_request["parameters"] = parameters
|
|
2475
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
2476
|
-
"PUT",
|
|
2477
|
-
urllib.parse.urljoin(
|
|
2478
|
-
f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/parse-configurations/{config_id}"
|
|
2479
|
-
),
|
|
2480
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
2481
|
-
json=jsonable_encoder(_request),
|
|
2482
|
-
headers=self._client_wrapper.get_headers(),
|
|
2483
|
-
timeout=60,
|
|
2484
|
-
)
|
|
2485
|
-
if 200 <= _response.status_code < 300:
|
|
2486
|
-
return pydantic.parse_obj_as(ParseConfiguration, _response.json()) # type: ignore
|
|
2487
|
-
if _response.status_code == 422:
|
|
2488
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
2489
|
-
try:
|
|
2490
|
-
_response_json = _response.json()
|
|
2491
|
-
except JSONDecodeError:
|
|
2492
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2493
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2494
|
-
|
|
2495
|
-
async def delete_parse_configuration(
|
|
2496
|
-
self, config_id: str, *, project_id: typing.Optional[str] = None, organization_id: typing.Optional[str] = None
|
|
2497
|
-
) -> None:
|
|
2498
|
-
"""
|
|
2499
|
-
Delete a parse configuration.
|
|
2500
|
-
|
|
2501
|
-
Args:
|
|
2502
|
-
config_id: The ID of the parse configuration to delete
|
|
2503
|
-
project: Validated project from dependency
|
|
2504
|
-
user: Current user
|
|
2505
|
-
db: Database session
|
|
2506
|
-
|
|
2507
|
-
Parameters:
|
|
2508
|
-
- config_id: str.
|
|
2509
|
-
|
|
2510
|
-
- project_id: typing.Optional[str].
|
|
2511
|
-
|
|
2512
|
-
- organization_id: typing.Optional[str].
|
|
2513
|
-
---
|
|
2514
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
2515
|
-
|
|
2516
|
-
client = AsyncLlamaCloud(
|
|
2517
|
-
token="YOUR_TOKEN",
|
|
2518
|
-
)
|
|
2519
|
-
await client.beta.delete_parse_configuration(
|
|
2520
|
-
config_id="string",
|
|
2521
|
-
)
|
|
2522
|
-
"""
|
|
2523
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
2524
|
-
"DELETE",
|
|
2525
|
-
urllib.parse.urljoin(
|
|
2526
|
-
f"{self._client_wrapper.get_base_url()}/", f"api/v1/beta/parse-configurations/{config_id}"
|
|
2527
|
-
),
|
|
2528
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
2529
|
-
headers=self._client_wrapper.get_headers(),
|
|
2530
|
-
timeout=60,
|
|
2531
|
-
)
|
|
2532
|
-
if 200 <= _response.status_code < 300:
|
|
2533
|
-
return
|
|
2534
|
-
if _response.status_code == 422:
|
|
2535
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
2536
|
-
try:
|
|
2537
|
-
_response_json = _response.json()
|
|
2538
|
-
except JSONDecodeError:
|
|
2539
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2540
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2541
|
-
|
|
2542
|
-
async def query_parse_configurations(
|
|
2543
|
-
self,
|
|
2544
|
-
*,
|
|
2545
|
-
project_id: typing.Optional[str] = None,
|
|
2546
|
-
organization_id: typing.Optional[str] = None,
|
|
2547
|
-
page_size: typing.Optional[int] = OMIT,
|
|
2548
|
-
page_token: typing.Optional[str] = OMIT,
|
|
2549
|
-
filter: typing.Optional[ParseConfigurationFilter] = OMIT,
|
|
2550
|
-
order_by: typing.Optional[str] = OMIT,
|
|
2551
|
-
) -> ParseConfigurationQueryResponse:
|
|
2552
|
-
"""
|
|
2553
|
-
Query parse configurations with filtering and pagination.
|
|
2554
|
-
|
|
2555
|
-
Args:
|
|
2556
|
-
query_request: Query request with filters and pagination
|
|
2557
|
-
project: Validated project from dependency
|
|
2558
|
-
user: Current user
|
|
2559
|
-
db: Database session
|
|
2560
|
-
|
|
2561
|
-
Returns:
|
|
2562
|
-
Paginated response with parse configurations
|
|
2563
|
-
|
|
2564
|
-
Parameters:
|
|
2565
|
-
- project_id: typing.Optional[str].
|
|
2566
|
-
|
|
2567
|
-
- organization_id: typing.Optional[str].
|
|
2568
|
-
|
|
2569
|
-
- page_size: typing.Optional[int].
|
|
2570
|
-
|
|
2571
|
-
- page_token: typing.Optional[str].
|
|
2572
|
-
|
|
2573
|
-
- filter: typing.Optional[ParseConfigurationFilter].
|
|
2574
|
-
|
|
2575
|
-
- order_by: typing.Optional[str].
|
|
2576
|
-
---
|
|
2577
|
-
from llama_cloud import ParseConfigurationFilter
|
|
2578
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
2579
|
-
|
|
2580
|
-
client = AsyncLlamaCloud(
|
|
2581
|
-
token="YOUR_TOKEN",
|
|
2582
|
-
)
|
|
2583
|
-
await client.beta.query_parse_configurations(
|
|
2584
|
-
filter=ParseConfigurationFilter(),
|
|
2585
|
-
)
|
|
2586
|
-
"""
|
|
2587
|
-
_request: typing.Dict[str, typing.Any] = {}
|
|
2588
|
-
if page_size is not OMIT:
|
|
2589
|
-
_request["page_size"] = page_size
|
|
2590
|
-
if page_token is not OMIT:
|
|
2591
|
-
_request["page_token"] = page_token
|
|
2592
|
-
if filter is not OMIT:
|
|
2593
|
-
_request["filter"] = filter
|
|
2594
|
-
if order_by is not OMIT:
|
|
2595
|
-
_request["order_by"] = order_by
|
|
2596
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
2597
|
-
"POST",
|
|
2598
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/parse-configurations/query"),
|
|
2599
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
2600
|
-
json=jsonable_encoder(_request),
|
|
2601
|
-
headers=self._client_wrapper.get_headers(),
|
|
2602
|
-
timeout=60,
|
|
2603
|
-
)
|
|
2604
|
-
if 200 <= _response.status_code < 300:
|
|
2605
|
-
return pydantic.parse_obj_as(ParseConfigurationQueryResponse, _response.json()) # type: ignore
|
|
2606
|
-
if _response.status_code == 422:
|
|
2607
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
2608
|
-
try:
|
|
2609
|
-
_response_json = _response.json()
|
|
2610
|
-
except JSONDecodeError:
|
|
2611
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2612
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2613
|
-
|
|
2614
|
-
async def get_latest_parse_configuration(
|
|
2615
|
-
self,
|
|
2616
|
-
*,
|
|
2617
|
-
creator: typing.Optional[str] = None,
|
|
2618
|
-
project_id: typing.Optional[str] = None,
|
|
2619
|
-
organization_id: typing.Optional[str] = None,
|
|
2620
|
-
) -> typing.Optional[ParseConfiguration]:
|
|
2621
|
-
"""
|
|
2622
|
-
Get the latest parse configuration for the current project.
|
|
2623
|
-
|
|
2624
|
-
Args:
|
|
2625
|
-
project: Validated project from dependency
|
|
2626
|
-
user: Current user
|
|
2627
|
-
db: Database session
|
|
2628
|
-
creator: Optional creator filter
|
|
2629
|
-
|
|
2630
|
-
Returns:
|
|
2631
|
-
The latest parse configuration or None if not found
|
|
2632
|
-
|
|
2633
|
-
Parameters:
|
|
2634
|
-
- creator: typing.Optional[str].
|
|
2635
|
-
|
|
2636
|
-
- project_id: typing.Optional[str].
|
|
2637
|
-
|
|
2638
|
-
- organization_id: typing.Optional[str].
|
|
2639
|
-
---
|
|
2640
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
2641
|
-
|
|
2642
|
-
client = AsyncLlamaCloud(
|
|
2643
|
-
token="YOUR_TOKEN",
|
|
2644
|
-
)
|
|
2645
|
-
await client.beta.get_latest_parse_configuration()
|
|
2646
|
-
"""
|
|
2647
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
2648
|
-
"GET",
|
|
2649
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/beta/parse-configurations/latest"),
|
|
2650
|
-
params=remove_none_from_dict(
|
|
2651
|
-
{"creator": creator, "project_id": project_id, "organization_id": organization_id}
|
|
2652
|
-
),
|
|
2653
|
-
headers=self._client_wrapper.get_headers(),
|
|
2654
|
-
timeout=60,
|
|
2655
|
-
)
|
|
2656
|
-
if 200 <= _response.status_code < 300:
|
|
2657
|
-
return pydantic.parse_obj_as(typing.Optional[ParseConfiguration], _response.json()) # type: ignore
|
|
2658
|
-
if _response.status_code == 422:
|
|
2659
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
2660
|
-
try:
|
|
2661
|
-
_response_json = _response.json()
|
|
2662
|
-
except JSONDecodeError:
|
|
2663
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2664
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|