llama-cloud 0.1.41__py3-none-any.whl → 1.0.0b4__py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions exactly as they appear in their public registries.
- llama_cloud/__init__.py +101 -816
- llama_cloud/_base_client.py +2124 -0
- llama_cloud/_client.py +795 -0
- llama_cloud/_compat.py +219 -0
- llama_cloud/_constants.py +14 -0
- llama_cloud/_exceptions.py +108 -0
- llama_cloud/_files.py +127 -0
- llama_cloud/_models.py +872 -0
- llama_cloud/_polling.py +182 -0
- llama_cloud/_qs.py +150 -0
- llama_cloud/_resource.py +43 -0
- llama_cloud/_response.py +832 -0
- llama_cloud/_streaming.py +333 -0
- llama_cloud/_types.py +270 -0
- llama_cloud/_utils/__init__.py +64 -0
- llama_cloud/_utils/_compat.py +45 -0
- llama_cloud/_utils/_datetime_parse.py +136 -0
- llama_cloud/_utils/_logs.py +25 -0
- llama_cloud/_utils/_proxy.py +65 -0
- llama_cloud/_utils/_reflection.py +42 -0
- llama_cloud/_utils/_resources_proxy.py +24 -0
- llama_cloud/_utils/_streams.py +12 -0
- llama_cloud/_utils/_sync.py +58 -0
- llama_cloud/_utils/_transform.py +457 -0
- llama_cloud/_utils/_typing.py +156 -0
- llama_cloud/_utils/_utils.py +421 -0
- llama_cloud/_version.py +4 -0
- llama_cloud/lib/__init__.py +0 -0
- llama_cloud/lib/index/__init__.py +13 -0
- llama_cloud/lib/index/api_utils.py +300 -0
- llama_cloud/lib/index/base.py +1041 -0
- llama_cloud/lib/index/composite_retriever.py +272 -0
- llama_cloud/lib/index/retriever.py +233 -0
- llama_cloud/pagination.py +465 -0
- llama_cloud/py.typed +0 -0
- llama_cloud/resources/__init__.py +136 -107
- llama_cloud/resources/beta/__init__.py +102 -1
- llama_cloud/resources/beta/agent_data.py +1041 -0
- llama_cloud/resources/beta/batch/__init__.py +33 -0
- llama_cloud/resources/beta/batch/batch.py +664 -0
- llama_cloud/resources/beta/batch/job_items.py +348 -0
- llama_cloud/resources/beta/beta.py +262 -0
- llama_cloud/resources/beta/directories/__init__.py +33 -0
- llama_cloud/resources/beta/directories/directories.py +719 -0
- llama_cloud/resources/beta/directories/files.py +913 -0
- llama_cloud/resources/beta/parse_configurations.py +743 -0
- llama_cloud/resources/beta/sheets.py +1130 -0
- llama_cloud/resources/beta/split.py +917 -0
- llama_cloud/resources/classifier/__init__.py +32 -1
- llama_cloud/resources/classifier/classifier.py +588 -0
- llama_cloud/resources/classifier/jobs.py +563 -0
- llama_cloud/resources/data_sinks.py +579 -0
- llama_cloud/resources/data_sources.py +651 -0
- llama_cloud/resources/extraction/__init__.py +61 -0
- llama_cloud/resources/extraction/extraction.py +609 -0
- llama_cloud/resources/extraction/extraction_agents/__init__.py +33 -0
- llama_cloud/resources/extraction/extraction_agents/extraction_agents.py +633 -0
- llama_cloud/resources/extraction/extraction_agents/schema.py +308 -0
- llama_cloud/resources/extraction/jobs.py +1106 -0
- llama_cloud/resources/extraction/runs.py +498 -0
- llama_cloud/resources/files.py +784 -0
- llama_cloud/resources/parsing.py +1296 -0
- llama_cloud/resources/pipelines/__init__.py +98 -24
- llama_cloud/resources/pipelines/data_sources.py +529 -0
- llama_cloud/resources/pipelines/documents.py +810 -0
- llama_cloud/resources/pipelines/files.py +682 -0
- llama_cloud/resources/pipelines/images.py +513 -0
- llama_cloud/resources/pipelines/metadata.py +265 -0
- llama_cloud/resources/pipelines/pipelines.py +1525 -0
- llama_cloud/resources/pipelines/sync.py +243 -0
- llama_cloud/resources/projects.py +276 -0
- llama_cloud/resources/retrievers/__init__.py +32 -1
- llama_cloud/resources/retrievers/retriever.py +238 -0
- llama_cloud/resources/retrievers/retrievers.py +920 -0
- llama_cloud/types/__init__.py +171 -721
- llama_cloud/types/advanced_mode_transform_config.py +102 -38
- llama_cloud/types/advanced_mode_transform_config_param.py +102 -0
- llama_cloud/types/auto_transform_config.py +11 -25
- llama_cloud/types/auto_transform_config_param.py +17 -0
- llama_cloud/types/azure_openai_embedding.py +62 -0
- llama_cloud/types/azure_openai_embedding_config.py +17 -0
- llama_cloud/types/azure_openai_embedding_config_param.py +17 -0
- llama_cloud/types/azure_openai_embedding_param.py +61 -0
- llama_cloud/types/b_box.py +37 -0
- llama_cloud/types/bedrock_embedding.py +49 -46
- llama_cloud/types/bedrock_embedding_config.py +10 -27
- llama_cloud/types/bedrock_embedding_config_param.py +17 -0
- llama_cloud/types/bedrock_embedding_param.py +48 -0
- llama_cloud/types/beta/__init__.py +59 -0
- llama_cloud/types/beta/agent_data.py +26 -0
- llama_cloud/types/beta/agent_data_agent_data_params.py +20 -0
- llama_cloud/types/beta/agent_data_aggregate_params.py +79 -0
- llama_cloud/types/beta/agent_data_aggregate_response.py +17 -0
- llama_cloud/types/beta/agent_data_delete_by_query_params.py +43 -0
- llama_cloud/types/beta/agent_data_delete_by_query_response.py +11 -0
- llama_cloud/types/beta/agent_data_delete_params.py +14 -0
- llama_cloud/types/beta/agent_data_delete_response.py +8 -0
- llama_cloud/types/beta/agent_data_get_params.py +14 -0
- llama_cloud/types/beta/agent_data_search_params.py +69 -0
- llama_cloud/types/beta/agent_data_update_params.py +16 -0
- llama_cloud/types/beta/batch/__init__.py +12 -0
- llama_cloud/types/beta/batch/job_item_get_processing_results_params.py +17 -0
- llama_cloud/types/beta/batch/job_item_get_processing_results_response.py +409 -0
- llama_cloud/types/beta/batch/job_item_list_params.py +23 -0
- llama_cloud/types/beta/batch/job_item_list_response.py +42 -0
- llama_cloud/types/beta/batch_cancel_params.py +21 -0
- llama_cloud/types/beta/batch_cancel_response.py +23 -0
- llama_cloud/types/beta/batch_create_params.py +399 -0
- llama_cloud/types/beta/batch_create_response.py +63 -0
- llama_cloud/types/beta/batch_get_status_params.py +14 -0
- llama_cloud/types/beta/batch_get_status_response.py +73 -0
- llama_cloud/types/beta/batch_list_params.py +29 -0
- llama_cloud/types/beta/batch_list_response.py +63 -0
- llama_cloud/types/beta/directories/__init__.py +15 -0
- llama_cloud/types/beta/directories/file_add_params.py +26 -0
- llama_cloud/types/beta/directories/file_add_response.py +42 -0
- llama_cloud/types/beta/directories/file_delete_params.py +16 -0
- llama_cloud/types/beta/directories/file_get_params.py +16 -0
- llama_cloud/types/beta/directories/file_get_response.py +42 -0
- llama_cloud/types/beta/directories/file_list_params.py +28 -0
- llama_cloud/types/beta/directories/file_list_response.py +42 -0
- llama_cloud/types/beta/directories/file_update_params.py +27 -0
- llama_cloud/types/beta/directories/file_update_response.py +42 -0
- llama_cloud/types/beta/directories/file_upload_params.py +24 -0
- llama_cloud/types/beta/directories/file_upload_response.py +42 -0
- llama_cloud/types/beta/directory_create_params.py +23 -0
- llama_cloud/types/beta/directory_create_response.py +36 -0
- llama_cloud/types/beta/directory_delete_params.py +14 -0
- llama_cloud/types/beta/directory_get_params.py +14 -0
- llama_cloud/types/beta/directory_get_response.py +36 -0
- llama_cloud/types/beta/directory_list_params.py +24 -0
- llama_cloud/types/beta/directory_list_response.py +36 -0
- llama_cloud/types/beta/directory_update_params.py +20 -0
- llama_cloud/types/beta/directory_update_response.py +36 -0
- llama_cloud/types/beta/parse_configuration.py +40 -0
- llama_cloud/types/beta/parse_configuration_create_params.py +34 -0
- llama_cloud/types/beta/parse_configuration_delete_params.py +14 -0
- llama_cloud/types/beta/parse_configuration_get_params.py +14 -0
- llama_cloud/types/beta/parse_configuration_list_params.py +24 -0
- llama_cloud/types/beta/parse_configuration_query_response.py +28 -0
- llama_cloud/types/beta/parse_configuration_update_params.py +22 -0
- llama_cloud/types/beta/sheet_create_params.py +22 -0
- llama_cloud/types/beta/sheet_delete_job_params.py +14 -0
- llama_cloud/types/beta/sheet_get_params.py +16 -0
- llama_cloud/types/beta/sheet_get_result_table_params.py +20 -0
- llama_cloud/types/beta/sheet_list_params.py +20 -0
- llama_cloud/types/beta/sheets_job.py +88 -0
- llama_cloud/types/beta/sheets_parsing_config.py +49 -0
- llama_cloud/types/beta/sheets_parsing_config_param.py +51 -0
- llama_cloud/types/beta/split_category.py +17 -0
- llama_cloud/types/beta/split_category_param.py +18 -0
- llama_cloud/types/beta/split_create_params.py +36 -0
- llama_cloud/types/beta/split_create_response.py +48 -0
- llama_cloud/types/beta/split_document_input.py +15 -0
- llama_cloud/types/beta/split_document_input_param.py +17 -0
- llama_cloud/types/beta/split_get_params.py +14 -0
- llama_cloud/types/beta/split_get_response.py +48 -0
- llama_cloud/types/beta/split_list_params.py +18 -0
- llama_cloud/types/beta/split_list_response.py +48 -0
- llama_cloud/types/beta/split_result_response.py +15 -0
- llama_cloud/types/beta/split_segment_response.py +20 -0
- llama_cloud/types/classifier/__init__.py +15 -0
- llama_cloud/types/classifier/classifier_rule.py +25 -0
- llama_cloud/types/classifier/classifier_rule_param.py +27 -0
- llama_cloud/types/classifier/classify_job.py +51 -0
- llama_cloud/types/classifier/classify_job_param.py +53 -0
- llama_cloud/types/classifier/classify_parsing_configuration.py +21 -0
- llama_cloud/types/classifier/classify_parsing_configuration_param.py +23 -0
- llama_cloud/types/classifier/job_create_params.py +30 -0
- llama_cloud/types/classifier/job_get_params.py +14 -0
- llama_cloud/types/classifier/job_get_results_params.py +14 -0
- llama_cloud/types/classifier/job_get_results_response.py +66 -0
- llama_cloud/types/classifier/job_list_params.py +18 -0
- llama_cloud/types/cohere_embedding.py +37 -40
- llama_cloud/types/cohere_embedding_config.py +10 -27
- llama_cloud/types/cohere_embedding_config_param.py +17 -0
- llama_cloud/types/cohere_embedding_param.py +36 -0
- llama_cloud/types/composite_retrieval_mode.py +4 -18
- llama_cloud/types/composite_retrieval_result.py +52 -37
- llama_cloud/types/data_sink.py +46 -39
- llama_cloud/types/data_sink_create_param.py +41 -0
- llama_cloud/types/data_sink_create_params.py +44 -0
- llama_cloud/types/data_sink_list_params.py +14 -0
- llama_cloud/types/data_sink_list_response.py +10 -0
- llama_cloud/types/data_sink_update_params.py +40 -0
- llama_cloud/types/data_source.py +67 -39
- llama_cloud/types/data_source_create_params.py +65 -0
- llama_cloud/types/data_source_list_params.py +14 -0
- llama_cloud/types/data_source_list_response.py +10 -0
- llama_cloud/types/data_source_reader_version_metadata.py +8 -27
- llama_cloud/types/data_source_update_params.py +61 -0
- llama_cloud/types/extraction/__init__.py +25 -0
- llama_cloud/types/extraction/extract_agent.py +41 -0
- llama_cloud/types/extraction/extract_config.py +118 -0
- llama_cloud/types/extraction/extract_config_param.py +118 -0
- llama_cloud/types/extraction/extract_job.py +32 -0
- llama_cloud/types/extraction/extract_run.py +64 -0
- llama_cloud/types/extraction/extraction_agent_create_params.py +25 -0
- llama_cloud/types/extraction/extraction_agent_list_params.py +17 -0
- llama_cloud/types/extraction/extraction_agent_list_response.py +10 -0
- llama_cloud/types/extraction/extraction_agent_update_params.py +18 -0
- llama_cloud/types/extraction/extraction_agents/__init__.py +8 -0
- llama_cloud/types/extraction/extraction_agents/schema_generate_schema_params.py +23 -0
- llama_cloud/types/extraction/extraction_agents/schema_generate_schema_response.py +14 -0
- llama_cloud/types/extraction/extraction_agents/schema_validate_schema_params.py +12 -0
- llama_cloud/types/extraction/extraction_agents/schema_validate_schema_response.py +13 -0
- llama_cloud/types/extraction/job_create_params.py +38 -0
- llama_cloud/types/extraction/job_file_params.py +29 -0
- llama_cloud/types/extraction/job_get_result_params.py +14 -0
- llama_cloud/types/extraction/job_get_result_response.py +27 -0
- llama_cloud/types/extraction/job_list_params.py +11 -0
- llama_cloud/types/extraction/job_list_response.py +10 -0
- llama_cloud/types/extraction/run_delete_params.py +14 -0
- llama_cloud/types/extraction/run_get_by_job_params.py +14 -0
- llama_cloud/types/extraction/run_get_params.py +14 -0
- llama_cloud/types/extraction/run_list_params.py +15 -0
- llama_cloud/types/extraction/webhook_configuration.py +43 -0
- llama_cloud/types/extraction/webhook_configuration_param.py +43 -0
- llama_cloud/types/extraction_run_params.py +45 -0
- llama_cloud/types/fail_page_mode.py +4 -26
- llama_cloud/types/file.py +48 -40
- llama_cloud/types/file_create_params.py +28 -0
- llama_cloud/types/file_create_response.py +38 -0
- llama_cloud/types/file_delete_params.py +14 -0
- llama_cloud/types/file_get_params.py +16 -0
- llama_cloud/types/file_list_params.py +40 -0
- llama_cloud/types/file_list_response.py +38 -0
- llama_cloud/types/file_query_params.py +61 -0
- llama_cloud/types/file_query_response.py +47 -27
- llama_cloud/types/gemini_embedding.py +40 -39
- llama_cloud/types/gemini_embedding_config.py +10 -27
- llama_cloud/types/gemini_embedding_config_param.py +17 -0
- llama_cloud/types/gemini_embedding_param.py +39 -0
- llama_cloud/types/hugging_face_inference_api_embedding.py +62 -46
- llama_cloud/types/hugging_face_inference_api_embedding_config.py +11 -28
- llama_cloud/types/hugging_face_inference_api_embedding_config_param.py +17 -0
- llama_cloud/types/hugging_face_inference_api_embedding_param.py +60 -0
- llama_cloud/types/list_item.py +48 -0
- llama_cloud/types/llama_parse_parameters.py +251 -130
- llama_cloud/types/llama_parse_parameters_param.py +261 -0
- llama_cloud/types/llama_parse_supported_file_extensions.py +84 -310
- llama_cloud/types/managed_ingestion_status_response.py +39 -37
- llama_cloud/types/message_role.py +4 -46
- llama_cloud/types/metadata_filters.py +45 -29
- llama_cloud/types/metadata_filters_param.py +58 -0
- llama_cloud/types/openai_embedding.py +56 -0
- llama_cloud/types/openai_embedding_config.py +17 -0
- llama_cloud/types/openai_embedding_config_param.py +17 -0
- llama_cloud/types/openai_embedding_param.py +55 -0
- llama_cloud/types/page_figure_node_with_score.py +32 -29
- llama_cloud/types/page_screenshot_node_with_score.py +23 -29
- llama_cloud/types/parsing_create_params.py +586 -0
- llama_cloud/types/parsing_create_response.py +33 -0
- llama_cloud/types/parsing_get_params.py +27 -0
- llama_cloud/types/parsing_get_response.py +364 -0
- llama_cloud/types/parsing_languages.py +94 -0
- llama_cloud/types/parsing_list_params.py +23 -0
- llama_cloud/types/parsing_list_response.py +33 -0
- llama_cloud/types/parsing_mode.py +13 -46
- llama_cloud/types/parsing_upload_file_params.py +14 -0
- llama_cloud/types/parsing_upload_file_response.py +33 -0
- llama_cloud/types/pipeline.py +180 -62
- llama_cloud/types/pipeline_create_params.py +95 -0
- llama_cloud/types/pipeline_get_status_params.py +12 -0
- llama_cloud/types/pipeline_list_params.py +23 -0
- llama_cloud/types/pipeline_list_response.py +12 -0
- llama_cloud/types/pipeline_metadata_config.py +9 -30
- llama_cloud/types/pipeline_metadata_config_param.py +17 -0
- llama_cloud/types/pipeline_retrieve_params.py +74 -0
- llama_cloud/types/pipeline_retrieve_response.py +63 -0
- llama_cloud/types/pipeline_type.py +4 -18
- llama_cloud/types/pipeline_update_params.py +90 -0
- llama_cloud/types/pipeline_upsert_params.py +95 -0
- llama_cloud/types/pipelines/__init__.py +38 -0
- llama_cloud/types/pipelines/cloud_document.py +29 -0
- llama_cloud/types/pipelines/cloud_document_create_param.py +30 -0
- llama_cloud/types/pipelines/data_source_get_data_sources_response.py +10 -0
- llama_cloud/types/pipelines/data_source_sync_params.py +16 -0
- llama_cloud/types/pipelines/data_source_update_data_sources_params.py +25 -0
- llama_cloud/types/pipelines/data_source_update_data_sources_response.py +10 -0
- llama_cloud/types/pipelines/data_source_update_params.py +15 -0
- llama_cloud/types/pipelines/document_create_params.py +14 -0
- llama_cloud/types/pipelines/document_create_response.py +10 -0
- llama_cloud/types/pipelines/document_get_chunks_response.py +10 -0
- llama_cloud/types/pipelines/document_list_params.py +22 -0
- llama_cloud/types/pipelines/document_upsert_params.py +14 -0
- llama_cloud/types/pipelines/document_upsert_response.py +10 -0
- llama_cloud/types/pipelines/file_create_params.py +22 -0
- llama_cloud/types/pipelines/file_create_response.py +10 -0
- llama_cloud/types/pipelines/file_get_status_counts_params.py +14 -0
- llama_cloud/types/pipelines/file_get_status_counts_response.py +24 -0
- llama_cloud/types/pipelines/file_list_params.py +22 -0
- llama_cloud/types/pipelines/file_update_params.py +15 -0
- llama_cloud/types/pipelines/image_get_page_figure_params.py +18 -0
- llama_cloud/types/pipelines/image_get_page_screenshot_params.py +16 -0
- llama_cloud/types/pipelines/image_list_page_figures_params.py +14 -0
- llama_cloud/types/pipelines/image_list_page_figures_response.py +34 -0
- llama_cloud/types/pipelines/image_list_page_screenshots_params.py +14 -0
- llama_cloud/types/pipelines/image_list_page_screenshots_response.py +25 -0
- llama_cloud/types/pipelines/metadata_create_params.py +13 -0
- llama_cloud/types/pipelines/metadata_create_response.py +8 -0
- llama_cloud/types/pipelines/pipeline_data_source.py +96 -0
- llama_cloud/types/pipelines/pipeline_file.py +70 -0
- llama_cloud/types/pipelines/text_node.py +89 -0
- llama_cloud/types/preset_retrieval_params.py +61 -49
- llama_cloud/types/preset_retrieval_params_param.py +71 -0
- llama_cloud/types/presigned_url.py +13 -29
- llama_cloud/types/project.py +24 -36
- llama_cloud/types/project_get_params.py +12 -0
- llama_cloud/types/project_list_params.py +14 -0
- llama_cloud/types/project_list_response.py +10 -0
- llama_cloud/types/re_rank_config_param.py +18 -0
- llama_cloud/types/retrieval_mode.py +4 -26
- llama_cloud/types/retriever.py +31 -38
- llama_cloud/types/retriever_create_params.py +26 -0
- llama_cloud/types/retriever_get_params.py +14 -0
- llama_cloud/types/retriever_list_params.py +16 -0
- llama_cloud/types/retriever_list_response.py +12 -0
- llama_cloud/types/retriever_pipeline.py +26 -34
- llama_cloud/types/retriever_pipeline_param.py +28 -0
- llama_cloud/types/retriever_search_params.py +38 -0
- llama_cloud/types/retriever_update_params.py +19 -0
- llama_cloud/types/retriever_upsert_params.py +26 -0
- llama_cloud/types/retrievers/__init__.py +5 -0
- llama_cloud/types/retrievers/retriever_search_params.py +32 -0
- llama_cloud/types/shared/__init__.py +21 -0
- llama_cloud/types/shared/cloud_astra_db_vector_store.py +39 -0
- llama_cloud/types/shared/cloud_az_storage_blob_data_source.py +34 -0
- llama_cloud/types/shared/cloud_azure_ai_search_vector_store.py +30 -0
- llama_cloud/types/shared/cloud_box_data_source.py +31 -0
- llama_cloud/types/shared/cloud_confluence_data_source.py +53 -0
- llama_cloud/types/shared/cloud_jira_data_source.py +30 -0
- llama_cloud/types/shared/cloud_jira_data_source_v2.py +49 -0
- llama_cloud/types/shared/cloud_milvus_vector_store.py +21 -0
- llama_cloud/types/shared/cloud_mongodb_atlas_vector_search.py +36 -0
- llama_cloud/types/shared/cloud_notion_page_data_source.py +19 -0
- llama_cloud/types/shared/cloud_one_drive_data_source.py +32 -0
- llama_cloud/types/shared/cloud_pinecone_vector_store.py +32 -0
- llama_cloud/types/shared/cloud_postgres_vector_store.py +35 -0
- llama_cloud/types/shared/cloud_qdrant_vector_store.py +35 -0
- llama_cloud/types/shared/cloud_s3_data_source.py +28 -0
- llama_cloud/types/shared/cloud_sharepoint_data_source.py +55 -0
- llama_cloud/types/shared/cloud_slack_data_source.py +31 -0
- llama_cloud/types/shared/failure_handling_config.py +16 -0
- llama_cloud/types/shared/pg_vector_hnsw_settings.py +27 -0
- llama_cloud/types/shared_params/__init__.py +21 -0
- llama_cloud/types/shared_params/cloud_astra_db_vector_store.py +42 -0
- llama_cloud/types/shared_params/cloud_az_storage_blob_data_source.py +41 -0
- llama_cloud/types/shared_params/cloud_azure_ai_search_vector_store.py +34 -0
- llama_cloud/types/shared_params/cloud_box_data_source.py +40 -0
- llama_cloud/types/shared_params/cloud_confluence_data_source.py +58 -0
- llama_cloud/types/shared_params/cloud_jira_data_source.py +34 -0
- llama_cloud/types/shared_params/cloud_jira_data_source_v2.py +54 -0
- llama_cloud/types/shared_params/cloud_milvus_vector_store.py +24 -0
- llama_cloud/types/shared_params/cloud_mongodb_atlas_vector_search.py +39 -0
- llama_cloud/types/shared_params/cloud_notion_page_data_source.py +23 -0
- llama_cloud/types/shared_params/cloud_one_drive_data_source.py +37 -0
- llama_cloud/types/shared_params/cloud_pinecone_vector_store.py +35 -0
- llama_cloud/types/shared_params/cloud_postgres_vector_store.py +39 -0
- llama_cloud/types/shared_params/cloud_qdrant_vector_store.py +37 -0
- llama_cloud/types/shared_params/cloud_s3_data_source.py +32 -0
- llama_cloud/types/shared_params/cloud_sharepoint_data_source.py +60 -0
- llama_cloud/types/shared_params/cloud_slack_data_source.py +35 -0
- llama_cloud/types/shared_params/failure_handling_config.py +16 -0
- llama_cloud/types/shared_params/pg_vector_hnsw_settings.py +26 -0
- llama_cloud/types/sparse_model_config.py +16 -30
- llama_cloud/types/sparse_model_config_param.py +25 -0
- llama_cloud/types/status_enum.py +4 -34
- llama_cloud/types/vertex_ai_embedding_config.py +10 -27
- llama_cloud/types/vertex_ai_embedding_config_param.py +17 -0
- llama_cloud/types/vertex_text_embedding.py +47 -45
- llama_cloud/types/vertex_text_embedding_param.py +45 -0
- llama_cloud-1.0.0b4.dist-info/METADATA +546 -0
- llama_cloud-1.0.0b4.dist-info/RECORD +376 -0
- {llama_cloud-0.1.41.dist-info → llama_cloud-1.0.0b4.dist-info}/WHEEL +1 -1
- llama_cloud-1.0.0b4.dist-info/licenses/LICENSE +7 -0
- llama_cloud/client.py +0 -108
- llama_cloud/core/__init__.py +0 -17
- llama_cloud/core/api_error.py +0 -15
- llama_cloud/core/client_wrapper.py +0 -51
- llama_cloud/core/datetime_utils.py +0 -28
- llama_cloud/core/jsonable_encoder.py +0 -106
- llama_cloud/core/remove_none_from_dict.py +0 -11
- llama_cloud/environment.py +0 -7
- llama_cloud/errors/__init__.py +0 -5
- llama_cloud/errors/unprocessable_entity_error.py +0 -9
- llama_cloud/resources/admin/__init__.py +0 -2
- llama_cloud/resources/admin/client.py +0 -196
- llama_cloud/resources/agent_deployments/__init__.py +0 -2
- llama_cloud/resources/agent_deployments/client.py +0 -160
- llama_cloud/resources/alpha/__init__.py +0 -2
- llama_cloud/resources/alpha/client.py +0 -112
- llama_cloud/resources/beta/client.py +0 -2664
- llama_cloud/resources/chat_apps/__init__.py +0 -2
- llama_cloud/resources/chat_apps/client.py +0 -616
- llama_cloud/resources/classifier/client.py +0 -444
- llama_cloud/resources/data_sinks/__init__.py +0 -5
- llama_cloud/resources/data_sinks/client.py +0 -535
- llama_cloud/resources/data_sinks/types/__init__.py +0 -5
- llama_cloud/resources/data_sinks/types/data_sink_update_component.py +0 -22
- llama_cloud/resources/data_sources/__init__.py +0 -5
- llama_cloud/resources/data_sources/client.py +0 -548
- llama_cloud/resources/data_sources/types/__init__.py +0 -6
- llama_cloud/resources/data_sources/types/data_source_update_component.py +0 -28
- llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py +0 -7
- llama_cloud/resources/embedding_model_configs/__init__.py +0 -23
- llama_cloud/resources/embedding_model_configs/client.py +0 -420
- llama_cloud/resources/embedding_model_configs/types/__init__.py +0 -23
- llama_cloud/resources/embedding_model_configs/types/embedding_model_config_create_embedding_config.py +0 -89
- llama_cloud/resources/evals/__init__.py +0 -2
- llama_cloud/resources/evals/client.py +0 -85
- llama_cloud/resources/files/__init__.py +0 -5
- llama_cloud/resources/files/client.py +0 -1454
- llama_cloud/resources/files/types/__init__.py +0 -5
- llama_cloud/resources/files/types/file_create_from_url_resource_info_value.py +0 -7
- llama_cloud/resources/jobs/__init__.py +0 -2
- llama_cloud/resources/jobs/client.py +0 -164
- llama_cloud/resources/llama_extract/__init__.py +0 -27
- llama_cloud/resources/llama_extract/client.py +0 -2082
- llama_cloud/resources/llama_extract/types/__init__.py +0 -25
- llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema.py +0 -9
- llama_cloud/resources/llama_extract/types/extract_agent_create_data_schema_zero_value.py +0 -7
- llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema.py +0 -9
- llama_cloud/resources/llama_extract/types/extract_agent_update_data_schema_zero_value.py +0 -7
- llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override.py +0 -9
- llama_cloud/resources/llama_extract/types/extract_job_create_batch_data_schema_override_zero_value.py +0 -7
- llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema.py +0 -9
- llama_cloud/resources/llama_extract/types/extract_schema_validate_request_data_schema_zero_value.py +0 -7
- llama_cloud/resources/llama_extract/types/extract_stateless_request_data_schema.py +0 -9
- llama_cloud/resources/llama_extract/types/extract_stateless_request_data_schema_zero_value.py +0 -7
- llama_cloud/resources/organizations/__init__.py +0 -2
- llama_cloud/resources/organizations/client.py +0 -1448
- llama_cloud/resources/parsing/__init__.py +0 -2
- llama_cloud/resources/parsing/client.py +0 -2392
- llama_cloud/resources/pipelines/client.py +0 -3436
- llama_cloud/resources/pipelines/types/__init__.py +0 -29
- llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py +0 -7
- llama_cloud/resources/pipelines/types/pipeline_update_embedding_config.py +0 -89
- llama_cloud/resources/pipelines/types/pipeline_update_transform_config.py +0 -8
- llama_cloud/resources/pipelines/types/retrieval_params_search_filters_inference_schema_value.py +0 -7
- llama_cloud/resources/projects/__init__.py +0 -2
- llama_cloud/resources/projects/client.py +0 -636
- llama_cloud/resources/retrievers/client.py +0 -837
- llama_cloud/resources/users/__init__.py +0 -2
- llama_cloud/resources/users/client.py +0 -155
- llama_cloud/types/advanced_mode_transform_config_chunking_config.py +0 -67
- llama_cloud/types/advanced_mode_transform_config_segmentation_config.py +0 -45
- llama_cloud/types/agent_data.py +0 -40
- llama_cloud/types/agent_deployment_list.py +0 -32
- llama_cloud/types/agent_deployment_summary.py +0 -39
- llama_cloud/types/aggregate_group.py +0 -37
- llama_cloud/types/azure_open_ai_embedding.py +0 -49
- llama_cloud/types/azure_open_ai_embedding_config.py +0 -34
- llama_cloud/types/base_plan.py +0 -53
- llama_cloud/types/base_plan_metronome_plan_type.py +0 -17
- llama_cloud/types/base_plan_name.py +0 -57
- llama_cloud/types/base_plan_plan_frequency.py +0 -25
- llama_cloud/types/batch.py +0 -47
- llama_cloud/types/batch_item.py +0 -40
- llama_cloud/types/batch_paginated_list.py +0 -35
- llama_cloud/types/batch_public_output.py +0 -36
- llama_cloud/types/billing_period.py +0 -32
- llama_cloud/types/box_auth_mechanism.py +0 -17
- llama_cloud/types/character_chunking_config.py +0 -32
- llama_cloud/types/chat_app.py +0 -46
- llama_cloud/types/chat_app_response.py +0 -43
- llama_cloud/types/chat_data.py +0 -35
- llama_cloud/types/chat_message.py +0 -43
- llama_cloud/types/chunk_mode.py +0 -29
- llama_cloud/types/classification_result.py +0 -39
- llama_cloud/types/classifier_rule.py +0 -43
- llama_cloud/types/classify_job.py +0 -47
- llama_cloud/types/classify_job_results.py +0 -38
- llama_cloud/types/classify_parsing_configuration.py +0 -38
- llama_cloud/types/cloud_astra_db_vector_store.py +0 -51
- llama_cloud/types/cloud_az_storage_blob_data_source.py +0 -41
- llama_cloud/types/cloud_azure_ai_search_vector_store.py +0 -45
- llama_cloud/types/cloud_box_data_source.py +0 -42
- llama_cloud/types/cloud_confluence_data_source.py +0 -59
- llama_cloud/types/cloud_document.py +0 -40
- llama_cloud/types/cloud_document_create.py +0 -40
- llama_cloud/types/cloud_jira_data_source.py +0 -42
- llama_cloud/types/cloud_jira_data_source_v_2.py +0 -52
- llama_cloud/types/cloud_jira_data_source_v_2_api_version.py +0 -21
- llama_cloud/types/cloud_milvus_vector_store.py +0 -40
- llama_cloud/types/cloud_mongo_db_atlas_vector_search.py +0 -52
- llama_cloud/types/cloud_notion_page_data_source.py +0 -35
- llama_cloud/types/cloud_one_drive_data_source.py +0 -39
- llama_cloud/types/cloud_pinecone_vector_store.py +0 -49
- llama_cloud/types/cloud_postgres_vector_store.py +0 -44
- llama_cloud/types/cloud_qdrant_vector_store.py +0 -51
- llama_cloud/types/cloud_s_3_data_source.py +0 -39
- llama_cloud/types/cloud_sharepoint_data_source.py +0 -42
- llama_cloud/types/cloud_slack_data_source.py +0 -39
- llama_cloud/types/composite_retrieved_text_node.py +0 -42
- llama_cloud/types/composite_retrieved_text_node_with_score.py +0 -34
- llama_cloud/types/configurable_data_sink_names.py +0 -41
- llama_cloud/types/configurable_data_source_names.py +0 -57
- llama_cloud/types/credit_type.py +0 -32
- llama_cloud/types/data_sink_component.py +0 -22
- llama_cloud/types/data_sink_create.py +0 -39
- llama_cloud/types/data_sink_create_component.py +0 -22
- llama_cloud/types/data_source_component.py +0 -28
- llama_cloud/types/data_source_create.py +0 -41
- llama_cloud/types/data_source_create_component.py +0 -28
- llama_cloud/types/data_source_create_custom_metadata_value.py +0 -7
- llama_cloud/types/data_source_custom_metadata_value.py +0 -7
- llama_cloud/types/data_source_reader_version_metadata_reader_version.py +0 -25
- llama_cloud/types/data_source_update_dispatcher_config.py +0 -38
- llama_cloud/types/delete_params.py +0 -39
- llama_cloud/types/document_chunk_mode.py +0 -17
- llama_cloud/types/document_ingestion_job_params.py +0 -43
- llama_cloud/types/element_segmentation_config.py +0 -29
- llama_cloud/types/embedding_model_config.py +0 -43
- llama_cloud/types/embedding_model_config_embedding_config.py +0 -89
- llama_cloud/types/embedding_model_config_update.py +0 -33
- llama_cloud/types/embedding_model_config_update_embedding_config.py +0 -89
- llama_cloud/types/eval_execution_params.py +0 -41
- llama_cloud/types/extract_agent.py +0 -48
- llama_cloud/types/extract_agent_data_schema_value.py +0 -5
- llama_cloud/types/extract_config.py +0 -66
- llama_cloud/types/extract_config_priority.py +0 -29
- llama_cloud/types/extract_job.py +0 -38
- llama_cloud/types/extract_job_create.py +0 -46
- llama_cloud/types/extract_job_create_data_schema_override.py +0 -9
- llama_cloud/types/extract_job_create_data_schema_override_zero_value.py +0 -7
- llama_cloud/types/extract_job_create_priority.py +0 -29
- llama_cloud/types/extract_mode.py +0 -29
- llama_cloud/types/extract_models.py +0 -53
- llama_cloud/types/extract_resultset.py +0 -42
- llama_cloud/types/extract_resultset_data.py +0 -11
- llama_cloud/types/extract_resultset_data_item_value.py +0 -7
- llama_cloud/types/extract_resultset_data_zero_value.py +0 -7
- llama_cloud/types/extract_resultset_extraction_metadata_value.py +0 -7
- llama_cloud/types/extract_run.py +0 -55
- llama_cloud/types/extract_run_data.py +0 -11
- llama_cloud/types/extract_run_data_item_value.py +0 -5
- llama_cloud/types/extract_run_data_schema_value.py +0 -5
- llama_cloud/types/extract_run_data_zero_value.py +0 -5
- llama_cloud/types/extract_run_extraction_metadata_value.py +0 -7
- llama_cloud/types/extract_schema_generate_response.py +0 -38
- llama_cloud/types/extract_schema_generate_response_data_schema_value.py +0 -7
- llama_cloud/types/extract_schema_validate_response.py +0 -32
- llama_cloud/types/extract_schema_validate_response_data_schema_value.py +0 -7
- llama_cloud/types/extract_state.py +0 -29
- llama_cloud/types/extract_target.py +0 -17
- llama_cloud/types/failure_handling_config.py +0 -37
- llama_cloud/types/file_classification.py +0 -41
- llama_cloud/types/file_count_by_status_response.py +0 -37
- llama_cloud/types/file_create.py +0 -41
- llama_cloud/types/file_create_permission_info_value.py +0 -7
- llama_cloud/types/file_create_resource_info_value.py +0 -5
- llama_cloud/types/file_data.py +0 -36
- llama_cloud/types/file_filter.py +0 -40
- llama_cloud/types/file_id_presigned_url.py +0 -38
- llama_cloud/types/file_parse_public.py +0 -36
- llama_cloud/types/file_permission_info_value.py +0 -5
- llama_cloud/types/file_resource_info_value.py +0 -5
- llama_cloud/types/file_store_info_response.py +0 -34
- llama_cloud/types/file_store_info_response_status.py +0 -25
- llama_cloud/types/filter_condition.py +0 -29
- llama_cloud/types/filter_operation.py +0 -46
- llama_cloud/types/filter_operation_eq.py +0 -6
- llama_cloud/types/filter_operation_gt.py +0 -6
- llama_cloud/types/filter_operation_gte.py +0 -6
- llama_cloud/types/filter_operation_includes_item.py +0 -6
- llama_cloud/types/filter_operation_lt.py +0 -6
- llama_cloud/types/filter_operation_lte.py +0 -6
- llama_cloud/types/filter_operator.py +0 -73
- llama_cloud/types/free_credits_usage.py +0 -34
- llama_cloud/types/http_validation_error.py +0 -32
- llama_cloud/types/hugging_face_inference_api_embedding_token.py +0 -5
- llama_cloud/types/ingestion_error_response.py +0 -34
- llama_cloud/types/input_message.py +0 -40
- llama_cloud/types/job_name_mapping.py +0 -49
- llama_cloud/types/job_names.py +0 -81
- llama_cloud/types/job_record.py +0 -58
- llama_cloud/types/job_record_parameters.py +0 -111
- llama_cloud/types/job_record_with_usage_metrics.py +0 -36
- llama_cloud/types/l_lama_parse_transform_config.py +0 -37
- llama_cloud/types/legacy_parse_job_config.py +0 -207
- llama_cloud/types/license_info_response.py +0 -34
- llama_cloud/types/llama_extract_feature_availability.py +0 -34
- llama_cloud/types/llama_extract_mode_availability.py +0 -38
- llama_cloud/types/llama_extract_mode_availability_status.py +0 -17
- llama_cloud/types/llama_extract_settings.py +0 -67
- llama_cloud/types/llama_parse_parameters_priority.py +0 -29
- llama_cloud/types/llm_model_data.py +0 -38
- llama_cloud/types/llm_parameters.py +0 -39
- llama_cloud/types/load_files_job_config.py +0 -35
- llama_cloud/types/managed_ingestion_status.py +0 -41
- llama_cloud/types/managed_open_ai_embedding.py +0 -36
- llama_cloud/types/managed_open_ai_embedding_config.py +0 -34
- llama_cloud/types/message_annotation.py +0 -33
- llama_cloud/types/metadata_filter.py +0 -44
- llama_cloud/types/metadata_filter_value.py +0 -5
- llama_cloud/types/metadata_filters_filters_item.py +0 -8
- llama_cloud/types/multimodal_parse_resolution.py +0 -17
- llama_cloud/types/node_relationship.py +0 -44
- llama_cloud/types/none_chunking_config.py +0 -29
- llama_cloud/types/none_segmentation_config.py +0 -29
- llama_cloud/types/object_type.py +0 -33
- llama_cloud/types/open_ai_embedding.py +0 -47
- llama_cloud/types/open_ai_embedding_config.py +0 -34
- llama_cloud/types/organization.py +0 -43
- llama_cloud/types/organization_create.py +0 -35
- llama_cloud/types/page_figure_metadata.py +0 -37
- llama_cloud/types/page_screenshot_metadata.py +0 -34
- llama_cloud/types/page_segmentation_config.py +0 -31
- llama_cloud/types/paginated_extract_runs_response.py +0 -39
- llama_cloud/types/paginated_jobs_history_with_metrics.py +0 -35
- llama_cloud/types/paginated_list_cloud_documents_response.py +0 -35
- llama_cloud/types/paginated_list_pipeline_files_response.py +0 -35
- llama_cloud/types/paginated_response_agent_data.py +0 -34
- llama_cloud/types/paginated_response_aggregate_group.py +0 -34
- llama_cloud/types/paginated_response_classify_job.py +0 -34
- llama_cloud/types/paginated_response_quota_configuration.py +0 -36
- llama_cloud/types/parse_configuration.py +0 -44
- llama_cloud/types/parse_configuration_create.py +0 -41
- llama_cloud/types/parse_configuration_filter.py +0 -40
- llama_cloud/types/parse_configuration_query_response.py +0 -38
- llama_cloud/types/parse_job_config.py +0 -149
- llama_cloud/types/parse_job_config_priority.py +0 -29
- llama_cloud/types/parse_plan_level.py +0 -21
- llama_cloud/types/parser_languages.py +0 -361
- llama_cloud/types/parsing_history_item.py +0 -39
- llama_cloud/types/parsing_job.py +0 -35
- llama_cloud/types/parsing_job_json_result.py +0 -32
- llama_cloud/types/parsing_job_markdown_result.py +0 -32
- llama_cloud/types/parsing_job_structured_result.py +0 -32
- llama_cloud/types/parsing_job_text_result.py +0 -32
- llama_cloud/types/partition_names.py +0 -45
- llama_cloud/types/permission.py +0 -40
- llama_cloud/types/pg_vector_distance_method.py +0 -43
- llama_cloud/types/pg_vector_hnsw_settings.py +0 -45
- llama_cloud/types/pg_vector_vector_type.py +0 -35
- llama_cloud/types/pipeline_configuration_hashes.py +0 -37
- llama_cloud/types/pipeline_create.py +0 -65
- llama_cloud/types/pipeline_create_embedding_config.py +0 -89
- llama_cloud/types/pipeline_create_transform_config.py +0 -8
- llama_cloud/types/pipeline_data_source.py +0 -55
- llama_cloud/types/pipeline_data_source_component.py +0 -28
- llama_cloud/types/pipeline_data_source_create.py +0 -36
- llama_cloud/types/pipeline_data_source_custom_metadata_value.py +0 -7
- llama_cloud/types/pipeline_data_source_status.py +0 -33
- llama_cloud/types/pipeline_deployment.py +0 -37
- llama_cloud/types/pipeline_embedding_config.py +0 -100
- llama_cloud/types/pipeline_file.py +0 -58
- llama_cloud/types/pipeline_file_config_hash_value.py +0 -5
- llama_cloud/types/pipeline_file_create.py +0 -37
- llama_cloud/types/pipeline_file_create_custom_metadata_value.py +0 -7
- llama_cloud/types/pipeline_file_custom_metadata_value.py +0 -7
- llama_cloud/types/pipeline_file_permission_info_value.py +0 -7
- llama_cloud/types/pipeline_file_resource_info_value.py +0 -7
- llama_cloud/types/pipeline_file_status.py +0 -33
- llama_cloud/types/pipeline_file_update_dispatcher_config.py +0 -38
- llama_cloud/types/pipeline_file_updater_config.py +0 -44
- llama_cloud/types/pipeline_managed_ingestion_job_params.py +0 -37
- llama_cloud/types/pipeline_status.py +0 -17
- llama_cloud/types/pipeline_transform_config.py +0 -31
- llama_cloud/types/plan_limits.py +0 -53
- llama_cloud/types/playground_session.py +0 -51
- llama_cloud/types/pooling.py +0 -29
- llama_cloud/types/preset_composite_retrieval_params.py +0 -37
- llama_cloud/types/preset_retrieval_params_search_filters_inference_schema_value.py +0 -7
- llama_cloud/types/project_create.py +0 -35
- llama_cloud/types/prompt_conf.py +0 -38
- llama_cloud/types/public_model_name.py +0 -97
- llama_cloud/types/quota_configuration.py +0 -53
- llama_cloud/types/quota_configuration_configuration_type.py +0 -33
- llama_cloud/types/quota_configuration_status.py +0 -21
- llama_cloud/types/quota_rate_limit_configuration_value.py +0 -38
- llama_cloud/types/quota_rate_limit_configuration_value_denominator_units.py +0 -29
- llama_cloud/types/re_rank_config.py +0 -35
- llama_cloud/types/re_ranker_type.py +0 -41
- llama_cloud/types/recurring_credit_grant.py +0 -44
- llama_cloud/types/related_node_info.py +0 -36
- llama_cloud/types/related_node_info_node_type.py +0 -7
- llama_cloud/types/retrieve_results.py +0 -56
- llama_cloud/types/retriever_create.py +0 -37
- llama_cloud/types/role.py +0 -40
- llama_cloud/types/schema_generation_availability.py +0 -33
- llama_cloud/types/schema_generation_availability_status.py +0 -17
- llama_cloud/types/schema_relax_mode.py +0 -25
- llama_cloud/types/semantic_chunking_config.py +0 -32
- llama_cloud/types/sentence_chunking_config.py +0 -34
- llama_cloud/types/sparse_model_type.py +0 -33
- llama_cloud/types/struct_mode.py +0 -33
- llama_cloud/types/struct_parse_conf.py +0 -63
- llama_cloud/types/supported_llm_model.py +0 -40
- llama_cloud/types/supported_llm_model_names.py +0 -69
- llama_cloud/types/text_node.py +0 -67
- llama_cloud/types/text_node_relationships_value.py +0 -7
- llama_cloud/types/text_node_with_score.py +0 -39
- llama_cloud/types/token_chunking_config.py +0 -33
- llama_cloud/types/update_user_response.py +0 -33
- llama_cloud/types/usage_and_plan.py +0 -34
- llama_cloud/types/usage_metric_response.py +0 -34
- llama_cloud/types/usage_response.py +0 -43
- llama_cloud/types/usage_response_active_alerts_item.py +0 -37
- llama_cloud/types/user_job_record.py +0 -32
- llama_cloud/types/user_organization.py +0 -47
- llama_cloud/types/user_organization_create.py +0 -38
- llama_cloud/types/user_organization_delete.py +0 -37
- llama_cloud/types/user_organization_role.py +0 -42
- llama_cloud/types/user_summary.py +0 -38
- llama_cloud/types/validation_error.py +0 -34
- llama_cloud/types/validation_error_loc_item.py +0 -5
- llama_cloud/types/vertex_embedding_mode.py +0 -38
- llama_cloud/types/webhook_configuration.py +0 -39
- llama_cloud/types/webhook_configuration_webhook_events_item.py +0 -57
- llama_cloud-0.1.41.dist-info/LICENSE +0 -21
- llama_cloud-0.1.41.dist-info/METADATA +0 -106
- llama_cloud-0.1.41.dist-info/RECORD +0 -385
|
@@ -1,3436 +0,0 @@
|
|
|
1
|
-
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
-
|
|
3
|
-
import typing
|
|
4
|
-
import urllib.parse
|
|
5
|
-
from json.decoder import JSONDecodeError
|
|
6
|
-
|
|
7
|
-
from ...core.api_error import ApiError
|
|
8
|
-
from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
|
|
9
|
-
from ...core.jsonable_encoder import jsonable_encoder
|
|
10
|
-
from ...core.remove_none_from_dict import remove_none_from_dict
|
|
11
|
-
from ...errors.unprocessable_entity_error import UnprocessableEntityError
|
|
12
|
-
from ...types.chat_data import ChatData
|
|
13
|
-
from ...types.cloud_document import CloudDocument
|
|
14
|
-
from ...types.cloud_document_create import CloudDocumentCreate
|
|
15
|
-
from ...types.data_sink_create import DataSinkCreate
|
|
16
|
-
from ...types.eval_execution_params import EvalExecutionParams
|
|
17
|
-
from ...types.file_count_by_status_response import FileCountByStatusResponse
|
|
18
|
-
from ...types.http_validation_error import HttpValidationError
|
|
19
|
-
from ...types.input_message import InputMessage
|
|
20
|
-
from ...types.llama_parse_parameters import LlamaParseParameters
|
|
21
|
-
from ...types.managed_ingestion_status_response import ManagedIngestionStatusResponse
|
|
22
|
-
from ...types.metadata_filters import MetadataFilters
|
|
23
|
-
from ...types.paginated_list_cloud_documents_response import PaginatedListCloudDocumentsResponse
|
|
24
|
-
from ...types.paginated_list_pipeline_files_response import PaginatedListPipelineFilesResponse
|
|
25
|
-
from ...types.pipeline import Pipeline
|
|
26
|
-
from ...types.pipeline_create import PipelineCreate
|
|
27
|
-
from ...types.pipeline_data_source import PipelineDataSource
|
|
28
|
-
from ...types.pipeline_data_source_create import PipelineDataSourceCreate
|
|
29
|
-
from ...types.pipeline_deployment import PipelineDeployment
|
|
30
|
-
from ...types.pipeline_file import PipelineFile
|
|
31
|
-
from ...types.pipeline_file_create import PipelineFileCreate
|
|
32
|
-
from ...types.pipeline_metadata_config import PipelineMetadataConfig
|
|
33
|
-
from ...types.pipeline_type import PipelineType
|
|
34
|
-
from ...types.playground_session import PlaygroundSession
|
|
35
|
-
from ...types.preset_retrieval_params import PresetRetrievalParams
|
|
36
|
-
from ...types.retrieval_mode import RetrievalMode
|
|
37
|
-
from ...types.retrieve_results import RetrieveResults
|
|
38
|
-
from ...types.sparse_model_config import SparseModelConfig
|
|
39
|
-
from ...types.text_node import TextNode
|
|
40
|
-
from .types.pipeline_file_update_custom_metadata_value import PipelineFileUpdateCustomMetadataValue
|
|
41
|
-
from .types.pipeline_update_embedding_config import PipelineUpdateEmbeddingConfig
|
|
42
|
-
from .types.pipeline_update_transform_config import PipelineUpdateTransformConfig
|
|
43
|
-
from .types.retrieval_params_search_filters_inference_schema_value import (
|
|
44
|
-
RetrievalParamsSearchFiltersInferenceSchemaValue,
|
|
45
|
-
)
|
|
46
|
-
|
|
47
|
-
try:
|
|
48
|
-
import pydantic
|
|
49
|
-
if pydantic.__version__.startswith("1."):
|
|
50
|
-
raise ImportError
|
|
51
|
-
import pydantic.v1 as pydantic # type: ignore
|
|
52
|
-
except ImportError:
|
|
53
|
-
import pydantic # type: ignore
|
|
54
|
-
|
|
55
|
-
# this is used as the default value for optional parameters
|
|
56
|
-
OMIT = typing.cast(typing.Any, ...)
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
class PipelinesClient:
|
|
60
|
-
def __init__(self, *, client_wrapper: SyncClientWrapper):
|
|
61
|
-
self._client_wrapper = client_wrapper
|
|
62
|
-
|
|
63
|
-
def search_pipelines(
|
|
64
|
-
self,
|
|
65
|
-
*,
|
|
66
|
-
project_id: typing.Optional[str] = None,
|
|
67
|
-
project_name: typing.Optional[str] = None,
|
|
68
|
-
pipeline_name: typing.Optional[str] = None,
|
|
69
|
-
pipeline_type: typing.Optional[PipelineType] = None,
|
|
70
|
-
organization_id: typing.Optional[str] = None,
|
|
71
|
-
) -> typing.List[Pipeline]:
|
|
72
|
-
"""
|
|
73
|
-
Search for pipelines by various parameters.
|
|
74
|
-
|
|
75
|
-
Parameters:
|
|
76
|
-
- project_id: typing.Optional[str].
|
|
77
|
-
|
|
78
|
-
- project_name: typing.Optional[str].
|
|
79
|
-
|
|
80
|
-
- pipeline_name: typing.Optional[str].
|
|
81
|
-
|
|
82
|
-
- pipeline_type: typing.Optional[PipelineType].
|
|
83
|
-
|
|
84
|
-
- organization_id: typing.Optional[str].
|
|
85
|
-
---
|
|
86
|
-
from llama_cloud import PipelineType
|
|
87
|
-
from llama_cloud.client import LlamaCloud
|
|
88
|
-
|
|
89
|
-
client = LlamaCloud(
|
|
90
|
-
token="YOUR_TOKEN",
|
|
91
|
-
)
|
|
92
|
-
client.pipelines.search_pipelines(
|
|
93
|
-
pipeline_type=PipelineType.PLAYGROUND,
|
|
94
|
-
)
|
|
95
|
-
"""
|
|
96
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
97
|
-
"GET",
|
|
98
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
|
|
99
|
-
params=remove_none_from_dict(
|
|
100
|
-
{
|
|
101
|
-
"project_id": project_id,
|
|
102
|
-
"project_name": project_name,
|
|
103
|
-
"pipeline_name": pipeline_name,
|
|
104
|
-
"pipeline_type": pipeline_type,
|
|
105
|
-
"organization_id": organization_id,
|
|
106
|
-
}
|
|
107
|
-
),
|
|
108
|
-
headers=self._client_wrapper.get_headers(),
|
|
109
|
-
timeout=60,
|
|
110
|
-
)
|
|
111
|
-
if 200 <= _response.status_code < 300:
|
|
112
|
-
return pydantic.parse_obj_as(typing.List[Pipeline], _response.json()) # type: ignore
|
|
113
|
-
if _response.status_code == 422:
|
|
114
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
115
|
-
try:
|
|
116
|
-
_response_json = _response.json()
|
|
117
|
-
except JSONDecodeError:
|
|
118
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
119
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
120
|
-
|
|
121
|
-
def create_pipeline(
|
|
122
|
-
self,
|
|
123
|
-
*,
|
|
124
|
-
project_id: typing.Optional[str] = None,
|
|
125
|
-
organization_id: typing.Optional[str] = None,
|
|
126
|
-
request: PipelineCreate,
|
|
127
|
-
) -> Pipeline:
|
|
128
|
-
"""
|
|
129
|
-
Create a new pipeline for a project.
|
|
130
|
-
|
|
131
|
-
Parameters:
|
|
132
|
-
- project_id: typing.Optional[str].
|
|
133
|
-
|
|
134
|
-
- organization_id: typing.Optional[str].
|
|
135
|
-
|
|
136
|
-
- request: PipelineCreate.
|
|
137
|
-
"""
|
|
138
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
139
|
-
"POST",
|
|
140
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
|
|
141
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
142
|
-
json=jsonable_encoder(request),
|
|
143
|
-
headers=self._client_wrapper.get_headers(),
|
|
144
|
-
timeout=60,
|
|
145
|
-
)
|
|
146
|
-
if 200 <= _response.status_code < 300:
|
|
147
|
-
return pydantic.parse_obj_as(Pipeline, _response.json()) # type: ignore
|
|
148
|
-
if _response.status_code == 422:
|
|
149
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
150
|
-
try:
|
|
151
|
-
_response_json = _response.json()
|
|
152
|
-
except JSONDecodeError:
|
|
153
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
154
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
155
|
-
|
|
156
|
-
def upsert_pipeline(
|
|
157
|
-
self,
|
|
158
|
-
*,
|
|
159
|
-
project_id: typing.Optional[str] = None,
|
|
160
|
-
organization_id: typing.Optional[str] = None,
|
|
161
|
-
request: PipelineCreate,
|
|
162
|
-
) -> Pipeline:
|
|
163
|
-
"""
|
|
164
|
-
Upsert a pipeline for a project.
|
|
165
|
-
Updates if a pipeline with the same name and project_id already exists. Otherwise, creates a new pipeline.
|
|
166
|
-
|
|
167
|
-
Parameters:
|
|
168
|
-
- project_id: typing.Optional[str].
|
|
169
|
-
|
|
170
|
-
- organization_id: typing.Optional[str].
|
|
171
|
-
|
|
172
|
-
- request: PipelineCreate.
|
|
173
|
-
"""
|
|
174
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
175
|
-
"PUT",
|
|
176
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
|
|
177
|
-
params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
|
|
178
|
-
json=jsonable_encoder(request),
|
|
179
|
-
headers=self._client_wrapper.get_headers(),
|
|
180
|
-
timeout=60,
|
|
181
|
-
)
|
|
182
|
-
if 200 <= _response.status_code < 300:
|
|
183
|
-
return pydantic.parse_obj_as(Pipeline, _response.json()) # type: ignore
|
|
184
|
-
if _response.status_code == 422:
|
|
185
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
186
|
-
try:
|
|
187
|
-
_response_json = _response.json()
|
|
188
|
-
except JSONDecodeError:
|
|
189
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
190
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
191
|
-
|
|
192
|
-
def get_pipeline(self, pipeline_id: str) -> Pipeline:
|
|
193
|
-
"""
|
|
194
|
-
Get a pipeline by ID for a given project.
|
|
195
|
-
|
|
196
|
-
Parameters:
|
|
197
|
-
- pipeline_id: str.
|
|
198
|
-
"""
|
|
199
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
200
|
-
"GET",
|
|
201
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}"),
|
|
202
|
-
headers=self._client_wrapper.get_headers(),
|
|
203
|
-
timeout=60,
|
|
204
|
-
)
|
|
205
|
-
if 200 <= _response.status_code < 300:
|
|
206
|
-
return pydantic.parse_obj_as(Pipeline, _response.json()) # type: ignore
|
|
207
|
-
if _response.status_code == 422:
|
|
208
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
209
|
-
try:
|
|
210
|
-
_response_json = _response.json()
|
|
211
|
-
except JSONDecodeError:
|
|
212
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
213
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
214
|
-
|
|
215
|
-
def update_existing_pipeline(
|
|
216
|
-
self,
|
|
217
|
-
pipeline_id: str,
|
|
218
|
-
*,
|
|
219
|
-
embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig] = OMIT,
|
|
220
|
-
transform_config: typing.Optional[PipelineUpdateTransformConfig] = OMIT,
|
|
221
|
-
sparse_model_config: typing.Optional[SparseModelConfig] = OMIT,
|
|
222
|
-
data_sink_id: typing.Optional[str] = OMIT,
|
|
223
|
-
embedding_model_config_id: typing.Optional[str] = OMIT,
|
|
224
|
-
data_sink: typing.Optional[DataSinkCreate] = OMIT,
|
|
225
|
-
preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = OMIT,
|
|
226
|
-
eval_parameters: typing.Optional[EvalExecutionParams] = OMIT,
|
|
227
|
-
llama_parse_parameters: typing.Optional[LlamaParseParameters] = OMIT,
|
|
228
|
-
status: typing.Optional[str] = OMIT,
|
|
229
|
-
metadata_config: typing.Optional[PipelineMetadataConfig] = OMIT,
|
|
230
|
-
name: typing.Optional[str] = OMIT,
|
|
231
|
-
managed_pipeline_id: typing.Optional[str] = OMIT,
|
|
232
|
-
) -> Pipeline:
|
|
233
|
-
"""
|
|
234
|
-
Update an existing pipeline for a project.
|
|
235
|
-
|
|
236
|
-
Parameters:
|
|
237
|
-
- pipeline_id: str.
|
|
238
|
-
|
|
239
|
-
- embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig].
|
|
240
|
-
|
|
241
|
-
- transform_config: typing.Optional[PipelineUpdateTransformConfig]. Configuration for the transformation.
|
|
242
|
-
|
|
243
|
-
- sparse_model_config: typing.Optional[SparseModelConfig].
|
|
244
|
-
|
|
245
|
-
- data_sink_id: typing.Optional[str].
|
|
246
|
-
|
|
247
|
-
- embedding_model_config_id: typing.Optional[str].
|
|
248
|
-
|
|
249
|
-
- data_sink: typing.Optional[DataSinkCreate].
|
|
250
|
-
|
|
251
|
-
- preset_retrieval_parameters: typing.Optional[PresetRetrievalParams].
|
|
252
|
-
|
|
253
|
-
- eval_parameters: typing.Optional[EvalExecutionParams].
|
|
254
|
-
|
|
255
|
-
- llama_parse_parameters: typing.Optional[LlamaParseParameters].
|
|
256
|
-
|
|
257
|
-
- status: typing.Optional[str].
|
|
258
|
-
|
|
259
|
-
- metadata_config: typing.Optional[PipelineMetadataConfig].
|
|
260
|
-
|
|
261
|
-
- name: typing.Optional[str].
|
|
262
|
-
|
|
263
|
-
- managed_pipeline_id: typing.Optional[str].
|
|
264
|
-
"""
|
|
265
|
-
_request: typing.Dict[str, typing.Any] = {}
|
|
266
|
-
if embedding_config is not OMIT:
|
|
267
|
-
_request["embedding_config"] = embedding_config
|
|
268
|
-
if transform_config is not OMIT:
|
|
269
|
-
_request["transform_config"] = transform_config
|
|
270
|
-
if sparse_model_config is not OMIT:
|
|
271
|
-
_request["sparse_model_config"] = sparse_model_config
|
|
272
|
-
if data_sink_id is not OMIT:
|
|
273
|
-
_request["data_sink_id"] = data_sink_id
|
|
274
|
-
if embedding_model_config_id is not OMIT:
|
|
275
|
-
_request["embedding_model_config_id"] = embedding_model_config_id
|
|
276
|
-
if data_sink is not OMIT:
|
|
277
|
-
_request["data_sink"] = data_sink
|
|
278
|
-
if preset_retrieval_parameters is not OMIT:
|
|
279
|
-
_request["preset_retrieval_parameters"] = preset_retrieval_parameters
|
|
280
|
-
if eval_parameters is not OMIT:
|
|
281
|
-
_request["eval_parameters"] = eval_parameters
|
|
282
|
-
if llama_parse_parameters is not OMIT:
|
|
283
|
-
_request["llama_parse_parameters"] = llama_parse_parameters
|
|
284
|
-
if status is not OMIT:
|
|
285
|
-
_request["status"] = status
|
|
286
|
-
if metadata_config is not OMIT:
|
|
287
|
-
_request["metadata_config"] = metadata_config
|
|
288
|
-
if name is not OMIT:
|
|
289
|
-
_request["name"] = name
|
|
290
|
-
if managed_pipeline_id is not OMIT:
|
|
291
|
-
_request["managed_pipeline_id"] = managed_pipeline_id
|
|
292
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
293
|
-
"PUT",
|
|
294
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}"),
|
|
295
|
-
json=jsonable_encoder(_request),
|
|
296
|
-
headers=self._client_wrapper.get_headers(),
|
|
297
|
-
timeout=60,
|
|
298
|
-
)
|
|
299
|
-
if 200 <= _response.status_code < 300:
|
|
300
|
-
return pydantic.parse_obj_as(Pipeline, _response.json()) # type: ignore
|
|
301
|
-
if _response.status_code == 422:
|
|
302
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
303
|
-
try:
|
|
304
|
-
_response_json = _response.json()
|
|
305
|
-
except JSONDecodeError:
|
|
306
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
307
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
308
|
-
|
|
309
|
-
-    def delete_pipeline(self, pipeline_id: str) -> None:
-        """
-        Delete a pipeline by ID.
-
-        Parameters:
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.delete_pipeline(
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def get_pipeline_status(self, pipeline_id: str) -> ManagedIngestionStatusResponse:
-        """
-        Get the status of a pipeline by ID.
-
-        Parameters:
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.get_pipeline_status(
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/status"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def sync_pipeline(self, pipeline_id: str) -> Pipeline:
-        """
-        Run ingestion for the pipeline by incrementally updating the data-sink with upstream changes from data-sources & files.
-
-        Parameters:
-            - pipeline_id: str.
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/sync"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def cancel_pipeline_sync(self, pipeline_id: str) -> Pipeline:
-        """
-        Parameters:
-            - pipeline_id: str.
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/sync/cancel"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
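sync_pipeline, get_pipeline_status, and cancel_pipeline_sync are typically used together: start ingestion, poll until it settles, and cancel if it overruns. A sketch; the `status` field and its terminal values are assumptions based on the ManagedIngestionStatusResponse return type:

    import time

    client.pipelines.sync_pipeline(pipeline_id="<pipeline-id>")
    deadline = time.monotonic() + 600  # allow ten minutes of ingestion
    while time.monotonic() < deadline:
        resp = client.pipelines.get_pipeline_status(pipeline_id="<pipeline-id>")
        if str(resp.status).upper().endswith(("SUCCESS", "ERROR")):  # assumed terminal states
            break
        time.sleep(10)
    else:
        # Deadline hit without reaching a terminal state; stop the sync.
        client.pipelines.cancel_pipeline_sync(pipeline_id="<pipeline-id>")
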
-    def force_delete_pipeline(self, pipeline_id: str) -> None:
-        """
-        Parameters:
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.force_delete_pipeline(
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/force-delete"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def copy_pipeline(self, pipeline_id: str) -> Pipeline:
-        """
-        Copy a pipeline by ID.
-
-        Parameters:
-            - pipeline_id: str.
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/copy"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def list_pipeline_files(
-        self,
-        pipeline_id: str,
-        *,
-        data_source_id: typing.Optional[str] = None,
-        only_manually_uploaded: typing.Optional[bool] = None,
-    ) -> typing.List[PipelineFile]:
-        """
-        Get files for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - data_source_id: typing.Optional[str].
-
-            - only_manually_uploaded: typing.Optional[bool].
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.list_pipeline_files(
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files"),
-            params=remove_none_from_dict(
-                {"data_source_id": data_source_id, "only_manually_uploaded": only_manually_uploaded}
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[PipelineFile], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
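Both filters on list_pipeline_files are optional query parameters; remove_none_from_dict strips whichever ones are left at None. For example, listing only files that were uploaded by hand rather than pulled in by a data source:

    manual_files = client.pipelines.list_pipeline_files(
        pipeline_id="<pipeline-id>",
        only_manually_uploaded=True,
    )
    for f in manual_files:
        print(f.name)  # other PipelineFile fields vary; see the type definition
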
-    def add_files_to_pipeline_api(
-        self, pipeline_id: str, *, request: typing.List[PipelineFileCreate]
-    ) -> typing.List[PipelineFile]:
-        """
-        Add files to a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - request: typing.List[PipelineFileCreate].
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.add_files_to_pipeline_api(
-            pipeline_id="string",
-            request=[],
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files"),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[PipelineFile], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def list_pipeline_files_2(
-        self,
-        pipeline_id: str,
-        *,
-        data_source_id: typing.Optional[str] = None,
-        only_manually_uploaded: typing.Optional[bool] = None,
-        file_name_contains: typing.Optional[str] = None,
-        limit: typing.Optional[int] = None,
-        offset: typing.Optional[int] = None,
-        order_by: typing.Optional[str] = None,
-    ) -> PaginatedListPipelineFilesResponse:
-        """
-        Get files for a pipeline.
-
-        Args:
-            pipeline_id: ID of the pipeline
-            data_source_id: Optional filter by data source ID
-            only_manually_uploaded: Filter for only manually uploaded files
-            file_name_contains: Optional filter by file name (substring match)
-            limit: Limit number of results
-            offset: Offset for pagination
-            order_by: Field to order by
-
-        Parameters:
-            - pipeline_id: str.
-
-            - data_source_id: typing.Optional[str].
-
-            - only_manually_uploaded: typing.Optional[bool].
-
-            - file_name_contains: typing.Optional[str].
-
-            - limit: typing.Optional[int].
-
-            - offset: typing.Optional[int].
-
-            - order_by: typing.Optional[str].
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.list_pipeline_files_2(
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files2"),
-            params=remove_none_from_dict(
-                {
-                    "data_source_id": data_source_id,
-                    "only_manually_uploaded": only_manually_uploaded,
-                    "file_name_contains": file_name_contains,
-                    "limit": limit,
-                    "offset": offset,
-                    "order_by": order_by,
-                }
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(PaginatedListPipelineFilesResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
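list_pipeline_files_2 is the paginated variant of the endpoint above, driven by limit and offset. A sketch that drains every page; the `files` attribute is an assumption based on the PaginatedListPipelineFilesResponse type name:

    offset, page_size = 0, 100
    while True:
        page = client.pipelines.list_pipeline_files_2(
            pipeline_id="<pipeline-id>",
            limit=page_size,
            offset=offset,
        )
        items = page.files  # assumed field name
        if not items:
            break
        for f in items:
            print(f.name)
        offset += page_size
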
-    def get_pipeline_file_status_counts(
-        self,
-        pipeline_id: str,
-        *,
-        data_source_id: typing.Optional[str] = None,
-        only_manually_uploaded: typing.Optional[bool] = None,
-    ) -> FileCountByStatusResponse:
-        """
-        Get files for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - data_source_id: typing.Optional[str].
-
-            - only_manually_uploaded: typing.Optional[bool].
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.get_pipeline_file_status_counts(
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files/status-counts"
-            ),
-            params=remove_none_from_dict(
-                {"data_source_id": data_source_id, "only_manually_uploaded": only_manually_uploaded}
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(FileCountByStatusResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
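The status-counts endpoint summarizes ingestion progress without listing every file, which is cheaper on large pipelines:

    counts = client.pipelines.get_pipeline_file_status_counts(pipeline_id="<pipeline-id>")
    # FileCountByStatusResponse maps ingestion states to counts; printing the
    # parsed model shows the breakdown without assuming exact field names.
    print(counts)
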
-    def get_pipeline_file_status(self, file_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
-        """
-        Get status of a file for a pipeline.
-
-        Parameters:
-            - file_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.get_pipeline_file_status(
-            file_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files/{file_id}/status"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def update_pipeline_file(
-        self,
-        file_id: str,
-        pipeline_id: str,
-        *,
-        custom_metadata: typing.Optional[
-            typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]
-        ] = OMIT,
-    ) -> PipelineFile:
-        """
-        Update a file for a pipeline.
-
-        Parameters:
-            - file_id: str.
-
-            - pipeline_id: str.
-
-            - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]].
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.update_pipeline_file(
-            file_id="string",
-            pipeline_id="string",
-        )
-        """
-        _request: typing.Dict[str, typing.Any] = {}
-        if custom_metadata is not OMIT:
-            _request["custom_metadata"] = custom_metadata
-        _response = self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files/{file_id}"
-            ),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(PipelineFile, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
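update_pipeline_file follows the same OMIT pattern as the pipeline update: pass custom_metadata and only that key is sent, leaving the file otherwise untouched. Values are plain JSON scalars:

    client.pipelines.update_pipeline_file(
        file_id="<file-id>",
        pipeline_id="<pipeline-id>",
        custom_metadata={"department": "legal", "year": 2024},
    )
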
-    def delete_pipeline_file(self, file_id: str, pipeline_id: str) -> None:
-        """
-        Delete a file from a pipeline.
-
-        Parameters:
-            - file_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.delete_pipeline_file(
-            file_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files/{file_id}"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def import_pipeline_metadata(self, pipeline_id: str, *, upload_file: typing.IO) -> typing.Dict[str, str]:
-        """
-        Import metadata for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - upload_file: typing.IO.
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/metadata"),
-            data=jsonable_encoder({}),
-            files={"upload_file": upload_file},
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.Dict[str, str], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
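import_pipeline_metadata is a multipart upload rather than a JSON call, which is why the body goes through files= instead of json=. A sketch; the expected file format (a JSON mapping of metadata per document) is an assumption not stated in this signature:

    with open("metadata.json", "rb") as fh:
        result = client.pipelines.import_pipeline_metadata(
            pipeline_id="<pipeline-id>",
            upload_file=fh,
        )
    print(result)  # a str -> str mapping, per the return annotation
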
-    def delete_pipeline_files_metadata(self, pipeline_id: str) -> None:
-        """
-        Delete metadata for all files in a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.delete_pipeline_files_metadata(
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/metadata"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def list_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
-        """
-        Get data sources for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.list_pipeline_data_sources(
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/data-sources"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[PipelineDataSource], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def add_data_sources_to_pipeline(
-        self, pipeline_id: str, *, request: typing.List[PipelineDataSourceCreate]
-    ) -> typing.List[PipelineDataSource]:
-        """
-        Add data sources to a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - request: typing.List[PipelineDataSourceCreate].
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.add_data_sources_to_pipeline(
-            pipeline_id="string",
-            request=[],
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/data-sources"
-            ),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[PipelineDataSource], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def update_pipeline_data_source(
-        self, data_source_id: str, pipeline_id: str, *, sync_interval: typing.Optional[float] = OMIT
-    ) -> PipelineDataSource:
-        """
-        Update the configuration of a data source in a pipeline.
-
-        Parameters:
-            - data_source_id: str.
-
-            - pipeline_id: str.
-
-            - sync_interval: typing.Optional[float].
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.update_pipeline_data_source(
-            data_source_id="string",
-            pipeline_id="string",
-        )
-        """
-        _request: typing.Dict[str, typing.Any] = {}
-        if sync_interval is not OMIT:
-            _request["sync_interval"] = sync_interval
-        _response = self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/pipelines/{pipeline_id}/data-sources/{data_source_id}",
-            ),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(PipelineDataSource, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
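sync_interval is the only mutable setting on a pipeline data source here. A sketch that schedules a daily re-sync; the unit is assumed to be seconds, which the float type suggests but the signature does not state:

    client.pipelines.update_pipeline_data_source(
        data_source_id="<data-source-id>",
        pipeline_id="<pipeline-id>",
        sync_interval=86400.0,  # assumed seconds (one day)
    )
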
-    def delete_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> None:
-        """
-        Delete a data source from a pipeline.
-
-        Parameters:
-            - data_source_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.delete_pipeline_data_source(
-            data_source_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/pipelines/{pipeline_id}/data-sources/{data_source_id}",
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def sync_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> Pipeline:
-        """
-        Run ingestion for the pipeline data source by incrementally updating the data-sink with upstream changes from data-source.
-
-        Parameters:
-            - data_source_id: str.
-
-            - pipeline_id: str.
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/pipelines/{pipeline_id}/data-sources/{data_source_id}/sync",
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def get_pipeline_data_source_status(self, data_source_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
-        """
-        Get the status of a data source for a pipeline.
-
-        Parameters:
-            - data_source_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.get_pipeline_data_source_status(
-            data_source_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/pipelines/{pipeline_id}/data-sources/{data_source_id}/status",
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def run_search(
-        self,
-        pipeline_id: str,
-        *,
-        project_id: typing.Optional[str] = None,
-        organization_id: typing.Optional[str] = None,
-        dense_similarity_top_k: typing.Optional[int] = OMIT,
-        dense_similarity_cutoff: typing.Optional[float] = OMIT,
-        sparse_similarity_top_k: typing.Optional[int] = OMIT,
-        enable_reranking: typing.Optional[bool] = OMIT,
-        rerank_top_n: typing.Optional[int] = OMIT,
-        alpha: typing.Optional[float] = OMIT,
-        search_filters: typing.Optional[MetadataFilters] = OMIT,
-        search_filters_inference_schema: typing.Optional[
-            typing.Dict[str, typing.Optional[RetrievalParamsSearchFiltersInferenceSchemaValue]]
-        ] = OMIT,
-        files_top_k: typing.Optional[int] = OMIT,
-        retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
-        retrieve_image_nodes: typing.Optional[bool] = OMIT,
-        retrieve_page_screenshot_nodes: typing.Optional[bool] = OMIT,
-        retrieve_page_figure_nodes: typing.Optional[bool] = OMIT,
-        query: str,
-        class_name: typing.Optional[str] = OMIT,
-    ) -> RetrieveResults:
-        """
-        Get retrieval results for a managed pipeline and a query
-
-        Parameters:
-            - pipeline_id: str.
-
-            - project_id: typing.Optional[str].
-
-            - organization_id: typing.Optional[str].
-
-            - dense_similarity_top_k: typing.Optional[int].
-
-            - dense_similarity_cutoff: typing.Optional[float].
-
-            - sparse_similarity_top_k: typing.Optional[int].
-
-            - enable_reranking: typing.Optional[bool].
-
-            - rerank_top_n: typing.Optional[int].
-
-            - alpha: typing.Optional[float].
-
-            - search_filters: typing.Optional[MetadataFilters].
-
-            - search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[RetrievalParamsSearchFiltersInferenceSchemaValue]]].
-
-            - files_top_k: typing.Optional[int].
-
-            - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
-
-            - retrieve_image_nodes: typing.Optional[bool]. Whether to retrieve image nodes.
-
-            - retrieve_page_screenshot_nodes: typing.Optional[bool]. Whether to retrieve page screenshot nodes.
-
-            - retrieve_page_figure_nodes: typing.Optional[bool]. Whether to retrieve page figure nodes.
-
-            - query: str. The query to retrieve against.
-
-            - class_name: typing.Optional[str].
-        ---
-        from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.run_search(
-            pipeline_id="string",
-            search_filters=MetadataFilters(
-                filters=[],
-                condition=FilterCondition.AND,
-            ),
-            retrieval_mode=RetrievalMode.CHUNKS,
-            query="string",
-        )
-        """
-        _request: typing.Dict[str, typing.Any] = {"query": query}
-        if dense_similarity_top_k is not OMIT:
-            _request["dense_similarity_top_k"] = dense_similarity_top_k
-        if dense_similarity_cutoff is not OMIT:
-            _request["dense_similarity_cutoff"] = dense_similarity_cutoff
-        if sparse_similarity_top_k is not OMIT:
-            _request["sparse_similarity_top_k"] = sparse_similarity_top_k
-        if enable_reranking is not OMIT:
-            _request["enable_reranking"] = enable_reranking
-        if rerank_top_n is not OMIT:
-            _request["rerank_top_n"] = rerank_top_n
-        if alpha is not OMIT:
-            _request["alpha"] = alpha
-        if search_filters is not OMIT:
-            _request["search_filters"] = search_filters
-        if search_filters_inference_schema is not OMIT:
-            _request["search_filters_inference_schema"] = search_filters_inference_schema
-        if files_top_k is not OMIT:
-            _request["files_top_k"] = files_top_k
-        if retrieval_mode is not OMIT:
-            _request["retrieval_mode"] = retrieval_mode
-        if retrieve_image_nodes is not OMIT:
-            _request["retrieve_image_nodes"] = retrieve_image_nodes
-        if retrieve_page_screenshot_nodes is not OMIT:
-            _request["retrieve_page_screenshot_nodes"] = retrieve_page_screenshot_nodes
-        if retrieve_page_figure_nodes is not OMIT:
-            _request["retrieve_page_figure_nodes"] = retrieve_page_figure_nodes
-        if class_name is not OMIT:
-            _request["class_name"] = class_name
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/retrieve"),
-            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(RetrieveResults, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
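run_search builds the same OMIT-filtered body as the update calls, so a query only needs the knobs being changed. A sketch of a reranked hybrid search; the `retrieval_nodes` field is an assumption based on the RetrieveResults return type:

    from llama_cloud import RetrievalMode

    results = client.pipelines.run_search(
        pipeline_id="<pipeline-id>",
        query="What were Q3 revenues?",
        dense_similarity_top_k=8,
        sparse_similarity_top_k=8,
        alpha=0.5,  # blend between dense and sparse scores
        enable_reranking=True,
        rerank_top_n=4,
        retrieval_mode=RetrievalMode.CHUNKS,
    )
    for scored in results.retrieval_nodes:  # assumed field name
        print(scored.score, scored.node.text)
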
-    def list_pipeline_jobs(self, pipeline_id: str) -> typing.List[PipelineDeployment]:
-        """
-        Get jobs for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.list_pipeline_jobs(
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/jobs"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[PipelineDeployment], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def get_pipeline_job(self, job_id: str, pipeline_id: str) -> PipelineDeployment:
-        """
-        Get a job for a pipeline.
-
-        Parameters:
-            - job_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.get_pipeline_job(
-            job_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/jobs/{job_id}"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(PipelineDeployment, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def get_playground_session(self, pipeline_id: str) -> PlaygroundSession:
-        """
-        Get a playground session for a user and pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.get_playground_session(
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/playground-session"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(PlaygroundSession, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def chat(
-        self,
-        pipeline_id: str,
-        *,
-        messages: typing.Optional[typing.List[InputMessage]] = OMIT,
-        data: typing.Optional[ChatData] = OMIT,
-        class_name: typing.Optional[str] = OMIT,
-    ) -> typing.Any:
-        """
-        Make a retrieval query + chat completion for a managed pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - messages: typing.Optional[typing.List[InputMessage]].
-
-            - data: typing.Optional[ChatData].
-
-            - class_name: typing.Optional[str].
-        ---
-        from llama_cloud import (
-            ChatData,
-            FilterCondition,
-            LlmParameters,
-            MetadataFilters,
-            PresetRetrievalParams,
-            RetrievalMode,
-            SupportedLlmModelNames,
-        )
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.chat(
-            pipeline_id="string",
-            data=ChatData(
-                retrieval_parameters=PresetRetrievalParams(
-                    search_filters=MetadataFilters(
-                        filters=[],
-                        condition=FilterCondition.AND,
-                    ),
-                    retrieval_mode=RetrievalMode.CHUNKS,
-                ),
-                llm_parameters=LlmParameters(
-                    model_name=SupportedLlmModelNames.GPT_4_O,
-                ),
-            ),
-        )
-        """
-        _request: typing.Dict[str, typing.Any] = {}
-        if messages is not OMIT:
-            _request["messages"] = messages
-        if data is not OMIT:
-            _request["data"] = data
-        if class_name is not OMIT:
-            _request["class_name"] = class_name
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/chat"),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
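The chat docstring above shows the full configuration surface, but a minimal call only needs a message list. A sketch; the InputMessage field names are assumptions based on the usual chat-completion shape:

    from llama_cloud import InputMessage

    answer = client.pipelines.chat(
        pipeline_id="<pipeline-id>",
        messages=[
            InputMessage(role="user", content="Summarize the onboarding doc."),  # assumed fields
        ],
    )
    print(answer)  # typed as typing.Any; the response shape depends on the server
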
-    def list_pipeline_documents(
-        self,
-        pipeline_id: str,
-        *,
-        skip: typing.Optional[int] = None,
-        limit: typing.Optional[int] = None,
-        file_id: typing.Optional[str] = None,
-        only_direct_upload: typing.Optional[bool] = None,
-        only_api_data_source_documents: typing.Optional[bool] = None,
-    ) -> typing.List[CloudDocument]:
-        """
-        Return a list of documents for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - skip: typing.Optional[int].
-
-            - limit: typing.Optional[int].
-
-            - file_id: typing.Optional[str].
-
-            - only_direct_upload: typing.Optional[bool].
-
-            - only_api_data_source_documents: typing.Optional[bool].
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.list_pipeline_documents(
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents"
-            ),
-            params=remove_none_from_dict(
-                {
-                    "skip": skip,
-                    "limit": limit,
-                    "file_id": file_id,
-                    "only_direct_upload": only_direct_upload,
-                    "only_api_data_source_documents": only_api_data_source_documents,
-                }
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[CloudDocument], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def create_batch_pipeline_documents(
-        self, pipeline_id: str, *, request: typing.List[CloudDocumentCreate]
-    ) -> typing.List[CloudDocument]:
-        """
-        Batch create documents for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - request: typing.List[CloudDocumentCreate].
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.create_batch_pipeline_documents(
-            pipeline_id="string",
-            request=[],
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents"
-            ),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[CloudDocument], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def upsert_batch_pipeline_documents(
-        self, pipeline_id: str, *, request: typing.List[CloudDocumentCreate]
-    ) -> typing.List[CloudDocument]:
-        """
-        Batch create or update a document for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - request: typing.List[CloudDocumentCreate].
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.upsert_batch_pipeline_documents(
-            pipeline_id="string",
-            request=[],
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents"
-            ),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[CloudDocument], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
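create_batch_pipeline_documents POSTs new documents, while upsert_batch_pipeline_documents PUTs create-or-update semantics over the same route. A sketch; the CloudDocumentCreate fields shown are assumptions based on its raw-document role:

    from llama_cloud import CloudDocumentCreate

    docs = [
        CloudDocumentCreate(
            text="Refunds are processed within 5 business days.",  # assumed field
            metadata={"source": "faq"},  # assumed field
        ),
    ]
    uploaded = client.pipelines.upsert_batch_pipeline_documents(
        pipeline_id="<pipeline-id>",
        request=docs,
    )
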
-    def paginated_list_pipeline_documents(
-        self,
-        pipeline_id: str,
-        *,
-        skip: typing.Optional[int] = None,
-        limit: typing.Optional[int] = None,
-        file_id: typing.Optional[str] = None,
-        only_direct_upload: typing.Optional[bool] = None,
-        only_api_data_source_documents: typing.Optional[bool] = None,
-    ) -> PaginatedListCloudDocumentsResponse:
-        """
-        Return a list of documents for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - skip: typing.Optional[int].
-
-            - limit: typing.Optional[int].
-
-            - file_id: typing.Optional[str].
-
-            - only_direct_upload: typing.Optional[bool].
-
-            - only_api_data_source_documents: typing.Optional[bool].
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.paginated_list_pipeline_documents(
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents/paginated"
-            ),
-            params=remove_none_from_dict(
-                {
-                    "skip": skip,
-                    "limit": limit,
-                    "file_id": file_id,
-                    "only_direct_upload": only_direct_upload,
-                    "only_api_data_source_documents": only_api_data_source_documents,
-                }
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(PaginatedListCloudDocumentsResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def get_pipeline_document(self, document_id: str, pipeline_id: str) -> CloudDocument:
-        """
-        Return a single document for a pipeline.
-
-        Parameters:
-            - document_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.get_pipeline_document(
-            document_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents/{document_id}"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(CloudDocument, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def delete_pipeline_document(self, document_id: str, pipeline_id: str) -> None:
-        """
-        Delete a document from a pipeline.
-        Initiates an async job that will:
-
-        1. Delete vectors from the vector store
-        2. Delete the document from MongoDB after vectors are successfully deleted
-
-        Parameters:
-            - document_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import LlamaCloud
-
-        client = LlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        client.pipelines.delete_pipeline_document(
-            document_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents/{document_id}"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
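Because delete_pipeline_document only queues an async job, the document can remain visible briefly after the call returns:

    client.pipelines.delete_pipeline_document(
        document_id="<document-id>",
        pipeline_id="<pipeline-id>",
    )
    # Returns as soon as the job is queued; vectors and the MongoDB record
    # are removed in the background, per the docstring above.
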
def get_pipeline_document_status(self, document_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
|
|
1672
|
-
"""
|
|
1673
|
-
Return a single document for a pipeline.
|
|
1674
|
-
|
|
1675
|
-
Parameters:
|
|
1676
|
-
- document_id: str.
|
|
1677
|
-
|
|
1678
|
-
- pipeline_id: str.
|
|
1679
|
-
---
|
|
1680
|
-
from llama_cloud.client import LlamaCloud
|
|
1681
|
-
|
|
1682
|
-
client = LlamaCloud(
|
|
1683
|
-
token="YOUR_TOKEN",
|
|
1684
|
-
)
|
|
1685
|
-
client.pipelines.get_pipeline_document_status(
|
|
1686
|
-
document_id="string",
|
|
1687
|
-
pipeline_id="string",
|
|
1688
|
-
)
|
|
1689
|
-
"""
|
|
1690
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
1691
|
-
"GET",
|
|
1692
|
-
urllib.parse.urljoin(
|
|
1693
|
-
f"{self._client_wrapper.get_base_url()}/",
|
|
1694
|
-
f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/status",
|
|
1695
|
-
),
|
|
1696
|
-
headers=self._client_wrapper.get_headers(),
|
|
1697
|
-
timeout=60,
|
|
1698
|
-
)
|
|
1699
|
-
if 200 <= _response.status_code < 300:
|
|
1700
|
-
return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json()) # type: ignore
|
|
1701
|
-
if _response.status_code == 422:
|
|
1702
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
1703
|
-
try:
|
|
1704
|
-
_response_json = _response.json()
|
|
1705
|
-
except JSONDecodeError:
|
|
1706
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1707
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1708
|
-
|
|
1709
|
-
def list_pipeline_document_chunks(self, document_id: str, pipeline_id: str) -> typing.List[TextNode]:
|
|
1710
|
-
"""
|
|
1711
|
-
Return a list of chunks for a pipeline document.
|
|
1712
|
-
|
|
1713
|
-
Parameters:
|
|
1714
|
-
- document_id: str.
|
|
1715
|
-
|
|
1716
|
-
- pipeline_id: str.
|
|
1717
|
-
---
|
|
1718
|
-
from llama_cloud.client import LlamaCloud
|
|
1719
|
-
|
|
1720
|
-
client = LlamaCloud(
|
|
1721
|
-
token="YOUR_TOKEN",
|
|
1722
|
-
)
|
|
1723
|
-
client.pipelines.list_pipeline_document_chunks(
|
|
1724
|
-
document_id="string",
|
|
1725
|
-
pipeline_id="string",
|
|
1726
|
-
)
|
|
1727
|
-
"""
|
|
1728
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
1729
|
-
"GET",
|
|
1730
|
-
urllib.parse.urljoin(
|
|
1731
|
-
f"{self._client_wrapper.get_base_url()}/",
|
|
1732
|
-
f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/chunks",
|
|
1733
|
-
),
|
|
1734
|
-
headers=self._client_wrapper.get_headers(),
|
|
1735
|
-
timeout=60,
|
|
1736
|
-
)
|
|
1737
|
-
if 200 <= _response.status_code < 300:
|
|
1738
|
-
return pydantic.parse_obj_as(typing.List[TextNode], _response.json()) # type: ignore
|
|
1739
|
-
if _response.status_code == 422:
|
|
1740
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
1741
|
-
try:
|
|
1742
|
-
_response_json = _response.json()
|
|
1743
|
-
except JSONDecodeError:
|
|
1744
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1745
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1746
|
-
|
|
1747
|
-
|
|
1748
|
-
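A minimal sketch, not part of the diff, of how these removed synchronous document endpoints composed in 0.1.x: wait for a document to finish ingesting, then read back its chunks. The IDs are placeholders, and the `status` attribute and its values are assumptions about `ManagedIngestionStatusResponse`, which the diff names but does not define.

    import time
    from llama_cloud.client import LlamaCloud

    client = LlamaCloud(token="YOUR_TOKEN")
    document_id, pipeline_id = "doc_123", "pipeline_456"  # hypothetical IDs

    # Poll until the document leaves the in-progress states; the `status`
    # field and its string values are assumptions, not shown in this diff.
    while True:
        resp = client.pipelines.get_pipeline_document_status(
            document_id=document_id, pipeline_id=pipeline_id
        )
        if str(resp.status).upper() not in ("NOT_STARTED", "IN_PROGRESS"):
            break
        time.sleep(2)

    chunks = client.pipelines.list_pipeline_document_chunks(
        document_id=document_id, pipeline_id=pipeline_id
    )
    print(f"{len(chunks)} chunks")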
-class AsyncPipelinesClient:
-    def __init__(self, *, client_wrapper: AsyncClientWrapper):
-        self._client_wrapper = client_wrapper
-
-    async def search_pipelines(
-        self,
-        *,
-        project_id: typing.Optional[str] = None,
-        project_name: typing.Optional[str] = None,
-        pipeline_name: typing.Optional[str] = None,
-        pipeline_type: typing.Optional[PipelineType] = None,
-        organization_id: typing.Optional[str] = None,
-    ) -> typing.List[Pipeline]:
-        """
-        Search for pipelines by various parameters.
-
-        Parameters:
-            - project_id: typing.Optional[str].
-
-            - project_name: typing.Optional[str].
-
-            - pipeline_name: typing.Optional[str].
-
-            - pipeline_type: typing.Optional[PipelineType].
-
-            - organization_id: typing.Optional[str].
-        ---
-        from llama_cloud import PipelineType
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.search_pipelines(
-            pipeline_type=PipelineType.PLAYGROUND,
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
-            params=remove_none_from_dict(
-                {
-                    "project_id": project_id,
-                    "project_name": project_name,
-                    "pipeline_name": pipeline_name,
-                    "pipeline_type": pipeline_type,
-                    "organization_id": organization_id,
-                }
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[Pipeline], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
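A short sketch of driving the removed `search_pipelines` endpoint to resolve a pipeline by name. The server filters on `pipeline_name`, but a name can match in more than one project, so the sketch picks the exact match client-side; the `name` attribute on `Pipeline` is an assumption.

    import asyncio
    from llama_cloud.client import AsyncLlamaCloud

    async def find_pipeline(name: str):
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        # Server-side filter plus client-side exact match; `p.name` is an
        # assumption about the Pipeline model, which this diff does not show.
        candidates = await client.pipelines.search_pipelines(pipeline_name=name)
        return next((p for p in candidates if p.name == name), None)

    pipeline = asyncio.run(find_pipeline("docs-index"))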
-    async def create_pipeline(
-        self,
-        *,
-        project_id: typing.Optional[str] = None,
-        organization_id: typing.Optional[str] = None,
-        request: PipelineCreate,
-    ) -> Pipeline:
-        """
-        Create a new pipeline for a project.
-
-        Parameters:
-            - project_id: typing.Optional[str].
-
-            - organization_id: typing.Optional[str].
-
-            - request: PipelineCreate.
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
-            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def upsert_pipeline(
-        self,
-        *,
-        project_id: typing.Optional[str] = None,
-        organization_id: typing.Optional[str] = None,
-        request: PipelineCreate,
-    ) -> Pipeline:
-        """
-        Upsert a pipeline for a project.
-        Updates if a pipeline with the same name and project_id already exists. Otherwise, creates a new pipeline.
-
-        Parameters:
-            - project_id: typing.Optional[str].
-
-            - organization_id: typing.Optional[str].
-
-            - request: PipelineCreate.
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/pipelines"),
-            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
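A sketch of the create-or-update idiom `upsert_pipeline` enables, assuming `PipelineCreate` is importable from the package root (as `PipelineType` is in the docstrings above) and exposes a `name` field; the diff shows only the type's name.

    import asyncio
    from llama_cloud import PipelineCreate  # import path assumed from sibling examples
    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        # PUT /api/v1/pipelines matches on (name, project_id): it updates the
        # existing pipeline or creates one. The `name` field on PipelineCreate
        # is an assumption in this sketch.
        pipeline = await client.pipelines.upsert_pipeline(
            project_id="project_123",  # hypothetical ID
            request=PipelineCreate(name="docs-index"),
        )

    asyncio.run(main())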
-    async def get_pipeline(self, pipeline_id: str) -> Pipeline:
-        """
-        Get a pipeline by ID for a given project.
-
-        Parameters:
-            - pipeline_id: str.
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def update_existing_pipeline(
-        self,
-        pipeline_id: str,
-        *,
-        embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig] = OMIT,
-        transform_config: typing.Optional[PipelineUpdateTransformConfig] = OMIT,
-        sparse_model_config: typing.Optional[SparseModelConfig] = OMIT,
-        data_sink_id: typing.Optional[str] = OMIT,
-        embedding_model_config_id: typing.Optional[str] = OMIT,
-        data_sink: typing.Optional[DataSinkCreate] = OMIT,
-        preset_retrieval_parameters: typing.Optional[PresetRetrievalParams] = OMIT,
-        eval_parameters: typing.Optional[EvalExecutionParams] = OMIT,
-        llama_parse_parameters: typing.Optional[LlamaParseParameters] = OMIT,
-        status: typing.Optional[str] = OMIT,
-        metadata_config: typing.Optional[PipelineMetadataConfig] = OMIT,
-        name: typing.Optional[str] = OMIT,
-        managed_pipeline_id: typing.Optional[str] = OMIT,
-    ) -> Pipeline:
-        """
-        Update an existing pipeline for a project.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - embedding_config: typing.Optional[PipelineUpdateEmbeddingConfig].
-
-            - transform_config: typing.Optional[PipelineUpdateTransformConfig]. Configuration for the transformation.
-
-            - sparse_model_config: typing.Optional[SparseModelConfig].
-
-            - data_sink_id: typing.Optional[str].
-
-            - embedding_model_config_id: typing.Optional[str].
-
-            - data_sink: typing.Optional[DataSinkCreate].
-
-            - preset_retrieval_parameters: typing.Optional[PresetRetrievalParams].
-
-            - eval_parameters: typing.Optional[EvalExecutionParams].
-
-            - llama_parse_parameters: typing.Optional[LlamaParseParameters].
-
-            - status: typing.Optional[str].
-
-            - metadata_config: typing.Optional[PipelineMetadataConfig].
-
-            - name: typing.Optional[str].
-
-            - managed_pipeline_id: typing.Optional[str].
-        """
-        _request: typing.Dict[str, typing.Any] = {}
-        if embedding_config is not OMIT:
-            _request["embedding_config"] = embedding_config
-        if transform_config is not OMIT:
-            _request["transform_config"] = transform_config
-        if sparse_model_config is not OMIT:
-            _request["sparse_model_config"] = sparse_model_config
-        if data_sink_id is not OMIT:
-            _request["data_sink_id"] = data_sink_id
-        if embedding_model_config_id is not OMIT:
-            _request["embedding_model_config_id"] = embedding_model_config_id
-        if data_sink is not OMIT:
-            _request["data_sink"] = data_sink
-        if preset_retrieval_parameters is not OMIT:
-            _request["preset_retrieval_parameters"] = preset_retrieval_parameters
-        if eval_parameters is not OMIT:
-            _request["eval_parameters"] = eval_parameters
-        if llama_parse_parameters is not OMIT:
-            _request["llama_parse_parameters"] = llama_parse_parameters
-        if status is not OMIT:
-            _request["status"] = status
-        if metadata_config is not OMIT:
-            _request["metadata_config"] = metadata_config
-        if name is not OMIT:
-            _request["name"] = name
-        if managed_pipeline_id is not OMIT:
-            _request["managed_pipeline_id"] = managed_pipeline_id
-        _response = await self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}"),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
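As the `if ... is not OMIT` chain above shows, the `OMIT` sentinel lets the update distinguish "field not provided" from an explicit `None`: only arguments actually passed are serialized into the PUT body. A sketch of a partial update under that reading (the ID is a placeholder):

    import asyncio
    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        # Every optional field defaults to OMIT, so only `name` reaches the
        # request body; fields left at OMIT are not sent at all.
        pipeline = await client.pipelines.update_existing_pipeline(
            pipeline_id="pipeline_456",  # hypothetical ID
            name="docs-index-v2",
        )

    asyncio.run(main())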
-    async def delete_pipeline(self, pipeline_id: str) -> None:
-        """
-        Delete a pipeline by ID.
-
-        Parameters:
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.delete_pipeline(
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def get_pipeline_status(self, pipeline_id: str) -> ManagedIngestionStatusResponse:
-        """
-        Get the status of a pipeline by ID.
-
-        Parameters:
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.get_pipeline_status(
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/status"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
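A sketch pairing the pipeline-level status endpoint above with the `sync_pipeline` call that follows: trigger ingestion, then poll until a terminal state. The `status` attribute and the terminal values are assumptions about `ManagedIngestionStatusResponse`, which the diff names but does not define.

    import asyncio
    from llama_cloud.client import AsyncLlamaCloud

    async def sync_and_wait(pipeline_id: str) -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        await client.pipelines.sync_pipeline(pipeline_id)
        while True:
            resp = await client.pipelines.get_pipeline_status(pipeline_id)
            # Terminal status values are assumptions, not shown in this diff.
            if str(resp.status).upper() in ("SUCCESS", "ERROR", "PARTIAL_SUCCESS"):
                break
            await asyncio.sleep(5)

    asyncio.run(sync_and_wait("pipeline_456"))  # hypothetical ID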
-    async def sync_pipeline(self, pipeline_id: str) -> Pipeline:
-        """
-        Run ingestion for the pipeline by incrementally updating the data-sink with upstream changes from data-sources & files.
-
-        Parameters:
-            - pipeline_id: str.
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/sync"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def cancel_pipeline_sync(self, pipeline_id: str) -> Pipeline:
-        """
-        Parameters:
-            - pipeline_id: str.
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/sync/cancel"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def force_delete_pipeline(self, pipeline_id: str) -> None:
-        """
-        Parameters:
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.force_delete_pipeline(
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/force-delete"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def copy_pipeline(self, pipeline_id: str) -> Pipeline:
-        """
-        Copy a pipeline by ID.
-
-        Parameters:
-            - pipeline_id: str.
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/copy"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def list_pipeline_files(
-        self,
-        pipeline_id: str,
-        *,
-        data_source_id: typing.Optional[str] = None,
-        only_manually_uploaded: typing.Optional[bool] = None,
-    ) -> typing.List[PipelineFile]:
-        """
-        Get files for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - data_source_id: typing.Optional[str].
-
-            - only_manually_uploaded: typing.Optional[bool].
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.list_pipeline_files(
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files"),
-            params=remove_none_from_dict(
-                {"data_source_id": data_source_id, "only_manually_uploaded": only_manually_uploaded}
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[PipelineFile], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def add_files_to_pipeline_api(
-        self, pipeline_id: str, *, request: typing.List[PipelineFileCreate]
-    ) -> typing.List[PipelineFile]:
-        """
-        Add files to a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - request: typing.List[PipelineFileCreate].
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.add_files_to_pipeline_api(
-            pipeline_id="string",
-            request=[],
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files"),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[PipelineFile], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
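A sketch of registering already-uploaded files with a pipeline via `add_files_to_pipeline_api`. The diff shows only the list type `PipelineFileCreate`; both its import path and its `file_id` field are assumptions here.

    import asyncio
    from llama_cloud import PipelineFileCreate  # import path assumed from sibling examples
    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        # Attach previously uploaded files to the pipeline; `file_id` on
        # PipelineFileCreate is an assumption, not shown in this diff.
        files = await client.pipelines.add_files_to_pipeline_api(
            pipeline_id="pipeline_456",  # hypothetical IDs
            request=[PipelineFileCreate(file_id="file_789")],
        )

    asyncio.run(main())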
-    async def list_pipeline_files_2(
-        self,
-        pipeline_id: str,
-        *,
-        data_source_id: typing.Optional[str] = None,
-        only_manually_uploaded: typing.Optional[bool] = None,
-        file_name_contains: typing.Optional[str] = None,
-        limit: typing.Optional[int] = None,
-        offset: typing.Optional[int] = None,
-        order_by: typing.Optional[str] = None,
-    ) -> PaginatedListPipelineFilesResponse:
-        """
-        Get files for a pipeline.
-
-        Args:
-            pipeline_id: ID of the pipeline
-            data_source_id: Optional filter by data source ID
-            only_manually_uploaded: Filter for only manually uploaded files
-            file_name_contains: Optional filter by file name (substring match)
-            limit: Limit number of results
-            offset: Offset for pagination
-            order_by: Field to order by
-
-        Parameters:
-            - pipeline_id: str.
-
-            - data_source_id: typing.Optional[str].
-
-            - only_manually_uploaded: typing.Optional[bool].
-
-            - file_name_contains: typing.Optional[str].
-
-            - limit: typing.Optional[int].
-
-            - offset: typing.Optional[int].
-
-            - order_by: typing.Optional[str].
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.list_pipeline_files_2(
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files2"),
-            params=remove_none_from_dict(
-                {
-                    "data_source_id": data_source_id,
-                    "only_manually_uploaded": only_manually_uploaded,
-                    "file_name_contains": file_name_contains,
-                    "limit": limit,
-                    "offset": offset,
-                    "order_by": order_by,
-                }
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(PaginatedListPipelineFilesResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
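The `limit`/`offset` pair on this `/files2` endpoint supports straightforward offset pagination. A sketch, assuming the response exposes the page of items under a `files` attribute (the diff shows only the response type's name):

    import asyncio
    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        offset, page_size = 0, 100
        while True:
            page = await client.pipelines.list_pipeline_files_2(
                pipeline_id="pipeline_456",  # hypothetical ID
                file_name_contains=".pdf",
                limit=page_size,
                offset=offset,
            )
            # `page.files` is an assumption about
            # PaginatedListPipelineFilesResponse, which is not defined here.
            batch = page.files
            if not batch:
                break
            offset += len(batch)

    asyncio.run(main())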
-    async def get_pipeline_file_status_counts(
-        self,
-        pipeline_id: str,
-        *,
-        data_source_id: typing.Optional[str] = None,
-        only_manually_uploaded: typing.Optional[bool] = None,
-    ) -> FileCountByStatusResponse:
-        """
-        Get files for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - data_source_id: typing.Optional[str].
-
-            - only_manually_uploaded: typing.Optional[bool].
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.get_pipeline_file_status_counts(
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files/status-counts"
-            ),
-            params=remove_none_from_dict(
-                {"data_source_id": data_source_id, "only_manually_uploaded": only_manually_uploaded}
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(FileCountByStatusResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def get_pipeline_file_status(self, file_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
-        """
-        Get status of a file for a pipeline.
-
-        Parameters:
-            - file_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.get_pipeline_file_status(
-            file_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files/{file_id}/status"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def update_pipeline_file(
-        self,
-        file_id: str,
-        pipeline_id: str,
-        *,
-        custom_metadata: typing.Optional[
-            typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]
-        ] = OMIT,
-    ) -> PipelineFile:
-        """
-        Update a file for a pipeline.
-
-        Parameters:
-            - file_id: str.
-
-            - pipeline_id: str.
-
-            - custom_metadata: typing.Optional[typing.Dict[str, typing.Optional[PipelineFileUpdateCustomMetadataValue]]].
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.update_pipeline_file(
-            file_id="string",
-            pipeline_id="string",
-        )
-        """
-        _request: typing.Dict[str, typing.Any] = {}
-        if custom_metadata is not OMIT:
-            _request["custom_metadata"] = custom_metadata
-        _response = await self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files/{file_id}"
-            ),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(PipelineFile, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
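A sketch of tagging a pipeline file with custom metadata via `update_pipeline_file`. The parameter is an arbitrary key/value map typed as `Dict[str, Optional[PipelineFileUpdateCustomMetadataValue]]`; whether the map merges with or replaces existing metadata is not visible from this diff.

    import asyncio
    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        # Plain strings and numbers are assumed to be valid metadata values.
        pipeline_file = await client.pipelines.update_pipeline_file(
            file_id="file_789",          # hypothetical IDs
            pipeline_id="pipeline_456",
            custom_metadata={"department": "legal", "year": 2024},
        )

    asyncio.run(main())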
-    async def delete_pipeline_file(self, file_id: str, pipeline_id: str) -> None:
-        """
-        Delete a file from a pipeline.
-
-        Parameters:
-            - file_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.delete_pipeline_file(
-            file_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/files/{file_id}"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def import_pipeline_metadata(self, pipeline_id: str, *, upload_file: typing.IO) -> typing.Dict[str, str]:
-        """
-        Import metadata for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - upload_file: typing.IO.
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/metadata"),
-            data=jsonable_encoder({}),
-            files={"upload_file": upload_file},
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.Dict[str, str], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def delete_pipeline_files_metadata(self, pipeline_id: str) -> None:
-        """
-        Delete metadata for all files in a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.delete_pipeline_files_metadata(
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/metadata"),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def list_pipeline_data_sources(self, pipeline_id: str) -> typing.List[PipelineDataSource]:
-        """
-        Get data sources for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.list_pipeline_data_sources(
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/data-sources"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[PipelineDataSource], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def add_data_sources_to_pipeline(
-        self, pipeline_id: str, *, request: typing.List[PipelineDataSourceCreate]
-    ) -> typing.List[PipelineDataSource]:
-        """
-        Add data sources to a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - request: typing.List[PipelineDataSourceCreate].
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.add_data_sources_to_pipeline(
-            pipeline_id="string",
-            request=[],
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/data-sources"
-            ),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[PipelineDataSource], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def update_pipeline_data_source(
-        self, data_source_id: str, pipeline_id: str, *, sync_interval: typing.Optional[float] = OMIT
-    ) -> PipelineDataSource:
-        """
-        Update the configuration of a data source in a pipeline.
-
-        Parameters:
-            - data_source_id: str.
-
-            - pipeline_id: str.
-
-            - sync_interval: typing.Optional[float].
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.update_pipeline_data_source(
-            data_source_id="string",
-            pipeline_id="string",
-        )
-        """
-        _request: typing.Dict[str, typing.Any] = {}
-        if sync_interval is not OMIT:
-            _request["sync_interval"] = sync_interval
-        _response = await self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/pipelines/{pipeline_id}/data-sources/{data_source_id}",
-            ),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(PipelineDataSource, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
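A sketch of adjusting a pipeline data source's sync cadence with `update_pipeline_data_source`. The diff exposes only the parameter's type (`Optional[float]`); reading `sync_interval` as seconds between automatic syncs is an assumption.

    import asyncio
    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        # sync_interval is the only mutable field on this endpoint; the
        # seconds interpretation is an assumption, not shown in the diff.
        data_source = await client.pipelines.update_pipeline_data_source(
            data_source_id="ds_123",     # hypothetical IDs
            pipeline_id="pipeline_456",
            sync_interval=3600.0,
        )

    asyncio.run(main())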
-    async def delete_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> None:
-        """
-        Delete a data source from a pipeline.
-
-        Parameters:
-            - data_source_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.delete_pipeline_data_source(
-            data_source_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/pipelines/{pipeline_id}/data-sources/{data_source_id}",
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def sync_pipeline_data_source(self, data_source_id: str, pipeline_id: str) -> Pipeline:
-        """
-        Run ingestion for the pipeline data source by incrementally updating the data-sink with upstream changes from data-source.
-
-        Parameters:
-            - data_source_id: str.
-
-            - pipeline_id: str.
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/pipelines/{pipeline_id}/data-sources/{data_source_id}/sync",
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(Pipeline, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def get_pipeline_data_source_status(
-        self, data_source_id: str, pipeline_id: str
-    ) -> ManagedIngestionStatusResponse:
-        """
-        Get the status of a data source for a pipeline.
-
-        Parameters:
-            - data_source_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.get_pipeline_data_source_status(
-            data_source_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/pipelines/{pipeline_id}/data-sources/{data_source_id}/status",
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def run_search(
-        self,
-        pipeline_id: str,
-        *,
-        project_id: typing.Optional[str] = None,
-        organization_id: typing.Optional[str] = None,
-        dense_similarity_top_k: typing.Optional[int] = OMIT,
-        dense_similarity_cutoff: typing.Optional[float] = OMIT,
-        sparse_similarity_top_k: typing.Optional[int] = OMIT,
-        enable_reranking: typing.Optional[bool] = OMIT,
-        rerank_top_n: typing.Optional[int] = OMIT,
-        alpha: typing.Optional[float] = OMIT,
-        search_filters: typing.Optional[MetadataFilters] = OMIT,
-        search_filters_inference_schema: typing.Optional[
-            typing.Dict[str, typing.Optional[RetrievalParamsSearchFiltersInferenceSchemaValue]]
-        ] = OMIT,
-        files_top_k: typing.Optional[int] = OMIT,
-        retrieval_mode: typing.Optional[RetrievalMode] = OMIT,
-        retrieve_image_nodes: typing.Optional[bool] = OMIT,
-        retrieve_page_screenshot_nodes: typing.Optional[bool] = OMIT,
-        retrieve_page_figure_nodes: typing.Optional[bool] = OMIT,
-        query: str,
-        class_name: typing.Optional[str] = OMIT,
-    ) -> RetrieveResults:
-        """
-        Get retrieval results for a managed pipeline and a query
-
-        Parameters:
-            - pipeline_id: str.
-
-            - project_id: typing.Optional[str].
-
-            - organization_id: typing.Optional[str].
-
-            - dense_similarity_top_k: typing.Optional[int].
-
-            - dense_similarity_cutoff: typing.Optional[float].
-
-            - sparse_similarity_top_k: typing.Optional[int].
-
-            - enable_reranking: typing.Optional[bool].
-
-            - rerank_top_n: typing.Optional[int].
-
-            - alpha: typing.Optional[float].
-
-            - search_filters: typing.Optional[MetadataFilters].
-
-            - search_filters_inference_schema: typing.Optional[typing.Dict[str, typing.Optional[RetrievalParamsSearchFiltersInferenceSchemaValue]]].
-
-            - files_top_k: typing.Optional[int].
-
-            - retrieval_mode: typing.Optional[RetrievalMode]. The retrieval mode for the query.
-
-            - retrieve_image_nodes: typing.Optional[bool]. Whether to retrieve image nodes.
-
-            - retrieve_page_screenshot_nodes: typing.Optional[bool]. Whether to retrieve page screenshot nodes.
-
-            - retrieve_page_figure_nodes: typing.Optional[bool]. Whether to retrieve page figure nodes.
-
-            - query: str. The query to retrieve against.
-
-            - class_name: typing.Optional[str].
-        ---
-        from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.run_search(
-            pipeline_id="string",
-            search_filters=MetadataFilters(
-                filters=[],
-                condition=FilterCondition.AND,
-            ),
-            retrieval_mode=RetrievalMode.CHUNKS,
-            query="string",
-        )
-        """
-        _request: typing.Dict[str, typing.Any] = {"query": query}
-        if dense_similarity_top_k is not OMIT:
-            _request["dense_similarity_top_k"] = dense_similarity_top_k
-        if dense_similarity_cutoff is not OMIT:
-            _request["dense_similarity_cutoff"] = dense_similarity_cutoff
-        if sparse_similarity_top_k is not OMIT:
-            _request["sparse_similarity_top_k"] = sparse_similarity_top_k
-        if enable_reranking is not OMIT:
-            _request["enable_reranking"] = enable_reranking
-        if rerank_top_n is not OMIT:
-            _request["rerank_top_n"] = rerank_top_n
-        if alpha is not OMIT:
-            _request["alpha"] = alpha
-        if search_filters is not OMIT:
-            _request["search_filters"] = search_filters
-        if search_filters_inference_schema is not OMIT:
-            _request["search_filters_inference_schema"] = search_filters_inference_schema
-        if files_top_k is not OMIT:
-            _request["files_top_k"] = files_top_k
-        if retrieval_mode is not OMIT:
-            _request["retrieval_mode"] = retrieval_mode
-        if retrieve_image_nodes is not OMIT:
-            _request["retrieve_image_nodes"] = retrieve_image_nodes
-        if retrieve_page_screenshot_nodes is not OMIT:
-            _request["retrieve_page_screenshot_nodes"] = retrieve_page_screenshot_nodes
-        if retrieve_page_figure_nodes is not OMIT:
-            _request["retrieve_page_figure_nodes"] = retrieve_page_figure_nodes
-        if class_name is not OMIT:
-            _request["class_name"] = class_name
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/retrieve"),
-            params=remove_none_from_dict({"project_id": project_id, "organization_id": organization_id}),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(RetrieveResults, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
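A sketch of a retrieval call built only from parameters visible in `run_search` above. Interpreting `alpha` as a dense-versus-sparse weighting and `rerank_top_n` as the post-rerank cutoff are assumptions; the diff shows only the parameter names.

    import asyncio
    from llama_cloud import FilterCondition, MetadataFilters, RetrievalMode
    from llama_cloud.client import AsyncLlamaCloud

    async def main() -> None:
        client = AsyncLlamaCloud(token="YOUR_TOKEN")
        # Hybrid retrieval: top-k per scoring mode, merged, then reranked.
        # The semantics of alpha are an assumption, not stated in the diff.
        results = await client.pipelines.run_search(
            pipeline_id="pipeline_456",  # hypothetical ID
            query="termination clauses",
            dense_similarity_top_k=10,
            sparse_similarity_top_k=10,
            alpha=0.5,
            enable_reranking=True,
            rerank_top_n=5,
            retrieval_mode=RetrievalMode.CHUNKS,
            search_filters=MetadataFilters(filters=[], condition=FilterCondition.AND),
        )

    asyncio.run(main())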
async def list_pipeline_jobs(self, pipeline_id: str) -> typing.List[PipelineDeployment]:
|
|
2904
|
-
"""
|
|
2905
|
-
Get jobs for a pipeline.
|
|
2906
|
-
|
|
2907
|
-
Parameters:
|
|
2908
|
-
- pipeline_id: str.
|
|
2909
|
-
---
|
|
2910
|
-
from llama_cloud.client import AsyncLlamaCloud
|
|
2911
|
-
|
|
2912
|
-
client = AsyncLlamaCloud(
|
|
2913
|
-
token="YOUR_TOKEN",
|
|
2914
|
-
)
|
|
2915
|
-
await client.pipelines.list_pipeline_jobs(
|
|
2916
|
-
pipeline_id="string",
|
|
2917
|
-
)
|
|
2918
|
-
"""
|
|
2919
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
2920
|
-
"GET",
|
|
2921
|
-
urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/jobs"),
|
|
2922
|
-
headers=self._client_wrapper.get_headers(),
|
|
2923
|
-
timeout=60,
|
|
2924
|
-
)
|
|
2925
|
-
if 200 <= _response.status_code < 300:
|
|
2926
|
-
return pydantic.parse_obj_as(typing.List[PipelineDeployment], _response.json()) # type: ignore
|
|
2927
|
-
if _response.status_code == 422:
|
|
2928
|
-
raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
|
|
2929
|
-
try:
|
|
2930
|
-
_response_json = _response.json()
|
|
2931
|
-
except JSONDecodeError:
|
|
2932
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2933
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2934
|
-
|
|
2935
|
-
-    async def get_pipeline_job(self, job_id: str, pipeline_id: str) -> PipelineDeployment:
-        """
-        Get a job for a pipeline.
-
-        Parameters:
-            - job_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.get_pipeline_job(
-            job_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/jobs/{job_id}"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(PipelineDeployment, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
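`list_pipeline_jobs` and `get_pipeline_job` together supported a simple poll loop over ingestion runs. A sketch, assuming `PipelineDeployment` exposes a `status` attribute with terminal values along the lines of SUCCESS/ERROR/CANCELLED; neither the field nor the status names are shown in this diff:

```python
import asyncio

from llama_cloud.client import AsyncLlamaCloud

# Assumed terminal status names; compare with the status enum in the
# installed version before relying on them.
TERMINAL_STATES = ("SUCCESS", "ERROR", "CANCELLED")


async def wait_for_job(client: AsyncLlamaCloud, pipeline_id: str, job_id: str, interval: float = 5.0):
    while True:
        job = await client.pipelines.get_pipeline_job(job_id=job_id, pipeline_id=pipeline_id)
        # `status` is an assumed field; substring match tolerates enum reprs
        # such as "ManagedIngestionStatus.SUCCESS".
        status = str(getattr(job, "status", "")).upper()
        if any(state in status for state in TERMINAL_STATES):
            return job
        await asyncio.sleep(interval)
```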
-    async def get_playground_session(self, pipeline_id: str) -> PlaygroundSession:
-        """
-        Get a playground session for a user and pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.get_playground_session(
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/playground-session"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(PlaygroundSession, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def chat(
-        self,
-        pipeline_id: str,
-        *,
-        messages: typing.Optional[typing.List[InputMessage]] = OMIT,
-        data: typing.Optional[ChatData] = OMIT,
-        class_name: typing.Optional[str] = OMIT,
-    ) -> typing.Any:
-        """
-        Make a retrieval query + chat completion for a managed pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - messages: typing.Optional[typing.List[InputMessage]].
-
-            - data: typing.Optional[ChatData].
-
-            - class_name: typing.Optional[str].
-        ---
-        from llama_cloud import (
-            ChatData,
-            FilterCondition,
-            LlmParameters,
-            MetadataFilters,
-            PresetRetrievalParams,
-            RetrievalMode,
-            SupportedLlmModelNames,
-        )
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.chat(
-            pipeline_id="string",
-            data=ChatData(
-                retrieval_parameters=PresetRetrievalParams(
-                    search_filters=MetadataFilters(
-                        filters=[],
-                        condition=FilterCondition.AND,
-                    ),
-                    retrieval_mode=RetrievalMode.CHUNKS,
-                ),
-                llm_parameters=LlmParameters(
-                    model_name=SupportedLlmModelNames.GPT_4_O,
-                ),
-            ),
-        )
-        """
-        _request: typing.Dict[str, typing.Any] = {}
-        if messages is not OMIT:
-            _request["messages"] = messages
-        if data is not OMIT:
-            _request["data"] = data
-        if class_name is not OMIT:
-            _request["class_name"] = class_name
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/chat"),
-            json=jsonable_encoder(_request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
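The generated example above exercises only the `data` knob; the conversation itself went in `messages`. A hedged sketch of a fuller call: the `role` and `content` fields on `InputMessage` are assumptions modeled on typical chat-message schemas, and since the endpoint returns `typing.Any`, the response shape is server-defined.

```python
import asyncio

from llama_cloud import InputMessage
from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    answer = await client.pipelines.chat(
        pipeline_id="my-pipeline-id",
        messages=[
            # role/content are assumed InputMessage fields; verify against
            # the generated types module of your installed version.
            InputMessage(role="user", content="Summarize the indexed contract."),
        ],
    )
    print(answer)  # shape is server-defined, hence typing.Any above


asyncio.run(main())
```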
-    async def list_pipeline_documents(
-        self,
-        pipeline_id: str,
-        *,
-        skip: typing.Optional[int] = None,
-        limit: typing.Optional[int] = None,
-        file_id: typing.Optional[str] = None,
-        only_direct_upload: typing.Optional[bool] = None,
-        only_api_data_source_documents: typing.Optional[bool] = None,
-    ) -> typing.List[CloudDocument]:
-        """
-        Return a list of documents for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - skip: typing.Optional[int].
-
-            - limit: typing.Optional[int].
-
-            - file_id: typing.Optional[str].
-
-            - only_direct_upload: typing.Optional[bool].
-
-            - only_api_data_source_documents: typing.Optional[bool].
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.list_pipeline_documents(
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents"
-            ),
-            params=remove_none_from_dict(
-                {
-                    "skip": skip,
-                    "limit": limit,
-                    "file_id": file_id,
-                    "only_direct_upload": only_direct_upload,
-                    "only_api_data_source_documents": only_api_data_source_documents,
-                }
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[CloudDocument], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
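The query parameters above allowed scoping the listing to documents derived from a single uploaded file, or to directly uploaded documents only. A short sketch; `doc.id` is an assumed `CloudDocument` field, since the model definition sits outside this diff:

```python
import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    docs = await client.pipelines.list_pipeline_documents(
        pipeline_id="my-pipeline-id",
        file_id="my-file-id",      # only documents derived from this upload
        only_direct_upload=True,   # skip documents synced from data sources
        limit=50,
    )
    for doc in docs:
        print(getattr(doc, "id", doc))  # `id` is an assumed field


asyncio.run(main())
```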
-    async def create_batch_pipeline_documents(
-        self, pipeline_id: str, *, request: typing.List[CloudDocumentCreate]
-    ) -> typing.List[CloudDocument]:
-        """
-        Batch create documents for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - request: typing.List[CloudDocumentCreate].
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.create_batch_pipeline_documents(
-            pipeline_id="string",
-            request=[],
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "POST",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents"
-            ),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[CloudDocument], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def upsert_batch_pipeline_documents(
-        self, pipeline_id: str, *, request: typing.List[CloudDocumentCreate]
-    ) -> typing.List[CloudDocument]:
-        """
-        Batch create or update a document for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - request: typing.List[CloudDocumentCreate].
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.upsert_batch_pipeline_documents(
-            pipeline_id="string",
-            request=[],
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "PUT",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents"
-            ),
-            json=jsonable_encoder(request),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[CloudDocument], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
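The only difference from `create_batch_pipeline_documents` above is the verb: both hit `api/v1/pipelines/{pipeline_id}/documents`, but the PUT variant creates or replaces. A sketch, assuming `CloudDocumentCreate` accepts `id`, `text`, and `metadata` (field names are not part of this diff); supplying a stable `id` is what makes repeated calls idempotent rather than duplicating documents:

```python
import asyncio

from llama_cloud import CloudDocumentCreate
from llama_cloud.client import AsyncLlamaCloud


async def main() -> None:
    client = AsyncLlamaCloud(token="YOUR_TOKEN")
    docs = await client.pipelines.upsert_batch_pipeline_documents(
        pipeline_id="my-pipeline-id",
        request=[
            # id/text/metadata are assumed CloudDocumentCreate fields.
            CloudDocumentCreate(id="doc-1", text="Hello, world.", metadata={"source": "manual"}),
        ],
    )
    print(f"upserted {len(docs)} document(s)")


asyncio.run(main())
```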
-    async def paginated_list_pipeline_documents(
-        self,
-        pipeline_id: str,
-        *,
-        skip: typing.Optional[int] = None,
-        limit: typing.Optional[int] = None,
-        file_id: typing.Optional[str] = None,
-        only_direct_upload: typing.Optional[bool] = None,
-        only_api_data_source_documents: typing.Optional[bool] = None,
-    ) -> PaginatedListCloudDocumentsResponse:
-        """
-        Return a list of documents for a pipeline.
-
-        Parameters:
-            - pipeline_id: str.
-
-            - skip: typing.Optional[int].
-
-            - limit: typing.Optional[int].
-
-            - file_id: typing.Optional[str].
-
-            - only_direct_upload: typing.Optional[bool].
-
-            - only_api_data_source_documents: typing.Optional[bool].
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.paginated_list_pipeline_documents(
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents/paginated"
-            ),
-            params=remove_none_from_dict(
-                {
-                    "skip": skip,
-                    "limit": limit,
-                    "file_id": file_id,
-                    "only_direct_upload": only_direct_upload,
-                    "only_api_data_source_documents": only_api_data_source_documents,
-                }
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(PaginatedListCloudDocumentsResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
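With `skip`/`limit` as the paging mechanism, draining the paginated endpoint is a plain offset loop. A sketch that assumes the response object carries the page under a `documents` attribute; the actual `PaginatedListCloudDocumentsResponse` fields are defined elsewhere in the package:

```python
from llama_cloud.client import AsyncLlamaCloud


async def iter_all_documents(client: AsyncLlamaCloud, pipeline_id: str, page_size: int = 100):
    skip = 0
    while True:
        page = await client.pipelines.paginated_list_pipeline_documents(
            pipeline_id=pipeline_id, skip=skip, limit=page_size
        )
        batch = getattr(page, "documents", None) or []  # assumed attribute name
        if not batch:
            return
        for doc in batch:
            yield doc
        skip += len(batch)  # advance the offset by what was actually returned
```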
-    async def get_pipeline_document(self, document_id: str, pipeline_id: str) -> CloudDocument:
-        """
-        Return a single document for a pipeline.
-
-        Parameters:
-            - document_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.get_pipeline_document(
-            document_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents/{document_id}"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(CloudDocument, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def delete_pipeline_document(self, document_id: str, pipeline_id: str) -> None:
-        """
-        Delete a document from a pipeline.
-        Initiates an async job that will:
-
-        1. Delete vectors from the vector store
-        2. Delete the document from MongoDB after vectors are successfully deleted
-
-        Parameters:
-            - document_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.delete_pipeline_document(
-            document_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "DELETE",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/", f"api/v1/pipelines/{pipeline_id}/documents/{document_id}"
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def get_pipeline_document_status(self, document_id: str, pipeline_id: str) -> ManagedIngestionStatusResponse:
-        """
-        Return the ingestion status of a single document for a pipeline.
-
-        Parameters:
-            - document_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.get_pipeline_document_status(
-            document_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/status",
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(ManagedIngestionStatusResponse, _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
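Because `delete_pipeline_document` above only kicks off an async job, this status endpoint is its natural companion: poll it until ingestion (or deletion) settles. A sketch with assumed terminal status names; compare them against `ManagedIngestionStatusResponse` in the installed build:

```python
import asyncio

from llama_cloud.client import AsyncLlamaCloud


async def wait_for_document(client: AsyncLlamaCloud, pipeline_id: str, document_id: str) -> str:
    while True:
        resp = await client.pipelines.get_pipeline_document_status(
            document_id=document_id, pipeline_id=pipeline_id
        )
        status = str(getattr(resp, "status", "")).upper()  # assumed field name
        if any(s in status for s in ("SUCCESS", "ERROR", "PARTIAL_SUCCESS", "CANCELLED")):
            return status
        await asyncio.sleep(2.0)
```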
-    async def list_pipeline_document_chunks(self, document_id: str, pipeline_id: str) -> typing.List[TextNode]:
-        """
-        Return a list of chunks for a pipeline document.
-
-        Parameters:
-            - document_id: str.
-
-            - pipeline_id: str.
-        ---
-        from llama_cloud.client import AsyncLlamaCloud
-
-        client = AsyncLlamaCloud(
-            token="YOUR_TOKEN",
-        )
-        await client.pipelines.list_pipeline_document_chunks(
-            document_id="string",
-            pipeline_id="string",
-        )
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            "GET",
-            urllib.parse.urljoin(
-                f"{self._client_wrapper.get_base_url()}/",
-                f"api/v1/pipelines/{pipeline_id}/documents/{document_id}/chunks",
-            ),
-            headers=self._client_wrapper.get_headers(),
-            timeout=60,
-        )
-        if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[TextNode], _response.json())  # type: ignore
-        if _response.status_code == 422:
-            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
-        try:
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)