llama-cloud 0.0.9__tar.gz → 0.0.10__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of llama-cloud might be problematic.
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/PKG-INFO +1 -1
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/__init__.py +8 -2
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/data_sources/types/data_source_update_component_one.py +2 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/extraction/client.py +55 -22
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/organizations/client.py +81 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/parsing/client.py +104 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/pipelines/client.py +219 -41
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/__init__.py +8 -2
- llama_cloud-0.0.9/llama_cloud/types/chat_params.py → llama_cloud-0.0.10/llama_cloud/types/chat_data.py +3 -3
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_azure_ai_search_vector_store.py +1 -1
- llama_cloud-0.0.10/llama_cloud/types/cloud_confluence_data_source.py +45 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/configurable_data_source_names.py +4 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_source_component_one.py +2 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_source_create_component_one.py +2 -0
- llama_cloud-0.0.10/llama_cloud/types/extraction_job.py +35 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/extraction_schema.py +1 -1
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/llama_parse_parameters.py +5 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pipeline.py +0 -3
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pipeline_create.py +0 -3
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pipeline_data_source_component_one.py +2 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/user_organization.py +10 -1
- llama_cloud-0.0.10/llama_cloud/types/user_organization_delete.py +36 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/pyproject.toml +1 -1
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/LICENSE +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/README.md +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/client.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/core/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/core/api_error.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/core/client_wrapper.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/core/datetime_utils.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/core/jsonable_encoder.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/core/remove_none_from_dict.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/environment.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/errors/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/errors/unprocessable_entity_error.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/component_definitions/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/component_definitions/client.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/data_sinks/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/data_sinks/client.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/data_sinks/types/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/data_sinks/types/data_sink_update_component.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/data_sinks/types/data_sink_update_component_one.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/data_sources/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/data_sources/client.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/data_sources/types/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/data_sources/types/data_source_update_component.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/data_sources/types/data_source_update_custom_metadata_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/evals/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/evals/client.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/extraction/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/extraction/types/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/extraction/types/extraction_schema_update_data_schema_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/files/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/files/client.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/files/types/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/files/types/file_create_resource_info_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/organizations/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/parsing/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/pipelines/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/pipelines/types/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/pipelines/types/pipeline_file_update_custom_metadata_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/projects/__init__.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/resources/projects/client.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/azure_open_ai_embedding.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/base.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/base_prompt_template.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/bedrock_embedding.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/chat_message.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_az_storage_blob_data_source.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_chroma_vector_store.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_document.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_document_create.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_jira_data_source.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_notion_page_data_source.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_one_drive_data_source.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_pinecone_vector_store.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_postgres_vector_store.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_qdrant_vector_store.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_s_3_data_source.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_sharepoint_data_source.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_slack_data_source.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cloud_weaviate_vector_store.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/code_splitter.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/cohere_embedding.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/configurable_data_sink_names.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/configurable_transformation_definition.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/configurable_transformation_names.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/configured_transformation_item.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/configured_transformation_item_component.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/configured_transformation_item_component_one.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_sink.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_sink_component.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_sink_component_one.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_sink_create.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_sink_create_component.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_sink_create_component_one.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_sink_definition.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_source.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_source_component.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_source_create.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_source_create_component.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_source_create_custom_metadata_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_source_custom_metadata_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/data_source_definition.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/eval_dataset.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/eval_dataset_job_params.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/eval_dataset_job_record.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/eval_execution_params.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/eval_execution_params_override.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/eval_llm_model_data.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/eval_question.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/eval_question_create.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/eval_question_result.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/extraction_result.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/extraction_result_data_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/extraction_schema_data_schema_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/file.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/file_resource_info_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/filter_condition.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/filter_operator.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/gemini_embedding.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/html_node_parser.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/http_validation_error.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/hugging_face_inference_api_embedding.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/hugging_face_inference_api_embedding_token.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/json_node_parser.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/llama_parse_supported_file_extensions.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/llm.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/local_eval.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/local_eval_results.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/local_eval_sets.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/managed_ingestion_status.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/managed_ingestion_status_response.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/markdown_element_node_parser.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/markdown_node_parser.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/message_role.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/metadata_filter.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/metadata_filter_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/metadata_filters.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/metadata_filters_filters_item.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/metric_result.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/node_parser.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/object_type.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/open_ai_embedding.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/organization.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/organization_create.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/parser_languages.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/parsing_history_item.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/parsing_job.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/parsing_job_json_result.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/parsing_job_markdown_result.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/parsing_job_text_result.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/parsing_usage.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pipeline_data_source.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pipeline_data_source_component.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pipeline_data_source_create.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pipeline_data_source_custom_metadata_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pipeline_deployment.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pipeline_file.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pipeline_file_create.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pipeline_file_create_custom_metadata_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pipeline_file_custom_metadata_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pipeline_file_resource_info_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pipeline_type.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pooling.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/preset_retrieval_params.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/presigned_url.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/project.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/project_create.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/prompt_mixin_prompts.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/prompt_spec.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/pydantic_program_mode.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/related_node_info.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/retrieval_mode.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/retrieve_results.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/sentence_splitter.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/simple_file_node_parser.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/status_enum.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/supported_eval_llm_model.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/supported_eval_llm_model_names.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/text_node.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/text_node_relationships_value.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/text_node_with_score.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/token_text_splitter.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/transformation_category_names.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/user_organization_create.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/validation_error.py +0 -0
- {llama_cloud-0.0.9 → llama_cloud-0.0.10}/llama_cloud/types/validation_error_loc_item.py +0 -0
--- llama_cloud-0.0.9/llama_cloud/__init__.py
+++ llama_cloud-0.0.10/llama_cloud/__init__.py
@@ -5,11 +5,12 @@ from .types import (
     Base,
     BasePromptTemplate,
     BedrockEmbedding,
+    ChatData,
     ChatMessage,
-    ChatParams,
     CloudAzStorageBlobDataSource,
     CloudAzureAiSearchVectorStore,
     CloudChromaVectorStore,
+    CloudConfluenceDataSource,
     CloudDocument,
     CloudDocumentCreate,
     CloudJiraDataSource,
@@ -56,6 +57,7 @@ from .types import (
     EvalQuestion,
     EvalQuestionCreate,
     EvalQuestionResult,
+    ExtractionJob,
     ExtractionResult,
     ExtractionResultDataValue,
     ExtractionSchema,
@@ -135,6 +137,7 @@ from .types import (
     TransformationCategoryNames,
     UserOrganization,
     UserOrganizationCreate,
+    UserOrganizationDelete,
     ValidationError,
     ValidationErrorLocItem,
 )
@@ -166,11 +169,12 @@ __all__ = [
     "Base",
     "BasePromptTemplate",
     "BedrockEmbedding",
+    "ChatData",
     "ChatMessage",
-    "ChatParams",
     "CloudAzStorageBlobDataSource",
     "CloudAzureAiSearchVectorStore",
     "CloudChromaVectorStore",
+    "CloudConfluenceDataSource",
     "CloudDocument",
     "CloudDocumentCreate",
     "CloudJiraDataSource",
@@ -222,6 +226,7 @@ __all__ = [
     "EvalQuestion",
     "EvalQuestionCreate",
     "EvalQuestionResult",
+    "ExtractionJob",
     "ExtractionResult",
     "ExtractionResultDataValue",
     "ExtractionSchema",
@@ -306,6 +311,7 @@ __all__ = [
     "UnprocessableEntityError",
     "UserOrganization",
     "UserOrganizationCreate",
+    "UserOrganizationDelete",
     "ValidationError",
     "ValidationErrorLocItem",
     "component_definitions",
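
The hunks above only touch the package's public surface: ChatParams is dropped in favor of ChatData, and ChatData, CloudConfluenceDataSource, ExtractionJob, and UserOrganizationDelete become importable from the top-level package. A minimal sketch of what this means for callers, assuming nothing else about the import paths changes:

from llama_cloud import (
    ChatData,                   # replaces the removed ChatParams export
    CloudConfluenceDataSource,  # new Confluence data source type
    ExtractionJob,              # typed result now returned by the extraction job endpoints
    UserOrganizationDelete,     # request model for the new batch user-removal endpoint
)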
--- llama_cloud-0.0.9/llama_cloud/resources/data_sources/types/data_source_update_component_one.py
+++ llama_cloud-0.0.10/llama_cloud/resources/data_sources/types/data_source_update_component_one.py
@@ -3,6 +3,7 @@
 import typing

 from ....types.cloud_az_storage_blob_data_source import CloudAzStorageBlobDataSource
+from ....types.cloud_confluence_data_source import CloudConfluenceDataSource
 from ....types.cloud_jira_data_source import CloudJiraDataSource
 from ....types.cloud_notion_page_data_source import CloudNotionPageDataSource
 from ....types.cloud_one_drive_data_source import CloudOneDriveDataSource
@@ -17,5 +18,6 @@ DataSourceUpdateComponentOne = typing.Union[
     CloudSharepointDataSource,
     CloudSlackDataSource,
     CloudNotionPageDataSource,
+    CloudConfluenceDataSource,
     CloudJiraDataSource,
 ]
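
Since the union is only widened, existing members keep working and a Confluence source becomes a valid update component. A small sketch, assuming direct module imports; the concrete fields of CloudConfluenceDataSource live in llama_cloud/types/cloud_confluence_data_source.py, which is not shown in this diff:

from llama_cloud.resources.data_sources.types.data_source_update_component_one import (
    DataSourceUpdateComponentOne,
)
from llama_cloud.types.cloud_confluence_data_source import CloudConfluenceDataSource


def is_confluence_update(component: DataSourceUpdateComponentOne) -> bool:
    # CloudConfluenceDataSource is now one of the accepted union members.
    return isinstance(component, CloudConfluenceDataSource)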
--- llama_cloud-0.0.9/llama_cloud/resources/extraction/client.py
+++ llama_cloud-0.0.10/llama_cloud/resources/extraction/client.py
@@ -9,6 +9,7 @@ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
 from ...core.jsonable_encoder import jsonable_encoder
 from ...core.remove_none_from_dict import remove_none_from_dict
 from ...errors.unprocessable_entity_error import UnprocessableEntityError
+from ...types.extraction_job import ExtractionJob
 from ...types.extraction_result import ExtractionResult
 from ...types.extraction_schema import ExtractionSchema
 from ...types.http_validation_error import HttpValidationError
@@ -31,15 +32,25 @@ class ExtractionClient:
         self._client_wrapper = client_wrapper

     def infer_schema(
-        self,
+        self,
+        *,
+        schema_id: typing.Optional[str] = OMIT,
+        name: str,
+        project_id: typing.Optional[str] = OMIT,
+        file_ids: typing.List[str],
+        stream: typing.Optional[bool] = OMIT,
     ) -> ExtractionSchema:
         """
         Parameters:
+            - schema_id: typing.Optional[str]. The ID of a schema to update with the new schema
+
             - name: str. The name of the extraction schema

             - project_id: typing.Optional[str]. The ID of the project that the extraction schema belongs to

             - file_ids: typing.List[str]. The IDs of the files that the extraction schema contains
+
+            - stream: typing.Optional[bool]. Whether to stream the results of the extraction schema
         ---
         from llama_cloud.client import LlamaCloud

@@ -52,8 +63,12 @@ class ExtractionClient:
         )
         """
         _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids}
+        if schema_id is not OMIT:
+            _request["schema_id"] = schema_id
         if project_id is not OMIT:
             _request["project_id"] = project_id
+        if stream is not OMIT:
+            _request["stream"] = stream
         _response = self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas/infer"),
@@ -171,17 +186,19 @@ class ExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def list_jobs(self, *, schema_id:
+    def list_jobs(self, *, schema_id: str) -> typing.List[ExtractionJob]:
         """
         Parameters:
-            - schema_id:
+            - schema_id: str.
         ---
         from llama_cloud.client import LlamaCloud

         client = LlamaCloud(
             token="YOUR_TOKEN",
         )
-        client.extraction.list_jobs(
+        client.extraction.list_jobs(
+            schema_id="string",
+        )
         """
         _response = self._client_wrapper.httpx_client.request(
             "GET",
@@ -191,7 +208,7 @@ class ExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[
+            return pydantic.parse_obj_as(typing.List[ExtractionJob], _response.json()) # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
         try:
@@ -200,7 +217,7 @@ class ExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def run_job(self, *, schema_id: str, file_id: str) ->
+    def run_job(self, *, schema_id: str, file_id: str) -> ExtractionJob:
         """
         Parameters:
             - schema_id: str. The id of the schema
@@ -225,7 +242,7 @@ class ExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(ExtractionJob, _response.json()) # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
         try:
@@ -234,7 +251,7 @@ class ExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def get_job(self, job_id: str) ->
+    def get_job(self, job_id: str) -> ExtractionJob:
         """
         Parameters:
             - job_id: str.
@@ -255,7 +272,7 @@ class ExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(ExtractionJob, _response.json()) # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
         try:
@@ -264,7 +281,7 @@ class ExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    def run_jobs_in_batch(self, *, schema_id: str, file_ids: typing.List[str]) -> typing.List[
+    def run_jobs_in_batch(self, *, schema_id: str, file_ids: typing.List[str]) -> typing.List[ExtractionJob]:
         """
         Parameters:
             - schema_id: str. The id of the schema
@@ -289,7 +306,7 @@ class ExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[
+            return pydantic.parse_obj_as(typing.List[ExtractionJob], _response.json()) # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
         try:
@@ -334,15 +351,25 @@ class AsyncExtractionClient:
         self._client_wrapper = client_wrapper

     async def infer_schema(
-        self,
+        self,
+        *,
+        schema_id: typing.Optional[str] = OMIT,
+        name: str,
+        project_id: typing.Optional[str] = OMIT,
+        file_ids: typing.List[str],
+        stream: typing.Optional[bool] = OMIT,
     ) -> ExtractionSchema:
         """
         Parameters:
+            - schema_id: typing.Optional[str]. The ID of a schema to update with the new schema
+
             - name: str. The name of the extraction schema

             - project_id: typing.Optional[str]. The ID of the project that the extraction schema belongs to

             - file_ids: typing.List[str]. The IDs of the files that the extraction schema contains
+
+            - stream: typing.Optional[bool]. Whether to stream the results of the extraction schema
         ---
         from llama_cloud.client import AsyncLlamaCloud

@@ -355,8 +382,12 @@ class AsyncExtractionClient:
         )
         """
         _request: typing.Dict[str, typing.Any] = {"name": name, "file_ids": file_ids}
+        if schema_id is not OMIT:
+            _request["schema_id"] = schema_id
         if project_id is not OMIT:
             _request["project_id"] = project_id
+        if stream is not OMIT:
+            _request["stream"] = stream
         _response = await self._client_wrapper.httpx_client.request(
             "POST",
             urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/extraction/schemas/infer"),
@@ -474,17 +505,19 @@ class AsyncExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def list_jobs(self, *, schema_id:
+    async def list_jobs(self, *, schema_id: str) -> typing.List[ExtractionJob]:
         """
         Parameters:
-            - schema_id:
+            - schema_id: str.
         ---
         from llama_cloud.client import AsyncLlamaCloud

         client = AsyncLlamaCloud(
             token="YOUR_TOKEN",
         )
-        await client.extraction.list_jobs(
+        await client.extraction.list_jobs(
+            schema_id="string",
+        )
         """
         _response = await self._client_wrapper.httpx_client.request(
             "GET",
@@ -494,7 +527,7 @@ class AsyncExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[
+            return pydantic.parse_obj_as(typing.List[ExtractionJob], _response.json()) # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
         try:
@@ -503,7 +536,7 @@ class AsyncExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def run_job(self, *, schema_id: str, file_id: str) ->
+    async def run_job(self, *, schema_id: str, file_id: str) -> ExtractionJob:
         """
         Parameters:
             - schema_id: str. The id of the schema
@@ -528,7 +561,7 @@ class AsyncExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(ExtractionJob, _response.json()) # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
         try:
@@ -537,7 +570,7 @@ class AsyncExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def get_job(self, job_id: str) ->
+    async def get_job(self, job_id: str) -> ExtractionJob:
         """
         Parameters:
             - job_id: str.
@@ -558,7 +591,7 @@ class AsyncExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(
+            return pydantic.parse_obj_as(ExtractionJob, _response.json()) # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
         try:
@@ -567,7 +600,7 @@ class AsyncExtractionClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-    async def run_jobs_in_batch(self, *, schema_id: str, file_ids: typing.List[str]) -> typing.List[
+    async def run_jobs_in_batch(self, *, schema_id: str, file_ids: typing.List[str]) -> typing.List[ExtractionJob]:
         """
         Parameters:
             - schema_id: str. The id of the schema
@@ -592,7 +625,7 @@ class AsyncExtractionClient:
             timeout=60,
         )
         if 200 <= _response.status_code < 300:
-            return pydantic.parse_obj_as(typing.List[
+            return pydantic.parse_obj_as(typing.List[ExtractionJob], _response.json()) # type: ignore
         if _response.status_code == 422:
             raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
         try:
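
Taken together, the extraction changes add optional schema_id and stream arguments to infer_schema and give list_jobs, run_job, get_job, and run_jobs_in_batch a concrete ExtractionJob return type (the 0.0.9 side of these signatures is truncated in this view). A minimal usage sketch based on the signatures above; the token and IDs are placeholders:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# infer_schema can now update an existing schema in place and optionally stream results.
schema = client.extraction.infer_schema(
    name="my-schema",
    file_ids=["file_id"],
    schema_id="existing_schema_id",  # optional, new in 0.0.10
    stream=False,                    # optional, new in 0.0.10
)

# list_jobs takes a required schema_id, and the job endpoints return ExtractionJob objects.
jobs = client.extraction.list_jobs(schema_id="schema_id")
job = client.extraction.run_job(schema_id="schema_id", file_id="file_id")
job = client.extraction.get_job(job_id="job_id")
batch = client.extraction.run_jobs_in_batch(schema_id="schema_id", file_ids=["file_id"])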
--- llama_cloud-0.0.9/llama_cloud/resources/organizations/client.py
+++ llama_cloud-0.0.10/llama_cloud/resources/organizations/client.py
@@ -13,6 +13,7 @@ from ...types.organization import Organization
 from ...types.organization_create import OrganizationCreate
 from ...types.user_organization import UserOrganization
 from ...types.user_organization_create import UserOrganizationCreate
+from ...types.user_organization_delete import UserOrganizationDelete

 try:
     import pydantic
@@ -405,6 +406,46 @@ class OrganizationsClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def batch_remove_users_from_organization(
+        self, organization_id: str, *, request: typing.List[UserOrganizationDelete]
+    ) -> None:
+        """
+        Remove a batch of users from an organization.
+
+        Parameters:
+            - organization_id: str.
+
+            - request: typing.List[UserOrganizationDelete].
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.organizations.batch_remove_users_from_organization(
+            organization_id="string",
+            request=[],
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}/users/remove"
+            ),
+            json=jsonable_encoder(request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+

 class AsyncOrganizationsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -784,3 +825,43 @@ class AsyncOrganizationsClient:
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def batch_remove_users_from_organization(
+        self, organization_id: str, *, request: typing.List[UserOrganizationDelete]
+    ) -> None:
+        """
+        Remove a batch of users from an organization.
+
+        Parameters:
+            - organization_id: str.
+
+            - request: typing.List[UserOrganizationDelete].
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.organizations.batch_remove_users_from_organization(
+            organization_id="string",
+            request=[],
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "PUT",
+            urllib.parse.urljoin(
+                f"{self._client_wrapper.get_base_url()}/", f"api/v1/organizations/{organization_id}/users/remove"
+            ),
+            json=jsonable_encoder(request),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
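
The only organizations change is the new batch-removal endpoint, which PUTs a list of UserOrganizationDelete records to /users/remove and returns nothing on success (a 422 response raises UnprocessableEntityError). A sketch mirroring the generated docstring example; the fields of UserOrganizationDelete are defined in llama_cloud/types/user_organization_delete.py, which is not shown in this diff, so the request list is left empty here:

from llama_cloud.client import LlamaCloud

client = LlamaCloud(token="YOUR_TOKEN")

# Remove a batch of memberships from an organization; returns None on success.
client.organizations.batch_remove_users_from_organization(
    organization_id="organization_id",
    request=[],  # populate with UserOrganizationDelete objects
)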
--- llama_cloud-0.0.9/llama_cloud/resources/parsing/client.py
+++ llama_cloud-0.0.10/llama_cloud/resources/parsing/client.py
@@ -113,6 +113,11 @@ class ParsingClient:
         page_separator: str,
         bounding_box: str,
         target_pages: str,
+        use_vendor_multimodal_model: bool,
+        vendor_multimodal_model_name: str,
+        vendor_multimodal_api_key: str,
+        page_prefix: str,
+        page_suffix: str,
         file: typing.IO,
     ) -> ParsingJob:
         """
@@ -143,6 +148,16 @@ class ParsingClient:

             - target_pages: str.

+            - use_vendor_multimodal_model: bool.
+
+            - vendor_multimodal_model_name: str.
+
+            - vendor_multimodal_api_key: str.
+
+            - page_prefix: str.
+
+            - page_suffix: str.
+
             - file: typing.IO.
         """
         _response = self._client_wrapper.httpx_client.request(
@@ -162,6 +177,11 @@ class ParsingClient:
                     "page_separator": page_separator,
                     "bounding_box": bounding_box,
                     "target_pages": target_pages,
+                    "use_vendor_multimodal_model": use_vendor_multimodal_model,
+                    "vendor_multimodal_model_name": vendor_multimodal_model_name,
+                    "vendor_multimodal_api_key": vendor_multimodal_api_key,
+                    "page_prefix": page_prefix,
+                    "page_suffix": page_suffix,
                 }
             ),
             files={"file": file},
@@ -238,6 +258,38 @@ class ParsingClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def get_parsing_job_details(self, job_id: str) -> typing.Any:
+        """
+        Get a job by id
+
+        Parameters:
+            - job_id: str.
+        ---
+        from llama_cloud.client import LlamaCloud
+
+        client = LlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        client.parsing.get_parsing_job_details(
+            job_id="string",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/details"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     def get_job_text_result(self, job_id: str) -> ParsingJobTextResult:
         """
         Get a job by id
@@ -586,6 +638,11 @@ class AsyncParsingClient:
         page_separator: str,
         bounding_box: str,
         target_pages: str,
+        use_vendor_multimodal_model: bool,
+        vendor_multimodal_model_name: str,
+        vendor_multimodal_api_key: str,
+        page_prefix: str,
+        page_suffix: str,
         file: typing.IO,
     ) -> ParsingJob:
         """
@@ -616,6 +673,16 @@ class AsyncParsingClient:

             - target_pages: str.

+            - use_vendor_multimodal_model: bool.
+
+            - vendor_multimodal_model_name: str.
+
+            - vendor_multimodal_api_key: str.
+
+            - page_prefix: str.
+
+            - page_suffix: str.
+
             - file: typing.IO.
         """
         _response = await self._client_wrapper.httpx_client.request(
@@ -635,6 +702,11 @@ class AsyncParsingClient:
                     "page_separator": page_separator,
                     "bounding_box": bounding_box,
                     "target_pages": target_pages,
+                    "use_vendor_multimodal_model": use_vendor_multimodal_model,
+                    "vendor_multimodal_model_name": vendor_multimodal_model_name,
+                    "vendor_multimodal_api_key": vendor_multimodal_api_key,
+                    "page_prefix": page_prefix,
+                    "page_suffix": page_suffix,
                 }
             ),
             files={"file": file},
@@ -711,6 +783,38 @@ class AsyncParsingClient:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    async def get_parsing_job_details(self, job_id: str) -> typing.Any:
+        """
+        Get a job by id
+
+        Parameters:
+            - job_id: str.
+        ---
+        from llama_cloud.client import AsyncLlamaCloud
+
+        client = AsyncLlamaCloud(
+            token="YOUR_TOKEN",
+        )
+        await client.parsing.get_parsing_job_details(
+            job_id="string",
+        )
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "GET",
+            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/details"),
+            headers=self._client_wrapper.get_headers(),
+            timeout=60,
+        )
+        if 200 <= _response.status_code < 300:
+            return pydantic.parse_obj_as(typing.Any, _response.json()) # type: ignore
+        if _response.status_code == 422:
+            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json())) # type: ignore
+        try:
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
     async def get_job_text_result(self, job_id: str) -> ParsingJobTextResult:
         """
         Get a job by id