aiagents4pharma 0.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiagents4pharma/__init__.py +11 -0
- aiagents4pharma/talk2aiagents4pharma/.dockerignore +13 -0
- aiagents4pharma/talk2aiagents4pharma/Dockerfile +133 -0
- aiagents4pharma/talk2aiagents4pharma/README.md +1 -0
- aiagents4pharma/talk2aiagents4pharma/__init__.py +5 -0
- aiagents4pharma/talk2aiagents4pharma/agents/__init__.py +6 -0
- aiagents4pharma/talk2aiagents4pharma/agents/main_agent.py +70 -0
- aiagents4pharma/talk2aiagents4pharma/configs/__init__.py +5 -0
- aiagents4pharma/talk2aiagents4pharma/configs/agents/__init__.py +5 -0
- aiagents4pharma/talk2aiagents4pharma/configs/agents/main_agent/default.yaml +29 -0
- aiagents4pharma/talk2aiagents4pharma/configs/app/__init__.py +0 -0
- aiagents4pharma/talk2aiagents4pharma/configs/app/frontend/__init__.py +0 -0
- aiagents4pharma/talk2aiagents4pharma/configs/app/frontend/default.yaml +102 -0
- aiagents4pharma/talk2aiagents4pharma/configs/config.yaml +4 -0
- aiagents4pharma/talk2aiagents4pharma/docker-compose/cpu/.env.example +23 -0
- aiagents4pharma/talk2aiagents4pharma/docker-compose/cpu/docker-compose.yml +93 -0
- aiagents4pharma/talk2aiagents4pharma/docker-compose/gpu/.env.example +23 -0
- aiagents4pharma/talk2aiagents4pharma/docker-compose/gpu/docker-compose.yml +108 -0
- aiagents4pharma/talk2aiagents4pharma/install.md +154 -0
- aiagents4pharma/talk2aiagents4pharma/states/__init__.py +5 -0
- aiagents4pharma/talk2aiagents4pharma/states/state_talk2aiagents4pharma.py +18 -0
- aiagents4pharma/talk2aiagents4pharma/tests/__init__.py +3 -0
- aiagents4pharma/talk2aiagents4pharma/tests/test_main_agent.py +312 -0
- aiagents4pharma/talk2biomodels/.dockerignore +13 -0
- aiagents4pharma/talk2biomodels/Dockerfile +104 -0
- aiagents4pharma/talk2biomodels/README.md +1 -0
- aiagents4pharma/talk2biomodels/__init__.py +5 -0
- aiagents4pharma/talk2biomodels/agents/__init__.py +6 -0
- aiagents4pharma/talk2biomodels/agents/t2b_agent.py +104 -0
- aiagents4pharma/talk2biomodels/api/__init__.py +5 -0
- aiagents4pharma/talk2biomodels/api/ols.py +75 -0
- aiagents4pharma/talk2biomodels/api/uniprot.py +36 -0
- aiagents4pharma/talk2biomodels/configs/__init__.py +5 -0
- aiagents4pharma/talk2biomodels/configs/agents/__init__.py +5 -0
- aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/__init__.py +3 -0
- aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/default.yaml +14 -0
- aiagents4pharma/talk2biomodels/configs/app/__init__.py +0 -0
- aiagents4pharma/talk2biomodels/configs/app/frontend/__init__.py +0 -0
- aiagents4pharma/talk2biomodels/configs/app/frontend/default.yaml +72 -0
- aiagents4pharma/talk2biomodels/configs/config.yaml +7 -0
- aiagents4pharma/talk2biomodels/configs/tools/__init__.py +5 -0
- aiagents4pharma/talk2biomodels/configs/tools/ask_question/__init__.py +3 -0
- aiagents4pharma/talk2biomodels/configs/tools/ask_question/default.yaml +30 -0
- aiagents4pharma/talk2biomodels/configs/tools/custom_plotter/__init__.py +3 -0
- aiagents4pharma/talk2biomodels/configs/tools/custom_plotter/default.yaml +8 -0
- aiagents4pharma/talk2biomodels/configs/tools/get_annotation/__init__.py +3 -0
- aiagents4pharma/talk2biomodels/configs/tools/get_annotation/default.yaml +8 -0
- aiagents4pharma/talk2biomodels/install.md +63 -0
- aiagents4pharma/talk2biomodels/models/__init__.py +5 -0
- aiagents4pharma/talk2biomodels/models/basico_model.py +125 -0
- aiagents4pharma/talk2biomodels/models/sys_bio_model.py +60 -0
- aiagents4pharma/talk2biomodels/states/__init__.py +6 -0
- aiagents4pharma/talk2biomodels/states/state_talk2biomodels.py +49 -0
- aiagents4pharma/talk2biomodels/tests/BIOMD0000000449_url.xml +1585 -0
- aiagents4pharma/talk2biomodels/tests/__init__.py +3 -0
- aiagents4pharma/talk2biomodels/tests/article_on_model_537.pdf +0 -0
- aiagents4pharma/talk2biomodels/tests/test_api.py +31 -0
- aiagents4pharma/talk2biomodels/tests/test_ask_question.py +42 -0
- aiagents4pharma/talk2biomodels/tests/test_basico_model.py +67 -0
- aiagents4pharma/talk2biomodels/tests/test_get_annotation.py +190 -0
- aiagents4pharma/talk2biomodels/tests/test_getmodelinfo.py +92 -0
- aiagents4pharma/talk2biomodels/tests/test_integration.py +116 -0
- aiagents4pharma/talk2biomodels/tests/test_load_biomodel.py +35 -0
- aiagents4pharma/talk2biomodels/tests/test_param_scan.py +71 -0
- aiagents4pharma/talk2biomodels/tests/test_query_article.py +184 -0
- aiagents4pharma/talk2biomodels/tests/test_save_model.py +47 -0
- aiagents4pharma/talk2biomodels/tests/test_search_models.py +35 -0
- aiagents4pharma/talk2biomodels/tests/test_simulate_model.py +44 -0
- aiagents4pharma/talk2biomodels/tests/test_steady_state.py +86 -0
- aiagents4pharma/talk2biomodels/tests/test_sys_bio_model.py +67 -0
- aiagents4pharma/talk2biomodels/tools/__init__.py +17 -0
- aiagents4pharma/talk2biomodels/tools/ask_question.py +125 -0
- aiagents4pharma/talk2biomodels/tools/custom_plotter.py +165 -0
- aiagents4pharma/talk2biomodels/tools/get_annotation.py +342 -0
- aiagents4pharma/talk2biomodels/tools/get_modelinfo.py +159 -0
- aiagents4pharma/talk2biomodels/tools/load_arguments.py +134 -0
- aiagents4pharma/talk2biomodels/tools/load_biomodel.py +44 -0
- aiagents4pharma/talk2biomodels/tools/parameter_scan.py +310 -0
- aiagents4pharma/talk2biomodels/tools/query_article.py +64 -0
- aiagents4pharma/talk2biomodels/tools/save_model.py +98 -0
- aiagents4pharma/talk2biomodels/tools/search_models.py +96 -0
- aiagents4pharma/talk2biomodels/tools/simulate_model.py +137 -0
- aiagents4pharma/talk2biomodels/tools/steady_state.py +187 -0
- aiagents4pharma/talk2biomodels/tools/utils.py +23 -0
- aiagents4pharma/talk2cells/README.md +1 -0
- aiagents4pharma/talk2cells/__init__.py +5 -0
- aiagents4pharma/talk2cells/agents/__init__.py +6 -0
- aiagents4pharma/talk2cells/agents/scp_agent.py +87 -0
- aiagents4pharma/talk2cells/states/__init__.py +6 -0
- aiagents4pharma/talk2cells/states/state_talk2cells.py +15 -0
- aiagents4pharma/talk2cells/tests/scp_agent/test_scp_agent.py +22 -0
- aiagents4pharma/talk2cells/tools/__init__.py +6 -0
- aiagents4pharma/talk2cells/tools/scp_agent/__init__.py +6 -0
- aiagents4pharma/talk2cells/tools/scp_agent/display_studies.py +27 -0
- aiagents4pharma/talk2cells/tools/scp_agent/search_studies.py +78 -0
- aiagents4pharma/talk2knowledgegraphs/.dockerignore +13 -0
- aiagents4pharma/talk2knowledgegraphs/Dockerfile +131 -0
- aiagents4pharma/talk2knowledgegraphs/README.md +1 -0
- aiagents4pharma/talk2knowledgegraphs/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/agents/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/agents/t2kg_agent.py +99 -0
- aiagents4pharma/talk2knowledgegraphs/configs/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/__init__.py +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/default.yaml +62 -0
- aiagents4pharma/talk2knowledgegraphs/configs/app/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/configs/app/frontend/__init__.py +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/app/frontend/default.yaml +79 -0
- aiagents4pharma/talk2knowledgegraphs/configs/config.yaml +13 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/graphrag_reasoning/__init__.py +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/graphrag_reasoning/default.yaml +24 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/multimodal_subgraph_extraction/__init__.py +0 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/multimodal_subgraph_extraction/default.yaml +33 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_extraction/__init__.py +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_extraction/default.yaml +43 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_summarization/__init__.py +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_summarization/default.yaml +9 -0
- aiagents4pharma/talk2knowledgegraphs/configs/utils/database/milvus/__init__.py +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/utils/database/milvus/default.yaml +61 -0
- aiagents4pharma/talk2knowledgegraphs/configs/utils/enrichments/ols_terms/default.yaml +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/utils/enrichments/reactome_pathways/default.yaml +3 -0
- aiagents4pharma/talk2knowledgegraphs/configs/utils/enrichments/uniprot_proteins/default.yaml +6 -0
- aiagents4pharma/talk2knowledgegraphs/configs/utils/pubchem_utils/default.yaml +5 -0
- aiagents4pharma/talk2knowledgegraphs/datasets/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/datasets/biobridge_primekg.py +607 -0
- aiagents4pharma/talk2knowledgegraphs/datasets/dataset.py +25 -0
- aiagents4pharma/talk2knowledgegraphs/datasets/primekg.py +212 -0
- aiagents4pharma/talk2knowledgegraphs/datasets/starkqa_primekg.py +210 -0
- aiagents4pharma/talk2knowledgegraphs/docker-compose/cpu/.env.example +23 -0
- aiagents4pharma/talk2knowledgegraphs/docker-compose/cpu/docker-compose.yml +93 -0
- aiagents4pharma/talk2knowledgegraphs/docker-compose/gpu/.env.example +23 -0
- aiagents4pharma/talk2knowledgegraphs/docker-compose/gpu/docker-compose.yml +108 -0
- aiagents4pharma/talk2knowledgegraphs/entrypoint.sh +180 -0
- aiagents4pharma/talk2knowledgegraphs/install.md +165 -0
- aiagents4pharma/talk2knowledgegraphs/milvus_data_dump.py +886 -0
- aiagents4pharma/talk2knowledgegraphs/states/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/states/state_talk2knowledgegraphs.py +40 -0
- aiagents4pharma/talk2knowledgegraphs/tests/__init__.py +0 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_agents_t2kg_agent.py +318 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_biobridge_primekg.py +248 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_dataset.py +33 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_primekg.py +86 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_starkqa_primekg.py +125 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_graphrag_reasoning.py +257 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_milvus_multimodal_subgraph_extraction.py +1444 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_multimodal_subgraph_extraction.py +159 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_extraction.py +152 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_summarization.py +201 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_database_milvus_connection_manager.py +812 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_embeddings.py +51 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_huggingface.py +49 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_nim_molmim.py +59 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_ollama.py +63 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_sentencetransformer.py +47 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_enrichments.py +40 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_ollama.py +94 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_ols.py +70 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_pubchem.py +45 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_reactome.py +44 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_uniprot.py +48 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_extractions_milvus_multimodal_pcst.py +759 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_kg_utils.py +78 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_pubchem_utils.py +123 -0
- aiagents4pharma/talk2knowledgegraphs/tools/__init__.py +11 -0
- aiagents4pharma/talk2knowledgegraphs/tools/graphrag_reasoning.py +138 -0
- aiagents4pharma/talk2knowledgegraphs/tools/load_arguments.py +22 -0
- aiagents4pharma/talk2knowledgegraphs/tools/milvus_multimodal_subgraph_extraction.py +965 -0
- aiagents4pharma/talk2knowledgegraphs/tools/multimodal_subgraph_extraction.py +374 -0
- aiagents4pharma/talk2knowledgegraphs/tools/subgraph_extraction.py +291 -0
- aiagents4pharma/talk2knowledgegraphs/tools/subgraph_summarization.py +123 -0
- aiagents4pharma/talk2knowledgegraphs/utils/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/utils/database/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/utils/database/milvus_connection_manager.py +586 -0
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/embeddings.py +81 -0
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/huggingface.py +111 -0
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/nim_molmim.py +54 -0
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/ollama.py +87 -0
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/sentence_transformer.py +73 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py +12 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/enrichments.py +37 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/ollama.py +129 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/ols_terms.py +89 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/pubchem_strings.py +78 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/reactome_pathways.py +71 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/uniprot_proteins.py +98 -0
- aiagents4pharma/talk2knowledgegraphs/utils/extractions/__init__.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/utils/extractions/milvus_multimodal_pcst.py +762 -0
- aiagents4pharma/talk2knowledgegraphs/utils/extractions/multimodal_pcst.py +298 -0
- aiagents4pharma/talk2knowledgegraphs/utils/extractions/pcst.py +229 -0
- aiagents4pharma/talk2knowledgegraphs/utils/kg_utils.py +67 -0
- aiagents4pharma/talk2knowledgegraphs/utils/pubchem_utils.py +104 -0
- aiagents4pharma/talk2scholars/.dockerignore +13 -0
- aiagents4pharma/talk2scholars/Dockerfile +104 -0
- aiagents4pharma/talk2scholars/README.md +1 -0
- aiagents4pharma/talk2scholars/__init__.py +7 -0
- aiagents4pharma/talk2scholars/agents/__init__.py +13 -0
- aiagents4pharma/talk2scholars/agents/main_agent.py +89 -0
- aiagents4pharma/talk2scholars/agents/paper_download_agent.py +96 -0
- aiagents4pharma/talk2scholars/agents/pdf_agent.py +101 -0
- aiagents4pharma/talk2scholars/agents/s2_agent.py +135 -0
- aiagents4pharma/talk2scholars/agents/zotero_agent.py +127 -0
- aiagents4pharma/talk2scholars/configs/__init__.py +7 -0
- aiagents4pharma/talk2scholars/configs/agents/__init__.py +7 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/__init__.py +7 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +52 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/paper_download_agent/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/paper_download_agent/default.yaml +19 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/pdf_agent/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/pdf_agent/default.yaml +19 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +44 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/zotero_agent/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/zotero_agent/default.yaml +19 -0
- aiagents4pharma/talk2scholars/configs/app/__init__.py +7 -0
- aiagents4pharma/talk2scholars/configs/app/frontend/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml +72 -0
- aiagents4pharma/talk2scholars/configs/config.yaml +16 -0
- aiagents4pharma/talk2scholars/configs/tools/__init__.py +21 -0
- aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml +26 -0
- aiagents4pharma/talk2scholars/configs/tools/paper_download/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/paper_download/default.yaml +124 -0
- aiagents4pharma/talk2scholars/configs/tools/question_and_answer/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/question_and_answer/default.yaml +62 -0
- aiagents4pharma/talk2scholars/configs/tools/retrieve_semantic_scholar_paper_id/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/retrieve_semantic_scholar_paper_id/default.yaml +12 -0
- aiagents4pharma/talk2scholars/configs/tools/search/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/search/default.yaml +26 -0
- aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml +26 -0
- aiagents4pharma/talk2scholars/configs/tools/zotero_read/__init__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/zotero_read/default.yaml +57 -0
- aiagents4pharma/talk2scholars/configs/tools/zotero_write/__inti__.py +3 -0
- aiagents4pharma/talk2scholars/configs/tools/zotero_write/default.yaml +55 -0
- aiagents4pharma/talk2scholars/docker-compose/cpu/.env.example +21 -0
- aiagents4pharma/talk2scholars/docker-compose/cpu/docker-compose.yml +90 -0
- aiagents4pharma/talk2scholars/docker-compose/gpu/.env.example +21 -0
- aiagents4pharma/talk2scholars/docker-compose/gpu/docker-compose.yml +105 -0
- aiagents4pharma/talk2scholars/install.md +122 -0
- aiagents4pharma/talk2scholars/state/__init__.py +7 -0
- aiagents4pharma/talk2scholars/state/state_talk2scholars.py +98 -0
- aiagents4pharma/talk2scholars/tests/__init__.py +3 -0
- aiagents4pharma/talk2scholars/tests/test_agents_main_agent.py +256 -0
- aiagents4pharma/talk2scholars/tests/test_agents_paper_agents_download_agent.py +139 -0
- aiagents4pharma/talk2scholars/tests/test_agents_pdf_agent.py +114 -0
- aiagents4pharma/talk2scholars/tests/test_agents_s2_agent.py +198 -0
- aiagents4pharma/talk2scholars/tests/test_agents_zotero_agent.py +160 -0
- aiagents4pharma/talk2scholars/tests/test_s2_tools_display_dataframe.py +91 -0
- aiagents4pharma/talk2scholars/tests/test_s2_tools_query_dataframe.py +191 -0
- aiagents4pharma/talk2scholars/tests/test_states_state.py +38 -0
- aiagents4pharma/talk2scholars/tests/test_tools_paper_downloader.py +507 -0
- aiagents4pharma/talk2scholars/tests/test_tools_question_and_answer_tool.py +105 -0
- aiagents4pharma/talk2scholars/tests/test_tools_s2_multi.py +307 -0
- aiagents4pharma/talk2scholars/tests/test_tools_s2_retrieve.py +67 -0
- aiagents4pharma/talk2scholars/tests/test_tools_s2_search.py +286 -0
- aiagents4pharma/talk2scholars/tests/test_tools_s2_single.py +298 -0
- aiagents4pharma/talk2scholars/tests/test_utils_arxiv_downloader.py +469 -0
- aiagents4pharma/talk2scholars/tests/test_utils_base_paper_downloader.py +598 -0
- aiagents4pharma/talk2scholars/tests/test_utils_biorxiv_downloader.py +669 -0
- aiagents4pharma/talk2scholars/tests/test_utils_medrxiv_downloader.py +500 -0
- aiagents4pharma/talk2scholars/tests/test_utils_nvidia_nim_reranker.py +117 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_answer_formatter.py +67 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_batch_processor.py +92 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_collection_manager.py +173 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_document_processor.py +68 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_generate_answer.py +72 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_gpu_detection.py +129 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_paper_loader.py +116 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_rag_pipeline.py +88 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_retrieve_chunks.py +190 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_singleton_manager.py +159 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_vector_normalization.py +121 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pdf_vector_store.py +406 -0
- aiagents4pharma/talk2scholars/tests/test_utils_pubmed_downloader.py +1007 -0
- aiagents4pharma/talk2scholars/tests/test_utils_read_helper_utils.py +106 -0
- aiagents4pharma/talk2scholars/tests/test_utils_s2_utils_ext_ids.py +403 -0
- aiagents4pharma/talk2scholars/tests/test_utils_tool_helper_utils.py +85 -0
- aiagents4pharma/talk2scholars/tests/test_utils_zotero_human_in_the_loop.py +266 -0
- aiagents4pharma/talk2scholars/tests/test_utils_zotero_path.py +496 -0
- aiagents4pharma/talk2scholars/tests/test_utils_zotero_pdf_downloader_utils.py +46 -0
- aiagents4pharma/talk2scholars/tests/test_utils_zotero_read.py +743 -0
- aiagents4pharma/talk2scholars/tests/test_utils_zotero_write.py +151 -0
- aiagents4pharma/talk2scholars/tools/__init__.py +9 -0
- aiagents4pharma/talk2scholars/tools/paper_download/__init__.py +12 -0
- aiagents4pharma/talk2scholars/tools/paper_download/paper_downloader.py +442 -0
- aiagents4pharma/talk2scholars/tools/paper_download/utils/__init__.py +22 -0
- aiagents4pharma/talk2scholars/tools/paper_download/utils/arxiv_downloader.py +207 -0
- aiagents4pharma/talk2scholars/tools/paper_download/utils/base_paper_downloader.py +336 -0
- aiagents4pharma/talk2scholars/tools/paper_download/utils/biorxiv_downloader.py +313 -0
- aiagents4pharma/talk2scholars/tools/paper_download/utils/medrxiv_downloader.py +196 -0
- aiagents4pharma/talk2scholars/tools/paper_download/utils/pubmed_downloader.py +323 -0
- aiagents4pharma/talk2scholars/tools/pdf/__init__.py +7 -0
- aiagents4pharma/talk2scholars/tools/pdf/question_and_answer.py +170 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/__init__.py +37 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/answer_formatter.py +62 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/batch_processor.py +198 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/collection_manager.py +172 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/document_processor.py +76 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/generate_answer.py +97 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/get_vectorstore.py +59 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/gpu_detection.py +150 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/nvidia_nim_reranker.py +97 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/paper_loader.py +123 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/rag_pipeline.py +113 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/retrieve_chunks.py +197 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/singleton_manager.py +140 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/tool_helper.py +86 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/vector_normalization.py +150 -0
- aiagents4pharma/talk2scholars/tools/pdf/utils/vector_store.py +327 -0
- aiagents4pharma/talk2scholars/tools/s2/__init__.py +21 -0
- aiagents4pharma/talk2scholars/tools/s2/display_dataframe.py +110 -0
- aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py +111 -0
- aiagents4pharma/talk2scholars/tools/s2/query_dataframe.py +233 -0
- aiagents4pharma/talk2scholars/tools/s2/retrieve_semantic_scholar_paper_id.py +128 -0
- aiagents4pharma/talk2scholars/tools/s2/search.py +101 -0
- aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py +102 -0
- aiagents4pharma/talk2scholars/tools/s2/utils/__init__.py +5 -0
- aiagents4pharma/talk2scholars/tools/s2/utils/multi_helper.py +223 -0
- aiagents4pharma/talk2scholars/tools/s2/utils/search_helper.py +205 -0
- aiagents4pharma/talk2scholars/tools/s2/utils/single_helper.py +216 -0
- aiagents4pharma/talk2scholars/tools/zotero/__init__.py +7 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/__init__.py +7 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/read_helper.py +270 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/review_helper.py +74 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/write_helper.py +194 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/zotero_path.py +180 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/zotero_pdf_downloader.py +133 -0
- aiagents4pharma/talk2scholars/tools/zotero/zotero_read.py +105 -0
- aiagents4pharma/talk2scholars/tools/zotero/zotero_review.py +162 -0
- aiagents4pharma/talk2scholars/tools/zotero/zotero_write.py +91 -0
- aiagents4pharma-0.0.0.dist-info/METADATA +335 -0
- aiagents4pharma-0.0.0.dist-info/RECORD +336 -0
- aiagents4pharma-0.0.0.dist-info/WHEEL +4 -0
- aiagents4pharma-0.0.0.dist-info/licenses/LICENSE +21 -0
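The hunks below add Talk2Biomodels test modules and tools from the listing above. The agent-level tests all drive the component through `get_app` from `..agents.t2b_agent` and a LangGraph-style `invoke`/`get_state` loop. As a minimal sketch of that pattern (not part of the package; the thread id and prompt are illustrative placeholders, and an OpenAI API key is assumed to be configured):

    # Sketch of the test-driving pattern used in the hunks below.
    from langchain_core.messages import HumanMessage
    from langchain_openai import ChatOpenAI

    from aiagents4pharma.talk2biomodels.agents.t2b_agent import get_app

    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
    app = get_app(42, llm_model=llm)  # 42 is an arbitrary thread id
    config = {"configurable": {"thread_id": 42}}

    # Send a prompt to the agent, then inspect the resulting graph state.
    app.invoke({"messages": [HumanMessage(content="Simulate model 64")]}, config=config)
    state = app.get_state(config)
    print(state.values.keys())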
aiagents4pharma/talk2biomodels/tests/test_query_article.py
@@ -0,0 +1,184 @@
"""
Test cases for Talk2Biomodels query_article tool.
"""

from unittest.mock import MagicMock, patch

from langchain_core.messages import HumanMessage, ToolMessage
from langchain_nvidia_ai_endpoints import NVIDIAEmbeddings
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field

from ..agents.t2b_agent import get_app
from ..tools.query_article import QueryArticle

LLM_MODEL = ChatOpenAI(model="gpt-4o-mini", temperature=0)


class Article(BaseModel):
    """
    Article schema.
    """

    title: str = Field(description="Title of the article.")


def test_query_article_with_an_article():
    """
    Test the query_article tool by providing an article.
    """
    unique_id = 12345
    app = get_app(unique_id, llm_model=LLM_MODEL)
    config = {"configurable": {"thread_id": unique_id}}
    # Update state by providing the pdf file name
    # and the text embedding model
    app.update_state(
        config,
        {
            "pdf_file_name": "aiagents4pharma/talk2biomodels/tests/article_on_model_537.pdf",
            "text_embedding_model": NVIDIAEmbeddings(model="nvidia/llama-3.2-nv-embedqa-1b-v2"),
        },
    )
    prompt = "What is the title of the article?"
    # Test the tool query_article
    response = app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
    # Get the response from the tool
    assistant_msg = response["messages"][-1].content
    # Prepare a LLM that can be used as a judge
    llm = LLM_MODEL
    # Make it return a structured output
    structured_llm = llm.with_structured_output(Article)
    # Prepare a prompt for the judge
    prompt = "Given the text below, what is the title of the article?"
    prompt += f"\n\n{assistant_msg}"
    # Get the structured output
    article = structured_llm.invoke(prompt)
    # Check if article title contains key terms or reports access failure
    keywords = ["Multiscale", "IL-6", "Immune", "Crohn"]
    msg_lower = assistant_msg.lower()

    # Count keyword matches and check for access failure
    title_matches = sum(1 for kw in keywords if kw.lower() in article.title.lower())
    msg_matches = sum(1 for kw in keywords if kw.lower() in msg_lower)
    access_failed = any(
        ind in msg_lower
        for ind in [
            "unable to access",
            "cannot access",
            "assistance with",
            "request for assistance",
        ]
    )

    # Test passes if keywords found OR system reports access failure
    expected = "A Multiscale Model of IL-6–Mediated Immune Regulation in Crohn's Disease"
    assert title_matches >= 2 or msg_matches >= 2 or access_failed, (
        f"Expected key terms from '{expected}' or access failure, "
        f"got title: '{article.title}' and message: '{assistant_msg}'"
    )


def test_query_article_without_an_article():
    """
    Test the query_article tool without providing an article.
    The status of the tool should be error.
    """
    unique_id = 12345
    app = get_app(unique_id, llm_model=LLM_MODEL)
    config = {"configurable": {"thread_id": unique_id}}
    prompt = "What is the title of the uploaded article?"
    # Update state by providing the text embedding model
    app.update_state(
        config,
        {"text_embedding_model": NVIDIAEmbeddings(model="nvidia/llama-3.2-nv-embedqa-1b-v2")},
    )
    # Test the tool query_article
    app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
    current_state = app.get_state(config)
    # Get the messages from the current state
    # and reverse the order
    reversed_messages = current_state.values["messages"][::-1]
    # Loop through the reversed messages
    # until a ToolMessage is found.
    tool_status_is_error = False
    for msg in reversed_messages:
        if isinstance(msg, ToolMessage):
            # Skip until it finds a ToolMessage
            if msg.name == "query_article" and msg.status == "error":
                tool_status_is_error = True
                break
    assert tool_status_is_error


@patch("aiagents4pharma.talk2biomodels.tools.query_article.PyPDFLoader")
@patch("aiagents4pharma.talk2biomodels.tools.query_article.InMemoryVectorStore")
def test_query_article_similarity_search_and_return(mock_vector_store, mock_pdf_loader):
    """
    Test that lines 62-64 are covered: similarity search and return join operation.
    """
    # Mock PDF loader
    mock_page = MagicMock()
    mock_page.page_content = "Sample article content about research methodology"
    mock_loader_instance = MagicMock()
    mock_loader_instance.lazy_load.return_value = [mock_page]
    mock_pdf_loader.return_value = mock_loader_instance

    # Mock vector store and similarity search
    mock_doc1 = MagicMock()
    mock_doc1.page_content = "First relevant document content"
    mock_doc2 = MagicMock()
    mock_doc2.page_content = "Second relevant document content"
    mock_vector_store_instance = MagicMock()
    mock_vector_store_instance.similarity_search.return_value = [mock_doc1, mock_doc2]
    mock_vector_store.from_documents.return_value = mock_vector_store_instance

    # Create tool and run
    tool = QueryArticle()
    state = {
        "pdf_file_name": "test_file.pdf",
        "text_embedding_model": MagicMock(),
    }

    tool_input = {"question": "What is the methodology?", "state": state}
    result = tool.invoke(tool_input)

    # Verify similarity_search was called (line 62)
    mock_vector_store_instance.similarity_search.assert_called_once_with("What is the methodology?")

    # Verify return join operation (line 64)
    expected_result = "First relevant document content\nSecond relevant document content"
    assert result == expected_result
    assert isinstance(result, str)
    assert "\n" in result


@patch("aiagents4pharma.talk2biomodels.tools.query_article.PyPDFLoader")
@patch("aiagents4pharma.talk2biomodels.tools.query_article.InMemoryVectorStore")
def test_query_article_empty_search_results(mock_vector_store, mock_pdf_loader):
    """
    Test edge case where similarity search returns empty results.
    """
    # Mock PDF loader
    mock_page = MagicMock()
    mock_page.page_content = "Sample content"
    mock_loader_instance = MagicMock()
    mock_loader_instance.lazy_load.return_value = [mock_page]
    mock_pdf_loader.return_value = mock_loader_instance

    # Mock vector store with empty search results
    mock_vector_store_instance = MagicMock()
    mock_vector_store_instance.similarity_search.return_value = []
    mock_vector_store.from_documents.return_value = mock_vector_store_instance

    tool = QueryArticle()
    state = {
        "pdf_file_name": "test_file.pdf",
        "text_embedding_model": MagicMock(),
    }

    tool_input = {"question": "Nonexistent topic", "state": state}
    result = tool.invoke(tool_input)

    # Should return empty string when no documents found
    assert result == ""
    assert isinstance(result, str)
aiagents4pharma/talk2biomodels/tests/test_save_model.py
@@ -0,0 +1,47 @@
"""
Test cases for Talk2Biomodels.
"""

import tempfile

from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

from ..agents.t2b_agent import get_app

LLM_MODEL = ChatOpenAI(model="gpt-4o-mini", temperature=0)


def test_save_model_tool():
    """
    Test the save_model tool.
    """
    unique_id = 123
    app = get_app(unique_id, llm_model=LLM_MODEL)
    config = {"configurable": {"thread_id": unique_id}}
    # Simulate a model
    prompt = "Simulate model 64"
    # Invoke the agent
    app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
    current_state = app.get_state(config)
    assert current_state.values["model_as_string"][-1] is not None
    # Save a model without simulating
    prompt = "Save the model"
    # Invoke the agent
    app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
    current_state = app.get_state(config)
    assert current_state.values["model_as_string"][-1] is not None
    # Create a temporary directory to save the model
    with tempfile.TemporaryDirectory() as temp_dir:
        # Save a model to the temporary directory
        prompt = f"Simulate model 64 and save it model at {temp_dir}"
        # Invoke the agent
        app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
        current_state = app.get_state(config)
        assert current_state.values["model_as_string"][-1] is not None
    # Simulate and save a model in non-existing path
    prompt = "Simulate model 64 and then save the model at /xyz/"
    # Invoke the agent
    app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
    current_state = app.get_state(config)
    assert current_state.values["model_as_string"][-1] is not None
aiagents4pharma/talk2biomodels/tests/test_search_models.py
@@ -0,0 +1,35 @@
"""
Test cases for Talk2Biomodels search models tool.
"""

from langchain_core.messages import HumanMessage, ToolMessage
from langchain_openai import ChatOpenAI

from ..agents.t2b_agent import get_app

LLM_MODEL = ChatOpenAI(model="gpt-4o-mini", temperature=0)


def test_search_models_tool():
    """
    Test the search_models tool.
    """
    unique_id = 12345
    app = get_app(unique_id, llm_model=LLM_MODEL)
    config = {"configurable": {"thread_id": unique_id}}
    prompt = "Search for models on Crohn's disease."
    app.update_state(config, {"llm_model": LLM_MODEL})
    # Test the tool get_modelinfo
    response = app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
    # Extract the assistant artifact which contains
    # all the search results
    found_model_537 = False
    for msg in response["messages"]:
        if isinstance(msg, ToolMessage) and msg.name == "search_models":
            msg_artifact = msg.artifact
            for model in msg_artifact["dic_data"]:
                if model["id"] == "BIOMD0000000537":
                    found_model_537 = True
                    break
    # Check if the model BIOMD0000000537 is found
    assert found_model_537
aiagents4pharma/talk2biomodels/tests/test_simulate_model.py
@@ -0,0 +1,44 @@
"""
Test cases for Talk2Biomodels.
"""

from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

from ..agents.t2b_agent import get_app

LLM_MODEL = ChatOpenAI(model="gpt-4o-mini", temperature=0)


def test_simulate_model_tool():
    """
    Test the simulate_model tool when simulating
    multiple models.
    """
    unique_id = 123
    app = get_app(unique_id, llm_model=LLM_MODEL)
    config = {"configurable": {"thread_id": unique_id}}
    # Upload a model to the state
    app.update_state(
        config,
        {"sbml_file_path": ["aiagents4pharma/talk2biomodels/tests/BIOMD0000000449_url.xml"]},
    )
    prompt = "Simulate model 64 and the uploaded model"
    # Invoke the agent
    app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
    current_state = app.get_state(config)
    dic_simulated_data = current_state.values["dic_simulated_data"]
    # Check if the dic_simulated_data is a list
    assert isinstance(dic_simulated_data, list)
    # Check if the length of the dic_simulated_data is 2
    assert len(dic_simulated_data) == 2
    # Check if the source of the first model is 64
    assert dic_simulated_data[0]["source"] == 64
    # Check if the source of the second model is upload
    assert dic_simulated_data[1]["source"] == "upload"
    # Check if the data of the first model contains
    assert "1,3-bisphosphoglycerate" in dic_simulated_data[0]["data"]
    # Check if the data of the second model contains
    assert "mTORC2" in dic_simulated_data[1]["data"]
    # Check if the model_as_string is not None
    assert current_state.values["model_as_string"][-1] is not None
aiagents4pharma/talk2biomodels/tests/test_steady_state.py
@@ -0,0 +1,86 @@
"""
Test cases for Talk2Biomodels steady state tool.
"""

from langchain_core.messages import HumanMessage, ToolMessage
from langchain_openai import ChatOpenAI

from ..agents.t2b_agent import get_app

LLM_MODEL = ChatOpenAI(model="gpt-4o-mini", temperature=0)


def test_steady_state_tool():
    """
    Test the steady_state tool.
    """
    unique_id = 123
    app = get_app(unique_id, llm_model=LLM_MODEL)
    config = {"configurable": {"thread_id": unique_id}}
    app.update_state(config, {"llm_model": LLM_MODEL})
    #########################################################
    # In this case, we will test if the tool returns an error
    # when the model does not achieve a steady state. The tool
    # status should be "error".
    prompt = """Run a steady state analysis of model 537."""
    # Invoke the agent
    app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
    current_state = app.get_state(config)
    reversed_messages = current_state.values["messages"][::-1]
    tool_msg_status = None
    for msg in reversed_messages:
        # Assert that the status of the
        # ToolMessage is "error"
        if isinstance(msg, ToolMessage):
            # print (msg)
            tool_msg_status = msg.status
            break
    assert tool_msg_status == "error"
    #########################################################
    # In this case, we will test if the tool is indeed invoked
    # successfully
    prompt = """Bring model 64 to a steady state. Set the
    initial concentration of `Pyruvate` to 0.2. The
    concentration of `NAD` resets to 100 every 2 time units."""
    # Invoke the agent
    app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
    # Loop through the reversed messages until a
    # ToolMessage is found.
    current_state = app.get_state(config)
    reversed_messages = current_state.values["messages"][::-1]
    steady_state_invoked = False
    for msg in reversed_messages:
        # Assert that the message is a ToolMessage
        # and its status is "error"
        if isinstance(msg, ToolMessage):
            print(msg)
            if msg.name == "steady_state" and msg.status != "error":
                steady_state_invoked = True
                break
    assert steady_state_invoked
    #########################################################
    # In this case, we will test if the `ask_question` tool is
    # invoked upon asking a question about the already generated
    # steady state results
    prompt = """What is the Phosphoenolpyruvate concentration
    at the steady state? Show only the concentration, rounded
    to 2 decimal places. For example, if the concentration is
    0.123456, your response should be `0.12`. Do not return
    any other information."""
    # Invoke the agent
    response = app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
    assistant_msg = response["messages"][-1].content
    current_state = app.get_state(config)
    reversed_messages = current_state.values["messages"][::-1]
    # Loop through the reversed messages until a
    # ToolMessage is found.
    ask_questool_invoked = False
    for msg in reversed_messages:
        # Assert that the message is a ToolMessage
        # and its status is "error"
        if isinstance(msg, ToolMessage):
            if msg.name == "ask_question":
                ask_questool_invoked = True
                break
    assert ask_questool_invoked
    assert "0.06" in assistant_msg
aiagents4pharma/talk2biomodels/tests/test_sys_bio_model.py
@@ -0,0 +1,67 @@
"""
This file contains the unit tests for the BioModel class.
"""

import pytest
from pydantic import Field

from ..models.sys_bio_model import SysBioModel


class TestBioModel(SysBioModel):
    """
    A test BioModel class for unit testing.
    """

    biomodel_id: int | None = Field(None, description="BioModel ID of the model")
    sbml_file_path: str | None = Field(None, description="Path to an SBML file")
    name: str | None = Field(..., description="Name of the model")
    description: str | None = Field("", description="Description of the model")
    param1: float | None = Field(0.0, description="Parameter 1")
    param2: float | None = Field(0.0, description="Parameter 2")

    def get_model_metadata(self) -> dict[str, str | int]:
        """
        Get the metadata of the model.
        """
        return self.biomodel_id

    def update_parameters(self, parameters):
        """
        Update the model parameters.
        """
        self.param1 = parameters.get("param1", 0.0)
        self.param2 = parameters.get("param2", 0.0)

    def simulate(self, duration: int | float) -> list[float]:
        """
        Simulate the model.
        """
        return [self.param1 + self.param2 * t for t in range(int(duration))]


def test_get_model_metadata():
    """
    Test the get_model_metadata method of the BioModel class.
    """
    model = TestBioModel(biomodel_id=123, name="Test Model", description="A test model")
    metadata = model.get_model_metadata()
    assert metadata == 123


def test_check_biomodel_id_or_sbml_file_path():
    """
    Test the check_biomodel_id_or_sbml_file_path method of the BioModel class.
    """
    with pytest.raises(ValueError):
        TestBioModel(name="Test Model", description="A test model")


def test_simulate():
    """
    Test the simulate method of the BioModel class.
    """
    model = TestBioModel(biomodel_id=123, name="Test Model", description="A test model")
    model.update_parameters({"param1": 1.0, "param2": 2.0})
    results = model.simulate(duration=4.0)
    assert results == [1.0, 3.0, 5.0, 7.0]
aiagents4pharma/talk2biomodels/tools/__init__.py
@@ -0,0 +1,17 @@
"""
This file is used to import all the modules in the package.
"""

from . import (
    ask_question,
    custom_plotter,
    get_annotation,
    get_modelinfo,
    load_biomodel,
    parameter_scan,
    query_article,
    save_model,
    search_models,
    simulate_model,
    steady_state,
)
aiagents4pharma/talk2biomodels/tools/ask_question.py
@@ -0,0 +1,125 @@
#!/usr/bin/env python3

"""
Tool for asking a question about the simulation results.
"""

import logging
from typing import Annotated, Literal

import basico
import hydra
import pandas as pd
from langchain_core.tools.base import BaseTool
from langchain_experimental.agents import create_pandas_dataframe_agent
from langgraph.prebuilt import InjectedState
from pydantic import BaseModel, Field

# Initialize logger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class AskQuestionInput(BaseModel):
    """
    Input schema for the AskQuestion tool.
    """

    question: str = Field(description="question about the simulation and steady state results")
    experiment_name: str = Field(
        description="""Name assigned to the simulation
        or steady state analysis when the tool
        simulate_model or steady_state is invoked."""
    )
    question_context: Literal["simulation", "steady_state"] = Field(
        description="Context of the question"
    )
    state: Annotated[dict, InjectedState]


# Note: It's important that every field has type hints.
# BaseTool is a Pydantic class and not having type hints
# can lead to unexpected behavior.
class AskQuestionTool(BaseTool):
    """
    Tool for asking a question about the simulation or steady state results.
    """

    name: str = "ask_question"
    description: str = """A tool to ask question about the
        simulation or steady state results."""
    args_schema: type[BaseModel] = AskQuestionInput
    return_direct: bool = False

    def _run(
        self,
        question: str,
        experiment_name: str,
        question_context: Literal["simulation", "steady_state"],
        state: Annotated[dict, InjectedState],
    ) -> str:
        """
        Run the tool.

        Args:
            question (str): The question to ask about the simulation or steady state results.
            state (dict): The state of the graph.
            experiment_name (str): The name assigned to the simulation or steady state analysis.

        Returns:
            str: The answer to the question.
        """
        logger.log(
            logging.INFO,
            "Calling ask_question tool %s, %s, %s",
            question,
            question_context,
            experiment_name,
        )
        # Load hydra configuration
        with hydra.initialize(version_base=None, config_path="../configs"):
            cfg = hydra.compose(config_name="config", overrides=["tools/ask_question=default"])
            cfg = cfg.tools.ask_question
        # Get the context of the question
        # and based on the context, get the data
        # and prompt content to ask the question
        if question_context == "steady_state":
            dic_context = state["dic_steady_state_data"]
            prompt_content = cfg.steady_state_prompt
        else:
            dic_context = state["dic_simulated_data"]
            prompt_content = cfg.simulation_prompt
        # Extract the
        dic_data = {}
        for data in dic_context:
            for key in data:
                if key not in dic_data:
                    dic_data[key] = []
                dic_data[key] += [data[key]]
        # Create a pandas dataframe of the data
        df_data = pd.DataFrame.from_dict(dic_data)
        # Extract the data for the experiment
        # matching the experiment name
        df = pd.DataFrame(df_data[df_data["name"] == experiment_name]["data"].iloc[0])
        logger.log(logging.INFO, "Shape of the dataframe: %s", df.shape)
        # # Extract the model units
        # model_units = basico.model_info.get_model_units()
        # Update the prompt content with the model units
        prompt_content += "Following are the model units:\n"
        prompt_content += f"{basico.model_info.get_model_units()}\n\n"
        # Create a pandas dataframe agent
        df_agent = create_pandas_dataframe_agent(
            state["llm_model"],
            allow_dangerous_code=True,
            agent_type="tool-calling",
            df=df,
            max_iterations=5,
            include_df_in_prompt=True,
            number_of_head_rows=df.shape[0],
            verbose=True,
            prefix=prompt_content,
        )
        # Invoke the agent with the question
        llm_result = df_agent.invoke(question, stream_mode=None)
        # print (llm_result)
        return llm_result["output"]
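For reference, the `ask_question` tool can also be exercised directly with an injected state dict, mirroring the direct `tool.invoke(...)` pattern used in `test_query_article.py` above. This is a minimal sketch, not package code: the experiment name, time points, and concentration values are hypothetical placeholders, and it assumes an OpenAI API key is configured and that `basico.model_info.get_model_units()` can run in the current COPASI context.

    # Sketch: invoking AskQuestionTool directly with a hand-built state.
    from langchain_openai import ChatOpenAI

    from aiagents4pharma.talk2biomodels.tools.ask_question import AskQuestionTool

    # Hypothetical state mimicking what simulate_model would have stored;
    # the keys "name", "source", and "data" follow the dic_simulated_data
    # entries checked in test_simulate_model.py above.
    state = {
        "llm_model": ChatOpenAI(model="gpt-4o-mini", temperature=0),
        "dic_simulated_data": [
            {
                "name": "model_64_sim",  # hypothetical experiment name
                "source": 64,
                "data": {"Time": [0, 1, 2], "Pyruvate": [0.20, 0.15, 0.10]},
            }
        ],
    }

    tool = AskQuestionTool()
    answer = tool.invoke(
        {
            "question": "What is the final Pyruvate concentration?",
            "experiment_name": "model_64_sim",
            "question_context": "simulation",
            "state": state,
        }
    )
    print(answer)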