aiagents4pharma 1.44.0__py3-none-any.whl → 1.45.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiagents4pharma/__init__.py +2 -2
- aiagents4pharma/talk2aiagents4pharma/.dockerignore +13 -0
- aiagents4pharma/talk2aiagents4pharma/Dockerfile +105 -0
- aiagents4pharma/talk2aiagents4pharma/README.md +1 -0
- aiagents4pharma/talk2aiagents4pharma/__init__.py +4 -5
- aiagents4pharma/talk2aiagents4pharma/agents/__init__.py +3 -2
- aiagents4pharma/talk2aiagents4pharma/agents/main_agent.py +24 -23
- aiagents4pharma/talk2aiagents4pharma/configs/__init__.py +2 -2
- aiagents4pharma/talk2aiagents4pharma/configs/agents/__init__.py +2 -2
- aiagents4pharma/talk2aiagents4pharma/configs/agents/main_agent/default.yaml +2 -2
- aiagents4pharma/talk2aiagents4pharma/configs/config.yaml +1 -1
- aiagents4pharma/talk2aiagents4pharma/docker-compose/cpu/.env.example +23 -0
- aiagents4pharma/talk2aiagents4pharma/docker-compose/cpu/docker-compose.yml +93 -0
- aiagents4pharma/talk2aiagents4pharma/docker-compose/gpu/.env.example +23 -0
- aiagents4pharma/talk2aiagents4pharma/docker-compose/gpu/docker-compose.yml +108 -0
- aiagents4pharma/talk2aiagents4pharma/install.md +127 -0
- aiagents4pharma/talk2aiagents4pharma/states/__init__.py +3 -2
- aiagents4pharma/talk2aiagents4pharma/states/state_talk2aiagents4pharma.py +5 -3
- aiagents4pharma/talk2aiagents4pharma/tests/__init__.py +2 -2
- aiagents4pharma/talk2aiagents4pharma/tests/test_main_agent.py +72 -50
- aiagents4pharma/talk2biomodels/.dockerignore +13 -0
- aiagents4pharma/talk2biomodels/Dockerfile +104 -0
- aiagents4pharma/talk2biomodels/README.md +1 -0
- aiagents4pharma/talk2biomodels/__init__.py +4 -8
- aiagents4pharma/talk2biomodels/agents/__init__.py +3 -2
- aiagents4pharma/talk2biomodels/agents/t2b_agent.py +47 -42
- aiagents4pharma/talk2biomodels/api/__init__.py +4 -5
- aiagents4pharma/talk2biomodels/api/kegg.py +14 -10
- aiagents4pharma/talk2biomodels/api/ols.py +13 -10
- aiagents4pharma/talk2biomodels/api/uniprot.py +7 -6
- aiagents4pharma/talk2biomodels/configs/__init__.py +3 -4
- aiagents4pharma/talk2biomodels/configs/agents/__init__.py +2 -2
- aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/__init__.py +2 -2
- aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/default.yaml +1 -1
- aiagents4pharma/talk2biomodels/configs/config.yaml +1 -1
- aiagents4pharma/talk2biomodels/configs/tools/__init__.py +4 -5
- aiagents4pharma/talk2biomodels/configs/tools/ask_question/__init__.py +2 -2
- aiagents4pharma/talk2biomodels/configs/tools/ask_question/default.yaml +1 -2
- aiagents4pharma/talk2biomodels/configs/tools/custom_plotter/__init__.py +2 -2
- aiagents4pharma/talk2biomodels/configs/tools/custom_plotter/default.yaml +1 -1
- aiagents4pharma/talk2biomodels/configs/tools/get_annotation/__init__.py +2 -2
- aiagents4pharma/talk2biomodels/configs/tools/get_annotation/default.yaml +1 -1
- aiagents4pharma/talk2biomodels/install.md +63 -0
- aiagents4pharma/talk2biomodels/models/__init__.py +4 -4
- aiagents4pharma/talk2biomodels/models/basico_model.py +36 -28
- aiagents4pharma/talk2biomodels/models/sys_bio_model.py +13 -10
- aiagents4pharma/talk2biomodels/states/__init__.py +3 -2
- aiagents4pharma/talk2biomodels/states/state_talk2biomodels.py +12 -8
- aiagents4pharma/talk2biomodels/tests/BIOMD0000000449_url.xml +1585 -0
- aiagents4pharma/talk2biomodels/tests/__init__.py +2 -2
- aiagents4pharma/talk2biomodels/tests/article_on_model_537.pdf +0 -0
- aiagents4pharma/talk2biomodels/tests/test_api.py +18 -14
- aiagents4pharma/talk2biomodels/tests/test_ask_question.py +8 -9
- aiagents4pharma/talk2biomodels/tests/test_basico_model.py +15 -9
- aiagents4pharma/talk2biomodels/tests/test_get_annotation.py +54 -55
- aiagents4pharma/talk2biomodels/tests/test_getmodelinfo.py +28 -27
- aiagents4pharma/talk2biomodels/tests/test_integration.py +21 -33
- aiagents4pharma/talk2biomodels/tests/test_load_biomodel.py +14 -11
- aiagents4pharma/talk2biomodels/tests/test_param_scan.py +21 -20
- aiagents4pharma/talk2biomodels/tests/test_query_article.py +129 -29
- aiagents4pharma/talk2biomodels/tests/test_search_models.py +9 -13
- aiagents4pharma/talk2biomodels/tests/test_simulate_model.py +16 -15
- aiagents4pharma/talk2biomodels/tests/test_steady_state.py +12 -22
- aiagents4pharma/talk2biomodels/tests/test_sys_bio_model.py +33 -29
- aiagents4pharma/talk2biomodels/tools/__init__.py +15 -12
- aiagents4pharma/talk2biomodels/tools/ask_question.py +42 -32
- aiagents4pharma/talk2biomodels/tools/custom_plotter.py +51 -43
- aiagents4pharma/talk2biomodels/tools/get_annotation.py +99 -75
- aiagents4pharma/talk2biomodels/tools/get_modelinfo.py +57 -51
- aiagents4pharma/talk2biomodels/tools/load_arguments.py +52 -32
- aiagents4pharma/talk2biomodels/tools/load_biomodel.py +8 -2
- aiagents4pharma/talk2biomodels/tools/parameter_scan.py +107 -90
- aiagents4pharma/talk2biomodels/tools/query_article.py +14 -13
- aiagents4pharma/talk2biomodels/tools/search_models.py +37 -26
- aiagents4pharma/talk2biomodels/tools/simulate_model.py +47 -37
- aiagents4pharma/talk2biomodels/tools/steady_state.py +76 -58
- aiagents4pharma/talk2biomodels/tools/utils.py +4 -3
- aiagents4pharma/talk2cells/README.md +1 -0
- aiagents4pharma/talk2cells/__init__.py +4 -5
- aiagents4pharma/talk2cells/agents/__init__.py +3 -2
- aiagents4pharma/talk2cells/agents/scp_agent.py +21 -19
- aiagents4pharma/talk2cells/states/__init__.py +3 -2
- aiagents4pharma/talk2cells/states/state_talk2cells.py +4 -2
- aiagents4pharma/talk2cells/tests/scp_agent/test_scp_agent.py +8 -9
- aiagents4pharma/talk2cells/tools/__init__.py +3 -2
- aiagents4pharma/talk2cells/tools/scp_agent/__init__.py +4 -4
- aiagents4pharma/talk2cells/tools/scp_agent/display_studies.py +5 -3
- aiagents4pharma/talk2cells/tools/scp_agent/search_studies.py +21 -22
- aiagents4pharma/talk2knowledgegraphs/.dockerignore +13 -0
- aiagents4pharma/talk2knowledgegraphs/Dockerfile +103 -0
- aiagents4pharma/talk2knowledgegraphs/README.md +1 -0
- aiagents4pharma/talk2knowledgegraphs/__init__.py +4 -7
- aiagents4pharma/talk2knowledgegraphs/agents/__init__.py +3 -2
- aiagents4pharma/talk2knowledgegraphs/agents/t2kg_agent.py +40 -30
- aiagents4pharma/talk2knowledgegraphs/configs/__init__.py +3 -6
- aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/__init__.py +2 -2
- aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/default.yaml +8 -8
- aiagents4pharma/talk2knowledgegraphs/configs/app/__init__.py +3 -2
- aiagents4pharma/talk2knowledgegraphs/configs/app/frontend/__init__.py +2 -2
- aiagents4pharma/talk2knowledgegraphs/configs/app/frontend/default.yaml +1 -1
- aiagents4pharma/talk2knowledgegraphs/configs/config.yaml +1 -1
- aiagents4pharma/talk2knowledgegraphs/configs/tools/__init__.py +4 -5
- aiagents4pharma/talk2knowledgegraphs/configs/tools/graphrag_reasoning/__init__.py +2 -2
- aiagents4pharma/talk2knowledgegraphs/configs/tools/graphrag_reasoning/default.yaml +1 -1
- aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_extraction/__init__.py +2 -2
- aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_extraction/default.yaml +1 -1
- aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_summarization/__init__.py +2 -2
- aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_summarization/default.yaml +1 -1
- aiagents4pharma/talk2knowledgegraphs/configs/utils/enrichments/ols_terms/default.yaml +1 -1
- aiagents4pharma/talk2knowledgegraphs/configs/utils/enrichments/reactome_pathways/default.yaml +1 -1
- aiagents4pharma/talk2knowledgegraphs/configs/utils/enrichments/uniprot_proteins/default.yaml +1 -1
- aiagents4pharma/talk2knowledgegraphs/configs/utils/pubchem_utils/default.yaml +1 -1
- aiagents4pharma/talk2knowledgegraphs/datasets/__init__.py +4 -6
- aiagents4pharma/talk2knowledgegraphs/datasets/biobridge_primekg.py +115 -67
- aiagents4pharma/talk2knowledgegraphs/datasets/dataset.py +2 -0
- aiagents4pharma/talk2knowledgegraphs/datasets/primekg.py +35 -24
- aiagents4pharma/talk2knowledgegraphs/datasets/starkqa_primekg.py +29 -21
- aiagents4pharma/talk2knowledgegraphs/docker-compose/cpu/.env.example +23 -0
- aiagents4pharma/talk2knowledgegraphs/docker-compose/cpu/docker-compose.yml +93 -0
- aiagents4pharma/talk2knowledgegraphs/docker-compose/gpu/.env.example +23 -0
- aiagents4pharma/talk2knowledgegraphs/docker-compose/gpu/docker-compose.yml +108 -0
- aiagents4pharma/talk2knowledgegraphs/entrypoint.sh +190 -0
- aiagents4pharma/talk2knowledgegraphs/install.md +140 -0
- aiagents4pharma/talk2knowledgegraphs/milvus_data_dump.py +31 -65
- aiagents4pharma/talk2knowledgegraphs/states/__init__.py +3 -2
- aiagents4pharma/talk2knowledgegraphs/states/state_talk2knowledgegraphs.py +1 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_agents_t2kg_agent.py +65 -40
- aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_biobridge_primekg.py +54 -48
- aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_dataset.py +4 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_primekg.py +17 -4
- aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_starkqa_primekg.py +33 -24
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_graphrag_reasoning.py +116 -69
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_milvus_multimodal_subgraph_extraction.py +334 -216
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_multimodal_subgraph_extraction.py +22 -15
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_extraction.py +19 -12
- aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_summarization.py +95 -48
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_embeddings.py +4 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_huggingface.py +5 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_nim_molmim.py +13 -18
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_ollama.py +10 -3
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_enrichments.py +4 -3
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_ollama.py +3 -2
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_ols.py +1 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_pubchem.py +9 -4
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_reactome.py +6 -6
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_uniprot.py +4 -0
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_extractions_milvus_multimodal_pcst.py +160 -97
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_kg_utils.py +3 -4
- aiagents4pharma/talk2knowledgegraphs/tests/test_utils_pubchem_utils.py +87 -13
- aiagents4pharma/talk2knowledgegraphs/tools/__init__.py +10 -7
- aiagents4pharma/talk2knowledgegraphs/tools/graphrag_reasoning.py +15 -20
- aiagents4pharma/talk2knowledgegraphs/tools/milvus_multimodal_subgraph_extraction.py +145 -142
- aiagents4pharma/talk2knowledgegraphs/tools/multimodal_subgraph_extraction.py +92 -90
- aiagents4pharma/talk2knowledgegraphs/tools/subgraph_extraction.py +25 -37
- aiagents4pharma/talk2knowledgegraphs/tools/subgraph_summarization.py +10 -13
- aiagents4pharma/talk2knowledgegraphs/utils/__init__.py +4 -7
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/__init__.py +4 -7
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/embeddings.py +4 -0
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/huggingface.py +11 -14
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/nim_molmim.py +7 -7
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/ollama.py +12 -6
- aiagents4pharma/talk2knowledgegraphs/utils/embeddings/sentence_transformer.py +8 -6
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py +9 -6
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/enrichments.py +1 -0
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/ollama.py +15 -9
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/ols_terms.py +23 -20
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/pubchem_strings.py +12 -10
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/reactome_pathways.py +16 -10
- aiagents4pharma/talk2knowledgegraphs/utils/enrichments/uniprot_proteins.py +26 -18
- aiagents4pharma/talk2knowledgegraphs/utils/extractions/__init__.py +4 -5
- aiagents4pharma/talk2knowledgegraphs/utils/extractions/milvus_multimodal_pcst.py +14 -34
- aiagents4pharma/talk2knowledgegraphs/utils/extractions/multimodal_pcst.py +53 -47
- aiagents4pharma/talk2knowledgegraphs/utils/extractions/pcst.py +18 -14
- aiagents4pharma/talk2knowledgegraphs/utils/kg_utils.py +22 -23
- aiagents4pharma/talk2knowledgegraphs/utils/pubchem_utils.py +11 -10
- aiagents4pharma/talk2scholars/.dockerignore +13 -0
- aiagents4pharma/talk2scholars/Dockerfile +104 -0
- aiagents4pharma/talk2scholars/README.md +1 -0
- aiagents4pharma/talk2scholars/agents/__init__.py +1 -5
- aiagents4pharma/talk2scholars/agents/main_agent.py +6 -4
- aiagents4pharma/talk2scholars/agents/paper_download_agent.py +5 -4
- aiagents4pharma/talk2scholars/agents/pdf_agent.py +4 -2
- aiagents4pharma/talk2scholars/agents/s2_agent.py +2 -2
- aiagents4pharma/talk2scholars/agents/zotero_agent.py +10 -11
- aiagents4pharma/talk2scholars/configs/__init__.py +1 -3
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/__init__.py +1 -4
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +1 -1
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/pdf_agent/default.yaml +1 -1
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +8 -8
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/zotero_agent/default.yaml +7 -7
- aiagents4pharma/talk2scholars/configs/tools/__init__.py +8 -6
- aiagents4pharma/talk2scholars/docker-compose/cpu/.env.example +21 -0
- aiagents4pharma/talk2scholars/docker-compose/cpu/docker-compose.yml +90 -0
- aiagents4pharma/talk2scholars/docker-compose/gpu/.env.example +21 -0
- aiagents4pharma/talk2scholars/docker-compose/gpu/docker-compose.yml +105 -0
- aiagents4pharma/talk2scholars/install.md +122 -0
- aiagents4pharma/talk2scholars/state/state_talk2scholars.py +8 -8
- aiagents4pharma/talk2scholars/tests/{test_main_agent.py → test_agents_main_agent.py} +41 -23
- aiagents4pharma/talk2scholars/tests/{test_paper_download_agent.py → test_agents_paper_agents_download_agent.py} +10 -16
- aiagents4pharma/talk2scholars/tests/{test_pdf_agent.py → test_agents_pdf_agent.py} +6 -10
- aiagents4pharma/talk2scholars/tests/{test_s2_agent.py → test_agents_s2_agent.py} +8 -16
- aiagents4pharma/talk2scholars/tests/{test_zotero_agent.py → test_agents_zotero_agent.py} +5 -7
- aiagents4pharma/talk2scholars/tests/{test_s2_display_dataframe.py → test_s2_tools_display_dataframe.py} +6 -7
- aiagents4pharma/talk2scholars/tests/{test_s2_query_dataframe.py → test_s2_tools_query_dataframe.py} +5 -15
- aiagents4pharma/talk2scholars/tests/{test_paper_downloader.py → test_tools_paper_downloader.py} +25 -63
- aiagents4pharma/talk2scholars/tests/{test_question_and_answer_tool.py → test_tools_question_and_answer_tool.py} +2 -6
- aiagents4pharma/talk2scholars/tests/{test_s2_multi.py → test_tools_s2_multi.py} +5 -5
- aiagents4pharma/talk2scholars/tests/{test_s2_retrieve.py → test_tools_s2_retrieve.py} +2 -1
- aiagents4pharma/talk2scholars/tests/{test_s2_search.py → test_tools_s2_search.py} +5 -5
- aiagents4pharma/talk2scholars/tests/{test_s2_single.py → test_tools_s2_single.py} +5 -5
- aiagents4pharma/talk2scholars/tests/{test_arxiv_downloader.py → test_utils_arxiv_downloader.py} +16 -25
- aiagents4pharma/talk2scholars/tests/{test_base_paper_downloader.py → test_utils_base_paper_downloader.py} +25 -47
- aiagents4pharma/talk2scholars/tests/{test_biorxiv_downloader.py → test_utils_biorxiv_downloader.py} +14 -42
- aiagents4pharma/talk2scholars/tests/{test_medrxiv_downloader.py → test_utils_medrxiv_downloader.py} +15 -49
- aiagents4pharma/talk2scholars/tests/{test_nvidia_nim_reranker.py → test_utils_nvidia_nim_reranker.py} +6 -16
- aiagents4pharma/talk2scholars/tests/{test_pdf_answer_formatter.py → test_utils_pdf_answer_formatter.py} +1 -0
- aiagents4pharma/talk2scholars/tests/{test_pdf_batch_processor.py → test_utils_pdf_batch_processor.py} +6 -15
- aiagents4pharma/talk2scholars/tests/{test_pdf_collection_manager.py → test_utils_pdf_collection_manager.py} +34 -11
- aiagents4pharma/talk2scholars/tests/{test_pdf_document_processor.py → test_utils_pdf_document_processor.py} +2 -3
- aiagents4pharma/talk2scholars/tests/{test_pdf_generate_answer.py → test_utils_pdf_generate_answer.py} +3 -6
- aiagents4pharma/talk2scholars/tests/{test_pdf_gpu_detection.py → test_utils_pdf_gpu_detection.py} +5 -16
- aiagents4pharma/talk2scholars/tests/{test_pdf_rag_pipeline.py → test_utils_pdf_rag_pipeline.py} +7 -17
- aiagents4pharma/talk2scholars/tests/{test_pdf_retrieve_chunks.py → test_utils_pdf_retrieve_chunks.py} +4 -11
- aiagents4pharma/talk2scholars/tests/{test_pdf_singleton_manager.py → test_utils_pdf_singleton_manager.py} +26 -23
- aiagents4pharma/talk2scholars/tests/{test_pdf_vector_normalization.py → test_utils_pdf_vector_normalization.py} +1 -1
- aiagents4pharma/talk2scholars/tests/{test_pdf_vector_store.py → test_utils_pdf_vector_store.py} +27 -55
- aiagents4pharma/talk2scholars/tests/{test_pubmed_downloader.py → test_utils_pubmed_downloader.py} +31 -91
- aiagents4pharma/talk2scholars/tests/{test_read_helper_utils.py → test_utils_read_helper_utils.py} +2 -6
- aiagents4pharma/talk2scholars/tests/{test_s2_utils_ext_ids.py → test_utils_s2_utils_ext_ids.py} +5 -15
- aiagents4pharma/talk2scholars/tests/{test_zotero_human_in_the_loop.py → test_utils_zotero_human_in_the_loop.py} +6 -13
- aiagents4pharma/talk2scholars/tests/{test_zotero_path.py → test_utils_zotero_path.py} +53 -45
- aiagents4pharma/talk2scholars/tests/{test_zotero_read.py → test_utils_zotero_read.py} +30 -91
- aiagents4pharma/talk2scholars/tests/{test_zotero_write.py → test_utils_zotero_write.py} +6 -16
- aiagents4pharma/talk2scholars/tools/__init__.py +1 -4
- aiagents4pharma/talk2scholars/tools/paper_download/paper_downloader.py +20 -35
- aiagents4pharma/talk2scholars/tools/paper_download/utils/__init__.py +7 -5
- aiagents4pharma/talk2scholars/tools/paper_download/utils/arxiv_downloader.py +9 -11
- aiagents4pharma/talk2scholars/tools/paper_download/utils/base_paper_downloader.py +14 -21
- aiagents4pharma/talk2scholars/tools/paper_download/utils/biorxiv_downloader.py +14 -22
- aiagents4pharma/talk2scholars/tools/paper_download/utils/medrxiv_downloader.py +11 -13
- aiagents4pharma/talk2scholars/tools/paper_download/utils/pubmed_downloader.py +14 -28
- aiagents4pharma/talk2scholars/tools/pdf/question_and_answer.py +4 -8
- aiagents4pharma/talk2scholars/tools/pdf/utils/__init__.py +16 -14
- aiagents4pharma/talk2scholars/tools/pdf/utils/answer_formatter.py +4 -4
- aiagents4pharma/talk2scholars/tools/pdf/utils/batch_processor.py +15 -17
- aiagents4pharma/talk2scholars/tools/pdf/utils/collection_manager.py +2 -2
- aiagents4pharma/talk2scholars/tools/pdf/utils/document_processor.py +5 -5
- aiagents4pharma/talk2scholars/tools/pdf/utils/generate_answer.py +4 -4
- aiagents4pharma/talk2scholars/tools/pdf/utils/get_vectorstore.py +2 -6
- aiagents4pharma/talk2scholars/tools/pdf/utils/gpu_detection.py +5 -9
- aiagents4pharma/talk2scholars/tools/pdf/utils/nvidia_nim_reranker.py +4 -4
- aiagents4pharma/talk2scholars/tools/pdf/utils/paper_loader.py +2 -2
- aiagents4pharma/talk2scholars/tools/pdf/utils/rag_pipeline.py +6 -15
- aiagents4pharma/talk2scholars/tools/pdf/utils/retrieve_chunks.py +7 -15
- aiagents4pharma/talk2scholars/tools/pdf/utils/singleton_manager.py +2 -2
- aiagents4pharma/talk2scholars/tools/pdf/utils/tool_helper.py +3 -4
- aiagents4pharma/talk2scholars/tools/pdf/utils/vector_normalization.py +8 -17
- aiagents4pharma/talk2scholars/tools/pdf/utils/vector_store.py +17 -33
- aiagents4pharma/talk2scholars/tools/s2/__init__.py +8 -6
- aiagents4pharma/talk2scholars/tools/s2/display_dataframe.py +3 -7
- aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py +7 -6
- aiagents4pharma/talk2scholars/tools/s2/query_dataframe.py +5 -12
- aiagents4pharma/talk2scholars/tools/s2/retrieve_semantic_scholar_paper_id.py +2 -4
- aiagents4pharma/talk2scholars/tools/s2/search.py +6 -6
- aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py +5 -3
- aiagents4pharma/talk2scholars/tools/s2/utils/__init__.py +1 -3
- aiagents4pharma/talk2scholars/tools/s2/utils/multi_helper.py +12 -18
- aiagents4pharma/talk2scholars/tools/s2/utils/search_helper.py +11 -18
- aiagents4pharma/talk2scholars/tools/s2/utils/single_helper.py +11 -16
- aiagents4pharma/talk2scholars/tools/zotero/__init__.py +1 -4
- aiagents4pharma/talk2scholars/tools/zotero/utils/__init__.py +1 -4
- aiagents4pharma/talk2scholars/tools/zotero/utils/read_helper.py +21 -39
- aiagents4pharma/talk2scholars/tools/zotero/utils/review_helper.py +2 -6
- aiagents4pharma/talk2scholars/tools/zotero/utils/write_helper.py +8 -11
- aiagents4pharma/talk2scholars/tools/zotero/utils/zotero_path.py +4 -12
- aiagents4pharma/talk2scholars/tools/zotero/utils/zotero_pdf_downloader.py +13 -27
- aiagents4pharma/talk2scholars/tools/zotero/zotero_read.py +4 -7
- aiagents4pharma/talk2scholars/tools/zotero/zotero_review.py +8 -10
- aiagents4pharma/talk2scholars/tools/zotero/zotero_write.py +3 -2
- {aiagents4pharma-1.44.0.dist-info → aiagents4pharma-1.45.1.dist-info}/METADATA +115 -51
- aiagents4pharma-1.45.1.dist-info/RECORD +324 -0
- {aiagents4pharma-1.44.0.dist-info → aiagents4pharma-1.45.1.dist-info}/WHEEL +1 -2
- aiagents4pharma-1.44.0.dist-info/RECORD +0 -293
- aiagents4pharma-1.44.0.dist-info/top_level.txt +0 -1
- /aiagents4pharma/talk2scholars/tests/{test_state.py → test_states_state.py} +0 -0
- /aiagents4pharma/talk2scholars/tests/{test_pdf_paper_loader.py → test_utils_pdf_paper_loader.py} +0 -0
- /aiagents4pharma/talk2scholars/tests/{test_tool_helper_utils.py → test_utils_tool_helper_utils.py} +0 -0
- /aiagents4pharma/talk2scholars/tests/{test_zotero_pdf_downloader_utils.py → test_utils_zotero_pdf_downloader_utils.py} +0 -0
- {aiagents4pharma-1.44.0.dist-info → aiagents4pharma-1.45.1.dist-info}/licenses/LICENSE +0 -0
aiagents4pharma/talk2biomodels/tests/__init__.py
@@ -1,3 +1,3 @@
-
+"""
 This module contains the test cases.
-
+"""
aiagents4pharma/talk2biomodels/tests/article_on_model_537.pdf
Binary file
aiagents4pharma/talk2biomodels/tests/test_api.py
@@ -1,27 +1,29 @@
-
+"""
 Test cases for Talk2Biomodels.
-
+"""

-from ..api.
+from ..api.kegg import fetch_from_api, fetch_kegg_names
 from ..api.ols import fetch_from_ols
-from ..api.
+from ..api.uniprot import search_uniprot_labels
+

 def test_search_uniprot_labels():
-
+    """
     Test the search_uniprot_labels function.
-
+    """
     # "P61764" = Positive result, "P0000Q" = negative result
     identifiers = ["P61764", "P0000Q"]
     results = search_uniprot_labels(identifiers)
     assert results["P61764"] == "Syntaxin-binding protein 1"
     assert results["P0000Q"].startswith("Error: 400")

+
 def test_fetch_from_ols():
-
+    """
     Test the fetch_from_ols function.
-
-    term_1 = "GO:0005886"
-    term_2 = "GO:ABC123"
+    """
+    term_1 = "GO:0005886"  # Positive result
+    term_2 = "GO:ABC123"  # Negative result
     label_1 = fetch_from_ols(term_1)
     label_2 = fetch_from_ols(term_2)
     assert isinstance(label_1, str), f"Expected string, got {type(label_1)}"
@@ -29,10 +31,11 @@ def test_fetch_from_ols():
     assert label_1 == "plasma membrane"
     assert label_2.startswith("Error: 404")

+
 def test_fetch_kegg_names():
-
+    """
     Test the fetch_kegg_names function.
-
+    """
     ids = ["C00001", "C00002"]
     results = fetch_kegg_names(ids)
     assert results["C00001"] == "H2O"
@@ -42,10 +45,11 @@ def test_fetch_kegg_names():
     results = fetch_kegg_names([])
     assert not results

+
 def test_fetch_from_api():
-
+    """
     Test the fetch_from_api function.
-
+    """
     base_url = "https://rest.kegg.jp/get/"
     query = "C00001"
     entry_data = fetch_from_api(base_url, query)
aiagents4pharma/talk2biomodels/tests/test_ask_question.py
@@ -1,17 +1,19 @@
-
+"""
 Test cases for Talk2Biomodels.
-
+"""

 from langchain_core.messages import HumanMessage, ToolMessage
 from langchain_openai import ChatOpenAI
+
 from ..agents.t2b_agent import get_app

+
 def test_ask_question_tool():
-
+    """
     Test the ask_question tool without the simulation results.
-
+    """
     unique_id = 12345
-    app = get_app(unique_id, llm_model=ChatOpenAI(model=
+    app = get_app(unique_id, llm_model=ChatOpenAI(model="gpt-4o-mini", temperature=0))
     config = {"configurable": {"thread_id": unique_id}}

     ##########################################
@@ -26,10 +28,7 @@ def test_ask_question_tool():
     prompt += "in serum at 1000 hours? The simulation name "
     prompt += "is `simulation_name`."
     # Invoke the tool
-    app.invoke(
-        {"messages": [HumanMessage(content=prompt)]},
-        config=config
-    )
+    app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
     # Get the messages from the current state
     # and reverse the order
     current_state = app.get_state(config)
aiagents4pharma/talk2biomodels/tests/test_basico_model.py
@@ -1,12 +1,14 @@
-
+"""
 A test BasicoModel class for pytest unit testing.
-
+"""

+import basico
 import pandas as pd
 import pytest
-
+
 from ..models.basico_model import BasicoModel

+
 @pytest.fixture(name="model")
 def model_fixture():
     """
@@ -14,16 +16,17 @@ def model_fixture():
     """
     return BasicoModel(biomodel_id=64, species={"Pyruvate": 100}, duration=2, interval=2)

+
 def test_with_biomodel_id(model):
     """
     Test initialization of BasicoModel with biomodel_id.
     """
     assert model.biomodel_id == 64
-    model.update_parameters(parameters={
+    model.update_parameters(parameters={"Pyruvate": 0.5, "KmPFKF6P": 1.5})
     df_species = basico.model_info.get_species(model=model.copasi_model)
-    assert df_species.loc[
+    assert df_species.loc["Pyruvate", "initial_concentration"] == 0.5
     df_parameters = basico.model_info.get_parameters(model=model.copasi_model)
-    assert df_parameters.loc[
+    assert df_parameters.loc["KmPFKF6P", "initial_value"] == 1.5
     # check if the simulation results are a pandas DataFrame object
     assert isinstance(model.simulate(duration=2, interval=2), pd.DataFrame)
     # Pass a None value to the update_parameters method
@@ -34,7 +37,8 @@ def test_with_biomodel_id(model):
     # check if an error is raised if an invalid species/parameter (`Pyruv`)
     # is passed and it should raise a ValueError
     with pytest.raises(ValueError):
-        model.update_parameters(parameters={
+        model.update_parameters(parameters={"Pyruv": 0.5})
+

 def test_with_sbml_file():
     """
@@ -44,13 +48,15 @@ def test_with_sbml_file():
     assert model_object.sbml_file_path == "./BIOMD0000000064_url.xml"
     assert isinstance(model_object.simulate(duration=2, interval=2), pd.DataFrame)

+
 def test_check_biomodel_id_or_sbml_file_path():
-
+    """
     Test the check_biomodel_id_or_sbml_file_path method of the BioModel class.
-
+    """
     with pytest.raises(ValueError):
         BasicoModel(species={"Pyruvate": 100}, duration=2, interval=2)

+
 def test_get_model_metadata():
     """
     Test the get_model_metadata method of the BasicoModel class.
aiagents4pharma/talk2biomodels/tests/test_get_annotation.py
@@ -1,79 +1,74 @@
-
+"""
 Test cases for Talk2Biomodels get_annotation tool.
-
+"""

 import random
+
 import pytest
 from langchain_core.messages import HumanMessage, ToolMessage
 from langchain_openai import ChatOpenAI
+
 from ..agents.t2b_agent import get_app
 from ..tools.get_annotation import prepare_content_msg

-LLM_MODEL = ChatOpenAI(model=
+LLM_MODEL = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+

 @pytest.fixture(name="make_graph")
 def make_graph_fixture():
-
+    """
     Create an instance of the talk2biomodels agent.
-
+    """
     unique_id = random.randint(1000, 9999)
     graph = get_app(unique_id, llm_model=LLM_MODEL)
     config = {"configurable": {"thread_id": unique_id}}
-    graph.update_state(
-        config,
-        {"llm_model": LLM_MODEL}
-    )
+    graph.update_state(config, {"llm_model": LLM_MODEL})
     return graph, config

+
 def test_no_model_provided(make_graph):
-
+    """
     Test the tool by not specifying any model.
     We are testing a condition where the user
     asks for annotations of all species without
     specifying a model.
-
+    """
     app, config = make_graph
     prompt = "Extract annotations of all species. Call the tool get_annotation."
-    app.invoke({"messages": [HumanMessage(content=prompt)]},
-               config=config
-               )
+    app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
     current_state = app.get_state(config)
     # Assert that the state key model_id is empty.
     assert current_state.values["model_id"] == []

+
 def test_valid_species_provided(make_graph):
-
+    """
     Test the tool by providing a specific species name.
     We are testing a condition where the user asks for annotations
     of a specific species in a specific model.
-
+    """
     # Test with a valid species name
     app, config = make_graph
     prompt = "Extract annotations of species IL6 in model 537."
-    app.invoke(
-        {"messages": [HumanMessage(content=prompt)]},
-        config=config
-    )
+    app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
     current_state = app.get_state(config)
     # print (current_state.values["dic_annotations_data"])
     dic_annotations_data = current_state.values["dic_annotations_data"]

     # The assert statement checks if IL6 is present in the returned annotations.
-    assert dic_annotations_data[0][
+    assert dic_annotations_data[0]["data"]["Species Name"][0] == "IL6"
+

 def test_invalid_species_provided(make_graph):
-
+    """
     Test the tool by providing an invalid species name.
     We are testing a condition where the user asks for annotations
     of an invalid species in a specific model.
-
+    """
     # Test with an invalid species name
     app, config = make_graph
     prompt = "Extract annotations of only species NADH in model 537."
-    app.invoke(
-        {"messages": [HumanMessage(content=prompt)]},
-        config=config
-    )
+    app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
     current_state = app.get_state(config)
     reversed_messages = current_state.values["messages"][::-1]
     # Loop through the reversed messages until a
@@ -84,34 +79,32 @@ def test_invalid_species_provided(make_graph):
         # Assert that the one of the messages is a ToolMessage
         # and its artifact is None.
         if isinstance(msg, ToolMessage) and msg.name == "get_annotation":
-            #If a ToolMessage exists and artifact is None (meaning no valid annotation was found)
-            #and the rejected species (NADH) is mentioned, the test passes.
+            # If a ToolMessage exists and artifact is None (meaning no valid annotation was found)
+            # and the rejected species (NADH) is mentioned, the test passes.
             if msg.artifact is None and msg.status == "error":
-                #If artifact is None, it means no annotation was found
+                # If artifact is None, it means no annotation was found
                 # (likely due to an invalid species).
                 test_condition = True
                 break
     assert test_condition

+
 def test_invalid_and_valid_species_provided(make_graph):
-
+    """
     Test the tool by providing an invalid species name and a valid species name.
     We are testing a condition where the user asks for annotations
     of an invalid species and a valid species in a specific model.
-
+    """
     # Test with an invalid species name and a valid species name
     app, config = make_graph
     prompt = "Extract annotations of species NADH, NAD, and IL7 in model 64."
-    app.invoke(
-        {"messages": [HumanMessage(content=prompt)]},
-        config=config
-    )
+    app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
     current_state = app.get_state(config)
     dic_annotations_data = current_state.values["dic_annotations_data"]
     # List of species that are expected to be found in the annotations
     extracted_species = []
-    for idx in dic_annotations_data[0][
-        extracted_species.append(dic_annotations_data[0][
+    for idx in dic_annotations_data[0]["data"]["Species Name"]:
+        extracted_species.append(dic_annotations_data[0]["data"]["Species Name"][idx])
     reversed_messages = current_state.values["messages"][::-1]
     # Loop through the reversed messages until a
     # ToolMessage is found.
@@ -124,10 +117,11 @@ def test_invalid_and_valid_species_provided(make_graph):
             tool_status_success = True
             break
     assert tool_status_success
-    assert set(extracted_species) ==
+    assert set(extracted_species) == {"NADH", "NAD"}
+

 def test_all_species_annotations(make_graph):
-
+    """
     Test the tool by asking for annotations of all species is specific models.
     Here, we test the tool with three models since they have different use cases:
     - model 12 contains a species with no URL provided.
@@ -136,16 +130,14 @@ def test_all_species_annotations(make_graph):

     We are testing a condition where the user asks for annotations
     of all species in a specific model.
-
+    """
     # Loop through the models and test the tool
     # for each model's unique use case.
     for model_id in [12, 20, 56]:
         app, config = make_graph
         prompt = f"Extract annotations of all species model {model_id}."
         # Test the tool get_modelinfo
-        app.invoke({"messages": [HumanMessage(content=prompt)]},
-                   config=config
-                   )
+        app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
         current_state = app.get_state(config)

         reversed_messages = current_state.values["messages"][::-1]
@@ -160,32 +152,39 @@ def test_all_species_annotations(make_graph):
                 # Extact the first and second description of the LacI protein
                 # We already know that the first or second description is missing ('-')
                 dic_annotations_data = current_state.values["dic_annotations_data"][0]
-                first_descp_laci_protein = dic_annotations_data[
-                second_descp_laci_protein = dic_annotations_data[
+                first_descp_laci_protein = dic_annotations_data["data"]["Description"][0]
+                second_descp_laci_protein = dic_annotations_data["data"]["Description"][1]

                 # Expect a successful extraction (artifact is True) and that the content
                 # matches what is returned by prepare_content_msg for species.
                 # And that the first or second description of the LacI protein is missing.
-                if (
-
-
+                if (
+                    msg.artifact is True
+                    and msg.content == prepare_content_msg([])
+                    and msg.status == "success"
+                    and (first_descp_laci_protein == "-" or second_descp_laci_protein == "-")
+                ):
                     test_condition = True
                     break

             if model_id == 20:
                 # Expect an error message containing a note
                 # that species extraction failed.
-                if (
-
+                if (
+                    "Unable to extract species from the model" in msg.content
+                    and msg.status == "error"
+                ):
                     test_condition = True
                     break

             if model_id == 56:
                 # Expect a successful extraction (artifact is True) and that the content
                 # matches for for missing description ['ORI'].
-                if (
-
-
+                if (
+                    msg.artifact is True
+                    and msg.content == prepare_content_msg(["ORI"])
+                    and msg.status == "success"
+                ):
                     test_condition = True
                     break
-        assert test_condition
+        assert test_condition  # Expected output is validated
aiagents4pharma/talk2biomodels/tests/test_getmodelinfo.py
@@ -1,49 +1,48 @@
-
+"""
 Test cases for Talk2Biomodels get_modelinfo tool.
-
+"""

 from langchain_core.messages import HumanMessage, ToolMessage
 from langchain_openai import ChatOpenAI
+
 from ..agents.t2b_agent import get_app

-LLM_MODEL = ChatOpenAI(model=
+LLM_MODEL = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+

 def test_get_modelinfo_tool():
-
+    """
     Test the get_modelinfo tool.
-
+    """
     unique_id = 12345
     app = get_app(unique_id, LLM_MODEL)
     config = {"configurable": {"thread_id": unique_id}}
     # Update state
-    app.update_state(
-
+    app.update_state(
+        config,
+        {"sbml_file_path": ["aiagents4pharma/talk2biomodels/tests/BIOMD0000000449_url.xml"]},
+    )
     prompt = "Extract all relevant information from the uploaded model."
     # Test the tool get_modelinfo
-    response = app.invoke(
-        {"messages": [HumanMessage(content=prompt)]},
-        config=config
-    )
+    response = app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
     assistant_msg = response["messages"][-1].content
     # Check if the assistant message is a string
     assert isinstance(assistant_msg, str)

+
 def test_model_with_no_species():
-
+    """
     Test the get_modelinfo tool with a model that does not
     return any species.

     This should raise a tool error.
-
+    """
     unique_id = 12345
     app = get_app(unique_id, LLM_MODEL)
     config = {"configurable": {"thread_id": unique_id}}
     prompt = "Extract all species from model 20"
     # Test the tool get_modelinfo
-    app.invoke(
-        {"messages": [HumanMessage(content=prompt)]},
-        config=config
-    )
+    app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
     current_state = app.get_state(config)
     reversed_messages = current_state.values["messages"][::-1]
     # Loop through the reversed messages until a
@@ -53,28 +52,28 @@ def test_model_with_no_species():
         # Check if the message is a ToolMessage from the get_modelinfo tool
         if isinstance(msg, ToolMessage) and msg.name == "get_modelinfo":
             # Check if the message is an error message
-            if (
-
+            if (
+                msg.status == "error"
+                and "ValueError('Unable to extract species from the model.')" in msg.content
+            ):
                 test_condition = True
                 break
     assert test_condition

+
 def test_model_with_no_parameters():
-
+    """
     Test the get_modelinfo tool with a model that does not
     return any parameters.

     This should raise a tool error.
-
+    """
     unique_id = 12345
     app = get_app(unique_id, LLM_MODEL)
     config = {"configurable": {"thread_id": unique_id}}
     prompt = "Extract all parameters from model 10"
     # Test the tool get_modelinfo
-    app.invoke(
-        {"messages": [HumanMessage(content=prompt)]},
-        config=config
-    )
+    app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
     current_state = app.get_state(config)
     reversed_messages = current_state.values["messages"][::-1]
     # Loop through the reversed messages until a
@@ -84,8 +83,10 @@ def test_model_with_no_parameters():
         # Check if the message is a ToolMessage from the get_modelinfo tool
         if isinstance(msg, ToolMessage) and msg.name == "get_modelinfo":
             # Check if the message is an error message
-            if (
-
+            if (
+                msg.status == "error"
+                and "ValueError('Unable to extract parameters from the model.')" in msg.content
+            ):
                 test_condition = True
                 break
     assert test_condition
aiagents4pharma/talk2biomodels/tests/test_integration.py
@@ -1,34 +1,33 @@
-
+"""
 Test cases for Talk2Biomodels.
-
+"""

 import pandas as pd
 from langchain_core.messages import HumanMessage, ToolMessage
 from langchain_openai import ChatOpenAI
+
 from ..agents.t2b_agent import get_app

-LLM_MODEL = ChatOpenAI(model=
+LLM_MODEL = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+

 def test_integration():
-
+    """
     Test the integration of the tools.
-
+    """
     unique_id = 1234567
     app = get_app(unique_id, llm_model=LLM_MODEL)
     config = {"configurable": {"thread_id": unique_id}}
     # ##########################################
     # ## Test simulate_model tool
     # ##########################################
-    prompt =
+    prompt = """Simulate the model BIOMD0000000537 for 100 hours and time intervals
     100 with an initial concentration of `DoseQ2W` set to 300 and `Dose`
-    set to 0. Reset the concentration of `Ab{serum}` to 100 every 25 hours.
+    set to 0. Reset the concentration of `Ab{serum}` to 100 every 25 hours."""
     # Test the tool get_modelinfo
-    response = app.invoke(
-        {"messages": [HumanMessage(content=prompt)]},
-        config=config
-    )
+    response = app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
     assistant_msg = response["messages"][-1].content
-    print
+    print(assistant_msg)
     # Check if the assistant message is a string
     assert isinstance(assistant_msg, str)
     ##########################################
@@ -40,14 +39,11 @@ def test_integration():
     prompt = """What is the concentration of CRP in serum after 100 hours?
     Round off the value to 2 decimal places."""
     # Test the tool get_modelinfo
-    response = app.invoke(
-        {"messages": [HumanMessage(content=prompt)]},
-        config=config
-    )
+    response = app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
     assistant_msg = response["messages"][-1].content
     # print (assistant_msg)
     # Check if the assistant message is a string
-    assert
+    assert "211" in assistant_msg

     ##########################################
     # Test the custom_plotter tool when the
@@ -59,13 +55,9 @@ def test_integration():
     know if these species were not found. Do not
     invoke any other tool."""
     # Update state
-    app.update_state(config, {"llm_model": LLM_MODEL}
-    )
+    app.update_state(config, {"llm_model": LLM_MODEL})
     # Test the tool get_modelinfo
-    response = app.invoke(
-        {"messages": [HumanMessage(content=prompt)]},
-        config=config
-    )
+    response = app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
     assistant_msg = response["messages"][-1].content
     current_state = app.get_state(config)
     # Get the messages from the current state
@@ -92,13 +84,9 @@ def test_integration():
     prompt = "Plot only CRP related species."

     # Update state
-    app.update_state(config, {"llm_model": LLM_MODEL}
-    )
+    app.update_state(config, {"llm_model": LLM_MODEL})
     # Test the tool get_modelinfo
-    response = app.invoke(
-        {"messages": [HumanMessage(content=prompt)]},
-        config=config
-    )
+    response = app.invoke({"messages": [HumanMessage(content=prompt)]}, config=config)
     assistant_msg = response["messages"][-1].content
     current_state = app.get_state(config)
     # Get the messages from the current state
@@ -106,9 +94,9 @@ def test_integration():
     reversed_messages = current_state.values["messages"][::-1]
     # Loop through the reversed messages
     # until a ToolMessage is found.
-    expected_header = [
-    expected_header += [
-    expected_header += [
+    expected_header = ["Time", "CRP{serum}", "CRPExtracellular"]
+    expected_header += ["CRP Suppression (%)", "CRP (% of baseline)"]
+    expected_header += ["CRP{liver}"]
     predicted_artifact = []
     for msg in reversed_messages:
         if isinstance(msg, ToolMessage):
@@ -116,7 +104,7 @@ def test_integration():
             # These may contain additional visuals that
             # need to be displayed to the user.
             if msg.name == "custom_plotter":
-                predicted_artifact = msg.artifact[
+                predicted_artifact = msg.artifact["dic_data"]
                 break
     # Convert the artifact into a pandas dataframe
     # for easy comparison