aiagents4pharma 1.19.0__tar.gz → 1.20.0__tar.gz
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/PKG-INFO +18 -2
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/README.md +17 -1
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/agents/main_agent.py +207 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/agents/s2_agent.py +2 -1
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +18 -0
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +24 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/state/state_talk2scholars.py +9 -8
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/tests/test_integration.py +237 -0
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/tests/test_main_agent.py +180 -0
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/tests/test_s2_agent.py +138 -0
- aiagents4pharma-1.19.0/aiagents4pharma/talk2scholars/tests/test_langgraph.py → aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/tests/test_s2_tools.py +79 -151
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/tests/test_state.py +14 -0
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/tools/s2/display_results.py +50 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py +10 -23
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/tools/s2/search.py +10 -29
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py +4 -29
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma.egg-info/PKG-INFO +18 -2
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma.egg-info/SOURCES.txt +5 -1
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/pyproject.toml +18 -17
- aiagents4pharma-1.20.0/release_version.txt +1 -0
- aiagents4pharma-1.19.0/aiagents4pharma/talk2scholars/agents/main_agent.py +0 -151
- aiagents4pharma-1.19.0/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +0 -39
- aiagents4pharma-1.19.0/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +0 -68
- aiagents4pharma-1.19.0/aiagents4pharma/talk2scholars/tools/s2/display_results.py +0 -25
- aiagents4pharma-1.19.0/release_version.txt +0 -1
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/LICENSE +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/agents/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/agents/t2b_agent.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/api/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/api/kegg.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/api/ols.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/api/uniprot.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/configs/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/configs/agents/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/default.yaml +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/configs/tools/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/configs/tools/ask_question/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/configs/tools/ask_question/default.yaml +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/configs/tools/get_annotation/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/configs/tools/get_annotation/default.yaml +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/models/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/models/basico_model.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/models/sys_bio_model.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/states/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/states/state_talk2biomodels.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tests/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tests/test_api.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tests/test_ask_question.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tests/test_basico_model.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tests/test_get_annotation.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tests/test_getmodelinfo.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tests/test_integration.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tests/test_param_scan.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tests/test_query_article.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tests/test_search_models.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tests/test_simulate_model.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tests/test_steady_state.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tests/test_sys_bio_model.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tools/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tools/ask_question.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tools/custom_plotter.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tools/get_annotation.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tools/get_modelinfo.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tools/load_arguments.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tools/load_biomodel.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tools/parameter_scan.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tools/query_article.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tools/search_models.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tools/simulate_model.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2biomodels/tools/steady_state.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2cells/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2cells/agents/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2cells/agents/scp_agent.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2cells/states/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2cells/states/state_talk2cells.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2cells/tests/scp_agent/test_scp_agent.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2cells/tools/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2cells/tools/scp_agent/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2cells/tools/scp_agent/display_studies.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2cells/tools/scp_agent/search_studies.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/agents/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/agents/t2kg_agent.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/configs/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/default.yaml +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/configs/app/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/configs/app/frontend/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/configs/app/frontend/default.yaml +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/configs/config.yaml +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/graphrag_reasoning/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/graphrag_reasoning/default.yaml +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_extraction/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_extraction/default.yaml +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_summarization/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_summarization/default.yaml +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/datasets/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/datasets/biobridge_primekg.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/datasets/dataset.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/datasets/primekg.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/datasets/starkqa_primekg.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/states/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/states/state_talk2knowledgegraphs.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_agents_t2kg_agent.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_biobridge_primekg.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_dataset.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_primekg.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_starkqa_primekg.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_tools_graphrag_reasoning.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_extraction.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_summarization.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_embeddings.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_huggingface.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_ollama.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_sentencetransformer.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_enrichments.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_ollama.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_kg_utils.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tools/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tools/graphrag_reasoning.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tools/load_arguments.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tools/subgraph_extraction.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/tools/subgraph_summarization.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/utils/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/embeddings.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/huggingface.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/ollama.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/sentence_transformer.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/utils/enrichments/enrichments.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/utils/enrichments/ollama.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/utils/extractions/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/utils/extractions/pcst.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2knowledgegraphs/utils/kg_utils.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/agents/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/agents/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/app/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/app/frontend/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/config.yaml +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/tools/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/tools/search/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/tools/search/default.yaml +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/state/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/tests/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/tools/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/tools/s2/__init__.py +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma.egg-info/dependency_links.txt +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma.egg-info/requires.txt +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma.egg-info/top_level.txt +0 -0
- {aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/setup.cfg +0 -0
{aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: aiagents4pharma
-Version: 1.19.0
+Version: 1.20.0
 Summary: AI Agents for drug discovery, drug development, and other pharmaceutical R&D.
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
@@ -56,6 +56,7 @@ Requires-Dist: streamlit-feedback
 [](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2scholars.yml)
 (badge image)
 (badge image)
+(badge image)
 
 
 ## Introduction
@@ -85,7 +86,22 @@ pip install aiagents4pharma
 
 Check out the tutorials on each agent for detailed instrcutions.
 
-#### Option 2:
+#### Option 2: docker hub
+
+_Please note that this option is currently available only for Talk2Biomodels._
+
+1. **Pull the image**
+   ```
+   docker pull virtualpatientengine/talk2biomodels
+   ```
+2. **Run a container**
+   ```
+   docker run -e OPENAI_API_KEY=<openai_api_key> -e NVIDIA_API_KEY=<nvidia_api_key> -p 8501:8501 virtualpatientengine/talk2biomodels
+   ```
+_You can create a free account at NVIDIA and apply for their
+free credits [here](https://build.nvidia.com/explore/discover)._
+
+#### Option 3: git
 
 1. **Clone the repository:**
    ```bash
{aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/README.md
RENAMED
@@ -4,6 +4,7 @@
 [](https://github.com/VirtualPatientEngine/AIAgents4Pharma/actions/workflows/tests_talk2scholars.yml)
 (badge image)
 (badge image)
+(badge image)
 
 
 ## Introduction
@@ -33,7 +34,22 @@ pip install aiagents4pharma
 
 Check out the tutorials on each agent for detailed instrcutions.
 
-#### Option 2:
+#### Option 2: docker hub
+
+_Please note that this option is currently available only for Talk2Biomodels._
+
+1. **Pull the image**
+   ```
+   docker pull virtualpatientengine/talk2biomodels
+   ```
+2. **Run a container**
+   ```
+   docker run -e OPENAI_API_KEY=<openai_api_key> -e NVIDIA_API_KEY=<nvidia_api_key> -p 8501:8501 virtualpatientengine/talk2biomodels
+   ```
+_You can create a free account at NVIDIA and apply for their
+free credits [here](https://build.nvidia.com/explore/discover)._
+
+#### Option 3: git
 
 1. **Clone the repository:**
    ```bash
aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/agents/main_agent.py
ADDED
@@ -0,0 +1,207 @@
+#!/usr/bin/env python3
+
+"""
+Main agent for the talk2scholars app using ReAct pattern.
+
+This module implements a hierarchical agent system where a supervisor agent
+routes queries to specialized sub-agents. It follows the LangGraph patterns
+for multi-agent systems and implements proper state management.
+
+The main components are:
+1. Supervisor node with ReAct pattern for intelligent routing.
+2. S2 agent node for handling academic paper queries.
+3. Shared state management via Talk2Scholars.
+4. Hydra-based configuration system.
+
+Example:
+    app = get_app("thread_123", "gpt-4o-mini")
+    result = app.invoke({
+        "messages": [("human", "Find papers about AI agents")]
+    })
+"""
+
+import logging
+from typing import Literal, Callable
+import hydra
+from langchain_core.language_models.chat_models import BaseChatModel
+from langchain_openai import ChatOpenAI
+from langgraph.checkpoint.memory import MemorySaver
+from langgraph.graph import END, START, StateGraph
+from langgraph.prebuilt import create_react_agent
+from langgraph.types import Command
+from ..agents import s2_agent
+from ..state.state_talk2scholars import Talk2Scholars
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+def get_hydra_config():
+    """
+    Loads and returns the Hydra configuration for the main agent.
+
+    This function fetches the configuration settings for the Talk2Scholars
+    agent, ensuring that all required parameters are properly initialized.
+
+    Returns:
+        Any: The configuration object for the main agent.
+    """
+    with hydra.initialize(version_base=None, config_path="../configs"):
+        cfg = hydra.compose(
+            config_name="config", overrides=["agents/talk2scholars/main_agent=default"]
+        )
+    return cfg.agents.talk2scholars.main_agent
+
+
+def make_supervisor_node(llm: BaseChatModel, thread_id: str) -> Callable:
+    """
+    Creates and returns a supervisor node for intelligent routing using the ReAct pattern.
+
+    This function initializes a supervisor agent that processes user queries and
+    determines the appropriate sub-agent for further processing. It applies structured
+    reasoning to manage conversations and direct queries based on context.
+
+    Args:
+        llm (BaseChatModel): The language model used by the supervisor agent.
+        thread_id (str): Unique identifier for the conversation session.
+
+    Returns:
+        Callable: A function that acts as the supervisor node in the LangGraph workflow.
+
+    Example:
+        supervisor = make_supervisor_node(llm, "thread_123")
+        workflow.add_node("supervisor", supervisor)
+    """
+    logger.info("Loading Hydra configuration for Talk2Scholars main agent.")
+    cfg = get_hydra_config()
+    logger.info("Hydra configuration loaded with values: %s", cfg)
+
+    # Create the supervisor agent using the main agent's configuration
+    supervisor_agent = create_react_agent(
+        llm,
+        tools=[],  # Will add sub-agents later
+        state_modifier=cfg.main_agent,
+        state_schema=Talk2Scholars,
+        checkpointer=MemorySaver(),
+    )
+
+    def supervisor_node(
+        state: Talk2Scholars,
+    ) -> Command[Literal["s2_agent", "__end__"]]:
+        """
+        Processes user queries and determines the next step in the conversation flow.
+
+        This function examines the conversation state and decides whether to forward
+        the query to a specialized sub-agent (e.g., S2 agent) or conclude the interaction.
+
+        Args:
+            state (Talk2Scholars): The current state of the conversation, containing
+                messages, papers, and metadata.
+
+        Returns:
+            Command: The next action to be executed, along with updated state data.
+
+        Example:
+            result = supervisor_node(current_state)
+            next_step = result.goto
+        """
+        logger.info(
+            "Supervisor node called - Messages count: %d",
+            len(state["messages"]),
+        )
+
+        # Invoke the supervisor agent with configurable thread_id
+        result = supervisor_agent.invoke(
+            state, {"configurable": {"thread_id": thread_id}}
+        )
+        goto = "s2_agent"
+        logger.info("Supervisor agent completed with result: %s", result)
+
+        return Command(goto=goto)
+
+    return supervisor_node
+
+
+def get_app(thread_id: str, llm_model: str = "gpt-4o-mini") -> StateGraph:
+    """
+    Initializes and returns the LangGraph application with a hierarchical agent system.
+
+    This function sets up the full agent architecture, including the supervisor
+    and sub-agents, and compiles the LangGraph workflow for handling user queries.
+
+    Args:
+        thread_id (str): Unique identifier for the conversation session.
+        llm_model (str, optional): The language model to be used. Defaults to "gpt-4o-mini".
+
+    Returns:
+        StateGraph: A compiled LangGraph application ready for query invocation.
+
+    Example:
+        app = get_app("thread_123")
+        result = app.invoke(initial_state)
+    """
+    cfg = get_hydra_config()
+
+    def call_s2_agent(
+        state: Talk2Scholars,
+    ) -> Command[Literal["supervisor", "__end__"]]:
+        """
+        Calls the Semantic Scholar (S2) agent to process academic paper queries.
+
+        This function invokes the S2 agent, retrieves relevant research papers,
+        and updates the conversation state accordingly.
+
+        Args:
+            state (Talk2Scholars): The current conversation state, including user queries
+                and any previously retrieved papers.
+
+        Returns:
+            Command: The next action to execute, along with updated messages and papers.
+
+        Example:
+            result = call_s2_agent(current_state)
+            next_step = result.goto
+        """
+        logger.info("Calling S2 agent with state: %s", state)
+        app = s2_agent.get_app(thread_id, llm_model)
+
+        # Invoke the S2 agent, passing state,
+        # Pass both config_id and thread_id
+        response = app.invoke(
+            state,
+            {
+                "configurable": {
+                    "config_id": thread_id,
+                    "thread_id": thread_id,
+                }
+            },
+        )
+        logger.info("S2 agent completed with response: %s", response)
+
+        return Command(
+            goto=END,
+            update={
+                "messages": response["messages"],
+                "papers": response.get("papers", {}),
+                "multi_papers": response.get("multi_papers", {}),
+            },
+        )
+
+    # Initialize LLM
+    logger.info("Using OpenAI model %s with temperature %s", llm_model, cfg.temperature)
+    llm = ChatOpenAI(model=llm_model, temperature=cfg.temperature)
+
+    # Build the graph
+    workflow = StateGraph(Talk2Scholars)
+    supervisor = make_supervisor_node(llm, thread_id)
+
+    workflow.add_node("supervisor", supervisor)
+    workflow.add_node("s2_agent", call_s2_agent)
+    workflow.add_edge(START, "supervisor")
+    workflow.add_edge("s2_agent", END)
+
+    # Compile the graph without initial state
+    app = workflow.compile(checkpointer=MemorySaver())
+    logger.info("Main agent workflow compiled")
+    return app
{aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/agents/s2_agent.py
RENAMED
@@ -39,7 +39,7 @@ def get_app(uniq_id, llm_model="gpt-4o-mini"):
 
     # Load hydra configuration
     logger.log(logging.INFO, "Load Hydra configuration for Talk2Scholars S2 agent.")
-    with hydra.initialize(version_base=None, config_path="
+    with hydra.initialize(version_base=None, config_path="../configs"):
         cfg = hydra.compose(
             config_name="config", overrides=["agents/talk2scholars/s2_agent=default"]
         )
@@ -57,6 +57,7 @@ def get_app(uniq_id, llm_model="gpt-4o-mini"):
         llm,
         tools=tools,
         state_schema=Talk2Scholars,
+        # prompt=cfg.s2_agent,
         state_modifier=cfg.s2_agent,
         checkpointer=MemorySaver(),
     )
aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml
ADDED
@@ -0,0 +1,18 @@
+_target_: agents.main_agent.get_app
+openai_api_key: ${oc.env:OPENAI_API_KEY}
+openai_llms:
+  - "gpt-4o-mini"
+  - "gpt-4-turbo"
+  - "gpt-3.5-turbo"
+temperature: 0
+main_agent: >
+  You are an intelligent research assistant coordinating academic paper discovery and analysis.
+
+  AVAILABLE TOOLS AND ROUTING:
+  1. semantic_scholar_agent:
+     Access to tools:
+     - search_tool: For paper discovery
+     - display_results: For showing paper results
+     - get_single_paper_recommendations: For single paper recommendations
+     - get_multi_paper_recommendations: For multi-paper recommendations
+     → ROUTE TO THIS AGENT FOR: Any query about academic papers, research, or articles
aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml
ADDED
@@ -0,0 +1,24 @@
+_target_: agents.s2_agent.get_app
+openai_api_key: ${oc.env:OPENAI_API_KEY}
+openai_llms:
+  - "gpt-4o-mini"
+  - "gpt-4-turbo"
+  - "gpt-3.5-turbo"
+temperature: 0
+s2_agent: >
+  You are a specialized academic research agent with access to tools for paper discovery and analysis.
+
+  YOUR TOOLS:
+  1. search_tool:
+     - Finds research papers based on user queries.
+     - If no papers are found, it performs a new search.
+
+  2. display_results:
+     - Shows the current research papers.
+     - If no papers are found, it will instruct you to perform a search.
+
+  3. get_single_paper_recommendations:
+     - Provides recommendations based on a single selected paper.
+
+  4. get_multi_paper_recommendations:
+     - Provides recommendations based on multiple selected papers.
{aiagents4pharma-1.19.0 → aiagents4pharma-1.20.0}/aiagents4pharma/talk2scholars/state/state_talk2scholars.py
RENAMED
@@ -3,10 +3,8 @@ This is the state file for the talk2scholars agent.
 """
 
 import logging
-from typing import Annotated, Any, Dict
-
+from typing import Annotated, Any, Dict
 from langgraph.prebuilt.chat_agent_executor import AgentState
-from typing_extensions import NotRequired, Required
 
 # Configure logging
 logging.basicConfig(level=logging.INFO)
@@ -22,11 +20,14 @@ def replace_dict(existing: Dict[str, Any], new: Dict[str, Any]) -> Dict[str, Any
 class Talk2Scholars(AgentState):
     """
     The state for the talk2scholars agent, inheriting from AgentState.
+
+    Attributes:
+        papers: Dictionary of papers from search results
+        multi_papers: Dictionary of papers from multi-paper recommendations
+        llm_model: Model being used
     """
 
-
-
-
-    current_agent: NotRequired[Optional[str]]
-    is_last_step: Required[bool]  # Required field for LangGraph
+    # Agent state fields
+    papers: Annotated[Dict[str, Any], replace_dict]
+    multi_papers: Annotated[Dict[str, Any], replace_dict]
     llm_model: str
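A short illustrative sketch of the new reducer semantics (hypothetical paper IDs; it assumes, as the function name suggests, that replace_dict returns the incoming dictionary): because papers and multi_papers are annotated with replace_dict, a node update overwrites the stored dictionary instead of merging into it.

```python
from typing import Any, Dict

def replace_dict(existing: Dict[str, Any], new: Dict[str, Any]) -> Dict[str, Any]:
    """Assumed behavior: the incoming dict replaces the stored one."""
    return new

old_papers = {"id_old": {"Title": "Previous search result"}}
new_papers = {"id_new": {"Title": "Fresh search result"}}

# LangGraph applies the reducer attached via Annotated[..., replace_dict]
# whenever a node returns an update for "papers" or "multi_papers".
assert replace_dict(old_papers, new_papers) == new_papers
```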
aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/tests/test_integration.py
ADDED
@@ -0,0 +1,237 @@
+"""
+Integration tests for talk2scholars system.
+
+These tests ensure that:
+1. The main agent and sub-agent work together.
+2. The agents correctly interact with tools (search, recommendations).
+3. The full pipeline processes queries and updates state correctly.
+"""
+
+# pylint: disable=redefined-outer-name
+from unittest.mock import patch, Mock
+import pytest
+from langchain_core.messages import HumanMessage
+from ..agents.main_agent import get_app as get_main_app
+from ..agents.s2_agent import get_app as get_s2_app
+from ..state.state_talk2scholars import Talk2Scholars
+
+
+@pytest.fixture(autouse=True)
+def mock_hydra():
+    """Mock Hydra configuration to prevent external dependencies."""
+    with patch("hydra.initialize"), patch("hydra.compose") as mock_compose:
+        cfg_mock = Mock()
+        cfg_mock.agents.talk2scholars.main_agent.temperature = 0
+        cfg_mock.agents.talk2scholars.main_agent.main_agent = "Test main agent prompt"
+        cfg_mock.agents.talk2scholars.s2_agent.temperature = 0
+        cfg_mock.agents.talk2scholars.s2_agent.s2_agent = "Test s2 agent prompt"
+        mock_compose.return_value = cfg_mock
+        yield mock_compose
+
+
+@pytest.fixture(autouse=True)
+def mock_tools():
+    """Mock tools to prevent execution of real API calls."""
+    with (
+        patch(
+            "aiagents4pharma.talk2scholars.tools.s2.search.search_tool"
+        ) as mock_s2_search,
+        patch(
+            "aiagents4pharma.talk2scholars.tools.s2.display_results.display_results"
+        ) as mock_s2_display,
+        patch(
+            "aiagents4pharma.talk2scholars.tools.s2.single_paper_rec."
+            "get_single_paper_recommendations"
+        ) as mock_s2_single_rec,
+        patch(
+            "aiagents4pharma.talk2scholars.tools.s2.multi_paper_rec."
+            "get_multi_paper_recommendations"
+        ) as mock_s2_multi_rec,
+    ):
+
+        mock_s2_search.return_value = {"papers": {"id123": "Mock Paper"}}
+        mock_s2_display.return_value = "Displaying Mock Results"
+        mock_s2_single_rec.return_value = {"recommendations": ["Paper A", "Paper B"]}
+        mock_s2_multi_rec.return_value = {
+            "multi_recommendations": ["Paper X", "Paper Y"]
+        }
+
+        yield {
+            "search_tool": mock_s2_search,
+            "display_results": mock_s2_display,
+            "single_paper_rec": mock_s2_single_rec,
+            "multi_paper_rec": mock_s2_multi_rec,
+        }
+
+
+def test_full_workflow():
+    """Test the full workflow from main agent to S2 agent."""
+    thread_id = "test_thread"
+    main_app = get_main_app(thread_id)
+
+    # Define expected mock response with the actual structure
+    expected_paper = {
+        "530a059cb48477ad1e3d4f8f4b153274c8997332": {
+            "Title": "Explainable Artificial Intelligence",
+            "Abstract": None,
+            "Citation Count": 5544,
+            "Year": "2024",
+            "URL": "https://example.com/paper",
+        }
+    }
+
+    # Mock the search tool instead of the app
+    with patch(
+        "aiagents4pharma.talk2scholars.tools.s2.search.search_tool",
+        return_value={"papers": expected_paper},
+    ):
+        state = Talk2Scholars(messages=[HumanMessage(content="Find AI papers")])
+        result = main_app.invoke(
+            state,
+            config={
+                "configurable": {
+                    "thread_id": thread_id,
+                    "checkpoint_ns": "test_ns",
+                    "checkpoint_id": "test_checkpoint",
+                }
+            },
+        )
+
+        # Check values
+        assert "papers" in result
+        assert "messages" in result
+        assert len(result["papers"]) > 0
+
+
+def test_s2_agent_execution():
+    """Test if the S2 agent processes requests correctly and updates state."""
+    thread_id = "test_thread"
+    s2_app = get_s2_app(thread_id)
+
+    state = Talk2Scholars(messages=[HumanMessage(content="Get recommendations")])
+
+    result = s2_app.invoke(
+        state,
+        config={
+            "configurable": {
+                "thread_id": thread_id,
+                "checkpoint_ns": "test_ns",
+                "checkpoint_id": "test_checkpoint",
+            }
+        },
+    )
+
+    assert "messages" in result
+    assert "multi_papers" in result
+    assert result["multi_papers"] is not None
+
+
+def test_tool_integration(mock_tools):
+    """Test if the tools interact correctly with the workflow."""
+    thread_id = "test_thread"
+    s2_app = get_s2_app(thread_id)
+
+    state = Talk2Scholars(
+        messages=[HumanMessage(content="Search for AI ethics papers")]
+    )
+
+    mock_paper_id = "11159bdb213aaa243916f42f576396d483ba474b"
+    mock_response = {
+        "papers": {
+            mock_paper_id: {
+                "Title": "Mock AI Ethics Paper",
+                "Abstract": "A study on AI ethics",
+                "Citation Count": 100,
+                "URL": "https://example.com/mock-paper",
+            }
+        }
+    }
+
+    # Update both the fixture mock and patch the actual tool
+    mock_tools["search_tool"].return_value = {"papers": mock_response["papers"]}
+
+    with patch(
+        "aiagents4pharma.talk2scholars.tools.s2.search.search_tool",
+        return_value={"papers": mock_response["papers"]},
+    ):
+        result = s2_app.invoke(
+            state,
+            config={
+                "configurable": {
+                    "thread_id": thread_id,
+                    "checkpoint_ns": "test_ns",
+                    "checkpoint_id": "test_checkpoint",
+                }
+            },
+        )
+
+        assert "papers" in result
+        assert len(result["papers"]) > 0  # Verify we have papers
+        assert isinstance(result["papers"], dict)  # Verify it's a dictionary
+
+
+def test_empty_query():
+    """Test how the system handles an empty query."""
+    thread_id = "test_thread"
+    main_app = get_main_app(thread_id)
+
+    state = Talk2Scholars(messages=[HumanMessage(content="")])
+
+    # Mock the s2_agent app
+    mock_s2_app = get_s2_app(thread_id)
+
+    with patch(
+        "aiagents4pharma.talk2scholars.agents.s2_agent.get_app",
+        return_value=mock_s2_app,
+    ):
+        result = main_app.invoke(
+            state,
+            config={
+                "configurable": {
+                    "thread_id": thread_id,
+                    "checkpoint_ns": "test_ns",
+                    "checkpoint_id": "test_checkpoint",
+                }
+            },
+        )
+
+        assert "messages" in result
+        last_message = result["messages"][-1].content.lower()
+        assert any(
+            phrase in last_message
+            for phrase in ["no valid input", "how can i assist", "please provide a query"]
+        )
+
+
+def test_api_failure_handling():
+    """Test if the system gracefully handles an API failure."""
+    thread_id = "test_thread"
+    s2_app = get_s2_app(thread_id)
+
+    expected_error = "API Timeout: Connection failed"
+    with patch("requests.get", side_effect=Exception(expected_error)):
+        state = Talk2Scholars(messages=[HumanMessage(content="Find latest NLP papers")])
+
+        result = s2_app.invoke(
+            state,
+            config={
+                "configurable": {
+                    "thread_id": thread_id,
+                    "checkpoint_ns": "test_ns",
+                    "checkpoint_id": "test_checkpoint",
+                }
+            },
+        )
+
+        assert "messages" in result
+        last_message = result["messages"][-1].content.lower()
+
+        # Update assertions to match actual error message
+        assert any(
+            [
+                "unable to retrieve" in last_message,
+                "connection issue" in last_message,
+                "please try again later" in last_message,
+            ]
+        )
+        assert "nlp papers" in last_message  # Verify context is maintained