aiagents4pharma 1.20.0__tar.gz → 1.21.0__tar.gz
Sign up to get free protection for your applications and to get access to all the features.
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/PKG-INFO +2 -2
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/README.md +1 -1
- aiagents4pharma-1.21.0/aiagents4pharma/talk2biomodels/configs/config.yaml +5 -0
- aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/agents/main_agent.py +206 -0
- aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/agents/s2_agent.py +129 -0
- aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +39 -0
- aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +16 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml +11 -9
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/config.yaml +1 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml +2 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/tools/search/default.yaml +1 -0
- aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/__init__.py +3 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml +1 -0
- aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/state/state_talk2scholars.py +62 -0
- aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/tests/test_llm_main_integration.py +58 -0
- aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/tests/test_main_agent.py +156 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tests/test_s2_agent.py +95 -29
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tests/test_s2_tools.py +158 -22
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tools/s2/__init__.py +4 -2
- aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/tools/s2/display_results.py +89 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py +35 -8
- aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/tools/s2/query_results.py +61 -0
- aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/tools/s2/retrieve_semantic_scholar_paper_id.py +79 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tools/s2/search.py +34 -10
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py +39 -9
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma.egg-info/PKG-INFO +2 -2
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma.egg-info/SOURCES.txt +5 -1
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/pyproject.toml +1 -1
- aiagents4pharma-1.21.0/release_version.txt +1 -0
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/agents/main_agent.py +0 -207
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/agents/s2_agent.py +0 -85
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +0 -18
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +0 -24
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/state/state_talk2scholars.py +0 -33
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/tests/test_integration.py +0 -237
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/tests/test_main_agent.py +0 -180
- aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/tools/s2/display_results.py +0 -50
- aiagents4pharma-1.20.0/release_version.txt +0 -1
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/LICENSE +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/agents/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/agents/t2b_agent.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/api/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/api/kegg.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/api/ols.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/api/uniprot.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/agents/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/default.yaml +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/tools/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/tools/ask_question/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/tools/ask_question/default.yaml +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/tools/get_annotation/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/tools/get_annotation/default.yaml +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/models/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/models/basico_model.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/models/sys_bio_model.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/states/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/states/state_talk2biomodels.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_api.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_ask_question.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_basico_model.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_get_annotation.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_getmodelinfo.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_integration.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_param_scan.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_query_article.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_search_models.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_simulate_model.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_steady_state.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_sys_bio_model.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/ask_question.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/custom_plotter.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/get_annotation.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/get_modelinfo.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/load_arguments.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/load_biomodel.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/parameter_scan.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/query_article.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/search_models.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/simulate_model.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/steady_state.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/agents/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/agents/scp_agent.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/states/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/states/state_talk2cells.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/tests/scp_agent/test_scp_agent.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/tools/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/tools/scp_agent/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/tools/scp_agent/display_studies.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/tools/scp_agent/search_studies.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/agents/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/agents/t2kg_agent.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/default.yaml +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/app/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/app/frontend/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/app/frontend/default.yaml +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/config.yaml +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/graphrag_reasoning/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/graphrag_reasoning/default.yaml +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_extraction/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_extraction/default.yaml +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_summarization/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_summarization/default.yaml +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/datasets/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/datasets/biobridge_primekg.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/datasets/dataset.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/datasets/primekg.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/datasets/starkqa_primekg.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/states/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/states/state_talk2knowledgegraphs.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_agents_t2kg_agent.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_biobridge_primekg.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_dataset.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_primekg.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_starkqa_primekg.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_tools_graphrag_reasoning.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_extraction.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_summarization.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_embeddings.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_huggingface.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_ollama.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_sentencetransformer.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_enrichments.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_ollama.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_kg_utils.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tools/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tools/graphrag_reasoning.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tools/load_arguments.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tools/subgraph_extraction.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tools/subgraph_summarization.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/embeddings.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/huggingface.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/ollama.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/sentence_transformer.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/enrichments/enrichments.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/enrichments/ollama.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/extractions/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/extractions/pcst.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/kg_utils.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/agents/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/agents/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/app/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/app/frontend/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/tools/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/__init__.py +0 -0
- {aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/configs/tools/search → aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/configs/tools/retrieve_semantic_scholar_paper_id}/__init__.py +0 -0
- {aiagents4pharma-1.20.0/aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation → aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/configs/tools/search}/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/state/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tests/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tests/test_state.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tools/__init__.py +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma.egg-info/dependency_links.txt +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma.egg-info/requires.txt +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/aiagents4pharma.egg-info/top_level.txt +0 -0
- {aiagents4pharma-1.20.0 → aiagents4pharma-1.21.0}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.2
|
2
2
|
Name: aiagents4pharma
|
3
|
-
Version: 1.
|
3
|
+
Version: 1.21.0
|
4
4
|
Summary: AI Agents for drug discovery, drug development, and other pharmaceutical R&D.
|
5
5
|
Classifier: Programming Language :: Python :: 3
|
6
6
|
Classifier: License :: OSI Approved :: MIT License
|
@@ -110,7 +110,7 @@ free credits [here](https://build.nvidia.com/explore/discover)._
|
|
110
110
|
```
|
111
111
|
2. **Install dependencies:**
|
112
112
|
```bash
|
113
|
-
pip install .
|
113
|
+
pip install -r requirements.txt
|
114
114
|
```
|
115
115
|
3. **Initialize OPENAI_API_KEY and NVIDIA_API_KEY**
|
116
116
|
```bash
|
@@ -0,0 +1,206 @@
|
|
1
|
+
#!/usr/bin/env python3
|
2
|
+
|
3
|
+
"""
|
4
|
+
Main agent for the talk2scholars app using ReAct pattern.
|
5
|
+
|
6
|
+
This module implements a hierarchical agent system where a supervisor agent
|
7
|
+
routes queries to specialized sub-agents. It follows the LangGraph patterns
|
8
|
+
for multi-agent systems and implements proper state management.
|
9
|
+
"""
|
10
|
+
|
11
|
+
import logging
|
12
|
+
from typing import Literal, Callable
|
13
|
+
from pydantic import BaseModel
|
14
|
+
import hydra
|
15
|
+
from langchain_core.language_models.chat_models import BaseChatModel
|
16
|
+
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
|
17
|
+
from langchain_openai import ChatOpenAI
|
18
|
+
from langgraph.checkpoint.memory import MemorySaver
|
19
|
+
from langgraph.graph import END, START, StateGraph
|
20
|
+
from langgraph.types import Command
|
21
|
+
from ..agents import s2_agent
|
22
|
+
from ..state.state_talk2scholars import Talk2Scholars
|
23
|
+
|
24
|
+
# Configure logging
|
25
|
+
logging.basicConfig(level=logging.INFO)
|
26
|
+
logger = logging.getLogger(__name__)
|
27
|
+
|
28
|
+
|
29
|
+
def get_hydra_config():
|
30
|
+
"""
|
31
|
+
Loads the Hydra configuration for the main agent.
|
32
|
+
|
33
|
+
This function initializes the Hydra configuration system and retrieves the settings
|
34
|
+
for the `Talk2Scholars` agent, ensuring that all required parameters are loaded.
|
35
|
+
|
36
|
+
Returns:
|
37
|
+
DictConfig: The configuration object containing parameters for the main agent.
|
38
|
+
"""
|
39
|
+
with hydra.initialize(version_base=None, config_path="../configs"):
|
40
|
+
cfg = hydra.compose(
|
41
|
+
config_name="config", overrides=["agents/talk2scholars/main_agent=default"]
|
42
|
+
)
|
43
|
+
return cfg.agents.talk2scholars.main_agent
|
44
|
+
|
45
|
+
|
46
|
+
def make_supervisor_node(llm_model: BaseChatModel, thread_id: str) -> Callable:
|
47
|
+
"""
|
48
|
+
Creates the supervisor node responsible for routing user queries to the appropriate sub-agents.
|
49
|
+
|
50
|
+
This function initializes the routing logic by leveraging the system and router prompts defined
|
51
|
+
in the Hydra configuration. The supervisor determines whether to
|
52
|
+
call a sub-agent (like `s2_agent`)
|
53
|
+
or directly generate a response using the language model.
|
54
|
+
|
55
|
+
Args:
|
56
|
+
llm_model (BaseChatModel): The language model used for decision-making.
|
57
|
+
thread_id (str): Unique identifier for the current conversation session.
|
58
|
+
|
59
|
+
Returns:
|
60
|
+
Callable: The supervisor node function that processes user queries and
|
61
|
+
decides the next step.
|
62
|
+
"""
|
63
|
+
cfg = get_hydra_config()
|
64
|
+
logger.info("Hydra configuration for Talk2Scholars main agent loaded: %s", cfg)
|
65
|
+
members = ["s2_agent"]
|
66
|
+
options = ["FINISH"] + members
|
67
|
+
# Define system prompt for general interactions
|
68
|
+
system_prompt = cfg.system_prompt
|
69
|
+
# Define router prompt for routing to sub-agents
|
70
|
+
router_prompt = cfg.router_prompt
|
71
|
+
|
72
|
+
class Router(BaseModel):
|
73
|
+
"""Worker to route to next. If no workers needed, route to FINISH."""
|
74
|
+
|
75
|
+
next: Literal[*options]
|
76
|
+
|
77
|
+
def supervisor_node(
|
78
|
+
state: Talk2Scholars,
|
79
|
+
) -> Command:
|
80
|
+
"""
|
81
|
+
Handles the routing logic for the supervisor agent.
|
82
|
+
|
83
|
+
This function determines the next agent to invoke based on the router prompt response.
|
84
|
+
If no further processing is required, it generates an AI response using the system prompt.
|
85
|
+
|
86
|
+
Args:
|
87
|
+
state (Talk2Scholars): The current conversation state, including messages
|
88
|
+
exchanged so far.
|
89
|
+
|
90
|
+
Returns:
|
91
|
+
Command: A command dictating whether to invoke a sub-agent or generate a final response.
|
92
|
+
"""
|
93
|
+
messages = [SystemMessage(content=router_prompt)] + state["messages"]
|
94
|
+
structured_llm = llm_model.with_structured_output(Router)
|
95
|
+
response = structured_llm.invoke(messages)
|
96
|
+
goto = response.next
|
97
|
+
logger.info("Routing to: %s, Thread ID: %s", goto, thread_id)
|
98
|
+
if goto == "FINISH":
|
99
|
+
goto = END # Using END from langgraph.graph
|
100
|
+
# If no agents were called, and the last message was
|
101
|
+
# from the user, call the LLM to respond to the user
|
102
|
+
# with a slightly different system prompt.
|
103
|
+
if isinstance(messages[-1], HumanMessage):
|
104
|
+
response = llm_model.invoke(
|
105
|
+
[
|
106
|
+
SystemMessage(content=system_prompt),
|
107
|
+
]
|
108
|
+
+ messages[1:]
|
109
|
+
)
|
110
|
+
return Command(
|
111
|
+
goto=goto, update={"messages": AIMessage(content=response.content)}
|
112
|
+
)
|
113
|
+
# Go to the requested agent
|
114
|
+
return Command(goto=goto)
|
115
|
+
|
116
|
+
return supervisor_node
|
117
|
+
|
118
|
+
|
119
|
+
def get_app(
|
120
|
+
thread_id: str,
|
121
|
+
llm_model: BaseChatModel = ChatOpenAI(model="gpt-4o-mini", temperature=0),
|
122
|
+
):
|
123
|
+
"""
|
124
|
+
Initializes and returns the LangGraph-based hierarchical agent system.
|
125
|
+
|
126
|
+
This function constructs the agent workflow by defining nodes for the supervisor
|
127
|
+
and sub-agents. It compiles the graph using `StateGraph` to enable structured
|
128
|
+
conversational workflows.
|
129
|
+
|
130
|
+
Args:
|
131
|
+
thread_id (str): A unique session identifier for tracking conversation state.
|
132
|
+
llm_model (BaseChatModel, optional): The language model used for query processing.
|
133
|
+
Defaults to `ChatOpenAI(model="gpt-4o-mini", temperature=0)`.
|
134
|
+
|
135
|
+
Returns:
|
136
|
+
StateGraph: A compiled LangGraph application that can process user queries.
|
137
|
+
|
138
|
+
Example:
|
139
|
+
>>> app = get_app("thread_123")
|
140
|
+
>>> result = app.invoke(initial_state)
|
141
|
+
"""
|
142
|
+
cfg = get_hydra_config()
|
143
|
+
|
144
|
+
def call_s2_agent(
|
145
|
+
state: Talk2Scholars,
|
146
|
+
) -> Command[Literal["supervisor"]]:
|
147
|
+
"""
|
148
|
+
Invokes the Semantic Scholar (S2) agent to retrieve relevant research papers.
|
149
|
+
|
150
|
+
This function calls the `s2_agent` and updates the conversation state with retrieved
|
151
|
+
academic papers. The agent uses Semantic Scholar's API to find papers based on
|
152
|
+
user queries.
|
153
|
+
|
154
|
+
Args:
|
155
|
+
state (Talk2Scholars): The current state of the conversation, containing messages
|
156
|
+
and any previous search results.
|
157
|
+
|
158
|
+
Returns:
|
159
|
+
Command: A command to update the conversation state with the retrieved papers
|
160
|
+
and return control to the supervisor node.
|
161
|
+
|
162
|
+
Example:
|
163
|
+
>>> result = call_s2_agent(current_state)
|
164
|
+
>>> next_step = result.goto
|
165
|
+
"""
|
166
|
+
logger.info("Calling S2 agent")
|
167
|
+
app = s2_agent.get_app(thread_id, llm_model)
|
168
|
+
|
169
|
+
# Invoke the S2 agent, passing state,
|
170
|
+
# Pass both config_id and thread_id
|
171
|
+
response = app.invoke(
|
172
|
+
state,
|
173
|
+
{
|
174
|
+
"configurable": {
|
175
|
+
"config_id": thread_id,
|
176
|
+
"thread_id": thread_id,
|
177
|
+
}
|
178
|
+
},
|
179
|
+
)
|
180
|
+
logger.info("S2 agent completed with response")
|
181
|
+
return Command(
|
182
|
+
update={
|
183
|
+
"messages": response["messages"],
|
184
|
+
"papers": response.get("papers", {}),
|
185
|
+
"multi_papers": response.get("multi_papers", {}),
|
186
|
+
"last_displayed_papers": response.get("last_displayed_papers", {}),
|
187
|
+
},
|
188
|
+
# Always return to supervisor
|
189
|
+
goto="supervisor",
|
190
|
+
)
|
191
|
+
|
192
|
+
# Initialize LLM
|
193
|
+
logger.info("Using model %s with temperature %s", llm_model, cfg.temperature)
|
194
|
+
|
195
|
+
# Build the graph
|
196
|
+
workflow = StateGraph(Talk2Scholars)
|
197
|
+
supervisor = make_supervisor_node(llm_model, thread_id)
|
198
|
+
# Add nodes
|
199
|
+
workflow.add_node("supervisor", supervisor)
|
200
|
+
workflow.add_node("s2_agent", call_s2_agent)
|
201
|
+
# Add edges
|
202
|
+
workflow.add_edge(START, "supervisor")
|
203
|
+
# Compile the workflow
|
204
|
+
app = workflow.compile(checkpointer=MemorySaver())
|
205
|
+
logger.info("Main agent workflow compiled")
|
206
|
+
return app
|
@@ -0,0 +1,129 @@
|
|
1
|
+
# /usr/bin/env python3
|
2
|
+
|
3
|
+
"""
|
4
|
+
Agent for interacting with Semantic Scholar
|
5
|
+
"""
|
6
|
+
|
7
|
+
import logging
|
8
|
+
from typing import Any, Dict
|
9
|
+
import hydra
|
10
|
+
from langchain_openai import ChatOpenAI
|
11
|
+
from langchain_core.language_models.chat_models import BaseChatModel
|
12
|
+
from langgraph.graph import START, StateGraph
|
13
|
+
from langgraph.prebuilt import create_react_agent, ToolNode
|
14
|
+
from langgraph.checkpoint.memory import MemorySaver
|
15
|
+
from ..state.state_talk2scholars import Talk2Scholars
|
16
|
+
from ..tools.s2.search import search_tool as s2_search
|
17
|
+
from ..tools.s2.display_results import display_results as s2_display
|
18
|
+
from ..tools.s2.query_results import query_results as s2_query_results
|
19
|
+
from ..tools.s2.retrieve_semantic_scholar_paper_id import (
|
20
|
+
retrieve_semantic_scholar_paper_id as s2_retrieve_id,
|
21
|
+
)
|
22
|
+
from ..tools.s2.single_paper_rec import (
|
23
|
+
get_single_paper_recommendations as s2_single_rec,
|
24
|
+
)
|
25
|
+
from ..tools.s2.multi_paper_rec import get_multi_paper_recommendations as s2_multi_rec
|
26
|
+
|
27
|
+
# Initialize logger
|
28
|
+
logging.basicConfig(level=logging.INFO)
|
29
|
+
logger = logging.getLogger(__name__)
|
30
|
+
|
31
|
+
|
32
|
+
def get_app(
    uniq_id: str,
    llm_model: BaseChatModel = ChatOpenAI(model="gpt-4o-mini", temperature=0),
):
    """
    Initializes and returns the LangGraph application for the Semantic Scholar (S2) agent.

    This function sets up the S2 agent, which integrates various tools to search, retrieve,
    and display research papers from Semantic Scholar. The agent follows the ReAct pattern
    for structured interaction.

    Args:
        uniq_id (str): Unique identifier for the current conversation session.
        llm_model (BaseChatModel, optional): The language model to be used by the agent.
            Defaults to `ChatOpenAI(model="gpt-4o-mini", temperature=0)`.
            NOTE: the default is evaluated once at import time, so the same
            instance is shared across calls that rely on the default.

    Returns:
        StateGraph: A compiled LangGraph application that enables the S2 agent to process
            user queries and retrieve research papers.

    Example:
        >>> app = get_app("thread_123")
        >>> result = app.invoke(initial_state)
    """

    def agent_s2_node(state: Talk2Scholars) -> Dict[str, Any]:
        """
        Processes the user query and retrieves relevant research papers.

        This function calls the language model using the configured `ReAct` agent to
        analyze the state and generate an appropriate response. The function then
        returns control to the main supervisor.

        Args:
            state (Talk2Scholars): The current conversation state, including messages
                exchanged and any previously retrieved research papers.

        Returns:
            Dict[str, Any]: A dictionary containing the updated conversation state.

        Example:
            >>> result = agent_s2_node(current_state)
            >>> papers = result.get("papers", [])
        """
        logger.info("Creating Agent_S2 node with thread_id %s", uniq_id)
        # `model` is the ReAct agent created below; the node only runs after
        # the graph is compiled, so the closure is resolved by then.
        result = model.invoke(state, {"configurable": {"thread_id": uniq_id}})
        return result

    logger.info("thread_id, llm_model: %s, %s", uniq_id, llm_model)

    # Load hydra configuration for the S2 agent (system prompt, etc.)
    logger.info("Load Hydra configuration for Talk2Scholars S2 agent.")
    with hydra.initialize(version_base=None, config_path="../configs"):
        cfg = hydra.compose(
            config_name="config", overrides=["agents/talk2scholars/s2_agent=default"]
        )
        cfg = cfg.agents.talk2scholars.s2_agent

    # Define the tools available to the ReAct agent
    tools = ToolNode(
        [
            s2_search,
            s2_display,
            s2_query_results,
            s2_retrieve_id,
            s2_single_rec,
            s2_multi_rec,
        ]
    )

    # Log the injected model; it need not be an OpenAI model.
    logger.info("Using model %s", llm_model)

    # Create the ReAct agent with the configured system prompt
    model = create_react_agent(
        llm_model,
        tools=tools,
        state_schema=Talk2Scholars,
        state_modifier=cfg.s2_agent,
        checkpointer=MemorySaver(),
    )

    workflow = StateGraph(Talk2Scholars)
    workflow.add_node("agent_s2", agent_s2_node)
    workflow.add_edge(START, "agent_s2")

    # Initialize memory to persist state between graph runs
    checkpointer = MemorySaver()

    # Compile into a LangChain Runnable, passing the memory checkpointer so
    # state survives across invocations of the same thread_id.
    app = workflow.compile(checkpointer=checkpointer)
    logger.info("Compiled the graph")

    return app
|
@@ -0,0 +1,39 @@
|
|
1
|
+
_target_: agents.main_agent.get_app
|
2
|
+
openai_api_key: ${oc.env:OPENAI_API_KEY}
|
3
|
+
openai_llms:
|
4
|
+
- "gpt-4o-mini"
|
5
|
+
- "gpt-4-turbo"
|
6
|
+
- "gpt-3.5-turbo"
|
7
|
+
temperature: 0
|
8
|
+
system_prompt: >
|
9
|
+
You are the Talk2Scholars agent coordinating academic paper discovery and analysis.
|
10
|
+
|
11
|
+
You have access to the following agents:
|
12
|
+
1. S2_agent: This agent can be used to search and recommend papers
|
13
|
+
from Semantic Scholar. Use this agent when the user asks for
|
14
|
+
general paper searches and recommendations. This agent can also
|
15
|
+
retrieve the Semantic Scholar ID of a paper.
|
16
|
+
router_prompt: >
|
17
|
+
You are a supervisor tasked with managing a conversation between the
|
18
|
+
following workers: {members}. Given the user request, respond with the
|
19
|
+
worker to act next. Each worker will perform a task and respond with
|
20
|
+
their results and status. When finished, respond with FINISH.
|
21
|
+
|
22
|
+
Here is a description of the workers:
|
23
|
+
1. S2_agent: This agent can be used to search and recommend papers
|
24
|
+
from Semantic Scholar. Use this agent when the user asks for
|
25
|
+
general paper searches and recommendations. This agent can also
|
26
|
+
retrieve the Semantic Scholar ID of a paper. It can also be used to
|
27
|
+
provide more information about a paper.
|
28
|
+
|
29
|
+
Here are some instructions for the workers:
|
30
|
+
1. Call the S2 agent for general paper searches and recommendations.
|
31
|
+
2. The S2 agent has access to tools for querying and displaying papers.
|
32
|
+
  3. If the user wants suggestions for a paper and you don't have
|
33
|
+
a Semantic Scholar ID for it but do have the title from
|
34
|
+
the last displayed results, use the S2 agent to retrieve the
|
35
|
+
Semantic Scholar ID of the paper. Then, use the S2 agent again to display
|
36
|
+
recommendations for the paper.
|
37
|
+
4. You can call the S2 agent to get more information about a paper based
|
38
|
+
on the context of the conversation.
|
39
|
+
5. Respond with FINISH when all tasks are completed.
|
@@ -0,0 +1,16 @@
|
|
1
|
+
_target_: agents.s2_agent.get_app
|
2
|
+
openai_api_key: ${oc.env:OPENAI_API_KEY}
|
3
|
+
openai_llms:
|
4
|
+
- "gpt-4o-mini"
|
5
|
+
- "gpt-4-turbo"
|
6
|
+
- "gpt-3.5-turbo"
|
7
|
+
temperature: 0
|
8
|
+
s2_agent: >
|
9
|
+
You are an academic research assistant with access to the
|
10
|
+
Semantic Scholar API for paper discovery and analysis.
|
11
|
+
You also have tools to gain more insights on the papers and
|
12
|
+
display them.
|
13
|
+
You must strictly rely on retrieved information and avoid
|
14
|
+
generating unsupported content. Do not generate hallucinations
|
15
|
+
or fabricate details of any article. Stay focused on accurate,
|
16
|
+
sourced academic insights.
|
@@ -1,14 +1,13 @@
|
|
1
|
-
#
|
2
|
-
|
3
|
-
|
4
|
-
|
5
|
-
|
1
|
+
# Page configuration
|
2
|
+
page:
|
3
|
+
title: "Talk2Scholars"
|
4
|
+
icon: "🤖"
|
5
|
+
layout: "wide"
|
6
6
|
|
7
7
|
# Available LLM models
|
8
|
-
|
9
|
-
|
10
|
-
|
11
|
-
- "gpt-3.5-turbo"
|
8
|
+
llms:
|
9
|
+
available_models:
|
10
|
+
- "OpenAI/gpt-4o-mini"
|
12
11
|
# # Chat UI configuration
|
13
12
|
# chat:
|
14
13
|
# assistant_avatar: "🤖"
|
@@ -16,6 +15,9 @@ llm_models:
|
|
16
15
|
# input_placeholder: "Say something ..."
|
17
16
|
# spinner_text: "Fetching response ..."
|
18
17
|
|
18
|
+
api_keys:
|
19
|
+
openai_key: "OPENAI_API_KEY"
|
20
|
+
nvidia_key: "NVIDIA_API_KEY"
|
19
21
|
# # Feedback configuration
|
20
22
|
# feedback:
|
21
23
|
# type: "thumbs"
|
@@ -0,0 +1,62 @@
|
|
1
|
+
"""
|
2
|
+
State management for the Talk2Scholars agent.
|
3
|
+
|
4
|
+
This module defines the state class `Talk2Scholars`, which maintains the conversation
|
5
|
+
context, retrieved papers, and other relevant metadata. The state ensures consistency
|
6
|
+
across agent interactions.
|
7
|
+
"""
|
8
|
+
|
9
|
+
import logging
|
10
|
+
from typing import Annotated, Any, Dict
|
11
|
+
from langchain_core.language_models import BaseChatModel
|
12
|
+
from langgraph.prebuilt.chat_agent_executor import AgentState
|
13
|
+
|
14
|
+
# Configure logging
|
15
|
+
logging.basicConfig(level=logging.INFO)
|
16
|
+
logger = logging.getLogger(__name__)
|
17
|
+
|
18
|
+
|
19
|
+
def replace_dict(existing: Dict[str, Any], new: Dict[str, Any]) -> Dict[str, Any]:
    """
    Reducer that swaps the stored dictionary state for an incoming one.

    Intended as the merge function for `Annotated` state fields: instead of
    merging the two dictionaries, the incoming value wholly replaces the
    previous one. The transition is logged for traceability.

    Args:
        existing (Dict[str, Any]): The dictionary currently held in state.
        new (Dict[str, Any]): The dictionary that takes its place.

    Returns:
        Dict[str, Any]: The `new` dictionary, returned unchanged.

    Example:
        >>> old_state = {"papers": {"id1": "Paper 1"}}
        >>> new_state = {"papers": {"id2": "Paper 2"}}
        >>> replace_dict(old_state, new_state)
        {'papers': {'id2': 'Paper 2'}}
    """
    logger.info("Updating existing state %s with the state dict: %s", existing, new)
    return new
|
42
|
+
|
43
|
+
|
44
|
+
class Talk2Scholars(AgentState):
    """
    Conversation state for the Talk2Scholars agent.

    Extends `AgentState` (which carries the message history) with fields for
    retrieved papers and the active language model. Each dictionary field is
    annotated with the `replace_dict` reducer, so every update overwrites the
    previous value rather than merging into it.

    Attributes:
        last_displayed_papers (Dict[str, Any]): Papers most recently shown to the user.
        papers (Dict[str, Any]): Papers retrieved by the agent's queries.
        multi_papers (Dict[str, Any]): Recommended papers aggregated from several sources.
        llm_model (BaseChatModel): Language model instance used to generate responses.
    """

    # Dict fields are replaced wholesale on each update (see replace_dict).
    last_displayed_papers: Annotated[Dict[str, Any], replace_dict]
    papers: Annotated[Dict[str, Any], replace_dict]
    multi_papers: Annotated[Dict[str, Any], replace_dict]
    llm_model: BaseChatModel
|
@@ -0,0 +1,58 @@
|
|
1
|
+
"""
|
2
|
+
Integration tests for talk2scholars system with OpenAI.
|
3
|
+
"""
|
4
|
+
|
5
|
+
import os
|
6
|
+
import pytest
|
7
|
+
import hydra
|
8
|
+
from langchain_openai import ChatOpenAI
|
9
|
+
from langchain_core.messages import HumanMessage, AIMessage
|
10
|
+
from ..agents.main_agent import get_app
|
11
|
+
from ..state.state_talk2scholars import Talk2Scholars
|
12
|
+
|
13
|
+
# pylint: disable=redefined-outer-name
|
14
|
+
|
15
|
+
|
16
|
+
@pytest.mark.skipif(
    not os.getenv("OPENAI_API_KEY"), reason="Requires OpenAI API key to run"
)
def test_main_agent_real_llm():
    """
    End-to-end check that the main agent routes a user query through the
    S2 agent and updates the state, using a real LLM call.
    """
    # Load the Hydra configuration exactly the way main_agent.py does
    with hydra.initialize(version_base=None, config_path="../configs"):
        composed = hydra.compose(
            config_name="config", overrides=["agents/talk2scholars/main_agent=default"]
        )
        main_cfg = composed.agents.talk2scholars.main_agent

    assert main_cfg is not None, "Hydra config failed to load"

    # Real OpenAI client; the skipif guard guarantees the key is present
    llm = ChatOpenAI(model="gpt-4o-mini", temperature=main_cfg.temperature)

    # Build the main agent workflow with the real config
    session_id = "test_thread"
    app = get_app(session_id, llm)

    # Seed the conversation with an actual user query
    start_state = Talk2Scholars(
        messages=[HumanMessage(content="Find AI papers on transformers")]
    )

    # Run the graph (supervisor -> s2_agent)
    result = app.invoke(
        start_state,
        {"configurable": {"config_id": session_id, "thread_id": session_id}},
    )

    # The supervisor should have produced a message trail
    assert "messages" in result, "Expected messages in response"

    # The final entry may be an AIMessage, HumanMessage, or plain string
    assert isinstance(
        result["messages"][-1], (HumanMessage, AIMessage, str)
    ), "Last message should be a valid response"
|