aiagents4pharma 1.20.1__tar.gz → 1.21.0__tar.gz

Files changed (174)
  1. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/PKG-INFO +1 -1
  2. aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/agents/main_agent.py +206 -0
  3. aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/agents/s2_agent.py +129 -0
  4. aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +39 -0
  5. aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +16 -0
  6. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml +11 -9
  7. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/config.yaml +1 -0
  8. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml +2 -0
  9. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/tools/search/default.yaml +1 -0
  10. aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/__init__.py +3 -0
  11. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml +1 -0
  12. aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/state/state_talk2scholars.py +62 -0
  13. aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/tests/test_llm_main_integration.py +58 -0
  14. aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/tests/test_main_agent.py +156 -0
  15. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tests/test_s2_agent.py +95 -29
  16. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tests/test_s2_tools.py +158 -22
  17. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tools/s2/__init__.py +4 -2
  18. aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/tools/s2/display_results.py +89 -0
  19. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py +35 -8
  20. aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/tools/s2/query_results.py +61 -0
  21. aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/tools/s2/retrieve_semantic_scholar_paper_id.py +79 -0
  22. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tools/s2/search.py +34 -10
  23. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py +39 -9
  24. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma.egg-info/PKG-INFO +1 -1
  25. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma.egg-info/SOURCES.txt +4 -1
  26. aiagents4pharma-1.21.0/release_version.txt +1 -0
  27. aiagents4pharma-1.20.1/aiagents4pharma/talk2scholars/agents/main_agent.py +0 -207
  28. aiagents4pharma-1.20.1/aiagents4pharma/talk2scholars/agents/s2_agent.py +0 -85
  29. aiagents4pharma-1.20.1/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +0 -18
  30. aiagents4pharma-1.20.1/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +0 -24
  31. aiagents4pharma-1.20.1/aiagents4pharma/talk2scholars/state/state_talk2scholars.py +0 -33
  32. aiagents4pharma-1.20.1/aiagents4pharma/talk2scholars/tests/test_integration.py +0 -237
  33. aiagents4pharma-1.20.1/aiagents4pharma/talk2scholars/tests/test_main_agent.py +0 -180
  34. aiagents4pharma-1.20.1/aiagents4pharma/talk2scholars/tools/s2/display_results.py +0 -50
  35. aiagents4pharma-1.20.1/release_version.txt +0 -1
  36. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/LICENSE +0 -0
  37. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/README.md +0 -0
  38. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/__init__.py +0 -0
  39. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/__init__.py +0 -0
  40. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/agents/__init__.py +0 -0
  41. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/agents/t2b_agent.py +0 -0
  42. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/api/__init__.py +0 -0
  43. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/api/kegg.py +0 -0
  44. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/api/ols.py +0 -0
  45. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/api/uniprot.py +0 -0
  46. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/__init__.py +0 -0
  47. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/agents/__init__.py +0 -0
  48. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/__init__.py +0 -0
  49. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/default.yaml +0 -0
  50. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/config.yaml +0 -0
  51. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/tools/__init__.py +0 -0
  52. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/tools/ask_question/__init__.py +0 -0
  53. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/tools/ask_question/default.yaml +0 -0
  54. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/tools/get_annotation/__init__.py +0 -0
  55. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/configs/tools/get_annotation/default.yaml +0 -0
  56. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/models/__init__.py +0 -0
  57. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/models/basico_model.py +0 -0
  58. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/models/sys_bio_model.py +0 -0
  59. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/states/__init__.py +0 -0
  60. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/states/state_talk2biomodels.py +0 -0
  61. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/__init__.py +0 -0
  62. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_api.py +0 -0
  63. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_ask_question.py +0 -0
  64. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_basico_model.py +0 -0
  65. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_get_annotation.py +0 -0
  66. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_getmodelinfo.py +0 -0
  67. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_integration.py +0 -0
  68. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_param_scan.py +0 -0
  69. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_query_article.py +0 -0
  70. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_search_models.py +0 -0
  71. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_simulate_model.py +0 -0
  72. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_steady_state.py +0 -0
  73. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tests/test_sys_bio_model.py +0 -0
  74. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/__init__.py +0 -0
  75. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/ask_question.py +0 -0
  76. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/custom_plotter.py +0 -0
  77. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/get_annotation.py +0 -0
  78. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/get_modelinfo.py +0 -0
  79. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/load_arguments.py +0 -0
  80. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/load_biomodel.py +0 -0
  81. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/parameter_scan.py +0 -0
  82. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/query_article.py +0 -0
  83. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/search_models.py +0 -0
  84. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/simulate_model.py +0 -0
  85. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2biomodels/tools/steady_state.py +0 -0
  86. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/__init__.py +0 -0
  87. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/agents/__init__.py +0 -0
  88. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/agents/scp_agent.py +0 -0
  89. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/states/__init__.py +0 -0
  90. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/states/state_talk2cells.py +0 -0
  91. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/tests/scp_agent/test_scp_agent.py +0 -0
  92. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/tools/__init__.py +0 -0
  93. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/tools/scp_agent/__init__.py +0 -0
  94. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/tools/scp_agent/display_studies.py +0 -0
  95. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2cells/tools/scp_agent/search_studies.py +0 -0
  96. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/__init__.py +0 -0
  97. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/agents/__init__.py +0 -0
  98. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/agents/t2kg_agent.py +0 -0
  99. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/__init__.py +0 -0
  100. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/__init__.py +0 -0
  101. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/default.yaml +0 -0
  102. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/app/__init__.py +0 -0
  103. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/app/frontend/__init__.py +0 -0
  104. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/app/frontend/default.yaml +0 -0
  105. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/config.yaml +0 -0
  106. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/__init__.py +0 -0
  107. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/graphrag_reasoning/__init__.py +0 -0
  108. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/graphrag_reasoning/default.yaml +0 -0
  109. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_extraction/__init__.py +0 -0
  110. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_extraction/default.yaml +0 -0
  111. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_summarization/__init__.py +0 -0
  112. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/configs/tools/subgraph_summarization/default.yaml +0 -0
  113. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/datasets/__init__.py +0 -0
  114. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/datasets/biobridge_primekg.py +0 -0
  115. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/datasets/dataset.py +0 -0
  116. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/datasets/primekg.py +0 -0
  117. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/datasets/starkqa_primekg.py +0 -0
  118. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/states/__init__.py +0 -0
  119. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/states/state_talk2knowledgegraphs.py +0 -0
  120. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/__init__.py +0 -0
  121. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_agents_t2kg_agent.py +0 -0
  122. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_biobridge_primekg.py +0 -0
  123. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_dataset.py +0 -0
  124. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_primekg.py +0 -0
  125. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_datasets_starkqa_primekg.py +0 -0
  126. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_tools_graphrag_reasoning.py +0 -0
  127. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_extraction.py +0 -0
  128. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_summarization.py +0 -0
  129. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_embeddings.py +0 -0
  130. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_huggingface.py +0 -0
  131. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_ollama.py +0 -0
  132. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_sentencetransformer.py +0 -0
  133. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_enrichments.py +0 -0
  134. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_ollama.py +0 -0
  135. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tests/test_utils_kg_utils.py +0 -0
  136. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tools/__init__.py +0 -0
  137. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tools/graphrag_reasoning.py +0 -0
  138. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tools/load_arguments.py +0 -0
  139. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tools/subgraph_extraction.py +0 -0
  140. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/tools/subgraph_summarization.py +0 -0
  141. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/__init__.py +0 -0
  142. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/__init__.py +0 -0
  143. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/embeddings.py +0 -0
  144. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/huggingface.py +0 -0
  145. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/ollama.py +0 -0
  146. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/embeddings/sentence_transformer.py +0 -0
  147. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py +0 -0
  148. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/enrichments/enrichments.py +0 -0
  149. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/enrichments/ollama.py +0 -0
  150. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/extractions/__init__.py +0 -0
  151. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/extractions/pcst.py +0 -0
  152. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2knowledgegraphs/utils/kg_utils.py +0 -0
  153. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/__init__.py +0 -0
  154. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/agents/__init__.py +0 -0
  155. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/__init__.py +0 -0
  156. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/agents/__init__.py +0 -0
  157. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/__init__.py +0 -0
  158. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/__init__.py +0 -0
  159. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/__init__.py +0 -0
  160. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/app/__init__.py +0 -0
  161. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/app/frontend/__init__.py +0 -0
  162. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/tools/__init__.py +0 -0
  163. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/__init__.py +0 -0
  164. {aiagents4pharma-1.20.1/aiagents4pharma/talk2scholars/configs/tools/search → aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/configs/tools/retrieve_semantic_scholar_paper_id}/__init__.py +0 -0
  165. {aiagents4pharma-1.20.1/aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation → aiagents4pharma-1.21.0/aiagents4pharma/talk2scholars/configs/tools/search}/__init__.py +0 -0
  166. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/state/__init__.py +0 -0
  167. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tests/__init__.py +0 -0
  168. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tests/test_state.py +0 -0
  169. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma/talk2scholars/tools/__init__.py +0 -0
  170. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma.egg-info/dependency_links.txt +0 -0
  171. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma.egg-info/requires.txt +0 -0
  172. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/aiagents4pharma.egg-info/top_level.txt +0 -0
  173. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/pyproject.toml +0 -0
  174. {aiagents4pharma-1.20.1 → aiagents4pharma-1.21.0}/setup.cfg +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: aiagents4pharma
- Version: 1.20.1
+ Version: 1.21.0
  Summary: AI Agents for drug discovery, drug development, and other pharmaceutical R&D.
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: MIT License
aiagents4pharma/talk2scholars/agents/main_agent.py
@@ -0,0 +1,206 @@
+ #!/usr/bin/env python3
+
+ """
+ Main agent for the talk2scholars app using ReAct pattern.
+
+ This module implements a hierarchical agent system where a supervisor agent
+ routes queries to specialized sub-agents. It follows the LangGraph patterns
+ for multi-agent systems and implements proper state management.
+ """
+
+ import logging
+ from typing import Literal, Callable
+ from pydantic import BaseModel
+ import hydra
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
+ from langchain_openai import ChatOpenAI
+ from langgraph.checkpoint.memory import MemorySaver
+ from langgraph.graph import END, START, StateGraph
+ from langgraph.types import Command
+ from ..agents import s2_agent
+ from ..state.state_talk2scholars import Talk2Scholars
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+
+ def get_hydra_config():
+     """
+     Loads the Hydra configuration for the main agent.
+
+     This function initializes the Hydra configuration system and retrieves the settings
+     for the `Talk2Scholars` agent, ensuring that all required parameters are loaded.
+
+     Returns:
+         DictConfig: The configuration object containing parameters for the main agent.
+     """
+     with hydra.initialize(version_base=None, config_path="../configs"):
+         cfg = hydra.compose(
+             config_name="config", overrides=["agents/talk2scholars/main_agent=default"]
+         )
+     return cfg.agents.talk2scholars.main_agent
+
+
+ def make_supervisor_node(llm_model: BaseChatModel, thread_id: str) -> Callable:
+     """
+     Creates the supervisor node responsible for routing user queries to the appropriate sub-agents.
+
+     This function initializes the routing logic by leveraging the system and router prompts defined
+     in the Hydra configuration. The supervisor determines whether to
+     call a sub-agent (like `s2_agent`)
+     or directly generate a response using the language model.
+
+     Args:
+         llm_model (BaseChatModel): The language model used for decision-making.
+         thread_id (str): Unique identifier for the current conversation session.
+
+     Returns:
+         Callable: The supervisor node function that processes user queries and
+         decides the next step.
+     """
+     cfg = get_hydra_config()
+     logger.info("Hydra configuration for Talk2Scholars main agent loaded: %s", cfg)
+     members = ["s2_agent"]
+     options = ["FINISH"] + members
+     # Define system prompt for general interactions
+     system_prompt = cfg.system_prompt
+     # Define router prompt for routing to sub-agents
+     router_prompt = cfg.router_prompt
+
+     class Router(BaseModel):
+         """Worker to route to next. If no workers needed, route to FINISH."""
+
+         next: Literal[*options]
+
+     def supervisor_node(
+         state: Talk2Scholars,
+     ) -> Command:
+         """
+         Handles the routing logic for the supervisor agent.
+
+         This function determines the next agent to invoke based on the router prompt response.
+         If no further processing is required, it generates an AI response using the system prompt.
+
+         Args:
+             state (Talk2Scholars): The current conversation state, including messages
+             exchanged so far.
+
+         Returns:
+             Command: A command dictating whether to invoke a sub-agent or generate a final response.
+         """
+         messages = [SystemMessage(content=router_prompt)] + state["messages"]
+         structured_llm = llm_model.with_structured_output(Router)
+         response = structured_llm.invoke(messages)
+         goto = response.next
+         logger.info("Routing to: %s, Thread ID: %s", goto, thread_id)
+         if goto == "FINISH":
+             goto = END  # Using END from langgraph.graph
+             # If no agents were called, and the last message was
+             # from the user, call the LLM to respond to the user
+             # with a slightly different system prompt.
+             if isinstance(messages[-1], HumanMessage):
+                 response = llm_model.invoke(
+                     [
+                         SystemMessage(content=system_prompt),
+                     ]
+                     + messages[1:]
+                 )
+                 return Command(
+                     goto=goto, update={"messages": AIMessage(content=response.content)}
+                 )
+         # Go to the requested agent
+         return Command(goto=goto)
+
+     return supervisor_node
+
+
+ def get_app(
+     thread_id: str,
+     llm_model: BaseChatModel = ChatOpenAI(model="gpt-4o-mini", temperature=0),
+ ):
+     """
+     Initializes and returns the LangGraph-based hierarchical agent system.
+
+     This function constructs the agent workflow by defining nodes for the supervisor
+     and sub-agents. It compiles the graph using `StateGraph` to enable structured
+     conversational workflows.
+
+     Args:
+         thread_id (str): A unique session identifier for tracking conversation state.
+         llm_model (BaseChatModel, optional): The language model used for query processing.
+             Defaults to `ChatOpenAI(model="gpt-4o-mini", temperature=0)`.
+
+     Returns:
+         StateGraph: A compiled LangGraph application that can process user queries.
+
+     Example:
+         >>> app = get_app("thread_123")
+         >>> result = app.invoke(initial_state)
+     """
+     cfg = get_hydra_config()
+
+     def call_s2_agent(
+         state: Talk2Scholars,
+     ) -> Command[Literal["supervisor"]]:
+         """
+         Invokes the Semantic Scholar (S2) agent to retrieve relevant research papers.
+
+         This function calls the `s2_agent` and updates the conversation state with retrieved
+         academic papers. The agent uses Semantic Scholar's API to find papers based on
+         user queries.
+
+         Args:
+             state (Talk2Scholars): The current state of the conversation, containing messages
+             and any previous search results.
+
+         Returns:
+             Command: A command to update the conversation state with the retrieved papers
+             and return control to the supervisor node.
+
+         Example:
+             >>> result = call_s2_agent(current_state)
+             >>> next_step = result.goto
+         """
+         logger.info("Calling S2 agent")
+         app = s2_agent.get_app(thread_id, llm_model)
+
+         # Invoke the S2 agent, passing state,
+         # Pass both config_id and thread_id
+         response = app.invoke(
+             state,
+             {
+                 "configurable": {
+                     "config_id": thread_id,
+                     "thread_id": thread_id,
+                 }
+             },
+         )
+         logger.info("S2 agent completed with response")
+         return Command(
+             update={
+                 "messages": response["messages"],
+                 "papers": response.get("papers", {}),
+                 "multi_papers": response.get("multi_papers", {}),
+                 "last_displayed_papers": response.get("last_displayed_papers", {}),
+             },
+             # Always return to supervisor
+             goto="supervisor",
+         )
+
+     # Initialize LLM
+     logger.info("Using model %s with temperature %s", llm_model, cfg.temperature)
+
+     # Build the graph
+     workflow = StateGraph(Talk2Scholars)
+     supervisor = make_supervisor_node(llm_model, thread_id)
+     # Add nodes
+     workflow.add_node("supervisor", supervisor)
+     workflow.add_node("s2_agent", call_s2_agent)
+     # Add edges
+     workflow.add_edge(START, "supervisor")
+     # Compile the workflow
+     app = workflow.compile(checkpointer=MemorySaver())
+     logger.info("Main agent workflow compiled")
+     return app
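
Usage note (not part of the diff above): the new main agent exposes `get_app(thread_id, llm_model)`, and the compiled graph is checkpointed, so each invocation must carry the thread id in its run config. A minimal sketch, assuming the package is installed as `aiagents4pharma` and `OPENAI_API_KEY` is set; the thread id and query text are illustrative:

from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

from aiagents4pharma.talk2scholars.agents.main_agent import get_app

thread_id = "demo_thread"  # illustrative session id
app = get_app(thread_id, ChatOpenAI(model="gpt-4o-mini", temperature=0))

# The graph is compiled with a MemorySaver checkpointer, so pass the same
# thread_id (the main agent also forwards it as config_id to the S2 sub-agent).
result = app.invoke(
    {"messages": [HumanMessage(content="Find AI papers on transformers")]},
    {"configurable": {"config_id": thread_id, "thread_id": thread_id}},
)
print(result["messages"][-1].content)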
aiagents4pharma/talk2scholars/agents/s2_agent.py
@@ -0,0 +1,129 @@
+ # /usr/bin/env python3
+
+ """
+ Agent for interacting with Semantic Scholar
+ """
+
+ import logging
+ from typing import Any, Dict
+ import hydra
+ from langchain_openai import ChatOpenAI
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langgraph.graph import START, StateGraph
+ from langgraph.prebuilt import create_react_agent, ToolNode
+ from langgraph.checkpoint.memory import MemorySaver
+ from ..state.state_talk2scholars import Talk2Scholars
+ from ..tools.s2.search import search_tool as s2_search
+ from ..tools.s2.display_results import display_results as s2_display
+ from ..tools.s2.query_results import query_results as s2_query_results
+ from ..tools.s2.retrieve_semantic_scholar_paper_id import (
+     retrieve_semantic_scholar_paper_id as s2_retrieve_id,
+ )
+ from ..tools.s2.single_paper_rec import (
+     get_single_paper_recommendations as s2_single_rec,
+ )
+ from ..tools.s2.multi_paper_rec import get_multi_paper_recommendations as s2_multi_rec
+
+ # Initialize logger
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+
+ def get_app(
+     uniq_id, llm_model: BaseChatModel = ChatOpenAI(model="gpt-4o-mini", temperature=0)
+ ):
+     """
+     Initializes and returns the LangGraph application for the Semantic Scholar (S2) agent.
+
+     This function sets up the S2 agent, which integrates various tools to search, retrieve,
+     and display research papers from Semantic Scholar. The agent follows the ReAct pattern
+     for structured interaction.
+
+     Args:
+         uniq_id (str): Unique identifier for the current conversation session.
+         llm_model (BaseChatModel, optional): The language model to be used by the agent.
+             Defaults to `ChatOpenAI(model="gpt-4o-mini", temperature=0)`.
+
+     Returns:
+         StateGraph: A compiled LangGraph application that enables the S2 agent to process
+         user queries and retrieve research papers.
+
+     Example:
+         >>> app = get_app("thread_123")
+         >>> result = app.invoke(initial_state)
+     """
+
+     # def agent_s2_node(state: Talk2Scholars) -> Command[Literal["supervisor"]]:
+     def agent_s2_node(state: Talk2Scholars) -> Dict[str, Any]:
+         """
+         Processes the user query and retrieves relevant research papers.
+
+         This function calls the language model using the configured `ReAct` agent to analyze
+         the state and generate an appropriate response. The function then returns control
+         to the main supervisor.
+
+         Args:
+             state (Talk2Scholars): The current conversation state, including messages exchanged
+             and any previously retrieved research papers.
+
+         Returns:
+             Dict[str, Any]: A dictionary containing the updated conversation state.
+
+         Example:
+             >>> result = agent_s2_node(current_state)
+             >>> papers = result.get("papers", [])
+         """
+         logger.log(logging.INFO, "Creating Agent_S2 node with thread_id %s", uniq_id)
+         result = model.invoke(state, {"configurable": {"thread_id": uniq_id}})
+
+         return result
+
+     logger.log(logging.INFO, "thread_id, llm_model: %s, %s", uniq_id, llm_model)
+
+     # Load hydra configuration
+     logger.log(logging.INFO, "Load Hydra configuration for Talk2Scholars S2 agent.")
+     with hydra.initialize(version_base=None, config_path="../configs"):
+         cfg = hydra.compose(
+             config_name="config", overrides=["agents/talk2scholars/s2_agent=default"]
+         )
+         cfg = cfg.agents.talk2scholars.s2_agent
+
+     # Define the tools
+     tools = ToolNode(
+         [
+             s2_search,
+             s2_display,
+             s2_query_results,
+             s2_retrieve_id,
+             s2_single_rec,
+             s2_multi_rec,
+         ]
+     )
+
+     # Define the model
+     logger.log(logging.INFO, "Using OpenAI model %s", llm_model)
+
+     # Create the agent
+     model = create_react_agent(
+         llm_model,
+         tools=tools,
+         state_schema=Talk2Scholars,
+         state_modifier=cfg.s2_agent,
+         checkpointer=MemorySaver(),
+     )
+
+     workflow = StateGraph(Talk2Scholars)
+     workflow.add_node("agent_s2", agent_s2_node)
+     workflow.add_edge(START, "agent_s2")
+
+     # Initialize memory to persist state between graph runs
+     checkpointer = MemorySaver()
+
+     # Finally, we compile it!
+     # This compiles it into a LangChain Runnable,
+     # meaning you can use it as you would any other runnable.
+     # Note that we're (optionally) passing the memory when compiling the graph
+     app = workflow.compile(checkpointer=checkpointer)
+     logger.log(logging.INFO, "Compiled the graph")
+
+     return app
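
The S2 sub-agent can also be compiled and driven on its own, mirroring how `call_s2_agent` in the main agent invokes it; a minimal sketch (not part of the diff), assuming `OPENAI_API_KEY` is set and using an illustrative session id and query:

from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

from aiagents4pharma.talk2scholars.agents import s2_agent

uniq_id = "s2_demo"  # illustrative session id
app = s2_agent.get_app(uniq_id, ChatOpenAI(model="gpt-4o-mini", temperature=0))

response = app.invoke(
    {"messages": [HumanMessage(content="Search for papers on knowledge graphs")]},
    {"configurable": {"config_id": uniq_id, "thread_id": uniq_id}},
)
# Any retrieved papers land in the state fields defined in state_talk2scholars.py
papers = response.get("papers", {})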
aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml
@@ -0,0 +1,39 @@
+ _target_: agents.main_agent.get_app
+ openai_api_key: ${oc.env:OPENAI_API_KEY}
+ openai_llms:
+   - "gpt-4o-mini"
+   - "gpt-4-turbo"
+   - "gpt-3.5-turbo"
+ temperature: 0
+ system_prompt: >
+   You are the Talk2Scholars agent coordinating academic paper discovery and analysis.
+
+   You have access to the following agents:
+   1. S2_agent: This agent can be used to search and recommend papers
+   from Semantic Scholar. Use this agent when the user asks for
+   general paper searches and recommendations. This agent can also
+   retrieve the Semantic Scholar ID of a paper.
+ router_prompt: >
+   You are a supervisor tasked with managing a conversation between the
+   following workers: {members}. Given the user request, respond with the
+   worker to act next. Each worker will perform a task and respond with
+   their results and status. When finished, respond with FINISH.
+
+   Here is a description of the workers:
+   1. S2_agent: This agent can be used to search and recommend papers
+   from Semantic Scholar. Use this agent when the user asks for
+   general paper searches and recommendations. This agent can also
+   retrieve the Semantic Scholar ID of a paper. It can also be used to
+   provide more information about a paper.
+
+   Here are some instructions for the workers:
+   1. Call the S2 agent for general paper searches and recommendations.
+   2. The S2 agent has access to tools for querying and displaying papers.
+   3. If the user wants suggestions for papers and you don’t have
+   a Semantic Scholar ID for it but do have the title from
+   the last displayed results, use the S2 agent to retrieve the
+   Semantic Scholar ID of the paper. Then, use the S2 agent again to display
+   recommendations for the paper.
+   4. You can call the S2 agent to get more information about a paper based
+   on the context of the conversation.
+   5. Respond with FINISH when all tasks are completed.
aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml
@@ -0,0 +1,16 @@
+ _target_: agents.s2_agent.get_app
+ openai_api_key: ${oc.env:OPENAI_API_KEY}
+ openai_llms:
+   - "gpt-4o-mini"
+   - "gpt-4-turbo"
+   - "gpt-3.5-turbo"
+ temperature: 0
+ s2_agent: >
+   You are an academic research assistant with access to the
+   Semantic Scholar API for paper discovery and analysis.
+   You also have tools to gain more insights on the papers and
+   display them.
+   You must strictly rely on retrieved information and avoid
+   generating unsupported content. Do not generate hallucinations
+   or fabricate details of any article. Stay focused on accurate,
+   sourced academic insights.
aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml
@@ -1,14 +1,13 @@
- # # Page configuration
- # page:
- #   title: "Talk2Scholars"
- #   icon: "🤖"
- #   layout: "wide"
+ # Page configuration
+ page:
+   title: "Talk2Scholars"
+   icon: "🤖"
+   layout: "wide"

  # Available LLM models
- llm_models:
-   - "gpt-4o-mini"
-   - "gpt-4-turbo"
-   - "gpt-3.5-turbo"
+ llms:
+   available_models:
+     - "OpenAI/gpt-4o-mini"
  # # Chat UI configuration
  # chat:
  #   assistant_avatar: "🤖"
@@ -16,6 +15,9 @@ llm_models:
  #   input_placeholder: "Say something ..."
  #   spinner_text: "Fetching response ..."

+ api_keys:
+   openai_key: "OPENAI_API_KEY"
+   nvidia_key: "NVIDIA_API_KEY"
  # # Feedback configuration
  # feedback:
  #   type: "thumbs"
aiagents4pharma/talk2scholars/configs/config.yaml
@@ -5,4 +5,5 @@ defaults:
  - tools/search: default
  - tools/single_paper_recommendation: default
  - tools/multi_paper_recommendation: default
+ - tools/retrieve_semantic_scholar_paper_id: default
  - app/frontend: default
aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml
@@ -9,6 +9,8 @@ api_fields:
  - "authors"
  - "citationCount"
  - "url"
+ # Commented fields that could be added later if needed
+ # - "externalIds"

  # Default headers and params
  headers:
aiagents4pharma/talk2scholars/configs/tools/search/default.yaml
@@ -10,6 +10,7 @@ api_fields:
  - "citationCount"
  - "url"
  # Commented fields that could be added later if needed
+ # - "externalIds"
  # - "publicationTypes"
  # - "openAccessPdf"

aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/__init__.py
@@ -0,0 +1,3 @@
+ """
+ Import all the modules in the package
+ """
aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml
@@ -10,6 +10,7 @@ api_fields:
  - "citationCount"
  - "url"
  # Commented fields that could be added later if needed
+ # - "externalIds"
  # - "publicationTypes"
  # - "openAccessPdf"

aiagents4pharma/talk2scholars/state/state_talk2scholars.py
@@ -0,0 +1,62 @@
+ """
+ State management for the Talk2Scholars agent.
+
+ This module defines the state class `Talk2Scholars`, which maintains the conversation
+ context, retrieved papers, and other relevant metadata. The state ensures consistency
+ across agent interactions.
+ """
+
+ import logging
+ from typing import Annotated, Any, Dict
+ from langchain_core.language_models import BaseChatModel
+ from langgraph.prebuilt.chat_agent_executor import AgentState
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+
+ def replace_dict(existing: Dict[str, Any], new: Dict[str, Any]) -> Dict[str, Any]:
+     """
+     Replaces the existing dictionary with a new dictionary.
+
+     This function logs the state update and ensures that the old state is replaced
+     with the new one.
+
+     Args:
+         existing (Dict[str, Any]): The current dictionary state.
+         new (Dict[str, Any]): The new dictionary state to replace the existing one.
+
+     Returns:
+         Dict[str, Any]: The updated dictionary state.
+
+     Example:
+         >>> old_state = {"papers": {"id1": "Paper 1"}}
+         >>> new_state = {"papers": {"id2": "Paper 2"}}
+         >>> updated_state = replace_dict(old_state, new_state)
+         >>> print(updated_state)
+         {"papers": {"id2": "Paper 2"}}
+     """
+     logger.info("Updating existing state %s with the state dict: %s", existing, new)
+     return new
+
+
+ class Talk2Scholars(AgentState):
+     """
+     Represents the state of the Talk2Scholars agent.
+
+     This class extends `AgentState` to maintain conversation history, retrieved papers,
+     and interactions with the language model.
+
+     Attributes:
+         last_displayed_papers (Dict[str, Any]): Stores the most recently displayed papers.
+         papers (Dict[str, Any]): Stores the research papers retrieved from the agent's queries.
+         multi_papers (Dict[str, Any]): Stores multiple recommended papers from various sources.
+         llm_model (BaseChatModel): The language model instance used for generating responses.
+     """
+
+     # Agent state fields
+     last_displayed_papers: Annotated[Dict[str, Any], replace_dict]
+     papers: Annotated[Dict[str, Any], replace_dict]
+     multi_papers: Annotated[Dict[str, Any], replace_dict]
+     llm_model: BaseChatModel
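
Because every annotated field above uses `replace_dict` as its reducer, a node that returns a new value for `papers` (or either of the other dict fields) replaces the stored dict rather than merging into it. A small sketch of that behaviour (not part of the diff), with hypothetical paper entries:

from aiagents4pharma.talk2scholars.state.state_talk2scholars import replace_dict

existing = {"paperA": {"Title": "Old search hit"}}  # hypothetical entries
new = {"paperB": {"Title": "Fresh search hit"}}

# LangGraph calls the reducer as reducer(existing, new) when a node updates the field.
assert replace_dict(existing, new) == new  # the old entries are discarded, not merged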
aiagents4pharma/talk2scholars/tests/test_llm_main_integration.py
@@ -0,0 +1,58 @@
+ """
+ Integration tests for talk2scholars system with OpenAI.
+ """
+
+ import os
+ import pytest
+ import hydra
+ from langchain_openai import ChatOpenAI
+ from langchain_core.messages import HumanMessage, AIMessage
+ from ..agents.main_agent import get_app
+ from ..state.state_talk2scholars import Talk2Scholars
+
+ # pylint: disable=redefined-outer-name
+
+
+ @pytest.mark.skipif(
+     not os.getenv("OPENAI_API_KEY"), reason="Requires OpenAI API key to run"
+ )
+ def test_main_agent_real_llm():
+     """
+     Test that the main agent invokes S2 agent correctly
+     and updates the state with real LLM execution.
+     """
+
+     # Load Hydra Configuration EXACTLY like in main_agent.py
+     with hydra.initialize(version_base=None, config_path="../configs"):
+         cfg = hydra.compose(
+             config_name="config", overrides=["agents/talk2scholars/main_agent=default"]
+         )
+         hydra_cfg = cfg.agents.talk2scholars.main_agent
+
+     assert hydra_cfg is not None, "Hydra config failed to load"
+
+     # Use the real OpenAI API (ensure env variable is set)
+     llm = ChatOpenAI(model="gpt-4o-mini", temperature=hydra_cfg.temperature)
+
+     # Initialize main agent workflow (WITH real Hydra config)
+     thread_id = "test_thread"
+     app = get_app(thread_id, llm)
+
+     # Provide an actual user query
+     initial_state = Talk2Scholars(
+         messages=[HumanMessage(content="Find AI papers on transformers")]
+     )
+
+     # Invoke the agent (triggers supervisor → s2_agent)
+     result = app.invoke(
+         initial_state,
+         {"configurable": {"config_id": thread_id, "thread_id": thread_id}},
+     )
+
+     # Assert that the supervisor routed correctly
+     assert "messages" in result, "Expected messages in response"
+
+     # Fix: Accept AIMessage as a valid response type
+     assert isinstance(
+         result["messages"][-1], (HumanMessage, AIMessage, str)
+     ), "Last message should be a valid response"
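
Because of the `skipif` guard, this integration test only executes when `OPENAI_API_KEY` is set in the environment; from a source checkout it can presumably be selected with something like `pytest -k test_main_agent_real_llm`, and is skipped otherwise.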