aiagents4pharma 1.20.1__py3-none-any.whl → 1.22.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. aiagents4pharma/talk2knowledgegraphs/configs/__init__.py +1 -0
  2. aiagents4pharma/talk2knowledgegraphs/configs/config.yaml +1 -0
  3. aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_nim_molmim.py +64 -0
  4. aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_pubchem.py +33 -0
  5. aiagents4pharma/talk2knowledgegraphs/tests/test_utils_pubchem_utils.py +16 -0
  6. aiagents4pharma/talk2knowledgegraphs/utils/__init__.py +1 -0
  7. aiagents4pharma/talk2knowledgegraphs/utils/embeddings/__init__.py +1 -0
  8. aiagents4pharma/talk2knowledgegraphs/utils/embeddings/nim_molmim.py +54 -0
  9. aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py +1 -0
  10. aiagents4pharma/talk2knowledgegraphs/utils/enrichments/pubchem_strings.py +49 -0
  11. aiagents4pharma/talk2knowledgegraphs/utils/pubchem_utils.py +42 -0
  12. aiagents4pharma/talk2scholars/agents/main_agent.py +90 -91
  13. aiagents4pharma/talk2scholars/agents/s2_agent.py +61 -17
  14. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +31 -10
  15. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +8 -16
  16. aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml +11 -9
  17. aiagents4pharma/talk2scholars/configs/config.yaml +1 -0
  18. aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml +2 -0
  19. aiagents4pharma/talk2scholars/configs/tools/retrieve_semantic_scholar_paper_id/__init__.py +3 -0
  20. aiagents4pharma/talk2scholars/configs/tools/search/default.yaml +1 -0
  21. aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml +1 -0
  22. aiagents4pharma/talk2scholars/state/state_talk2scholars.py +36 -7
  23. aiagents4pharma/talk2scholars/tests/test_llm_main_integration.py +58 -0
  24. aiagents4pharma/talk2scholars/tests/test_main_agent.py +98 -122
  25. aiagents4pharma/talk2scholars/tests/test_s2_agent.py +95 -29
  26. aiagents4pharma/talk2scholars/tests/test_s2_tools.py +158 -22
  27. aiagents4pharma/talk2scholars/tools/s2/__init__.py +4 -2
  28. aiagents4pharma/talk2scholars/tools/s2/display_results.py +60 -21
  29. aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py +35 -8
  30. aiagents4pharma/talk2scholars/tools/s2/query_results.py +61 -0
  31. aiagents4pharma/talk2scholars/tools/s2/retrieve_semantic_scholar_paper_id.py +79 -0
  32. aiagents4pharma/talk2scholars/tools/s2/search.py +34 -10
  33. aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py +39 -9
  34. {aiagents4pharma-1.20.1.dist-info → aiagents4pharma-1.22.0.dist-info}/METADATA +2 -1
  35. {aiagents4pharma-1.20.1.dist-info → aiagents4pharma-1.22.0.dist-info}/RECORD +38 -29
  36. aiagents4pharma/talk2scholars/tests/test_integration.py +0 -237
  37. {aiagents4pharma-1.20.1.dist-info → aiagents4pharma-1.22.0.dist-info}/LICENSE +0 -0
  38. {aiagents4pharma-1.20.1.dist-info → aiagents4pharma-1.22.0.dist-info}/WHEEL +0 -0
  39. {aiagents4pharma-1.20.1.dist-info → aiagents4pharma-1.22.0.dist-info}/top_level.txt +0 -0
aiagents4pharma/talk2scholars/tools/s2/search.py
@@ -5,7 +5,7 @@ This tool is used to search for academic papers on Semantic Scholar.
 """
 
 import logging
-from typing import Annotated, Any, Dict, Optional
+from typing import Annotated, Any, Optional
 import hydra
 import requests
 from langchain_core.messages import ToolMessage
@@ -14,7 +14,6 @@ from langchain_core.tools.base import InjectedToolCallId
 from langgraph.types import Command
 from pydantic import BaseModel, Field
 
-
 # Configure logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -28,7 +27,7 @@ class SearchInput(BaseModel):
         "Be specific and include relevant academic terms."
     )
     limit: int = Field(
-        default=2, description="Maximum number of results to return", ge=1, le=100
+        default=5, description="Maximum number of results to return", ge=1, le=100
     )
     year: Optional[str] = Field(
         default=None,
@@ -44,13 +43,13 @@ with hydra.initialize(version_base=None, config_path="../../configs"):
     cfg = cfg.tools.search
 
 
-@tool(args_schema=SearchInput)
+@tool("search_tool", args_schema=SearchInput, parse_docstring=True)
 def search_tool(
     query: str,
     tool_call_id: Annotated[str, InjectedToolCallId],
-    limit: int = 2,
+    limit: int = 5,
     year: Optional[str] = None,
-) -> Dict[str, Any]:
+) -> Command[Any]:
     """
     Search for academic papers on Semantic Scholar.
 
@@ -62,9 +61,9 @@ def search_tool(
             Supports formats like "2024-", "-2024", "2024:2025". Defaults to None.
 
     Returns:
-        Dict[str, Any]: The search results and related information.
+        The number of papers found on Semantic Scholar.
     """
-    print("Starting paper search...")
+    logger.info("Searching for papers on %s", query)
     endpoint = cfg.api_endpoint
     params = {
         "query": query,
@@ -80,26 +79,51 @@ def search_tool(
     data = response.json()
     papers = data.get("data", [])
     logger.info("Received %d papers", len(papers))
+    if not papers:
+        return Command(
+            update={  # Place 'messages' inside 'update'
+                "messages": [
+                    ToolMessage(
+                        content="No papers found. Please try a different search query.",
+                        tool_call_id=tool_call_id,
+                    )
+                ]
+            }
+        )
     # Create a dictionary to store the papers
     filtered_papers = {
         paper["paperId"]: {
+            # "semantic_scholar_id": paper["paperId"],  # Store Semantic Scholar ID
             "Title": paper.get("title", "N/A"),
             "Abstract": paper.get("abstract", "N/A"),
             "Year": paper.get("year", "N/A"),
             "Citation Count": paper.get("citationCount", "N/A"),
             "URL": paper.get("url", "N/A"),
+            # "arXiv_ID": paper.get("externalIds", {}).get(
+            #     "ArXiv", "N/A"
+            # ),  # Extract arXiv ID
         }
         for paper in papers
         if paper.get("title") and paper.get("authors")
     }
 
+    logger.info("Filtered %d papers", len(filtered_papers))
+
+    content = "Search was successful."
+    content += " Here is a summary of the search results:"
+    content += f"Number of papers found: {len(filtered_papers)}\n"
+    content += f"Query: {query}\n"
+    content += f"Year: {year}\n" if year else ""
+
     return Command(
         update={
             "papers": filtered_papers,  # Now sending the dictionary directly
+            "last_displayed_papers": "papers",
             "messages": [
                 ToolMessage(
-                    content=f"Search Successful: {filtered_papers}",
-                    tool_call_id=tool_call_id
+                    content=content,
+                    tool_call_id=tool_call_id,
+                    artifact=filtered_papers,
                 )
             ],
         }
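Both Semantic Scholar tools now return a LangGraph Command instead of a plain Dict[str, Any]: a short human-readable summary goes into ToolMessage.content, while the full paper dictionary is attached as the message's artifact and written to the "papers" and "last_displayed_papers" state keys. A minimal sketch of that return shape, with made-up paper data and tool-call id (illustrative only, not taken from the package):

from langchain_core.messages import ToolMessage
from langgraph.types import Command

# Made-up example data, shaped like the filtered_papers dictionary in the diff above.
filtered_papers = {
    "abc123": {
        "Title": "Example Paper",
        "Abstract": "N/A",
        "Year": 2024,
        "Citation Count": 1,
        "URL": "https://example.com/paper",
    }
}
content = f"Search was successful. Number of papers found: {len(filtered_papers)}\n"

command = Command(
    update={
        "papers": filtered_papers,
        "last_displayed_papers": "papers",
        "messages": [
            ToolMessage(
                content=content,
                tool_call_id="tool-call-0",
                artifact=filtered_papers,
            )
        ],
    }
)

# Downstream consumers read the compact summary from the message and the
# structured records from the artifact, instead of parsing a dumped dict.
message = command.update["messages"][0]
print(message.content)
print(list(message.artifact))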
aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py
@@ -5,7 +5,7 @@ This tool is used to return recommendations for a single paper.
 """
 
 import logging
-from typing import Annotated, Any, Dict, Optional
+from typing import Annotated, Any, Optional
 import hydra
 import requests
 from langchain_core.messages import ToolMessage
@@ -26,7 +26,7 @@ class SinglePaperRecInput(BaseModel):
         description="Semantic Scholar Paper ID to get recommendations for (40-character string)"
     )
     limit: int = Field(
-        default=2,
+        default=5,
         description="Maximum number of recommendations to return",
         ge=1,
         le=500,
@@ -48,15 +48,16 @@ with hydra.initialize(version_base=None, config_path="../../configs"):
     cfg = cfg.tools.single_paper_recommendation
 
 
-@tool(args_schema=SinglePaperRecInput)
+@tool(args_schema=SinglePaperRecInput, parse_docstring=True)
 def get_single_paper_recommendations(
     paper_id: str,
     tool_call_id: Annotated[str, InjectedToolCallId],
-    limit: int = 2,
+    limit: int = 5,
     year: Optional[str] = None,
-) -> Dict[str, Any]:
+) -> Command[Any]:
     """
-    Get paper recommendations based on a single paper.
+    Get recommendations for on a single paper using its Semantic Scholar ID.
+    No other ID types are supported.
 
     Args:
         paper_id (str): The Semantic Scholar Paper ID to get recommendations for.
@@ -68,7 +69,9 @@ def get_single_paper_recommendations(
     Returns:
         Dict[str, Any]: The recommendations and related information.
     """
-    logger.info("Starting single paper recommendations search.")
+    logger.info(
+        "Starting single paper recommendations search with paper ID: %s", paper_id
+    )
 
     endpoint = f"{cfg.api_endpoint}/{paper_id}"
     params = {
@@ -90,32 +93,59 @@ def get_single_paper_recommendations(
         paper_id,
         response.status_code,
     )
+    if response.status_code != 200:
+        raise ValueError("Invalid paper ID or API error.")
     # print(f"Request params: {params}")
     logging.info("Request params: %s", params)
 
     data = response.json()
     recommendations = data.get("recommendedPapers", [])
 
+    if not recommendations:
+        return Command(
+            update={
+                "papers": {},
+                "messages": [
+                    ToolMessage(
+                        content=f"No recommendations found for {paper_id}.",
+                        tool_call_id=tool_call_id,
+                    )
+                ],
+            }
+        )
+
     # Extract paper ID and title from recommendations
     filtered_papers = {
         paper["paperId"]: {
+            # "semantic_scholar_id": paper["paperId"],  # Store Semantic Scholar ID
             "Title": paper.get("title", "N/A"),
             "Abstract": paper.get("abstract", "N/A"),
             "Year": paper.get("year", "N/A"),
             "Citation Count": paper.get("citationCount", "N/A"),
             "URL": paper.get("url", "N/A"),
+            # "arXiv_ID": paper.get("externalIds", {}).get(
+            #     "ArXiv", "N/A"
+            # ),  # Extract arXiv ID
         }
         for paper in recommendations
         if paper.get("title") and paper.get("authors")
     }
 
+    content = "Recommendations based on a single paper were successful."
+    content += " Here is a summary of the recommendations:"
+    content += f"Number of papers found: {len(filtered_papers)}\n"
+    content += f"Query Paper ID: {paper_id}\n"
+    content += f"Year: {year}\n" if year else ""
+
     return Command(
         update={
             "papers": filtered_papers,  # Now sending the dictionary directly
+            "last_displayed_papers": "papers",
             "messages": [
                 ToolMessage(
-                    content=f"Search Successful: {filtered_papers}",
-                    tool_call_id=tool_call_id
+                    content=content,
+                    tool_call_id=tool_call_id,
+                    artifact=filtered_papers,
                 )
             ],
         }
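Two behavioural notes on the hunks above: the default limit moves from 2 to 5 while the 1-500 bounds stay in place, and a non-200 Semantic Scholar response now raises ValueError instead of passing through silently. A quick sketch of how the Pydantic bounds behave (the class is re-declared here only for illustration, mirroring the fields shown in the diff; the 40-character paper id is a placeholder):

from pydantic import BaseModel, Field, ValidationError

# Re-declared for illustration only; mirrors the schema in the diff above.
class SinglePaperRecInput(BaseModel):
    paper_id: str = Field(
        description="Semantic Scholar Paper ID to get recommendations for (40-character string)"
    )
    limit: int = Field(
        default=5, description="Maximum number of recommendations to return", ge=1, le=500
    )

print(SinglePaperRecInput(paper_id="x" * 40).limit)  # 5, the new default
try:
    SinglePaperRecInput(paper_id="x" * 40, limit=0)  # rejected: below ge=1
except ValidationError as err:
    print(err.errors()[0]["type"])  # greater_than_equal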
{aiagents4pharma-1.20.1.dist-info → aiagents4pharma-1.22.0.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: aiagents4pharma
-Version: 1.20.1
+Version: 1.22.0
 Summary: AI Agents for drug discovery, drug development, and other pharmaceutical R&D.
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
@@ -30,6 +30,7 @@ Requires-Dist: ollama==0.4.6
 Requires-Dist: pandas==2.2.3
 Requires-Dist: pcst_fast==1.0.10
 Requires-Dist: plotly==5.24.1
+Requires-Dist: pubchempy==1.0.4
 Requires-Dist: pydantic==2.9.2
 Requires-Dist: pylint==3.3.1
 Requires-Dist: pypdf==5.2.0
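The new pubchempy pin supports the PubChem utilities added to talk2knowledgegraphs in this release (pubchem_utils.py, enrichments/pubchem_strings.py, and the nim_molmim embeddings). As a rough, hedged illustration of what that dependency provides, plain pubchempy usage, not the package's own code (CID 2244 is aspirin):

import pubchempy as pcp

# Fetch a compound record from PubChem and read its SMILES string.
compound = pcp.Compound.from_cid(2244)
print(compound.canonical_smiles)  # e.g. CC(=O)OC1=CC=CC=C1C(=O)O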
{aiagents4pharma-1.20.1.dist-info → aiagents4pharma-1.22.0.dist-info}/RECORD
@@ -59,8 +59,8 @@ aiagents4pharma/talk2cells/tools/scp_agent/search_studies.py,sha256=MLe-twtFnOu-
 aiagents4pharma/talk2knowledgegraphs/__init__.py,sha256=Z0Eo7LTiKk0STsr8VI7wkCLq7PHrK1vYlH4I1hSNLiA,165
 aiagents4pharma/talk2knowledgegraphs/agents/__init__.py,sha256=iOAzuy_8A03tQDFtSBhC9dldUo62z5gfxcVtXAdLOJs,92
 aiagents4pharma/talk2knowledgegraphs/agents/t2kg_agent.py,sha256=j6MA1LB28mqpb6ZEmNLGcvDZvOnlGbJB9r7VXyEGask,3079
-aiagents4pharma/talk2knowledgegraphs/configs/__init__.py,sha256=Y49ucO22v9oe9EwFiXN6MU2wvyB3_ZBpmHwHbeh-ZVQ,106
-aiagents4pharma/talk2knowledgegraphs/configs/config.yaml,sha256=rwUIZ2t5j5hlFyre7VnV8zMsP0qpPTwvAFExgvQD6q0,196
+aiagents4pharma/talk2knowledgegraphs/configs/__init__.py,sha256=4_DVdpahaJ55yPl0aZotlFA_MYWLFF2cubWyKtBVI_Q,126
+aiagents4pharma/talk2knowledgegraphs/configs/config.yaml,sha256=bag4w3JCSqaojG37MTksy3ZehAPe3qoVzjIN2uh3nrc,229
 aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/__init__.py,sha256=-fAORvyFmG2iSvFOFDixmt9OTQRR58y89uhhu2EgbA8,46
 aiagents4pharma/talk2knowledgegraphs/configs/agents/t2kg_agent/default.yaml,sha256=ENCGROwYFpR6g4QD518h73sshdn3vPVpotBMk1QJcpU,4830
 aiagents4pharma/talk2knowledgegraphs/configs/app/__init__.py,sha256=fKfc3FR7g5KjY9b6jzrU6cwKTVVpkoVZQS3dvUowu34,69
@@ -91,66 +91,75 @@ aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_extraction.py,sha
 aiagents4pharma/talk2knowledgegraphs/tests/test_tools_subgraph_summarization.py,sha256=oBqfspXXOxH04OQuPb8BCW0liIQTGKXtaPNSrPpQtFc,7597
 aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_embeddings.py,sha256=uYFoE_6zeU10_1mLLAHUr5c4S2XZMSc0Q_860o-KWEw,1517
 aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_huggingface.py,sha256=hzX84pheZdEsTtikF2KtBFiH44_xPjYXxLA6p4Ax1CY,1623
+aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_nim_molmim.py,sha256=LwtTZ-M7lHGxvRrGBXbyIT8AkA3T2OpeKqtNq3RK7Ik,2164
 aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_ollama.py,sha256=jn-TrPwF0aR9kVoerwkbMZa3U6Hc6HjV6Zoau4qSH4g,1834
 aiagents4pharma/talk2knowledgegraphs/tests/test_utils_embeddings_sentencetransformer.py,sha256=Qxo6WeIDRy8aLh1tNKw0kSlzmUj3MtTak63oW2YwB24,1327
 aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_enrichments.py,sha256=N6HRr4lWHXY7bTHe2uXJe4D_EG9WqZPibZne6qLl9_k,1447
 aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_ollama.py,sha256=JhY7axvVULLywDJ2ctA-gob5YPeaJYWsaMNjHT6L9CU,3021
+aiagents4pharma/talk2knowledgegraphs/tests/test_utils_enrichments_pubchem.py,sha256=bk27KElJxOvKJ2RTz4ftleExQPMyWWS755KKmlImzbk,1241
 aiagents4pharma/talk2knowledgegraphs/tests/test_utils_kg_utils.py,sha256=pal76wi7WgQWUNk56BrzfFV8jKpbDaHHdbwtgx_gXLI,2410
+aiagents4pharma/talk2knowledgegraphs/tests/test_utils_pubchem_utils.py,sha256=C07YqUNYW7ofpKAzKh0lBovXKLvaiXFb3oJU6k1dvu4,411
 aiagents4pharma/talk2knowledgegraphs/tools/__init__.py,sha256=zpD4h7EYtyq0QNOqLd6bkxrPlPb2XN64ceI9ncgESrA,171
 aiagents4pharma/talk2knowledgegraphs/tools/graphrag_reasoning.py,sha256=OEuOFncDRdb7TQEGq4rkT5On-jI-R7Nt8K5EBzaND8w,5338
 aiagents4pharma/talk2knowledgegraphs/tools/load_arguments.py,sha256=zhmsRp-8vjB5rRekqTA07d3yb-42HWqng9dDMkvK6hM,623
 aiagents4pharma/talk2knowledgegraphs/tools/subgraph_extraction.py,sha256=te06QMFQfgJWrjaGrqpcOYeaV38jwm0KY_rXVSMHkeI,11468
 aiagents4pharma/talk2knowledgegraphs/tools/subgraph_summarization.py,sha256=mDSBOxopDfNhEJeU8fVI8b5lXTYrRzcc97aLbFgYSy4,4413
-aiagents4pharma/talk2knowledgegraphs/utils/__init__.py,sha256=Q9mzcSmkmhdnOn13fxGh1fNECYoUR5Y5CCuEJTIxwAI,167
+aiagents4pharma/talk2knowledgegraphs/utils/__init__.py,sha256=cZqb3LZLmBnmyAtWFv2Z-4uJvQmx0M4zKsfiWrlM3Pk,195
 aiagents4pharma/talk2knowledgegraphs/utils/kg_utils.py,sha256=6vQnPkeOWae_8jePjhma3sJuMTngy0I0tqzdFt6OqKg,2507
-aiagents4pharma/talk2knowledgegraphs/utils/embeddings/__init__.py,sha256=4TGK0XIVkkfGOyrSVwFQ-Lp-rzH9CCl-fWcqkFJKRLc,174
+aiagents4pharma/talk2knowledgegraphs/utils/pubchem_utils.py,sha256=IlrdGbRGD0IM7eMcpkOjuRjKNuH3lz_X8zN6RHwk61c,1340
+aiagents4pharma/talk2knowledgegraphs/utils/embeddings/__init__.py,sha256=POSDrSdFAWsBCueOPD-Fok-ARdTywJU1ivwpT9EU1Kw,199
 aiagents4pharma/talk2knowledgegraphs/utils/embeddings/embeddings.py,sha256=1nGznrAj-xT0xuSMBGz2dOujJ7M_IwSR84njxtxsy9A,2523
 aiagents4pharma/talk2knowledgegraphs/utils/embeddings/huggingface.py,sha256=2vi_elf6EgzfagFAO5QnL3a_aXZyN7B1EBziu44MTfM,3806
+aiagents4pharma/talk2knowledgegraphs/utils/embeddings/nim_molmim.py,sha256=XH6JNfmMS38UEU7UGJeeabHfRykharnQpQaqjO86OlQ,1537
 aiagents4pharma/talk2knowledgegraphs/utils/embeddings/ollama.py,sha256=8w0sjt3Ex5YJ_XvpKl9UbhdTiiaoMIarbPUxLBU-1Uw,2378
 aiagents4pharma/talk2knowledgegraphs/utils/embeddings/sentence_transformer.py,sha256=36iKlisOpMtGR5xfTAlSHXWvPqVC_Jbezod8kbBBMVg,2136
-aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py,sha256=tW426knki2DBIHcWyF_K04iMMdbpIn_e_TpPmTgz2dI,113
+aiagents4pharma/talk2knowledgegraphs/utils/enrichments/__init__.py,sha256=JKGavA-umsGX3ng17_UYAvDBdbg-W-mPn8Q6JfP7J9U,143
 aiagents4pharma/talk2knowledgegraphs/utils/enrichments/enrichments.py,sha256=Bx8x6zzk5614ApWB90N_iv4_Y_Uq0-KwUeBwYSdQMU4,924
 aiagents4pharma/talk2knowledgegraphs/utils/enrichments/ollama.py,sha256=8eoxR-VHo0G7ReQIwje7xEhE-SJlHdef7_wJRpnvFIc,4116
+aiagents4pharma/talk2knowledgegraphs/utils/enrichments/pubchem_strings.py,sha256=qsVlDCGGDkUCv-R5_xFGhrtLS7P0CfagnM2qATwiOFM,1333
 aiagents4pharma/talk2knowledgegraphs/utils/extractions/__init__.py,sha256=7gwwtfzKhB8GuOBD47XRi0NprwEXkOzwNl5eeu-hDTI,86
 aiagents4pharma/talk2knowledgegraphs/utils/extractions/pcst.py,sha256=m5p0yoJb7I19ua5yeQfXPf7c4r6S1XPwttsrM7Qoy94,9336
 aiagents4pharma/talk2scholars/__init__.py,sha256=gphERyVKZHvOnMQsml7TIHlaIshHJ75R1J3FKExkfuY,120
 aiagents4pharma/talk2scholars/agents/__init__.py,sha256=ykszlVGxz3egLHZAttlNoTPxIrnQJZYva_ssR8fwIFk,117
-aiagents4pharma/talk2scholars/agents/main_agent.py,sha256=_spqbC2lS-pJ8lq59S7ViVT_aQK3jlA5vqlOx1Q7JOc,7177
-aiagents4pharma/talk2scholars/agents/s2_agent.py,sha256=QBsar2QrTgi9H5DniJzvbEUH3vKb_dlYojEcfsBXhPM,2765
+aiagents4pharma/talk2scholars/agents/main_agent.py,sha256=UiiqCMUko4t39uR4NtTvG3kRMf89x67dU_3NSAM4DWw,7603
+aiagents4pharma/talk2scholars/agents/s2_agent.py,sha256=XilKQvlxEVNYK5cEc44x-62OZZ1qG77v1r0FkwLexcw,4581
 aiagents4pharma/talk2scholars/configs/__init__.py,sha256=tf2gz8n7M4ko6xLdX_C925ELVIxoP6SgkPcbeh59ad4,151
-aiagents4pharma/talk2scholars/configs/config.yaml,sha256=a3_jCFAmsVL6gZuvzoRe4jL94mQaSbp0CUXZDUtqhZA,254
+aiagents4pharma/talk2scholars/configs/config.yaml,sha256=XaUi1aP0VGiZFeW_ZP68bVSiCJezied5yLzcx3Uljhc,308
 aiagents4pharma/talk2scholars/configs/agents/__init__.py,sha256=yyh7PB2oY_JulnpSQCWS4wwCH_uzIdt47O2Ay48x_oU,75
 aiagents4pharma/talk2scholars/configs/agents/talk2scholars/__init__.py,sha256=Tj4urOkjpu2cTlpJl0Fmr_18RZCR88vns-Gt-XquDzs,95
 aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
-aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml,sha256=Z85fv6orEpvdn-JGEmx0-45fsTJrlTRnqUYAhp1gZpY,679
+aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml,sha256=kizyUjDasjv3zt2xHcGMqTxnpR_FHAcOs1vgVaH7tsY,1882
 aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
-aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml,sha256=OCQ6wEh48qX73m49WJj2RHaws4kkKRUsGt6T2thDAzo,766
+aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml,sha256=lQUvQDLqqPIxn4TkL58atG2dQFhysNuNyAbd6P8km3g,585
 aiagents4pharma/talk2scholars/configs/app/__init__.py,sha256=JoSZV6N669kGMv5zLDszwf0ZjcRHx9TJfIqGhIIdPXE,70
 aiagents4pharma/talk2scholars/configs/app/frontend/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
-aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml,sha256=BX-J1zQyb0QJ7hcOFOnkJ8aWoWbjK4WE2VG7OZTOyKU,821
+aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml,sha256=wsELBdRLv6UqZ9QZfwpS7K4xfMj5s-a99-aXqIs6WEI,868
 aiagents4pharma/talk2scholars/configs/tools/__init__.py,sha256=w0BJK0MR6Et8Pw1htP8JV0Lr9F_N68CqvbpV14KBy_8,151
 aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
-aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml,sha256=70HSJ8WbS2Qbhur5FpuOPBjrea9g3TioM0gjGn6U1bE,369
+aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml,sha256=iEsEW89MlQwKsAW4ZAxLt4pDBwA1qxImYQ2dfONIf6c,442
+aiagents4pharma/talk2scholars/configs/tools/retrieve_semantic_scholar_paper_id/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
 aiagents4pharma/talk2scholars/configs/tools/search/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
-aiagents4pharma/talk2scholars/configs/tools/search/default.yaml,sha256=NznRVqB6EamMfsFc5hj5s9ygzl6rPuFPiy9ikcpqp68,486
+aiagents4pharma/talk2scholars/configs/tools/search/default.yaml,sha256=tw8N1Mms0qHQbIY3KGDNK1NuT19dQGPiagxzWDdOAJk,504
 aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
-aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml,sha256=4_gTvdVc-hf9GNxBKMGQd72s5h53Zy09j9qeZ9Fys04,578
+aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml,sha256=TILecrowsu5VGdJPeac6fl5AXSf3piSHN0oKdjY2q1o,596
 aiagents4pharma/talk2scholars/state/__init__.py,sha256=S6SxlszIMZSIMJehjevPF9sKyR-PAwWb5TEdo6xWXE8,103
-aiagents4pharma/talk2scholars/state/state_talk2scholars.py,sha256=6LwGCP1yluxcAf1PyWl7LaXEvxbaJ2-b2Nrxu4Z_KI0,978
+aiagents4pharma/talk2scholars/state/state_talk2scholars.py,sha256=Jbwckhhpv03KFQYkYkuPsYz5OxT0Si-mra7qrynH5Jo,2246
 aiagents4pharma/talk2scholars/tests/__init__.py,sha256=U3PsTiUZaUBD1IZanFGkDIOdFieDVJtGKQ5-woYUo8c,45
-aiagents4pharma/talk2scholars/tests/test_integration.py,sha256=JWRoXkYjB6mKjiGvzoqSjAFXX_L8s2dP67MGS9dk6Rs,7721
-aiagents4pharma/talk2scholars/tests/test_main_agent.py,sha256=_XPnyveZoBOgrrV_2ws8CLWAX338xBlyh1Zs6xoZKpk,6171
-aiagents4pharma/talk2scholars/tests/test_s2_agent.py,sha256=0VtCZ0C_4WyF5t7i8Cmv7P1FakmDTe6YfXNriFAiGEQ,4645
-aiagents4pharma/talk2scholars/tests/test_s2_tools.py,sha256=OU2ArjF07J1jKz4dLnhQ1RxNXgNBhgMqglxLIvKcXuk,7496
+aiagents4pharma/talk2scholars/tests/test_llm_main_integration.py,sha256=SAMG-Kb2S9sei8Us5vUWCUJikTKXPZVKQ6aJJPEhJsc,1880
+aiagents4pharma/talk2scholars/tests/test_main_agent.py,sha256=TTPfVGWWq6BXJVgfR958qttD6dGRnpJHZMqo86k4aMo,5562
+aiagents4pharma/talk2scholars/tests/test_s2_agent.py,sha256=-yEoG2v5SMkCLCrSA2DFcNE-xMOSn97N4UTomzCeW40,7559
+aiagents4pharma/talk2scholars/tests/test_s2_tools.py,sha256=QEwraJk9_Kp6ZSGYyYDXWH62wIjSwi1Pptwwbx1fuG0,13176
 aiagents4pharma/talk2scholars/tests/test_state.py,sha256=_iHXvoZnU_eruf8l1sQKBSCIVnxNkH_9VzkVtZZA6bY,384
 aiagents4pharma/talk2scholars/tools/__init__.py,sha256=YudBDRwaEzDnAcpxGZvEOfyh5-6xd51CTvTKTkywgXw,68
-aiagents4pharma/talk2scholars/tools/s2/__init__.py,sha256=9RQH3efTj6qkXk0ICKSc7Mzpkitt4gRGsQ1pGPrrREU,181
-aiagents4pharma/talk2scholars/tools/s2/display_results.py,sha256=Aap3P-9i80TDarkEZX0S62rzsVZw0ftzgUppK67rOhI,1446
-aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py,sha256=7GrMaZ15zLSmBvmek9NCATM4-O6BXuxa5Pwd9qJ6ygU,3887
-aiagents4pharma/talk2scholars/tools/s2/search.py,sha256=fF8i2s8ikF0qdM_c7KSAQvguG9Hjq0s2bPvoyLdx5wA,3311
-aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py,sha256=Z3NQgf3hHcPSisYJnbEErsyGFZPgad8CdaWe4XxiOgs,3978
-aiagents4pharma-1.20.1.dist-info/LICENSE,sha256=IcIbyB1Hyk5ZDah03VNQvJkbNk2hkBCDqQ8qtnCvB4Q,1077
-aiagents4pharma-1.20.1.dist-info/METADATA,sha256=b9jMg2iAMjZ0xAGOF-qrttUhkcPNqV3nyMRWgQUib6M,7757
-aiagents4pharma-1.20.1.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-aiagents4pharma-1.20.1.dist-info/top_level.txt,sha256=-AH8rMmrSnJtq7HaAObS78UU-cTCwvX660dSxeM7a0A,16
-aiagents4pharma-1.20.1.dist-info/RECORD,,
+aiagents4pharma/talk2scholars/tools/s2/__init__.py,sha256=wytqCmGm8Fbl8y5qLdIkxhhG8VHLYMifCGjbH_LK2Fc,258
+aiagents4pharma/talk2scholars/tools/s2/display_results.py,sha256=UR0PtEHGDpOhPH0Di5HT8-Fip2RkEMTJgzROsChb1gc,2959
+aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py,sha256=sYtoJWFmJuSTSHMrZmqiBfGS-mDKS1gAbKtNyjRnlwU,4979
+aiagents4pharma/talk2scholars/tools/s2/query_results.py,sha256=EUfzRh5Qc_tMl5fDIFb9PIsQkkrU4Xb5MR0sud_X5-c,2017
+aiagents4pharma/talk2scholars/tools/s2/retrieve_semantic_scholar_paper_id.py,sha256=Lg1L4HQCN2LaQEyWtLD73O67PMoXkPHi-Y8rCzHS0A4,2499
+aiagents4pharma/talk2scholars/tools/s2/search.py,sha256=mnBQWDuQ50UVw6B-bRuL8Ek1av-pEtdgzVMxpEA2BpI,4296
+aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py,sha256=xgnUj9W9JkeTvB2VJBJUAnia789GGNGqdqgJ_G16v2s,5120
+aiagents4pharma-1.22.0.dist-info/LICENSE,sha256=IcIbyB1Hyk5ZDah03VNQvJkbNk2hkBCDqQ8qtnCvB4Q,1077
+aiagents4pharma-1.22.0.dist-info/METADATA,sha256=qMCGdj4nouCcxKZOt2SwH53fDMIgPkoUTirEIvk2Mfs,7789
+aiagents4pharma-1.22.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+aiagents4pharma-1.22.0.dist-info/top_level.txt,sha256=-AH8rMmrSnJtq7HaAObS78UU-cTCwvX660dSxeM7a0A,16
+aiagents4pharma-1.22.0.dist-info/RECORD,,
aiagents4pharma/talk2scholars/tests/test_integration.py
@@ -1,237 +0,0 @@
-"""
-Integration tests for talk2scholars system.
-
-These tests ensure that:
-1. The main agent and sub-agent work together.
-2. The agents correctly interact with tools (search, recommendations).
-3. The full pipeline processes queries and updates state correctly.
-"""
-
-# pylint: disable=redefined-outer-name
-from unittest.mock import patch, Mock
-import pytest
-from langchain_core.messages import HumanMessage
-from ..agents.main_agent import get_app as get_main_app
-from ..agents.s2_agent import get_app as get_s2_app
-from ..state.state_talk2scholars import Talk2Scholars
-
-
-@pytest.fixture(autouse=True)
-def mock_hydra():
-    """Mock Hydra configuration to prevent external dependencies."""
-    with patch("hydra.initialize"), patch("hydra.compose") as mock_compose:
-        cfg_mock = Mock()
-        cfg_mock.agents.talk2scholars.main_agent.temperature = 0
-        cfg_mock.agents.talk2scholars.main_agent.main_agent = "Test main agent prompt"
-        cfg_mock.agents.talk2scholars.s2_agent.temperature = 0
-        cfg_mock.agents.talk2scholars.s2_agent.s2_agent = "Test s2 agent prompt"
-        mock_compose.return_value = cfg_mock
-        yield mock_compose
-
-
-@pytest.fixture(autouse=True)
-def mock_tools():
-    """Mock tools to prevent execution of real API calls."""
-    with (
-        patch(
-            "aiagents4pharma.talk2scholars.tools.s2.search.search_tool"
-        ) as mock_s2_search,
-        patch(
-            "aiagents4pharma.talk2scholars.tools.s2.display_results.display_results"
-        ) as mock_s2_display,
-        patch(
-            "aiagents4pharma.talk2scholars.tools.s2.single_paper_rec."
-            "get_single_paper_recommendations"
-        ) as mock_s2_single_rec,
-        patch(
-            "aiagents4pharma.talk2scholars.tools.s2.multi_paper_rec."
-            "get_multi_paper_recommendations"
-        ) as mock_s2_multi_rec,
-    ):
-
-        mock_s2_search.return_value = {"papers": {"id123": "Mock Paper"}}
-        mock_s2_display.return_value = "Displaying Mock Results"
-        mock_s2_single_rec.return_value = {"recommendations": ["Paper A", "Paper B"]}
-        mock_s2_multi_rec.return_value = {
-            "multi_recommendations": ["Paper X", "Paper Y"]
-        }
-
-        yield {
-            "search_tool": mock_s2_search,
-            "display_results": mock_s2_display,
-            "single_paper_rec": mock_s2_single_rec,
-            "multi_paper_rec": mock_s2_multi_rec,
-        }
-
-
-def test_full_workflow():
-    """Test the full workflow from main agent to S2 agent."""
-    thread_id = "test_thread"
-    main_app = get_main_app(thread_id)
-
-    # Define expected mock response with the actual structure
-    expected_paper = {
-        "530a059cb48477ad1e3d4f8f4b153274c8997332": {
-            "Title": "Explainable Artificial Intelligence",
-            "Abstract": None,
-            "Citation Count": 5544,
-            "Year": "2024",
-            "URL": "https://example.com/paper",
-        }
-    }
-
-    # Mock the search tool instead of the app
-    with patch(
-        "aiagents4pharma.talk2scholars.tools.s2.search.search_tool",
-        return_value={"papers": expected_paper},
-    ):
-        state = Talk2Scholars(messages=[HumanMessage(content="Find AI papers")])
-        result = main_app.invoke(
-            state,
-            config={
-                "configurable": {
-                    "thread_id": thread_id,
-                    "checkpoint_ns": "test_ns",
-                    "checkpoint_id": "test_checkpoint",
-                }
-            },
-        )
-
-        # Check values
-        assert "papers" in result
-        assert "messages" in result
-        assert len(result["papers"]) > 0
-
-
-def test_s2_agent_execution():
-    """Test if the S2 agent processes requests correctly and updates state."""
-    thread_id = "test_thread"
-    s2_app = get_s2_app(thread_id)
-
-    state = Talk2Scholars(messages=[HumanMessage(content="Get recommendations")])
-
-    result = s2_app.invoke(
-        state,
-        config={
-            "configurable": {
-                "thread_id": thread_id,
-                "checkpoint_ns": "test_ns",
-                "checkpoint_id": "test_checkpoint",
-            }
-        },
-    )
-
-    assert "messages" in result
-    assert "multi_papers" in result
-    assert result["multi_papers"] is not None
-
-
-def test_tool_integration(mock_tools):
-    """Test if the tools interact correctly with the workflow."""
-    thread_id = "test_thread"
-    s2_app = get_s2_app(thread_id)
-
-    state = Talk2Scholars(
-        messages=[HumanMessage(content="Search for AI ethics papers")]
-    )
-
-    mock_paper_id = "11159bdb213aaa243916f42f576396d483ba474b"
-    mock_response = {
-        "papers": {
-            mock_paper_id: {
-                "Title": "Mock AI Ethics Paper",
-                "Abstract": "A study on AI ethics",
-                "Citation Count": 100,
-                "URL": "https://example.com/mock-paper",
-            }
-        }
-    }
-
-    # Update both the fixture mock and patch the actual tool
-    mock_tools["search_tool"].return_value = {"papers": mock_response["papers"]}
-
-    with patch(
-        "aiagents4pharma.talk2scholars.tools.s2.search.search_tool",
-        return_value={"papers": mock_response["papers"]},
-    ):
-        result = s2_app.invoke(
-            state,
-            config={
-                "configurable": {
-                    "thread_id": thread_id,
-                    "checkpoint_ns": "test_ns",
-                    "checkpoint_id": "test_checkpoint",
-                }
-            },
-        )
-
-        assert "papers" in result
-        assert len(result["papers"]) > 0  # Verify we have papers
-        assert isinstance(result["papers"], dict)  # Verify it's a dictionary
-
-
-def test_empty_query():
-    """Test how the system handles an empty query."""
-    thread_id = "test_thread"
-    main_app = get_main_app(thread_id)
-
-    state = Talk2Scholars(messages=[HumanMessage(content="")])
-
-    # Mock the s2_agent app
-    mock_s2_app = get_s2_app(thread_id)
-
-    with patch(
-        "aiagents4pharma.talk2scholars.agents.s2_agent.get_app",
-        return_value=mock_s2_app,
-    ):
-        result = main_app.invoke(
-            state,
-            config={
-                "configurable": {
-                    "thread_id": thread_id,
-                    "checkpoint_ns": "test_ns",
-                    "checkpoint_id": "test_checkpoint",
-                }
-            },
-        )
-
-    assert "messages" in result
-    last_message = result["messages"][-1].content.lower()
-    assert any(
-        phrase in last_message
-        for phrase in ["no valid input", "how can i assist", "please provide a query"]
-    )
-
-
-def test_api_failure_handling():
-    """Test if the system gracefully handles an API failure."""
-    thread_id = "test_thread"
-    s2_app = get_s2_app(thread_id)
-
-    expected_error = "API Timeout: Connection failed"
-    with patch("requests.get", side_effect=Exception(expected_error)):
-        state = Talk2Scholars(messages=[HumanMessage(content="Find latest NLP papers")])
-
-        result = s2_app.invoke(
-            state,
-            config={
-                "configurable": {
-                    "thread_id": thread_id,
-                    "checkpoint_ns": "test_ns",
-                    "checkpoint_id": "test_checkpoint",
-                }
-            },
-        )
-
-        assert "messages" in result
-        last_message = result["messages"][-1].content.lower()
-
-        # Update assertions to match actual error message
-        assert any(
-            [
-                "unable to retrieve" in last_message,
-                "connection issue" in last_message,
-                "please try again later" in last_message,
-            ]
-        )
-        assert "nlp papers" in last_message  # Verify context is maintained