aiagents4pharma 1.20.0__py3-none-any.whl → 1.21.0__py3-none-any.whl

Files changed (29)
  1. aiagents4pharma/talk2biomodels/configs/config.yaml +5 -0
  2. aiagents4pharma/talk2scholars/agents/main_agent.py +90 -91
  3. aiagents4pharma/talk2scholars/agents/s2_agent.py +61 -17
  4. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml +31 -10
  5. aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml +8 -16
  6. aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml +11 -9
  7. aiagents4pharma/talk2scholars/configs/config.yaml +1 -0
  8. aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml +2 -0
  9. aiagents4pharma/talk2scholars/configs/tools/retrieve_semantic_scholar_paper_id/__init__.py +3 -0
  10. aiagents4pharma/talk2scholars/configs/tools/search/default.yaml +1 -0
  11. aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml +1 -0
  12. aiagents4pharma/talk2scholars/state/state_talk2scholars.py +36 -7
  13. aiagents4pharma/talk2scholars/tests/test_llm_main_integration.py +58 -0
  14. aiagents4pharma/talk2scholars/tests/test_main_agent.py +98 -122
  15. aiagents4pharma/talk2scholars/tests/test_s2_agent.py +95 -29
  16. aiagents4pharma/talk2scholars/tests/test_s2_tools.py +158 -22
  17. aiagents4pharma/talk2scholars/tools/s2/__init__.py +4 -2
  18. aiagents4pharma/talk2scholars/tools/s2/display_results.py +60 -21
  19. aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py +35 -8
  20. aiagents4pharma/talk2scholars/tools/s2/query_results.py +61 -0
  21. aiagents4pharma/talk2scholars/tools/s2/retrieve_semantic_scholar_paper_id.py +79 -0
  22. aiagents4pharma/talk2scholars/tools/s2/search.py +34 -10
  23. aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py +39 -9
  24. {aiagents4pharma-1.20.0.dist-info → aiagents4pharma-1.21.0.dist-info}/METADATA +2 -2
  25. {aiagents4pharma-1.20.0.dist-info → aiagents4pharma-1.21.0.dist-info}/RECORD +28 -24
  26. aiagents4pharma/talk2scholars/tests/test_integration.py +0 -237
  27. {aiagents4pharma-1.20.0.dist-info → aiagents4pharma-1.21.0.dist-info}/LICENSE +0 -0
  28. {aiagents4pharma-1.20.0.dist-info → aiagents4pharma-1.21.0.dist-info}/WHEEL +0 -0
  29. {aiagents4pharma-1.20.0.dist-info → aiagents4pharma-1.21.0.dist-info}/top_level.txt +0 -0
aiagents4pharma/talk2scholars/tools/s2/search.py

@@ -5,7 +5,7 @@ This tool is used to search for academic papers on Semantic Scholar.
 """
 
 import logging
-from typing import Annotated, Any, Dict, Optional
+from typing import Annotated, Any, Optional
 import hydra
 import requests
 from langchain_core.messages import ToolMessage
@@ -14,7 +14,6 @@ from langchain_core.tools.base import InjectedToolCallId
 from langgraph.types import Command
 from pydantic import BaseModel, Field
 
-
 # Configure logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -28,7 +27,7 @@ class SearchInput(BaseModel):
         "Be specific and include relevant academic terms."
     )
     limit: int = Field(
-        default=2, description="Maximum number of results to return", ge=1, le=100
+        default=5, description="Maximum number of results to return", ge=1, le=100
     )
     year: Optional[str] = Field(
         default=None,
@@ -44,13 +43,13 @@ with hydra.initialize(version_base=None, config_path="../../configs"):
     cfg = cfg.tools.search
 
 
-@tool(args_schema=SearchInput)
+@tool("search_tool", args_schema=SearchInput, parse_docstring=True)
 def search_tool(
     query: str,
     tool_call_id: Annotated[str, InjectedToolCallId],
-    limit: int = 2,
+    limit: int = 5,
     year: Optional[str] = None,
-) -> Dict[str, Any]:
+) -> Command[Any]:
     """
     Search for academic papers on Semantic Scholar.
 
@@ -62,9 +61,9 @@ def search_tool(
             Supports formats like "2024-", "-2024", "2024:2025". Defaults to None.
 
     Returns:
-        Dict[str, Any]: The search results and related information.
+        The number of papers found on Semantic Scholar.
     """
-    print("Starting paper search...")
+    logger.info("Searching for papers on %s", query)
     endpoint = cfg.api_endpoint
     params = {
         "query": query,
@@ -80,26 +79,51 @@ def search_tool(
     data = response.json()
     papers = data.get("data", [])
     logger.info("Received %d papers", len(papers))
+    if not papers:
+        return Command(
+            update={  # Place 'messages' inside 'update'
+                "messages": [
+                    ToolMessage(
+                        content="No papers found. Please try a different search query.",
+                        tool_call_id=tool_call_id,
+                    )
+                ]
+            }
+        )
     # Create a dictionary to store the papers
     filtered_papers = {
         paper["paperId"]: {
+            # "semantic_scholar_id": paper["paperId"],  # Store Semantic Scholar ID
             "Title": paper.get("title", "N/A"),
             "Abstract": paper.get("abstract", "N/A"),
             "Year": paper.get("year", "N/A"),
             "Citation Count": paper.get("citationCount", "N/A"),
             "URL": paper.get("url", "N/A"),
+            # "arXiv_ID": paper.get("externalIds", {}).get(
+            #     "ArXiv", "N/A"
+            # ),  # Extract arXiv ID
         }
         for paper in papers
         if paper.get("title") and paper.get("authors")
     }
 
+    logger.info("Filtered %d papers", len(filtered_papers))
+
+    content = "Search was successful."
+    content += " Here is a summary of the search results:"
+    content += f"Number of papers found: {len(filtered_papers)}\n"
+    content += f"Query: {query}\n"
+    content += f"Year: {year}\n" if year else ""
+
     return Command(
         update={
             "papers": filtered_papers,  # Now sending the dictionary directly
+            "last_displayed_papers": "papers",
             "messages": [
                 ToolMessage(
-                    content=f"Search Successful: {filtered_papers}",
-                    tool_call_id=tool_call_id
+                    content=content,
+                    tool_call_id=tool_call_id,
+                    artifact=filtered_papers,
                 )
             ],
         }
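
With this change, search_tool no longer returns a plain dictionary. It returns a langgraph Command whose update writes the filtered papers into the "papers" state key and also attaches the same dictionary to the ToolMessage as an artifact, while the message content sent back to the model is only a short summary. A minimal sketch of how downstream code could unpack such a Command (the helper is hypothetical and not part of the package; it only assumes the update layout shown above):

# Hypothetical helper: read back the papers carried by a Command shaped like
# the one search_tool builds in the diff above.
from typing import Any, Dict

from langchain_core.messages import ToolMessage
from langgraph.types import Command


def papers_from_command(cmd: Command) -> Dict[str, Any]:
    """Return the paper dictionary from the tool's state update, if present."""
    update = cmd.update or {}
    papers: Dict[str, Any] = update.get("papers", {})
    # The same dictionary is also attached as the ToolMessage artifact,
    # so a caller that only sees the message can still recover it.
    for message in update.get("messages", []):
        if isinstance(message, ToolMessage) and message.artifact:
            papers = message.artifact
    return papers

Because the summary string no longer embeds the full result set, the artifact and the "papers" state key are the only places the structured results are preserved.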
aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py

@@ -5,7 +5,7 @@ This tool is used to return recommendations for a single paper.
 """
 
 import logging
-from typing import Annotated, Any, Dict, Optional
+from typing import Annotated, Any, Optional
 import hydra
 import requests
 from langchain_core.messages import ToolMessage
@@ -26,7 +26,7 @@ class SinglePaperRecInput(BaseModel):
         description="Semantic Scholar Paper ID to get recommendations for (40-character string)"
     )
     limit: int = Field(
-        default=2,
+        default=5,
         description="Maximum number of recommendations to return",
         ge=1,
         le=500,
@@ -48,15 +48,16 @@ with hydra.initialize(version_base=None, config_path="../../configs"):
     cfg = cfg.tools.single_paper_recommendation
 
 
-@tool(args_schema=SinglePaperRecInput)
+@tool(args_schema=SinglePaperRecInput, parse_docstring=True)
 def get_single_paper_recommendations(
     paper_id: str,
     tool_call_id: Annotated[str, InjectedToolCallId],
-    limit: int = 2,
+    limit: int = 5,
     year: Optional[str] = None,
-) -> Dict[str, Any]:
+) -> Command[Any]:
     """
-    Get paper recommendations based on a single paper.
+    Get recommendations for on a single paper using its Semantic Scholar ID.
+    No other ID types are supported.
 
     Args:
         paper_id (str): The Semantic Scholar Paper ID to get recommendations for.
@@ -68,7 +69,9 @@ def get_single_paper_recommendations(
     Returns:
         Dict[str, Any]: The recommendations and related information.
     """
-    logger.info("Starting single paper recommendations search.")
+    logger.info(
+        "Starting single paper recommendations search with paper ID: %s", paper_id
+    )
 
     endpoint = f"{cfg.api_endpoint}/{paper_id}"
     params = {
@@ -90,32 +93,59 @@ def get_single_paper_recommendations(
         paper_id,
         response.status_code,
     )
+    if response.status_code != 200:
+        raise ValueError("Invalid paper ID or API error.")
     # print(f"Request params: {params}")
     logging.info("Request params: %s", params)
 
     data = response.json()
     recommendations = data.get("recommendedPapers", [])
 
+    if not recommendations:
+        return Command(
+            update={
+                "papers": {},
+                "messages": [
+                    ToolMessage(
+                        content=f"No recommendations found for {paper_id}.",
+                        tool_call_id=tool_call_id,
+                    )
+                ],
+            }
+        )
+
     # Extract paper ID and title from recommendations
     filtered_papers = {
         paper["paperId"]: {
+            # "semantic_scholar_id": paper["paperId"],  # Store Semantic Scholar ID
             "Title": paper.get("title", "N/A"),
             "Abstract": paper.get("abstract", "N/A"),
            "Year": paper.get("year", "N/A"),
            "Citation Count": paper.get("citationCount", "N/A"),
            "URL": paper.get("url", "N/A"),
+            # "arXiv_ID": paper.get("externalIds", {}).get(
+            #     "ArXiv", "N/A"
+            # ),  # Extract arXiv ID
        }
        for paper in recommendations
        if paper.get("title") and paper.get("authors")
    }
 
+    content = "Recommendations based on a single paper were successful."
+    content += " Here is a summary of the recommendations:"
+    content += f"Number of papers found: {len(filtered_papers)}\n"
+    content += f"Query Paper ID: {paper_id}\n"
+    content += f"Year: {year}\n" if year else ""
+
     return Command(
         update={
             "papers": filtered_papers,  # Now sending the dictionary directly
+            "last_displayed_papers": "papers",
             "messages": [
                 ToolMessage(
-                    content=f"Search Successful: {filtered_papers}",
-                    tool_call_id=tool_call_id
+                    content=content,
+                    tool_call_id=tool_call_id,
+                    artifact=filtered_papers,
                 )
             ],
         }
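
Both updated tools also write "last_displayed_papers": "papers" into the state, storing the name of the state key that holds the current result set rather than copying the data itself. A small illustrative sketch of how a display step might resolve that indirection (the key names follow the diff; the function and example state are hypothetical):

# Illustrative only: resolve the 'last_displayed_papers' indirection used by
# the updated tools. The state layout mirrors the diff; the data is fake.
from typing import Any, Dict


def resolve_last_displayed(state: Dict[str, Any]) -> Dict[str, Any]:
    """Follow the key name stored under 'last_displayed_papers' to the data."""
    key = state.get("last_displayed_papers")
    return state.get(key, {}) if isinstance(key, str) else {}


example_state = {
    "papers": {"0000000000000000000000000000000000000000": {"Title": "Example paper"}},
    "last_displayed_papers": "papers",
}
assert resolve_last_displayed(example_state) == example_state["papers"]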
{aiagents4pharma-1.20.0.dist-info → aiagents4pharma-1.21.0.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: aiagents4pharma
-Version: 1.20.0
+Version: 1.21.0
 Summary: AI Agents for drug discovery, drug development, and other pharmaceutical R&D.
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
@@ -110,7 +110,7 @@ free credits [here](https://build.nvidia.com/explore/discover)._
    ```
 2. **Install dependencies:**
    ```bash
-   pip install .
+   pip install -r requirements.txt
    ```
 3. **Initialize OPENAI_API_KEY and NVIDIA_API_KEY**
    ```bash
{aiagents4pharma-1.20.0.dist-info → aiagents4pharma-1.21.0.dist-info}/RECORD

@@ -7,6 +7,7 @@ aiagents4pharma/talk2biomodels/api/kegg.py,sha256=QzYDAfJ16E7tbHGxP8ZNWRizMkMRS_
 aiagents4pharma/talk2biomodels/api/ols.py,sha256=qq0Qy-gJDxanQW-HfCChDsTQsY1M41ua8hMlTnfuzrA,2202
 aiagents4pharma/talk2biomodels/api/uniprot.py,sha256=aPUAVBR7UYXDuuhDpKezAK2aTMzo-NxFYFq6C0W5u6U,1175
 aiagents4pharma/talk2biomodels/configs/__init__.py,sha256=safyFKhkd5Wlirl9dMZIHWDLTpY2oLw9wjIM7ZtLIHk,88
+aiagents4pharma/talk2biomodels/configs/config.yaml,sha256=X0CMsnx6hHNvV04wsENQSGXadx0aKIy6mziSopVUdZI,116
 aiagents4pharma/talk2biomodels/configs/agents/__init__.py,sha256=_ZoG8snICK2bidWtc2KOGs738LWg9_r66V9mOMnEb-E,71
 aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/__init__.py,sha256=-fAORvyFmG2iSvFOFDixmt9OTQRR58y89uhhu2EgbA8,46
 aiagents4pharma/talk2biomodels/configs/agents/t2b_agent/default.yaml,sha256=pSViMKwKyMQDm8LzbfIaGdxph73iHYaXMiv5YOuxM7k,536
@@ -114,42 +115,45 @@ aiagents4pharma/talk2knowledgegraphs/utils/extractions/__init__.py,sha256=7gwwtf
 aiagents4pharma/talk2knowledgegraphs/utils/extractions/pcst.py,sha256=m5p0yoJb7I19ua5yeQfXPf7c4r6S1XPwttsrM7Qoy94,9336
 aiagents4pharma/talk2scholars/__init__.py,sha256=gphERyVKZHvOnMQsml7TIHlaIshHJ75R1J3FKExkfuY,120
 aiagents4pharma/talk2scholars/agents/__init__.py,sha256=ykszlVGxz3egLHZAttlNoTPxIrnQJZYva_ssR8fwIFk,117
-aiagents4pharma/talk2scholars/agents/main_agent.py,sha256=_spqbC2lS-pJ8lq59S7ViVT_aQK3jlA5vqlOx1Q7JOc,7177
-aiagents4pharma/talk2scholars/agents/s2_agent.py,sha256=QBsar2QrTgi9H5DniJzvbEUH3vKb_dlYojEcfsBXhPM,2765
+aiagents4pharma/talk2scholars/agents/main_agent.py,sha256=UiiqCMUko4t39uR4NtTvG3kRMf89x67dU_3NSAM4DWw,7603
+aiagents4pharma/talk2scholars/agents/s2_agent.py,sha256=XilKQvlxEVNYK5cEc44x-62OZZ1qG77v1r0FkwLexcw,4581
 aiagents4pharma/talk2scholars/configs/__init__.py,sha256=tf2gz8n7M4ko6xLdX_C925ELVIxoP6SgkPcbeh59ad4,151
-aiagents4pharma/talk2scholars/configs/config.yaml,sha256=a3_jCFAmsVL6gZuvzoRe4jL94mQaSbp0CUXZDUtqhZA,254
+aiagents4pharma/talk2scholars/configs/config.yaml,sha256=XaUi1aP0VGiZFeW_ZP68bVSiCJezied5yLzcx3Uljhc,308
 aiagents4pharma/talk2scholars/configs/agents/__init__.py,sha256=yyh7PB2oY_JulnpSQCWS4wwCH_uzIdt47O2Ay48x_oU,75
 aiagents4pharma/talk2scholars/configs/agents/talk2scholars/__init__.py,sha256=Tj4urOkjpu2cTlpJl0Fmr_18RZCR88vns-Gt-XquDzs,95
 aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
-aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml,sha256=Z85fv6orEpvdn-JGEmx0-45fsTJrlTRnqUYAhp1gZpY,679
+aiagents4pharma/talk2scholars/configs/agents/talk2scholars/main_agent/default.yaml,sha256=kizyUjDasjv3zt2xHcGMqTxnpR_FHAcOs1vgVaH7tsY,1882
 aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
-aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml,sha256=OCQ6wEh48qX73m49WJj2RHaws4kkKRUsGt6T2thDAzo,766
+aiagents4pharma/talk2scholars/configs/agents/talk2scholars/s2_agent/default.yaml,sha256=lQUvQDLqqPIxn4TkL58atG2dQFhysNuNyAbd6P8km3g,585
 aiagents4pharma/talk2scholars/configs/app/__init__.py,sha256=JoSZV6N669kGMv5zLDszwf0ZjcRHx9TJfIqGhIIdPXE,70
 aiagents4pharma/talk2scholars/configs/app/frontend/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
-aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml,sha256=BX-J1zQyb0QJ7hcOFOnkJ8aWoWbjK4WE2VG7OZTOyKU,821
+aiagents4pharma/talk2scholars/configs/app/frontend/default.yaml,sha256=wsELBdRLv6UqZ9QZfwpS7K4xfMj5s-a99-aXqIs6WEI,868
 aiagents4pharma/talk2scholars/configs/tools/__init__.py,sha256=w0BJK0MR6Et8Pw1htP8JV0Lr9F_N68CqvbpV14KBy_8,151
 aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
-aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml,sha256=70HSJ8WbS2Qbhur5FpuOPBjrea9g3TioM0gjGn6U1bE,369
+aiagents4pharma/talk2scholars/configs/tools/multi_paper_recommendation/default.yaml,sha256=iEsEW89MlQwKsAW4ZAxLt4pDBwA1qxImYQ2dfONIf6c,442
+aiagents4pharma/talk2scholars/configs/tools/retrieve_semantic_scholar_paper_id/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
 aiagents4pharma/talk2scholars/configs/tools/search/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
-aiagents4pharma/talk2scholars/configs/tools/search/default.yaml,sha256=NznRVqB6EamMfsFc5hj5s9ygzl6rPuFPiy9ikcpqp68,486
+aiagents4pharma/talk2scholars/configs/tools/search/default.yaml,sha256=tw8N1Mms0qHQbIY3KGDNK1NuT19dQGPiagxzWDdOAJk,504
 aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/__init__.py,sha256=fqQQ-GlRcbzru2KmEk3oMma0R6_SzGM8dOXzYeU4oVA,46
-aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml,sha256=4_gTvdVc-hf9GNxBKMGQd72s5h53Zy09j9qeZ9Fys04,578
+aiagents4pharma/talk2scholars/configs/tools/single_paper_recommendation/default.yaml,sha256=TILecrowsu5VGdJPeac6fl5AXSf3piSHN0oKdjY2q1o,596
 aiagents4pharma/talk2scholars/state/__init__.py,sha256=S6SxlszIMZSIMJehjevPF9sKyR-PAwWb5TEdo6xWXE8,103
-aiagents4pharma/talk2scholars/state/state_talk2scholars.py,sha256=6LwGCP1yluxcAf1PyWl7LaXEvxbaJ2-b2Nrxu4Z_KI0,978
+aiagents4pharma/talk2scholars/state/state_talk2scholars.py,sha256=Jbwckhhpv03KFQYkYkuPsYz5OxT0Si-mra7qrynH5Jo,2246
 aiagents4pharma/talk2scholars/tests/__init__.py,sha256=U3PsTiUZaUBD1IZanFGkDIOdFieDVJtGKQ5-woYUo8c,45
-aiagents4pharma/talk2scholars/tests/test_integration.py,sha256=JWRoXkYjB6mKjiGvzoqSjAFXX_L8s2dP67MGS9dk6Rs,7721
-aiagents4pharma/talk2scholars/tests/test_main_agent.py,sha256=_XPnyveZoBOgrrV_2ws8CLWAX338xBlyh1Zs6xoZKpk,6171
-aiagents4pharma/talk2scholars/tests/test_s2_agent.py,sha256=0VtCZ0C_4WyF5t7i8Cmv7P1FakmDTe6YfXNriFAiGEQ,4645
-aiagents4pharma/talk2scholars/tests/test_s2_tools.py,sha256=OU2ArjF07J1jKz4dLnhQ1RxNXgNBhgMqglxLIvKcXuk,7496
+aiagents4pharma/talk2scholars/tests/test_llm_main_integration.py,sha256=SAMG-Kb2S9sei8Us5vUWCUJikTKXPZVKQ6aJJPEhJsc,1880
+aiagents4pharma/talk2scholars/tests/test_main_agent.py,sha256=TTPfVGWWq6BXJVgfR958qttD6dGRnpJHZMqo86k4aMo,5562
+aiagents4pharma/talk2scholars/tests/test_s2_agent.py,sha256=-yEoG2v5SMkCLCrSA2DFcNE-xMOSn97N4UTomzCeW40,7559
+aiagents4pharma/talk2scholars/tests/test_s2_tools.py,sha256=QEwraJk9_Kp6ZSGYyYDXWH62wIjSwi1Pptwwbx1fuG0,13176
 aiagents4pharma/talk2scholars/tests/test_state.py,sha256=_iHXvoZnU_eruf8l1sQKBSCIVnxNkH_9VzkVtZZA6bY,384
 aiagents4pharma/talk2scholars/tools/__init__.py,sha256=YudBDRwaEzDnAcpxGZvEOfyh5-6xd51CTvTKTkywgXw,68
-aiagents4pharma/talk2scholars/tools/s2/__init__.py,sha256=9RQH3efTj6qkXk0ICKSc7Mzpkitt4gRGsQ1pGPrrREU,181
-aiagents4pharma/talk2scholars/tools/s2/display_results.py,sha256=Aap3P-9i80TDarkEZX0S62rzsVZw0ftzgUppK67rOhI,1446
-aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py,sha256=7GrMaZ15zLSmBvmek9NCATM4-O6BXuxa5Pwd9qJ6ygU,3887
-aiagents4pharma/talk2scholars/tools/s2/search.py,sha256=fF8i2s8ikF0qdM_c7KSAQvguG9Hjq0s2bPvoyLdx5wA,3311
-aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py,sha256=Z3NQgf3hHcPSisYJnbEErsyGFZPgad8CdaWe4XxiOgs,3978
-aiagents4pharma-1.20.0.dist-info/LICENSE,sha256=IcIbyB1Hyk5ZDah03VNQvJkbNk2hkBCDqQ8qtnCvB4Q,1077
-aiagents4pharma-1.20.0.dist-info/METADATA,sha256=GZWGNk_tXc9mtrsvC5ubW6zbFUVPzIRDNNS2TPnMQBQ,7739
-aiagents4pharma-1.20.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-aiagents4pharma-1.20.0.dist-info/top_level.txt,sha256=-AH8rMmrSnJtq7HaAObS78UU-cTCwvX660dSxeM7a0A,16
-aiagents4pharma-1.20.0.dist-info/RECORD,,
+aiagents4pharma/talk2scholars/tools/s2/__init__.py,sha256=wytqCmGm8Fbl8y5qLdIkxhhG8VHLYMifCGjbH_LK2Fc,258
+aiagents4pharma/talk2scholars/tools/s2/display_results.py,sha256=UR0PtEHGDpOhPH0Di5HT8-Fip2RkEMTJgzROsChb1gc,2959
+aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py,sha256=sYtoJWFmJuSTSHMrZmqiBfGS-mDKS1gAbKtNyjRnlwU,4979
+aiagents4pharma/talk2scholars/tools/s2/query_results.py,sha256=EUfzRh5Qc_tMl5fDIFb9PIsQkkrU4Xb5MR0sud_X5-c,2017
+aiagents4pharma/talk2scholars/tools/s2/retrieve_semantic_scholar_paper_id.py,sha256=Lg1L4HQCN2LaQEyWtLD73O67PMoXkPHi-Y8rCzHS0A4,2499
+aiagents4pharma/talk2scholars/tools/s2/search.py,sha256=mnBQWDuQ50UVw6B-bRuL8Ek1av-pEtdgzVMxpEA2BpI,4296
+aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py,sha256=xgnUj9W9JkeTvB2VJBJUAnia789GGNGqdqgJ_G16v2s,5120
+aiagents4pharma-1.21.0.dist-info/LICENSE,sha256=IcIbyB1Hyk5ZDah03VNQvJkbNk2hkBCDqQ8qtnCvB4Q,1077
+aiagents4pharma-1.21.0.dist-info/METADATA,sha256=YsjDHw3yfqfPClv0N3j35AObxHBhStDojFUUslyd_1Q,7757
+aiagents4pharma-1.21.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+aiagents4pharma-1.21.0.dist-info/top_level.txt,sha256=-AH8rMmrSnJtq7HaAObS78UU-cTCwvX660dSxeM7a0A,16
+aiagents4pharma-1.21.0.dist-info/RECORD,,
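
For readers unfamiliar with the RECORD file being diffed here: each entry has the form path,sha256=<digest>,<size>, where the digest is an unpadded URL-safe base64 encoding of the file's SHA-256 hash. A small standard-library sketch that recomputes such an entry, which can be used to spot-check an installed file against the lines above (the example path is illustrative):

# Sketch: rebuild one RECORD-style line for a file on disk so it can be
# compared against the entries in the diff above.
import base64
import hashlib
from pathlib import Path


def record_line(path: str) -> str:
    """Return 'path,sha256=<urlsafe-b64 digest, no padding>,<size in bytes>'."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"


# Example usage (path is illustrative):
# print(record_line("aiagents4pharma/talk2scholars/tools/s2/search.py"))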
aiagents4pharma/talk2scholars/tests/test_integration.py (deleted)

@@ -1,237 +0,0 @@
-"""
-Integration tests for talk2scholars system.
-
-These tests ensure that:
-1. The main agent and sub-agent work together.
-2. The agents correctly interact with tools (search, recommendations).
-3. The full pipeline processes queries and updates state correctly.
-"""
-
-# pylint: disable=redefined-outer-name
-from unittest.mock import patch, Mock
-import pytest
-from langchain_core.messages import HumanMessage
-from ..agents.main_agent import get_app as get_main_app
-from ..agents.s2_agent import get_app as get_s2_app
-from ..state.state_talk2scholars import Talk2Scholars
-
-
-@pytest.fixture(autouse=True)
-def mock_hydra():
-    """Mock Hydra configuration to prevent external dependencies."""
-    with patch("hydra.initialize"), patch("hydra.compose") as mock_compose:
-        cfg_mock = Mock()
-        cfg_mock.agents.talk2scholars.main_agent.temperature = 0
-        cfg_mock.agents.talk2scholars.main_agent.main_agent = "Test main agent prompt"
-        cfg_mock.agents.talk2scholars.s2_agent.temperature = 0
-        cfg_mock.agents.talk2scholars.s2_agent.s2_agent = "Test s2 agent prompt"
-        mock_compose.return_value = cfg_mock
-        yield mock_compose
-
-
-@pytest.fixture(autouse=True)
-def mock_tools():
-    """Mock tools to prevent execution of real API calls."""
-    with (
-        patch(
-            "aiagents4pharma.talk2scholars.tools.s2.search.search_tool"
-        ) as mock_s2_search,
-        patch(
-            "aiagents4pharma.talk2scholars.tools.s2.display_results.display_results"
-        ) as mock_s2_display,
-        patch(
-            "aiagents4pharma.talk2scholars.tools.s2.single_paper_rec."
-            "get_single_paper_recommendations"
-        ) as mock_s2_single_rec,
-        patch(
-            "aiagents4pharma.talk2scholars.tools.s2.multi_paper_rec."
-            "get_multi_paper_recommendations"
-        ) as mock_s2_multi_rec,
-    ):
-
-        mock_s2_search.return_value = {"papers": {"id123": "Mock Paper"}}
-        mock_s2_display.return_value = "Displaying Mock Results"
-        mock_s2_single_rec.return_value = {"recommendations": ["Paper A", "Paper B"]}
-        mock_s2_multi_rec.return_value = {
-            "multi_recommendations": ["Paper X", "Paper Y"]
-        }
-
-        yield {
-            "search_tool": mock_s2_search,
-            "display_results": mock_s2_display,
-            "single_paper_rec": mock_s2_single_rec,
-            "multi_paper_rec": mock_s2_multi_rec,
-        }
-
-
-def test_full_workflow():
-    """Test the full workflow from main agent to S2 agent."""
-    thread_id = "test_thread"
-    main_app = get_main_app(thread_id)
-
-    # Define expected mock response with the actual structure
-    expected_paper = {
-        "530a059cb48477ad1e3d4f8f4b153274c8997332": {
-            "Title": "Explainable Artificial Intelligence",
-            "Abstract": None,
-            "Citation Count": 5544,
-            "Year": "2024",
-            "URL": "https://example.com/paper",
-        }
-    }
-
-    # Mock the search tool instead of the app
-    with patch(
-        "aiagents4pharma.talk2scholars.tools.s2.search.search_tool",
-        return_value={"papers": expected_paper},
-    ):
-        state = Talk2Scholars(messages=[HumanMessage(content="Find AI papers")])
-        result = main_app.invoke(
-            state,
-            config={
-                "configurable": {
-                    "thread_id": thread_id,
-                    "checkpoint_ns": "test_ns",
-                    "checkpoint_id": "test_checkpoint",
-                }
-            },
-        )
-
-    # Check values
-    assert "papers" in result
-    assert "messages" in result
-    assert len(result["papers"]) > 0
-
-
-def test_s2_agent_execution():
-    """Test if the S2 agent processes requests correctly and updates state."""
-    thread_id = "test_thread"
-    s2_app = get_s2_app(thread_id)
-
-    state = Talk2Scholars(messages=[HumanMessage(content="Get recommendations")])
-
-    result = s2_app.invoke(
-        state,
-        config={
-            "configurable": {
-                "thread_id": thread_id,
-                "checkpoint_ns": "test_ns",
-                "checkpoint_id": "test_checkpoint",
-            }
-        },
-    )
-
-    assert "messages" in result
-    assert "multi_papers" in result
-    assert result["multi_papers"] is not None
-
-
-def test_tool_integration(mock_tools):
-    """Test if the tools interact correctly with the workflow."""
-    thread_id = "test_thread"
-    s2_app = get_s2_app(thread_id)
-
-    state = Talk2Scholars(
-        messages=[HumanMessage(content="Search for AI ethics papers")]
-    )
-
-    mock_paper_id = "11159bdb213aaa243916f42f576396d483ba474b"
-    mock_response = {
-        "papers": {
-            mock_paper_id: {
-                "Title": "Mock AI Ethics Paper",
-                "Abstract": "A study on AI ethics",
-                "Citation Count": 100,
-                "URL": "https://example.com/mock-paper",
-            }
-        }
-    }
-
-    # Update both the fixture mock and patch the actual tool
-    mock_tools["search_tool"].return_value = {"papers": mock_response["papers"]}
-
-    with patch(
-        "aiagents4pharma.talk2scholars.tools.s2.search.search_tool",
-        return_value={"papers": mock_response["papers"]},
-    ):
-        result = s2_app.invoke(
-            state,
-            config={
-                "configurable": {
-                    "thread_id": thread_id,
-                    "checkpoint_ns": "test_ns",
-                    "checkpoint_id": "test_checkpoint",
-                }
-            },
-        )
-
-    assert "papers" in result
-    assert len(result["papers"]) > 0  # Verify we have papers
-    assert isinstance(result["papers"], dict)  # Verify it's a dictionary
-
-
-def test_empty_query():
-    """Test how the system handles an empty query."""
-    thread_id = "test_thread"
-    main_app = get_main_app(thread_id)
-
-    state = Talk2Scholars(messages=[HumanMessage(content="")])
-
-    # Mock the s2_agent app
-    mock_s2_app = get_s2_app(thread_id)
-
-    with patch(
-        "aiagents4pharma.talk2scholars.agents.s2_agent.get_app",
-        return_value=mock_s2_app,
-    ):
-        result = main_app.invoke(
-            state,
-            config={
-                "configurable": {
-                    "thread_id": thread_id,
-                    "checkpoint_ns": "test_ns",
-                    "checkpoint_id": "test_checkpoint",
-                }
-            },
-        )
-
-    assert "messages" in result
-    last_message = result["messages"][-1].content.lower()
-    assert any(
-        phrase in last_message
-        for phrase in ["no valid input", "how can i assist", "please provide a query"]
-    )
-
-
-def test_api_failure_handling():
-    """Test if the system gracefully handles an API failure."""
-    thread_id = "test_thread"
-    s2_app = get_s2_app(thread_id)
-
-    expected_error = "API Timeout: Connection failed"
-    with patch("requests.get", side_effect=Exception(expected_error)):
-        state = Talk2Scholars(messages=[HumanMessage(content="Find latest NLP papers")])
-
-        result = s2_app.invoke(
-            state,
-            config={
-                "configurable": {
-                    "thread_id": thread_id,
-                    "checkpoint_ns": "test_ns",
-                    "checkpoint_id": "test_checkpoint",
-                }
-            },
-        )
-
-    assert "messages" in result
-    last_message = result["messages"][-1].content.lower()
-
-    # Update assertions to match actual error message
-    assert any(
-        [
-            "unable to retrieve" in last_message,
-            "connection issue" in last_message,
-            "please try again later" in last_message,
-        ]
-    )
-    assert "nlp papers" in last_message  # Verify context is maintained