aiagents4pharma 1.30.2__py3-none-any.whl → 1.30.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiagents4pharma/talk2scholars/__init__.py +2 -0
- aiagents4pharma/talk2scholars/agents/__init__.py +8 -0
- aiagents4pharma/talk2scholars/configs/__init__.py +2 -0
- aiagents4pharma/talk2scholars/configs/agents/__init__.py +2 -0
- aiagents4pharma/talk2scholars/configs/agents/talk2scholars/__init__.py +2 -0
- aiagents4pharma/talk2scholars/configs/app/__init__.py +2 -0
- aiagents4pharma/talk2scholars/configs/tools/__init__.py +9 -0
- aiagents4pharma/talk2scholars/state/__init__.py +4 -2
- aiagents4pharma/talk2scholars/tests/test_s2_multi.py +10 -8
- aiagents4pharma/talk2scholars/tests/test_s2_search.py +9 -5
- aiagents4pharma/talk2scholars/tests/test_s2_single.py +7 -7
- aiagents4pharma/talk2scholars/tests/test_zotero_path.py +25 -11
- aiagents4pharma/talk2scholars/tests/test_zotero_read.py +49 -35
- aiagents4pharma/talk2scholars/tests/test_zotero_write.py +10 -10
- aiagents4pharma/talk2scholars/tools/__init__.py +3 -0
- aiagents4pharma/talk2scholars/tools/pdf/__init__.py +4 -2
- aiagents4pharma/talk2scholars/tools/s2/__init__.py +9 -0
- aiagents4pharma/talk2scholars/tools/s2/multi_paper_rec.py +9 -135
- aiagents4pharma/talk2scholars/tools/s2/search.py +8 -114
- aiagents4pharma/talk2scholars/tools/s2/single_paper_rec.py +8 -126
- aiagents4pharma/talk2scholars/tools/s2/utils/__init__.py +7 -0
- aiagents4pharma/talk2scholars/tools/s2/utils/multi_helper.py +194 -0
- aiagents4pharma/talk2scholars/tools/s2/utils/search_helper.py +175 -0
- aiagents4pharma/talk2scholars/tools/s2/utils/single_helper.py +186 -0
- aiagents4pharma/talk2scholars/tools/zotero/__init__.py +2 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/__init__.py +5 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/read_helper.py +167 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/review_helper.py +78 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/write_helper.py +197 -0
- aiagents4pharma/talk2scholars/tools/zotero/utils/zotero_path.py +1 -1
- aiagents4pharma/talk2scholars/tools/zotero/zotero_read.py +9 -136
- aiagents4pharma/talk2scholars/tools/zotero/zotero_review.py +14 -48
- aiagents4pharma/talk2scholars/tools/zotero/zotero_write.py +22 -147
- {aiagents4pharma-1.30.2.dist-info → aiagents4pharma-1.30.4.dist-info}/METADATA +1 -1
- {aiagents4pharma-1.30.2.dist-info → aiagents4pharma-1.30.4.dist-info}/RECORD +38 -31
- {aiagents4pharma-1.30.2.dist-info → aiagents4pharma-1.30.4.dist-info}/WHEEL +0 -0
- {aiagents4pharma-1.30.2.dist-info → aiagents4pharma-1.30.4.dist-info}/licenses/LICENSE +0 -0
- {aiagents4pharma-1.30.2.dist-info → aiagents4pharma-1.30.4.dist-info}/top_level.txt +0 -0
aiagents4pharma/talk2scholars/tools/s2/utils/multi_helper.py (new file)
@@ -0,0 +1,194 @@
#!/usr/bin/env python3

"""
Utility for fetching recommendations based on multiple papers.
"""

import json
import logging
from typing import Any, List, Optional, Dict
import hydra
import requests


# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class MultiPaperRecData:
    """Helper class to organize multi-paper recommendation data."""

    def __init__(
        self,
        paper_ids: List[str],
        limit: int,
        year: Optional[str],
        tool_call_id: str,
    ):
        self.paper_ids = paper_ids
        self.limit = limit
        self.year = year
        self.tool_call_id = tool_call_id
        self.cfg = self._load_config()
        self.endpoint = self.cfg.api_endpoint
        self.headers = self.cfg.headers
        self.payload = {"positivePaperIds": paper_ids, "negativePaperIds": []}
        self.params = self._create_params()
        self.response = None
        self.data = None
        self.recommendations = []
        self.filtered_papers = {}
        self.content = ""

    def _load_config(self) -> Any:
        """Load hydra configuration."""
        with hydra.initialize(version_base=None, config_path="../../../configs"):
            cfg = hydra.compose(
                config_name="config",
                overrides=["tools/multi_paper_recommendation=default"],
            )
            logger.info("Loaded configuration for multi-paper recommendation tool")
            return cfg.tools.multi_paper_recommendation

    def _create_params(self) -> Dict[str, Any]:
        """Create parameters for the API request."""
        params = {
            "limit": min(self.limit, 500),
            "fields": ",".join(self.cfg.api_fields),
        }
        if self.year:
            params["year"] = self.year
        return params

    def _fetch_recommendations(self) -> None:
        """Fetch recommendations from Semantic Scholar API."""
        logger.info(
            "Starting multi-paper recommendations search with paper IDs: %s",
            self.paper_ids,
        )

        # Wrap API call in try/except to catch connectivity issues and validate response format
        for attempt in range(10):
            try:
                self.response = requests.post(
                    self.endpoint,
                    headers=self.headers,
                    params=self.params,
                    data=json.dumps(self.payload),
                    timeout=self.cfg.request_timeout,
                )
                self.response.raise_for_status()  # Raises HTTPError for bad responses
                break  # Exit loop if request is successful
            except requests.exceptions.RequestException as e:
                logger.error(
                    "Attempt %d: Failed to connect to Semantic Scholar API for "
                    "multi-paper recommendations: %s",
                    attempt + 1,
                    e,
                )
                if attempt == 9:  # Last attempt
                    raise RuntimeError(
                        "Failed to connect to Semantic Scholar API after 10 attempts."
                        "Please retry the same query."
                    ) from e

        if self.response is None:
            raise RuntimeError(
                "Failed to obtain a response from the Semantic Scholar API."
            )

        logger.info(
            "API Response Status for multi-paper recommendations: %s",
            self.response.status_code,
        )
        logger.info("Request params: %s", self.params)

        self.data = self.response.json()

        # Check for expected data format
        if "recommendedPapers" not in self.data:
            logger.error("Unexpected API response format: %s", self.data)
            raise RuntimeError(
                "Unexpected response from Semantic Scholar API. The results could not be "
                "retrieved due to an unexpected format. "
                "Please modify your search query and try again."
            )

        self.recommendations = self.data.get("recommendedPapers", [])
        if not self.recommendations:
            logger.error(
                "No recommendations returned from API for paper IDs: %s", self.paper_ids
            )
            raise RuntimeError(
                "No recommendations were found for your query. Consider refining your search "
                "by using more specific keywords or different terms."
            )

    def _filter_papers(self) -> None:
        """Filter and format papers."""
        self.filtered_papers = {
            paper["paperId"]: {
                "semantic_scholar_paper_id": paper["paperId"],
                "Title": paper.get("title", "N/A"),
                "Abstract": paper.get("abstract", "N/A"),
                "Year": paper.get("year", "N/A"),
                "Publication Date": paper.get("publicationDate", "N/A"),
                "Venue": paper.get("venue", "N/A"),
                "Journal Name": (paper.get("journal") or {}).get("name", "N/A"),
                "Citation Count": paper.get("citationCount", "N/A"),
                "Authors": [
                    f"{author.get('name', 'N/A')} (ID: {author.get('authorId', 'N/A')})"
                    for author in paper.get("authors", [])
                ],
                "URL": paper.get("url", "N/A"),
                "arxiv_id": paper.get("externalIds", {}).get("ArXiv", "N/A"),
            }
            for paper in self.recommendations
            if paper.get("title") and paper.get("authors")
        }

        logger.info("Filtered %d papers", len(self.filtered_papers))

    def _create_content(self) -> None:
        """Create the content message for the response."""
        top_papers = list(self.filtered_papers.values())[:3]
        top_papers_info = "\n".join(
            [
                f"{i+1}. {paper['Title']} ({paper['Year']}; "
                f"semantic_scholar_paper_id: {paper['semantic_scholar_paper_id']}; "
                f"arXiv ID: {paper['arxiv_id']})"
                for i, paper in enumerate(top_papers)
            ]
        )

        self.content = (
            "Recommendations based on multiple papers were successful. "
            "Papers are attached as an artifact."
        )
        self.content += " Here is a summary of the recommendations:\n"
        self.content += (
            f"Number of recommended papers found: {self.get_paper_count()}\n"
        )
        self.content += f"Query Paper IDs: {', '.join(self.paper_ids)}\n"
        self.content += f"Year: {self.year}\n" if self.year else ""
        self.content += "Here are a few of these papers:\n" + top_papers_info

    def process_recommendations(self) -> Dict[str, Any]:
        """Process the recommendations request and return results."""
        self._fetch_recommendations()
        self._filter_papers()
        self._create_content()

        return {
            "papers": self.filtered_papers,
            "content": self.content,
        }

    def get_paper_count(self) -> int:
        """Get the number of recommended papers.

        Returns:
            int: The number of papers in the filtered papers dictionary.
        """
        return len(self.filtered_papers)
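For orientation, the slimmed-down multi_paper_rec.py tool presumably delegates to this helper along the following lines. This is an illustrative sketch only, not part of the diff: the paper IDs and argument values are hypothetical, and the helper has to run from a layout where its relative Hydra config path resolves.

    from aiagents4pharma.talk2scholars.tools.s2.utils.multi_helper import MultiPaperRecData

    # Hypothetical inputs: Semantic Scholar paper IDs and a tool call ID from the agent.
    rec_data = MultiPaperRecData(
        paper_ids=["<paper-id-1>", "<paper-id-2>"],
        limit=5,
        year=None,
        tool_call_id="tool_call_0",
    )

    # Fetches from the API (with retries), filters the papers, and builds the summary text.
    results = rec_data.process_recommendations()
    print(results["content"])   # human-readable summary
    papers = results["papers"]  # dict keyed by paperId, suitable to attach as an artifact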
aiagents4pharma/talk2scholars/tools/s2/utils/search_helper.py (new file)
@@ -0,0 +1,175 @@
#!/usr/bin/env python3

"""
Utility for fetching recommendations based on a single paper.
"""

import logging
from typing import Any, Optional, Dict
import hydra
import requests

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class SearchData:
    """Helper class to organize search-related data."""

    def __init__(
        self,
        query: str,
        limit: int,
        year: Optional[str],
        tool_call_id: str,
    ):
        self.query = query
        self.limit = limit
        self.year = year
        self.tool_call_id = tool_call_id
        self.cfg = self._load_config()
        self.endpoint = self.cfg.api_endpoint
        self.params = self._create_params()
        self.response = None
        self.data = None
        self.papers = []
        self.filtered_papers = {}
        self.content = ""

    def _load_config(self) -> Any:
        """Load hydra configuration."""
        with hydra.initialize(version_base=None, config_path="../../../configs"):
            cfg = hydra.compose(
                config_name="config", overrides=["tools/search=default"]
            )
            logger.info("Loaded configuration for search tool")
            return cfg.tools.search

    def _create_params(self) -> Dict[str, Any]:
        """Create parameters for the API request."""
        params = {
            "query": self.query,
            "limit": min(self.limit, 100),
            "fields": ",".join(self.cfg.api_fields),
        }
        if self.year:
            params["year"] = self.year
        return params

    def _fetch_papers(self) -> None:
        """Fetch papers from Semantic Scholar API."""
        logger.info("Searching for papers on %s", self.query)

        # Wrap API call in try/except to catch connectivity issues
        for attempt in range(10):
            try:
                self.response = requests.get(
                    self.endpoint, params=self.params, timeout=10
                )
                self.response.raise_for_status()  # Raises HTTPError for bad responses
                break  # Exit loop if request is successful
            except requests.exceptions.RequestException as e:
                logger.error(
                    "Attempt %d: Failed to connect to Semantic Scholar API: %s",
                    attempt + 1,
                    e,
                )
                if attempt == 9:  # Last attempt
                    raise RuntimeError(
                        "Failed to connect to Semantic Scholar API after 10 attempts."
                        "Please retry the same query."
                    ) from e

        if self.response is None:
            raise RuntimeError(
                "Failed to obtain a response from the Semantic Scholar API."
            )

        self.data = self.response.json()

        # Check for expected data format
        if "data" not in self.data:
            logger.error("Unexpected API response format: %s", self.data)
            raise RuntimeError(
                "Unexpected response from Semantic Scholar API. The results could not be "
                "retrieved due to an unexpected format. "
                "Please modify your search query and try again."
            )

        self.papers = self.data.get("data", [])
        if not self.papers:
            logger.error(
                "No papers returned from Semantic Scholar API for query: %s", self.query
            )
            raise RuntimeError(
                "No papers were found for your query. Consider refining your search "
                "by using more specific keywords or different terms."
            )

    def _filter_papers(self) -> None:
        """Filter and format papers."""
        self.filtered_papers = {
            paper["paperId"]: {
                "semantic_scholar_paper_id": paper["paperId"],
                "Title": paper.get("title", "N/A"),
                "Abstract": paper.get("abstract", "N/A"),
                "Year": paper.get("year", "N/A"),
                "Publication Date": paper.get("publicationDate", "N/A"),
                "Venue": paper.get("venue", "N/A"),
                "Journal Name": (paper.get("journal") or {}).get("name", "N/A"),
                "Citation Count": paper.get("citationCount", "N/A"),
                "Authors": [
                    f"{author.get('name', 'N/A')} (ID: {author.get('authorId', 'N/A')})"
                    for author in paper.get("authors", [])
                ],
                "URL": paper.get("url", "N/A"),
                "arxiv_id": paper.get("externalIds", {}).get("ArXiv", "N/A"),
            }
            for paper in self.papers
            if paper.get("title") and paper.get("authors")
        }

        logger.info("Filtered %d papers", len(self.filtered_papers))

    def _create_content(self) -> None:
        """Create the content message for the response."""
        top_papers = list(self.filtered_papers.values())[:3]
        top_papers_info = "\n".join(
            [
                f"{i+1}. {paper['Title']} ({paper['Year']}; "
                f"semantic_scholar_paper_id: {paper['semantic_scholar_paper_id']}; "
                f"arXiv ID: {paper['arxiv_id']})"
                for i, paper in enumerate(top_papers)
            ]
        )

        logger.info("-----------Filtered %d papers", self.get_paper_count())

        self.content = (
            "Search was successful. Papers are attached as an artifact. "
            "Here is a summary of the search results:\n"
        )
        self.content += f"Number of papers found: {self.get_paper_count()}\n"
        self.content += f"Query: {self.query}\n"
        self.content += f"Year: {self.year}\n" if self.year else ""
        self.content += "Top 3 papers:\n" + top_papers_info

    def process_search(self) -> Dict[str, Any]:
        """Process the search request and return results."""
        self._fetch_papers()
        self._filter_papers()
        self._create_content()

        return {
            "papers": self.filtered_papers,
            "content": self.content,
        }

    def get_paper_count(self) -> int:
        """Get the number of papers found in the search.

        Returns:
            int: The number of papers in the filtered papers dictionary.
        """
        return len(self.filtered_papers)
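Similarly, the refactored search.py tool is expected to hand its query off to SearchData roughly as below. Again an illustrative sketch rather than the actual tool code; the query string, limit, and year value are made up.

    from aiagents4pharma.talk2scholars.tools.s2.utils.search_helper import SearchData

    search_data = SearchData(
        query="CRISPR base editing",  # hypothetical query
        limit=10,
        year="2024",                  # optional year filter, passed through to the API
        tool_call_id="tool_call_1",
    )

    # Runs the search (with retries), filters the results, and formats the summary.
    results = search_data.process_search()
    print(results["content"])   # summary listing the top 3 papers
    papers = results["papers"]  # dict keyed by paperId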
aiagents4pharma/talk2scholars/tools/s2/utils/single_helper.py (new file)
@@ -0,0 +1,186 @@
#!/usr/bin/env python3

"""
Utility for fetching recommendations based on a single paper.
"""

import logging
from typing import Any, Optional, Dict
import hydra
import requests

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class SinglePaperRecData:
    """Helper class to organize single paper recommendation data."""

    def __init__(
        self,
        paper_id: str,
        limit: int,
        year: Optional[str],
        tool_call_id: str,
    ):
        self.paper_id = paper_id
        self.limit = limit
        self.year = year
        self.tool_call_id = tool_call_id
        self.cfg = self._load_config()
        self.endpoint = f"{self.cfg.api_endpoint}/{paper_id}"
        self.params = self._create_params()
        self.response = None
        self.data = None
        self.recommendations = []
        self.filtered_papers = {}
        self.content = ""

    def _load_config(self) -> Any:
        """Load hydra configuration."""
        with hydra.initialize(version_base=None, config_path="../../../configs"):
            cfg = hydra.compose(
                config_name="config",
                overrides=["tools/single_paper_recommendation=default"],
            )
            logger.info("Loaded configuration for single paper recommendation tool")
            return cfg.tools.single_paper_recommendation

    def _create_params(self) -> Dict[str, Any]:
        """Create parameters for the API request."""
        params = {
            "limit": min(self.limit, 500),  # Max 500 per API docs
            "fields": ",".join(self.cfg.api_fields),
            "from": self.cfg.recommendation_params.from_pool,
        }
        if self.year:
            params["year"] = self.year
        return params

    def _fetch_recommendations(self) -> None:
        """Fetch recommendations from Semantic Scholar API."""
        logger.info(
            "Starting single paper recommendations search with paper ID: %s",
            self.paper_id,
        )

        # Wrap API call in try/except to catch connectivity issues and check response format
        for attempt in range(10):
            try:
                self.response = requests.get(
                    self.endpoint, params=self.params, timeout=self.cfg.request_timeout
                )
                self.response.raise_for_status()  # Raises HTTPError for bad responses
                break  # Exit loop if request is successful
            except requests.exceptions.RequestException as e:
                logger.error(
                    "Attempt %d: Failed to connect to Semantic Scholar API for recommendations: %s",
                    attempt + 1,
                    e,
                )
                if attempt == 9:  # Last attempt
                    raise RuntimeError(
                        "Failed to connect to Semantic Scholar API after 10 attempts."
                        "Please retry the same query."
                    ) from e

        if self.response is None:
            raise RuntimeError(
                "Failed to obtain a response from the Semantic Scholar API."
            )

        logger.info(
            "API Response Status for recommendations of paper %s: %s",
            self.paper_id,
            self.response.status_code,
        )
        logger.info("Request params: %s", self.params)

        self.data = self.response.json()

        # Check for expected data format
        if "recommendedPapers" not in self.data:
            logger.error("Unexpected API response format: %s", self.data)
            raise RuntimeError(
                "Unexpected response from Semantic Scholar API. The results could not be "
                "retrieved due to an unexpected format. "
                "Please modify your search query and try again."
            )

        self.recommendations = self.data.get("recommendedPapers", [])
        if not self.recommendations:
            logger.error(
                "No recommendations returned from API for paper: %s", self.paper_id
            )
            raise RuntimeError(
                "No recommendations were found for your query. Consider refining your search "
                "by using more specific keywords or different terms."
            )

    def _filter_papers(self) -> None:
        """Filter and format papers."""
        self.filtered_papers = {
            paper["paperId"]: {
                "semantic_scholar_paper_id": paper["paperId"],
                "Title": paper.get("title", "N/A"),
                "Abstract": paper.get("abstract", "N/A"),
                "Year": paper.get("year", "N/A"),
                "Publication Date": paper.get("publicationDate", "N/A"),
                "Venue": paper.get("venue", "N/A"),
                "Journal Name": (paper.get("journal") or {}).get("name", "N/A"),
                "Citation Count": paper.get("citationCount", "N/A"),
                "Authors": [
                    f"{author.get('name', 'N/A')} (ID: {author.get('authorId', 'N/A')})"
                    for author in paper.get("authors", [])
                ],
                "URL": paper.get("url", "N/A"),
                "arxiv_id": paper.get("externalIds", {}).get("ArXiv", "N/A"),
            }
            for paper in self.recommendations
            if paper.get("title") and paper.get("authors")
        }

        logger.info("Filtered %d papers", len(self.filtered_papers))

    def _create_content(self) -> None:
        """Create the content message for the response."""
        top_papers = list(self.filtered_papers.values())[:3]
        top_papers_info = "\n".join(
            [
                f"{i+1}. {paper['Title']} ({paper['Year']}; "
                f"semantic_scholar_paper_id: {paper['semantic_scholar_paper_id']}; "
                f"arXiv ID: {paper['arxiv_id']})"
                for i, paper in enumerate(top_papers)
            ]
        )

        self.content = (
            "Recommendations based on the single paper were successful. "
            "Papers are attached as an artifact. "
            "Here is a summary of the recommendations:\n"
        )
        self.content += (
            f"Number of recommended papers found: {self.get_paper_count()}\n"
        )
        self.content += f"Query Paper ID: {self.paper_id}\n"
        self.content += "Here are a few of these papers:\n" + top_papers_info

    def process_recommendations(self) -> Dict[str, Any]:
        """Process the recommendations request and return results."""
        self._fetch_recommendations()
        self._filter_papers()
        self._create_content()

        return {
            "papers": self.filtered_papers,
            "content": self.content,
        }

    def get_paper_count(self) -> int:
        """Get the number of recommended papers.

        Returns:
            int: The number of papers in the filtered papers dictionary.
        """
        return len(self.filtered_papers)
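And single_paper_rec.py presumably wraps SinglePaperRecData the same way (illustrative only; the paper ID below is a placeholder, not a real Semantic Scholar ID):

    from aiagents4pharma.talk2scholars.tools.s2.utils.single_helper import SinglePaperRecData

    rec_data = SinglePaperRecData(
        paper_id="<semantic-scholar-paper-id>",
        limit=5,
        year=None,
        tool_call_id="tool_call_2",
    )

    # Same fetch/filter/summarize pipeline as the other helpers.
    results = rec_data.process_recommendations()
    print(results["content"])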