academic-search-mcp 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- academic_search_mcp-0.1.3.dist-info/METADATA +243 -0
- academic_search_mcp-0.1.3.dist-info/RECORD +24 -0
- academic_search_mcp-0.1.3.dist-info/WHEEL +4 -0
- academic_search_mcp-0.1.3.dist-info/entry_points.txt +2 -0
- academic_search_mcp-0.1.3.dist-info/licenses/LICENSE +21 -0
- paper_search_mcp/__init__.py +0 -0
- paper_search_mcp/academic_platforms/__init__.py +0 -0
- paper_search_mcp/academic_platforms/arxiv.py +147 -0
- paper_search_mcp/academic_platforms/biorxiv.py +156 -0
- paper_search_mcp/academic_platforms/core.py +284 -0
- paper_search_mcp/academic_platforms/crossref.py +375 -0
- paper_search_mcp/academic_platforms/cyberleninka.py +396 -0
- paper_search_mcp/academic_platforms/google_scholar.py +249 -0
- paper_search_mcp/academic_platforms/hub.py +0 -0
- paper_search_mcp/academic_platforms/iacr.py +548 -0
- paper_search_mcp/academic_platforms/medrxiv.py +156 -0
- paper_search_mcp/academic_platforms/openalex.py +497 -0
- paper_search_mcp/academic_platforms/pubmed.py +159 -0
- paper_search_mcp/academic_platforms/sci_hub.py +178 -0
- paper_search_mcp/academic_platforms/semantic.py +492 -0
- paper_search_mcp/academic_platforms/ssrn.py +385 -0
- paper_search_mcp/paper.py +69 -0
- paper_search_mcp/pdf_utils.py +67 -0
- paper_search_mcp/server.py +514 -0
|
@@ -0,0 +1,514 @@
|
|
|
1
|
+
# paper_search_mcp/server.py
|
|
2
|
+
from typing import List, Dict, Optional
|
|
3
|
+
import httpx
|
|
4
|
+
from mcp.server.fastmcp import FastMCP
|
|
5
|
+
from .academic_platforms.arxiv import ArxivSearcher
|
|
6
|
+
from .academic_platforms.pubmed import PubMedSearcher
|
|
7
|
+
from .academic_platforms.biorxiv import BioRxivSearcher
|
|
8
|
+
from .academic_platforms.medrxiv import MedRxivSearcher
|
|
9
|
+
from .academic_platforms.google_scholar import GoogleScholarSearcher
|
|
10
|
+
from .academic_platforms.iacr import IACRSearcher
|
|
11
|
+
from .academic_platforms.semantic import SemanticSearcher
|
|
12
|
+
from .academic_platforms.crossref import CrossRefSearcher
|
|
13
|
+
from .academic_platforms.openalex import OpenAlexSearcher
|
|
14
|
+
from .academic_platforms.core import CoreSearcher
|
|
15
|
+
from .academic_platforms.ssrn import SSRNSearcher
|
|
16
|
+
from .academic_platforms.cyberleninka import CyberLeninkaSearcher
|
|
17
|
+
|
|
18
|
+
# from .academic_platforms.hub import SciHubSearcher
|
|
19
|
+
from .paper import Paper
|
|
20
|
+
|
|
21
|
+
# Initialize MCP server
# Single FastMCP instance; every @mcp.tool() below registers against it and
# main() serves it over stdio.
mcp = FastMCP("paper_search_server")

# Instances of searchers
# Module-level singletons shared by all tool functions and the SEARCHERS map.
# Each wraps one academic platform behind a common search/download interface.
arxiv_searcher = ArxivSearcher()
pubmed_searcher = PubMedSearcher()
biorxiv_searcher = BioRxivSearcher()
medrxiv_searcher = MedRxivSearcher()
google_scholar_searcher = GoogleScholarSearcher()
iacr_searcher = IACRSearcher()
semantic_searcher = SemanticSearcher()
crossref_searcher = CrossRefSearcher()
openalex_searcher = OpenAlexSearcher()
core_searcher = CoreSearcher()
ssrn_searcher = SSRNSearcher()
cyberleninka_searcher = CyberLeninkaSearcher()
# scihub_searcher = SciHubSearcher()
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
# Asynchronous helper to adapt synchronous searchers
async def async_search(
    searcher, query: str, max_results: int, abstract_limit: int = 200, **kwargs
) -> List[Dict]:
    """Run a synchronous searcher and serialize its results to dicts.

    Args:
        searcher: Object exposing ``search(query, max_results=..., **kwargs)``
            and returning papers with a ``to_dict(abstract_limit=...)`` method.
        query: Free-text search query.
        max_results: Maximum number of papers to return.
        abstract_limit: Max chars for abstract (0=omit, -1=full, default: 200).
        **kwargs: Extra keyword filters (e.g. ``year``) forwarded verbatim to
            the searcher.

    Returns:
        List of paper metadata in dictionary format.
    """
    # The searchers are synchronous (requests-based), so the previously
    # opened-but-unused httpx.AsyncClient has been removed.
    # Forward *all* extra filters instead of silently dropping everything
    # except 'year', which is what the **kwargs signature promises.
    papers = searcher.search(query, max_results=max_results, **kwargs)
    return [paper.to_dict(abstract_limit=abstract_limit) for paper in papers]
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
# Tool definitions
|
|
54
|
+
@mcp.tool()
async def search_arxiv(
    query: str, max_results: int = 10, abstract_limit: int = 200,
    date_from: Optional[str] = None, date_to: Optional[str] = None
) -> List[Dict]:
    """Search arXiv for academic papers.

    Args:
        query: Free-text search query (e.g., 'machine learning').
        max_results: Upper bound on the number of papers returned (default: 10).
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
        date_from: Earliest publication date, YYYY-MM-DD (e.g., '2024-01-01').
        date_to: Latest publication date, YYYY-MM-DD (e.g., '2024-12-31').
    Returns:
        A list of paper metadata dictionaries.
    """
    results = arxiv_searcher.search(query, max_results, date_from=date_from, date_to=date_to)
    if not results:
        return []
    return [paper.to_dict(abstract_limit=abstract_limit) for paper in results]
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
@mcp.tool()
async def search_pubmed(
    query: str, max_results: int = 10, abstract_limit: int = 200,
    date_from: Optional[str] = None, date_to: Optional[str] = None
) -> List[Dict]:
    """Search PubMed for academic papers.

    Args:
        query: Free-text search query (e.g., 'machine learning').
        max_results: Upper bound on the number of papers returned (default: 10).
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
        date_from: Earliest publication date, YYYY-MM-DD (e.g., '2024-01-01').
        date_to: Latest publication date, YYYY-MM-DD (e.g., '2024-12-31').
    Returns:
        A list of paper metadata dictionaries.
    """
    results = pubmed_searcher.search(query, max_results, date_from=date_from, date_to=date_to)
    if not results:
        return []
    return [paper.to_dict(abstract_limit=abstract_limit) for paper in results]
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
@mcp.tool()
async def search_biorxiv(
    query: str, max_results: int = 10, abstract_limit: int = 200,
    date_from: Optional[str] = None, date_to: Optional[str] = None
) -> List[Dict]:
    """Search bioRxiv for academic preprints.

    Args:
        query: Free-text search query (e.g., 'machine learning').
        max_results: Upper bound on the number of papers returned (default: 10).
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
        date_from: Earliest publication date, YYYY-MM-DD (e.g., '2024-01-01').
        date_to: Latest publication date, YYYY-MM-DD (e.g., '2024-12-31').
    Returns:
        A list of paper metadata dictionaries.
    """
    results = biorxiv_searcher.search(query, max_results, date_from=date_from, date_to=date_to)
    if not results:
        return []
    return [paper.to_dict(abstract_limit=abstract_limit) for paper in results]
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
@mcp.tool()
async def search_medrxiv(
    query: str, max_results: int = 10, abstract_limit: int = 200,
    date_from: Optional[str] = None, date_to: Optional[str] = None
) -> List[Dict]:
    """Search medRxiv for academic preprints.

    Args:
        query: Free-text search query (e.g., 'machine learning').
        max_results: Upper bound on the number of papers returned (default: 10).
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
        date_from: Earliest publication date, YYYY-MM-DD (e.g., '2024-01-01').
        date_to: Latest publication date, YYYY-MM-DD (e.g., '2024-12-31').
    Returns:
        A list of paper metadata dictionaries.
    """
    results = medrxiv_searcher.search(query, max_results, date_from=date_from, date_to=date_to)
    if not results:
        return []
    return [paper.to_dict(abstract_limit=abstract_limit) for paper in results]
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
@mcp.tool()
async def search_google_scholar(
    query: str, max_results: int = 10, abstract_limit: int = 200,
    date_from: Optional[str] = None, date_to: Optional[str] = None
) -> List[Dict]:
    """Search Google Scholar for academic papers.

    Args:
        query: Free-text search query (e.g., 'machine learning').
        max_results: Upper bound on the number of papers returned (default: 10).
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
        date_from: Earliest date, YYYY-MM-DD (e.g., '2024-01-01'); only the year is used.
        date_to: Latest date, YYYY-MM-DD (e.g., '2024-12-31'); only the year is used.
    Returns:
        A list of paper metadata dictionaries.
    """
    results = google_scholar_searcher.search(query, max_results, date_from=date_from, date_to=date_to)
    if not results:
        return []
    return [paper.to_dict(abstract_limit=abstract_limit) for paper in results]
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
@mcp.tool()
async def search_iacr(
    query: str, max_results: int = 10, fetch_details: bool = True, abstract_limit: int = 200,
    date_from: Optional[str] = None, date_to: Optional[str] = None
) -> List[Dict]:
    """Search academic papers from IACR ePrint Archive.

    Args:
        query: Search query string (e.g., 'cryptography', 'secret sharing').
        max_results: Maximum number of papers to return (default: 10).
        fetch_details: Whether to fetch detailed information for each paper (default: True).
        abstract_limit: Max chars for abstract (0=omit, -1=full, default: 200).
        date_from: Start date YYYY-MM-DD (e.g., '2024-01-01').
        date_to: End date YYYY-MM-DD (e.g., '2024-12-31').
    Returns:
        List of paper metadata in dictionary format.
    """
    # Fix: the previous implementation opened an httpx.AsyncClient here but
    # never used it — the IACR searcher call is synchronous. The pointless
    # client setup/teardown has been removed.
    papers = iacr_searcher.search(query, max_results, fetch_details, date_from=date_from, date_to=date_to)
    return [paper.to_dict(abstract_limit=abstract_limit) for paper in papers] if papers else []
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
@mcp.tool()
async def search_core(
    query: str, max_results: int = 10, abstract_limit: int = 200,
    date_from: Optional[str] = None, date_to: Optional[str] = None
) -> List[Dict]:
    """Search CORE, an aggregator of 200M+ open access papers.

    CORE collects open access research from thousands of sources, including
    institutional repositories, arXiv, and PubMed Central.

    Args:
        query: Free-text search query (e.g., 'machine learning', 'climate change').
        max_results: Upper bound on the number of papers returned (default: 10, max: 100).
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
        date_from: Earliest date, YYYY-MM-DD (e.g., '2024-01-01'); only the year is used.
        date_to: Latest date, YYYY-MM-DD (e.g., '2024-12-31'); only the year is used.
    Returns:
        A list of paper metadata dictionaries.
    """
    results = core_searcher.search(query, max_results, date_from=date_from, date_to=date_to)
    if not results:
        return []
    return [paper.to_dict(abstract_limit=abstract_limit) for paper in results]
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
@mcp.tool()
async def search_ssrn(
    query: str, max_results: int = 10, abstract_limit: int = 200,
    date_from: Optional[str] = None, date_to: Optional[str] = None
) -> List[Dict]:
    """Search SSRN preprints in social sciences, law, business, and humanities.

    SSRN focuses on early-stage research and working papers in economics,
    finance, law, business, and the social sciences.

    Args:
        query: Free-text search query (e.g., 'corporate governance').
        max_results: Upper bound on the number of papers returned (default: 10).
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
        date_from: Earliest date, YYYY-MM-DD (e.g., '2024-01-01'); only the year is used.
        date_to: Latest date, YYYY-MM-DD (e.g., '2024-12-31'); only the year is used.
    Returns:
        A list of paper metadata dictionaries.
    """
    results = ssrn_searcher.search(query, max_results, date_from=date_from, date_to=date_to)
    if not results:
        return []
    return [paper.to_dict(abstract_limit=abstract_limit) for paper in results]
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
@mcp.tool()
async def search_cyberleninka(
    query: str, max_results: int = 10, abstract_limit: int = 200,
    date_from: Optional[str] = None, date_to: Optional[str] = None,
    catalog: Optional[str] = None, category: Optional[str] = None
) -> List[Dict]:
    """Search Russian academic papers on CyberLeninka.

    CyberLeninka is a Russian open access repository of articles from Russian
    journals. Results can be narrowed by journal indexing (VAK, RSCI, SCOPUS)
    and by subject category.

    Args:
        query: Free-text query (Russian or English).
        max_results: Upper bound on the number of papers returned (default: 10, max: 100).
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
        date_from: Earliest date, YYYY-MM-DD (only the year is used).
        date_to: Latest date, YYYY-MM-DD (only the year is used).
        catalog: Indexing filter: 'vak', 'rsci', or 'scopus'.
        category: Subject filter: 'economics', 'law', 'medicine', 'psychology', etc.
    Returns:
        A list of paper metadata dictionaries.
    """
    results = cyberleninka_searcher.search(
        query, max_results, date_from=date_from, date_to=date_to,
        catalog=catalog, category=category
    )
    if not results:
        return []
    return [paper.to_dict(abstract_limit=abstract_limit) for paper in results]
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
# Unified download/read tools
# Maps the lowercase `source` argument of download_paper/read_paper to the
# module-level searcher singletons declared above.
# NOTE(review): google_scholar_searcher is absent from this map — presumably
# because that platform offers no PDF download/read support; confirm before
# adding it.
SEARCHERS = {
    'arxiv': arxiv_searcher,
    'pubmed': pubmed_searcher,
    'biorxiv': biorxiv_searcher,
    'medrxiv': medrxiv_searcher,
    'iacr': iacr_searcher,
    'semantic': semantic_searcher,
    'crossref': crossref_searcher,
    'openalex': openalex_searcher,
    'core': core_searcher,
    'ssrn': ssrn_searcher,
    'cyberleninka': cyberleninka_searcher,
}
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
@mcp.tool()
async def download_paper(paper_id: str, source: str, save_path: str = "./downloads") -> str:
    """Download a paper's PDF from any supported source.

    Args:
        paper_id: Paper identifier (format depends on source).
        source: Source platform (arxiv, pubmed, biorxiv, medrxiv, iacr, semantic, crossref, openalex, core, ssrn, cyberleninka).
        save_path: Directory to save the PDF (default: './downloads').
    Returns:
        Path to the downloaded PDF file, or an error message.
    """
    key = source.lower()
    if key not in SEARCHERS:
        return f"Unknown source: {source}. Supported: {', '.join(SEARCHERS.keys())}"
    try:
        return SEARCHERS[key].download_pdf(paper_id, save_path)
    except NotImplementedError as e:
        return str(e)
    except Exception as e:
        return f"Error downloading paper: {e}"
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
@mcp.tool()
async def read_paper(paper_id: str, source: str, save_path: str = "./downloads") -> str:
    """Extract the text content of a paper's PDF.

    Args:
        paper_id: Paper identifier (format depends on source).
        source: Source platform (arxiv, pubmed, biorxiv, medrxiv, iacr, semantic, crossref, openalex, core, ssrn, cyberleninka).
        save_path: Directory where the PDF is/will be saved (default: './downloads').
    Returns:
        str: The extracted text content of the paper, or an error message.
    """
    key = source.lower()
    if key not in SEARCHERS:
        return f"Unknown source: {source}. Supported: {', '.join(SEARCHERS.keys())}"
    try:
        return SEARCHERS[key].read_paper(paper_id, save_path)
    except NotImplementedError as e:
        return str(e)
    except Exception as e:
        return f"Error reading paper: {e}"
|
|
309
|
+
|
|
310
|
+
|
|
311
|
+
@mcp.tool()
async def search_semantic(
    query: str, max_results: int = 10, abstract_limit: int = 200,
    year: Optional[str] = None, date_from: Optional[str] = None, date_to: Optional[str] = None
) -> List[Dict]:
    """Search Semantic Scholar for academic papers.

    Args:
        query: Free-text search query (e.g., 'machine learning').
        max_results: Upper bound on the number of papers returned (default: 10).
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
        year: Year filter (e.g., '2019', '2016-2020', '2010-', '-2015').
        date_from: Earliest date, YYYY-MM-DD (e.g., '2024-01-01'); overrides year.
        date_to: Latest date, YYYY-MM-DD (e.g., '2024-12-31'); overrides year.
    Returns:
        A list of paper metadata dictionaries.
    """
    results = semantic_searcher.search(
        query, year=year, max_results=max_results,
        date_from=date_from, date_to=date_to
    )
    if not results:
        return []
    return [paper.to_dict(abstract_limit=abstract_limit) for paper in results]
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
@mcp.tool()
async def search_crossref(
    query: str, max_results: int = 10, abstract_limit: int = 200,
    date_from: Optional[str] = None, date_to: Optional[str] = None,
    filter: Optional[str] = None, sort: Optional[str] = None, order: Optional[str] = None
) -> List[Dict]:
    """Search the CrossRef database for academic papers.

    Args:
        query: Free-text search query (e.g., 'machine learning', 'climate change').
        max_results: Upper bound on the number of papers returned (default: 10, max: 1000).
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
        date_from: Earliest date, YYYY-MM-DD (e.g., '2024-01-01').
        date_to: Latest date, YYYY-MM-DD (e.g., '2024-12-31').
        filter: CrossRef filter string (e.g., 'has-full-text:true').
        sort: Sort field ('relevance', 'published', 'updated', 'deposited', etc.).
        order: Sort order ('asc' or 'desc').
    Returns:
        A list of paper metadata dictionaries.
    """
    # Forward only the optional CrossRef parameters the caller actually set.
    extra = {
        name: value
        for name, value in (('filter', filter), ('sort', sort), ('order', order))
        if value is not None
    }
    results = crossref_searcher.search(query, max_results, date_from=date_from, date_to=date_to, **extra)
    if not results:
        return []
    return [paper.to_dict(abstract_limit=abstract_limit) for paper in results]
|
|
364
|
+
|
|
365
|
+
|
|
366
|
+
@mcp.tool()
async def get_crossref_paper_by_doi(doi: str, abstract_limit: int = 200) -> Dict:
    """Get a specific paper from CrossRef by its DOI.

    Args:
        doi: Digital Object Identifier (e.g., '10.1038/nature12373').
        abstract_limit: Max chars for abstract (0=omit, -1=full, default: 200).
    Returns:
        Paper metadata in dictionary format, or empty dict if not found.
    """
    # Fix: the previous implementation opened an httpx.AsyncClient here but
    # never used it — the CrossRef lookup is synchronous. The pointless
    # client setup/teardown has been removed.
    paper = crossref_searcher.get_paper_by_doi(doi)
    return paper.to_dict(abstract_limit=abstract_limit) if paper else {}
|
|
379
|
+
|
|
380
|
+
|
|
381
|
+
@mcp.tool()
async def search_openalex(
    query: str, max_results: int = 10, abstract_limit: int = 200,
    date_from: Optional[str] = None, date_to: Optional[str] = None
) -> List[Dict]:
    """Search OpenAlex, a comprehensive index of 240M+ scholarly works.

    OpenAlex aggregates data from CrossRef, PubMed, arXiv, institutional
    repositories, and more. Open access papers include a PDF URL.

    Args:
        query: Free-text search query (e.g., 'machine learning', 'climate change').
        max_results: Upper bound on the number of papers returned (default: 10, max: 200).
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
        date_from: Earliest date, YYYY-MM-DD (e.g., '2024-01-01').
        date_to: Latest date, YYYY-MM-DD (e.g., '2024-12-31').
    Returns:
        A list of paper metadata dictionaries; open access papers carry a 'pdf' field.
    """
    results = openalex_searcher.search(query, max_results, date_from=date_from, date_to=date_to)
    if not results:
        return []
    return [paper.to_dict(abstract_limit=abstract_limit) for paper in results]
|
|
402
|
+
|
|
403
|
+
|
|
404
|
+
@mcp.tool()
async def get_openalex_work_by_id(openalex_id: str, abstract_limit: int = 200) -> Dict:
    """Fetch a single OpenAlex work by its ID.

    Args:
        openalex_id: OpenAlex work ID (e.g., 'W2741809807').
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
    Returns:
        A paper metadata dictionary, or an empty dict if not found.
    """
    work = openalex_searcher.get_work_by_id(openalex_id)
    if not work:
        return {}
    return work.to_dict(abstract_limit=abstract_limit)
|
|
416
|
+
|
|
417
|
+
|
|
418
|
+
@mcp.tool()
async def get_openalex_references(
    paper_id: str, max_results: int = 25, abstract_limit: int = 200
) -> List[Dict]:
    """List the works a paper cites (its outgoing references/bibliography).

    Args:
        paper_id: OpenAlex work ID (e.g., 'W2741809807').
        max_results: Upper bound on the number of references returned (default: 25).
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
    Returns:
        A list of metadata dictionaries for works cited by this paper.
    """
    cited = openalex_searcher.get_references(paper_id, max_results)
    if not cited:
        return []
    return [work.to_dict(abstract_limit=abstract_limit) for work in cited]
|
|
433
|
+
|
|
434
|
+
|
|
435
|
+
@mcp.tool()
async def get_openalex_citations(
    paper_id: str, max_results: int = 25, abstract_limit: int = 200
) -> List[Dict]:
    """List the works that cite a paper (its incoming citations).

    Args:
        paper_id: OpenAlex work ID (e.g., 'W2741809807').
        max_results: Upper bound on the number of citing papers returned (default: 25).
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
    Returns:
        A list of metadata dictionaries for works that cite this paper.
    """
    citing = openalex_searcher.get_citing_papers(paper_id, max_results)
    if not citing:
        return []
    return [work.to_dict(abstract_limit=abstract_limit) for work in citing]
|
|
450
|
+
|
|
451
|
+
|
|
452
|
+
@mcp.tool()
async def get_paper_by_doi(doi: str, abstract_limit: int = 200) -> Dict:
    """Look up a paper by DOI via OpenAlex, falling back to CrossRef.

    Args:
        doi: Digital Object Identifier (e.g., '10.1038/nature12373').
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
    Returns:
        A paper metadata dictionary, or an empty dict if neither source finds it.
    """
    # OpenAlex goes first (richer metadata: citations, categories); CrossRef
    # is the fallback when OpenAlex has no record.
    for lookup in (openalex_searcher.get_work_by_doi, crossref_searcher.get_paper_by_doi):
        paper = lookup(doi)
        if paper:
            return paper.to_dict(abstract_limit=abstract_limit)
    return {}
|
|
473
|
+
|
|
474
|
+
|
|
475
|
+
@mcp.tool()
async def search_authors(name: str, max_results: int = 10) -> List[Dict]:
    """Find authors by name via OpenAlex.

    Args:
        name: Author name to search for (e.g., 'Yann LeCun', 'Hinton').
        max_results: Upper bound on the number of authors returned (default: 10).
    Returns:
        A list of author metadata dictionaries (id, name, works_count,
        citations, affiliations).
    """
    authors = openalex_searcher.search_authors(name, max_results)
    return authors
|
|
486
|
+
|
|
487
|
+
|
|
488
|
+
@mcp.tool()
async def get_author_papers(
    author_id: str, max_results: int = 25, abstract_limit: int = 200,
    date_from: Optional[str] = None, date_to: Optional[str] = None
) -> List[Dict]:
    """List an author's papers, ordered by citation count (highest first).

    Args:
        author_id: OpenAlex author ID (e.g., 'A5015666723').
        max_results: Upper bound on the number of papers returned (default: 25).
        abstract_limit: Maximum abstract length in characters (0 omits it,
            -1 keeps it whole; default: 200).
        date_from: Earliest date, YYYY-MM-DD (optional).
        date_to: Latest date, YYYY-MM-DD (optional).
    Returns:
        A list of paper metadata dictionaries sorted by citations.
    """
    results = openalex_searcher.get_author_papers(author_id, max_results, date_from, date_to)
    if not results:
        return []
    return [paper.to_dict(abstract_limit=abstract_limit) for paper in results]
|
|
506
|
+
|
|
507
|
+
|
|
508
|
+
def main():
    """Entry point for the MCP server."""
    # Serve over stdio so the process can be driven by an MCP client
    # (this is also the console-script entry point declared by the package).
    mcp.run(transport="stdio")
|
|
511
|
+
|
|
512
|
+
|
|
513
|
+
# Start the server when this module is executed directly as a script.
if __name__ == "__main__":
    main()
|