academia-mcp 0.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- academia_mcp/__init__.py +7 -0
- academia_mcp/__main__.py +5 -0
- academia_mcp/server.py +19 -0
- academia_mcp/tools/__init__.py +14 -0
- academia_mcp/tools/anthology_search.py +193 -0
- academia_mcp/tools/arxiv_download.py +326 -0
- academia_mcp/tools/arxiv_search.py +246 -0
- academia_mcp/tools/hf_datasets_search.py +82 -0
- academia_mcp/tools/s2_citations.py +118 -0
- academia_mcp-0.0.2.dist-info/METADATA +33 -0
- academia_mcp-0.0.2.dist-info/RECORD +15 -0
- academia_mcp-0.0.2.dist-info/WHEEL +5 -0
- academia_mcp-0.0.2.dist-info/entry_points.txt +2 -0
- academia_mcp-0.0.2.dist-info/licenses/LICENSE +201 -0
- academia_mcp-0.0.2.dist-info/top_level.txt +1 -0
academia_mcp/__init__.py
ADDED
academia_mcp/__main__.py
ADDED
academia_mcp/server.py
ADDED
@@ -0,0 +1,19 @@
from mcp.server.fastmcp import FastMCP

from .tools.arxiv_search import arxiv_search
from .tools.arxiv_download import arxiv_download
from .tools.s2_citations import s2_citations
from .tools.hf_datasets_search import hf_datasets_search
from .tools.anthology_search import anthology_search

server = FastMCP("Academia MCP")

server.add_tool(arxiv_search)
server.add_tool(arxiv_download)
server.add_tool(s2_citations)
server.add_tool(hf_datasets_search)
server.add_tool(anthology_search)


if __name__ == "__main__":
    server.run()
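Note: the tools registered above are plain functions that return JSON strings, so they can also be called without the MCP transport. A minimal sketch, not shipped in the wheel (the query string is an arbitrary example; it assumes the package and its dependencies are installed):

    import json

    from academia_mcp.tools import arxiv_search

    # Each tool returns a JSON object serialized to a string.
    raw = arxiv_search('ti:"attention is all you need"', limit=1)
    print(json.loads(raw)["total_count"])

The server itself should be startable with `python -m academia_mcp`, since the wheel ships an `__main__.py` next to this module.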
academia_mcp/tools/__init__.py
ADDED
@@ -0,0 +1,14 @@
from .arxiv_search import arxiv_search
from .anthology_search import anthology_search
from .arxiv_download import arxiv_download
from .hf_datasets_search import hf_datasets_search
from .s2_citations import s2_citations


__all__ = [
    "arxiv_search",
    "arxiv_download",
    "anthology_search",
    "s2_citations",
    "hf_datasets_search",
]
academia_mcp/tools/anthology_search.py
ADDED
@@ -0,0 +1,193 @@
import json
import re
from datetime import datetime
from typing import Optional, List, Dict, Any

from acl_anthology import Anthology


class AnthologySingleton:
    instance: Optional[Anthology] = None

    @classmethod
    def get(cls) -> Anthology:
        if cls.instance is None:
            cls.instance = Anthology.from_repo()
            cls.instance.load_all()
        return cls.instance


SORT_BY_OPTIONS = ("relevance", "published")
SORT_ORDER_OPTIONS = ("ascending", "descending")


def _format_text_field(text: str) -> str:
    return " ".join([line.strip() for line in text.split() if line.strip()])


def _format_authors(authors: List[Any]) -> str:
    names = [f"{author.name.first} {author.name.last}" for author in authors]
    result = ", ".join(names[:3])
    if len(names) > 3:
        result += f", and {len(names) - 3} more authors"
    return result


def _format_date(date_str: str) -> str:
    try:
        return datetime.strptime(date_str, "%Y").strftime("%B %d, %Y")
    except ValueError:
        return date_str


def _clean_entry(entry: Any) -> Dict[str, Any]:
    return {
        "id": entry.full_id,
        "title": _format_text_field(entry.title.as_text()),
        "authors": _format_authors(entry.authors),
        "abstract": (_format_text_field(entry.abstract.as_text()) if entry.abstract else ""),
        "published": _format_date(entry.year),
        "categories": ", ".join(entry.venue_ids),
        "comment": entry.note if entry.note else "",
        "url": entry.pdf.url if entry.pdf else "",
    }


def _convert_to_year(date_str: str) -> int:
    try:
        return int(date_str[:4])
    except ValueError as e:
        raise ValueError("Invalid date format. Please use YYYY-MM-DD format.") from e


def _has_cyrillic(text: str) -> bool:
    return bool(re.search("[а-яА-Я]", text))


def _parse_query(query: str, paper: Any) -> bool:
    conditions = re.split(r"\s+(AND|OR|ANDNOT)\s+", query)
    result = False
    for i in range(0, len(conditions), 2):
        condition = conditions[i]
        field, value = condition.split(":", 1) if ":" in condition else ("ti", condition)
        value = value.lower().replace('"', "").replace("'", "")
        match field:
            case "ti":
                match_found = value in paper.title.as_text().lower()
            case "au":
                match_found = any(value in str(author).lower() for author in paper.authors)
            case "abs":
                match_found = paper.abstract and value in paper.abstract.as_text().lower()
            case "cat":
                match_found = any(value in cat.lower() for cat in paper.venue_ids)
            case "id":
                match_found = value in paper.full_id.lower()
            case _:
                match_found = False
        if i == 0:
            result = match_found
        else:
            operator = conditions[i - 1]
            if operator == "AND":
                result = result and match_found
            elif operator == "OR":
                result = result or match_found
            elif operator == "ANDNOT":
                result = result and not match_found
    return result


def anthology_search(
    query: str,
    offset: Optional[int] = 0,
    limit: Optional[int] = 5,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    sort_by: Optional[str] = "relevance",
    sort_order: Optional[str] = "descending",
    include_abstracts: Optional[bool] = False,
) -> str:
    """
    Search ACL Anthology papers with field-specific queries.

    Fields:
        ti: (title), au: (author), abs: (abstract),
        cat: (category), id: (paper ID)

    Operators:
        AND, OR, ANDNOT

    Please always specify the fields. Search should always be field-specific.
    You can search for an exact match of an entire phrase by enclosing the phrase in double quotes.
    If you do not need an exact match of a phrase, use single terms with OR/AND.
    Boolean operators are strict. In most cases, you need OR and not AND.
    You can scroll all search results with the "offset" parameter.
    Do not include date constraints in the query: use the "start_date" and "end_date" parameters instead.
    The names of authors should be in Latin script. For example, search "Ilya Gusev" instead of "Илья Гусев".

    Example queries:
        abs:"machine learning"
        au:"del maestro"
        au:vaswani AND ti:"attention is all"
        ti:role OR ti:playing OR ti:"language model"
        (au:vaswani OR au:"del maestro") ANDNOT ti:attention

    Returns a JSON object serialized to a string. The structure is:
    {"total_count": ..., "returned_count": ..., "offset": ..., "results": [...]}
    Every item in the "results" has the following fields:
    ("id", "title", "authors", "abstract", "published", "categories", "comment", "url")
    You can use `json.loads` to deserialize the result and get specific fields.

    Args:
        query: The search query, required.
        offset: The offset in search results. If it is 10, the first 10 items will be skipped. 0 by default.
        limit: The maximum number of items that will be returned. limit=5 by default, limit=10 is the maximum.
        start_date: Start date in %Y-%m-%d format. None by default.
        end_date: End date in %Y-%m-%d format. None by default.
        sort_by: 2 options to sort by: relevance, published. relevance by default.
        sort_order: 2 sort orders: ascending, descending. descending by default.
        include_abstracts: include abstracts in the result or not. False by default.
    """
    assert isinstance(query, str), "Error: Your search query must be a string"
    assert isinstance(offset, int), "Error: offset should be an integer"
    assert isinstance(limit, int), "Error: limit should be an integer"
    assert isinstance(sort_by, str), "Error: sort_by should be a string"
    assert isinstance(sort_order, str), "Error: sort_order should be a string"
    assert query.strip(), "Error: Your query should not be empty"
    assert sort_by in SORT_BY_OPTIONS, f"Error: sort_by should be one of {SORT_BY_OPTIONS}"
    assert (
        sort_order in SORT_ORDER_OPTIONS
    ), f"Error: sort_order should be one of {SORT_ORDER_OPTIONS}"
    assert offset >= 0, "Error: offset must be 0 or positive number"
    assert limit < 100, "Error: limit is too large, it should be less than 100"
    assert limit > 0, "Error: limit should be greater than 0"
    assert not _has_cyrillic(query), "Error: use only Latin script for queries"
    assert include_abstracts is not None, "Error: include_abstracts must be bool"

    singleton = AnthologySingleton.get()
    all_papers = [
        paper for paper in singleton.papers() if paper.abstract and str(paper.abstract).strip()
    ]

    if start_date or end_date:
        start_year = _convert_to_year(start_date) if start_date else 1900
        end_year = _convert_to_year(end_date) if end_date else datetime.now().year
        all_papers = [paper for paper in all_papers if start_year <= int(paper.year) <= end_year]

    filtered_papers = [paper for paper in all_papers if _parse_query(query, paper)]

    if sort_by == "published":
        filtered_papers.sort(key=lambda x: int(x.year), reverse=(sort_order == "descending"))

    paged_papers = filtered_papers[offset : offset + limit]
    clean_entries = [_clean_entry(entry) for entry in paged_papers]

    return json.dumps(
        {
            "total_count": len(filtered_papers),
            "returned_count": len(paged_papers),
            "offset": offset,
            "results": clean_entries,
        },
        ensure_ascii=False,
    )
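A hypothetical usage sketch for this tool (not part of the package; the query values are illustrative). Note that the first call is slow: AnthologySingleton.get() clones the ACL Anthology data with Anthology.from_repo() and loads all of it into memory before any filtering happens.

    import json

    from academia_mcp.tools import anthology_search

    # Filter by a title substring and sort by publication year, newest first.
    raw = anthology_search('ti:"language model"', limit=3, sort_by="published")
    for item in json.loads(raw)["results"]:
        print(item["published"], item["id"], item["title"])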
academia_mcp/tools/arxiv_download.py
ADDED
@@ -0,0 +1,326 @@
# Based on
# https://github.com/SamuelSchmidgall/AgentLaboratory/blob/main/tools.py
# https://github.com/bytedance/pasa/blob/main/utils.py

import re
import json
import tempfile
from pathlib import Path
from typing import Any, List, Optional, Dict
from dataclasses import dataclass, field

import requests
import bs4
from markdownify import MarkdownConverter  # type: ignore
from pypdf import PdfReader


def download_pdf(url: str, output_path: Path) -> None:
    response = requests.get(url)
    response.raise_for_status()
    content_type = response.headers.get("content-type")
    assert content_type
    assert "application/pdf" in content_type.lower()
    with open(output_path.resolve(), "wb") as fp:
        fp.write(response.content)


def parse_pdf_file(pdf_path: Path) -> List[str]:
    # Why not Marker? Because it is too heavy.
    reader = PdfReader(str(pdf_path.resolve()))

    pages = []
    for page_number, page in enumerate(reader.pages, start=1):
        try:
            text = page.extract_text()
            prefix = f"## Page {page_number}\n\n"
            pages.append(prefix + text)
        except Exception:
            continue
    return pages


HTML_URL = "https://arxiv.org/html/{paper_id}"
ABS_URL = "https://arxiv.org/abs/{paper_id}"
PDF_URL = "https://arxiv.org/pdf/{paper_id}"
SECTION_STOP_WORDS = (
    "references",
    "acknowledgments",
    "about this document",
    "appendix",
)


@dataclass
class TOCEntry:
    level: int
    title: str
    html_id: Optional[str] = None
    subsections: List["TOCEntry"] = field(default_factory=list)

    def linearize(self) -> List["TOCEntry"]:
        entries = [self]
        for subsection in self.subsections:
            entries.extend(subsection.linearize())
        return entries

    def is_excluded(self) -> bool:
        return any(ss in self.title.lower() for ss in SECTION_STOP_WORDS)

    def to_str(self) -> str:
        final_items = []
        output_index = 0
        for entry in self.linearize():
            if entry.level <= 1:
                continue
            prefix = " " * (entry.level - 2)
            suffix = ""
            if entry.level == 2 and not entry.is_excluded():
                suffix = f" (index in 'sections': {output_index})"
                output_index += 1
            final_items.append(prefix + entry.title + suffix)
        return "\n".join(final_items)


class ArxivHTMLConverter(MarkdownConverter):  # type: ignore
    def __init__(self, base_url: str, *args: Any, **kwargs: Any) -> None:
        self.base_url = base_url

        super().__init__(*args, **kwargs)

    def convert_cite(self, el: bs4.element.Tag, text: str, convert_as_inline: bool = False) -> str:
        citations = text.split(";")
        citations = [c.strip() for c in citations if c.strip()]
        fixed_citations = []
        for citation in citations:
            fixed_citation = citation
            parts = citation.split()
            year = parts[-1]
            if len(year) > 4 and year[0] == "(" and year[-1] == ")":
                fixed_citation = f'{" ".join(parts[:-1])}, {year[1:-1]}'
            fixed_citations.append(fixed_citation)
        return f'({"; ".join(fixed_citations)})'

    def convert_sup(self, *args: Any, **kwargs: Any) -> str:
        return ""

    def convert_span(self, el: bs4.element.Tag, text: str, convert_as_inline: bool = False) -> str:
        if "class" not in el.attrs:
            return text
        if "ltx_tag_item" in el["class"]:
            return ""
        if "ltx_note_outer" in el["class"]:
            return f" (Footnote {text})"
        if "ltx_tag_note" in el["class"]:
            return text + ": "
        return text

    def convert_figure(
        self, el: bs4.element.Tag, text: str, convert_as_inline: bool = False
    ) -> str:
        if el.img:
            link = el.img.get("src")
            caption = "Figure"
            if el.figcaption:
                caption = el.figcaption.text
            return f"\n\n![{caption}]({link})\n\n"
        elif el.table:
            caption = "Table"
            if el.figcaption:
                caption = el.figcaption.text
            table_text = self.process_tag(el.table, convert_as_inline=False)
            return f"\n\n{caption}\n\n{table_text}\n\n"
        return text


def _generate_toc(soup: bs4.element.Tag) -> TOCEntry:
    stack = [TOCEntry(level=0, title="ROOT", html_id=None)]
    heading_tags = {"h1": 1, "h2": 2, "h3": 3, "h4": 4, "h5": 5}
    for tag in soup.find_all(heading_tags.keys()):
        level = heading_tags[tag.name]
        while stack[-1].level >= level:
            stack.pop()
        parent_entry = stack[-1]
        section = tag.find_parent("section", id=True)
        if not section:
            continue
        section_id = section.get("id")
        assert section_id is not None
        title = tag.get_text().strip()
        new_entry = TOCEntry(level=level, title=title, html_id=section_id)
        parent_entry.subsections.append(new_entry)
        stack.append(new_entry)
    return stack[0]


def _convert_soup_to_md(soup: bs4.element.Tag, url: str) -> str:
    converter = ArxivHTMLConverter(base_url=url, strip=["div", "a"], heading_style="ATX")
    md_content: str = converter.convert_soup(soup)
    md_content = md_content.replace("\xa0", " ")
    md_content = "\n".join([line.strip() for line in md_content.split("\n")])
    lines = [line.strip() for line in md_content.split("\n\n") if line.strip()]
    md_content = "\n\n".join(lines)
    return md_content


def _build_by_toc(toc: TOCEntry, soup: bs4.element.Tag, url: str) -> List[str]:
    final_sections = []
    for toc_entry in toc.linearize():
        if toc_entry.level == 2 and not toc_entry.is_excluded():
            section = soup.find(id=toc_entry.html_id)
            assert isinstance(section, bs4.element.Tag)
            text = _convert_soup_to_md(section, url)
            final_sections.append(text)
    return final_sections


def _format_authors(authors: str) -> str:
    if not authors:
        return ""
    names = authors.split(",")
    names = [n.strip() for n in names if n.strip()]
    result = ", ".join(names[:3])
    if len(names) > 3:
        result += f", and {len(names) - 3} more authors"
    return result


def _parse_citation_metadata(metas: List[str]) -> Dict[str, Any]:
    metas = [item.replace("\n", " ") for item in metas]
    meta_string = " ".join(metas)
    authors, title, journal, year = "", "", "", None
    if len(metas) == 3:
        authors, title, journal = metas
    else:
        meta_string = re.sub(r"\.\s\d{4}[a-z]?\.", ".", meta_string)
        match = re.match(r"^(.*?\.\s)(.*?)(\.\s.*|$)", meta_string, re.DOTALL)
        if match:
            authors = match.group(1).strip() if match.group(1) else ""
            title = match.group(2).strip() if match.group(2) else ""
            journal = match.group(3).strip() if match.group(3) else ""
            if journal.startswith(". "):
                journal = journal[2:]

    if authors:
        parts = authors.strip(".").split(".")
        if parts and parts[-1].strip().isdigit():
            year = int(parts[-1].strip())
            authors = ".".join(parts[:-1])
        authors = _format_authors(authors)

    result = {
        "authors": authors,
        "year": year,
        "title": title,
        "journal": journal,
    }
    if not authors or not title:
        result["meta"] = meta_string
    return result


def _extract_citations(soup_biblist: bs4.element.Tag) -> List[Dict[str, Any]]:
    extracted = []
    for li in soup_biblist.find_all("li", recursive=False):
        metas = [x.text.strip() for x in li.find_all("span", class_="ltx_bibblock")]
        extracted.append(_parse_citation_metadata(metas))
    return extracted


def _parse_html(paper_id: str) -> Dict[str, Any]:
    url = HTML_URL.format(paper_id=paper_id)
    response = requests.get(url)
    response.raise_for_status()
    content = response.text

    soup = bs4.BeautifulSoup(content, features="lxml")
    article = soup.article
    assert article and isinstance(article, bs4.element.Tag)

    citations = []
    biblist_tag = article.find(class_="ltx_biblist")
    if biblist_tag and isinstance(biblist_tag, bs4.element.Tag):
        citations = _extract_citations(biblist_tag)

    toc = _generate_toc(article)
    sections = _build_by_toc(toc, article, url)
    return {
        "toc": toc.to_str(),
        "sections": sections,
        "citations": citations,
        "original_format": "html",
    }


def _parse_abs(paper_id: str) -> Dict[str, str]:
    url = ABS_URL.format(paper_id=paper_id)
    response = requests.get(url)
    response.raise_for_status()
    content = response.text

    soup = bs4.BeautifulSoup(content, features="lxml")
    title_tag = soup.find(class_="title")
    assert title_tag and isinstance(title_tag, bs4.element.Tag)
    title = title_tag.get_text().strip()
    title = title.replace("Title:", "")
    abstract_tag = soup.find(class_="abstract")
    assert abstract_tag and isinstance(abstract_tag, bs4.element.Tag)
    abstract = abstract_tag.get_text().strip()
    abstract = abstract.replace("Abstract:", "")
    return {"title": title, "abstract": abstract}


def _parse_pdf(paper_id: str) -> Dict[str, Any]:
    url = PDF_URL.format(paper_id=paper_id)
    with tempfile.NamedTemporaryFile(mode="w", delete=True) as temp_file:
        download_pdf(url, Path(temp_file.name))
        pages: List[str] = parse_pdf_file(Path(temp_file.name))
    return {
        "toc": "\n".join([f"Page {page_number}" for page_number in range(1, len(pages) + 1)]),
        "sections": pages,
        "citations": [],
        "original_format": "pdf",
    }


def arxiv_download(
    paper_id: str,
    include_citations: Optional[bool] = False,
    mode: Optional[str] = "html",
) -> str:
    """
    Downloads a paper from arXiv and converts it to text.
    Use mode = "html" by default.
    Fall back to mode = "pdf" if there are any problems with the HTML version.

    Returns a JSON object with the following structure:
    {
        "title": "...",
        "abstract": "...",
        "toc": "...",
        "sections": ["...", ...],
        "citations": [...]
    }
    Use `json.loads` to deserialize the result if you want to get specific fields.
    For example, `abstract = json.loads(arxiv_download("2409.06820v1"))["abstract"]`
    The "toc" key contains the table of contents, which sometimes has indexing for "sections".

    Args:
        paper_id: ID of the paper on arXiv. For instance: 2409.06820v1
        include_citations: include "citations" in the result or not. False by default.
        mode: Which version of the paper to use. Options: ["html", "pdf"]. "html" by default.
    """

    abs_meta = _parse_abs(paper_id)
    if mode == "html":
        try:
            content = _parse_html(paper_id)
        except requests.exceptions.HTTPError:
            content = _parse_pdf(paper_id)
    else:
        content = _parse_pdf(paper_id)

    if not include_citations and "citations" in content:
        content.pop("citations")

    return json.dumps({**abs_meta, **content}, ensure_ascii=False)
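A sketch of the call pattern described in the docstring above (hypothetical usage; requires network access to arxiv.org, and the paper ID is the docstring's own example):

    import json

    from academia_mcp.tools import arxiv_download

    paper = json.loads(arxiv_download("2409.06820v1"))
    print(paper["title"])
    # For HTML sources, "toc" lists section titles with indices into "sections";
    # for the PDF fallback, "sections" holds per-page text instead.
    print(paper["toc"])
    first_section = paper["sections"][0]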
academia_mcp/tools/arxiv_search.py
ADDED
@@ -0,0 +1,246 @@
# Based on
# https://github.com/jonatasgrosman/findpapers/blob/master/findpapers/searchers/arxiv_searcher.py
# https://info.arxiv.org/help/api/user-manual.html

import json
import re
from typing import Optional, List, Dict, Any, Union
from datetime import datetime, date
from urllib3.util.retry import Retry

import requests
import xmltodict

BASE_URL = "http://export.arxiv.org"
URL_TEMPLATE = "{base_url}/api/query?search_query={query}&start={start}&sortBy={sort_by}&sortOrder={sort_order}&max_results={limit}"
SORT_BY_OPTIONS = ("relevance", "lastUpdatedDate", "submittedDate")
SORT_ORDER_OPTIONS = ("ascending", "descending")


def _format_text_field(text: str) -> str:
    return " ".join([line.strip() for line in text.split() if line.strip()])


def _format_authors(authors: Union[List[Dict[str, str]], Dict[str, str]]) -> str:
    if not authors:
        return ""
    if isinstance(authors, dict):
        authors = [authors]
    names = [author["name"] for author in authors]
    result = ", ".join(names[:3])
    if len(names) > 3:
        result += f", and {len(names) - 3} more authors"
    return result


def _format_categories(categories: Union[List[Dict[str, Any]], Dict[str, Any]]) -> str:
    if not categories:
        return ""
    if isinstance(categories, dict):
        categories = [categories]
    clean_categories = [c.get("@term", "") for c in categories]
    clean_categories = [c.strip() for c in clean_categories if c.strip()]
    return ", ".join(clean_categories)


def _format_date(date: str) -> str:
    dt = datetime.strptime(date, "%Y-%m-%dT%H:%M:%SZ")
    return dt.strftime("%B %d, %Y")


def _clean_entry(entry: Dict[str, Any]) -> Dict[str, Any]:
    return {
        "id": entry["id"].split("/")[-1],
        "title": _format_text_field(entry["title"]),
        "authors": _format_authors(entry["author"]),
        "abstract": _format_text_field(entry["summary"]),
        "published": _format_date(entry["published"]),
        "updated": _format_date(entry["updated"]),
        "categories": _format_categories(entry.get("category", {})),
        "comment": _format_text_field(entry.get("arxiv:comment", {}).get("#text", "")),
    }


def _convert_to_yyyymmddtttt(date_str: str) -> str:
    try:
        date_obj = datetime.strptime(date_str, "%Y-%m-%d")
        return date_obj.strftime("%Y%m%d") + "0000"
    except ValueError as e:
        raise ValueError("Invalid date format. Please use YYYY-MM-DD format.") from e


def _has_cyrillic(text: str) -> bool:
    return bool(re.search("[а-яА-Я]", text))


def _compose_query(
    orig_query: str,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
) -> str:
    query: str = orig_query.replace(" AND NOT ", " ANDNOT ")
    if "-" in query:
        query = f"({query}) OR ({query.replace('-', ' ')})"

    if start_date or end_date:
        if not start_date:
            start_date = "1900-01-01"
        if not end_date:
            today = date.today()
            end_date = today.strftime("%Y-%m-%d")
        date_filter = (
            f"[{_convert_to_yyyymmddtttt(start_date)} TO {_convert_to_yyyymmddtttt(end_date)}]"
        )
        query = f"({query}) AND submittedDate:{date_filter}"

    query = query.replace(" ", "+")
    query = query.replace('"', "%22")
    query = query.replace("(", "%28")
    query = query.replace(")", "%29")
    return query


def _format_entries(
    entries: List[Dict[str, Any]],
    start_index: int,
    include_abstracts: bool,
    total_results: int,
) -> str:
    clean_entries: List[Dict[str, Any]] = []
    for entry_num, entry in enumerate(entries):
        clean_entry = _clean_entry(entry)
        if not include_abstracts:
            clean_entry.pop("abstract")
        clean_entry["index"] = start_index + entry_num
        clean_entries.append(clean_entry)
    return json.dumps(
        {
            "total_count": total_results,
            "returned_count": len(entries),
            "offset": start_index,
            "results": clean_entries,
        },
        ensure_ascii=False,
    )


def _get_results(url: str) -> requests.Response:
    retry_strategy = Retry(
        total=3,
        backoff_factor=3,
        status_forcelist=[500, 502, 503, 504],
        allowed_methods=["GET"],
    )

    session = requests.Session()
    adapter = requests.adapters.HTTPAdapter(max_retries=retry_strategy)
    session.mount("http://", adapter)

    try:
        response = session.get(url, timeout=30)
        response.raise_for_status()
        return response
    except (
        requests.exceptions.ConnectionError,
        requests.exceptions.RequestException,
    ) as e:
        print(f"Failed after {retry_strategy.total} retries: {str(e)}")
        raise

    return response


def arxiv_search(
    query: str,
    offset: Optional[int] = 0,
    limit: Optional[int] = 5,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    sort_by: Optional[str] = "relevance",
    sort_order: Optional[str] = "descending",
    include_abstracts: Optional[bool] = False,
) -> str:
    """
    Search arXiv papers with field-specific queries.

    Fields:
        all: (all fields), ti: (title), au: (author),
        abs: (abstract), cat: (category), id: (ID without version)

    Operators:
        AND, OR, ANDNOT

    Please always specify the fields. Search should always be field-specific.
    You can search for an exact match of an entire phrase by enclosing the phrase in double quotes.
    If you do not need an exact match of a phrase, use single terms with OR/AND.
    Boolean operators are strict. OR is better in most cases.
    Do not include date constraints in the query: use "start_date" and "end_date" parameters instead.
    Use Latin script for names. For example, search "Ilya Gusev" instead of "Илья Гусев".

    Example queries:
        all:"machine learning"
        au:"del maestro"
        au:vaswani AND abs:"attention is all"
        all:role OR all:playing OR all:"language model"
        (au:vaswani OR au:"del maestro") ANDNOT ti:attention

    Returns a JSON object serialized to a string. The structure is:
    {"total_count": ..., "returned_count": ..., "offset": ..., "results": [...]}
    Every item in the "results" has the following fields:
    ("index", "id", "title", "authors", "abstract", "published", "updated", "categories", "comment")
    Use `json.loads` to deserialize the result if you want to get specific fields.

    Args:
        query: The search query, required.
        offset: The offset to scroll search results. 10 items will be skipped if offset=10. 0 by default.
        limit: The maximum number of items to return. limit=5 by default, limit=10 is the maximum.
        start_date: Start date in %Y-%m-%d format. None by default.
        end_date: End date in %Y-%m-%d format. None by default.
        sort_by: 3 options to sort by: relevance, lastUpdatedDate, submittedDate. relevance by default.
        sort_order: 2 sort orders: ascending, descending. descending by default.
        include_abstracts: include abstracts in the result or not. False by default.
    """

    assert isinstance(query, str), "Error: Your search query must be a string"
    assert isinstance(offset, int), "Error: offset should be an integer"
    assert isinstance(limit, int), "Error: limit should be an integer"
    assert isinstance(sort_by, str), "Error: sort_by should be a string"
    assert isinstance(sort_order, str), "Error: sort_order should be a string"
    assert query.strip(), "Error: Your query should not be empty"
    assert sort_by in SORT_BY_OPTIONS, f"Error: sort_by should be one of {SORT_BY_OPTIONS}"
    assert (
        sort_order in SORT_ORDER_OPTIONS
    ), f"Error: sort_order should be one of {SORT_ORDER_OPTIONS}"
    assert offset >= 0, "Error: offset must be 0 or positive number"
    assert limit < 100, "Error: limit is too large, it should be less than 100"
    assert limit > 0, "Error: limit should be greater than 0"
    assert not _has_cyrillic(query), "Error: use only Latin script for queries"
    assert include_abstracts is not None, "Error: include_abstracts must be bool"

    fixed_query: str = _compose_query(query, start_date, end_date)
    url = URL_TEMPLATE.format(
        base_url=BASE_URL,
        query=fixed_query,
        start=offset,
        limit=limit,
        sort_by=sort_by,
        sort_order=sort_order,
    )

    response = _get_results(url)
    content = response.content
    parsed_content = xmltodict.parse(content)

    feed = parsed_content.get("feed", {})
    total_results = int(feed.get("opensearch:totalResults", {}).get("#text", 0))
    start_index = int(feed.get("opensearch:startIndex", {}).get("#text", 0))
    entries = feed.get("entry", [])
    if isinstance(entries, dict):
        entries = [entries]
    formatted_entries: str = _format_entries(
        entries,
        start_index=start_index,
        total_results=total_results,
        include_abstracts=include_abstracts,
    )
    return formatted_entries
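A usage sketch combining a field query with the separate date parameters, as the docstring recommends (hypothetical; the query and dates are illustrative):

    import json

    from academia_mcp.tools import arxiv_search

    raw = arxiv_search(
        'au:vaswani AND ti:"attention"',
        start_date="2017-01-01",
        end_date="2017-12-31",
        sort_by="submittedDate",
        include_abstracts=True,
    )
    for item in json.loads(raw)["results"]:
        print(item["index"], item["id"], item["title"])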
academia_mcp/tools/hf_datasets_search.py
ADDED
@@ -0,0 +1,82 @@
import json
from datetime import datetime
from typing import Optional, List, Dict, Any, Literal

from huggingface_hub import HfApi, DatasetInfo, hf_hub_download

HF_API = HfApi()


def _format_date(dt: Optional[datetime]) -> str:
    if not dt:
        return ""
    return dt.strftime("%B %d, %Y")


def _clean_entry(entry: DatasetInfo) -> Dict[str, Any]:
    try:
        readme_path = hf_hub_download(repo_id=entry.id, repo_type="dataset", filename="README.md")
        with open(readme_path, "r", encoding="utf-8") as f:
            readme_content = f.read()
    except Exception:
        readme_content = ""

    return {
        "id": entry.id,
        "created_at": _format_date(entry.created_at),
        "last_modified": _format_date(entry.last_modified),
        "downloads": entry.downloads,
        "likes": entry.likes,
        "tags": entry.tags,
        "readme": readme_content,
    }


def _format_entries(entries: List[DatasetInfo]) -> str:
    clean_entries: List[Dict[str, Any]] = [_clean_entry(entry) for entry in entries]
    return json.dumps({"results": clean_entries}, ensure_ascii=False)


def hf_datasets_search(
    query: Optional[str] = None,
    search_filter: Optional[List[str]] = None,
    limit: Optional[int] = 5,
    sort_by: Optional[str] = "trending_score",
    sort_order: Optional[str] = "descending",
) -> str:
    """
    Search or filter HF datasets.

    Examples:
        List only the datasets in Russian for language modeling:
        hf_datasets_search(search_filter=["language:ru", "task_ids:language-modeling"])

        List all recent datasets with "text" in their name:
        hf_datasets_search(query="text", sort_by="last_modified")

    Returns a JSON object serialized to a string. The structure is: {"results": [...]}
    Every item in the "results" has the following fields:
    ("id", "created_at", "last_modified", "downloads", "likes", "tags", "readme")
    Use `json.loads` to deserialize the result if you want to get specific fields.

    Args:
        query: The search query for the exact match search.
        search_filter: A list of strings to filter datasets.
        limit: The maximum number of items to return. limit=5 by default, limit=10 is the maximum.
        sort_by:
            The key with which to sort the resulting datasets.
            Possible values are "last_modified", "trending_score", "created_at", "downloads" and "likes".
            "trending_score" by default.
        sort_order: 2 sort orders: ascending, descending. descending by default.
    """
    direction: Optional[Literal[-1]] = -1 if sort_order == "descending" else None
    results = list(
        HF_API.list_datasets(
            search=query,
            sort=sort_by,
            direction=direction,
            limit=limit,
            filter=search_filter,
        )
    )
    return _format_entries(results)
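A sketch of the filter-based call style from the docstring (hypothetical usage; fetching each README requires network access to the Hugging Face Hub):

    import json

    from academia_mcp.tools import hf_datasets_search

    raw = hf_datasets_search(
        search_filter=["language:ru", "task_ids:language-modeling"],
        limit=3,
    )
    for ds in json.loads(raw)["results"]:
        print(ds["id"], ds["downloads"], ds["likes"])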
academia_mcp/tools/s2_citations.py
ADDED
@@ -0,0 +1,118 @@
# Based on
# https://api.semanticscholar.org/api-docs/graph#tag/Paper-Data/operation/get_graph_get_paper_citations

import json
from typing import Optional, List, Dict, Any
from urllib3.util.retry import Retry

import requests

OLD_API_URL_TEMPLATE = "https://api.semanticscholar.org/v1/paper/{paper_id}"
GRAPH_URL_TEMPLATE = "https://api.semanticscholar.org/graph/v1/paper/{paper_id}/citations?fields={fields}&offset={offset}&limit={limit}"
FIELDS = "title,authors,externalIds,venue,citationCount,publicationDate"


def _get_results(url: str) -> requests.Response:
    retry_strategy = Retry(
        total=3,
        backoff_factor=30,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )

    session = requests.Session()
    adapter = requests.adapters.HTTPAdapter(max_retries=retry_strategy)
    session.mount("https://", adapter)

    try:
        response = session.get(url, timeout=30)
        response.raise_for_status()
        return response
    except (
        requests.exceptions.ConnectionError,
        requests.exceptions.RequestException,
        requests.exceptions.HTTPError,
    ) as e:
        print(f"Failed after {retry_strategy.total} retries: {str(e)}")
        raise

    return response


def _format_authors(authors: List[Dict[str, Any]]) -> List[str]:
    return [a["name"] for a in authors]


def _clean_entry(entry: Dict[str, Any]) -> Dict[str, Any]:
    entry = entry["citingPaper"]
    external_ids = entry.get("externalIds")
    if not external_ids:
        external_ids = dict()
    external_ids.pop("CorpusId", None)
    arxiv_id = external_ids.pop("ArXiv", None)
    return {
        "arxiv_id": arxiv_id,
        "external_ids": external_ids if external_ids else None,
        "title": entry["title"],
        "authors": _format_authors(entry["authors"]),
        "venue": entry.get("venue", ""),
        "citation_count": entry.get("citationCount", 0),
        "publication_date": entry.get("publicationDate", ""),
    }


def _format_entries(
    entries: List[Dict[str, Any]],
    start_index: int,
    total_results: int,
) -> str:
    clean_entries = [_clean_entry(e) for e in entries]
    return json.dumps(
        {
            "total_count": total_results,
            "returned_count": len(entries),
            "offset": start_index,
            "results": clean_entries,
        },
        ensure_ascii=False,
    )


def s2_citations(
    arxiv_id: str,
    offset: Optional[int] = 0,
    limit: Optional[int] = 50,
) -> str:
    """
    Get all papers that cited a given arXiv paper based on Semantic Scholar info.

    Returns a JSON object serialized to a string. The structure is:
    {"total_count": ..., "returned_count": ..., "offset": ..., "results": [...]}
    Every item in the "results" has the following fields:
    ("arxiv_id", "external_ids", "title", "authors", "venue", "citation_count", "publication_date")
    Use `json.loads` to deserialize the result if you want to get specific fields.

    Args:
        arxiv_id: The ID of a given arXiv paper.
        offset: The offset to scroll through citations. 10 items will be skipped if offset=10. 0 by default.
        limit: The maximum number of items to return. limit=50 by default.
    """

    assert isinstance(arxiv_id, str), "Error: Your arxiv_id must be a string"
    if "v" in arxiv_id:
        arxiv_id = arxiv_id.split("v")[0]
    paper_id = f"arxiv:{arxiv_id}"

    url = GRAPH_URL_TEMPLATE.format(paper_id=paper_id, fields=FIELDS, offset=offset, limit=limit)
    response = _get_results(url)
    result = response.json()
    entries = result["data"]
    total_count = len(result["data"]) + result["offset"]

    if "next" in result:
        paper_url = OLD_API_URL_TEMPLATE.format(paper_id=paper_id)
        paper_response = _get_results(paper_url)
        paper_result = paper_response.json()
        total_count = paper_result["numCitedBy"]

    return _format_entries(entries, offset if offset else 0, total_count)
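A usage sketch (hypothetical; the arXiv ID is an illustrative example). The unauthenticated Semantic Scholar API is rate-limited, which is why the retry strategy above backs off on HTTP 429:

    import json

    from academia_mcp.tools import s2_citations

    raw = s2_citations("1706.03762", limit=10)
    data = json.loads(raw)
    print(data["total_count"], "citing papers")
    for item in data["results"]:
        print(item["publication_date"], item["title"])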
academia_mcp-0.0.2.dist-info/METADATA
ADDED
@@ -0,0 +1,33 @@
Metadata-Version: 2.4
Name: academia-mcp
Version: 0.0.2
Summary: MCP server that provides different tools to search for scientific publications
Author-email: Ilya Gusev <phoenixilya@gmail.com>
Project-URL: Homepage, https://github.com/IlyaGusev/academia_mcp
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Requires-Python: >=3.12
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: mcp==1.9.2
Requires-Dist: xmltodict>=0.14.0
Requires-Dist: types-xmltodict>=0.14.0
Requires-Dist: requests>=2.32.0
Requires-Dist: types-requests>=2.32.0
Requires-Dist: pypdf>=5.1.0
Requires-Dist: beautifulsoup4>=4.12.0
Requires-Dist: types-beautifulsoup4>=4.12.0
Requires-Dist: markdownify==0.14.1
Requires-Dist: acl-anthology==0.5.2
Requires-Dist: markdown==3.7.0
Requires-Dist: types-markdown==3.7.0.20250322
Requires-Dist: black==25.1.0
Requires-Dist: mypy==1.16.0
Requires-Dist: flake8==7.2.0
Requires-Dist: huggingface-hub>=0.32.4
Dynamic: license-file

# Academia MCP

A collection of MCP tools related to the search of scientific papers.
academia_mcp-0.0.2.dist-info/RECORD
ADDED
@@ -0,0 +1,15 @@
academia_mcp/__init__.py,sha256=0wvKJeTFVLXWx97eL76D6SRNF4o5IUxwv83lWiG3Xiw,89
academia_mcp/__main__.py,sha256=rcmsOtJd3SA82exjrcGBuxuptcoxF8AXI7jNjiVq2BY,59
academia_mcp/server.py,sha256=2OIyYdrKZ4bKwgywAYg4iswicIezgm5Qqa7TYluePB4,532
academia_mcp/tools/__init__.py,sha256=KZuteEnRZSJFl1E_VIH7V3hAHS6WUl-K4Ge41D9q58M,347
academia_mcp/tools/anthology_search.py,sha256=Wnpd6ovSeCYXjuzisV37hg_Kdh4NVC_1Y93VLIy2Gus,7728
academia_mcp/tools/arxiv_download.py,sha256=fJ1PCgnpQvPKXv6VLKLcL_Hd_hz9LhPM-NAq3gYehGk,11158
academia_mcp/tools/arxiv_search.py,sha256=-A5-7PP-kYgjQjMreRGKkOwD4AQf7nVt6yh8vHsFSo4,9048
academia_mcp/tools/hf_datasets_search.py,sha256=KiBkqT4rXjEN4oc1AWZOPnqN_Go90TQogY5-DUm3LQo,2854
academia_mcp/tools/s2_citations.py,sha256=xIwaGaAcLNBJ8_IAJtFiN2h_164yvz_jcsl6c-wFzAA,3988
academia_mcp-0.0.2.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
academia_mcp-0.0.2.dist-info/METADATA,sha256=p89qDQZQ_m2sgKBgKZYfAKBaFnciP0SfHNqSVZlTn6c,1150
academia_mcp-0.0.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
academia_mcp-0.0.2.dist-info/entry_points.txt,sha256=gxkiKJ74w2FwJpSECpjA3XtCfI5ZfrM6N8cqnwsq4yY,51
academia_mcp-0.0.2.dist-info/top_level.txt,sha256=CzGpRFsRRJRqWEb1e3SUlcfGqRzOxevZGaJWrtGF8W0,13
academia_mcp-0.0.2.dist-info/RECORD,,
academia_mcp-0.0.2.dist-info/licenses/LICENSE
ADDED
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
academia_mcp-0.0.2.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
academia_mcp