all-in-mcp 0.2.3-py3-none-any.whl → 0.2.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- all_in_mcp/academic_platforms/__init__.py +2 -1
- all_in_mcp/academic_platforms/crossref.py +256 -0
- all_in_mcp/academic_platforms/google_scholar.py +245 -0
- all_in_mcp/server.py +211 -0
- all_in_mcp-0.2.6.dist-info/METADATA +129 -0
- all_in_mcp-0.2.6.dist-info/RECORD +14 -0
- all_in_mcp-0.2.3.dist-info/METADATA +0 -218
- all_in_mcp-0.2.3.dist-info/RECORD +0 -12
- {all_in_mcp-0.2.3.dist-info → all_in_mcp-0.2.6.dist-info}/WHEEL +0 -0
- {all_in_mcp-0.2.3.dist-info → all_in_mcp-0.2.6.dist-info}/entry_points.txt +0 -0
- {all_in_mcp-0.2.3.dist-info → all_in_mcp-0.2.6.dist-info}/licenses/LICENSE +0 -0
all_in_mcp/academic_platforms/__init__.py CHANGED

```diff
@@ -1,6 +1,7 @@
 # all_in_mcp/academic_platforms/__init__.py
 from .base import PaperSource
 from .cryptobib import CryptoBibSearcher
+from .google_scholar import GoogleScholarSearcher
 from .iacr import IACRSearcher
 
-__all__ = ["CryptoBibSearcher", "IACRSearcher", "PaperSource"]
+__all__ = ["CryptoBibSearcher", "GoogleScholarSearcher", "IACRSearcher", "PaperSource"]
```
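Because the new searcher is re-exported via `__all__`, it can be imported straight from the subpackage. A minimal sketch (not part of the diff itself):

```python
# Sketch: the re-export added above makes this import path available.
from all_in_mcp.academic_platforms import GoogleScholarSearcher

searcher = GoogleScholarSearcher()
```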
all_in_mcp/academic_platforms/crossref.py ADDED

```python
# all_in_mcp/academic_platforms/crossref.py
import logging
from datetime import datetime
from typing import Optional
from urllib.parse import quote_plus

import httpx

from ..paper import Paper
from .base import PaperSource

logger = logging.getLogger(__name__)


class CrossrefSearcher(PaperSource):
    """Crossref API paper search implementation"""

    BASE_URL = "https://api.crossref.org"
    WORKS_ENDPOINT = f"{BASE_URL}/works"

    def __init__(self, email: Optional[str] = None):
        """
        Initialize Crossref searcher

        Args:
            email: Optional email for polite API usage (recommended by Crossref)
        """
        self.email = email
        self.client = httpx.Client(timeout=30.0)

    def _get_headers(self) -> dict:
        """Get headers for API requests"""
        headers = {
            "User-Agent": "all-in-mcp/0.1.0 (https://github.com/user/all-in-mcp)"
        }
        if self.email:
            headers["User-Agent"] += f" (mailto:{self.email})"
        return headers

    def _parse_date(self, date_parts: list) -> Optional[datetime]:
        """Parse Crossref date parts into datetime"""
        if not date_parts or not isinstance(date_parts, list):
            return None

        try:
            # Crossref provides date as [[year, month, day]] or [[year, month]] or [[year]]
            if len(date_parts) > 0 and isinstance(date_parts[0], list):
                parts = date_parts[0]
                year = parts[0] if len(parts) > 0 else 1
                month = parts[1] if len(parts) > 1 else 1
                day = parts[2] if len(parts) > 2 else 1
                return datetime(year, month, day)
        except (ValueError, IndexError, TypeError):
            pass
        return None

    def _extract_authors(self, authors_data: list) -> list[str]:
        """Extract author names from Crossref author data"""
        authors = []
        for author in authors_data or []:
            if isinstance(author, dict):
                given = author.get("given", "")
                family = author.get("family", "")
                if given and family:
                    authors.append(f"{given} {family}")
                elif family:
                    authors.append(family)
                elif given:
                    authors.append(given)
        return authors

    def _parse_work(self, work: dict) -> Optional[Paper]:
        """Parse a single work from Crossref API response"""
        try:
            # Extract basic information
            title_list = work.get("title", [])
            title = title_list[0] if title_list else ""

            if not title:
                return None

            doi = work.get("DOI", "")
            paper_id = doi or work.get("URL", "")

            # Extract authors
            authors = self._extract_authors(work.get("author", []))

            # Extract abstract
            abstract = work.get("abstract", "")
            if abstract:
                # Remove HTML tags if present
                import re

                abstract = re.sub(r"<[^>]+>", "", abstract)

            # Extract publication date
            published_date = (
                self._parse_date(work.get("published-print", {}).get("date-parts"))
                or self._parse_date(work.get("published-online", {}).get("date-parts"))
                or self._parse_date(work.get("created", {}).get("date-parts"))
            )

            # Extract URLs
            url = work.get("URL", "")
            pdf_url = ""

            # Look for PDF in links
            links = work.get("link", [])
            for link in links:
                if link.get("content-type") == "application/pdf":
                    pdf_url = link.get("URL", "")
                    break

            # Extract additional metadata
            container_title = work.get("container-title", [])
            journal = container_title[0] if container_title else ""

            volume = work.get("volume", "")
            issue = work.get("issue", "")
            pages = work.get("page", "")

            # Extract categories/subjects
            categories = []
            subjects = work.get("subject", [])
            if subjects:
                categories.extend(subjects)

            # Citation count (if available)
            citations = work.get("is-referenced-by-count", 0)

            # Build extra metadata
            extra = {
                "journal": journal,
                "volume": volume,
                "issue": issue,
                "pages": pages,
                "type": work.get("type", ""),
                "publisher": work.get("publisher", ""),
                "issn": work.get("ISSN", []),
                "isbn": work.get("ISBN", []),
            }

            # Remove empty values from extra
            extra = {k: v for k, v in extra.items() if v}

            return Paper(
                paper_id=paper_id,
                title=title,
                authors=authors,
                abstract=abstract,
                doi=doi,
                published_date=published_date or datetime(1900, 1, 1),
                pdf_url=pdf_url,
                url=url,
                source="crossref",
                categories=categories,
                citations=citations,
                extra=extra,
            )

        except Exception as e:
            logger.error(f"Error parsing Crossref work: {e}")
            return None

    def search(
        self,
        query: str,
        max_results: int = 10,
        year_min: Optional[int] = None,
        year_max: Optional[int] = None,
        sort_by: str = "relevance",
        **kwargs,
    ) -> list[Paper]:
        """
        Search for papers using Crossref API

        Args:
            query: Search query string
            max_results: Maximum number of results to return
            year_min: Minimum publication year
            year_max: Maximum publication year
            sort_by: Sort order (relevance, published, indexed, updated)
        """
        if not query.strip():
            return []

        try:
            params = {
                "query": query,
                "rows": min(max_results, 1000),  # Crossref max is 1000
                "sort": sort_by,
                "select": "DOI,title,author,abstract,published-print,published-online,created,URL,container-title,volume,issue,page,subject,is-referenced-by-count,type,publisher,ISSN,ISBN,link",
            }

            # Add year filters if specified
            filters = []
            if year_min:
                filters.append(f"from-pub-date:{year_min}")
            if year_max:
                filters.append(f"until-pub-date:{year_max}")

            if filters:
                params["filter"] = ",".join(filters)

            response = self.client.get(
                self.WORKS_ENDPOINT, params=params, headers=self._get_headers()
            )
            response.raise_for_status()

            data = response.json()
            works = data.get("message", {}).get("items", [])

            papers = []
            for work in works:
                paper = self._parse_work(work)
                if paper:
                    papers.append(paper)

            return papers[:max_results]

        except Exception as e:
            logger.error(f"Error searching Crossref: {e}")
            return []

    def download_pdf(self, paper_id: str, save_path: str) -> str:
        """
        Not implemented: Download PDF for a paper (limited functionality for Crossref)
        """
        return "Crossref does not provide a direct way to download PDFs. Use the paper's URL or DOI to access the publisher's site for PDF downloads if available."

    def read_paper(self, paper_id: str, save_path: str) -> str:
        """
        Crossref doesn't provide a direct way to read paper text.
        """
        return "Crossref does not provide a direct way to read paper text. Use the download_pdf method to get the PDF if available."

    def search_by_doi(self, doi: str) -> Optional[Paper]:
        """Search for a specific paper by DOI"""
        try:
            work_url = f"{self.WORKS_ENDPOINT}/{quote_plus(doi)}"
            response = self.client.get(work_url, headers=self._get_headers())
            response.raise_for_status()

            data = response.json()
            work = data.get("message", {})

            return self._parse_work(work)

        except Exception as e:
            logger.error(f"Error searching by DOI {doi}: {e}")
            return None

    def __del__(self):
        """Clean up HTTP client"""
        if hasattr(self, "client"):
            self.client.close()
```
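For orientation, a minimal usage sketch of `CrossrefSearcher` as added above; the email, query, and DOI values are illustrative, and the `Paper` fields come from `all_in_mcp/paper.py`:

```python
# Sketch: exercising CrossrefSearcher directly (all values illustrative).
from all_in_mcp.academic_platforms.crossref import CrossrefSearcher

# Supplying an email is optional; it only extends the User-Agent header
# in line with Crossref's "polite" usage guidelines.
searcher = CrossrefSearcher(email="you@example.com")  # placeholder address

papers = searcher.search("post-quantum signatures", max_results=5, year_min=2020)
for paper in papers:
    print(paper.title, paper.doi, paper.citations)

# Direct lookup by DOI; returns None if the request or parsing fails.
paper = searcher.search_by_doi("10.1000/xyz123")  # placeholder DOI
```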
all_in_mcp/academic_platforms/google_scholar.py ADDED

```python
# all_in_mcp/academic_platforms/google_scholar.py
import logging
import random
import time
from datetime import datetime
from typing import Optional

import requests
from bs4 import BeautifulSoup

from ..paper import Paper
from .base import PaperSource

logger = logging.getLogger(__name__)


class GoogleScholarSearcher(PaperSource):
    """Google Scholar paper search implementation"""

    SCHOLAR_URL = "https://scholar.google.com/scholar"
    BROWSERS = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:89.0) Gecko/20100101 Firefox/89.0",
    ]

    def __init__(self):
        """Initialize Google Scholar searcher"""
        self._setup_session()

    def _setup_session(self):
        """Initialize session with random user agent"""
        self.session = requests.Session()
        self.session.headers.update(
            {
                "User-Agent": random.choice(self.BROWSERS),
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                "Accept-Language": "en-US,en;q=0.9",
                "Accept-Encoding": "gzip, deflate",
                "DNT": "1",
                "Connection": "keep-alive",
                "Upgrade-Insecure-Requests": "1",
            }
        )

    def _extract_year(self, text: str) -> Optional[int]:
        """Extract publication year from text"""
        words = text.replace(",", " ").replace("-", " ").split()
        for word in words:
            if word.isdigit() and 1900 <= int(word) <= datetime.now().year:
                return int(word)
        return None

    def _extract_citations(self, item) -> int:
        """Extract citation count from paper item"""
        try:
            citation_elem = item.find("div", class_="gs_fl")
            if citation_elem:
                citation_link = citation_elem.find(
                    "a", string=lambda text: text and "Cited by" in text
                )
                if citation_link:
                    citation_text = citation_link.get_text()
                    # Extract number from "Cited by X" text
                    citation_num = "".join(filter(str.isdigit, citation_text))
                    return int(citation_num) if citation_num else 0
            return 0
        except Exception:
            return 0

    def _parse_paper(self, item) -> Optional[Paper]:
        """Parse a single paper entry from HTML"""
        try:
            # Extract main paper elements
            title_elem = item.find("h3", class_="gs_rt")
            info_elem = item.find("div", class_="gs_a")
            abstract_elem = item.find("div", class_="gs_rs")

            if not title_elem or not info_elem:
                return None

            # Process title and URL
            title_text = title_elem.get_text(strip=True)
            # Remove common prefixes
            title = (
                title_text.replace("[PDF]", "")
                .replace("[HTML]", "")
                .replace("[BOOK]", "")
                .strip()
            )

            link = title_elem.find("a", href=True)
            url = link["href"] if link else ""

            # Process author and publication info
            info_text = info_elem.get_text()
            info_parts = info_text.split(" - ")

            # Extract authors (usually the first part before the first dash)
            authors_text = info_parts[0] if info_parts else ""
            authors = [a.strip() for a in authors_text.split(",") if a.strip()]

            # Extract year from the info text
            year = self._extract_year(info_text)

            # Extract abstract
            abstract = abstract_elem.get_text(strip=True) if abstract_elem else ""

            # Extract citations
            citations = self._extract_citations(item)

            # Generate a paper ID based on the URL or title
            paper_id = f"gs_{abs(hash(url if url else title))}"

            # Create paper object
            return Paper(
                paper_id=paper_id,
                title=title,
                authors=authors,
                abstract=abstract,
                url=url,
                pdf_url="",  # Google Scholar doesn't provide direct PDF links
                published_date=datetime(year, 1, 1) if year else datetime.now(),
                updated_date=None,
                source="google_scholar",
                categories=[],
                keywords=[],
                doi="",
                citations=citations,
                references=[],
                extra={"info_text": info_text},
            )
        except Exception as e:
            logger.warning(f"Failed to parse paper: {e}")
            return None

    def search(self, query: str, max_results: int = 10, **kwargs) -> list[Paper]:
        """
        Search Google Scholar for papers

        Args:
            query: Search query string
            max_results: Maximum number of results to return
            **kwargs: Additional search parameters (e.g., year_low, year_high)

        Returns:
            List of Paper objects
        """
        papers = []
        start = 0
        results_per_page = min(10, max_results)

        # Extract additional parameters
        year_low = kwargs.get("year_low")
        year_high = kwargs.get("year_high")

        while len(papers) < max_results:
            try:
                # Construct search parameters
                params = {
                    "q": query,
                    "start": start,
                    "hl": "en",
                    "as_sdt": "0,5",  # Include articles and citations
                    "num": results_per_page,
                }

                # Add year filters if provided
                if year_low:
                    params["as_ylo"] = year_low
                if year_high:
                    params["as_yhi"] = year_high

                # Make request with random delay to avoid rate limiting
                time.sleep(random.uniform(1.0, 3.0))
                response = self.session.get(self.SCHOLAR_URL, params=params, timeout=30)

                if response.status_code != 200:
                    logger.error(f"Search failed with status {response.status_code}")
                    break

                # Parse results
                soup = BeautifulSoup(response.text, "html.parser")
                results = soup.find_all("div", class_="gs_ri")

                if not results:
                    logger.info("No more results found")
                    break

                # Process each result
                for item in results:
                    if len(papers) >= max_results:
                        break

                    paper = self._parse_paper(item)
                    if paper:
                        papers.append(paper)

                start += results_per_page

            except requests.exceptions.RequestException as e:
                logger.error(f"Network error during search: {e}")
                break
            except Exception as e:
                logger.error(f"Search error: {e}")
                break

        return papers[:max_results]

    def download_pdf(self, paper_id: str, save_path: str) -> str:
        """
        Google Scholar doesn't support direct PDF downloads

        Args:
            paper_id: Paper identifier
            save_path: Directory to save the PDF

        Returns:
            Error message explaining limitation

        Raises:
            NotImplementedError: Always raises this error
        """
        raise NotImplementedError(
            "Google Scholar doesn't provide direct PDF downloads. "
            "Please use the paper URL to access the publisher's website."
        )

    def read_paper(self, paper_id: str, save_path: str = "./downloads") -> str:
        """
        Google Scholar doesn't support direct paper reading

        Args:
            paper_id: Paper identifier
            save_path: Directory where papers are stored

        Returns:
            Message indicating the feature is not supported
        """
        return (
            "Google Scholar doesn't support direct paper reading. "
            "Please use the paper URL to access the full text on the publisher's website."
        )
```
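Likewise, a minimal sketch of `GoogleScholarSearcher` (query and year bounds are illustrative; note the searcher sleeps 1-3 s between requests to reduce rate limiting):

```python
# Sketch: exercising GoogleScholarSearcher directly (values illustrative).
from all_in_mcp.academic_platforms.google_scholar import GoogleScholarSearcher

searcher = GoogleScholarSearcher()

# year_low/year_high are forwarded to Scholar's as_ylo/as_yhi parameters.
papers = searcher.search("lattice reduction", max_results=3, year_low=2018)
for paper in papers:
    print(paper.title, paper.citations, paper.url)
```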
all_in_mcp/server.py CHANGED

```diff
@@ -4,6 +4,8 @@ from mcp.server import NotificationOptions, Server
 from mcp.server.models import InitializationOptions
 
 from .academic_platforms.cryptobib import CryptoBibSearcher
+from .academic_platforms.crossref import CrossrefSearcher
+from .academic_platforms.google_scholar import GoogleScholarSearcher
 
 # Import searchers
 from .academic_platforms.iacr import IACRSearcher
@@ -14,6 +16,8 @@ server = Server("all-in-mcp")
 # Initialize searchers
 iacr_searcher = IACRSearcher()
 cryptobib_searcher = CryptoBibSearcher(cache_dir="./downloads")
+crossref_searcher = CrossrefSearcher()
+google_scholar_searcher = GoogleScholarSearcher()
 
 
 @server.list_tools()
@@ -122,6 +126,65 @@ async def handle_list_tools() -> list[types.Tool]:
                 "required": ["query"],
             },
         ),
+        types.Tool(
+            name="search-google-scholar-papers",
+            description="Search academic papers from Google Scholar",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "query": {
+                        "type": "string",
+                        "description": "Search query string (e.g., 'machine learning', 'neural networks')",
+                    },
+                    "max_results": {
+                        "type": "integer",
+                        "description": "Maximum number of papers to return (default: 10)",
+                        "default": 10,
+                    },
+                    "year_low": {
+                        "type": "integer",
+                        "description": "Minimum publication year (optional)",
+                    },
+                    "year_high": {
+                        "type": "integer",
+                        "description": "Maximum publication year (optional)",
+                    },
+                },
+                "required": ["query"],
+            },
+        ),
+        types.Tool(
+            name="search-crossref-papers",
+            description="Search academic papers from Crossref database",
+            inputSchema={
+                "type": "object",
+                "properties": {
+                    "query": {
+                        "type": "string",
+                        "description": "Search query string (e.g., 'quantum computing', 'machine learning')",
+                    },
+                    "max_results": {
+                        "type": "integer",
+                        "description": "Maximum number of papers to return (default: 10)",
+                        "default": 10,
+                    },
+                    "year_min": {
+                        "type": "integer",
+                        "description": "Minimum publication year (optional)",
+                    },
+                    "year_max": {
+                        "type": "integer",
+                        "description": "Maximum publication year (optional)",
+                    },
+                    "sort_by": {
+                        "type": "string",
+                        "description": "Sort order: relevance, published, indexed, updated (default: relevance)",
+                        "default": "relevance",
+                    },
+                },
+                "required": ["query"],
+            },
+        ),
         types.Tool(
             name="read-pdf",
             description="Read and extract text content from a PDF file (local or online)",
@@ -337,6 +400,154 @@ async def handle_call_tool(
 
         return [types.TextContent(type="text", text=result_text)]
 
+    elif name == "search-google-scholar-papers":
+        query = arguments.get("query", "")
+        max_results = arguments.get("max_results", 10)
+        year_low = arguments.get("year_low")
+        year_high = arguments.get("year_high")
+
+        if not query:
+            return [
+                types.TextContent(
+                    type="text", text="Error: Query parameter is required"
+                )
+            ]
+
+        try:
+            papers = google_scholar_searcher.search(
+                query,
+                max_results=max_results,
+                year_low=year_low,
+                year_high=year_high,
+            )
+
+            if not papers:
+                year_filter_msg = ""
+                if year_low or year_high:
+                    year_range = (
+                        f" ({year_low or 'earliest'}-{year_high or 'latest'})"
+                    )
+                    year_filter_msg = f" in year range{year_range}"
+                return [
+                    types.TextContent(
+                        type="text",
+                        text=f"No papers found for query: {query}{year_filter_msg}",
+                    )
+                ]
+
+            year_filter_msg = ""
+            if year_low or year_high:
+                year_range = f" ({year_low or 'earliest'}-{year_high or 'latest'})"
+                year_filter_msg = f" in year range{year_range}"
+
+            result_text = f"Found {len(papers)} Google Scholar papers for query '{query}'{year_filter_msg}:\n\n"
+            for i, paper in enumerate(papers, 1):
+                result_text += f"{i}. **{paper.title}**\n"
+                result_text += f"   - Authors: {', '.join(paper.authors)}\n"
+                if paper.citations > 0:
+                    result_text += f"   - Citations: {paper.citations}\n"
+                if paper.published_date and paper.published_date.year > 1900:
+                    result_text += f"   - Year: {paper.published_date.year}\n"
+                if paper.url:
+                    result_text += f"   - URL: {paper.url}\n"
+                if paper.abstract:
+                    # Truncate abstract for readability
+                    abstract_preview = (
+                        paper.abstract[:300] + "..."
+                        if len(paper.abstract) > 300
+                        else paper.abstract
+                    )
+                    result_text += f"   - Abstract: {abstract_preview}\n"
+                result_text += "\n"
+
+            return [types.TextContent(type="text", text=result_text)]
+
+        except Exception as e:
+            return [
+                types.TextContent(
+                    type="text", text=f"Error searching Google Scholar: {e!s}"
+                )
+            ]
+
+    elif name == "search-crossref-papers":
+        query = arguments.get("query", "")
+        max_results = arguments.get("max_results", 10)
+        year_min = arguments.get("year_min")
+        year_max = arguments.get("year_max")
+        sort_by = arguments.get("sort_by", "relevance")
+
+        if not query:
+            return [
+                types.TextContent(
+                    type="text", text="Error: Query parameter is required"
+                )
+            ]
+
+        try:
+            papers = crossref_searcher.search(
+                query,
+                max_results=max_results,
+                year_min=year_min,
+                year_max=year_max,
+                sort_by=sort_by,
+            )
+
+            if not papers:
+                year_filter_msg = ""
+                if year_min or year_max:
+                    year_range = (
+                        f" ({year_min or 'earliest'}-{year_max or 'latest'})"
+                    )
+                    year_filter_msg = f" in year range{year_range}"
+                return [
+                    types.TextContent(
+                        type="text",
+                        text=f"No papers found for query: {query}{year_filter_msg}",
+                    )
+                ]
+
+            year_filter_msg = ""
+            if year_min or year_max:
+                year_range = f" ({year_min or 'earliest'}-{year_max or 'latest'})"
+                year_filter_msg = f" in year range{year_range}"
+
+            result_text = f"Found {len(papers)} Crossref papers for query '{query}'{year_filter_msg}:\n\n"
+            for i, paper in enumerate(papers, 1):
+                result_text += f"{i}. **{paper.title}**\n"
+                result_text += f"   - Authors: {', '.join(paper.authors)}\n"
+                if paper.doi:
+                    result_text += f"   - DOI: {paper.doi}\n"
+                if paper.citations > 0:
+                    result_text += f"   - Citations: {paper.citations}\n"
+                if paper.published_date and paper.published_date.year > 1900:
+                    result_text += f"   - Year: {paper.published_date.year}\n"
+                if paper.extra and paper.extra.get("journal"):
+                    result_text += f"   - Journal: {paper.extra['journal']}\n"
+                if paper.extra and paper.extra.get("volume"):
+                    result_text += f"   - Volume: {paper.extra['volume']}\n"
+                if paper.extra and paper.extra.get("pages"):
+                    result_text += f"   - Pages: {paper.extra['pages']}\n"
+                if paper.url:
+                    result_text += f"   - URL: {paper.url}\n"
+                if paper.abstract:
+                    # Truncate abstract for readability
+                    abstract_preview = (
+                        paper.abstract[:300] + "..."
+                        if len(paper.abstract) > 300
+                        else paper.abstract
+                    )
+                    result_text += f"   - Abstract: {abstract_preview}\n"
+                result_text += "\n"
+
+            return [types.TextContent(type="text", text=result_text)]
+
+        except Exception as e:
+            return [
+                types.TextContent(
+                    type="text", text=f"Error searching Crossref: {e!s}"
+                )
+            ]
+
     elif name == "read-pdf":
         pdf_source = arguments.get("pdf_source", "")
 
```
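End to end, the new tools are reachable through any MCP client. A hedged sketch using the `mcp` Python SDK's stdio client (assuming its standard `ClientSession`/`stdio_client` API; the query values are illustrative):

```python
# Sketch: calling the new search-crossref-papers tool over stdio.
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def main():
    server = StdioServerParameters(command="uv", args=["run", "all-in-mcp"])
    async with stdio_client(server) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            # Arguments mirror the inputSchema declared in handle_list_tools.
            result = await session.call_tool(
                "search-crossref-papers",
                {"query": "zero-knowledge proofs", "max_results": 5, "year_min": 2020},
            )
            print(result)


asyncio.run(main())
```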
all_in_mcp-0.2.6.dist-info/METADATA ADDED

````text
Metadata-Version: 2.4
Name: all-in-mcp
Version: 0.2.6
Summary: An MCP (Model Context Protocol) server providing daily-use utility functions and academic paper search capabilities
Project-URL: Homepage, https://github.com/jiahaoxiang2000/all-in-mcp
Project-URL: Repository, https://github.com/jiahaoxiang2000/all-in-mcp
Project-URL: Documentation, https://github.com/jiahaoxiang2000/all-in-mcp/tree/main/docs
Project-URL: Issues, https://github.com/jiahaoxiang2000/all-in-mcp/issues
Author-email: isomo <jiahaoxiang2000@gmail.com>
License: MIT
License-File: LICENSE
Keywords: academic,iacr,mcp,model-context-protocol,papers,utilities
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Topic :: Scientific/Engineering
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: General
Requires-Python: >=3.10
Requires-Dist: beautifulsoup4>=4.12.0
Requires-Dist: feedparser>=6.0.10
Requires-Dist: httpx>=0.24.0
Requires-Dist: mcp>=1.9.4
Requires-Dist: pypdf>=4.0.0
Requires-Dist: requests>=2.31.0
Provides-Extra: all
Requires-Dist: psutil>=5.9.0; extra == 'all'
Provides-Extra: dev
Requires-Dist: build>=1.0.0; extra == 'dev'
Requires-Dist: mypy>=1.5.0; extra == 'dev'
Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
Requires-Dist: pytest-cov>=4.1.0; extra == 'dev'
Requires-Dist: pytest>=8.0.0; extra == 'dev'
Requires-Dist: ruff>=0.1.0; extra == 'dev'
Requires-Dist: twine>=4.0.0; extra == 'dev'
Provides-Extra: system
Requires-Dist: psutil>=5.9.0; extra == 'system'
Description-Content-Type: text/markdown

# All-in-MCP

An MCP (Model Context Protocol) server that provides utility functions, including academic paper search capabilities.

- [**Paper Tools overview _Video_**](https://www.bilibili.com/video/BV1RMKWzdEk8)
- [_Overview PDF_](https://github.com/jiahaoxiang2000/tutor/blob/main/Apaper/setup.pdf)

## Features

This MCP server exposes the following daily-use utility tools as MCP endpoints:

### Available Tools

- **Academic Research**:
  - `search-iacr-papers`: Search academic papers from IACR ePrint Archive
  - `download-iacr-paper`: Download PDF of an IACR ePrint paper
  - `read-iacr-paper`: Read and extract text content from an IACR ePrint paper PDF
- **Bibliography Search**:
  - `search-cryptobib-papers`: Search CryptoBib bibliography database for cryptography papers (structured metadata or raw BibTeX)
- **Crossref Search**:
  - `search-crossref-papers`: Search academic papers from Crossref database
- **Google Scholar**:
  - `search-google-scholar-papers`: Search academic papers across disciplines with citation data
- **PDF Reading**:
  - `read-pdf`: Read and extract text from local and online PDF files

All tools are implemented as async MCP endpoints with proper validation and error handling.

## Quick Start

- [**Video for Env Setup**](https://www.bilibili.com/video/BV1cZKozaEjg)
- [_Overview PDF_](https://github.com/jiahaoxiang2000/tutor/blob/main/Apaper/config.pdf)

### Prerequisites

- Python 3.12 or higher
- UV package manager

### Installation

Install from PyPI (Recommended by `UV`)

```bash
uv pip install all-in-mcp
```

### Integration with MCP Clients Vscode

Add this server to your MCP client configuration. The server runs using stdio transport.

Example configuration for Vscode:

```json .vscode/mcp.json
{
  "servers": {
    "all-in-mcp": {
      "type": "stdio",
      "command": "uv",
      "args": ["run", "all-in-mcp"]
    }
  }
}
```

<details>
<summary>Development</summary>

For development setup and contribution guidelines, see the [Development Guide](docs/development.md).

### Quick Development Setup

```bash
# Clone the repository
git clone https://github.com/jiahaoxiang2000/all-in-mcp.git
cd all-in-mcp

# Install with development dependencies
uv sync --extra dev

# Run tests
uv run pytest
```

</details>
````
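After upgrading, the new version and dependency pins can be read back from this metadata with the standard library (a sketch; requires `all-in-mcp` installed):

```python
# Sketch: inspecting the installed distribution's metadata.
from importlib.metadata import metadata, version

print(version("all-in-mcp"))        # expected: 0.2.6
md = metadata("all-in-mcp")
print(md["Summary"])
print(md.get_all("Requires-Dist"))  # includes mcp>=1.9.4, httpx>=0.24.0, ...
```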
all_in_mcp-0.2.6.dist-info/RECORD ADDED

```
all_in_mcp/__init__.py,sha256=REDwcbifpuUnsFAhNowIKCZ-8g6irIzUFTI_f8Aunxk,215
all_in_mcp/paper.py,sha256=vSJyC_ehfZX5-ASYG048z8gaD1LKafFdJvR13iQcJRw,7104
all_in_mcp/server.py,sha256=GAb_RUlFi2UyXOASB9IM_fQmHHr5F6bQXbPV8J964pI,24420
all_in_mcp/academic_platforms/__init__.py,sha256=IpI29DMS4_mSmTEa8VkQEiJCl7OyFbswSx7mWSp08P4,285
all_in_mcp/academic_platforms/base.py,sha256=VYMp8_tnp7YzXKAXLfr7uUxgvJBNKRyC_NT1uVhBOwY,673
all_in_mcp/academic_platforms/crossref.py,sha256=D-wvSwnOocP16m9fA3xJ6VGEpmRPtMmGoFm5MlyPdXE,8707
all_in_mcp/academic_platforms/cryptobib.py,sha256=F9N23eojfyAIjnFDPrJAYOpZ_Vi9iHOqNHGtKC6O16c,17360
all_in_mcp/academic_platforms/google_scholar.py,sha256=_KLFfIOZeFCGxFOt-nwzm1fgZKMlXOf3HvIjXAYE5cI,8737
all_in_mcp/academic_platforms/iacr.py,sha256=MUPxFycVS0eMsJok71y12RUqjxbRrCReG33V5ORAbfU,15450
all_in_mcp-0.2.6.dist-info/METADATA,sha256=viUPVp5EJXz5XdzgI-ZewGAWZBok5eu2LvSH5QYMjRQ,4242
all_in_mcp-0.2.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
all_in_mcp-0.2.6.dist-info/entry_points.txt,sha256=FbQOtUQzOIfkMNp4qQV1NTU9K4J7C0XGH9wKKhfK1VM,47
all_in_mcp-0.2.6.dist-info/licenses/LICENSE,sha256=idExTHItK7AC5FVo4H9HKnr6h51Z8BKCEztZPyP8nK8,1062
all_in_mcp-0.2.6.dist-info/RECORD,,
```
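Each RECORD entry has the form `path,sha256=<digest>,size`, where the digest is an unpadded urlsafe-base64 SHA-256 of the file. A sketch of checking one entry against an installed file (the site-packages path is illustrative):

```python
# Sketch: verifying a wheel RECORD entry (installation path illustrative).
import base64
import hashlib
from pathlib import Path


def record_digest(path: Path) -> str:
    """Unpadded urlsafe-base64 sha256 digest, as used in RECORD files."""
    raw = hashlib.sha256(path.read_bytes()).digest()
    return base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")


entry = "all_in_mcp/paper.py,sha256=vSJyC_ehfZX5-ASYG048z8gaD1LKafFdJvR13iQcJRw,7104"
rel_path, hash_field, size = entry.split(",")

f = Path("/path/to/site-packages") / rel_path  # adjust to your environment
ok = record_digest(f) == hash_field.removeprefix("sha256=")
print(ok, f.stat().st_size == int(size))
```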
all_in_mcp-0.2.3.dist-info/METADATA REMOVED

````text
Metadata-Version: 2.4
Name: all-in-mcp
Version: 0.2.3
Summary: An MCP (Model Context Protocol) server providing daily-use utility functions and academic paper search capabilities
Project-URL: Homepage, https://github.com/jiahaoxiang2000/all-in-mcp
Project-URL: Repository, https://github.com/jiahaoxiang2000/all-in-mcp
Project-URL: Documentation, https://github.com/jiahaoxiang2000/all-in-mcp/tree/main/docs
Project-URL: Issues, https://github.com/jiahaoxiang2000/all-in-mcp/issues
Author-email: isomo <jiahaoxiang2000@gmail.com>
License: MIT
License-File: LICENSE
Keywords: academic,iacr,mcp,model-context-protocol,papers,utilities
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Topic :: Scientific/Engineering
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: General
Requires-Python: >=3.10
Requires-Dist: beautifulsoup4>=4.12.0
Requires-Dist: feedparser>=6.0.10
Requires-Dist: httpx>=0.24.0
Requires-Dist: mcp>=1.9.4
Requires-Dist: pypdf>=4.0.0
Requires-Dist: requests>=2.31.0
Provides-Extra: all
Requires-Dist: psutil>=5.9.0; extra == 'all'
Provides-Extra: dev
Requires-Dist: build>=1.0.0; extra == 'dev'
Requires-Dist: mypy>=1.5.0; extra == 'dev'
Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
Requires-Dist: pytest-cov>=4.1.0; extra == 'dev'
Requires-Dist: pytest>=8.0.0; extra == 'dev'
Requires-Dist: ruff>=0.1.0; extra == 'dev'
Requires-Dist: twine>=4.0.0; extra == 'dev'
Provides-Extra: system
Requires-Dist: psutil>=5.9.0; extra == 'system'
Description-Content-Type: text/markdown

# All-in-MCP

An MCP (Model Context Protocol) server that provides daily-use utility functions, including academic paper search capabilities.

## Features

### Daily Utilities

- **Academic Research**: IACR ePrint Archive paper search, download, and reading
- **Bibliography Search**: CryptoBib database search for cryptography papers
- **PDF Reading**: Read and extract text from local and online PDF files

### Paper Search Capabilities

#### IACR ePrint Archive

- Search academic papers from IACR ePrint Archive
- Download PDF files
- Extract and read text content from papers
- Metadata extraction (authors, publication dates, abstracts)

#### CryptoBib Database

- Search comprehensive cryptography bibliography database
- Access to thousands of cryptographic research papers
- Retrieve structured paper metadata or raw BibTeX entries
- Support for all major cryptography venues and conferences

## Quick Start

### Prerequisites

- Python 3.12 or higher
- UV package manager

## Installation

### Option 1: Install from PyPI (Recommended)

```bash
pip install all-in-mcp
```

### Option 2: Install from Source

1. Clone this repository:

```bash
git clone https://github.com/jiahaoxiang2000/all-in-mcp.git
cd all-in-mcp
```

2. Install with UV:

```bash
uv sync
```

### Running the Server

After installation, you can run the MCP server directly:

```bash
all-in-mcp
```

Or if you installed from source with UV:

```bash
uv run all-in-mcp
```

## Integration with MCP Clients

Add this server to your MCP client configuration. The server runs using stdio transport.
See detailed integration guide in [`docs/INTEGRATION.md`](docs/INTEGRATION.md).

Example configuration for Claude Desktop:

```json
{
  "mcpServers": {
    "all-in-mcp": {
      "command": "uv",
      "args": ["run", "all-in-mcp"],
      "cwd": "/path/to/all-in-mcp"
    }
  }
}
```

## Development

For development setup and contribution guidelines, see the [Development Guide](docs/development.md).

### Quick Development Setup

```bash
# Clone the repository
git clone https://github.com/jiahaoxiang2000/all-in-mcp.git
cd all-in-mcp

# Install with development dependencies
uv sync --extra dev

# Run tests
uv run pytest

# Run linting
uv run ruff check src/
uv run ruff format src/

# Type checking
uv run mypy src/all_in_mcp
```

### Releases

This project uses the existing release helper script for creating releases:

#### Using the Release Script

Use the release helper script to create a new version:

```bash
python scripts/release.py 0.1.2
```

This script will:

1. Update the version in `pyproject.toml`
2. Create a git commit
3. Create a git tag
4. Push the changes to trigger CI/CD

#### Manual Process

Alternatively, you can manually:

1. **Update version** in `pyproject.toml`:

```toml
version = "0.1.2" # Change this
```

2. **Commit and tag**:

```bash
git add pyproject.toml
git commit -m "Bump version to 0.1.2"
git tag v0.1.2
git push --follow-tags
```

### Debugging

For debugging, use the [MCP Inspector](https://github.com/modelcontextprotocol/inspector):

```bash
npx @modelcontextprotocol/inspector uv --directory /path/to/all-in-mcp run all-in-mcp
```

## Documentation

Complete documentation is available in the [`docs/`](docs/) directory:

- **[API Reference](docs/api.md)** - Complete API documentation
- **[Installation Guide](docs/installation.md)** - Setup instructions
- **[IACR Integration](docs/iacr.md)** - Academic paper search details
- **[CryptoBib Integration](docs/cryptobib.md)** - Bibliography database search
- **[Development Guide](docs/development.md)** - Contributing guidelines
- **[PyPI Setup Guide](docs/pypi-setup.md)** - Publishing configuration
- **[Examples](docs/examples.md)** - Usage examples
````
all_in_mcp-0.2.3.dist-info/RECORD REMOVED

```
all_in_mcp/__init__.py,sha256=REDwcbifpuUnsFAhNowIKCZ-8g6irIzUFTI_f8Aunxk,215
all_in_mcp/paper.py,sha256=vSJyC_ehfZX5-ASYG048z8gaD1LKafFdJvR13iQcJRw,7104
all_in_mcp/server.py,sha256=pMGyRbgr_kwC_ZNsxMUwXcoEQ8fW4NZx3Sns7uRRa8I,15140
all_in_mcp/academic_platforms/__init__.py,sha256=2KgWMc38NBhRkiLYwqyKi43u-Wm5vWK8i-es3fQFlN0,210
all_in_mcp/academic_platforms/base.py,sha256=VYMp8_tnp7YzXKAXLfr7uUxgvJBNKRyC_NT1uVhBOwY,673
all_in_mcp/academic_platforms/cryptobib.py,sha256=F9N23eojfyAIjnFDPrJAYOpZ_Vi9iHOqNHGtKC6O16c,17360
all_in_mcp/academic_platforms/iacr.py,sha256=MUPxFycVS0eMsJok71y12RUqjxbRrCReG33V5ORAbfU,15450
all_in_mcp-0.2.3.dist-info/METADATA,sha256=43FE07lBZ-f92fi1AemtCEQO_IVXyKD1d_keztjtcYI,5750
all_in_mcp-0.2.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
all_in_mcp-0.2.3.dist-info/entry_points.txt,sha256=FbQOtUQzOIfkMNp4qQV1NTU9K4J7C0XGH9wKKhfK1VM,47
all_in_mcp-0.2.3.dist-info/licenses/LICENSE,sha256=idExTHItK7AC5FVo4H9HKnr6h51Z8BKCEztZPyP8nK8,1062
all_in_mcp-0.2.3.dist-info/RECORD,,
```
{all_in_mcp-0.2.3.dist-info → all_in_mcp-0.2.6.dist-info}/WHEEL: file without changes
{all_in_mcp-0.2.3.dist-info → all_in_mcp-0.2.6.dist-info}/entry_points.txt: file without changes
{all_in_mcp-0.2.3.dist-info → all_in_mcp-0.2.6.dist-info}/licenses/LICENSE: file without changes