all-in-mcp 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
all_in_mcp/__init__.py ADDED
@@ -0,0 +1,11 @@
+ import asyncio
+
+ from . import server
+
+
+ def main():
+     """Main entry point for the package."""
+     asyncio.run(server.main())
+
+ # Optionally expose other important items at package level
+ __all__ = ['main', 'server']
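The `main()` wrapper above is the target of the package's `all-in-mcp` console script (declared in `entry_points.txt` further down), so launching the installed command is equivalent to this small sketch:

```python
# What the `all-in-mcp` console script effectively runs
import asyncio

from all_in_mcp import server

asyncio.run(server.main())
```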
all_in_mcp/academic_platforms/__init__.py ADDED
@@ -0,0 +1 @@
+ # all_in_mcp/academic_platforms/__init__.py
all_in_mcp/academic_platforms/base.py ADDED
@@ -0,0 +1,23 @@
+ # all_in_mcp/academic_platforms/base.py
+ from abc import ABC, abstractmethod
+
+ from ..paper import Paper
+
+
+ class PaperSource(ABC):
+     """Abstract base class for paper sources"""
+
+     @abstractmethod
+     def search(self, query: str, **kwargs) -> list[Paper]:
+         """Search for papers based on query"""
+         raise NotImplementedError
+
+     @abstractmethod
+     def download_pdf(self, paper_id: str, save_path: str) -> str:
+         """Download PDF of a paper"""
+         raise NotImplementedError
+
+     @abstractmethod
+     def read_paper(self, paper_id: str, save_path: str) -> str:
+         """Read and extract text content from a paper"""
+         raise NotImplementedError
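Any new platform backend implements these three methods before it can be instantiated. A minimal sketch of a conforming subclass follows; `DummySource` and its canned values are hypothetical, for illustration only:

```python
from datetime import datetime

from all_in_mcp.academic_platforms.base import PaperSource
from all_in_mcp.paper import Paper


class DummySource(PaperSource):
    """Toy backend with canned data; no network access."""

    def search(self, query: str, **kwargs) -> list[Paper]:
        # Return a single fake hit so callers have something to iterate
        return [
            Paper(
                paper_id="demo/001",
                title=f"A paper about {query}",
                authors=["A. Author"],
                abstract="",
                doi="",
                published_date=datetime(2025, 1, 1),
                pdf_url="",
                url="",
                source="demo",
            )
        ]

    def download_pdf(self, paper_id: str, save_path: str) -> str:
        return f"{save_path}/{paper_id.replace('/', '_')}.pdf"

    def read_paper(self, paper_id: str, save_path: str) -> str:
        return "full text"
```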
all_in_mcp/academic_platforms/iacr.py ADDED
@@ -0,0 +1,427 @@
+ # all_in_mcp/academic_platforms/iacr.py
+ import logging
+ import os
+ import random
+ from datetime import datetime
+ from typing import ClassVar
+
+ import requests
+ from bs4 import BeautifulSoup
+ from pypdf import PdfReader
+
+ from ..paper import Paper
+ from .base import PaperSource
+
+ logger = logging.getLogger(__name__)
+
+
+ class IACRSearcher(PaperSource):
+     """IACR ePrint Archive paper search implementation"""
+
+     IACR_SEARCH_URL = "https://eprint.iacr.org/search"
+     IACR_BASE_URL = "https://eprint.iacr.org"
+     BROWSERS: ClassVar[list[str]] = [
+         "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
+         "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)",
+         "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36",
+     ]
+
+     def __init__(self):
+         self._setup_session()
+
+     def _setup_session(self):
+         """Initialize session with random user agent"""
+         self.session = requests.Session()
+         self.session.headers.update(
+             {
+                 "User-Agent": random.choice(self.BROWSERS),
+                 "Accept": "text/html,application/xhtml+xml",
+                 "Accept-Language": "en-US,en;q=0.9",
+             }
+         )
+
+     def _parse_date(self, date_str: str) -> datetime | None:
+         """Parse date from IACR format (e.g., '2025-06-02')"""
+         try:
+             return datetime.strptime(date_str.strip(), "%Y-%m-%d")
+         except ValueError:
+             logger.warning(f"Could not parse date: {date_str}")
+             return None
+
+     def _parse_paper(self, item, fetch_details: bool = True) -> Paper | None:
+         """Parse single paper entry from IACR HTML and optionally fetch detailed info"""
+         try:
+             # Extract paper ID from the search result
+             header_div = item.find("div", class_="d-flex")
+             if not header_div:
+                 return None
+
+             # Get paper ID from the link
+             paper_link = header_div.find("a", class_="paperlink")
+             if not paper_link:
+                 return None
+
+             paper_id = paper_link.get_text(strip=True)  # e.g., "2025/1014"
+
+             if fetch_details:
+                 # Fetch detailed information for this paper
+                 logger.info(f"Fetching detailed info for paper {paper_id}")
+                 detailed_paper = self.get_paper_details(paper_id)
+                 if detailed_paper:
+                     return detailed_paper
+                 else:
+                     logger.warning(
+                         f"Could not fetch details for {paper_id}, falling back to search result parsing"
+                     )
+
+             # Fallback: parse from search results if detailed fetch fails or is disabled
+             paper_url = self.IACR_BASE_URL + paper_link["href"]
+
+             # Get PDF URL
+             pdf_link = header_div.find("a", href=True, string="(PDF)")
+             pdf_url = self.IACR_BASE_URL + pdf_link["href"] if pdf_link else ""
+
+             # Get last updated date
+             last_updated_elem = header_div.find("small", class_="ms-auto")
+             updated_date = None
+             if last_updated_elem:
+                 date_text = last_updated_elem.get_text(strip=True)
+                 if "Last updated:" in date_text:
+                     date_str = date_text.replace("Last updated:", "").strip()
+                     updated_date = self._parse_date(date_str)
+
+             # Get content from the second div
+             content_div = item.find("div", class_="ms-md-4")
+             if not content_div:
+                 return None
+
+             # Extract title
+             title_elem = content_div.find("strong")
+             title = title_elem.get_text(strip=True) if title_elem else ""
+
+             # Extract authors
+             authors_elem = content_div.find("span", class_="fst-italic")
+             authors = []
+             if authors_elem:
+                 authors_text = authors_elem.get_text(strip=True)
+                 authors = [author.strip() for author in authors_text.split(",")]
+
+             # Extract category
+             category_elem = content_div.find("small", class_="badge")
+             categories = []
+             if category_elem:
+                 category_text = category_elem.get_text(strip=True)
+                 categories = [category_text]
+
+             # Extract abstract
+             abstract_elem = content_div.find("p", class_="search-abstract")
+             abstract = abstract_elem.get_text(strip=True) if abstract_elem else ""
+
+             # Create paper object with search result data
+             published_date = updated_date if updated_date else datetime(1900, 1, 1)
+
+             return Paper(
+                 paper_id=paper_id,
+                 title=title,
+                 authors=authors,
+                 abstract=abstract,
+                 url=paper_url,
+                 pdf_url=pdf_url,
+                 published_date=published_date,
+                 updated_date=updated_date,
+                 source="iacr",
+                 categories=categories,
+                 keywords=[],
+                 doi="",
+                 citations=0,
+             )
+
+         except Exception as e:
+             logger.warning(f"Failed to parse IACR paper: {e}")
+             return None
+
+     def search(
+         self, query: str, max_results: int = 10, fetch_details: bool = True
+     ) -> list[Paper]:
+         """
+         Search IACR ePrint Archive
+
+         Args:
+             query: Search query string
+             max_results: Maximum number of results to return
+             fetch_details: Whether to fetch detailed information for each paper (slower but more complete)
+
+         Returns:
+             List[Paper]: List of paper objects
+         """
+         papers = []
+
+         try:
+             # Construct search parameters
+             params = {"q": query}
+
+             # Make request
+             response = self.session.get(self.IACR_SEARCH_URL, params=params)
+
+             if response.status_code != 200:
+                 logger.error(f"IACR search failed with status {response.status_code}")
+                 return papers
+
+             # Parse results
+             soup = BeautifulSoup(response.text, "html.parser")
+
+             # Find all paper entries - they are divs with class "mb-4"
+             results = soup.find_all("div", class_="mb-4")
+
+             if not results:
+                 logger.info("No results found for the query")
+                 return papers
+
+             # Process each result
+             for i, item in enumerate(results):
+                 if len(papers) >= max_results:
+                     break
+
+                 logger.info(f"Processing paper {i+1}/{min(len(results), max_results)}")
+                 paper = self._parse_paper(item, fetch_details=fetch_details)
+                 if paper:
+                     papers.append(paper)
+
+         except Exception as e:
+             logger.error(f"IACR search error: {e}")
+
+         return papers[:max_results]
+
+     def download_pdf(self, paper_id: str, save_path: str) -> str:
+         """
+         Download PDF from IACR ePrint Archive
+
+         Args:
+             paper_id: IACR paper ID (e.g., "2025/1014")
+             save_path: Path to save the PDF
+
+         Returns:
+             str: Path to downloaded file or error message
+         """
+         try:
+             os.makedirs(save_path, exist_ok=True)
+             pdf_url = f"{self.IACR_BASE_URL}/{paper_id}.pdf"
+
+             response = self.session.get(pdf_url)
+
+             if response.status_code == 200:
+                 filename = f"{save_path}/iacr_{paper_id.replace('/', '_')}.pdf"
+                 with open(filename, "wb") as f:
+                     f.write(response.content)
+                 return filename
+             else:
+                 return f"Failed to download PDF: HTTP {response.status_code}"
+
+         except Exception as e:
+             logger.error(f"PDF download error: {e}")
+             return f"Error downloading PDF: {e}"
+
+     def read_paper(self, paper_id: str, save_path: str = "./downloads") -> str:
+         """
+         Download and extract text from IACR paper PDF
+
+         Args:
+             paper_id: IACR paper ID
+             save_path: Directory to save downloaded PDF
+
+         Returns:
+             str: Extracted text from the PDF or error message
+         """
+         try:
+             # First get paper details to get the PDF URL
+             paper = self.get_paper_details(paper_id)
+             if not paper or not paper.pdf_url:
+                 return f"Error: Could not find PDF URL for paper {paper_id}"
+
+             # Download the PDF
+             pdf_response = requests.get(paper.pdf_url, timeout=30)
+             pdf_response.raise_for_status()
+
+             # Create download directory if it doesn't exist
+             os.makedirs(save_path, exist_ok=True)
+
+             # Save the PDF
+             filename = f"iacr_{paper_id.replace('/', '_')}.pdf"
+             pdf_path = os.path.join(save_path, filename)
+
+             with open(pdf_path, "wb") as f:
+                 f.write(pdf_response.content)
+
+             # Extract text using pypdf
+             reader = PdfReader(pdf_path)
+             text = ""
+
+             for page_num, page in enumerate(reader.pages):
+                 try:
+                     page_text = page.extract_text()
+                     if page_text:
+                         text += f"\n--- Page {page_num + 1} ---\n"
+                         text += page_text + "\n"
+                 except Exception as e:
+                     logger.warning(
+                         f"Failed to extract text from page {page_num + 1}: {e}"
+                     )
+                     continue
+
+             if not text.strip():
+                 return (
+                     f"PDF downloaded to {pdf_path}, but unable to extract readable text"
+                 )
+
+             # Add paper metadata at the beginning
+             metadata = f"Title: {paper.title}\n"
+             metadata += f"Authors: {', '.join(paper.authors)}\n"
+             metadata += f"Published Date: {paper.published_date}\n"
+             metadata += f"URL: {paper.url}\n"
+             metadata += f"PDF downloaded to: {pdf_path}\n"
+             metadata += "=" * 80 + "\n\n"
+
+             return metadata + text.strip()
+
+         except requests.RequestException as e:
+             logger.error(f"Error downloading PDF: {e}")
+             return f"Error downloading PDF: {e}"
+         except Exception as e:
+             logger.error(f"Read paper error: {e}")
+             return f"Error reading paper: {e}"
+
+     def get_paper_details(self, paper_id: str) -> Paper | None:
+         """
+         Fetch detailed information for a specific IACR paper
+
+         Args:
+             paper_id: IACR paper ID (e.g., "2009/101") or full URL
+
+         Returns:
+             Paper: Detailed paper object with full metadata
+         """
+         try:
+             # Handle both paper ID and full URL
+             if paper_id.startswith("http"):
+                 paper_url = paper_id
+                 # Extract paper ID from URL
+                 parts = paper_url.split("/")
+                 if len(parts) >= 2:
+                     paper_id = f"{parts[-2]}/{parts[-1]}"
+             else:
+                 paper_url = f"{self.IACR_BASE_URL}/{paper_id}"
+
+             # Make request
+             response = self.session.get(paper_url)
+
+             if response.status_code != 200:
+                 logger.error(
+                     f"Failed to fetch paper details: HTTP {response.status_code}"
+                 )
+                 return None
+
+             # Parse the page
+             soup = BeautifulSoup(response.text, "html.parser")
+
+             # Extract title from h3 element
+             title = ""
+             title_elem = soup.find("h3", class_="mb-3")
+             if title_elem:
+                 title = title_elem.get_text(strip=True)
+
+             # Extract authors from the italic paragraph
+             authors = []
+             author_elem = soup.find("p", class_="fst-italic")
+             if author_elem:
+                 author_text = author_elem.get_text(strip=True)
+                 # Split by " and " to get individual authors
+                 authors = [
+                     author.strip()
+                     for author in author_text.replace(" and ", ",").split(",")
+                 ]
+
+             # Extract abstract using multiple strategies
+             abstract = ""
+
+             # Look for paragraph with white-space: pre-wrap style (current method)
+             abstract_p = soup.find("p", style="white-space: pre-wrap;")
+             if abstract_p:
+                 abstract = abstract_p.get_text(strip=True)
+
+             # Extract metadata using a simpler, safer approach
+             publication_info = ""
+             keywords = []
+             history_entries = []
+             last_updated = None
+
+             # Extract publication info
+             page_text = soup.get_text()
+             lines = page_text.split("\n")
+
+             # Find publication info
+             for i, line in enumerate(lines):
+                 if "Publication info" in line and i + 1 < len(lines):
+                     publication_info = lines[i + 1].strip()
+                     break
+
+             # Find keywords using CSS selector for keyword badges
+             try:
+                 keyword_elements = soup.select("a.badge.bg-secondary.keyword")
+                 keywords = [elem.get_text(strip=True) for elem in keyword_elements]
+             except Exception:
+                 keywords = []
+
+             # Find history entries
+             history_found = False
+             for _i, line in enumerate(lines):
+                 if "History" in line and ":" not in line:
+                     history_found = True
+                     continue
+                 elif (
+                     history_found
+                     and ":" in line
+                     and not line.strip().startswith("Short URL")
+                 ):
+                     history_entries.append(line.strip())
+                     # Try to extract the last updated date from the first history entry
+                     if not last_updated:
+                         date_str = line.split(":")[0].strip()
+                         try:
+                             last_updated = datetime.strptime(date_str, "%Y-%m-%d")
+                         except ValueError:
+                             pass
+                 elif history_found and (
+                     line.strip().startswith("Short URL")
+                     or line.strip().startswith("License")
+                 ):
+                     break
+
+             # Combine history entries
+             history = "; ".join(history_entries) if history_entries else ""
+
+             # Construct PDF URL
+             pdf_url = f"{self.IACR_BASE_URL}/{paper_id}.pdf"
+
+             # Use last updated date or current date as published date
+             published_date = last_updated if last_updated else datetime.now()
+
+             return Paper(
+                 paper_id=paper_id,
+                 title=title,
+                 authors=authors,
+                 abstract=abstract,
+                 url=paper_url,
+                 pdf_url=pdf_url,
+                 published_date=published_date,
+                 updated_date=last_updated,
+                 source="iacr",
+                 categories=[],
+                 keywords=keywords,
+                 doi="",
+                 citations=0,
+                 extra={"publication_info": publication_info, "history": history},
+             )
+
+         except Exception as e:
+             logger.error(f"Error fetching paper details for {paper_id}: {e}")
+             return None
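Taken together, the three public methods give a simple workflow: search, then download or read by paper ID. A hedged usage sketch (assumes network access; the example ID `2009/101` is taken from the docstrings above):

```python
from all_in_mcp.academic_platforms.iacr import IACRSearcher

searcher = IACRSearcher()

# Fast search: skip the per-paper detail fetch
papers = searcher.search("secret sharing", max_results=5, fetch_details=False)
for p in papers:
    print(p.paper_id, p.title)

# Download and read a specific paper (ID format is "year/number")
pdf_path = searcher.download_pdf("2009/101", "./downloads")
text = searcher.read_paper("2009/101", "./downloads")
print(text[:500])
```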
all_in_mcp/paper.py ADDED
@@ -0,0 +1,64 @@
+ # all_in_mcp/paper.py
+ from dataclasses import dataclass
+ from datetime import datetime
+
+
+ @dataclass
+ class Paper:
+     """Standardized paper format with core fields for academic sources"""
+
+     # Core fields (required, but allows empty values or defaults)
+     paper_id: str  # Unique identifier (e.g., arXiv ID, PMID, DOI)
+     title: str  # Paper title
+     authors: list[str]  # List of author names
+     abstract: str  # Abstract text
+     doi: str  # Digital Object Identifier
+     published_date: datetime  # Publication date
+     pdf_url: str  # Direct PDF link
+     url: str  # URL to paper page
+     source: str  # Source platform (e.g., 'arxiv', 'pubmed')
+
+     # Optional fields
+     updated_date: datetime | None = None  # Last updated date
+     categories: list[str] | None = None  # Subject categories
+     keywords: list[str] | None = None  # Keywords
+     citations: int = 0  # Citation count
+     references: list[str] | None = None  # List of reference IDs/DOIs
+     extra: dict | None = None  # Source-specific extra metadata
+
+     def __post_init__(self):
+         """Post-initialization to handle default values"""
+         if self.authors is None:
+             self.authors = []
+         if self.categories is None:
+             self.categories = []
+         if self.keywords is None:
+             self.keywords = []
+         if self.references is None:
+             self.references = []
+         if self.extra is None:
+             self.extra = {}
+
+     def to_dict(self) -> dict:
+         """Convert paper to dictionary format for serialization"""
+         return {
+             "paper_id": self.paper_id,
+             "title": self.title,
+             "authors": self.authors,
+             "abstract": self.abstract,
+             "doi": self.doi,
+             "published_date": (
+                 self.published_date.isoformat() if self.published_date else None
+             ),
+             "pdf_url": self.pdf_url,
+             "url": self.url,
+             "source": self.source,
+             "updated_date": (
+                 self.updated_date.isoformat() if self.updated_date else None
+             ),
+             "categories": self.categories,
+             "keywords": self.keywords,
+             "citations": self.citations,
+             "references": self.references,
+             "extra": self.extra,
+         }
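Because every optional field is normalized in `__post_init__`, callers can pass `None` and still get list/dict defaults, and `to_dict()` emits JSON-friendly ISO dates. A small sketch (the field values here are illustrative):

```python
from datetime import datetime

from all_in_mcp.paper import Paper

paper = Paper(
    paper_id="2025/1014",
    title="Example Title",
    authors=["Alice", "Bob"],
    abstract="...",
    doi="",
    published_date=datetime(2025, 6, 2),
    pdf_url="https://eprint.iacr.org/2025/1014.pdf",
    url="https://eprint.iacr.org/2025/1014",
    source="iacr",
    keywords=None,  # normalized to [] by __post_init__
)
assert paper.keywords == []
print(paper.to_dict()["published_date"])  # "2025-06-02T00:00:00"
```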
all_in_mcp/server.py ADDED
@@ -0,0 +1,208 @@
+ import mcp.server.stdio
+ import mcp.types as types
+ from mcp.server import NotificationOptions, Server
+ from mcp.server.models import InitializationOptions
+
+ # Import IACR searcher
+ from .academic_platforms.iacr import IACRSearcher
+
+ server = Server("all-in-mcp")
+
+ # Initialize IACR searcher
+ iacr_searcher = IACRSearcher()
+
+
+ @server.list_tools()
+ async def handle_list_tools() -> list[types.Tool]:
+     """
+     List available daily utility tools.
+     Each tool specifies its arguments using JSON Schema validation.
+     """
+     return [
+         types.Tool(
+             name="search-iacr-papers",
+             description="Search academic papers from IACR ePrint Archive",
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "query": {
+                         "type": "string",
+                         "description": "Search query string (e.g., 'cryptography', 'secret sharing')",
+                     },
+                     "max_results": {
+                         "type": "integer",
+                         "description": "Maximum number of papers to return (default: 10)",
+                         "default": 10,
+                     },
+                     "fetch_details": {
+                         "type": "boolean",
+                         "description": "Whether to fetch detailed information for each paper (default: True)",
+                         "default": True,
+                     },
+                 },
+                 "required": ["query"],
+             },
+         ),
+         types.Tool(
+             name="download-iacr-paper",
+             description="Download PDF of an IACR ePrint paper",
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "paper_id": {
+                         "type": "string",
+                         "description": "IACR paper ID (e.g., '2009/101')",
+                     },
+                     "save_path": {
+                         "type": "string",
+                         "description": "Directory to save the PDF (default: './downloads')",
+                         "default": "./downloads",
+                     },
+                 },
+                 "required": ["paper_id"],
+             },
+         ),
+         types.Tool(
+             name="read-iacr-paper",
+             description="Read and extract text content from an IACR ePrint paper PDF",
+             inputSchema={
+                 "type": "object",
+                 "properties": {
+                     "paper_id": {
+                         "type": "string",
+                         "description": "IACR paper ID (e.g., '2009/101')",
+                     },
+                     "save_path": {
+                         "type": "string",
+                         "description": "Directory where the PDF is/will be saved (default: './downloads')",
+                         "default": "./downloads",
+                     },
+                 },
+                 "required": ["paper_id"],
+             },
+         ),
+     ]
+
+
+ @server.call_tool()
+ async def handle_call_tool(
+     name: str, arguments: dict | None
+ ) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]:
+     """
+     Handle tool execution requests.
+     """
+     if not arguments:
+         arguments = {}
+
+     try:
+         if name == "search-iacr-papers":
+             query = arguments.get("query", "")
+             max_results = arguments.get("max_results", 10)
+             fetch_details = arguments.get("fetch_details", True)
+
+             if not query:
+                 return [
+                     types.TextContent(
+                         type="text", text="Error: Query parameter is required"
+                     )
+                 ]
+
+             papers = iacr_searcher.search(query, max_results, fetch_details)
+
+             if not papers:
+                 return [
+                     types.TextContent(
+                         type="text", text=f"No papers found for query: {query}"
+                     )
+                 ]
+
+             # Format the results
+             result_text = f"Found {len(papers)} IACR papers for query '{query}':\n\n"
+             for i, paper in enumerate(papers, 1):
+                 result_text += f"{i}. **{paper.title}**\n"
+                 result_text += f" - Paper ID: {paper.paper_id}\n"
+                 result_text += f" - Authors: {', '.join(paper.authors)}\n"
+                 result_text += f" - URL: {paper.url}\n"
+                 result_text += f" - PDF: {paper.pdf_url}\n"
+                 if paper.categories:
+                     result_text += f" - Categories: {', '.join(paper.categories)}\n"
+                 if paper.keywords:
+                     result_text += f" - Keywords: {', '.join(paper.keywords)}\n"
+                 if paper.abstract:
+                     result_text += f" - Abstract: {paper.abstract}\n"
+                 result_text += "\n"
+
+             return [types.TextContent(type="text", text=result_text)]
+
+         elif name == "download-iacr-paper":
+             paper_id = arguments.get("paper_id", "")
+             save_path = arguments.get("save_path", "./downloads")
+
+             if not paper_id:
+                 return [
+                     types.TextContent(
+                         type="text", text="Error: paper_id parameter is required"
+                     )
+                 ]
+
+             result = iacr_searcher.download_pdf(paper_id, save_path)
+
+             if result.startswith(("Error", "Failed")):
+                 return [
+                     types.TextContent(type="text", text=f"Download failed: {result}")
+                 ]
+             else:
+                 return [
+                     types.TextContent(
+                         type="text", text=f"PDF downloaded successfully to: {result}"
+                     )
+                 ]
+
+         elif name == "read-iacr-paper":
+             paper_id = arguments.get("paper_id", "")
+             save_path = arguments.get("save_path", "./downloads")
+
+             if not paper_id:
+                 return [
+                     types.TextContent(
+                         type="text", text="Error: paper_id parameter is required"
+                     )
+                 ]
+
+             result = iacr_searcher.read_paper(paper_id, save_path)
+
+             if result.startswith("Error"):
+                 return [types.TextContent(type="text", text=result)]
+             else:
+                 # Truncate very long text for display
+                 if len(result) > 5000:
+                     truncated_result = (
+                         result[:5000]
+                         + f"\n\n... [Text truncated. Full text is {len(result)} characters long]"
+                     )
+                     return [types.TextContent(type="text", text=truncated_result)]
+                 else:
+                     return [types.TextContent(type="text", text=result)]
+
+         else:
+             raise ValueError(f"Unknown tool: {name}")
+
+     except Exception as e:
+         return [types.TextContent(type="text", text=f"Error executing {name}: {e!s}")]
+
+
+ async def main():
+     # Run the server using stdin/stdout streams
+     async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
+         await server.run(
+             read_stream,
+             write_stream,
+             InitializationOptions(
+                 server_name="all-in-mcp",
+                 server_version="0.1.0",
+                 capabilities=server.get_capabilities(
+                     notification_options=NotificationOptions(),
+                     experimental_capabilities={},
+                 ),
+             ),
+         )
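Since `handle_call_tool` is an ordinary coroutine, the dispatch logic can be exercised without an MCP client. A test-style sketch, assuming `@server.call_tool()` leaves the decorated coroutine directly callable and that the IACR site is reachable:

```python
import asyncio

from all_in_mcp.server import handle_call_tool


async def demo():
    # Invoke the search tool exactly as an MCP client would
    results = await handle_call_tool(
        "search-iacr-papers",
        {"query": "zero knowledge", "max_results": 3, "fetch_details": False},
    )
    print(results[0].text)


asyncio.run(demo())
```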
all_in_mcp-0.1.4.dist-info/METADATA ADDED
@@ -0,0 +1,206 @@
+ Metadata-Version: 2.4
+ Name: all-in-mcp
+ Version: 0.1.4
+ Summary: An MCP (Model Context Protocol) server providing daily-use utility functions and academic paper search capabilities
+ Project-URL: Homepage, https://github.com/jiahaoxiang2000/all-in-mcp
+ Project-URL: Repository, https://github.com/jiahaoxiang2000/all-in-mcp
+ Project-URL: Documentation, https://github.com/jiahaoxiang2000/all-in-mcp/tree/main/docs
+ Project-URL: Issues, https://github.com/jiahaoxiang2000/all-in-mcp/issues
+ Author-email: isomo <jiahaoxiang2000@gmail.com>
+ License: MIT
+ License-File: LICENSE
+ Keywords: academic,iacr,mcp,model-context-protocol,papers,utilities
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Topic :: Scientific/Engineering
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Classifier: Topic :: Text Processing :: General
+ Requires-Python: >=3.10
+ Requires-Dist: beautifulsoup4>=4.12.0
+ Requires-Dist: feedparser>=6.0.10
+ Requires-Dist: httpx>=0.24.0
+ Requires-Dist: mcp>=1.9.4
+ Requires-Dist: pypdf>=4.0.0
+ Requires-Dist: requests>=2.31.0
+ Provides-Extra: all
+ Requires-Dist: psutil>=5.9.0; extra == 'all'
+ Provides-Extra: dev
+ Requires-Dist: build>=1.0.0; extra == 'dev'
+ Requires-Dist: mypy>=1.5.0; extra == 'dev'
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
+ Requires-Dist: pytest-cov>=4.1.0; extra == 'dev'
+ Requires-Dist: pytest>=8.0.0; extra == 'dev'
+ Requires-Dist: ruff>=0.1.0; extra == 'dev'
+ Requires-Dist: twine>=4.0.0; extra == 'dev'
+ Provides-Extra: system
+ Requires-Dist: psutil>=5.9.0; extra == 'system'
+ Description-Content-Type: text/markdown
+
+ # All-in-MCP
+
+ An MCP (Model Context Protocol) server that provides daily-use utility functions, including academic paper search capabilities.
+
+ ## Features
+
+ ### Daily Utilities
+
+ - **Academic Research**: IACR ePrint Archive paper search, download, and reading
+
+ ### Paper Search Capabilities
+
+ - Search academic papers from IACR ePrint Archive
+ - Download PDF files
+ - Extract and read text content from papers
+ - Metadata extraction (authors, publication dates, abstracts)
+
+ ## Quick Start
+
+ ### Prerequisites
+
+ - Python 3.10 or higher
+ - UV package manager
+
+ ## Installation
+
+ ### Option 1: Install from PyPI (Recommended)
+
+ ```bash
+ pip install all-in-mcp
+ ```
+
+ ### Option 2: Install from Source
+
+ 1. Clone this repository:
+
+ ```bash
+ git clone https://github.com/jiahaoxiang2000/all-in-mcp.git
+ cd all-in-mcp
+ ```
+
+ 2. Install with UV:
+
+ ```bash
+ uv sync
+ ```
+
+ ### Running the Server
+
+ After installation, you can run the MCP server directly:
+
+ ```bash
+ all-in-mcp
+ ```
+
+ Or if you installed from source with UV:
+
+ ```bash
+ uv run all-in-mcp
+ ```
+
+ ## Integration with MCP Clients
+
+ Add this server to your MCP client configuration. The server runs using stdio transport.
+ See the detailed integration guide in [`docs/INTEGRATION.md`](docs/INTEGRATION.md).
+
+ Example configuration for Claude Desktop:
+
+ ```json
+ {
+   "mcpServers": {
+     "all-in-mcp": {
+       "command": "uv",
+       "args": ["run", "all-in-mcp"],
+       "cwd": "/path/to/all-in-mcp"
+     }
+   }
+ }
+ ```
+
+ ## Development
+
+ For development setup and contribution guidelines, see the [Development Guide](docs/development.md).
+
+ ### Quick Development Setup
+
+ ```bash
+ # Clone the repository
+ git clone https://github.com/jiahaoxiang2000/all-in-mcp.git
+ cd all-in-mcp
+
+ # Install with development dependencies
+ uv sync --extra dev
+
+ # Run tests
+ uv run pytest
+
+ # Run linting
+ uv run ruff check src/
+ uv run ruff format src/
+
+ # Type checking
+ uv run mypy src/all_in_mcp
+ ```
+
+ ### Releases
+
+ This project uses a release helper script to create new releases:
+
+ #### Using the Release Script
+
+ Use the release helper script to create a new version:
+
+ ```bash
+ python scripts/release.py 0.1.2
+ ```
+
+ This script will:
+
+ 1. Update the version in `pyproject.toml`
+ 2. Create a git commit
+ 3. Create a git tag
+ 4. Push the changes to trigger CI/CD
+
+ #### Manual Process
+
+ Alternatively, you can manually:
+
+ 1. **Update version** in `pyproject.toml`:
+
+ ```toml
+ version = "0.1.2" # Change this
+ ```
+
+ 2. **Commit and tag**:
+
+ ```bash
+ git add pyproject.toml
+ git commit -m "Bump version to 0.1.2"
+ git tag v0.1.2
+ git push --follow-tags
+ ```
+
+ ### Debugging
+
+ For debugging, use the [MCP Inspector](https://github.com/modelcontextprotocol/inspector):
+
+ ```bash
+ npx @modelcontextprotocol/inspector uv --directory /path/to/all-in-mcp run all-in-mcp
+ ```
+
+ ## Documentation
+
+ Complete documentation is available in the [`docs/`](docs/) directory:
+
+ - **[API Reference](docs/api.md)** - Complete API documentation
+ - **[Installation Guide](docs/installation.md)** - Setup instructions
+ - **[IACR Integration](docs/iacr.md)** - Academic paper search details
+ - **[Development Guide](docs/development.md)** - Contributing guidelines
+ - **[PyPI Setup Guide](docs/pypi-setup.md)** - Publishing configuration
+ - **[Examples](docs/examples.md)** - Usage examples
all_in_mcp-0.1.4.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
+ all_in_mcp/__init__.py,sha256=REDwcbifpuUnsFAhNowIKCZ-8g6irIzUFTI_f8Aunxk,215
+ all_in_mcp/paper.py,sha256=QVH2BQpQT3I14T2IaZs1ZeC-MJVoFNVYZXSs1iHlGLY,2293
+ all_in_mcp/server.py,sha256=HHopmtcjoCLlmcf2P3DWlj0pn42fjQ1L0UF44WHK6Mw,7604
+ all_in_mcp/academic_platforms/__init__.py,sha256=FbxJcL8B7hP0KjBvtanNuvt6qfla3B_q-KjxdjnxukY,44
+ all_in_mcp/academic_platforms/base.py,sha256=VYMp8_tnp7YzXKAXLfr7uUxgvJBNKRyC_NT1uVhBOwY,673
+ all_in_mcp/academic_platforms/iacr.py,sha256=MUPxFycVS0eMsJok71y12RUqjxbRrCReG33V5ORAbfU,15450
+ all_in_mcp-0.1.4.dist-info/METADATA,sha256=tTuprD0AU0V0qhuDwg4vgQRHEuvga5tS-i29tGVFyOk,5219
+ all_in_mcp-0.1.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ all_in_mcp-0.1.4.dist-info/entry_points.txt,sha256=FbQOtUQzOIfkMNp4qQV1NTU9K4J7C0XGH9wKKhfK1VM,47
+ all_in_mcp-0.1.4.dist-info/licenses/LICENSE,sha256=idExTHItK7AC5FVo4H9HKnr6h51Z8BKCEztZPyP8nK8,1062
+ all_in_mcp-0.1.4.dist-info/RECORD,,
all_in_mcp-0.1.4.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.27.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
all_in_mcp-0.1.4.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ all-in-mcp = all_in_mcp:main
all_in_mcp-0.1.4.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 isomo
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.