all-in-mcp 0.2.6__tar.gz → 0.2.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (21)
  1. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/PKG-INFO +19 -6
  2. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/README.md +18 -5
  3. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/pyproject.toml +1 -1
  4. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/src/all_in_mcp/academic_platforms/base.py +1 -1
  5. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/src/all_in_mcp/academic_platforms/crossref.py +7 -1
  6. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/src/all_in_mcp/academic_platforms/cryptobib.py +7 -1
  7. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/src/all_in_mcp/academic_platforms/google_scholar.py +3 -1
  8. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/src/all_in_mcp/academic_platforms/iacr.py +23 -42
  9. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/src/all_in_mcp/paper.py +61 -8
  10. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/src/all_in_mcp/server.py +56 -5
  11. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/.gitignore +0 -0
  12. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/LICENSE +0 -0
  13. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/docs/api.md +0 -0
  14. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/docs/configuration.md +0 -0
  15. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/docs/cryptobib.md +0 -0
  16. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/docs/development.md +0 -0
  17. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/docs/google_scholar.md +0 -0
  18. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/docs/iacr.md +0 -0
  19. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/docs/pypi-setup.md +0 -0
  20. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/src/all_in_mcp/__init__.py +0 -0
  21. {all_in_mcp-0.2.6 → all_in_mcp-0.2.8}/src/all_in_mcp/academic_platforms/__init__.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: all-in-mcp
- Version: 0.2.6
+ Version: 0.2.8
  Summary: An MCP (Model Context Protocol) server providing daily-use utility functions and academic paper search capabilities
  Project-URL: Homepage, https://github.com/jiahaoxiang2000/all-in-mcp
  Project-URL: Repository, https://github.com/jiahaoxiang2000/all-in-mcp
@@ -73,12 +73,12 @@ All tools are implemented as async MCP endpoints with proper validation and erro

  ## Quick Start

- - [**Video for Env Setup**](https://www.bilibili.com/video/BV1cZKozaEjg)
- - [_Overview PDF_](https://github.com/jiahaoxiang2000/tutor/blob/main/Apaper/config.pdf)
+ - [**Video for Env Setup**](https://www.bilibili.com/video/BV1cZKozaEjg) [**Video for Claude code**](https://www.bilibili.com/video/BV1s9KmzVEcE/)
+ - [_Overview PDF_](https://github.com/jiahaoxiang2000/tutor/blob/main/Apaper/config.pdf) [_PDF for Claude code_](https://github.com/jiahaoxiang2000/tutor/blob/main/Apaper/config-claude.pdf)

  ### Prerequisites

- - Python 3.12 or higher
+ - Python 3.10 or higher
  - UV package manager

  ### Installation
@@ -89,11 +89,11 @@ Install from PyPI (Recommended by `UV`)
  uv pip install all-in-mcp
  ```

- ### Integration with MCP Clients Vscode
+ ### Integration with MCP Clients

  Add this server to your MCP client configuration. The server runs using stdio transport.

- Example configuration for Vscode:
+ #### VSCode Configuration

  ```json .vscode/mcp.json
  {
@@ -107,6 +107,19 @@ Example configuration for Vscode:
  }
  ```

+ #### Claude Code Configuration
+
+ ```json .mcp.json
+ {
+   "mcpServers": {
+     "all-in-mcp": {
+       "command": "uv",
+       "args": ["run", "all-in-mcp"]
+     }
+   }
+ }
+ ```
+
  <details>
  <summary>Development</summary>

README.md
@@ -28,12 +28,12 @@ All tools are implemented as async MCP endpoints with proper validation and erro

  ## Quick Start

- - [**Video for Env Setup**](https://www.bilibili.com/video/BV1cZKozaEjg)
- - [_Overview PDF_](https://github.com/jiahaoxiang2000/tutor/blob/main/Apaper/config.pdf)
+ - [**Video for Env Setup**](https://www.bilibili.com/video/BV1cZKozaEjg) [**Video for Claude code**](https://www.bilibili.com/video/BV1s9KmzVEcE/)
+ - [_Overview PDF_](https://github.com/jiahaoxiang2000/tutor/blob/main/Apaper/config.pdf) [_PDF for Claude code_](https://github.com/jiahaoxiang2000/tutor/blob/main/Apaper/config-claude.pdf)

  ### Prerequisites

- - Python 3.12 or higher
+ - Python 3.10 or higher
  - UV package manager

  ### Installation
@@ -44,11 +44,11 @@ Install from PyPI (Recommended by `UV`)
  uv pip install all-in-mcp
  ```

- ### Integration with MCP Clients Vscode
+ ### Integration with MCP Clients

  Add this server to your MCP client configuration. The server runs using stdio transport.

- Example configuration for Vscode:
+ #### VSCode Configuration

  ```json .vscode/mcp.json
  {
@@ -62,6 +62,19 @@ Example configuration for Vscode:
  }
  ```

+ #### Claude Code Configuration
+
+ ```json .mcp.json
+ {
+   "mcpServers": {
+     "all-in-mcp": {
+       "command": "uv",
+       "args": ["run", "all-in-mcp"]
+     }
+   }
+ }
+ ```
+
  <details>
  <summary>Development</summary>

pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "all-in-mcp"
- version = "0.2.6"
+ version = "0.2.8"
  description = "An MCP (Model Context Protocol) server providing daily-use utility functions and academic paper search capabilities"
  readme = "README.md"
  license = { text = "MIT" }
src/all_in_mcp/academic_platforms/base.py
@@ -18,6 +18,6 @@ class PaperSource(ABC):
          raise NotImplementedError

      @abstractmethod
-     def read_paper(self, paper_id: str, save_path: str) -> str:
+     def read_paper(self, paper_id: str, save_path: str, start_page: int | None = None, end_page: int | None = None) -> str:
          """Read and extract text content from a paper"""
          raise NotImplementedError
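
Every `PaperSource` implementation now has to accept the widened signature, as the per-platform hunks below show. A minimal sketch of a conforming subclass, assuming the import path implied by the file list above (the class name and body are illustrative only, and any other abstract methods such as `search` are omitted):

```python
from all_in_mcp.academic_platforms.base import PaperSource


class DummySource(PaperSource):
    """Illustrative stub; the remaining abstract methods are not shown."""

    def read_paper(
        self,
        paper_id: str,
        save_path: str,
        start_page: int | None = None,
        end_page: int | None = None,
    ) -> str:
        # A real implementation would fetch the PDF and extract only the
        # requested page range; this stub just echoes its arguments.
        return f"{paper_id}: pages {start_page or 1}-{end_page or 'last'} (under {save_path})"
```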
src/all_in_mcp/academic_platforms/crossref.py
@@ -228,9 +228,15 @@ class CrossrefSearcher(PaperSource):
          """
          return "Crossref does not provide a direct way to download PDFs. Use the paper's URL or DOI to access the publisher's site for PDF downloads if available."

-     def read_paper(self, paper_id: str, save_path: str) -> str:
+     def read_paper(self, paper_id: str, save_path: str, start_page: int | None = None, end_page: int | None = None) -> str:
          """
          crossref doesn't provide a direct way to read paper text.
+
+         Args:
+             paper_id: Paper identifier
+             save_path: Directory where papers are stored
+             start_page: Starting page number (1-indexed, inclusive). Defaults to 1.
+             end_page: Ending page number (1-indexed, inclusive). Defaults to last page.
          """
          return "Crossref does not provide a direct way to read paper text. Use the download_pdf method to get the PDF if available."

src/all_in_mcp/academic_platforms/cryptobib.py
@@ -364,9 +364,15 @@ class CryptoBibSearcher(PaperSource):
          """
          return "Error: CryptoBib is a bibliography database and doesn't provide PDF downloads"

-     def read_paper(self, paper_id: str, save_path: str = "./downloads") -> str:
+     def read_paper(self, paper_id: str, save_path: str = "./downloads", start_page: int | None = None, end_page: int | None = None) -> str:
          """
          CryptoBib doesn't provide paper content reading
+
+         Args:
+             paper_id: Paper identifier
+             save_path: Directory where papers are stored
+             start_page: Starting page number (1-indexed, inclusive). Defaults to 1.
+             end_page: Ending page number (1-indexed, inclusive). Defaults to last page.
          """
          return "Error: CryptoBib is a bibliography database and doesn't provide paper content"

src/all_in_mcp/academic_platforms/google_scholar.py
@@ -228,13 +228,15 @@ class GoogleScholarSearcher(PaperSource):
              "Please use the paper URL to access the publisher's website."
          )

-     def read_paper(self, paper_id: str, save_path: str = "./downloads") -> str:
+     def read_paper(self, paper_id: str, save_path: str = "./downloads", start_page: int | None = None, end_page: int | None = None) -> str:
          """
          Google Scholar doesn't support direct paper reading

          Args:
              paper_id: Paper identifier
              save_path: Directory where papers are stored
+             start_page: Starting page number (1-indexed, inclusive). Defaults to 1.
+             end_page: Ending page number (1-indexed, inclusive). Defaults to last page.

          Returns:
              Message indicating the feature is not supported
src/all_in_mcp/academic_platforms/iacr.py
@@ -141,7 +141,12 @@ class IACRSearcher(PaperSource):
          return None

      def search(
-         self, query: str, max_results: int = 10, fetch_details: bool = True
+         self,
+         query: str,
+         max_results: int = 10,
+         fetch_details: bool = True,
+         year_min: int | None = None,
+         year_max: int | None = None,
      ) -> list[Paper]:
          """
          Search IACR ePrint Archive
@@ -150,6 +155,8 @@
              query: Search query string
              max_results: Maximum number of results to return
              fetch_details: Whether to fetch detailed information for each paper (slower but more complete)
+             year_min: Minimum publication year (revised after)
+             year_max: Maximum publication year (revised before)

          Returns:
              List[Paper]: List of paper objects
@@ -158,7 +165,11 @@

          try:
              # Construct search parameters
-             params = {"q": query}
+             params: dict[str, str | int] = {"q": query}
+             if year_min:
+                 params["revisedafter"] = year_min
+             if year_max:
+                 params["revisedbefore"] = year_max

              # Make request
              response = self.session.get(self.IACR_SEARCH_URL, params=params)
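
Because `year_min` and `year_max` are passed straight through as the `revisedafter` / `revisedbefore` query parameters, a caller can narrow results to a revision-year window. A hedged usage sketch (the query string and the no-argument constructor are assumptions based on the surrounding hunks, not taken from the package documentation):

```python
from all_in_mcp.academic_platforms.iacr import IACRSearcher

searcher = IACRSearcher()  # assumes the default constructor takes no arguments

# Restrict results to papers revised between 2022 and 2024.
papers = searcher.search(
    "lattice-based signatures",
    max_results=5,
    fetch_details=False,
    year_min=2022,
    year_max=2024,
)
for paper in papers:
    print(paper.paper_id, paper.title)
```

Note that the truthiness checks (`if year_min:`) mean a value of `0` would be silently ignored, which is harmless for real publication years.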
@@ -221,13 +232,15 @@
              logger.error(f"PDF download error: {e}")
              return f"Error downloading PDF: {e}"

-     def read_paper(self, paper_id: str, save_path: str = "./downloads") -> str:
+     def read_paper(self, paper_id: str, save_path: str = "./downloads", start_page: int | None = None, end_page: int | None = None) -> str:
          """
          Download and extract text from IACR paper PDF

          Args:
              paper_id: IACR paper ID
              save_path: Directory to save downloaded PDF
+             start_page: Starting page number (1-indexed, inclusive). Defaults to 1.
+             end_page: Ending page number (1-indexed, inclusive). Defaults to last page.

          Returns:
              str: Extracted text from the PDF or error message
@@ -238,56 +251,24 @@
              if not paper or not paper.pdf_url:
                  return f"Error: Could not find PDF URL for paper {paper_id}"

-             # Download the PDF
-             pdf_response = requests.get(paper.pdf_url, timeout=30)
-             pdf_response.raise_for_status()
+             # Import read_pdf function
+             from ..paper import read_pdf

-             # Create download directory if it doesn't exist
-             os.makedirs(save_path, exist_ok=True)
-
-             # Save the PDF
-             filename = f"iacr_{paper_id.replace('/', '_')}.pdf"
-             pdf_path = os.path.join(save_path, filename)
-
-             with open(pdf_path, "wb") as f:
-                 f.write(pdf_response.content)
-
-             # Extract text using pypdf
-             reader = PdfReader(pdf_path)
-             text = ""
-
-             for page_num, page in enumerate(reader.pages):
-                 try:
-                     page_text = page.extract_text()
-                     if page_text:
-                         text += f"\n--- Page {page_num + 1} ---\n"
-                         text += page_text + "\n"
-                 except Exception as e:
-                     logger.warning(
-                         f"Failed to extract text from page {page_num + 1}: {e}"
-                     )
-                     continue
-
-             if not text.strip():
-                 return (
-                     f"PDF downloaded to {pdf_path}, but unable to extract readable text"
-                 )
+             # Use the read_pdf function to extract text
+             text = read_pdf(paper.pdf_url, start_page, end_page)

              # Add paper metadata at the beginning
              metadata = f"Title: {paper.title}\n"
              metadata += f"Authors: {', '.join(paper.authors)}\n"
              metadata += f"Published Date: {paper.published_date}\n"
              metadata += f"URL: {paper.url}\n"
-             metadata += f"PDF downloaded to: {pdf_path}\n"
+             metadata += f"PDF URL: {paper.pdf_url}\n"
              metadata += "=" * 80 + "\n\n"

-             return metadata + text.strip()
+             return metadata + text

-         except requests.RequestException as e:
-             logger.error(f"Error downloading PDF: {e}")
-             return f"Error downloading PDF: {e}"
          except Exception as e:
-             logger.error(f"Read paper error: {e}")
+             logger.error(f"Error reading paper: {e}")
              return f"Error reading paper: {e}"

      def get_paper_details(self, paper_id: str) -> Paper | None:
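
After this refactor, `read_paper` no longer writes the PDF to disk before extraction; it fetches the PDF URL in memory via `read_pdf` with the page bounds and prefixes the paper metadata. A usage sketch (the paper ID is a placeholder; `save_path` is kept only because the signature still accepts it):

```python
from all_in_mcp.academic_platforms.iacr import IACRSearcher

searcher = IACRSearcher()

# Extract only the first three pages; "2024/001" is a made-up ID.
text = searcher.read_paper("2024/001", save_path="./downloads", start_page=1, end_page=3)
print(text.splitlines()[0])  # expected to start with the "Title: ..." metadata line
```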
src/all_in_mcp/paper.py
@@ -86,19 +86,21 @@ class Paper:
          return read_pdf(self.pdf_url)


- def read_pdf(pdf_source: str | Path) -> str:
+ def read_pdf(pdf_source: str | Path, start_page: int | None = None, end_page: int | None = None) -> str:
      """
      Extract text content from a PDF file (local or online).

      Args:
          pdf_source: Path to local PDF file or URL to online PDF
+         start_page: Starting page number (1-indexed, inclusive). Defaults to 1.
+         end_page: Ending page number (1-indexed, inclusive). Defaults to last page.

      Returns:
          str: Extracted text content from the PDF

      Raises:
          FileNotFoundError: If local file doesn't exist
-         ValueError: If URL is invalid or PDF cannot be processed
+         ValueError: If URL is invalid, PDF cannot be processed, or page range is invalid
          Exception: For other PDF processing errors
      """
      try:
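
A short sketch of calling the extended `read_pdf` directly, for both a local file and a URL (both paths are placeholders):

```python
from all_in_mcp.paper import read_pdf

# Local file: pages 2-4 only (1-indexed, inclusive bounds).
excerpt = read_pdf("./downloads/example.pdf", start_page=2, end_page=4)

# Remote PDF: from page 5 through the end of the document.
tail = read_pdf("https://example.org/paper.pdf", start_page=5)

print(len(excerpt), len(tail))
```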
@@ -109,10 +111,10 @@ def read_pdf(pdf_source: str | Path) -> str:
              parsed = urlparse(pdf_source_str)
              if parsed.scheme in ("http", "https"):
                  # Handle online PDF
-                 return _read_pdf_from_url(pdf_source_str)
+                 return _read_pdf_from_url(pdf_source_str, start_page, end_page)
              else:
                  # Handle local file
-                 return _read_pdf_from_file(Path(pdf_source_str))
+                 return _read_pdf_from_file(Path(pdf_source_str), start_page, end_page)
          else:
              raise ValueError("pdf_source must be a string or Path object")

@@ -120,7 +122,46 @@ def read_pdf(pdf_source: str | Path) -> str:
          raise Exception(f"Failed to read PDF from {pdf_source}: {e!s}") from e


- def _read_pdf_from_file(file_path: Path) -> str:
+ def _normalize_page_range(start_page: int | None, end_page: int | None, total_pages: int) -> tuple[int, int]:
+     """
+     Normalize and validate page range parameters.
+
+     Args:
+         start_page: Starting page number (1-indexed, inclusive) or None
+         end_page: Ending page number (1-indexed, inclusive) or None
+         total_pages: Total number of pages in the PDF
+
+     Returns:
+         tuple[int, int]: (start_index, end_index) as 0-indexed values
+
+     Raises:
+         ValueError: If page range is invalid
+     """
+     # Default values
+     if start_page is None:
+         start_page = 1
+     if end_page is None:
+         end_page = total_pages
+
+     # Validate page numbers
+     if start_page < 1:
+         raise ValueError(f"start_page must be >= 1, got {start_page}")
+     if end_page < 1:
+         raise ValueError(f"end_page must be >= 1, got {end_page}")
+     if start_page > end_page:
+         raise ValueError(f"start_page ({start_page}) must be <= end_page ({end_page})")
+     if start_page > total_pages:
+         raise ValueError(f"start_page ({start_page}) exceeds total pages ({total_pages})")
+
+     # Clamp end_page to total_pages
+     if end_page > total_pages:
+         end_page = total_pages
+
+     # Convert to 0-indexed
+     return start_page - 1, end_page - 1
+
+
+ def _read_pdf_from_file(file_path: Path, start_page: int | None = None, end_page: int | None = None) -> str:
      """Read PDF from local file path."""
      if not file_path.exists():
          raise FileNotFoundError(f"PDF file not found: {file_path}")
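
Based on the implementation above, the helper maps the 1-indexed, inclusive user-facing bounds onto 0-indexed, inclusive indices, clamping an oversized `end_page` but rejecting a `start_page` past the last page. For a hypothetical 10-page PDF:

```python
_normalize_page_range(None, None, 10)  # -> (0, 9): whole document
_normalize_page_range(2, 4, 10)        # -> (1, 3)
_normalize_page_range(5, 99, 10)       # -> (4, 9): end_page clamped to the last page
_normalize_page_range(11, 12, 10)      # raises ValueError: start_page exceeds total pages
```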
@@ -131,10 +172,16 @@ def _read_pdf_from_file(file_path: Path) -> str:
      try:
          with open(file_path, "rb") as file:
              pdf_reader = PdfReader(file)
+             total_pages = len(pdf_reader.pages)
+
+             # Validate and normalize page range
+             start_idx, end_idx = _normalize_page_range(start_page, end_page, total_pages)
+
              text_content = []

-             for page_num, page in enumerate(pdf_reader.pages):
+             for page_num in range(start_idx, end_idx + 1):
                  try:
+                     page = pdf_reader.pages[page_num]
                      page_text = page.extract_text()
                      if page_text.strip(): # Only add non-empty pages
                          text_content.append(
@@ -151,7 +198,7 @@ def _read_pdf_from_file(file_path: Path) -> str:
          raise Exception(f"Error reading PDF file {file_path}: {e!s}") from e


- def _read_pdf_from_url(url: str) -> str:
+ def _read_pdf_from_url(url: str, start_page: int | None = None, end_page: int | None = None) -> str:
      """Download and read PDF from URL."""
      try:
          # Download PDF with proper headers
@@ -175,10 +222,16 @@ def _read_pdf_from_url(url: str) -> str:
          # Read PDF from bytes
          pdf_bytes = io.BytesIO(response.content)
          pdf_reader = PdfReader(pdf_bytes)
+         total_pages = len(pdf_reader.pages)
+
+         # Validate and normalize page range
+         start_idx, end_idx = _normalize_page_range(start_page, end_page, total_pages)
+
          text_content = []

-         for page_num, page in enumerate(pdf_reader.pages):
+         for page_num in range(start_idx, end_idx + 1):
              try:
+                 page = pdf_reader.pages[page_num]
                  page_text = page.extract_text()
                  if page_text.strip(): # Only add non-empty pages
                      text_content.append(
src/all_in_mcp/server.py
@@ -47,6 +47,14 @@ async def handle_list_tools() -> list[types.Tool]:
                      "description": "Whether to fetch detailed information for each paper (default: True)",
                      "default": True,
                  },
+                 "year_min": {
+                     "type": "integer",
+                     "description": "Minimum publication year (revised after)",
+                 },
+                 "year_max": {
+                     "type": "integer",
+                     "description": "Maximum publication year (revised before)",
+                 },
              },
              "required": ["query"],
          },
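
On the client side, these become two optional integer fields in the search tool's `inputSchema`. A hypothetical arguments payload an MCP client might send (the tool name is not visible in this hunk, so it is omitted):

```python
# Hypothetical MCP tool-call arguments exercising the new year filters.
arguments = {
    "query": "zero-knowledge proofs",
    "max_results": 5,
    "fetch_details": True,
    "year_min": 2023,
    "year_max": 2025,
}
```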
@@ -85,6 +93,16 @@ async def handle_list_tools() -> list[types.Tool]:
                      "description": "Directory where the PDF is/will be saved (default: './downloads')",
                      "default": "./downloads",
                  },
+                 "start_page": {
+                     "type": "integer",
+                     "description": "Starting page number (1-indexed, inclusive). Defaults to 1.",
+                     "minimum": 1,
+                 },
+                 "end_page": {
+                     "type": "integer",
+                     "description": "Ending page number (1-indexed, inclusive). Defaults to last page.",
+                     "minimum": 1,
+                 },
              },
              "required": ["paper_id"],
          },
@@ -195,6 +213,16 @@ async def handle_list_tools() -> list[types.Tool]:
                      "type": "string",
                      "description": "Path to local PDF file or URL to online PDF",
                  },
+                 "start_page": {
+                     "type": "integer",
+                     "description": "Starting page number (1-indexed, inclusive). Defaults to 1.",
+                     "minimum": 1,
+                 },
+                 "end_page": {
+                     "type": "integer",
+                     "description": "Ending page number (1-indexed, inclusive). Defaults to last page.",
+                     "minimum": 1,
+                 },
              },
              "required": ["pdf_source"],
          },
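
The same optional page bounds are exposed on both the `read-iacr-paper` and `read-pdf` tools. For example, a client could ask for just the opening pages of a remote PDF (the URL is a placeholder):

```python
# Hypothetical arguments for the "read-pdf" tool.
arguments = {
    "pdf_source": "https://example.org/some-paper.pdf",
    "start_page": 1,
    "end_page": 3,
}
```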
@@ -217,6 +245,8 @@ async def handle_call_tool(
              query = arguments.get("query", "")
              max_results = arguments.get("max_results", 10)
              fetch_details = arguments.get("fetch_details", True)
+             year_min = arguments.get("year_min")
+             year_max = arguments.get("year_max")

              if not query:
                  return [
@@ -225,17 +255,34 @@ async def handle_call_tool(
                      )
                  ]

-             papers = iacr_searcher.search(query, max_results, fetch_details)
+             papers = iacr_searcher.search(
+                 query,
+                 max_results=max_results,
+                 fetch_details=fetch_details,
+                 year_min=year_min,
+                 year_max=year_max,
+             )

              if not papers:
+                 year_filter_msg = ""
+                 if year_min or year_max:
+                     year_range = f" ({year_min or 'earliest'}-{year_max or 'latest'})"
+                     year_filter_msg = f" in year range{year_range}"
                  return [
                      types.TextContent(
-                         type="text", text=f"No papers found for query: {query}"
+                         type="text",
+                         text=f"No papers found for query: {query}{year_filter_msg}",
                      )
                  ]

              # Format the results
-             result_text = f"Found {len(papers)} IACR papers for query '{query}':\n\n"
+             year_filter_msg = ""
+             if year_min or year_max:
+                 year_range = f" ({year_min or 'earliest'}-{year_max or 'latest'})"
+                 year_filter_msg = f" in year range{year_range}"
+             result_text = (
+                 f"Found {len(papers)} IACR papers for query '{query}'{year_filter_msg}:\n\n"
+             )
              for i, paper in enumerate(papers, 1):
                  result_text += f"{i}. **{paper.title}**\n"
                  result_text += f" - Paper ID: {paper.paper_id}\n"
@@ -279,6 +326,8 @@ async def handle_call_tool(
          elif name == "read-iacr-paper":
              paper_id = arguments.get("paper_id", "")
              save_path = arguments.get("save_path", "./downloads")
+             start_page = arguments.get("start_page")
+             end_page = arguments.get("end_page")

              if not paper_id:
                  return [
@@ -287,7 +336,7 @@ async def handle_call_tool(
                      )
                  ]

-             result = iacr_searcher.read_paper(paper_id, save_path)
+             result = iacr_searcher.read_paper(paper_id, save_path, start_page=start_page, end_page=end_page)

              if result.startswith("Error"):
                  return [types.TextContent(type="text", text=result)]
@@ -550,6 +599,8 @@ async def handle_call_tool(

          elif name == "read-pdf":
              pdf_source = arguments.get("pdf_source", "")
+             start_page = arguments.get("start_page")
+             end_page = arguments.get("end_page")

              if not pdf_source:
                  return [
@@ -559,7 +610,7 @@ async def handle_call_tool(
                  ]

              try:
-                 result = read_pdf(pdf_source)
+                 result = read_pdf(pdf_source, start_page=start_page, end_page=end_page)
                  return [types.TextContent(type="text", text=result)]

              except Exception as e: