all-in-mcp 0.2.7-py3-none-any.whl → 0.2.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- all_in_mcp/academic_platforms/base.py +1 -1
- all_in_mcp/academic_platforms/crossref.py +7 -1
- all_in_mcp/academic_platforms/cryptobib.py +7 -1
- all_in_mcp/academic_platforms/google_scholar.py +3 -1
- all_in_mcp/academic_platforms/iacr.py +10 -40
- all_in_mcp/paper.py +61 -8
- all_in_mcp/server.py +26 -2
- {all_in_mcp-0.2.7.dist-info → all_in_mcp-0.2.8.dist-info}/METADATA +18 -5
- all_in_mcp-0.2.8.dist-info/RECORD +14 -0
- all_in_mcp-0.2.7.dist-info/RECORD +0 -14
- {all_in_mcp-0.2.7.dist-info → all_in_mcp-0.2.8.dist-info}/WHEEL +0 -0
- {all_in_mcp-0.2.7.dist-info → all_in_mcp-0.2.8.dist-info}/entry_points.txt +0 -0
- {all_in_mcp-0.2.7.dist-info → all_in_mcp-0.2.8.dist-info}/licenses/LICENSE +0 -0
all_in_mcp/academic_platforms/base.py
CHANGED
@@ -18,6 +18,6 @@ class PaperSource(ABC):
         raise NotImplementedError

     @abstractmethod
-    def read_paper(self, paper_id: str, save_path: str) -> str:
+    def read_paper(self, paper_id: str, save_path: str, start_page: int | None = None, end_page: int | None = None) -> str:
         """Read and extract text content from a paper"""
         raise NotImplementedError
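Because the two new parameters are optional keyword arguments defaulting to `None`, existing two-argument implementations and call sites keep working. A minimal sketch of a hypothetical subclass honoring the new signature (`LocalPdfSource` is illustrative, not part of the package; other abstract methods are omitted):

```python
from pathlib import Path

from all_in_mcp.academic_platforms.base import PaperSource
from all_in_mcp.paper import read_pdf


class LocalPdfSource(PaperSource):
    """Hypothetical source whose papers are already saved on disk."""

    def read_paper(self, paper_id: str, save_path: str,
                   start_page: int | None = None,
                   end_page: int | None = None) -> str:
        # Forward the optional range to read_pdf; None keeps the old
        # whole-document behavior, so existing callers are unaffected.
        pdf_path = Path(save_path) / f"{paper_id}.pdf"
        return read_pdf(pdf_path, start_page, end_page)
```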
all_in_mcp/academic_platforms/crossref.py
CHANGED
@@ -228,9 +228,15 @@ class CrossrefSearcher(PaperSource):
         """
         return "Crossref does not provide a direct way to download PDFs. Use the paper's URL or DOI to access the publisher's site for PDF downloads if available."

-    def read_paper(self, paper_id: str, save_path: str) -> str:
+    def read_paper(self, paper_id: str, save_path: str, start_page: int | None = None, end_page: int | None = None) -> str:
         """
         crossref doesn't provide a direct way to read paper text.
+
+        Args:
+            paper_id: Paper identifier
+            save_path: Directory where papers are stored
+            start_page: Starting page number (1-indexed, inclusive). Defaults to 1.
+            end_page: Ending page number (1-indexed, inclusive). Defaults to last page.
         """
         return "Crossref does not provide a direct way to read paper text. Use the download_pdf method to get the PDF if available."

all_in_mcp/academic_platforms/cryptobib.py
CHANGED
@@ -364,9 +364,15 @@ class CryptoBibSearcher(PaperSource):
         """
         return "Error: CryptoBib is a bibliography database and doesn't provide PDF downloads"

-    def read_paper(self, paper_id: str, save_path: str = "./downloads") -> str:
+    def read_paper(self, paper_id: str, save_path: str = "./downloads", start_page: int | None = None, end_page: int | None = None) -> str:
         """
         CryptoBib doesn't provide paper content reading
+
+        Args:
+            paper_id: Paper identifier
+            save_path: Directory where papers are stored
+            start_page: Starting page number (1-indexed, inclusive). Defaults to 1.
+            end_page: Ending page number (1-indexed, inclusive). Defaults to last page.
         """
         return "Error: CryptoBib is a bibliography database and doesn't provide paper content"

all_in_mcp/academic_platforms/google_scholar.py
CHANGED
@@ -228,13 +228,15 @@ class GoogleScholarSearcher(PaperSource):
             "Please use the paper URL to access the publisher's website."
         )

-    def read_paper(self, paper_id: str, save_path: str = "./downloads") -> str:
+    def read_paper(self, paper_id: str, save_path: str = "./downloads", start_page: int | None = None, end_page: int | None = None) -> str:
         """
         Google Scholar doesn't support direct paper reading

         Args:
             paper_id: Paper identifier
             save_path: Directory where papers are stored
+            start_page: Starting page number (1-indexed, inclusive). Defaults to 1.
+            end_page: Ending page number (1-indexed, inclusive). Defaults to last page.

         Returns:
             Message indicating the feature is not supported
all_in_mcp/academic_platforms/iacr.py
CHANGED
@@ -232,13 +232,15 @@ class IACRSearcher(PaperSource):
             logger.error(f"PDF download error: {e}")
             return f"Error downloading PDF: {e}"

-    def read_paper(self, paper_id: str, save_path: str = "./downloads") -> str:
+    def read_paper(self, paper_id: str, save_path: str = "./downloads", start_page: int | None = None, end_page: int | None = None) -> str:
         """
         Download and extract text from IACR paper PDF

         Args:
             paper_id: IACR paper ID
             save_path: Directory to save downloaded PDF
+            start_page: Starting page number (1-indexed, inclusive). Defaults to 1.
+            end_page: Ending page number (1-indexed, inclusive). Defaults to last page.

         Returns:
             str: Extracted text from the PDF or error message
@@ -249,56 +251,24 @@ class IACRSearcher(PaperSource):
             if not paper or not paper.pdf_url:
                 return f"Error: Could not find PDF URL for paper {paper_id}"

-            #
-
-            pdf_response.raise_for_status()
+            # Import read_pdf function
+            from ..paper import read_pdf

-            #
-
-
-            # Save the PDF
-            filename = f"iacr_{paper_id.replace('/', '_')}.pdf"
-            pdf_path = os.path.join(save_path, filename)
-
-            with open(pdf_path, "wb") as f:
-                f.write(pdf_response.content)
-
-            # Extract text using pypdf
-            reader = PdfReader(pdf_path)
-            text = ""
-
-            for page_num, page in enumerate(reader.pages):
-                try:
-                    page_text = page.extract_text()
-                    if page_text:
-                        text += f"\n--- Page {page_num + 1} ---\n"
-                        text += page_text + "\n"
-                except Exception as e:
-                    logger.warning(
-                        f"Failed to extract text from page {page_num + 1}: {e}"
-                    )
-                    continue
-
-            if not text.strip():
-                return (
-                    f"PDF downloaded to {pdf_path}, but unable to extract readable text"
-                )
+            # Use the read_pdf function to extract text
+            text = read_pdf(paper.pdf_url, start_page, end_page)

             # Add paper metadata at the beginning
             metadata = f"Title: {paper.title}\n"
             metadata += f"Authors: {', '.join(paper.authors)}\n"
             metadata += f"Published Date: {paper.published_date}\n"
             metadata += f"URL: {paper.url}\n"
-            metadata += f"PDF
+            metadata += f"PDF URL: {paper.pdf_url}\n"
             metadata += "=" * 80 + "\n\n"

-            return metadata + text
+            return metadata + text

-        except requests.RequestException as e:
-            logger.error(f"Error downloading PDF: {e}")
-            return f"Error downloading PDF: {e}"
         except Exception as e:
-            logger.error(f"
+            logger.error(f"Error reading paper: {e}")
             return f"Error reading paper: {e}"

     def get_paper_details(self, paper_id: str) -> Paper | None:
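After this refactor the IACR searcher no longer downloads and parses the PDF itself; it resolves the paper's `pdf_url` and hands the page range to `read_pdf`. A call sketch (the ePrint ID is a placeholder):

```python
from all_in_mcp.academic_platforms.iacr import IACRSearcher

searcher = IACRSearcher()

# Only the first three pages; omitting start_page/end_page still
# returns the full text, prefixed with the metadata block.
text = searcher.read_paper("2024/100", save_path="./downloads",
                           start_page=1, end_page=3)
print(text[:200])
```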
all_in_mcp/paper.py
CHANGED
@@ -86,19 +86,21 @@ class Paper:
         return read_pdf(self.pdf_url)


-def read_pdf(pdf_source: str | Path) -> str:
+def read_pdf(pdf_source: str | Path, start_page: int | None = None, end_page: int | None = None) -> str:
     """
     Extract text content from a PDF file (local or online).

     Args:
         pdf_source: Path to local PDF file or URL to online PDF
+        start_page: Starting page number (1-indexed, inclusive). Defaults to 1.
+        end_page: Ending page number (1-indexed, inclusive). Defaults to last page.

     Returns:
         str: Extracted text content from the PDF

     Raises:
         FileNotFoundError: If local file doesn't exist
-        ValueError: If URL is invalid
+        ValueError: If URL is invalid, PDF cannot be processed, or page range is invalid
         Exception: For other PDF processing errors
     """
     try:
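`read_pdf` keeps its single-argument form; the range is only applied when given. A short usage sketch (both the path and the URL are placeholders):

```python
from all_in_mcp.paper import read_pdf

# Whole document, exactly as in 0.2.7.
full_text = read_pdf("./downloads/example.pdf")

# Pages 2 through 4 only (1-indexed, inclusive); URLs work the same way.
excerpt = read_pdf("https://example.org/paper.pdf", start_page=2, end_page=4)
```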
@@ -109,10 +111,10 @@ def read_pdf(pdf_source: str | Path) -> str:
         parsed = urlparse(pdf_source_str)
         if parsed.scheme in ("http", "https"):
             # Handle online PDF
-            return _read_pdf_from_url(pdf_source_str)
+            return _read_pdf_from_url(pdf_source_str, start_page, end_page)
         else:
             # Handle local file
-            return _read_pdf_from_file(Path(pdf_source_str))
+            return _read_pdf_from_file(Path(pdf_source_str), start_page, end_page)
     else:
         raise ValueError("pdf_source must be a string or Path object")

@@ -120,7 +122,46 @@ def read_pdf(pdf_source: str | Path) -> str:
         raise Exception(f"Failed to read PDF from {pdf_source}: {e!s}") from e


-def _read_pdf_from_file(file_path: Path) -> str:
+def _normalize_page_range(start_page: int | None, end_page: int | None, total_pages: int) -> tuple[int, int]:
+    """
+    Normalize and validate page range parameters.
+
+    Args:
+        start_page: Starting page number (1-indexed, inclusive) or None
+        end_page: Ending page number (1-indexed, inclusive) or None
+        total_pages: Total number of pages in the PDF
+
+    Returns:
+        tuple[int, int]: (start_index, end_index) as 0-indexed values
+
+    Raises:
+        ValueError: If page range is invalid
+    """
+    # Default values
+    if start_page is None:
+        start_page = 1
+    if end_page is None:
+        end_page = total_pages
+
+    # Validate page numbers
+    if start_page < 1:
+        raise ValueError(f"start_page must be >= 1, got {start_page}")
+    if end_page < 1:
+        raise ValueError(f"end_page must be >= 1, got {end_page}")
+    if start_page > end_page:
+        raise ValueError(f"start_page ({start_page}) must be <= end_page ({end_page})")
+    if start_page > total_pages:
+        raise ValueError(f"start_page ({start_page}) exceeds total pages ({total_pages})")
+
+    # Clamp end_page to total_pages
+    if end_page > total_pages:
+        end_page = total_pages
+
+    # Convert to 0-indexed
+    return start_page - 1, end_page - 1
+
+
+def _read_pdf_from_file(file_path: Path, start_page: int | None = None, end_page: int | None = None) -> str:
     """Read PDF from local file path."""
     if not file_path.exists():
         raise FileNotFoundError(f"PDF file not found: {file_path}")
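The helper defaults missing bounds, rejects impossible ranges, clamps `end_page` to the document length, and returns 0-indexed indices for the extraction loop. A quick behavior sketch (it is a private helper, imported here only for illustration):

```python
from all_in_mcp.paper import _normalize_page_range

# Ten-page document, no bounds given: full range, 0-indexed.
assert _normalize_page_range(None, None, total_pages=10) == (0, 9)

# end_page past the end is clamped instead of rejected.
assert _normalize_page_range(3, 50, total_pages=10) == (2, 9)

# start_page > end_page is rejected with ValueError.
try:
    _normalize_page_range(5, 2, total_pages=10)
except ValueError as exc:
    print(exc)  # start_page (5) must be <= end_page (2)
```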
@@ -131,10 +172,16 @@ def _read_pdf_from_file(file_path: Path) -> str:
     try:
         with open(file_path, "rb") as file:
             pdf_reader = PdfReader(file)
+            total_pages = len(pdf_reader.pages)
+
+            # Validate and normalize page range
+            start_idx, end_idx = _normalize_page_range(start_page, end_page, total_pages)
+
             text_content = []

-            for page_num,
+            for page_num in range(start_idx, end_idx + 1):
                 try:
+                    page = pdf_reader.pages[page_num]
                     page_text = page.extract_text()
                     if page_text.strip():  # Only add non-empty pages
                         text_content.append(
@@ -151,7 +198,7 @@ def _read_pdf_from_file(file_path: Path) -> str:
         raise Exception(f"Error reading PDF file {file_path}: {e!s}") from e


-def _read_pdf_from_url(url: str) -> str:
+def _read_pdf_from_url(url: str, start_page: int | None = None, end_page: int | None = None) -> str:
     """Download and read PDF from URL."""
     try:
         # Download PDF with proper headers
@@ -175,10 +222,16 @@ def _read_pdf_from_url(url: str) -> str:
         # Read PDF from bytes
         pdf_bytes = io.BytesIO(response.content)
         pdf_reader = PdfReader(pdf_bytes)
+        total_pages = len(pdf_reader.pages)
+
+        # Validate and normalize page range
+        start_idx, end_idx = _normalize_page_range(start_page, end_page, total_pages)
+
         text_content = []

-        for page_num,
+        for page_num in range(start_idx, end_idx + 1):
             try:
+                page = pdf_reader.pages[page_num]
                 page_text = page.extract_text()
                 if page_text.strip():  # Only add non-empty pages
                     text_content.append(
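Both `_read_pdf_from_file` and `_read_pdf_from_url` now share the same pattern: normalize the range once, then index `pdf_reader.pages` directly instead of enumerating every page. A standalone sketch of that pattern with pypdf (the file path is a placeholder):

```python
from pypdf import PdfReader

from all_in_mcp.paper import _normalize_page_range

reader = PdfReader("./downloads/example.pdf")
start_idx, end_idx = _normalize_page_range(2, 3, len(reader.pages))

pages_text = []
for page_num in range(start_idx, end_idx + 1):
    page_text = reader.pages[page_num].extract_text()
    if page_text.strip():  # keep only pages with extractable text
        pages_text.append(page_text)

print("\n".join(pages_text))
```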
all_in_mcp/server.py
CHANGED
@@ -93,6 +93,16 @@ async def handle_list_tools() -> list[types.Tool]:
                         "description": "Directory where the PDF is/will be saved (default: './downloads')",
                         "default": "./downloads",
                     },
+                    "start_page": {
+                        "type": "integer",
+                        "description": "Starting page number (1-indexed, inclusive). Defaults to 1.",
+                        "minimum": 1,
+                    },
+                    "end_page": {
+                        "type": "integer",
+                        "description": "Ending page number (1-indexed, inclusive). Defaults to last page.",
+                        "minimum": 1,
+                    },
                 },
                 "required": ["paper_id"],
             },
@@ -203,6 +213,16 @@ async def handle_list_tools() -> list[types.Tool]:
                         "type": "string",
                         "description": "Path to local PDF file or URL to online PDF",
                     },
+                    "start_page": {
+                        "type": "integer",
+                        "description": "Starting page number (1-indexed, inclusive). Defaults to 1.",
+                        "minimum": 1,
+                    },
+                    "end_page": {
+                        "type": "integer",
+                        "description": "Ending page number (1-indexed, inclusive). Defaults to last page.",
+                        "minimum": 1,
+                    },
                 },
                 "required": ["pdf_source"],
             },
@@ -306,6 +326,8 @@ async def handle_call_tool(
         elif name == "read-iacr-paper":
             paper_id = arguments.get("paper_id", "")
             save_path = arguments.get("save_path", "./downloads")
+            start_page = arguments.get("start_page")
+            end_page = arguments.get("end_page")

             if not paper_id:
                 return [
@@ -314,7 +336,7 @@ async def handle_call_tool(
                     )
                 ]

-            result = iacr_searcher.read_paper(paper_id, save_path)
+            result = iacr_searcher.read_paper(paper_id, save_path, start_page=start_page, end_page=end_page)

             if result.startswith("Error"):
                 return [types.TextContent(type="text", text=result)]
@@ -577,6 +599,8 @@ async def handle_call_tool(

         elif name == "read-pdf":
             pdf_source = arguments.get("pdf_source", "")
+            start_page = arguments.get("start_page")
+            end_page = arguments.get("end_page")

             if not pdf_source:
                 return [
@@ -586,7 +610,7 @@ async def handle_call_tool(
                 ]

             try:
-                result = read_pdf(pdf_source)
+                result = read_pdf(pdf_source, start_page=start_page, end_page=end_page)
                 return [types.TextContent(type="text", text=result)]

             except Exception as e:
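On the wire, `start_page` and `end_page` are optional integer fields (minimum 1) in the tool input schemas, and the handler simply forwards whatever the client sent. A sketch that exercises the `read-pdf` tool by calling the server handler directly, purely for illustration (a real MCP client would send these arguments over stdio; the URL is a placeholder):

```python
import asyncio

from all_in_mcp.server import handle_call_tool


async def main() -> None:
    result = await handle_call_tool(
        "read-pdf",
        {
            "pdf_source": "https://example.org/paper.pdf",
            "start_page": 1,
            "end_page": 3,
        },
    )
    # The handler returns a list of TextContent items.
    print(result[0].text)


asyncio.run(main())
```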
{all_in_mcp-0.2.7.dist-info → all_in_mcp-0.2.8.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: all-in-mcp
-Version: 0.2.7
+Version: 0.2.8
 Summary: An MCP (Model Context Protocol) server providing daily-use utility functions and academic paper search capabilities
 Project-URL: Homepage, https://github.com/jiahaoxiang2000/all-in-mcp
 Project-URL: Repository, https://github.com/jiahaoxiang2000/all-in-mcp
@@ -73,8 +73,8 @@ All tools are implemented as async MCP endpoints with proper validation and erro

 ## Quick Start

-- [**Video for Env Setup**](https://www.bilibili.com/video/BV1cZKozaEjg)
-- [_Overview PDF_](https://github.com/jiahaoxiang2000/tutor/blob/main/Apaper/config.pdf)
+- [**Video for Env Setup**](https://www.bilibili.com/video/BV1cZKozaEjg) [**Video for Claude code**](https://www.bilibili.com/video/BV1s9KmzVEcE/)
+- [_Overview PDF_](https://github.com/jiahaoxiang2000/tutor/blob/main/Apaper/config.pdf) [_PDF for Claude code_](https://github.com/jiahaoxiang2000/tutor/blob/main/Apaper/config-claude.pdf)

 ### Prerequisites

@@ -89,11 +89,11 @@ Install from PyPI (Recommended by `UV`)
 uv pip install all-in-mcp
 ```

-### Integration with MCP Clients
+### Integration with MCP Clients

 Add this server to your MCP client configuration. The server runs using stdio transport.

-
+#### VSCode Configuration

 ```json .vscode/mcp.json
 {
@@ -107,6 +107,19 @@ Example configuration for Vscode:
 }
 ```

+#### Claude Code Configuration
+
+```json .mcp.json
+{
+  "mcpServers": {
+    "all-in-mcp": {
+      "command": "uv",
+      "args": ["run", "all-in-mcp"]
+    }
+  }
+}
+```
+
 <details>
 <summary>Development</summary>

all_in_mcp-0.2.8.dist-info/RECORD
ADDED
@@ -0,0 +1,14 @@
+all_in_mcp/__init__.py,sha256=REDwcbifpuUnsFAhNowIKCZ-8g6irIzUFTI_f8Aunxk,215
+all_in_mcp/paper.py,sha256=juZI0M_gAH_WNIaLCt_RNxvbGRuEDbuSN90SI3mb2fs,9356
+all_in_mcp/server.py,sha256=zmBTgVJVHJdQF8r6Y4OP2bcvLBtEioBHRDAYPUJ3JMc,26849
+all_in_mcp/academic_platforms/__init__.py,sha256=IpI29DMS4_mSmTEa8VkQEiJCl7OyFbswSx7mWSp08P4,285
+all_in_mcp/academic_platforms/base.py,sha256=dRIybIIAJDZzfZFmqSuJ-qdMUz2hKzIqltwRVclzBzI,733
+all_in_mcp/academic_platforms/crossref.py,sha256=3qif-sPXOkaicC0sb3wnGCRf4IqXLQ2wMx9LO5CG0Ao,9058
+all_in_mcp/academic_platforms/cryptobib.py,sha256=zozR040FHEY1tfwakqPiV_ouwdzruhHdgKcSM1gHdUc,17711
+all_in_mcp/academic_platforms/google_scholar.py,sha256=SsD0unk-B32MGrwmOlgJJl6d06vRJKm1V2FziTwtcCQ,8969
+all_in_mcp/academic_platforms/iacr.py,sha256=4d_MTcUEBqN0HsAUBCD6PzHJOp8XCGDhQkQ1ojCcQQY,14870
+all_in_mcp-0.2.8.dist-info/METADATA,sha256=wnBPSHo_8-QA-tHikG3wNQa13I9QSOp6vTfpFK3kYfQ,4568
+all_in_mcp-0.2.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+all_in_mcp-0.2.8.dist-info/entry_points.txt,sha256=FbQOtUQzOIfkMNp4qQV1NTU9K4J7C0XGH9wKKhfK1VM,47
+all_in_mcp-0.2.8.dist-info/licenses/LICENSE,sha256=idExTHItK7AC5FVo4H9HKnr6h51Z8BKCEztZPyP8nK8,1062
+all_in_mcp-0.2.8.dist-info/RECORD,,
all_in_mcp-0.2.7.dist-info/RECORD
REMOVED
@@ -1,14 +0,0 @@
-all_in_mcp/__init__.py,sha256=REDwcbifpuUnsFAhNowIKCZ-8g6irIzUFTI_f8Aunxk,215
-all_in_mcp/paper.py,sha256=vSJyC_ehfZX5-ASYG048z8gaD1LKafFdJvR13iQcJRw,7104
-all_in_mcp/server.py,sha256=NQ-AEARpZXafLs_yTUKmSC2I32tLQUVL_KLYdefWdm0,25585
-all_in_mcp/academic_platforms/__init__.py,sha256=IpI29DMS4_mSmTEa8VkQEiJCl7OyFbswSx7mWSp08P4,285
-all_in_mcp/academic_platforms/base.py,sha256=VYMp8_tnp7YzXKAXLfr7uUxgvJBNKRyC_NT1uVhBOwY,673
-all_in_mcp/academic_platforms/crossref.py,sha256=D-wvSwnOocP16m9fA3xJ6VGEpmRPtMmGoFm5MlyPdXE,8707
-all_in_mcp/academic_platforms/cryptobib.py,sha256=F9N23eojfyAIjnFDPrJAYOpZ_Vi9iHOqNHGtKC6O16c,17360
-all_in_mcp/academic_platforms/google_scholar.py,sha256=_KLFfIOZeFCGxFOt-nwzm1fgZKMlXOf3HvIjXAYE5cI,8737
-all_in_mcp/academic_platforms/iacr.py,sha256=CMv3kVvF7NiZJdtvXc8xoGOP-gMNnAkhIETTbYTP75o,15849
-all_in_mcp-0.2.7.dist-info/METADATA,sha256=r2v5qsm0TwWdfUZwFGuaFxVSeyh9PYlarJym8Kt6pFM,4242
-all_in_mcp-0.2.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-all_in_mcp-0.2.7.dist-info/entry_points.txt,sha256=FbQOtUQzOIfkMNp4qQV1NTU9K4J7C0XGH9wKKhfK1VM,47
-all_in_mcp-0.2.7.dist-info/licenses/LICENSE,sha256=idExTHItK7AC5FVo4H9HKnr6h51Z8BKCEztZPyP8nK8,1062
-all_in_mcp-0.2.7.dist-info/RECORD,,
{all_in_mcp-0.2.7.dist-info → all_in_mcp-0.2.8.dist-info}/WHEEL: File without changes
{all_in_mcp-0.2.7.dist-info → all_in_mcp-0.2.8.dist-info}/entry_points.txt: File without changes
{all_in_mcp-0.2.7.dist-info → all_in_mcp-0.2.8.dist-info}/licenses/LICENSE: File without changes