academia-mcp 1.2.2__tar.gz → 1.4.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/PKG-INFO +51 -8
- academia_mcp-1.4.0/README.md +83 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/files.py +1 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/server.py +27 -15
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/tools/__init__.py +10 -2
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/tools/anthology_search.py +1 -8
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/tools/arxiv_download.py +2 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/tools/document_qa.py +4 -1
- academia_mcp-1.4.0/academia_mcp/tools/latex.py +151 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/tools/visit_webpage.py +19 -8
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/tools/web_search.py +9 -6
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/utils.py +22 -2
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp.egg-info/PKG-INFO +51 -8
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp.egg-info/SOURCES.txt +2 -2
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp.egg-info/requires.txt +0 -5
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/pyproject.toml +9 -5
- academia_mcp-1.4.0/tests/test_latex.py +41 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/tests/test_visit_webpage.py +5 -0
- academia_mcp-1.2.2/README.md +0 -35
- academia_mcp-1.2.2/academia_mcp/tools/md_to_pdf.py +0 -404
- academia_mcp-1.2.2/tests/test_md_to_pdf.py +0 -114
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/LICENSE +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/__init__.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/__main__.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/llm.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/py.typed +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/tools/arxiv_search.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/tools/bitflip.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/tools/hf_datasets_search.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/tools/py.typed +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp/tools/s2_citations.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp.egg-info/dependency_links.txt +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp.egg-info/entry_points.txt +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/academia_mcp.egg-info/top_level.txt +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/setup.cfg +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/tests/test_anthology_search.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/tests/test_arxiv_download.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/tests/test_arxiv_search.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/tests/test_bitflip.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/tests/test_document_qa.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/tests/test_extract_json.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/tests/test_hf_dataset_search.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/tests/test_s2_citations.py +0 -0
- {academia_mcp-1.2.2 → academia_mcp-1.4.0}/tests/test_web_search.py +0 -0
--- academia_mcp-1.2.2/PKG-INFO
+++ academia_mcp-1.4.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: academia-mcp
-Version: 1.2.2
+Version: 1.4.0
 Summary: MCP server that provides different tools to search for scientific publications
 Author-email: Ilya Gusev <phoenixilya@gmail.com>
 Project-URL: Homepage, https://github.com/IlyaGusev/academia_mcp
@@ -22,16 +22,11 @@ Requires-Dist: markdownify==0.14.1
 Requires-Dist: acl-anthology==0.5.2
 Requires-Dist: markdown==3.7.0
 Requires-Dist: types-markdown==3.7.0.20250322
-Requires-Dist: black==25.1.0
-Requires-Dist: mypy==1.16.0
-Requires-Dist: flake8==7.2.0
 Requires-Dist: huggingface-hub>=0.32.4
 Requires-Dist: fire>=0.7.0
-Requires-Dist: pytest>=8.4.1
 Requires-Dist: openai>=1.97.1
 Requires-Dist: jinja2>=3.1.6
 Requires-Dist: datasets>=4.0.0
-Requires-Dist: pytest-asyncio>=1.1.0
 Dynamic: license-file
 
 # Academia MCP
@@ -44,16 +39,26 @@ A collection of MCP tools related to the search of scientific papers:
 - Web search: Exa/Brave/Tavily
 - Page crawler
 
-Install
+## Install
+
+- Using pip (end users):
 ```
 pip3 install academia-mcp
 ```
+
+- For development (uv + Makefile):
+```
+uv venv .venv
+make install
+```
+
+## Examples
 Comprehensive report screencast: https://www.youtube.com/watch?v=4bweqQcN6w8
 
 Single paper screencast: https://www.youtube.com/watch?v=IAAPMptJ5k8
 
-Claude Desktop config:
+
+## Claude Desktop config
 ```
 {
     "mcpServers": {
@@ -69,3 +74,41 @@ Claude Desktop config:
     }
 }
 ```
+
+## Running the server (CLI)
+
+```
+uv run -m academia_mcp --transport streamable-http
+```
+
+Notes:
+- Transports supported: `stdio`, `sse`, `streamable-http`.
+- Host/port are used for HTTP transports; for `stdio` they are ignored.
+
+## Makefile targets
+
+- `make install`: install the package in editable mode with uv.
+- `make validate`: run black, flake8, and mypy (strict).
+- `make test`: run the test suite with pytest.
+- `make publish`: build and publish using uv.
+
+## Environment variables
+
+Set as needed depending on which tools you use:
+
+- `TAVILY_API_KEY`: enables Tavily in `web_search`.
+- `EXA_API_KEY`: enables Exa in `web_search` and `visit_webpage`.
+- `BRAVE_API_KEY`: enables Brave in `web_search`.
+- `OPENROUTER_API_KEY`: required for `document_qa`.
+- `BASE_URL`: override OpenRouter base URL for `document_qa` and bitflip tools.
+- `DOCUMENT_QA_MODEL_NAME`: override default model for `document_qa`.
+- `BITFLIP_MODEL_NAME`: override default model for bitflip tools.
+- `WORKSPACE_DIR`: directory for generated files (PDFs, temp artifacts).
+
+## md_to_pdf requirements
+
+The `md_to_pdf` tool invokes `pdflatex`. Ensure a LaTeX distribution is installed and `pdflatex` is on PATH. On Debian/Ubuntu:
+
+```
+sudo apt install texlive-latex-base texlive-fonts-recommended texlive-latex-extra texlive-science
+```
--- /dev/null
+++ academia_mcp-1.4.0/README.md
@@ -0,0 +1,83 @@
+# Academia MCP
+
+A collection of MCP tools related to the search of scientific papers:
+- ArXiv search and download
+- ACL Anthology search
+- HuggingFace datasets search
+- Semantic Scholar citation graphs
+- Web search: Exa/Brave/Tavily
+- Page crawler
+
+## Install
+
+- Using pip (end users):
+```
+pip3 install academia-mcp
+```
+
+- For development (uv + Makefile):
+```
+uv venv .venv
+make install
+```
+
+## Examples
+Comprehensive report screencast: https://www.youtube.com/watch?v=4bweqQcN6w8
+
+Single paper screencast: https://www.youtube.com/watch?v=IAAPMptJ5k8
+
+
+## Claude Desktop config
+```
+{
+    "mcpServers": {
+        "academia": {
+            "command": "python3",
+            "args": [
+                "-m",
+                "academia_mcp",
+                "--transport",
+                "stdio"
+            ]
+        }
+    }
+}
+```
+
+## Running the server (CLI)
+
+```
+uv run -m academia_mcp --transport streamable-http
+```
+
+Notes:
+- Transports supported: `stdio`, `sse`, `streamable-http`.
+- Host/port are used for HTTP transports; for `stdio` they are ignored.
+
+## Makefile targets
+
+- `make install`: install the package in editable mode with uv.
+- `make validate`: run black, flake8, and mypy (strict).
+- `make test`: run the test suite with pytest.
+- `make publish`: build and publish using uv.
+
+## Environment variables
+
+Set as needed depending on which tools you use:
+
+- `TAVILY_API_KEY`: enables Tavily in `web_search`.
+- `EXA_API_KEY`: enables Exa in `web_search` and `visit_webpage`.
+- `BRAVE_API_KEY`: enables Brave in `web_search`.
+- `OPENROUTER_API_KEY`: required for `document_qa`.
+- `BASE_URL`: override OpenRouter base URL for `document_qa` and bitflip tools.
+- `DOCUMENT_QA_MODEL_NAME`: override default model for `document_qa`.
+- `BITFLIP_MODEL_NAME`: override default model for bitflip tools.
+- `WORKSPACE_DIR`: directory for generated files (PDFs, temp artifacts).
+
+## md_to_pdf requirements
+
+The `md_to_pdf` tool invokes `pdflatex`. Ensure a LaTeX distribution is installed and `pdflatex` is on PATH. On Debian/Ubuntu:
+
+```
+sudo apt install texlive-latex-base texlive-fonts-recommended texlive-latex-extra texlive-science
+```
--- academia_mcp-1.2.2/academia_mcp/server.py
+++ academia_mcp-1.4.0/academia_mcp/server.py
@@ -12,7 +12,12 @@ from .tools.s2_citations import s2_get_citations, s2_get_references
 from .tools.hf_datasets_search import hf_datasets_search
 from .tools.anthology_search import anthology_search
 from .tools.document_qa import document_qa
-from .tools.md_to_pdf import md_to_pdf
+from .tools.latex import (
+    compile_latex_from_file,
+    compile_latex_from_str,
+    get_latex_template,
+    get_latex_templates_list,
+)
 from .tools.web_search import web_search, tavily_web_search, exa_web_search, brave_web_search
 from .tools.visit_webpage import visit_webpage
 from .tools.bitflip import (
@@ -33,7 +38,7 @@ def find_free_port() -> int:
             return port
         except Exception:
            continue
-
+    raise RuntimeError("No free port in range 5000-6000 found")
 
 
 def run(
@@ -42,6 +47,8 @@ def run(
     mount_path: str = "/",
     streamable_http_path: str = "/mcp",
     transport: Literal["stdio", "sse", "streamable-http"] = "streamable-http",
+    disable_web_search_tools: bool = False,
+    disable_llm_tools: bool = False,
 ) -> None:
     server = FastMCP(
         "Academia MCP",
@@ -56,21 +63,26 @@ def run(
     server.add_tool(s2_get_references)
     server.add_tool(hf_datasets_search)
     server.add_tool(anthology_search)
-    server.add_tool(md_to_pdf)
+    server.add_tool(compile_latex_from_file)
+    server.add_tool(compile_latex_from_str)
+    server.add_tool(get_latex_template)
+    server.add_tool(get_latex_templates_list)
     server.add_tool(visit_webpage)
-    server.add_tool(extract_bitflip_info)
-    server.add_tool(generate_research_proposal)
-    server.add_tool(score_research_proposals)
 
-    if
-
-
-
-
-
-
-
-
+    if not disable_web_search_tools:
+        if os.getenv("TAVILY_API_KEY"):
+            server.add_tool(tavily_web_search)
+        if os.getenv("EXA_API_KEY"):
+            server.add_tool(exa_web_search)
+        if os.getenv("BRAVE_API_KEY"):
+            server.add_tool(brave_web_search)
+        if os.getenv("EXA_API_KEY") or os.getenv("BRAVE_API_KEY") or os.getenv("TAVILY_API_KEY"):
+            server.add_tool(web_search)
+
+    if not disable_llm_tools and os.getenv("OPENROUTER_API_KEY"):
+        server.add_tool(extract_bitflip_info)
+        server.add_tool(generate_research_proposal)
+        server.add_tool(score_research_proposals)
         server.add_tool(document_qa)
 
     if port is None:
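In 1.4.0, tool registration in `run()` is gated: the LaTeX tools replace `md_to_pdf`, web-search tools are registered only when the matching API key is present (and can be disabled wholesale via `disable_web_search_tools`), and the bitflip/`document_qa` tools require `OPENROUTER_API_KEY` unless `disable_llm_tools` short-circuits them. A minimal sketch of starting the server in-process with the new flags; the `academia_mcp.server` import path is inferred from the file list above, not confirmed by the diff:

```python
# Sketch, not from the package docs: run an stdio server without LLM tools.
# Assumes run() is importable as academia_mcp.server.run (inferred path).
from academia_mcp.server import run

run(
    transport="stdio",               # "stdio", "sse", or "streamable-http"
    disable_web_search_tools=True,   # skip tavily/exa/brave registration
    disable_llm_tools=True,          # skip bitflip tools and document_qa
)
```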
--- academia_mcp-1.2.2/academia_mcp/tools/__init__.py
+++ academia_mcp-1.4.0/academia_mcp/tools/__init__.py
@@ -4,7 +4,12 @@ from .arxiv_download import arxiv_download
 from .hf_datasets_search import hf_datasets_search
 from .s2_citations import s2_get_references, s2_get_citations
 from .document_qa import document_qa
-from .md_to_pdf import md_to_pdf
+from .latex import (
+    compile_latex_from_file,
+    compile_latex_from_str,
+    get_latex_template,
+    get_latex_templates_list,
+)
 from .web_search import web_search, tavily_web_search, exa_web_search, brave_web_search
 from .visit_webpage import visit_webpage
 from .bitflip import extract_bitflip_info, generate_research_proposal, score_research_proposals
@@ -18,7 +23,10 @@ __all__ = [
     "s2_get_citations",
     "hf_datasets_search",
     "document_qa",
-    "md_to_pdf",
+    "compile_latex_from_file",
+    "compile_latex_from_str",
+    "get_latex_template",
+    "get_latex_templates_list",
     "web_search",
     "tavily_web_search",
     "exa_web_search",
--- academia_mcp-1.2.2/academia_mcp/tools/anthology_search.py
+++ academia_mcp-1.4.0/academia_mcp/tools/anthology_search.py
@@ -34,20 +34,13 @@ def _format_authors(authors: List[Any]) -> str:
     return result
 
 
-def _format_date(date_str: str) -> str:
-    try:
-        return datetime.strptime(date_str, "%Y").strftime("%B %d, %Y")
-    except ValueError:
-        return date_str
-
-
 def _clean_entry(entry: Any) -> Dict[str, Any]:
     return {
         "id": entry.full_id,
         "title": _format_text_field(entry.title.as_text()),
         "authors": _format_authors(entry.authors),
         "abstract": (_format_text_field(entry.abstract.as_text()) if entry.abstract else ""),
-        "
+        "published_year": entry.year,
         "categories": ", ".join(entry.venue_ids),
         "comment": entry.note if entry.note else "",
         "url": entry.pdf.url if entry.pdf else "",
|
@@ -35,6 +35,8 @@ def parse_pdf_file(pdf_path: Path) -> List[str]:
|
|
35
35
|
for page_number, page in enumerate(reader.pages, start=1):
|
36
36
|
try:
|
37
37
|
text = page.extract_text()
|
38
|
+
if not text:
|
39
|
+
continue
|
38
40
|
prefix = f"## Page {page_number}\n\n"
|
39
41
|
pages.append(prefix + text)
|
40
42
|
except Exception:
|
--- academia_mcp-1.2.2/academia_mcp/tools/document_qa.py
+++ academia_mcp-1.4.0/academia_mcp/tools/document_qa.py
@@ -6,6 +6,7 @@ from dotenv import load_dotenv
 from pydantic import BaseModel
 
 from academia_mcp.llm import llm_acall
+from academia_mcp.utils import truncate_content
 
 load_dotenv()
 
@@ -62,9 +63,11 @@ async def document_qa(
     assert question and question.strip(), "Please provide non-empty 'question'"
     if isinstance(document, dict):
         document = json.dumps(document)
-
     assert document and document.strip(), "Please provide non-empty 'document'"
 
+    question = truncate_content(question, 10000)
+    document = truncate_content(document, 200000)
+
     model_name = os.getenv("DOCUMENT_QA_MODEL_NAME", "deepseek/deepseek-chat-v3-0324")
     prompt = PROMPT.format(question=question, document=document)
     content = await llm_acall(model_name=model_name, prompt=prompt)
--- /dev/null
+++ academia_mcp-1.4.0/academia_mcp/tools/latex.py
@@ -0,0 +1,151 @@
+import re
+import subprocess
+import shutil
+import tempfile
+import json
+from pathlib import Path
+
+
+from academia_mcp.files import get_workspace_dir, DEFAULT_LATEX_TEMPLATES_DIR_PATH
+
+
+def get_latex_templates_list() -> str:
+    """
+    Get the list of available latex templates.
+    Always use one of the templates from the list.
+
+    Returns a JSON list serialized to a string.
+    Use `json.loads` to deserialize the result.
+    """
+    return json.dumps([str(path.name) for path in DEFAULT_LATEX_TEMPLATES_DIR_PATH.glob("*")])
+
+
+def get_latex_template(template_name: str) -> str:
+    """
+    Get the latex template by name.
+
+    Returns a JSON object serialized to a string.
+    Use `json.loads` to deserialize the result.
+    The structure is: {"template": "...", "style": "..."}
+
+    Args:
+        template_name: The name of the latex template.
+    """
+    template_dir_path = DEFAULT_LATEX_TEMPLATES_DIR_PATH / template_name
+    if not template_dir_path.exists():
+        raise FileNotFoundError(
+            f"Template {template_name} not found in {DEFAULT_LATEX_TEMPLATES_DIR_PATH}"
+        )
+    template_path = template_dir_path / f"{template_name}.tex"
+    style_path = template_dir_path / f"{template_name}.sty"
+    if not template_path.exists():
+        raise FileNotFoundError(f"Template file {template_path} not found in {template_dir_path}")
+    if not style_path.exists():
+        raise FileNotFoundError(f"Style file {style_path} not found in {template_dir_path}")
+    return json.dumps({"template": template_path.read_text(), "style": style_path.read_text()})
+
+
+def compile_latex_from_file(
+    input_filename: str, output_filename: str = "output.pdf", timeout: int = 60
+) -> str:
+    """
+    Compile a latex file.
+
+    Returns a string with the result of the compilation.
+
+    Args:
+        input_filename: The path to the latex file.
+        output_filename: The path to the output pdf file.
+        timeout: The timeout for the compilation. 60 seconds by default.
+    """
+    with open(input_filename, "r", encoding="utf-8") as file:
+        latex_code = file.read()
+    return compile_latex_from_str(latex_code, output_filename, timeout)
+
+
+def compile_latex_from_str(
+    latex_code: str, output_filename: str = "output.pdf", timeout: int = 60
+) -> str:
+    """
+    Compile a latex code.
+
+    Returns a string with the result of the compilation.
+
+    Args:
+        latex_code: The latex code to compile.
+        output_filename: The path to the output pdf file.
+        timeout: The timeout for the compilation. 60 seconds by default.
+    """
+    if shutil.which("pdflatex") is None:
+        return "pdflatex is not installed or not found in PATH."
+
+    destination_name = (
+        output_filename if output_filename.lower().endswith(".pdf") else f"{output_filename}.pdf"
+    )
+
+    try:
+        with tempfile.TemporaryDirectory(
+            dir=str(get_workspace_dir()), prefix="temp_latex_"
+        ) as temp_dir:
+            temp_dir_path = Path(temp_dir)
+            tex_filename = "temp.tex"
+            pdf_filename = "temp.pdf"
+            tex_file_path = temp_dir_path / tex_filename
+            tex_file_path.write_text(latex_code, encoding="utf-8")
+
+            # Detect and copy local .sty packages referenced by \usepackage{...}
+            # Supports optional arguments: \usepackage[opts]{pkgA,pkgB}
+            try:
+                package_names: set[str] = set()
+                for match in re.finditer(r"\\usepackage(?:\[[^\]]*\])?\{([^}]+)\}", latex_code):
+                    for name in match.group(1).split(","):
+                        pkg = name.strip()
+                        if pkg:
+                            package_names.add(pkg)
+
+                for pkg in package_names:
+                    sty_name = f"{pkg}.sty"
+                    for candidate in DEFAULT_LATEX_TEMPLATES_DIR_PATH.rglob(sty_name):
+                        shutil.copyfile(candidate, temp_dir_path / sty_name)
+                        break
+            except Exception:
+                pass
+
+            try:
+                subprocess.run(
+                    [
+                        "pdflatex",
+                        "-interaction=nonstopmode",
+                        tex_filename,
+                    ],
+                    cwd=str(temp_dir_path),
+                    check=True,
+                    capture_output=True,
+                    text=True,
+                    timeout=timeout,
+                )
+            except subprocess.TimeoutExpired:
+                return f"Compilation timed out after {timeout} seconds"
+            except subprocess.CalledProcessError as e:
+                combined_output = (e.stdout or "") + "\n" + (e.stderr or "")
+                error_lines = [
+                    line
+                    for line in combined_output.split("\n")
+                    if ("error" in line.lower() or "!" in line)
+                ]
+                if error_lines:
+                    return "Compilation failed. LaTeX errors:\n" + "\n".join(error_lines)
+                return f"Compilation failed. Full LaTeX output:\n{combined_output}"
+
+            pdf_path = temp_dir_path / pdf_filename
+            output_pdf_path = Path(get_workspace_dir()) / destination_name
+
+            if pdf_path.exists():
+                shutil.move(str(pdf_path), str(output_pdf_path))
+                return f"Compilation successful! PDF file saved as {destination_name}"
+
+            return (
+                "Compilation completed, but PDF file was not created. Check LaTeX code for errors."
+            )
+    except Exception as e:
+        return f"Compilation failed due to an unexpected error: {e}"
--- academia_mcp-1.2.2/academia_mcp/tools/visit_webpage.py
+++ academia_mcp-1.4.0/academia_mcp/tools/visit_webpage.py
@@ -11,6 +11,17 @@ EXA_CONTENTS_URL = "https://api.exa.ai/contents"
 AVAILABLE_PROVIDERS = ("basic", "exa")
 
 
+def _exa_visit_webpage(url: str) -> str:
+    key = os.getenv("EXA_API_KEY", "")
+    assert key, "Error: EXA_API_KEY is not set and no api_key was provided"
+    payload = {
+        "urls": [url],
+        "text": True,
+    }
+    response = post_with_retries(EXA_CONTENTS_URL, payload=payload, api_key=key)
+    return json.dumps(response.json()["results"][0])
+
+
 def visit_webpage(url: str, provider: Optional[str] = "basic") -> str:
     """
     Visit a webpage and return the content.
@@ -28,17 +39,17 @@ def visit_webpage(url: str, provider: Optional[str] = "basic") -> str:
     ), f"Invalid provider: {provider}. Available providers: {AVAILABLE_PROVIDERS}"
 
     if provider == "exa":
-        key = os.getenv("EXA_API_KEY", "")
-        assert key, "Error: EXA_API_KEY is not set and no api_key was provided"
-        payload = {
-            "urls": [url],
-            "text": True,
-        }
-        response = post_with_retries(EXA_CONTENTS_URL, payload=payload, api_key=key)
-        return json.dumps(response.json()["results"][0])
+        return _exa_visit_webpage(url)
 
     assert provider == "basic"
     response = get_with_retries(url)
+    content_type = response.headers.get("content-type", "").lower()
+    if not content_type or (not content_type.startswith("text/") and "html" not in content_type):
+        if os.getenv("EXA_API_KEY"):
+            return _exa_visit_webpage(url)
+        return json.dumps(
+            {"id": url, "error": f"Unsupported content-type: {content_type or 'unknown'}"}
+        )
     markdown_content = markdownify(response.text).strip()
     markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)
     return json.dumps({"id": url, "text": markdown_content})
--- academia_mcp-1.2.2/academia_mcp/tools/web_search.py
+++ academia_mcp-1.4.0/academia_mcp/tools/web_search.py
@@ -17,7 +17,8 @@ def web_search(
     provider: Optional[str] = "tavily",
 ) -> str:
     """
-    Search the web using Exa Search or Tavily and return normalized results.
+    Search the web using Exa Search, Brave Search or Tavily and return normalized results.
+    If the specified provider is not available, the function will try to use the next available provider.
 
     Returns a JSON object serialized to a string. The structure is: {"results": [...]}
     Every item in the "results" has at least the following fields: ("title", "url")
@@ -47,13 +48,15 @@ def web_search(
             provider = p
             break
 
+    result = {}
     if provider == "exa":
-
+        result = json.loads(exa_web_search(query, limit))
     elif provider == "brave":
-
-
-
-
+        result = json.loads(brave_web_search(query, limit))
+    elif provider == "tavily":
+        result = json.loads(tavily_web_search(query, limit))
+    result["search_provider"] = provider
+    return json.dumps(result, ensure_ascii=False)
 
 
 def tavily_web_search(query: str, limit: Optional[int] = 20) -> str:
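`web_search` used to return whichever provider-specific payload it got back; it now parses that payload, stamps it with a `search_provider` field, and re-serializes with `ensure_ascii=False`, so callers can tell which backend actually answered after the availability fallback. Sketch, assuming at least one provider key is configured and that `web_search` accepts the same `limit` argument it forwards to the providers:

```python
import json
from academia_mcp.tools import web_search

response = json.loads(web_search("retrieval-augmented generation", limit=5))
print(response["search_provider"])     # "tavily", "exa", or "brave"
for item in response["results"]:
    print(item["title"], item["url"])  # fields promised by the docstring
```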
|
@@ -13,10 +13,11 @@ def post_with_retries(
|
|
13
13
|
api_key: Optional[str] = None,
|
14
14
|
timeout: int = 30,
|
15
15
|
num_retries: int = 3,
|
16
|
+
backoff_factor: float = 3.0,
|
16
17
|
) -> requests.Response:
|
17
18
|
retry_strategy = Retry(
|
18
19
|
total=num_retries,
|
19
|
-
backoff_factor=
|
20
|
+
backoff_factor=backoff_factor,
|
20
21
|
status_forcelist=[429, 500, 502, 503, 504],
|
21
22
|
allowed_methods=["POST"],
|
22
23
|
)
|
@@ -24,6 +25,7 @@ def post_with_retries(
|
|
24
25
|
session = requests.Session()
|
25
26
|
adapter = requests.adapters.HTTPAdapter(max_retries=retry_strategy)
|
26
27
|
session.mount("https://", adapter)
|
28
|
+
session.mount("http://", adapter)
|
27
29
|
|
28
30
|
headers = {
|
29
31
|
"x-api-key": api_key,
|
@@ -42,11 +44,12 @@ def get_with_retries(
|
|
42
44
|
api_key: Optional[str] = None,
|
43
45
|
timeout: int = 30,
|
44
46
|
num_retries: int = 3,
|
47
|
+
backoff_factor: float = 3.0,
|
45
48
|
params: Optional[Dict[str, Any]] = None,
|
46
49
|
) -> requests.Response:
|
47
50
|
retry_strategy = Retry(
|
48
51
|
total=num_retries,
|
49
|
-
backoff_factor=
|
52
|
+
backoff_factor=backoff_factor,
|
50
53
|
status_forcelist=[429, 500, 502, 503, 504],
|
51
54
|
allowed_methods=["GET"],
|
52
55
|
)
|
@@ -54,6 +57,7 @@ def get_with_retries(
|
|
54
57
|
session = requests.Session()
|
55
58
|
adapter = requests.adapters.HTTPAdapter(max_retries=retry_strategy)
|
56
59
|
session.mount("https://", adapter)
|
60
|
+
session.mount("http://", adapter)
|
57
61
|
|
58
62
|
headers = {}
|
59
63
|
if api_key:
|
@@ -145,3 +149,19 @@ def extract_json(text: str) -> Any:
|
|
145
149
|
def encode_prompt(template: str, **kwargs: Any) -> str:
|
146
150
|
template_obj = Template(template)
|
147
151
|
return template_obj.render(**kwargs).strip()
|
152
|
+
|
153
|
+
|
154
|
+
def truncate_content(
|
155
|
+
content: str,
|
156
|
+
max_length: int,
|
157
|
+
) -> str:
|
158
|
+
disclaimer = (
|
159
|
+
f"\n\n..._This content has been truncated to stay below {max_length} characters_...\n\n"
|
160
|
+
)
|
161
|
+
half_length = max_length // 2
|
162
|
+
if len(content) <= max_length:
|
163
|
+
return content
|
164
|
+
|
165
|
+
prefix = content[:half_length]
|
166
|
+
suffix = content[-half_length:]
|
167
|
+
return prefix + disclaimer + suffix
|