mcp_server_fetch-2025.1.14-py3-none-any.whl

+++ mcp_server_fetch/__init__.py
@@ -0,0 +1,24 @@
+ from .server import serve
+
+
+ def main():
+     """MCP Fetch Server - HTTP fetching functionality for MCP"""
+     import argparse
+     import asyncio
+
+     parser = argparse.ArgumentParser(
+         description="give a model the ability to make web requests"
+     )
+     parser.add_argument("--user-agent", type=str, help="Custom User-Agent string")
+     parser.add_argument(
+         "--ignore-robots-txt",
+         action="store_true",
+         help="Ignore robots.txt restrictions",
+     )
+
+     args = parser.parse_args()
+     asyncio.run(serve(args.user_agent, args.ignore_robots_txt))
+
+
+ if __name__ == "__main__":
+     main()
+++ mcp_server_fetch/__main__.py
@@ -0,0 +1,5 @@
+ # __main__.py
+
+ from mcp_server_fetch import main
+
+ main()
+++ mcp_server_fetch/server.py
@@ -0,0 +1,284 @@
+ from typing import Annotated, Tuple
+ from urllib.parse import urlparse, urlunparse
+
+ import markdownify
+ import readabilipy.simple_json
+ from mcp.shared.exceptions import McpError
+ from mcp.server import Server
+ from mcp.server.stdio import stdio_server
+ from mcp.types import (
+     GetPromptResult,
+     Prompt,
+     PromptArgument,
+     PromptMessage,
+     TextContent,
+     Tool,
+     INVALID_PARAMS,
+     INTERNAL_ERROR,
+ )
+ from protego import Protego
+ from pydantic import BaseModel, Field, AnyUrl
+
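+ # Two default User-Agent strings: one for model-initiated (autonomous) tool
+ # requests and one for user-initiated prompt requests.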
+ DEFAULT_USER_AGENT_AUTONOMOUS = "ModelContextProtocol/1.0 (Autonomous; +https://github.com/modelcontextprotocol/servers)"
+ DEFAULT_USER_AGENT_MANUAL = "ModelContextProtocol/1.0 (User-Specified; +https://github.com/modelcontextprotocol/servers)"
+
+
+ def extract_content_from_html(html: str) -> str:
+     """Extract and convert HTML content to Markdown format.
+
+     Args:
+         html: Raw HTML content to process
+
+     Returns:
+         Simplified markdown version of the content
+     """
+     ret = readabilipy.simple_json.simple_json_from_html_string(
+         html, use_readability=True
+     )
+     if not ret["content"]:
+         return "<error>Page failed to be simplified from HTML</error>"
+     content = markdownify.markdownify(
+         ret["content"],
+         heading_style=markdownify.ATX,
+     )
+     return content
+
+
+ def get_robots_txt_url(url: str) -> str:
+     """Get the robots.txt URL for a given website URL.
+
+     Args:
+         url: Website URL to get robots.txt for
+
+     Returns:
+         URL of the robots.txt file
+     """
+     # Parse the URL into components
+     parsed = urlparse(url)
+
+     # Reconstruct the base URL with just scheme, netloc, and /robots.txt path
+     robots_url = urlunparse((parsed.scheme, parsed.netloc, "/robots.txt", "", "", ""))
+
+     return robots_url
+
+
+ async def check_may_autonomously_fetch_url(url: str, user_agent: str) -> None:
+     """
+     Check if the URL can be fetched by the user agent according to the robots.txt file.
+     Raises a McpError if not.
+     """
+     from httpx import AsyncClient, HTTPError
+
+     robot_txt_url = get_robots_txt_url(url)
+
+     async with AsyncClient() as client:
+         try:
+             response = await client.get(
+                 robot_txt_url,
+                 follow_redirects=True,
+                 headers={"User-Agent": user_agent},
+             )
+         except HTTPError:
+             raise McpError(
+                 INTERNAL_ERROR,
+                 f"Failed to fetch robots.txt {robot_txt_url} due to a connection issue",
+             )
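+         # A 401/403 on robots.txt is treated as an explicit denial of autonomous
+         # fetching, while any other 4xx (typically 404, i.e. no robots.txt at all)
+         # is treated as permission to fetch.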
+         if response.status_code in (401, 403):
+             raise McpError(
+                 INTERNAL_ERROR,
+                 f"When fetching robots.txt ({robot_txt_url}), received status {response.status_code} so assuming that autonomous fetching is not allowed, the user can try manually fetching by using the fetch prompt",
+             )
+         elif 400 <= response.status_code < 500:
+             return
+         robot_txt = response.text
+     processed_robot_txt = "\n".join(
+         line for line in robot_txt.splitlines() if not line.strip().startswith("#")
+     )
+     robot_parser = Protego.parse(processed_robot_txt)
+     if not robot_parser.can_fetch(str(url), user_agent):
+         raise McpError(
+             INTERNAL_ERROR,
+             f"The site's robots.txt ({robot_txt_url}) specifies that autonomous fetching of this page is not allowed, "
+             f"<useragent>{user_agent}</useragent>\n"
+             f"<url>{url}</url>"
+             f"<robots>\n{robot_txt}\n</robots>\n"
+             f"The assistant must let the user know that it failed to view the page. The assistant may provide further guidance based on the above information.\n"
+             f"The assistant can tell the user that they can try manually fetching the page by using the fetch prompt within their UI.",
+         )
+
+
+ async def fetch_url(
+     url: str, user_agent: str, force_raw: bool = False
+ ) -> Tuple[str, str]:
+     """
+     Fetch the URL and return the content in a form ready for the LLM, as well as a prefix string with status information.
+     """
+     from httpx import AsyncClient, HTTPError
+
+     async with AsyncClient() as client:
+         try:
+             response = await client.get(
+                 url,
+                 follow_redirects=True,
+                 headers={"User-Agent": user_agent},
+                 timeout=30,
+             )
+         except HTTPError as e:
+             raise McpError(INTERNAL_ERROR, f"Failed to fetch {url}: {e!r}")
+         if response.status_code >= 400:
+             raise McpError(
+                 INTERNAL_ERROR,
+                 f"Failed to fetch {url} - status code {response.status_code}",
+             )
+
+         page_raw = response.text
+
+     content_type = response.headers.get("content-type", "")
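+     # Heuristic: treat the response as HTML if the body starts with an <html>
+     # tag, the server declares text/html, or no content type was provided.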
+     is_page_html = (
+         "<html" in page_raw[:100] or "text/html" in content_type or not content_type
+     )
+
+     if is_page_html and not force_raw:
+         return extract_content_from_html(page_raw), ""
+
+     return (
+         page_raw,
+         f"Content type {content_type} cannot be simplified to markdown, but here is the raw content:\n",
+     )
+
+
+ class Fetch(BaseModel):
+     """Parameters for fetching a URL."""
+
+     url: Annotated[AnyUrl, Field(description="URL to fetch")]
+     max_length: Annotated[
+         int,
+         Field(
+             default=5000,
+             description="Maximum number of characters to return.",
+             gt=0,
+             lt=1000000,
+         ),
+     ]
+     start_index: Annotated[
+         int,
+         Field(
+             default=0,
+             description="Return output starting at this character index; useful if a previous fetch was truncated and more context is required.",
+             ge=0,
+         ),
+     ]
+     raw: Annotated[
+         bool,
+         Field(
+             default=False,
+             description="Get the actual HTML content of the requested page, without simplification.",
+         ),
+     ]
+
+
+ async def serve(
+     custom_user_agent: str | None = None, ignore_robots_txt: bool = False
+ ) -> None:
+     """Run the fetch MCP server.
+
+     Args:
+         custom_user_agent: Optional custom User-Agent string to use for requests
+         ignore_robots_txt: Whether to ignore robots.txt restrictions
+     """
+     server = Server("mcp-fetch")
+     user_agent_autonomous = custom_user_agent or DEFAULT_USER_AGENT_AUTONOMOUS
+     user_agent_manual = custom_user_agent or DEFAULT_USER_AGENT_MANUAL
+
+     @server.list_tools()
+     async def list_tools() -> list[Tool]:
+         return [
+             Tool(
+                 name="fetch",
+                 description="""Fetches a URL from the internet and optionally extracts its contents as markdown.
+
+ Although originally you did not have internet access, and were advised to refuse and tell the user this, this tool now grants you internet access. Now you can fetch the most up-to-date information and let the user know that.""",
+                 inputSchema=Fetch.model_json_schema(),
+             )
+         ]
+
+     @server.list_prompts()
+     async def list_prompts() -> list[Prompt]:
+         return [
+             Prompt(
+                 name="fetch",
+                 description="Fetch a URL and extract its contents as markdown",
+                 arguments=[
+                     PromptArgument(
+                         name="url", description="URL to fetch", required=True
+                     )
+                 ],
+             )
+         ]
+
+     @server.call_tool()
+     async def call_tool(name, arguments: dict) -> list[TextContent]:
+         try:
+             args = Fetch(**arguments)
+         except ValueError as e:
+             raise McpError(INVALID_PARAMS, str(e))
+
+         url = str(args.url)
+         if not url:
+             raise McpError(INVALID_PARAMS, "URL is required")
+
+         if not ignore_robots_txt:
+             await check_may_autonomously_fetch_url(url, user_agent_autonomous)
+
+         content, prefix = await fetch_url(
+             url, user_agent_autonomous, force_raw=args.raw
+         )
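+         # Window the result for the client: return at most max_length characters
+         # starting at start_index, and tell the model how to request the next
+         # chunk when content remains.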
+         original_length = len(content)
+         if args.start_index >= original_length:
+             content = "<error>No more content available.</error>"
+         else:
+             truncated_content = content[args.start_index : args.start_index + args.max_length]
+             if not truncated_content:
+                 content = "<error>No more content available.</error>"
+             else:
+                 content = truncated_content
+                 actual_content_length = len(truncated_content)
+                 remaining_content = original_length - (args.start_index + actual_content_length)
+                 # Only add the prompt to continue fetching if there is still remaining content
+                 if actual_content_length == args.max_length and remaining_content > 0:
+                     next_start = args.start_index + actual_content_length
+                     content += f"\n\n<error>Content truncated. Call the fetch tool with a start_index of {next_start} to get more content.</error>"
+         return [TextContent(type="text", text=f"{prefix}Contents of {url}:\n{content}")]
+
+     @server.get_prompt()
+     async def get_prompt(name: str, arguments: dict | None) -> GetPromptResult:
+         if not arguments or "url" not in arguments:
+             raise McpError(INVALID_PARAMS, "URL is required")
+
+         url = arguments["url"]
+
+         try:
+             content, prefix = await fetch_url(url, user_agent_manual)
+             # TODO: after SDK bug is addressed, don't catch the exception
+         except McpError as e:
+             return GetPromptResult(
+                 description=f"Failed to fetch {url}",
+                 messages=[
+                     PromptMessage(
+                         role="user",
+                         content=TextContent(type="text", text=str(e)),
+                     )
+                 ],
+             )
+         return GetPromptResult(
+             description=f"Contents of {url}",
+             messages=[
+                 PromptMessage(
+                     role="user", content=TextContent(type="text", text=prefix + content)
+                 )
+             ],
+         )
+
+     options = server.create_initialization_options()
+     async with stdio_server() as (read_stream, write_stream):
+         await server.run(read_stream, write_stream, options, raise_exceptions=True)
+++ mcp_server_fetch-2025.1.14.dist-info/METADATA
@@ -0,0 +1,159 @@
+ Metadata-Version: 2.4
+ Name: mcp-server-fetch
+ Version: 2025.1.14
+ Summary: A Model Context Protocol server providing tools to fetch and convert web content for usage by LLMs
+ Author: Anthropic, PBC.
+ Maintainer-email: Jack Adamson <jadamson@anthropic.com>
+ License: MIT
+ License-File: LICENSE
+ Keywords: automation,http,llm,mcp
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Requires-Python: >=3.10
+ Requires-Dist: markdownify>=0.13.1
+ Requires-Dist: mcp>=1.0.0
+ Requires-Dist: protego>=0.3.1
+ Requires-Dist: pydantic>=2.0.0
+ Requires-Dist: readabilipy>=0.2.0
+ Requires-Dist: requests>=2.32.3
+ Description-Content-Type: text/markdown
+
+ # Fetch MCP Server
+
+ A Model Context Protocol server that provides web content fetching capabilities. This server enables LLMs to retrieve and process content from web pages, converting HTML to markdown for easier consumption.
+
+ The fetch tool will truncate the response, but by using the `start_index` argument, you can specify where to start the content extraction. This lets models read a webpage in chunks, until they find the information they need.
+
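+ For example, if a first call with the default `max_length` of 5000 comes back truncated, a follow-up call can continue where the previous chunk ended. A minimal sketch of the follow-up tool arguments (the URL and offsets here are hypothetical):
+
+ ```json
+ {
+   "url": "https://example.com/long-article",
+   "max_length": 5000,
+   "start_index": 5000
+ }
+ ```
+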
30
+ ### Available Tools
31
+
32
+ - `fetch` - Fetches a URL from the internet and extracts its contents as markdown.
33
+ - `url` (string, required): URL to fetch
34
+ - `max_length` (integer, optional): Maximum number of characters to return (default: 5000)
35
+ - `start_index` (integer, optional): Start content from this character index (default: 0)
36
+ - `raw` (boolean, optional): Get raw content without markdown conversion (default: false)
37
+
38
+ ### Prompts
39
+
40
+ - **fetch**
41
+ - Fetch a URL and extract its contents as markdown
42
+ - Arguments:
43
+ - `url` (string, required): URL to fetch
44
+
45
+ ## Installation
46
+
47
+ Optionally: Install node.js, this will cause the fetch server to use a different HTML simplifier that is more robust.
48
+
49
+ ### Using uv (recommended)
50
+
51
+ When using [`uv`](https://docs.astral.sh/uv/) no specific installation is needed. We will
52
+ use [`uvx`](https://docs.astral.sh/uv/guides/tools/) to directly run *mcp-server-fetch*.
53
+
54
+ ### Using PIP
55
+
56
+ Alternatively you can install `mcp-server-fetch` via pip:
57
+
58
+ ```
59
+ pip install mcp-server-fetch
60
+ ```
61
+
62
+ After installation, you can run it as a script using:
63
+
64
+ ```
65
+ python -m mcp_server_fetch
66
+ ```
67
+
68
+ ## Configuration
69
+
70
+ ### Configure for Claude.app
71
+
72
+ Add to your Claude settings:
73
+
74
+ <details>
75
+ <summary>Using uvx</summary>
76
+
77
+ ```json
78
+ "mcpServers": {
79
+ "fetch": {
80
+ "command": "uvx",
81
+ "args": ["mcp-server-fetch"]
82
+ }
83
+ }
84
+ ```
85
+ </details>
86
+
87
+ <details>
88
+ <summary>Using docker</summary>
89
+
90
+ ```json
91
+ "mcpServers": {
92
+ "fetch": {
93
+ "command": "docker",
94
+ "args": ["run", "-i", "--rm", "mcp/fetch"]
95
+ }
96
+ }
97
+ ```
98
+ </details>
99
+
100
+ <details>
101
+ <summary>Using pip installation</summary>
102
+
103
+ ```json
104
+ "mcpServers": {
105
+ "fetch": {
106
+ "command": "python",
107
+ "args": ["-m", "mcp_server_fetch"]
108
+ }
109
+ }
110
+ ```
111
+ </details>
112
+
113
+ ### Customization - robots.txt
114
+
115
+ By default, the server will obey a websites robots.txt file if the request came from the model (via a tool), but not if
116
+ the request was user initiated (via a prompt). This can be disabled by adding the argument `--ignore-robots-txt` to the
117
+ `args` list in the configuration.
118
+
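+ For example, with the uvx configuration shown above, this might look like (a sketch; adapt it to your own setup):
+
+ ```json
+ "mcpServers": {
+   "fetch": {
+     "command": "uvx",
+     "args": ["mcp-server-fetch", "--ignore-robots-txt"]
+   }
+ }
+ ```
+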
+ ### Customization - User-agent
+
+ By default, depending on whether the request came from the model (via a tool) or was user-initiated (via a prompt), the
+ server will use either the user-agent
+ ```
+ ModelContextProtocol/1.0 (Autonomous; +https://github.com/modelcontextprotocol/servers)
+ ```
+ or
+ ```
+ ModelContextProtocol/1.0 (User-Specified; +https://github.com/modelcontextprotocol/servers)
+ ```
+
+ This can be customized by adding the argument `--user-agent=YourUserAgent` to the `args` list in the configuration.
+
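+ For example (a sketch; `YourUserAgent` is a placeholder):
+
+ ```json
+ "mcpServers": {
+   "fetch": {
+     "command": "uvx",
+     "args": ["mcp-server-fetch", "--user-agent=YourUserAgent"]
+   }
+ }
+ ```
+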
+ ## Debugging
+
+ You can use the MCP inspector to debug the server. For uvx installations:
+
+ ```
+ npx @modelcontextprotocol/inspector uvx mcp-server-fetch
+ ```
+
+ Or if you've installed the package in a specific directory or are developing on it:
+
+ ```
+ cd path/to/servers/src/fetch
+ npx @modelcontextprotocol/inspector uv run mcp-server-fetch
+ ```
+
+ ## Contributing
+
+ We encourage contributions to help expand and improve mcp-server-fetch. Whether you want to add new tools, enhance existing functionality, or improve documentation, your input is valuable.
+
+ For examples of other MCP servers and implementation patterns, see:
+ https://github.com/modelcontextprotocol/servers
+
+ Pull requests are welcome! Feel free to contribute new ideas, bug fixes, or enhancements to make mcp-server-fetch even more powerful and useful.
+
+ ## License
+
+ mcp-server-fetch is licensed under the MIT License. This means you are free to use, modify, and distribute the software, subject to the terms and conditions of the MIT License. For more details, please see the LICENSE file in the project repository.
+++ mcp_server_fetch-2025.1.14.dist-info/RECORD
@@ -0,0 +1,8 @@
+ mcp_server_fetch/__init__.py,sha256=6mqCwMSe8NtUcwXsmZTGjln83bc1vE31CL5yInKZd0s,614
+ mcp_server_fetch/__main__.py,sha256=P5j_W1F3QvOrY7x2YIQ0KlY1Y9eO_vS6rrOo1mL1fvk,57
+ mcp_server_fetch/server.py,sha256=um0KbhDBWcjgKTMxP_mSQvwaPbilH3-hFXMlRSD0ClE,10195
+ mcp_server_fetch-2025.1.14.dist-info/METADATA,sha256=rBTeptl2ITtR2-icAku7mRYlW9Se2S1oxRoBqnFBlNQ,4845
+ mcp_server_fetch-2025.1.14.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ mcp_server_fetch-2025.1.14.dist-info/entry_points.txt,sha256=tYA4AQfADMVk6YWCfuPe7TjGGmPmk7gLosHt_ewL48c,59
+ mcp_server_fetch-2025.1.14.dist-info/licenses/LICENSE,sha256=jMfG4zsk7U7o_MzDPszxAlSdBPpMuXN87Ml3Da0QgP8,1059
+ mcp_server_fetch-2025.1.14.dist-info/RECORD,,
+++ mcp_server_fetch-2025.1.14.dist-info/WHEEL
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.27.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+++ mcp_server_fetch-2025.1.14.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+ [console_scripts]
+ mcp-server-fetch = mcp_server_fetch:main
+++ mcp_server_fetch-2025.1.14.dist-info/licenses/LICENSE
@@ -0,0 +1,7 @@
+ Copyright (c) 2024 Anthropic, PBC.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.