mcp-server-fetch 0.1.0__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- mcp_server_fetch/__init__.py
+++ mcp_server_fetch/__init__.py
@@ -3,9 +3,21 @@ from .server import serve
 
 def main():
     """MCP Fetch Server - HTTP fetching functionality for MCP"""
+    import argparse
     import asyncio
 
-    asyncio.run(serve())
+    parser = argparse.ArgumentParser(
+        description="give a model the ability to make web requests"
+    )
+    parser.add_argument("--user-agent", type=str, help="Custom User-Agent string")
+    parser.add_argument(
+        "--ignore-robots-txt",
+        action="store_true",
+        help="Ignore robots.txt restrictions",
+    )
+
+    args = parser.parse_args()
+    asyncio.run(serve(args.user_agent, args.ignore_robots_txt))
 
 
 if __name__ == "__main__":
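The new entry point just forwards the two flags to `serve`. As a rough equivalence (a sketch, not part of the package; `MyAgent/1.0` is an illustrative value), running `mcp-server-fetch --user-agent=MyAgent/1.0 --ignore-robots-txt` resolves to:

```python
# Sketch: what the CLI invocation above resolves to after argument parsing.
import asyncio

from mcp_server_fetch import serve  # re-exported via `from .server import serve`

asyncio.run(serve("MyAgent/1.0", True))  # (custom_user_agent, ignore_robots_txt)
```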
--- mcp_server_fetch/server.py
+++ mcp_server_fetch/server.py
@@ -1,53 +1,199 @@
+from typing import Annotated, Tuple
+from urllib.parse import urlparse, urlunparse
+
 import markdownify
 import readabilipy.simple_json
+from mcp.shared.exceptions import McpError
 from mcp.server import Server
 from mcp.server.stdio import stdio_server
 from mcp.types import (
-    TextContent,
-    Tool,
+    GetPromptResult,
     Prompt,
     PromptArgument,
-    GetPromptResult,
     PromptMessage,
+    TextContent,
+    Tool,
+    INVALID_PARAMS,
+    INTERNAL_ERROR,
 )
-from pydantic import BaseModel, Field
+from protego import Protego
+from pydantic import BaseModel, Field, AnyUrl
+
+DEFAULT_USER_AGENT_AUTONOMOUS = "ModelContextProtocol/1.0 (Autonomous; +https://github.com/modelcontextprotocol/servers)"
+DEFAULT_USER_AGENT_MANUAL = "ModelContextProtocol/1.0 (User-Specified; +https://github.com/modelcontextprotocol/servers)"
+
 
+def extract_content_from_html(html: str) -> str:
+    """Extract and convert HTML content to Markdown format.
 
-def extract_content(html: str) -> str:
-    ret = readabilipy.simple_json.simple_json_from_html_string(html)
-    if not ret["plain_content"]:
+    Args:
+        html: Raw HTML content to process
+
+    Returns:
+        Simplified markdown version of the content
+    """
+    ret = readabilipy.simple_json.simple_json_from_html_string(
+        html, use_readability=True
+    )
+    if not ret["content"]:
         return "<error>Page failed to be simplified from HTML</error>"
     content = markdownify.markdownify(
-        ret["plain_content"],
+        ret["content"],
         heading_style=markdownify.ATX,
     )
     return content
 
 
-async def fetch_url(url: str) -> str:
-    from httpx import AsyncClient
+def get_robots_txt_url(url: AnyUrl | str) -> str:
+    """Get the robots.txt URL for a given website URL.
+
+    Args:
+        url: Website URL to get robots.txt for
+
+    Returns:
+        URL of the robots.txt file
+    """
+    # Parse the URL into components
+    parsed = urlparse(str(url))
+
+    # Reconstruct the base URL with just scheme, netloc, and /robots.txt path
+    robots_url = urlunparse((parsed.scheme, parsed.netloc, "/robots.txt", "", "", ""))
+
+    return robots_url
+
+
+async def check_may_autonomously_fetch_url(url: AnyUrl | str, user_agent: str) -> None:
+    """
+    Check if the URL can be fetched by the user agent according to the robots.txt file.
+    Raises a McpError if not.
+    """
+    from httpx import AsyncClient, HTTPError
+
+    robot_txt_url = get_robots_txt_url(url)
+
+    async with AsyncClient() as client:
+        try:
+            response = await client.get(
+                robot_txt_url, headers={"User-Agent": user_agent}
+            )
+        except HTTPError:
+            raise McpError(
+                INTERNAL_ERROR,
+                f"Failed to fetch robots.txt {robot_txt_url} due to a connection issue",
+            )
+        if response.status_code in (401, 403):
+            raise McpError(
+                INTERNAL_ERROR,
+                f"When fetching robots.txt ({robot_txt_url}), received status {response.status_code} so assuming that autonomous fetching is not allowed, the user can try manually fetching by using the fetch prompt",
+            )
+        elif 400 <= response.status_code < 500:
+            return
+        robot_txt = response.text
+    processed_robot_txt = "\n".join(
+        line for line in robot_txt.splitlines() if not line.strip().startswith("#")
+    )
+    robot_parser = Protego.parse(processed_robot_txt)
+    if not robot_parser.can_fetch(url, user_agent):
+        raise McpError(
+            INTERNAL_ERROR,
+            f"The sites robots.txt ({robot_txt_url}), specifies that autonomous fetching of this page is not allowed, "
+            f"<useragent>{user_agent}</useragent>\n"
+            f"<url>{url}</url>"
+            f"<robots>\n{robot_txt}\n</robots>\n"
+            f"The assistant must let the user know that it failed to view the page. The assistant may provide further guidance based on the above information.\n"
+            f"The assistant can tell the user that they can try manually fetching the page by using the fetch prompt within their UI.",
+        )
+
+
+async def fetch_url(
+    url: AnyUrl | str, user_agent: str, force_raw: bool = False
+) -> Tuple[str, str]:
+    """
+    Fetch the URL and return the content in a form ready for the LLM, as well as a prefix string with status information.
+    """
+    from httpx import AsyncClient, HTTPError
 
     async with AsyncClient() as client:
-        response = await client.get(url)
-        response.raise_for_status()
-        page_html = response.text
+        try:
+            response = await client.get(
+                str(url),
+                follow_redirects=True,
+                headers={"User-Agent": user_agent},
+                timeout=30,
+            )
+        except HTTPError as e:
+            raise McpError(INTERNAL_ERROR, f"Failed to fetch {url}: {e!r}")
+        if response.status_code >= 400:
+            raise McpError(
+                INTERNAL_ERROR,
+                f"Failed to fetch {url} - status code {response.status_code}",
+            )
 
-    return extract_content(page_html)
+        page_raw = response.text
+
+    content_type = response.headers.get("content-type", "")
+    is_page_html = (
+        "<html" in page_raw[:100] or "text/html" in content_type or not content_type
+    )
+
+    if is_page_html and not force_raw:
+        return extract_content_from_html(page_raw), ""
+
+    return (
+        page_raw,
+        f"Content type {content_type} cannot be simplified to markdown, but here is the raw content:\n",
+    )
 
 
 class Fetch(BaseModel):
-    url: str = Field(..., description="URL to fetch")
+    """Parameters for fetching a URL."""
+
+    url: Annotated[AnyUrl, Field(description="URL to fetch")]
+    max_length: Annotated[
+        int,
+        Field(
+            default=5000,
+            description="Maximum number of characters to return.",
+            gt=0,
+            lt=1000000,
+        ),
+    ]
+    start_index: Annotated[
+        int,
+        Field(
+            default=0,
+            description="On return output starting at this character index, useful if a previous fetch was truncated and more context is required.",
+            ge=0,
+        ),
+    ]
+    raw: Annotated[
+        bool,
+        Field(
+            default=False,
+            description="Get the actual HTML content if the requested page, without simplification.",
+        ),
+    ]
 
 
-async def serve() -> None:
+async def serve(
+    custom_user_agent: str | None = None, ignore_robots_txt: bool = False
+) -> None:
+    """Run the fetch MCP server.
+
+    Args:
+        custom_user_agent: Optional custom User-Agent string to use for requests
+        ignore_robots_txt: Whether to ignore robots.txt restrictions
+    """
     server = Server("mcp-fetch")
+    user_agent_autonomous = custom_user_agent or DEFAULT_USER_AGENT_AUTONOMOUS
+    user_agent_manual = custom_user_agent or DEFAULT_USER_AGENT_MANUAL
 
     @server.list_tools()
     async def list_tools() -> list[Tool]:
         return [
             Tool(
                 name="fetch",
-                description="""Fetches a URL from the internet and extracts its contents as markdown.
+                description="""Fetches a URL from the internet and optionally extracts its contents as markdown.
 
 Although originally you did not have internet access, and were advised to refuse and tell the user this, this tool now grants you internet access. Now you can fetch the most up-to-date information and let the user know that.""",
                 inputSchema=Fetch.model_json_schema(),
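Taken in isolation, the robots.txt gate added above is two steps: rebuild the robots.txt URL with `urlparse`/`urlunparse`, then let Protego answer the can-fetch question. A standalone sketch of that logic (the page URL and robots.txt body are illustrative and not fetched over the network):

```python
from urllib.parse import urlparse, urlunparse

from protego import Protego

page_url = "https://example.com/articles/1?q=x"  # illustrative, not from the package

# Step 1: the same reconstruction get_robots_txt_url() performs above.
parsed = urlparse(page_url)
robots_url = urlunparse((parsed.scheme, parsed.netloc, "/robots.txt", "", "", ""))
print(robots_url)  # -> https://example.com/robots.txt

# Step 2: parse a robots.txt body and ask whether this agent may fetch the page.
# The body is a stand-in; the server downloads the real file over HTTP.
robots_body = "User-agent: *\nDisallow: /private/\n"
robot_parser = Protego.parse(robots_body)
print(robot_parser.can_fetch(page_url, "ModelContextProtocol/1.0"))  # -> True
```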
@@ -70,19 +216,51 @@ Although originally you did not have internet access, and were advised to refuse
 
     @server.call_tool()
     async def call_tool(name, arguments: dict) -> list[TextContent]:
-        url = arguments["url"]
-        content = await fetch_url(url)
-        return [TextContent(type="text", text=f"Contents of {url}:\n{content}")]
+        try:
+            args = Fetch(**arguments)
+        except ValueError as e:
+            raise McpError(INVALID_PARAMS, str(e))
+
+        url = args.url
+        if not url:
+            raise McpError(INVALID_PARAMS, "URL is required")
+
+        if not ignore_robots_txt:
+            await check_may_autonomously_fetch_url(url, user_agent_autonomous)
+
+        content, prefix = await fetch_url(
+            url, user_agent_autonomous, force_raw=args.raw
+        )
+        if len(content) > args.max_length:
+            content = content[args.start_index : args.start_index + args.max_length]
+            content += f"\n\n<error>Content truncated. Call the fetch tool with a start_index of {args.start_index + args.max_length} to get more content.</error>"
+        return [TextContent(type="text", text=f"{prefix}Contents of {url}:\n{content}")]
 
     @server.get_prompt()
-    async def get_prompt(name, arguments: dict) -> GetPromptResult:
+    async def get_prompt(name: str, arguments: dict | None) -> GetPromptResult:
+        if not arguments or "url" not in arguments:
+            raise McpError(INVALID_PARAMS, "URL is required")
+
         url = arguments["url"]
-        content = await fetch_url(url)
+
+        try:
+            content, prefix = await fetch_url(url, user_agent_manual)
+            # TODO: after SDK bug is addressed, don't catch the exception
+        except McpError as e:
+            return GetPromptResult(
+                description=f"Failed to fetch {url}",
+                messages=[
+                    PromptMessage(
+                        role="user",
+                        content=TextContent(type="text", text=str(e)),
+                    )
+                ],
+            )
         return GetPromptResult(
             description=f"Contents of {url}",
             messages=[
                 PromptMessage(
-                    role="user", content=TextContent(type="text", text=content)
+                    role="user", content=TextContent(type="text", text=prefix + content)
                 )
             ],
         )
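The truncation branch in `call_tool` is what enables the chunked reading described in the README changes below: oversized content is sliced at `start_index`, and the caller is told which `start_index` to pass next. A self-contained sketch of that slicing (values illustrative):

```python
# Illustrative restatement of the truncation step in call_tool above.
content = "x" * 12000  # stand-in for the extracted page content
max_length = 5000      # tool default, per the Fetch model
start_index = 0

if len(content) > max_length:
    content = content[start_index : start_index + max_length]
    content += (
        f"\n\n<error>Content truncated. Call the fetch tool with a start_index "
        f"of {start_index + max_length} to get more content.</error>"
    )
# A follow-up call with start_index=5000 would return the next 5000 characters.
```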
--- mcp_server_fetch-0.1.0.dist-info/METADATA
+++ mcp_server_fetch-0.6.1.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: mcp-server-fetch
-Version: 0.1.0
+Version: 0.6.1
 Summary: A Model Context Protocol server providing tools to fetch and convert web content for usage by LLMs
 Author: Anthropic, PBC.
 Maintainer-email: Jack Adamson <jadamson@anthropic.com>
@@ -13,7 +13,8 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.10
 Requires-Python: >=3.10
 Requires-Dist: markdownify>=0.13.1
-Requires-Dist: mcp>=0.6.0
+Requires-Dist: mcp>=1.0.0
+Requires-Dist: protego>=0.3.1
 Requires-Dist: pydantic>=2.0.0
 Requires-Dist: readabilipy>=0.2.0
 Requires-Dist: requests>=2.32.3
@@ -23,20 +24,27 @@ Description-Content-Type: text/markdown
 
 A Model Context Protocol server that provides web content fetching capabilities. This server enables LLMs to retrieve and process content from web pages, converting HTML to markdown for easier consumption.
 
-Presently the server only supports fetching HTML content.
+The fetch tool will truncate the response, but by using the `start_index` argument, you can specify where to start the content extraction. This lets models read a webpage in chunks, until they find the information they need.
 
 ### Available Tools
 
 - `fetch` - Fetches a URL from the internet and extracts its contents as markdown.
+    - `url` (string, required): URL to fetch
+    - `max_length` (integer, optional): Maximum number of characters to return (default: 5000)
+    - `start_index` (integer, optional): Start content from this character index (default: 0)
+    - `raw` (boolean, optional): Get raw content without markdown conversion (default: false)
 
 ### Prompts
 
 - **fetch**
   - Fetch a URL and extract its contents as markdown
-  - Argument: `url` (string, required): URL to fetch
+  - Arguments:
+    - `url` (string, required): URL to fetch
 
 ## Installation
 
+Optionally: Install node.js, this will cause the fetch server to use a different HTML simplifier that is more robust.
+
 ### Using uv (recommended)
 
 When using [`uv`](https://docs.astral.sh/uv/) no specific installation is needed. We will
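The tool arguments documented above are validated by the new `Fetch` pydantic model from `server.py`. A quick sketch, assuming the package is importable:

```python
# Sketch: the documented tool arguments validate through the Fetch model.
from mcp_server_fetch.server import Fetch

args = Fetch(url="https://example.com", max_length=5000, start_index=0, raw=False)
print(args.max_length)  # 5000; values outside (0, 1000000) raise a ValidationError
```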
@@ -88,35 +96,25 @@ Add to your Claude settings:
 ```
 </details>
 
-### Configure for Zed
+### Customization - robots.txt
 
-Add to your Zed settings.json:
+By default, the server will obey a websites robots.txt file if the request came from the model (via a tool), but not if
+the request was user initiated (via a prompt). This can be disabled by adding the argument `--ignore-robots-txt` to the
+`args` list in the configuration.
 
-<details>
-<summary>Using uvx</summary>
+### Customization - User-agent
 
-```json
-"context_servers": [
-  "mcp-server-fetch": {
-    "command": "uvx",
-    "args": ["mcp-server-fetch"]
-  }
-],
+By default, depending on if the request came from the model (via a tool), or was user initiated (via a prompt), the
+server will use either the user-agent
 ```
-</details>
-
-<details>
-<summary>Using pip installation</summary>
-
-```json
-"context_servers": {
-  "mcp-server-fetch": {
-    "command": "python",
-    "args": ["-m", "mcp_server_fetch"]
-  }
-},
+ModelContextProtocol/1.0 (Autonomous; +https://github.com/modelcontextprotocol/servers)
 ```
-</details>
+or
+```
+ModelContextProtocol/1.0 (User-Specified; +https://github.com/modelcontextprotocol/servers)
+```
+
+This can be customized by adding the argument `--user-agent=YourUserAgent` to the `args` list in the configuration.
 
 ## Debugging
 
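Putting both customizations together, a configuration entry could look like the following sketch (assuming the Claude Desktop `mcpServers` layout referenced in the settings section above; `MyAgent/1.0` is an illustrative value):

```json
{
  "mcpServers": {
    "fetch": {
      "command": "uvx",
      "args": ["mcp-server-fetch", "--ignore-robots-txt", "--user-agent=MyAgent/1.0"]
    }
  }
}
```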
--- /dev/null
+++ mcp_server_fetch-0.6.1.dist-info/RECORD
@@ -0,0 +1,8 @@
+mcp_server_fetch/__init__.py,sha256=6mqCwMSe8NtUcwXsmZTGjln83bc1vE31CL5yInKZd0s,614
+mcp_server_fetch/__main__.py,sha256=P5j_W1F3QvOrY7x2YIQ0KlY1Y9eO_vS6rrOo1mL1fvk,57
+mcp_server_fetch/server.py,sha256=G_oJaTAt4RWfVcsCT9kKiZ8ObCaZF95SHeSWpz_d-ms,9462
+mcp_server_fetch-0.6.1.dist-info/METADATA,sha256=LvajvVT7StDVQ8jkr-EirJLMlh4N4wnLH9eTU25JP6c,4645
+mcp_server_fetch-0.6.1.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
+mcp_server_fetch-0.6.1.dist-info/entry_points.txt,sha256=tYA4AQfADMVk6YWCfuPe7TjGGmPmk7gLosHt_ewL48c,59
+mcp_server_fetch-0.6.1.dist-info/licenses/LICENSE,sha256=jMfG4zsk7U7o_MzDPszxAlSdBPpMuXN87Ml3Da0QgP8,1059
+mcp_server_fetch-0.6.1.dist-info/RECORD,,
@@ -1,8 +0,0 @@
1
- mcp_server_fetch/__init__.py,sha256=xXBif3LVCyo-5MEpJYT2Y-Fq5GOK-cR5U_o3MjuQb60,190
2
- mcp_server_fetch/__main__.py,sha256=P5j_W1F3QvOrY7x2YIQ0KlY1Y9eO_vS6rrOo1mL1fvk,57
3
- mcp_server_fetch/server.py,sha256=QSnK1f6Xx7B8ZsoiY_M2DfhOIHPSHpVF4iwkReq9H-w,2875
4
- mcp_server_fetch-0.1.0.dist-info/METADATA,sha256=gTm1HG8PVNfbO3RZQYHczhPlV7vqsjSP3Sw9xfv_yQo,3603
5
- mcp_server_fetch-0.1.0.dist-info/WHEEL,sha256=C2FUgwZgiLbznR-k0b_5k3Ai_1aASOXDss3lzCUsUug,87
6
- mcp_server_fetch-0.1.0.dist-info/entry_points.txt,sha256=tYA4AQfADMVk6YWCfuPe7TjGGmPmk7gLosHt_ewL48c,59
7
- mcp_server_fetch-0.1.0.dist-info/licenses/LICENSE,sha256=jMfG4zsk7U7o_MzDPszxAlSdBPpMuXN87Ml3Da0QgP8,1059
8
- mcp_server_fetch-0.1.0.dist-info/RECORD,,