iflow_mcp_internet_speed_test-0.1.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iflow_mcp_internet_speed_test-0.1.1.dist-info/METADATA +324 -0
- iflow_mcp_internet_speed_test-0.1.1.dist-info/RECORD +8 -0
- iflow_mcp_internet_speed_test-0.1.1.dist-info/WHEEL +5 -0
- iflow_mcp_internet_speed_test-0.1.1.dist-info/entry_points.txt +2 -0
- iflow_mcp_internet_speed_test-0.1.1.dist-info/licenses/LICENSE +21 -0
- iflow_mcp_internet_speed_test-0.1.1.dist-info/top_level.txt +1 -0
- mcp_internet_speed_test/__init__.py +12 -0
- mcp_internet_speed_test/main.py +801 -0

iflow_mcp_internet_speed_test-0.1.1.dist-info/METADATA
@@ -0,0 +1,324 @@
Metadata-Version: 2.4
Name: iflow-mcp-internet-speed-test
Version: 0.1.1
Summary: Enables AI models and agents to perform internet speed measurements.
Author-email: Pedro Cruz <hola@inventer.dev>
Maintainer-email: Pedro Cruz <hola@inventer.dev>
Project-URL: Homepage, https://github.com/inventer-dev/mcp-internet-speed-test
Project-URL: Bug Reports, https://github.com/inventer-dev/mcp-internet-speed-test/issues
Project-URL: Source, https://github.com/inventer-dev/mcp-internet-speed-test
Project-URL: Documentation, https://github.com/inventer-dev/mcp-internet-speed-test#readme
Keywords: mcp,speed-test,internet,network,ai,model-context-protocol
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.12
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Topic :: Internet :: WWW/HTTP
Classifier: Topic :: System :: Networking :: Monitoring
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Requires-Python: >=3.12
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: mcp[cli]>=1.6.0
Requires-Dist: httpx>=0.27.0
Dynamic: license-file

[Archestra MCP Catalog](https://archestra.ai/mcp-catalog/inventer-dev__mcp-internet-speed-test)
[Smithery](https://smithery.ai/server/@inventer-dev/mcp-internet-speed-test)

[MseeP](https://mseep.ai/app/inventer-dev-mcp-internet-speed-test)

<a href="https://glama.ai/mcp/servers/@inventer-dev/mcp-internet-speed-test">
  <img width="380" height="200" src="https://glama.ai/mcp/servers/@inventer-dev/mcp-internet-speed-test/badge" alt="mcp-internet-speed-test MCP server" />
</a>

# MCP Internet Speed Test

An implementation of a Model Context Protocol (MCP) server for internet speed testing. It allows AI models and agents to measure, analyze, and report network performance metrics through a standardized interface.

**📦 Available on PyPI:** https://pypi.org/project/mcp-internet-speed-test/

**🚀 Quick Start:**
```bash
pip install mcp-internet-speed-test
mcp-internet-speed-test
```

## What is MCP?

The Model Context Protocol (MCP) provides a standardized way for Large Language Models (LLMs) to interact with external tools and data sources. Think of it as the "USB-C for AI applications" - a common interface that allows AI systems to access real-world capabilities and information.

## Features

- **Smart Incremental Testing**: Uses the SpeedOf.Me methodology with an 8-second threshold for optimal accuracy
- **Download Speed Testing**: Measures bandwidth using files from 128KB to 100MB served from a GitHub repository
- **Upload Speed Testing**: Tests upload bandwidth using generated data from 128KB to 100MB
- **Latency Testing**: Measures network latency with detailed server location information
- **Jitter Analysis**: Calculates network stability using multiple latency samples (default: 5)
- **Multi-CDN Support**: Detects and provides info for Fastly, Cloudflare, and AWS CloudFront
- **Geographic Location**: Maps POP codes to physical locations (50+ locations worldwide)
- **Cache Analysis**: Detects HIT/MISS status and cache headers
- **Server Metadata**: Extracts detailed CDN headers including `x-served-by`, `via`, `x-cache`
- **Comprehensive Testing**: Single function to run all tests with complete metrics

## Installation

### Prerequisites

- Python 3.12 or higher (required for async support)
- pip or [uv](https://github.com/astral-sh/uv) package manager

### Option 1: Install from PyPI with pip (Recommended)

```bash
# Install the package globally
pip install mcp-internet-speed-test

# Run the MCP server
mcp-internet-speed-test
```

### Option 2: Install from PyPI with uv

```bash
# Add the package to your project
uv add mcp-internet-speed-test

# Or run directly without installing
uvx mcp-internet-speed-test
```

### Option 3: Using Docker

```bash
# Build the Docker image
docker build -t mcp-internet-speed-test .

# Run the MCP server in a Docker container
docker run -it --rm -v $(pwd):/app -w /app mcp-internet-speed-test
```

### Option 4: Development/Local Installation

If you want to contribute or modify the code:

```bash
# Clone the repository
git clone https://github.com/inventer-dev/mcp-internet-speed-test.git
cd mcp-internet-speed-test

# Install in development mode
pip install -e .

# Or using uv
uv sync
uv run python -m mcp_internet_speed_test.main
```

### Dependencies

The package automatically installs these dependencies:
- `mcp[cli]>=1.6.0`: MCP server framework with CLI integration
- `httpx>=0.27.0`: Async HTTP client for speed tests

## Configuration

To use this MCP server with Claude Desktop or other MCP clients, add it to your MCP configuration file.

### Claude Desktop Configuration

Edit your Claude Desktop MCP configuration file:

#### Option 1: Using pip installed package (Recommended)

```json
{
  "mcpServers": {
    "mcp-internet-speed-test": {
      "command": "mcp-internet-speed-test"
    }
  }
}
```

#### Option 2: Using uvx

```json
{
  "mcpServers": {
    "mcp-internet-speed-test": {
      "command": "uvx",
      "args": ["mcp-internet-speed-test"]
    }
  }
}
```

## API Tools

The MCP Internet Speed Test provides the following tools:

### Testing Functions
1. `measure_download_speed`: Measures download bandwidth (in Mbps) with server location info
2. `measure_upload_speed`: Measures upload bandwidth (in Mbps) with server location info
3. `measure_latency`: Measures network latency (in ms) with server location info
4. `measure_jitter`: Measures network jitter by analyzing latency variations with server info
5. `get_server_info`: Get detailed CDN server information for any URL without running speed tests
6. `run_complete_test`: Comprehensive test with all metrics and server metadata
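
As a concrete illustration, a successful `measure_download_speed` call returns a JSON object shaped like the example below. The field names match the implementation in `mcp_internet_speed_test/main.py` (shown later in this diff); the values are made up, and `server_info` is trimmed to its most useful fields.

```json
{
  "download_speed": 81.3,
  "unit": "Mbps",
  "elapsed_time": 9.84,
  "data_size": 104857600,
  "size_used": "100MB",
  "server_info": {
    "cdn_provider": "Fastly",
    "pop_code": "MEX",
    "pop_location": "Mexico City, Mexico",
    "served_by": "cache-mex4329-MEX",
    "cache_status": "HIT"
  },
  "all_tests": []
}
```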

## CDN Server Detection

This speed test provides detailed information about the CDN servers serving your tests:

### What You Get
- **CDN Provider**: Identifies whether you're connecting to Fastly, Cloudflare, or Amazon CloudFront
- **Geographic Location**: Shows the physical location of the server (e.g., "Mexico City, Mexico")
- **POP Code**: Three-letter code identifying the Point of Presence (e.g., "MEX", "QRO", "DFW")
- **Cache Status**: Whether content is served from cache (HIT) or fetched from origin (MISS)
- **Server Headers**: Full HTTP headers including `x-served-by`, `via`, and `x-cache`

### Technical Implementation

#### Smart Testing Methodology
- **Incremental Approach**: Starts with small files (128KB) and progressively increases
- **Time-Based Optimization**: Uses an 8-second base threshold plus a 4-second additional buffer
- **Accuracy Focus**: Selects the optimal file size that provides reliable measurements
- **Multi-Provider Support**: Tests against geographically distributed endpoints
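
A minimal sketch of this incremental idea is shown below. It is a simplified illustration, not the package's exact code: the real `measure_download_speed` in `main.py` (later in this diff) also records server metadata and gives the largest files a longer time budget. The file URLs follow the `inventer-dev/speed-test-files` naming used by the package.

```python
import asyncio
import time

import httpx

BASE_URL = "https://raw.githubusercontent.com/inventer-dev/speed-test-files/main"
SIZES = ["128KB", "256KB", "512KB", "1MB", "2MB", "5MB", "10MB", "20MB"]
THRESHOLD_SECONDS = 8.0  # stop once a single transfer has run this long


async def incremental_download_test() -> dict:
    """Step through progressively larger files until one exceeds the threshold."""
    final = {}
    async with httpx.AsyncClient() as client:
        for size in SIZES:
            start = time.time()
            total_bytes = 0
            async with client.stream("GET", f"{BASE_URL}/{size}.bin") as response:
                async for chunk in response.aiter_bytes(chunk_size=1024):
                    total_bytes += len(chunk)
                    elapsed = time.time() - start
                    # bytes -> megabits, divided by seconds, gives Mbps
                    final = {
                        "size": size,
                        "mbps": round(total_bytes * 8 / (1024 * 1024) / elapsed, 2),
                        "elapsed": round(elapsed, 2),
                    }
                    if elapsed >= THRESHOLD_SECONDS:
                        return final  # this sample ran long enough to be reliable
    return final


if __name__ == "__main__":
    print(asyncio.run(incremental_download_test()))
```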

#### CDN Detection Capabilities
- **Fastly**: Detects POP codes and maps to 50+ global locations
- **Cloudflare**: Identifies data centers and geographic regions
- **AWS CloudFront**: Recognizes edge locations across continents
- **Header Analysis**: Parses `x-served-by`, `via`, `x-cache`, and custom CDN headers
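
Under the hood this is plain header parsing. The snippet below isolates the Fastly case from `extract_server_info()` in `main.py` (later in this diff): the POP code is the trailing three-letter token of the `x-served-by` header, which is then looked up in a location table. The two-entry table here is just for illustration; the package ships a much larger one.

```python
import re
from typing import Dict, Optional

# Tiny excerpt of the package's FASTLY_POP_LOCATIONS table, for illustration only.
FASTLY_POP_LOCATIONS = {
    "MEX": "Mexico City, Mexico",
    "DFW": "Dallas, Texas, USA",
}


def fastly_pop_location(headers: Dict[str, str]) -> Optional[str]:
    # Fastly's x-served-by values end with the POP code, e.g. "cache-mex4329-MEX".
    served_by = headers.get("x-served-by", "")
    match = re.search(r"-([A-Z]{3})$", served_by)
    if not match:
        return None
    code = match.group(1)
    return FASTLY_POP_LOCATIONS.get(code, f"Unknown location ({code})")


print(fastly_pop_location({"x-served-by": "cache-mex4329-MEX"}))  # Mexico City, Mexico
```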

### Why This Matters
- **Network Diagnostics**: Understand which server is actually serving your tests
- **Performance Analysis**: Correlate speed results with server proximity
- **CDN Optimization**: Identify if your ISP's routing is optimal
- **Geographic Awareness**: Know if tests are running from your expected region
- **Troubleshooting**: Identify routing issues and CDN misconfigurations

### Example Server Info Output
```json
{
  "cdn_provider": "Fastly",
  "pop_code": "MEX",
  "pop_location": "Mexico City, Mexico",
  "served_by": "cache-mex4329-MEX",
  "cache_status": "HIT",
  "x_cache": "HIT, HIT"
}
```

### Technical Configuration

#### Default Test Files Repository
```
GitHub Repository: inventer-dev/speed-test-files
Branch: main
File Sizes: 128KB, 256KB, 512KB, 1MB, 2MB, 5MB, 10MB, 20MB, 40MB, 50MB, 100MB
```

#### Upload Endpoints Priority
1. **Cloudflare Workers** (httpi.dev) - Global distribution, highest priority
2. **HTTPBin** (httpbin.org) - AWS-based, secondary endpoint

#### Supported CDN Locations (150+ POPs)

**Fastly POPs**: MEX, QRO, DFW, LAX, NYC, MIA, LHR, FRA, AMS, CDG, NRT, SIN, SYD, GRU, SCL, BOG, MAD, MIL...

**Cloudflare Centers**: DFW, LAX, SJC, SEA, ORD, MCI, IAD, ATL, MIA, YYZ, LHR, FRA, AMS, CDG, ARN, STO...

**AWS CloudFront**: ATL, BOS, ORD, CMH, DFW, DEN, IAD, LAX, MIA, MSP, JFK, SEA, SJC, AMS, ATH, TXL...

#### Performance Thresholds
- **Base Test Duration**: 8.0 seconds
- **Additional Buffer**: 4.0 seconds
- **Maximum File Size**: Configurable (default: 100MB)
- **Jitter Samples**: 5 measurements (configurable)
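
In code, these thresholds become a per-size time budget: every sample gets the 8-second base, and the largest sample also gets the 4-second buffer. The helper below restates the rule used inside `main.py`'s test loops (there it appears inline rather than as a separate function):

```python
BASE_TEST_DURATION = 8.0        # seconds allowed for every sample size
ADDITIONAL_TEST_DURATION = 4.0  # extra budget granted to the largest samples


def test_duration_for(size_key: str) -> float:
    # main.py applies the longer budget to 100MB (and any larger sizes it may list).
    if size_key in ("100MB", "200MB", "500MB", "1GB"):
        return BASE_TEST_DURATION + ADDITIONAL_TEST_DURATION
    return BASE_TEST_DURATION


assert test_duration_for("1MB") == 8.0
assert test_duration_for("100MB") == 12.0
```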

## Troubleshooting

### Common Issues

#### MCP Server Connection
1. **Path Configuration**: Ensure an absolute path is used in the MCP configuration
2. **Directory Permissions**: Verify read/execute permissions for the project directory
3. **Python Version**: Requires Python 3.12+ with async support
4. **Dependencies**: Install the `mcp[cli]` and `httpx` packages

#### Speed Test Issues
1. **GitHub Repository Access**: Ensure `inventer-dev/speed-test-files` is accessible
2. **Firewall/Proxy**: Check if corporate firewalls block test endpoints
3. **CDN Routing**: Some ISPs may route differently to CDNs
4. **Network Stability**: Jitter tests require stable connections

#### Performance Considerations
- **File Size Limits**: Large files (>50MB) may time out on slow connections
- **Upload Endpoints**: If the primary endpoint fails, fallback is automatic
- **Geographic Accuracy**: POP detection depends on CDN header consistency

## Development

### Project Structure
```
mcp-internet-speed-test/
├── mcp_internet_speed_test/   # Main package directory
│   ├── __init__.py            # Package initialization
│   └── main.py                # MCP server implementation
├── README.md                  # This documentation
├── Dockerfile                 # Container configuration
└── pyproject.toml             # Python project configuration
```

### Key Components

#### Configuration Constants
- `GITHUB_RAW_URL`: Base URL for the test files repository
- `UPLOAD_ENDPOINTS`: Prioritized list of upload test endpoints
- `SIZE_PROGRESSION`: Ordered list of file sizes for incremental testing
- `*_POP_LOCATIONS`: Mappings of CDN codes to geographic locations

#### Core Functions
- `extract_server_info()`: Parses HTTP headers to identify CDN providers
- `measure_*()`: Individual test functions for different metrics
- `run_complete_test()`: Orchestrates the comprehensive testing suite

### Configuration Customization

If you clone the repository, you can customize the following in `mcp_internet_speed_test/main.py`:

```python
# GitHub repository settings
GITHUB_USERNAME = "your-username"
GITHUB_REPO = "your-speed-test-files"
GITHUB_BRANCH = "main"

# Test duration thresholds
BASE_TEST_DURATION = 8.0  # seconds
ADDITIONAL_TEST_DURATION = 4.0  # seconds

# Default endpoints
DEFAULT_UPLOAD_URL = "your-upload-endpoint"
DEFAULT_LATENCY_URL = "your-latency-endpoint"
```
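
These repository settings feed directly into the download URL map. A small sketch of how they combine, mirroring the pattern used in `main.py` (the dictionary comprehension here is a compressed stand-in for the explicit mapping in the real file):

```python
GITHUB_USERNAME = "your-username"
GITHUB_REPO = "your-speed-test-files"
GITHUB_BRANCH = "main"

# main.py builds one raw.githubusercontent.com base URL from the three settings...
GITHUB_RAW_URL = (
    f"https://raw.githubusercontent.com/{GITHUB_USERNAME}/{GITHUB_REPO}/{GITHUB_BRANCH}"
)

# ...and expects size-named .bin files at the repository root.
DEFAULT_DOWNLOAD_URLS = {
    size: f"{GITHUB_RAW_URL}/{size}.bin"
    for size in ("128KB", "256KB", "512KB", "1MB", "2MB", "5MB", "10MB")
}
```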

### Contributing

This is an experimental project and contributions are welcome:

1. **Issues**: Report bugs or request features
2. **Pull Requests**: Submit code improvements
3. **Documentation**: Help improve this README
4. **Testing**: Test with different network conditions and CDNs

## License

This project is licensed under the MIT License - see the LICENSE file for details.

## Acknowledgments

- MCP Framework maintainers for standardizing AI tool interactions
- The Model Context Protocol community for documentation and examples

iflow_mcp_internet_speed_test-0.1.1.dist-info/RECORD
@@ -0,0 +1,8 @@
iflow_mcp_internet_speed_test-0.1.1.dist-info/licenses/LICENSE,sha256=cCb3HXf-Ls45DyPWWK5bfDo70RsI_HQ8m7H9UdOINEQ,1069
mcp_internet_speed_test/__init__.py,sha256=tXlhYHCq2ak07-QRCC-qH_LetvZZy2vuFW4Gb-SjhxU,302
mcp_internet_speed_test/main.py,sha256=Pjn83_jsIdZLWpc7uSYE5pLtVDhjgAv0dS8nQJR_iMk,26420
iflow_mcp_internet_speed_test-0.1.1.dist-info/METADATA,sha256=pxJ7NhMhmB4rgFY6dgPD7CPrhup16FmX7turmB8SsnY,11956
iflow_mcp_internet_speed_test-0.1.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
iflow_mcp_internet_speed_test-0.1.1.dist-info/entry_points.txt,sha256=YyjiAwCCgQISrSi0CTUOmkGosW7AN2NHKNUVkJ0tjKQ,81
iflow_mcp_internet_speed_test-0.1.1.dist-info/top_level.txt,sha256=xcHQxrlq-l_JmJZzg9le3dccclAscNrMOJODn45-9KA,24
iflow_mcp_internet_speed_test-0.1.1.dist-info/RECORD,,

iflow_mcp_internet_speed_test-0.1.1.dist-info/licenses/LICENSE
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 Inventer Team

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

iflow_mcp_internet_speed_test-0.1.1.dist-info/top_level.txt
@@ -0,0 +1 @@
mcp_internet_speed_test

mcp_internet_speed_test/__init__.py
@@ -0,0 +1,12 @@
"""
MCP Internet Speed Test

A Model Context Protocol server for internet speed testing.
Enables AI models and agents to perform network performance measurements.
"""

__version__ = "0.1.1"
__author__ = "Pedro Cruz"
__email__ = "hola@inventer.dev"

__all__ = ["__version__", "__author__", "__email__"]

mcp_internet_speed_test/main.py
@@ -0,0 +1,801 @@
"""
Model Context Protocol for the internet speed test

This MCP implements an internet speed test service inspired by the SpeedOf.Me methodology.

## How It Works

An internet speed test uses an incremental testing approach:

### Download Test
- Begins with downloading the smallest sample size (128 KB)
- Gradually increases file size until a download takes more than 8 seconds
- Uses the last sample that took more than 8 seconds for the final speed calculation

### Upload Test
- Similar incremental mechanism for uploads
- Starts with a smaller sample file and gradually increases
- Continues until an upload takes more than 8 seconds

### Test Method
- Tests bandwidth in several passes with gradually increasing file sizes
- Can measure a wide range of connection speeds (from 10 Kbps to 100+ Mbps)
- Sample file sizes range from 128 KB to 512 MB

"""

import time
import re
from typing import Dict, Optional

import httpx
from mcp.server.fastmcp import FastMCP

# Create a singleton instance of FastMCP
mcp = FastMCP("internet_speed_test", dependencies=["httpx"])

# Default URLs for testing
GITHUB_USERNAME = "inventer-dev"  # Replace with your GitHub username
GITHUB_REPO = "speed-test-files"  # Replace with your repository name
GITHUB_BRANCH = "main"  # Replace with your branch name (main or master)

# Build base URL for GitHub raw content
GITHUB_RAW_URL = (
    f"https://raw.githubusercontent.com/{GITHUB_USERNAME}/{GITHUB_REPO}/{GITHUB_BRANCH}"
)

DEFAULT_DOWNLOAD_URLS = {
    "128KB": f"{GITHUB_RAW_URL}/128KB.bin",
    "256KB": f"{GITHUB_RAW_URL}/256KB.bin",
    "512KB": f"{GITHUB_RAW_URL}/512KB.bin",
    "1MB": f"{GITHUB_RAW_URL}/1MB.bin",
    "2MB": f"{GITHUB_RAW_URL}/2MB.bin",
    "5MB": f"{GITHUB_RAW_URL}/5MB.bin",
    "10MB": f"{GITHUB_RAW_URL}/10MB.bin",
    "20MB": f"{GITHUB_RAW_URL}/20MB.bin",
    "40MB": f"{GITHUB_RAW_URL}/40MB.bin",
    "50MB": f"{GITHUB_RAW_URL}/50MB.bin",
    "100MB": f"{GITHUB_RAW_URL}/100MB.bin",
}

# Distributed upload endpoints for geographic diversity
UPLOAD_ENDPOINTS = [
    {
        "url": "https://httpi.dev/",
        "name": "Cloudflare Workers (Global)",
        "provider": "Cloudflare",
        "priority": 1,  # Highest priority due to global distribution
    },
    {
        "url": "https://httpbin.org/",
        "name": "HTTPBin (AWS)",
        "provider": "AWS",
        "priority": 2,
    },
]

# Primary endpoints for backward compatibility
DEFAULT_UPLOAD_URL = UPLOAD_ENDPOINTS[0]["url"] + "post"  # Use Cloudflare by default
DEFAULT_LATENCY_URL = UPLOAD_ENDPOINTS[0]["url"] + "get"  # Use Cloudflare by default

# File sizes in bytes for upload testing
UPLOAD_SIZES = {
    "128KB": 128 * 1024,
    "256KB": 256 * 1024,
    "512KB": 512 * 1024,
    "1MB": 1 * 1024 * 1024,
    "2MB": 2 * 1024 * 1024,
    "5MB": 5 * 1024 * 1024,
    "10MB": 10 * 1024 * 1024,
    "20MB": 20 * 1024 * 1024,
    "40MB": 40 * 1024 * 1024,
    "50MB": 50 * 1024 * 1024,
    "100MB": 100 * 1024 * 1024,
}

# Maximum time threshold for a test (in seconds)
BASE_TEST_DURATION = 8.0
ADDITIONAL_TEST_DURATION = 4.0

# Size progression order
SIZE_PROGRESSION = [
    "128KB",
    "256KB",
    "512KB",
    "1MB",
    "2MB",
    "5MB",
    "10MB",
    "20MB",
    "40MB",
    "50MB",
    "100MB",
]

# Server location mapping based on Fastly POP codes
FASTLY_POP_LOCATIONS = {
    "MEX": "Mexico City, Mexico",
    "QRO": "Querétaro, Mexico",
    "DFW": "Dallas, Texas, USA",
    "LAX": "Los Angeles, California, USA",
    "NYC": "New York City, New York, USA",
    "MIA": "Miami, Florida, USA",
    "LHR": "London, United Kingdom",
    "FRA": "Frankfurt, Germany",
    "AMS": "Amsterdam, Netherlands",
    "CDG": "Paris, France",
    "NRT": "Tokyo, Japan",
    "SIN": "Singapore",
    "SYD": "Sydney, Australia",
    "GRU": "São Paulo, Brazil",
    "SCL": "Santiago, Chile",
    "BOG": "Bogotá, Colombia",
    "MAD": "Madrid, Spain",
    "MIL": "Milan, Italy",
    "STO": "Stockholm, Sweden",
    "CPH": "Copenhagen, Denmark",
    "ZUR": "Zurich, Switzerland",
    "VIE": "Vienna, Austria",
    "WAW": "Warsaw, Poland",
    "PRG": "Prague, Czech Republic",
    "BUD": "Budapest, Hungary",
    "ATH": "Athens, Greece",
    "IST": "Istanbul, Turkey",
    "DXB": "Dubai, UAE",
    "BOM": "Mumbai, India",
    "DEL": "New Delhi, India",
    "ICN": "Seoul, South Korea",
    "HKG": "Hong Kong",
    "TPE": "Taipei, Taiwan",
    "KUL": "Kuala Lumpur, Malaysia",
    "BKK": "Bangkok, Thailand",
    "CGK": "Jakarta, Indonesia",
    "MNL": "Manila, Philippines",
    "PER": "Perth, Australia",
    "AKL": "Auckland, New Zealand",
    "JNB": "Johannesburg, South Africa",
    "CPT": "Cape Town, South Africa",
    "CAI": "Cairo, Egypt",
    "LOS": "Lagos, Nigeria",
    "NBO": "Nairobi, Kenya",
    "YYZ": "Toronto, Canada",
    "YVR": "Vancouver, Canada",
    "GIG": "Rio de Janeiro, Brazil",
    "LIM": "Lima, Peru",
    "UIO": "Quito, Ecuador",
    "CCS": "Caracas, Venezuela",
    "PTY": "Panama City, Panama",
    "SJO": "San José, Costa Rica",
    "GUA": "Guatemala City, Guatemala",
    "SDQ": "Santo Domingo, Dominican Republic",
    "SJU": "San Juan, Puerto Rico",
}

# Cloudflare data center locations mapping
CLOUDFLARE_POP_LOCATIONS = {
    "DFW": "Dallas, Texas, USA",
    "LAX": "Los Angeles, California, USA",
    "SJC": "San Jose, California, USA",
    "SEA": "Seattle, Washington, USA",
    "ORD": "Chicago, Illinois, USA",
    "MCI": "Kansas City, Missouri, USA",
    "ATL": "Atlanta, Georgia, USA",
    "MIA": "Miami, Florida, USA",
    "EWR": "Newark, New Jersey, USA",
    "IAD": "Washington, D.C., USA",
    "YYZ": "Toronto, Canada",
    "YVR": "Vancouver, Canada",
    "LHR": "London, United Kingdom",
    "CDG": "Paris, France",
    "FRA": "Frankfurt, Germany",
    "AMS": "Amsterdam, Netherlands",
    "ARN": "Stockholm, Sweden",
    "CPH": "Copenhagen, Denmark",
    "OSL": "Oslo, Norway",
    "HEL": "Helsinki, Finland",
    "WAW": "Warsaw, Poland",
    "PRG": "Prague, Czech Republic",
    "VIE": "Vienna, Austria",
    "ZUR": "Zurich, Switzerland",
    "MIL": "Milan, Italy",
    "FCO": "Rome, Italy",
    "MAD": "Madrid, Spain",
    "BCN": "Barcelona, Spain",
    "LIS": "Lisbon, Portugal",
    "ATH": "Athens, Greece",
    "IST": "Istanbul, Turkey",
    "SVO": "Moscow, Russia",
    "LED": "St. Petersburg, Russia",
    "HKG": "Hong Kong",
    "NRT": "Tokyo, Japan",
    "KIX": "Osaka, Japan",
    "ICN": "Seoul, South Korea",
    "PVG": "Shanghai, China",
    "PEK": "Beijing, China",
    "SIN": "Singapore",
    "KUL": "Kuala Lumpur, Malaysia",
    "BKK": "Bangkok, Thailand",
    "CGK": "Jakarta, Indonesia",
    "MNL": "Manila, Philippines",
    "SYD": "Sydney, Australia",
    "MEL": "Melbourne, Australia",
    "PER": "Perth, Australia",
    "AKL": "Auckland, New Zealand",
    "BOM": "Mumbai, India",
    "DEL": "New Delhi, India",
    "BLR": "Bangalore, India",
    "MAA": "Chennai, India",
    "DXB": "Dubai, UAE",
    "DOH": "Doha, Qatar",
    "KWI": "Kuwait City, Kuwait",
    "JNB": "Johannesburg, South Africa",
    "CPT": "Cape Town, South Africa",
    "LAD": "Luanda, Angola",
    "CAI": "Cairo, Egypt",
    "LOS": "Lagos, Nigeria",
    "NBO": "Nairobi, Kenya",
    "GRU": "São Paulo, Brazil",
    "GIG": "Rio de Janeiro, Brazil",
    "FOR": "Fortaleza, Brazil",
    "SCL": "Santiago, Chile",
    "LIM": "Lima, Peru",
    "BOG": "Bogotá, Colombia",
    "UIO": "Quito, Ecuador",
    "PTY": "Panama City, Panama",
    "SJO": "San José, Costa Rica",
    "MEX": "Mexico City, Mexico",
    "QRO": "Querétaro, Mexico",
}

# AWS CloudFront edge location POP codes mapping
AWS_POP_LOCATIONS = {
    # North America
    "ATL": "Atlanta, Georgia, USA",
    "BOS": "Boston, Massachusetts, USA",
    "ORD": "Chicago, Illinois, USA",
    "CMH": "Columbus, Ohio, USA",
    "DFW": "Dallas, Texas, USA",
    "DEN": "Denver, Colorado, USA",
    "DTW": "Detroit, Michigan, USA",
    "IAH": "Houston, Texas, USA",
    "MCI": "Kansas City, Missouri, USA",
    "LAX": "Los Angeles, California, USA",
    "MIA": "Miami, Florida, USA",
    "MSP": "Minneapolis, Minnesota, USA",
    "BNA": "Nashville, Tennessee, USA",
    "JFK": "New York, New York, USA",
    "EWR": "Newark, New Jersey, USA",
    "PHL": "Philadelphia, Pennsylvania, USA",
    "PHX": "Phoenix, Arizona, USA",
    "PIT": "Pittsburgh, Pennsylvania, USA",
    "HIO": "Portland, Oregon, USA",
    "SLC": "Salt Lake City, Utah, USA",
    "SFO": "San Francisco, California, USA",
    "SEA": "Seattle, Washington, USA",
    "TPA": "Tampa, Florida, USA",
    "IAD": "Washington, DC, USA",
    "YUL": "Montreal, Quebec, Canada",
    "YTO": "Toronto, Ontario, Canada",
    "YVR": "Vancouver, British Columbia, Canada",
    "QRO": "Querétaro, Mexico",
    # South America
    "BOG": "Bogotá, Colombia",
    "EZE": "Buenos Aires, Argentina",
    "FOR": "Fortaleza, Brazil",
    "LIM": "Lima, Peru",
    "GIG": "Rio de Janeiro, Brazil",
    "SCL": "Santiago, Chile",
    "GRU": "São Paulo, Brazil",
    # Europe
    "AMS": "Amsterdam, Netherlands",
    "ATH": "Athens, Greece",
    "TXL": "Berlin, Germany",
    "BRU": "Brussels, Belgium",
    "OTP": "Bucharest, Romania",
    "BUD": "Budapest, Hungary",
    "CPH": "Copenhagen, Denmark",
    "DUB": "Dublin, Ireland",
    "DUS": "Düsseldorf, Germany",
    "FRA": "Frankfurt am Main, Germany",
    "HAM": "Hamburg, Germany",
    "HEL": "Helsinki, Finland",
    "LIS": "Lisbon, Portugal",
    "LHR": "London, United Kingdom",
    "MAD": "Madrid, Spain",
    "MAN": "Manchester, United Kingdom",
    "MRS": "Marseille, France",
    "MXP": "Milan, Italy",
    "MUC": "Munich, Germany",
    "OSL": "Oslo, Norway",
    "PMO": "Palermo, Italy",
    "CDG": "Paris, France",
    "PRG": "Prague, Czech Republic",
    "FCO": "Rome, Italy",
    "SOF": "Sofia, Bulgaria",
    "ARN": "Stockholm, Sweden",
    "VIE": "Vienna, Austria",
    "WAW": "Warsaw, Poland",
    "ZAG": "Zagreb, Croatia",
    "ZRH": "Zurich, Switzerland",
    "IST": "Istanbul, Turkey",
    # Middle East
    "DXB": "Dubai, UAE",
    "FJR": "Fujairah, UAE",
    "JED": "Jeddah, Saudi Arabia",
    "BAH": "Manama, Bahrain",
    "MCT": "Muscat, Oman",
    "DOH": "Doha, Qatar",
    "TLV": "Tel Aviv, Israel",
    # Africa
    "CAI": "Cairo, Egypt",
    "CPT": "Cape Town, South Africa",
    "JNB": "Johannesburg, South Africa",
    "LOS": "Lagos, Nigeria",
    "NBO": "Nairobi, Kenya",
    # Asia Pacific
    "BKK": "Bangkok, Thailand",
    "PEK": "Beijing, China",
    "BLR": "Bengaluru, India",
    "MAA": "Chennai, India",
    "DEL": "New Delhi, India",
    "HAN": "Hanoi, Vietnam",
    "SGN": "Ho Chi Minh City, Vietnam",
    "HKG": "Hong Kong, China",
    "HYD": "Hyderabad, India",
    "CGK": "Jakarta, Indonesia",
    "CCU": "Kolkata, India",
    "KUL": "Kuala Lumpur, Malaysia",
    "MNL": "Manila, Philippines",
    "BOM": "Mumbai, India",
    "KIX": "Osaka, Japan",
    "PNQ": "Pune, India",
    "ICN": "Seoul, South Korea",
    "PVG": "Shanghai, China",
    "SZX": "Shenzhen, China",
    "SIN": "Singapore",
    "TPE": "Taoyuan, Taiwan",
    "NRT": "Tokyo, Japan",
    "ZHY": "Zhongwei, China",
    # Australia & Oceania
    "AKL": "Auckland, New Zealand",
    "BNE": "Brisbane, Australia",
    "MEL": "Melbourne, Australia",
    "PER": "Perth, Australia",
    "SYD": "Sydney, Australia",
}


def extract_server_info(headers: Dict[str, str]) -> Dict[str, Optional[str]]:
    """
    Extract server information from HTTP headers.

    Args:
        headers: HTTP response headers

    Returns:
        Dictionary with server information including POP location, CDN info, etc.
    """
    server_info = {
        "cdn_provider": None,
        "pop_code": None,
        "pop_location": None,
        "served_by": None,
        "via_header": None,
        "cache_status": None,
        "server_ip_info": None,
        "x_cache": None,
    }

    # Extract x-served-by header (Fastly specific)
    served_by = headers.get("x-served-by", "")
    if served_by:
        server_info["served_by"] = served_by

        # Extract POP code from served-by header
        # Format examples: cache-mex4329-MEX, cache-qro4141-QRO, cache-dfw-kdfw8210052-DFW
        pop_match = re.search(r"-([A-Z]{3})$", served_by)
        if pop_match:
            server_info["pop_code"] = pop_match.group(1)
            server_info["pop_location"] = FASTLY_POP_LOCATIONS.get(
                pop_match.group(1), f"Unknown location ({pop_match.group(1)})"
            )
            server_info["cdn_provider"] = "Fastly"

    # Extract via header
    via = headers.get("via", "")
    if via:
        server_info["via_header"] = via

    # Extract cache status
    cache_status = headers.get("x-cache", "")
    if cache_status:
        server_info["x_cache"] = cache_status
        server_info["cache_status"] = "HIT" if "HIT" in cache_status.upper() else "MISS"

    # Extract Cloudflare CF-Ray header
    cf_ray = headers.get("cf-ray", "")
    if cf_ray:
        server_info["cf_ray"] = cf_ray
        # Extract data center code from CF-Ray (format: request_id-datacenter_code)
        cf_match = re.search(r"-([A-Z]{3})$", cf_ray)
        if cf_match:
            server_info["pop_code"] = cf_match.group(1)
            server_info["pop_location"] = CLOUDFLARE_POP_LOCATIONS.get(
                cf_match.group(1), f"Unknown location ({cf_match.group(1)})"
            )
            server_info["cdn_provider"] = "Cloudflare"

    # Extract AWS CloudFront headers
    cf_pop = headers.get("x-amz-cf-pop", "")
    cf_id = headers.get("x-amz-cf-id", "")
    if cf_pop:
        server_info["cf_pop"] = cf_pop
        server_info["cdn_provider"] = "Amazon CloudFront"

        # Extract POP code from x-amz-cf-pop header (format: DFW56-P1, SIN5-C1)
        cf_pop_match = re.search(r"^([A-Z]{3})", cf_pop)
        if cf_pop_match:
            server_info["pop_code"] = cf_pop_match.group(1)
            server_info["pop_location"] = AWS_POP_LOCATIONS.get(
                cf_pop_match.group(1), f"Unknown location ({cf_pop_match.group(1)})"
            )

    if cf_id:
        server_info["cf_id"] = cf_id
        if not server_info["cdn_provider"]:
            server_info["cdn_provider"] = "Amazon CloudFront"

    # Check for other CDN indicators
    if not server_info["cdn_provider"]:
        if "fastly" in headers.get("server", "").lower():
            server_info["cdn_provider"] = "Fastly"
        elif "cloudflare" in headers.get("server", "").lower():
            server_info["cdn_provider"] = "Cloudflare"
        elif (
            "amazon" in headers.get("server", "").lower()
            or "aws" in headers.get("server", "").lower()
        ):
            server_info["cdn_provider"] = "Amazon CloudFront"

    return server_info


# Register tools
@mcp.tool()
async def measure_download_speed(size_limit: str = "100MB") -> dict:
    """
    Measure download speed using incremental file sizes.

    Args:
        size_limit: Maximum file size to test (default: 100MB)

    Returns:
        Dictionary with download speed results
    """
    results = []
    final_result = None

    # Find the index of the size limit in our progression
    max_index = (
        SIZE_PROGRESSION.index(size_limit)
        if size_limit in SIZE_PROGRESSION
        else len(SIZE_PROGRESSION) - 1
    )

    # Test each file size in order, up to the specified limit
    async with httpx.AsyncClient() as client:
        for size_key in SIZE_PROGRESSION[: max_index + 1]:
            if size_key in ["100MB", "200MB", "500MB", "1GB"]:
                test_duration = BASE_TEST_DURATION + ADDITIONAL_TEST_DURATION
            else:
                test_duration = BASE_TEST_DURATION

            url = DEFAULT_DOWNLOAD_URLS[size_key]
            start = time.time()
            total_size = 0

            async with client.stream(
                "GET",
                url,
            ) as response:
                # Extract server information from headers
                server_info = extract_server_info(dict(response.headers))

                async for chunk in response.aiter_bytes(chunk_size=1024):
                    if chunk:
                        chunk_size = len(chunk)
                        total_size += chunk_size

                        # Check elapsed time during download
                        current_time = time.time()
                        elapsed_time = current_time - start

                        # Update our final result continuously
                        speed_mbps = ((total_size * 8) / (1024 * 1024)) / elapsed_time
                        final_result = {
                            "download_speed": round(speed_mbps, 2),
                            "elapsed_time": round(elapsed_time, 2),
                            "data_size": total_size,
                            "size": size_key,
                            "url": url,
                            "server_info": server_info,
                        }

                        # If test duration exceeded, stop the test
                        if elapsed_time >= test_duration:
                            break

    # Return the final result or an error if all tests failed
    if final_result:
        return {
            "download_speed": final_result["download_speed"],
            "unit": "Mbps",
            "elapsed_time": final_result["elapsed_time"],
            "data_size": final_result["data_size"],
            "size_used": final_result["size"],
            "server_info": final_result["server_info"],
            "all_tests": results,
        }
    return {
        "error": True,
        "message": "All download tests failed",
        "details": results,
    }


@mcp.tool()
async def measure_upload_speed(
    url_upload: str = DEFAULT_UPLOAD_URL, size_limit: str = "100MB"
) -> dict:
    """
    Measure upload speed using incremental file sizes.

    Args:
        url_upload: URL to upload data to
        size_limit: Maximum file size to test (default: 100MB)

    Returns:
        Dictionary with upload speed results
    """
    results = []
    final_result = None

    # Find the index of the size limit in our progression
    max_index = (
        SIZE_PROGRESSION.index(size_limit)
        if size_limit in SIZE_PROGRESSION
        else len(SIZE_PROGRESSION) - 1
    )

    # Only test up to the specified size limit
    async with httpx.AsyncClient() as client:
        for size_key in SIZE_PROGRESSION[: max_index + 1]:
            if size_key in ["100MB", "200MB", "500MB", "1GB"]:
                test_duration = BASE_TEST_DURATION + ADDITIONAL_TEST_DURATION
            else:
                test_duration = BASE_TEST_DURATION

            data_size = UPLOAD_SIZES[size_key]
            data = b"x" * data_size
            start = time.time()

            try:
                response = await client.post(url_upload, data=data, timeout=30.0)
                end = time.time()
                elapsed_time = end - start

                # Extract server information from headers
                server_info = extract_server_info(dict(response.headers))

                # Calculate upload speed in Mbps
                speed_mbps = (data_size * 8) / (1024 * 1024) / elapsed_time
                result = {
                    "size": size_key,
                    "upload_speed": round(speed_mbps, 2),
                    "elapsed_time": round(elapsed_time, 2),
                    "data_size": data_size,
                    "url": url_upload,
                    "server_info": server_info,
                }

                results.append(result)

                # Set the final result to the last result
                final_result = result

                # If this test took longer than our threshold, we're done
                if elapsed_time > test_duration:
                    break

            except (httpx.RequestError, httpx.HTTPStatusError, httpx.TimeoutException) as e:
                results.append(
                    {
                        "size": size_key,
                        "error": True,
                        "message": f"HTTP Error: {str(e)}",
                        "url": url_upload,
                    }
                )
                # If we encounter an error, use the last successful result or continue
                if final_result:
                    break

    # Return the final result or an error if all tests failed
    if final_result:
        return {
            "upload_speed": final_result["upload_speed"],
            "unit": "Mbps",
            "elapsed_time": final_result["elapsed_time"],
            "data_size": final_result["data_size"],
            "size_used": final_result["size"],
            "server_info": final_result["server_info"],
            "all_tests": results,
        }
    return {
        "error": True,
        "message": "All upload tests failed",
        "details": results,
    }


@mcp.tool()
async def measure_latency(url: str = DEFAULT_LATENCY_URL) -> dict:
    """Measure the latency

    Args:
        url (str): The URL to measure latency to

    Returns:
        Dictionary with latency result
    """
    start = time.time()
    async with httpx.AsyncClient() as client:
        response = await client.get(url)
        end = time.time()
    elapsed_time = end - start

    # Extract server information from headers
    server_info = extract_server_info(dict(response.headers))

    return {
        "latency": round(elapsed_time * 1000, 2),  # Convert to milliseconds
        "unit": "ms",
        "url": url,
        "server_info": server_info,
    }


@mcp.tool()
async def measure_jitter(url: str = DEFAULT_LATENCY_URL, samples: int = 5) -> dict:
    """Jitter is the variation in latency, so we need multiple measurements."""
    latency_values = []
    server_info = None

    async with httpx.AsyncClient() as client:
        for i in range(samples):
            start = time.time()
            response = await client.get(url)
            end = time.time()
            latency_values.append((end - start) * 1000)  # Convert to milliseconds

            # Extract server info from the first response
            if i == 0:
                server_info = extract_server_info(dict(response.headers))

    # Calculate average latency
    avg_latency = sum(latency_values) / len(latency_values)

    # Calculate jitter (average deviation from the mean)
    jitter = sum(abs(latency - avg_latency) for latency in latency_values) / len(
        latency_values
    )

    return {
        "jitter": round(jitter, 2),
        "unit": "ms",
        "average_latency": round(avg_latency, 2),
        "samples": samples,
        "url": url,
        "server_info": server_info,
    }


@mcp.tool()
async def get_server_info(
    url_download: str = DEFAULT_DOWNLOAD_URLS["128KB"],
    url_upload: str = DEFAULT_UPLOAD_URL,
    url_latency: str = DEFAULT_LATENCY_URL,
) -> dict:
    """
    Get server information for any URL without performing speed tests.

    Args:
        url_download: URL to download data from
        url_upload: URL to upload data to
        url_latency: URL to measure latency to

    Returns:
        Dictionary with server information for each URL, including POP location, CDN info, etc.
    """
    async with httpx.AsyncClient() as client:
        try:
            response_url_download = await client.head(url_download, timeout=12.0)
            server_info_url_download = extract_server_info(
                dict(response_url_download.headers)
            )

            response_url_upload = await client.head(url_upload, timeout=12.0)
            server_info_url_upload = extract_server_info(
                dict(response_url_upload.headers)
            )

            response_url_latency = await client.head(url_latency, timeout=12.0)
            server_info_url_latency = extract_server_info(
                dict(response_url_latency.headers)
            )

            return {
                "url_download": url_download,
                "status_code_url_download": response_url_download.status_code,
                "server_info_url_download": server_info_url_download,
                "headers_url_download": dict(response_url_download.headers),
                "url_upload": url_upload,
                "status_code_url_upload": response_url_upload.status_code,
                "server_info_url_upload": server_info_url_upload,
                "headers_url_upload": dict(response_url_upload.headers),
                "url_latency": url_latency,
                "status_code_url_latency": response_url_latency.status_code,
                "server_info_url_latency": server_info_url_latency,
                "headers_url_latency": dict(response_url_latency.headers),
            }
        except (httpx.RequestError, httpx.HTTPStatusError, httpx.TimeoutException) as e:
            return {
                "error": True,
                "message": f"Failed to get servers info: {str(e)}",
                "url_download": url_download,
                "url_upload": url_upload,
                "url_latency": url_latency,
            }


@mcp.tool()
async def run_complete_test(
    max_size: str = "100MB",
    url_upload: str = DEFAULT_UPLOAD_URL,
    url_latency: str = DEFAULT_LATENCY_URL,
) -> dict:
    """
    Run a complete speed test returning all metrics in a single call.

    This test uses the smart incremental approach inspired by SpeedOf.Me:
    - First measures download speed with gradually increasing file sizes
    - Then measures upload speed with gradually increasing data sizes
    - Measures latency and jitter
    - Returns comprehensive results with real-time data

    Args:
        max_size: Maximum file size to test (default: 100MB)
        url_upload: URL for upload testing
        url_latency: URL for latency testing

    Returns:
        Complete test results including download, upload, latency and jitter metrics
    """
    download_result = await measure_download_speed(max_size)
    upload_result = await measure_upload_speed(url_upload, max_size)
    latency_result = await measure_latency(url_latency)
    jitter_result = await measure_jitter(url_latency)

    return {
        "timestamp": time.time(),
        "download": download_result,
        "upload": upload_result,
        "latency": latency_result,
        "jitter": jitter_result,
        "test_methodology": "Incremental file size approach with 8-second threshold",
    }


# Entry point to run the server
if __name__ == "__main__":
    mcp.run()