mcp-ssh-vps 0.4.1 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. mcp_ssh_vps-0.4.1.dist-info/METADATA +482 -0
  2. mcp_ssh_vps-0.4.1.dist-info/RECORD +47 -0
  3. mcp_ssh_vps-0.4.1.dist-info/WHEEL +5 -0
  4. mcp_ssh_vps-0.4.1.dist-info/entry_points.txt +4 -0
  5. mcp_ssh_vps-0.4.1.dist-info/licenses/LICENSE +21 -0
  6. mcp_ssh_vps-0.4.1.dist-info/top_level.txt +1 -0
  7. sshmcp/__init__.py +3 -0
  8. sshmcp/cli.py +473 -0
  9. sshmcp/config.py +155 -0
  10. sshmcp/core/__init__.py +5 -0
  11. sshmcp/core/container.py +291 -0
  12. sshmcp/models/__init__.py +15 -0
  13. sshmcp/models/command.py +69 -0
  14. sshmcp/models/file.py +102 -0
  15. sshmcp/models/machine.py +139 -0
  16. sshmcp/monitoring/__init__.py +0 -0
  17. sshmcp/monitoring/alerts.py +464 -0
  18. sshmcp/prompts/__init__.py +7 -0
  19. sshmcp/prompts/backup.py +151 -0
  20. sshmcp/prompts/deploy.py +115 -0
  21. sshmcp/prompts/monitor.py +146 -0
  22. sshmcp/resources/__init__.py +7 -0
  23. sshmcp/resources/logs.py +99 -0
  24. sshmcp/resources/metrics.py +204 -0
  25. sshmcp/resources/status.py +160 -0
  26. sshmcp/security/__init__.py +7 -0
  27. sshmcp/security/audit.py +314 -0
  28. sshmcp/security/rate_limiter.py +221 -0
  29. sshmcp/security/totp.py +392 -0
  30. sshmcp/security/validator.py +234 -0
  31. sshmcp/security/whitelist.py +169 -0
  32. sshmcp/server.py +632 -0
  33. sshmcp/ssh/__init__.py +6 -0
  34. sshmcp/ssh/async_client.py +247 -0
  35. sshmcp/ssh/client.py +464 -0
  36. sshmcp/ssh/executor.py +79 -0
  37. sshmcp/ssh/forwarding.py +368 -0
  38. sshmcp/ssh/pool.py +343 -0
  39. sshmcp/ssh/shell.py +518 -0
  40. sshmcp/ssh/transfer.py +461 -0
  41. sshmcp/tools/__init__.py +13 -0
  42. sshmcp/tools/commands.py +226 -0
  43. sshmcp/tools/files.py +220 -0
  44. sshmcp/tools/helpers.py +321 -0
  45. sshmcp/tools/history.py +372 -0
  46. sshmcp/tools/processes.py +214 -0
  47. sshmcp/tools/servers.py +484 -0
@@ -0,0 +1,115 @@
+ """MCP Prompt for application deployment."""
+
+
+ def deploy_app(
+     host: str,
+     branch: str = "main",
+     app_path: str = "/var/www/app",
+     package_manager: str = "npm",
+     process_manager: str = "pm2",
+     app_name: str = "app",
+ ) -> str:
+     """
+     Generate deployment prompt for application.
+
+     Creates a step-by-step deployment plan for a web application.
+
+     Args:
+         host: Target host name.
+         branch: Git branch to deploy (default: main).
+         app_path: Application directory path.
+         package_manager: Package manager (npm, yarn, pip, poetry).
+         process_manager: Process manager (pm2, systemd, supervisor).
+         app_name: Application name for process manager.
+
+     Returns:
+         Deployment instructions as multi-line string.
+     """
+     # Build install command based on package manager
+     install_commands = {
+         "npm": "npm ci --production",
+         "yarn": "yarn install --frozen-lockfile --production",
+         "pip": "pip install -r requirements.txt",
+         "poetry": "poetry install --no-dev",
+     }
+     install_cmd = install_commands.get(package_manager, "npm install")
+
+     # Build restart command based on process manager
+     restart_commands = {
+         "pm2": f"pm2 restart {app_name}",
+         "systemd": f"systemctl restart {app_name}",
+         "supervisor": f"supervisorctl restart {app_name}",
+     }
+     restart_cmd = restart_commands.get(process_manager, f"pm2 restart {app_name}")
+
+     # Build status command
+     status_commands = {
+         "pm2": f"pm2 status {app_name}",
+         "systemd": f"systemctl status {app_name}",
+         "supervisor": f"supervisorctl status {app_name}",
+     }
+     status_cmd = status_commands.get(process_manager, f"pm2 status {app_name}")
+
+     return f"""Deploy application to {host}:
+
+ ## Pre-deployment Checks
+ 1. Check server status:
+ - Use get_status resource for {host} to verify server is online
+ - Use get_metrics resource for {host} to check available resources
+
+ 2. Check current application state:
+ ```
+ execute_command(host="{host}", command="cd {app_path} && git status")
+ ```
+
+ ## Deployment Steps
+
+ 3. Pull latest code from branch {branch}:
+ ```
+ execute_command(host="{host}", command="cd {app_path} && git fetch origin && git checkout {branch} && git pull origin {branch}")
+ ```
+
+ 4. Install dependencies:
+ ```
+ execute_command(host="{host}", command="cd {app_path} && {install_cmd}")
+ ```
+
+ 5. Build application (if needed):
+ ```
+ execute_command(host="{host}", command="cd {app_path} && npm run build")
+ ```
+
+ 6. Restart application:
+ ```
+ execute_command(host="{host}", command="{restart_cmd}")
+ ```
+
+ ## Post-deployment Verification
+
+ 7. Check application status:
+ ```
+ execute_command(host="{host}", command="{status_cmd}")
+ ```
+
+ 8. Check application logs:
+ - Use get_logs resource for {host} with path "var/log/{app_name}.log" or appropriate log path
+
+ 9. Verify health endpoint (if available):
+ ```
+ execute_command(host="{host}", command="curl -s http://localhost:3000/health")
+ ```
+
+ ## Rollback Plan
+ If deployment fails:
+ 1. Checkout previous commit:
+ ```
+ execute_command(host="{host}", command="cd {app_path} && git checkout HEAD~1")
+ ```
+ 2. Reinstall dependencies and restart application
+
+ ## Notes
+ - Branch: {branch}
+ - Application path: {app_path}
+ - Package manager: {package_manager}
+ - Process manager: {process_manager}
+ """
@@ -0,0 +1,146 @@
+ """MCP Prompt for system monitoring."""
+
+
+ def monitor_health(
+     host: str,
+     check_logs: bool = True,
+     check_services: bool = True,
+     log_paths: list[str] | None = None,
+     services: list[str] | None = None,
+ ) -> str:
+     """
+     Generate health monitoring prompt.
+
+     Creates a comprehensive health check plan for a server.
+
+     Args:
+         host: Target host name.
+         check_logs: Whether to check log files for errors.
+         check_services: Whether to check service statuses.
+         log_paths: List of log file paths to check.
+         services: List of services to check.
+
+     Returns:
+         Monitoring instructions as multi-line string.
+     """
+     default_logs = [
+         "/var/log/syslog",
+         "/var/log/nginx/error.log",
+         "/var/log/app/error.log",
+     ]
+     default_services = ["nginx", "postgresql", "redis"]
+
+     log_paths = log_paths or default_logs
+     services = services or default_services
+
+     logs_section = ""
+     if check_logs:
+         logs_section = """
+ ## Log Analysis
+
+ Check for errors in application logs:
+ """
+         for log_path in log_paths:
+             logs_section += f"""
+ ### {log_path}
+ - Use get_logs resource for {host} with path "{log_path.lstrip("/")}" and filter_level="error"
+ - Or execute directly:
+ ```
+ execute_command(host="{host}", command="tail -n 100 {log_path} | grep -iE 'error|fatal|critical' | tail -20")
+ ```
+ """
+
+     services_section = ""
+     if check_services:
+         services_section = """
+ ## Service Health
+
+ Check status of critical services:
+ """
+         for service in services:
+             services_section += f"""
+ ### {service}
+ ```
+ manage_process(host="{host}", action="status", process_name="{service}")
+ ```
+ """
+
+     return f"""Monitor health of {host}:
+
+ ## System Metrics
+
+ 1. Get current system metrics:
+ - Use get_metrics resource for {host}
+ - This provides CPU, memory, disk usage, and load average
+
+ 2. Check for high resource usage:
+ ```
+ execute_command(host="{host}", command="top -bn1 | head -20")
+ ```
+
+ 3. Check disk space on all mounts:
+ ```
+ execute_command(host="{host}", command="df -h")
+ ```
+
+ 4. Check memory details:
+ ```
+ execute_command(host="{host}", command="free -m")
+ ```
+
+ 5. Check system load:
+ ```
+ execute_command(host="{host}", command="uptime")
+ ```
+ {logs_section}{services_section}
+ ## Network Health
+
+ Check network connectivity:
+ ```
+ execute_command(host="{host}", command="netstat -tuln | head -20")
+ ```
+
+ Check established connections:
+ ```
+ execute_command(host="{host}", command="netstat -an | grep ESTABLISHED | wc -l")
+ ```
+
+ ## Recent System Events
+
+ Check dmesg for hardware/kernel issues:
+ ```
+ execute_command(host="{host}", command="dmesg | tail -20")
+ ```
+
+ Check authentication logs:
+ ```
+ execute_command(host="{host}", command="tail -20 /var/log/auth.log")
+ ```
+
+ ## Health Report Summary
+
+ After running the above checks, summarize:
+
+ 1. **System Resources**:
+ - CPU usage: [X]%
+ - Memory usage: [X]%
+ - Disk usage: [X]%
+ - Load average: [X]
+
+ 2. **Services Status**:
+ - List all checked services and their status
+
+ 3. **Issues Found**:
+ - List any errors from logs
+ - List any services that are not running
+ - List any resource usage concerns
+
+ 4. **Recommendations**:
+ - Based on findings, provide action items
+
+ ## Thresholds to Watch
+ - CPU > 80%: High usage warning
+ - Memory > 85%: Memory pressure warning
+ - Disk > 90%: Disk space critical
+ - Load average > number of cores: System overloaded
+ """
@@ -0,0 +1,7 @@
+ """MCP Resources for VPS data access."""
+
+ from sshmcp.resources.logs import get_logs
+ from sshmcp.resources.metrics import get_metrics
+ from sshmcp.resources.status import get_status
+
+ __all__ = ["get_logs", "get_metrics", "get_status"]
@@ -0,0 +1,99 @@
+ """MCP Resource for reading logs."""
+
+ import structlog
+
+ from sshmcp.config import get_machine
+ from sshmcp.security.audit import get_audit_logger
+ from sshmcp.security.validator import validate_path
+ from sshmcp.ssh.pool import get_pool
+
+ logger = structlog.get_logger()
+
+
+ def get_logs(
+     host: str,
+     log_path: str,
+     lines: int = 100,
+     filter_level: str | None = None,
+ ) -> str:
+     """
+     Get logs from VPS server.
+
+     Reads the last N lines from a log file, optionally filtering by log level.
+
+     Args:
+         host: Name of the host from machines.json configuration.
+         log_path: Path to log file (e.g., "var/log/app.log").
+         lines: Number of lines to retrieve (default: 100).
+         filter_level: Optional log level filter (error, warn, info).
+
+     Returns:
+         Log content as string.
+
+     Raises:
+         ValueError: If host not found or path not allowed.
+         RuntimeError: If logs cannot be read.
+
+     Example:
+         Resource URI: vps://production-server/logs/var/log/app.log
+     """
+     audit = get_audit_logger()
+
+     # Normalize path (add leading slash if needed)
+     if not log_path.startswith("/"):
+         log_path = "/" + log_path
+
+     # Get machine configuration
+     try:
+         machine = get_machine(host)
+     except Exception as e:
+         raise ValueError(f"Host not found: {host}") from e
+
+     # Validate path
+     is_valid, error_msg = validate_path(log_path, machine.security, "read")
+     if not is_valid:
+         audit.log_path_rejected(host, log_path, error_msg or "Path not allowed")
+         raise ValueError(f"Path not allowed: {error_msg}")
+
+     # Build command
+     if filter_level:
+         level_patterns = {
+             "error": "ERROR|FATAL|CRITICAL",
+             "warn": "WARN|WARNING|ERROR|FATAL|CRITICAL",
+             "info": "INFO|WARN|WARNING|ERROR|FATAL|CRITICAL",
+         }
+         pattern = level_patterns.get(filter_level.lower(), "")
+         if pattern:
+             command = f"tail -n {lines * 2} {log_path} | grep -iE '{pattern}' | tail -n {lines}"
+         else:
+             command = f"tail -n {lines} {log_path}"
+     else:
+         command = f"tail -n {lines} {log_path}"
+
+     # Execute command
+     pool = get_pool()
+     pool.register_machine(machine)
+
+     try:
+         client = pool.get_client(host)
+         try:
+             result = client.execute(command)
+
+             if result.exit_code != 0:
+                 if "No such file" in result.stderr:
+                     raise RuntimeError(f"Log file not found: {log_path}")
+                 raise RuntimeError(f"Failed to read logs: {result.stderr}")
+
+             audit.log(
+                 event="logs_read",
+                 host=host,
+                 result={"path": log_path, "lines": lines},
+             )
+
+             return result.stdout
+
+         finally:
+             pool.release_client(client)
+
+     except Exception as e:
+         raise RuntimeError(f"Failed to read logs: {e}") from e
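
A minimal usage sketch for the logs resource, assuming a host named "web-1" exists in machines.json and its security policy allows reading the nginx error log; the error handling simply mirrors the ValueError/RuntimeError contract documented above.

```
from sshmcp.resources.logs import get_logs

try:
    # A leading slash is optional; the resource normalizes the path itself
    content = get_logs(
        host="web-1",                       # illustrative host name
        log_path="var/log/nginx/error.log",
        lines=50,
        filter_level="error",
    )
    print(content)
except (ValueError, RuntimeError) as exc:
    print(f"log read failed: {exc}")
```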
@@ -0,0 +1,204 @@
+ """MCP Resource for system metrics."""
+
+ import re
+
+ import structlog
+
+ from sshmcp.config import get_machine
+ from sshmcp.security.audit import get_audit_logger
+ from sshmcp.ssh.pool import get_pool
+
+ logger = structlog.get_logger()
+
+
+ def get_metrics(host: str) -> dict:
+     """
+     Get system metrics from VPS server.
+
+     Returns CPU, memory, disk usage, and uptime information.
+
+     Args:
+         host: Name of the host from machines.json configuration.
+
+     Returns:
+         Dictionary with metrics:
+         - cpu: CPU usage information
+         - memory: Memory usage information
+         - disk: Disk usage information
+         - uptime_seconds: System uptime
+
+     Raises:
+         ValueError: If host not found.
+         RuntimeError: If metrics cannot be retrieved.
+
+     Example:
+         Resource URI: vps://production-server/metrics
+     """
+     audit = get_audit_logger()
+
+     # Get machine configuration
+     try:
+         machine = get_machine(host)
+     except Exception as e:
+         raise ValueError(f"Host not found: {host}") from e
+
+     pool = get_pool()
+     pool.register_machine(machine)
+
+     metrics = {
+         "cpu": {},
+         "memory": {},
+         "disk": {},
+         "uptime_seconds": 0,
+         "host": host,
+     }
+
+     try:
+         client = pool.get_client(host)
+         try:
+             # Get CPU usage
+             cpu_result = client.execute(
+                 "top -bn1 | head -5 | grep 'Cpu' || cat /proc/stat | head -1"
+             )
+             metrics["cpu"] = _parse_cpu(cpu_result.stdout)
+
+             # Get memory usage
+             mem_result = client.execute("free -m")
+             metrics["memory"] = _parse_memory(mem_result.stdout)
+
+             # Get disk usage
+             disk_result = client.execute("df -h / | tail -1")
+             metrics["disk"] = _parse_disk(disk_result.stdout)
+
+             # Get uptime
+             uptime_result = client.execute("cat /proc/uptime")
+             metrics["uptime_seconds"] = _parse_uptime(uptime_result.stdout)
+
+             # Get load average
+             load_result = client.execute("cat /proc/loadavg")
+             metrics["load_average"] = _parse_load(load_result.stdout)
+
+             audit.log(
+                 event="metrics_read",
+                 host=host,
+             )
+
+             return metrics
+
+         finally:
+             pool.release_client(client)
+
+     except Exception as e:
+         raise RuntimeError(f"Failed to get metrics: {e}") from e
+
+
+ def _parse_cpu(output: str) -> dict:
+     """Parse CPU usage from top or /proc/stat output."""
+     cpu_info = {"usage_percent": 0.0, "cores": 1}
+
+     try:
+         # Try parsing top output
+         if "Cpu" in output:
+             # Format: %Cpu(s): 1.2 us, 0.3 sy, 0.0 ni, 98.5 id, ...
+             match = re.search(r"(\d+\.?\d*)\s*id", output)
+             if match:
+                 idle = float(match.group(1))
+                 cpu_info["usage_percent"] = round(100.0 - idle, 1)
+
+         # Core count: os.cpu_count() reports the host running sshmcp, not the remote VPS
+         import os
+
+         cpu_info["cores"] = os.cpu_count() or 1
+
+     except Exception:
+         pass
+
+     return cpu_info
+
+
+ def _parse_memory(output: str) -> dict:
+     """Parse memory usage from free -m output."""
+     mem_info = {"used_mb": 0, "total_mb": 0, "usage_percent": 0.0, "available_mb": 0}
+
+     try:
+         lines = output.strip().split("\n")
+         for line in lines:
+             if line.startswith("Mem:"):
+                 parts = line.split()
+                 if len(parts) >= 4:
+                     mem_info["total_mb"] = int(parts[1])
+                     mem_info["used_mb"] = int(parts[2])
+                     if len(parts) >= 7:
+                         mem_info["available_mb"] = int(parts[6])
+                     else:
+                         mem_info["available_mb"] = (
+                             mem_info["total_mb"] - mem_info["used_mb"]
+                         )
+
+                 if mem_info["total_mb"] > 0:
+                     mem_info["usage_percent"] = round(
+                         mem_info["used_mb"] / mem_info["total_mb"] * 100, 1
+                     )
+     except Exception:
+         pass
+
+     return mem_info
+
+
+ def _parse_disk(output: str) -> dict:
+     """Parse disk usage from df -h output."""
+     disk_info = {
+         "used_gb": 0.0,
+         "total_gb": 0.0,
+         "usage_percent": 0.0,
+         "available_gb": 0.0,
+     }
+
+     try:
+         parts = output.split()
+         if len(parts) >= 5:
+             # Parse size with units (e.g., "50G", "1.2T")
+             def parse_size(s: str) -> float:
+                 s = s.upper()
+                 multipliers = {"K": 1 / 1024 / 1024, "M": 1 / 1024, "G": 1, "T": 1024}
+                 for unit, mult in multipliers.items():
+                     if unit in s:
+                         return float(s.replace(unit, "")) * mult
+                 return float(s)
+
+             disk_info["total_gb"] = round(parse_size(parts[1]), 1)
+             disk_info["used_gb"] = round(parse_size(parts[2]), 1)
+             disk_info["available_gb"] = round(parse_size(parts[3]), 1)
+             disk_info["usage_percent"] = float(parts[4].replace("%", ""))
+
+     except Exception:
+         pass
+
+     return disk_info
+
+
+ def _parse_uptime(output: str) -> int:
+     """Parse uptime from /proc/uptime."""
+     try:
+         parts = output.strip().split()
+         if parts:
+             return int(float(parts[0]))
+     except Exception:
+         pass
+     return 0
+
+
+ def _parse_load(output: str) -> dict:
+     """Parse load average from /proc/loadavg."""
+     load = {"1min": 0.0, "5min": 0.0, "15min": 0.0}
+
+     try:
+         parts = output.strip().split()
+         if len(parts) >= 3:
+             load["1min"] = float(parts[0])
+             load["5min"] = float(parts[1])
+             load["15min"] = float(parts[2])
+     except Exception:
+         pass
+
+     return load
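
Finally, a sketch of the metrics resource in use; the returned keys are the ones populated in get_metrics above (cpu, memory, disk, uptime_seconds, load_average, host), while the host name and the 90% threshold are illustrative.

```
from sshmcp.resources.metrics import get_metrics

metrics = get_metrics("web-1")  # illustrative host name

# Flag a nearly full root filesystem based on the parsed df output
if metrics["disk"].get("usage_percent", 0.0) > 90:
    print(f"disk almost full on {metrics['host']}")

print(metrics["memory"])        # used_mb / total_mb / usage_percent / available_mb
print(metrics["load_average"])  # {"1min": ..., "5min": ..., "15min": ...}
```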