mdify-cli 2.11.9__py3-none-any.whl → 3.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mdify/__init__.py +1 -1
- mdify/cli.py +542 -0
- mdify/container.py +0 -4
- mdify/ssh/__init__.py +11 -0
- mdify/ssh/client.py +408 -0
- mdify/ssh/models.py +458 -0
- mdify/ssh/remote_container.py +237 -0
- mdify/ssh/transfer.py +297 -0
- {mdify_cli-2.11.9.dist-info → mdify_cli-3.0.0.dist-info}/METADATA +192 -4
- mdify_cli-3.0.0.dist-info/RECORD +17 -0
- mdify_cli-2.11.9.dist-info/RECORD +0 -12
- {mdify_cli-2.11.9.dist-info → mdify_cli-3.0.0.dist-info}/WHEEL +0 -0
- {mdify_cli-2.11.9.dist-info → mdify_cli-3.0.0.dist-info}/entry_points.txt +0 -0
- {mdify_cli-2.11.9.dist-info → mdify_cli-3.0.0.dist-info}/licenses/LICENSE +0 -0
- {mdify_cli-2.11.9.dist-info → mdify_cli-3.0.0.dist-info}/top_level.txt +0 -0
mdify/ssh/models.py
ADDED
|
@@ -0,0 +1,458 @@
|
|
|
1
|
+
"""Data models for SSH remote server support."""
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from typing import Literal
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
import uuid
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class SSHError(Exception):
    """Root of the SSH exception hierarchy; every SSH-related error derives from this."""
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class SSHConnectionError(SSHError):
    """Raised when an SSH connection cannot be established or kept alive."""

    def __init__(self, message: str, host: str, port: int):
        # Keep the raw pieces available so callers can inspect them individually.
        self.message = message
        self.host = host
        self.port = port
        detail = f"{message} ({host}:{port})"
        super().__init__(detail)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class SSHAuthError(SSHConnectionError):
    """Raised when SSH authentication is rejected (bad password, key, or permissions)."""
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class ConfigError(SSHError):
    """Raised when the SSH configuration is malformed or missing required data."""
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class ValidationError(SSHError):
    """Raised when a remote resource fails a validation check."""
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@dataclass
class SSHConfig:
    """SSH connection configuration with precedence-aware merging.

    Instances can be constructed from three sources — CLI arguments
    (:meth:`from_cli_args`), the user's ``~/.ssh/config``
    (:meth:`from_ssh_config`), and ``~/.mdify/remote.conf``
    (:meth:`from_remote_conf`) — and then combined with :meth:`merge`.
    """

    # Required fields
    host: str
    port: int = 22
    username: str = ""  # empty string = let the SSH layer pick the default user

    # Authentication
    password: str | None = None
    key_file: str | None = None
    key_passphrase: str | None = None

    # Connection behavior
    timeout: int = 30     # connect timeout, seconds (validated > 0 below)
    keepalive: int = 60   # keepalive interval, seconds
    compression: bool = False

    # Remote environment
    work_dir: str = "/tmp/mdify"          # scratch directory on the remote host
    container_runtime: str | None = None  # None = auto-detect on the remote

    # Metadata (provenance of this config object)
    source: str = "cli"             # "cli" | "ssh_config" | "remote_conf"
    config_file: str | None = None  # file this config was loaded from, if any
    created_at: datetime = field(default_factory=datetime.now)

    # Validation metadata
    validated: bool = False
    validation_errors: list[str] = field(default_factory=list)

    def __post_init__(self) -> None:
        """Validate config after initialization.

        Raises:
            ConfigError: host missing, port out of range, or timeout non-positive.
        """
        if not self.host:
            raise ConfigError("host is required")
        if not 1 <= self.port <= 65535:
            raise ConfigError(f"port must be 1-65535, got {self.port}")
        if self.timeout < 1:
            raise ConfigError(f"timeout must be positive, got {self.timeout}")

    @classmethod
    def from_cli_args(cls, args) -> "SSHConfig":
        """Create SSHConfig from CLI argument namespace.

        Only attributes that are present on ``args`` AND truthy are copied,
        so unset CLI flags do not appear in the resulting kwargs.

        Parameters:
            args: Parsed CLI arguments (argparse.Namespace)

        Returns:
            SSHConfig instance with source='cli'

        Raises:
            ConfigError: Invalid configuration
        """
        # Extract only CLI-provided values (not defaults)
        kwargs = {"source": "cli"}

        if hasattr(args, "remote_host") and args.remote_host:
            kwargs["host"] = args.remote_host
        if hasattr(args, "remote_port") and args.remote_port:
            kwargs["port"] = args.remote_port
        if hasattr(args, "remote_user") and args.remote_user:
            kwargs["username"] = args.remote_user
        if hasattr(args, "remote_key") and args.remote_key:
            kwargs["key_file"] = args.remote_key
        if hasattr(args, "remote_key_pass_phrase") and args.remote_key_pass_phrase:
            kwargs["key_passphrase"] = args.remote_key_pass_phrase
        if hasattr(args, "remote_timeout") and args.remote_timeout:
            kwargs["timeout"] = args.remote_timeout
        if hasattr(args, "remote_keepalive") and args.remote_keepalive:
            kwargs["keepalive"] = args.remote_keepalive
        if hasattr(args, "remote_work_dir") and args.remote_work_dir:
            kwargs["work_dir"] = args.remote_work_dir
        if hasattr(args, "remote_runtime") and args.remote_runtime:
            kwargs["container_runtime"] = args.remote_runtime
        if hasattr(args, "remote_compression") and args.remote_compression:
            kwargs["compression"] = args.remote_compression

        # For partial configs (CLI may only override one or two fields),
        # we need a default host to create the object.
        # NOTE(review): if this config is never merged with one that carries a
        # real host, "localhost" leaks through as the target — confirm callers
        # always merge or supply --remote-host.
        if "host" not in kwargs:
            kwargs["host"] = "localhost"  # Temporary, will be overridden by merge

        return cls(**kwargs)

    @classmethod
    def from_ssh_config(cls, host: str, ssh_config_path: str | None = None) -> "SSHConfig":
        """Load SSH config for host from ~/.ssh/config.

        A missing SSH config file is not an error: an all-defaults config for
        ``host`` is returned in that case.

        Parameters:
            host: Host alias or hostname to look up
            ssh_config_path: Path to SSH config file (defaults to ~/.ssh/config)

        Returns:
            SSHConfig instance with source='ssh_config'

        Raises:
            ConfigError: SSH config file not found or invalid
        """
        import os
        from pathlib import Path

        if ssh_config_path is None:
            ssh_config_path = "~/.ssh/config"

        ssh_config_path = os.path.expanduser(ssh_config_path)

        if not Path(ssh_config_path).exists():
            # SSH config is optional; return defaults if not found
            return cls(
                host=host,
                source="ssh_config",
                config_file=ssh_config_path,
            )

        try:
            # For SSH config loading, we'll use a simple approach:
            # Parse the SSH config file ourselves to extract host configuration
            config_data = cls._parse_ssh_config_file(ssh_config_path, host)

            kwargs = {
                "source": "ssh_config",
                "config_file": ssh_config_path,
                # HostName directive (if any) replaces the alias as the real host
                "host": config_data.get("hostname", host),
            }

            if "port" in config_data:
                kwargs["port"] = int(config_data["port"])
            if "user" in config_data:
                kwargs["username"] = config_data["user"]
            if "identityfile" in config_data:
                # Use first identity file, expand ~ if present
                identity_files = config_data["identityfile"]
                if isinstance(identity_files, list):
                    kwargs["key_file"] = os.path.expanduser(identity_files[0])
                else:
                    kwargs["key_file"] = os.path.expanduser(identity_files)
            if "connecttimeout" in config_data:
                kwargs["timeout"] = int(config_data["connecttimeout"])
            if "serveraliveinterval" in config_data:
                kwargs["keepalive"] = int(config_data["serveraliveinterval"])
            if "compression" in config_data:
                compression_str = config_data["compression"]
                kwargs["compression"] = compression_str.lower() in ("yes", "true", "1")

            return cls(**kwargs)

        except Exception as e:
            raise ConfigError(f"Failed to load SSH config: {e}")

    @staticmethod
    def _parse_ssh_config_file(config_path: str, target_host: str) -> dict:
        """Parse SSH config file for a specific host.

        Minimal hand-rolled parser: tracks whether the current ``Host`` block
        applies to ``target_host`` (exact token match or a literal ``*``) and
        collects lowercase directive keys from matching blocks.

        NOTE(review): a non-matching Host block resets already-collected data
        (``config_data = {}``), so options from an earlier matching ``Host *``
        block are discarded when a later unrelated block appears — confirm this
        is the intended precedence (OpenSSH keeps first-seen values instead).

        Parameters:
            config_path: Path to SSH config file
            target_host: Host alias to look for

        Returns:
            Dictionary of configuration values for the host
        """
        config_data = {}
        current_hosts = []
        in_target_block = False

        try:
            with open(config_path, 'r') as f:
                for line in f:
                    line = line.strip()

                    # Skip comments and empty lines
                    if not line or line.startswith('#'):
                        continue

                    # Check for Host directive
                    if line.lower().startswith('host '):
                        parts = line.split(None, 1)
                        if len(parts) == 2:
                            hosts = parts[1].split()
                            in_target_block = target_host in hosts or '*' in hosts
                            if not in_target_block:
                                config_data = {}
                        continue

                    # Parse config directives
                    if in_target_block:
                        parts = line.split(None, 1)
                        if len(parts) == 2:
                            key = parts[0].lower()
                            value = parts[1]

                            # Handle multi-value options (identity files)
                            if key == 'identityfile':
                                if key not in config_data:
                                    config_data[key] = []
                                if isinstance(config_data[key], list):
                                    config_data[key].append(value)
                                else:
                                    config_data[key] = [config_data[key], value]
                            else:
                                # For single-value options, use the first occurrence
                                if key not in config_data:
                                    config_data[key] = value
        except Exception as e:
            raise ConfigError(f"Failed to parse SSH config file {config_path}: {e}")

        return config_data

    @classmethod
    def from_remote_conf(cls, remote_conf_path: str | None = None) -> "SSHConfig":
        """Load SSH config from ~/.mdify/remote.conf.

        The YAML file is expected to contain a ``defaults`` mapping and a
        ``servers`` mapping; server entries override defaults.

        NOTE(review): only the *first* server entry is ever used — there is no
        parameter for selecting a server by name despite the comment below;
        confirm whether named selection was intended.

        Parameters:
            remote_conf_path: Path to remote config file

        Returns:
            SSHConfig instance with source='remote_conf'

        Raises:
            ConfigError: Config file not found or invalid
        """
        import yaml
        import os

        if remote_conf_path is None:
            remote_conf_path = "~/.mdify/remote.conf"

        remote_conf_path = os.path.expanduser(remote_conf_path)

        if not Path(remote_conf_path).exists():
            raise ConfigError(f"Remote config file not found: {remote_conf_path}")

        try:
            with open(remote_conf_path, "r") as f:
                config_data = yaml.safe_load(f) or {}

            # Extract defaults
            defaults = config_data.get("defaults", {})
            servers = config_data.get("servers", {})

            if not servers:
                raise ConfigError("No servers defined in remote config")

            # Use first server or named one
            first_server_name = next(iter(servers.keys()))
            server_config = servers[first_server_name]

            # Merge defaults with server config (server overrides defaults)
            merged = {**defaults, **server_config}

            # Build SSHConfig
            kwargs = {
                "source": "remote_conf",
                "config_file": remote_conf_path,
            }

            if "host" in merged:
                kwargs["host"] = merged["host"]
            else:
                raise ConfigError(f"Server '{first_server_name}' missing 'host' field")

            if "port" in merged:
                kwargs["port"] = int(merged["port"])
            if "username" in merged:
                kwargs["username"] = merged["username"]
            if "key_file" in merged:
                kwargs["key_file"] = merged["key_file"]
            if "timeout" in merged:
                kwargs["timeout"] = int(merged["timeout"])
            if "keepalive" in merged:
                kwargs["keepalive"] = int(merged["keepalive"])
            if "compression" in merged:
                kwargs["compression"] = bool(merged["compression"])
            if "work_dir" in merged:
                kwargs["work_dir"] = merged["work_dir"]
            if "container_runtime" in merged:
                kwargs["container_runtime"] = merged["container_runtime"]

            return cls(**kwargs)

        except yaml.YAMLError as e:
            raise ConfigError(f"Invalid YAML in remote config: {e}")
        except Exception as e:
            raise ConfigError(f"Failed to load remote config: {e}")

    def merge(self, higher_precedence: "SSHConfig") -> "SSHConfig":
        """Merge with higher precedence config.

        NOTE(review): fields in ``higher_precedence`` that merely carry their
        dataclass defaults (e.g. port=22, work_dir="/tmp/mdify") are truthy /
        non-None and therefore still override explicit values from ``self``,
        because "was explicitly set" is not tracked — confirm this is intended.

        Parameters:
            higher_precedence: Config with higher precedence (e.g., CLI args)

        Returns:
            Merged SSHConfig with higher precedence values
        """
        # Use higher precedence value if provided, otherwise use self
        def pick_value(self_val, higher_val, is_string=True):
            if is_string:
                # truthiness test: empty string / 0 / None fall back to self
                return higher_val if higher_val else self_val
            else:
                # None test only: 0 and False from higher still win
                return higher_val if higher_val is not None else self_val

        return SSHConfig(
            host=pick_value(self.host, higher_precedence.host),
            port=pick_value(self.port, higher_precedence.port, is_string=False),
            username=pick_value(self.username, higher_precedence.username),
            password=pick_value(self.password, higher_precedence.password),
            key_file=pick_value(self.key_file, higher_precedence.key_file),
            key_passphrase=pick_value(self.key_passphrase, higher_precedence.key_passphrase),
            timeout=pick_value(self.timeout, higher_precedence.timeout, is_string=False),
            keepalive=pick_value(self.keepalive, higher_precedence.keepalive, is_string=False),
            compression=pick_value(self.compression, higher_precedence.compression, is_string=False),
            work_dir=pick_value(self.work_dir, higher_precedence.work_dir),
            container_runtime=pick_value(self.container_runtime, higher_precedence.container_runtime),
            source=higher_precedence.source,  # Track higher precedence source
        )

    def to_dict(self) -> dict:
        """Convert to dictionary for serialization (excludes secrets).

        Password, key file path, and key passphrase are deliberately omitted.
        """
        return {
            "host": self.host,
            "port": self.port,
            "username": self.username or "default",
            "timeout": self.timeout,
            "keepalive": self.keepalive,
            "compression": self.compression,
            "work_dir": self.work_dir,
            "container_runtime": self.container_runtime or "auto-detect",
            "source": self.source,
        }
|
|
365
|
+
|
|
366
|
+
|
|
367
|
+
@dataclass
|
|
368
|
+
class TransferSession:
|
|
369
|
+
"""Active file transfer session with progress tracking."""
|
|
370
|
+
|
|
371
|
+
session_id: str = field(default_factory=lambda: str(uuid.uuid4()))
|
|
372
|
+
local_path: str = ""
|
|
373
|
+
remote_path: str = ""
|
|
374
|
+
direction: Literal["upload", "download"] = "upload"
|
|
375
|
+
|
|
376
|
+
# Progress tracking
|
|
377
|
+
total_bytes: int = 0
|
|
378
|
+
transferred_bytes: int = 0
|
|
379
|
+
start_time: datetime = field(default_factory=datetime.now)
|
|
380
|
+
end_time: datetime | None = None
|
|
381
|
+
|
|
382
|
+
# Status
|
|
383
|
+
status: Literal["pending", "in_progress", "completed", "failed", "cancelled"] = "pending"
|
|
384
|
+
error_message: str | None = None
|
|
385
|
+
|
|
386
|
+
# Performance metrics
|
|
387
|
+
avg_speed_mbps: float = 0.0
|
|
388
|
+
current_speed_mbps: float = 0.0
|
|
389
|
+
eta_seconds: int | None = None
|
|
390
|
+
|
|
391
|
+
# Debugging
|
|
392
|
+
debug_mode: bool = False
|
|
393
|
+
chunk_log: list[str] = field(default_factory=list)
|
|
394
|
+
|
|
395
|
+
def update_progress(self, transferred_bytes: int) -> None:
|
|
396
|
+
"""Update transfer progress and recalculate speed/ETA."""
|
|
397
|
+
elapsed = (datetime.now() - self.start_time).total_seconds()
|
|
398
|
+
if elapsed <= 0:
|
|
399
|
+
return
|
|
400
|
+
|
|
401
|
+
self.transferred_bytes = transferred_bytes
|
|
402
|
+
self.avg_speed_mbps = (transferred_bytes / elapsed) / (1024 * 1024)
|
|
403
|
+
|
|
404
|
+
if self.avg_speed_mbps > 0 and self.transferred_bytes < self.total_bytes:
|
|
405
|
+
remaining_bytes = self.total_bytes - self.transferred_bytes
|
|
406
|
+
self.eta_seconds = int(remaining_bytes / (self.avg_speed_mbps * 1024 * 1024))
|
|
407
|
+
else:
|
|
408
|
+
self.eta_seconds = None
|
|
409
|
+
|
|
410
|
+
def complete(self) -> None:
|
|
411
|
+
"""Mark transfer as completed."""
|
|
412
|
+
self.end_time = datetime.now()
|
|
413
|
+
self.status = "completed"
|
|
414
|
+
|
|
415
|
+
elapsed = (self.end_time - self.start_time).total_seconds()
|
|
416
|
+
if elapsed > 0:
|
|
417
|
+
self.avg_speed_mbps = (self.total_bytes / elapsed) / (1024 * 1024)
|
|
418
|
+
|
|
419
|
+
def fail(self, error: Exception) -> None:
|
|
420
|
+
"""Mark transfer as failed."""
|
|
421
|
+
self.end_time = datetime.now()
|
|
422
|
+
self.status = "failed"
|
|
423
|
+
self.error_message = str(error)
|
|
424
|
+
|
|
425
|
+
|
|
426
|
+
@dataclass
|
|
427
|
+
class RemoteContainerState:
|
|
428
|
+
"""State of a container running on a remote server."""
|
|
429
|
+
|
|
430
|
+
container_id: str = ""
|
|
431
|
+
container_name: str = ""
|
|
432
|
+
host: str = ""
|
|
433
|
+
port: int = 8000
|
|
434
|
+
|
|
435
|
+
# Runtime state
|
|
436
|
+
runtime: Literal["docker", "podman"] = "docker"
|
|
437
|
+
is_running: bool = False
|
|
438
|
+
health_status: Literal["healthy", "unhealthy", "unknown"] = "unknown"
|
|
439
|
+
|
|
440
|
+
# Lifecycle timestamps
|
|
441
|
+
created_at: datetime | None = None
|
|
442
|
+
started_at: datetime | None = None
|
|
443
|
+
stopped_at: datetime | None = None
|
|
444
|
+
|
|
445
|
+
# Status details
|
|
446
|
+
exit_code: int | None = None
|
|
447
|
+
error_message: str | None = None
|
|
448
|
+
|
|
449
|
+
# Network info
|
|
450
|
+
base_url: str = ""
|
|
451
|
+
|
|
452
|
+
# Metadata
|
|
453
|
+
created_by: str = ""
|
|
454
|
+
tags: dict[str, str] = field(default_factory=dict)
|
|
455
|
+
|
|
456
|
+
def is_accessible(self) -> bool:
|
|
457
|
+
"""Check if container is running and healthy."""
|
|
458
|
+
return self.is_running and self.health_status == "healthy"
|
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
"""Remote container management over SSH."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import uuid
|
|
5
|
+
from typing import Literal
|
|
6
|
+
from mdify.container import DoclingContainer
|
|
7
|
+
from mdify.ssh.models import RemoteContainerState
|
|
8
|
+
|
|
9
|
+
logger = logging.getLogger(__name__)
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class RemoteContainer(DoclingContainer):
    """Container running on a remote server, managed over an SSH session.

    Every operation (run/stop/logs/health) is performed by sending docker or
    podman CLI commands through the attached SSH client.
    """

    def __init__(
        self,
        ssh_client,
        image: str = "docling-serve:latest",
        port: int = 8000,
        runtime: Literal["docker", "podman"] = "docker",
        name: str | None = None,
        timeout: int = 30,
        health_check_interval: int = 2
    ):
        """Initialize remote container manager.

        Parameters:
            ssh_client: Connected AsyncSSHClient instance
            image: Container image name
            port: Host port to expose from the container
            runtime: Container runtime ("docker" or "podman")
            name: Container name (auto-generated "mdify-<hex>" if None)
            timeout: Timeout for remote operations in seconds
            health_check_interval: Health check poll interval in seconds
        """
        # Initialize base class with the shared container settings
        super().__init__(
            runtime=runtime,
            image=image,
            port=port,
            timeout=timeout
        )

        self.ssh_client = ssh_client
        self.name = name or f"mdify-{uuid.uuid4().hex[:8]}"
        self.health_check_interval = health_check_interval

        # Remote-side state snapshot; base_url targets the SSH host itself
        self.state = RemoteContainerState(
            container_name=self.name,
            port=port,
            runtime=runtime,
            host=ssh_client.config.host,
            base_url=f"http://{ssh_client.config.host}:{port}"
        )
        self.is_healthy = False

    async def start(self) -> None:
        """Start container on remote server.

        Operations:
            1. Detect container runtime on remote (if not already set)
            2. Run docker/podman run command
            3. Extract container ID from stdout
            4. Wait for health check

        Raises:
            RuntimeError: Container already running or start failed
            SSHConnectionError: SSH connection lost
        """
        if self.state.is_running:
            raise RuntimeError(f"Container {self.name} is already running")

        logger.info(f"Starting remote container: {self.name}")

        try:
            # Detect runtime if needed
            # NOTE(review): `runtime` defaults to "docker" in __init__, so this
            # branch only fires if the base class can leave self.runtime falsy
            # — confirm against DoclingContainer.
            if not self.runtime:
                runtime = await self.ssh_client.check_container_runtime()
                if not runtime:
                    raise RuntimeError("No container runtime available on remote")
                self.runtime = runtime

            # Build docker/podman command.
            # NOTE(review): maps host port -> container port 5001; assumes the
            # image's service listens on 5001 — confirm against the image.
            cmd = (
                f"{self.runtime} run "
                f"--name {self.name} "
                f"--publish {self.port}:5001 "
                f"--detach "
                f"{self.image}"
            )

            logger.debug(f"Running: {cmd}")
            stdout, stderr, code = await self.ssh_client.run_command(cmd, timeout=self.timeout)

            if code != 0:
                raise RuntimeError(f"Container start failed: {stderr}")

            # `run --detach` prints the new container ID on stdout
            container_id = stdout.strip()
            self.state.container_id = container_id
            self.state.is_running = True

            logger.info(f"Container started: {container_id}")

            # Wait for health check
            await self._wait_for_health()

        except Exception as e:
            # Any failure (including an unhealthy container) marks us stopped
            self.state.is_running = False
            logger.error(f"Container start failed: {e}")
            raise

    async def stop(self, force: bool = False) -> None:
        """Stop container on remote server.

        Parameters:
            force: If True, kill container; if False, graceful stop

        Raises:
            RuntimeError: Container not running
            SSHConnectionError: SSH connection lost
        """
        if not self.state.is_running:
            raise RuntimeError(f"Container {self.name} is not running")

        logger.info(f"Stopping remote container: {self.name}")

        try:
            action = "stop" if not force else "kill"
            cmd = f"{self.runtime} {action} {self.state.container_id}"

            _stdout, stderr, code = await self.ssh_client.run_command(cmd, timeout=self.timeout)

            # Non-zero exit is only logged; removal is still attempted
            if code != 0:
                logger.warning(f"Container stop returned code {code}: {stderr}")

            # Remove container
            cmd = f"{self.runtime} rm {self.state.container_id}"
            _stdout, stderr, code = await self.ssh_client.run_command(cmd, timeout=self.timeout)

            if code != 0:
                logger.warning(f"Container remove returned code {code}: {stderr}")
            else:
                logger.debug(f"Container removed: {self.state.container_id}")

            # Marked stopped even if stop/rm reported errors above
            self.state.is_running = False
            logger.info(f"Container stopped: {self.state.container_id}")

        except Exception as e:
            logger.error(f"Container stop failed: {e}")
            raise

    async def is_running(self) -> bool:
        """Check if container is running (also refreshes self.state.is_running).

        Returns:
            True if container is running
        """
        try:
            # NOTE(review): `--filter name=` matches container *names*, but a
            # container ID is passed here; `--filter id=` may be intended —
            # verify this actually matches on docker/podman.
            cmd = f"{self.runtime} ps --filter name={self.state.container_id} --format '{{{{.ID}}}}'"
            stdout, stderr, code = await self.ssh_client.run_command(cmd)

            # Running iff the ps listing produced any output
            is_running = code == 0 and stdout.strip() != ""
            self.state.is_running = is_running
            return is_running

        except Exception as e:
            logger.error(f"Could not check if running: {e}")
            return False

    async def check_health(self) -> bool:
        """Check container health by probing the HTTP service.

        Returns:
            True if health check passes
        """
        if not self.state.is_running:
            return False

        try:
            # Use curl inside SSH session to check if service responds.
            # The docling-serve doesn't have a /health endpoint, so we check if it responds at all.
            # Note: curl runs on the *remote* host, so this probes the published
            # port locally there, not external reachability.
            cmd = f"curl -s -o /dev/null -w '%{{http_code}}' http://localhost:{self.port}/"
            stdout, stderr, code = await self.ssh_client.run_command(cmd, timeout=5)

            # Any HTTP response (even 404) means the service is running
            http_code = stdout.strip()
            is_healthy = http_code in ["200", "404", "422"]  # 404 = Not Found is OK, 422 = Unprocessable Entity
            self.state.health_status = "healthy" if is_healthy else "unhealthy"
            return is_healthy

        except Exception as e:
            logger.debug(f"Health check failed: {e}")
            self.state.health_status = "unknown"
            return False

    async def get_logs(self, lines: int = 50) -> str:
        """Get container logs.

        Parameters:
            lines: Number of recent log lines

        Returns:
            Container logs as string (or an error description — never raises)
        """
        try:
            cmd = f"{self.runtime} logs --tail {lines} {self.state.container_id}"
            stdout, stderr, code = await self.ssh_client.run_command(cmd)

            return stdout if code == 0 else f"Error getting logs: {stderr}"

        except Exception as e:
            logger.error(f"Could not get logs: {e}")
            return f"Error: {e}"

    async def _wait_for_health(self, max_attempts: int = 30) -> None:
        """Wait for container to become healthy.

        Polls check_health() every self.health_check_interval seconds.

        Parameters:
            max_attempts: Maximum health check attempts

        Raises:
            TimeoutError: Container didn't become healthy
        """
        import asyncio

        for attempt in range(max_attempts):
            if await self.check_health():
                self.is_healthy = True
                logger.info(f"Container became healthy after {attempt * self.health_check_interval}s")
                return

            await asyncio.sleep(self.health_check_interval)

        raise TimeoutError(
            f"Container did not become healthy after {max_attempts * self.health_check_interval}s"
        )
|