py2ls 0.1.10.12__py3-none-any.whl → 0.2.7.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of py2ls might be problematic.
- py2ls/.DS_Store +0 -0
- py2ls/.git/.DS_Store +0 -0
- py2ls/.git/index +0 -0
- py2ls/.git/logs/refs/remotes/origin/HEAD +1 -0
- py2ls/.git/objects/.DS_Store +0 -0
- py2ls/.git/refs/.DS_Store +0 -0
- py2ls/ImageLoader.py +621 -0
- py2ls/__init__.py +7 -5
- py2ls/apptainer2ls.py +3940 -0
- py2ls/batman.py +164 -42
- py2ls/bio.py +2595 -0
- py2ls/cell_image_clf.py +1632 -0
- py2ls/container2ls.py +4635 -0
- py2ls/corr.py +475 -0
- py2ls/data/.DS_Store +0 -0
- py2ls/data/email/email_html_template.html +88 -0
- py2ls/data/hyper_param_autogluon_zeroshot2024.json +2383 -0
- py2ls/data/hyper_param_tabrepo_2024.py +1753 -0
- py2ls/data/mygenes_fields_241022.txt +355 -0
- py2ls/data/re_common_pattern.json +173 -0
- py2ls/data/sns_info.json +74 -0
- py2ls/data/styles/.DS_Store +0 -0
- py2ls/data/styles/example/.DS_Store +0 -0
- py2ls/data/styles/stylelib/.DS_Store +0 -0
- py2ls/data/styles/stylelib/grid.mplstyle +15 -0
- py2ls/data/styles/stylelib/high-contrast.mplstyle +6 -0
- py2ls/data/styles/stylelib/high-vis.mplstyle +4 -0
- py2ls/data/styles/stylelib/ieee.mplstyle +15 -0
- py2ls/data/styles/stylelib/light.mplstyl +6 -0
- py2ls/data/styles/stylelib/muted.mplstyle +6 -0
- py2ls/data/styles/stylelib/nature-reviews-latex.mplstyle +616 -0
- py2ls/data/styles/stylelib/nature-reviews.mplstyle +616 -0
- py2ls/data/styles/stylelib/nature.mplstyle +31 -0
- py2ls/data/styles/stylelib/no-latex.mplstyle +10 -0
- py2ls/data/styles/stylelib/notebook.mplstyle +36 -0
- py2ls/data/styles/stylelib/paper.mplstyle +290 -0
- py2ls/data/styles/stylelib/paper2.mplstyle +305 -0
- py2ls/data/styles/stylelib/retro.mplstyle +4 -0
- py2ls/data/styles/stylelib/sans.mplstyle +10 -0
- py2ls/data/styles/stylelib/scatter.mplstyle +7 -0
- py2ls/data/styles/stylelib/science.mplstyle +48 -0
- py2ls/data/styles/stylelib/std-colors.mplstyle +4 -0
- py2ls/data/styles/stylelib/vibrant.mplstyle +6 -0
- py2ls/data/tiles.csv +146 -0
- py2ls/data/usages_pd.json +1417 -0
- py2ls/data/usages_sns.json +31 -0
- py2ls/docker2ls.py +5446 -0
- py2ls/ec2ls.py +61 -0
- py2ls/fetch_update.py +145 -0
- py2ls/ich2ls.py +1955 -296
- py2ls/im2.py +8242 -0
- py2ls/image_ml2ls.py +2100 -0
- py2ls/ips.py +33909 -3418
- py2ls/ml2ls.py +7700 -0
- py2ls/mol.py +289 -0
- py2ls/mount2ls.py +1307 -0
- py2ls/netfinder.py +873 -351
- py2ls/nl2ls.py +283 -0
- py2ls/ocr.py +1581 -458
- py2ls/plot.py +10394 -314
- py2ls/rna2ls.py +311 -0
- py2ls/ssh2ls.md +456 -0
- py2ls/ssh2ls.py +5933 -0
- py2ls/ssh2ls_v01.py +2204 -0
- py2ls/stats.py +66 -172
- py2ls/temp20251124.py +509 -0
- py2ls/translator.py +2 -0
- py2ls/utils/decorators.py +3564 -0
- py2ls/utils_bio.py +3453 -0
- {py2ls-0.1.10.12.dist-info → py2ls-0.2.7.10.dist-info}/METADATA +113 -224
- {py2ls-0.1.10.12.dist-info → py2ls-0.2.7.10.dist-info}/RECORD +72 -16
- {py2ls-0.1.10.12.dist-info → py2ls-0.2.7.10.dist-info}/WHEEL +0 -0
py2ls/mount2ls.py
ADDED
@@ -0,0 +1,1307 @@
#!/usr/bin/env python3
"""
S3FS Mount Manager - Ultimate Edition
A comprehensive tool for mounting S3 buckets using s3fs with enterprise-grade features.

Features:
1. Automatic credential discovery from multiple sources
2. Smart mount detection and management
3. Fstab persistence management
4. Multiple bucket/profile support
5. Health checks and diagnostics
6. Graceful error handling
7. Configuration presets
8. Performance tuning options
Usage:

    from s3fs_manager import S3FSMountManager

    # Initialize manager
    manager = S3FSMountManager()

    # Create configuration
    config = manager.create_config(
        bucket_name="my-data-bucket",
        profile="production",
        mount_point="/mnt/s3-data",
        preset="performance"
    )

    # Mount the bucket
    success = manager.mount(config)

    # Check health
    health = manager.check_mount_health("/mnt/s3-data")
    print(f"Mount status: {health.status}")

2. Command Line Interface
    # Mount a bucket
    python mount2ls.py mount my-bucket --profile production --save-as prod-config

    # Show status
    python mount2ls.py status

    # Add to fstab for boot-time mounting
    sudo python mount2ls.py fstab add my-bucket --comment "Production data"

    # Monitor mounts
    python mount2ls.py monitor start --interval 30

    # Unmount
    python mount2ls.py unmount /mnt/s3-data
"""

import os
import sys
import json
import shlex
import signal
import logging
import argparse
import configparser
import subprocess
from pathlib import Path
from datetime import datetime
from typing import Dict, List, Optional, Tuple, Union, Any
from dataclasses import dataclass, field, asdict
from enum import Enum
import threading
import time

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class MountStatus(Enum):
    """Mount status enumeration"""
    NOT_MOUNTED = "not_mounted"
    MOUNTED = "mounted"
    STALE = "stale"
    ERROR = "error"


class CredentialSource(Enum):
    """Credential source enumeration"""
    AWS_PROFILE = "aws_profile"
    ENVIRONMENT = "environment"
    FILE = "file"
    IAM_ROLE = "iam_role"
    ECS_METADATA = "ecs_metadata"


@dataclass
class S3MountConfig:
    """S3 mount configuration dataclass"""
    profile: str = "default"
    bucket_name: str = ""
    mount_point: Path = None
    endpoint_url: str = "https://s3.amazonaws.com"
    region: str = "us-east-1"
    access_key: str = ""
    secret_key: str = ""
    session_token: str = ""
    iam_role: str = ""

    # Mount options
    use_cache: bool = True
    cache_location: Path = Path("/tmp/s3fs_cache")
    allow_other: bool = False
    allow_root: bool = False
    uid: Optional[int] = None
    gid: Optional[int] = None
    umask: str = "000"
    retries: int = 5
    connect_timeout: int = 30
    readwrite_timeout: int = 300
    max_stat_cache_size: int = 100000
    stat_cache_expire: int = 900
    enable_noobj_cache: bool = True

    # Performance tuning
    parallel_count: int = 5
    multipart_size: int = 512  # MB
    max_upload_threads: int = 5
    ensure_diskfree: int = 10240  # 10GB minimum free space

    # Advanced options
    use_path_request_style: bool = False
    sse: bool = False
    sse_kms_key_id: str = ""
    storage_class: str = "STANDARD"

    # Health check
    health_check_interval: int = 60  # seconds
    auto_remount: bool = True
    auto_remount_attempts: int = 3

    def __post_init__(self):
        if not self.mount_point:
            self.mount_point = Path.home() / self.bucket_name


@dataclass
class MountHealth:
    """Mount health information"""
    status: MountStatus
    mount_time: Optional[datetime] = None
    last_access: Optional[datetime] = None
    read_speed: Optional[float] = None  # MB/s
    write_speed: Optional[float] = None  # MB/s
    latency: Optional[float] = None  # ms
    errors: List[str] = field(default_factory=list)
    warnings: List[str] = field(default_factory=list)


class S3FSMountManager:
    """
    Ultimate S3FS Mount Manager

    A comprehensive tool for mounting S3 buckets with enterprise features:
    - Automatic credential discovery from multiple sources
    - Smart mount detection and management
    - Health monitoring and auto-recovery
    - Performance tuning
    - Configuration presets
    - Multi-bucket support

    Example:
        >>> manager = S3FSMountManager()
        >>> config = manager.create_config(
        ...     bucket_name="my-bucket",
        ...     profile="production"
        ... )
        >>> success = manager.mount(config)
        >>> health = manager.check_health(config.mount_point)
    """

    # Default configuration presets
    PRESETS = {
        "standard": {
            "retries": 5,
            "parallel_count": 5,
            "max_stat_cache_size": 100000,
        },
        "performance": {
            "parallel_count": 20,
            "multipart_size": 1024,
            "max_upload_threads": 10,
            "max_stat_cache_size": 500000,
        },
        "reliable": {
            "retries": 10,
            "connect_timeout": 60,
            "readwrite_timeout": 600,
            "auto_remount": True,
            "auto_remount_attempts": 5,
        },
        "low_memory": {
            "parallel_count": 2,
            "max_stat_cache_size": 10000,
            "enable_noobj_cache": False,
        },
        "high_latency": {
            "connect_timeout": 120,
            "readwrite_timeout": 900,
            "retries": 15,
            "parallel_count": 3,
        }
    }

    def __init__(self, config_dir: Optional[Path] = None):
        """
        Initialize S3FS Mount Manager

        Args:
            config_dir: Directory for storing configuration files
        """
        self.config_dir = config_dir or Path.home() / ".config" / "s3fs-manager"
        self.config_dir.mkdir(parents=True, exist_ok=True)

        # Load saved configurations
        self.saved_configs = self._load_saved_configs()

        # Active mounts tracking
        self.active_mounts: Dict[Path, S3MountConfig] = {}
        self.mount_health: Dict[Path, MountHealth] = {}

        # Health monitoring thread
        self.monitor_thread = None
        self.monitor_running = False

        # Signal handling
        signal.signal(signal.SIGINT, self._signal_handler)
        signal.signal(signal.SIGTERM, self._signal_handler)

        logger.info(f"S3FS Mount Manager initialized. Config dir: {self.config_dir}")

    def _signal_handler(self, signum, frame):
        """Handle termination signals gracefully"""
        logger.info(f"Received signal {signum}, cleaning up...")
        self.stop_monitoring()
        self.unmount_all()
        sys.exit(0)

    def _load_saved_configs(self) -> Dict[str, S3MountConfig]:
        """Load saved configurations from disk"""
        config_file = self.config_dir / "saved_configs.json"
        if config_file.exists():
            try:
                with open(config_file, 'r') as f:
                    data = json.load(f)
                configs = {}
                for name, cfg_data in data.items():
                    # Convert mount_point string back to Path
                    if 'mount_point' in cfg_data:
                        cfg_data['mount_point'] = Path(cfg_data['mount_point'])
                    if 'cache_location' in cfg_data:
                        cfg_data['cache_location'] = Path(cfg_data['cache_location'])
                    configs[name] = S3MountConfig(**cfg_data)
                return configs
            except Exception as e:
                logger.error(f"Failed to load saved configs: {e}")
        return {}

    def _save_configs(self):
        """Save configurations to disk"""
        config_file = self.config_dir / "saved_configs.json"
        try:
            # Convert Path objects to strings for JSON serialization
            data = {}
            for name, config in self.saved_configs.items():
                cfg_dict = asdict(config)
                cfg_dict['mount_point'] = str(config.mount_point)
                cfg_dict['cache_location'] = str(config.cache_location)
                data[name] = cfg_dict

            with open(config_file, 'w') as f:
                json.dump(data, f, indent=2, default=str)
        except Exception as e:
            logger.error(f"Failed to save configs: {e}")

    def discover_credentials(self, profile: str = "default") -> Tuple[str, str, str]:
        """
        Discover AWS credentials from multiple sources

        Sources checked in order:
        1. Environment variables
        2. AWS CLI profile
        3. IAM role (EC2/ECS)
        4. Credential file

        Args:
            profile: AWS profile name

        Returns:
            Tuple of (access_key, secret_key, session_token)

        Raises:
            ValueError: If no credentials found
        """
        sources = []

        # 1. Environment variables
        access_key = os.environ.get('AWS_ACCESS_KEY_ID') or os.environ.get('AWS_ACCESS_KEY')
        secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY') or os.environ.get('AWS_SECRET_KEY')
        session_token = os.environ.get('AWS_SESSION_TOKEN')

        if access_key and secret_key:
            sources.append(CredentialSource.ENVIRONMENT)
            logger.info("Found credentials in environment variables")
            return access_key, secret_key, session_token or ""

        # 2. AWS CLI profile
        creds_file = Path.home() / ".aws" / "credentials"
        config_file = Path.home() / ".aws" / "config"

        if creds_file.exists():
            try:
                creds = configparser.ConfigParser()
                creds.read(creds_file)

                # Check for profile
                profile_name = profile
                if profile != "default" and f"profile {profile}" in creds:
                    profile_name = f"profile {profile}"

                if profile_name in creds:
                    access_key = creds[profile_name].get("aws_access_key_id")
                    secret_key = creds[profile_name].get("aws_secret_access_key")
                    session_token = creds[profile_name].get("aws_session_token")

                    if access_key and secret_key:
                        sources.append(CredentialSource.AWS_PROFILE)
                        logger.info(f"Found credentials in AWS profile: {profile}")
                        return access_key, secret_key, session_token or ""
            except Exception as e:
                logger.warning(f"Failed to read AWS credentials file: {e}")

        # 3. IAM Role (check metadata service)
        try:
            import requests
            # Try EC2 metadata
            metadata_url = "http://169.254.169.254/latest/meta-data/iam/security-credentials/"
            response = requests.get(metadata_url, timeout=2)
            if response.status_code == 200:
                role_name = response.text.strip()
                role_url = f"{metadata_url}{role_name}"
                role_data = requests.get(role_url, timeout=2).json()

                sources.append(CredentialSource.IAM_ROLE)
                logger.info(f"Found IAM role credentials: {role_name}")
                return role_data['AccessKeyId'], role_data['SecretAccessKey'], role_data.get('Token', "")
        except:
            pass

        # 4. Check for credential file
        passwd_file = Path.home() / ".passwd-s3fs"
        if passwd_file.exists():
            try:
                with open(passwd_file, 'r') as f:
                    content = f.read().strip()
                    if ':' in content:
                        access_key, secret_key = content.split(':', 1)
                        sources.append(CredentialSource.FILE)
                        logger.info("Found credentials in .passwd-s3fs file")
                        return access_key, secret_key, ""
            except Exception as e:
                logger.warning(f"Failed to read .passwd-s3fs: {e}")

        raise ValueError(
            f"No AWS credentials found for profile '{profile}'. "
            "Please configure AWS credentials via:\n"
            "1. Environment variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)\n"
            "2. AWS CLI: 'aws configure'\n"
            "3. IAM role (if on EC2/ECS)\n"
            "4. Create ~/.passwd-s3fs file"
        )

    def discover_endpoint(self, profile: str = "default", region: str = None) -> str:
        """
        Discover S3 endpoint URL

        Args:
            profile: AWS profile name
            region: AWS region (overrides profile setting)

        Returns:
            Endpoint URL
        """
        # Check config file for custom endpoint
        config_file = Path.home() / ".aws" / "config"
        if config_file.exists():
            try:
                config = configparser.ConfigParser()
                config.read(config_file)

                section = profile if profile == "default" else f"profile {profile}"
                if section in config:
                    # Check for endpoint_url in s3 section
                    s3_options = config[section].get("s3", "")
                    for line in s3_options.splitlines():
                        if "endpoint_url" in line:
                            endpoint = line.split("=")[-1].strip()
                            logger.info(f"Found custom endpoint in config: {endpoint}")
                            return endpoint

                    # Get region from config
                    if not region:
                        region = config[section].get("region", "us-east-1")
            except Exception as e:
                logger.warning(f"Failed to read AWS config: {e}")

        # Use region to construct endpoint
        if not region:
            region = os.environ.get('AWS_REGION', 'us-east-1')

        # Special handling for different regions
        if region.startswith('cn-'):
            return f"https://s3.{region}.amazonaws.com.cn"
        elif region.startswith('us-gov-'):
            return f"https://s3.{region}.amazonaws.com"
        else:
            return f"https://s3.{region}.amazonaws.com"

    def create_config(
        self,
        bucket_name: str,
        profile: str = "default",
        mount_point: Optional[Union[str, Path]] = None,
        preset: str = "standard",
        **kwargs
    ) -> S3MountConfig:
        """
        Create S3 mount configuration

        Args:
            bucket_name: Name of S3 bucket
            profile: AWS profile name
            mount_point: Local mount directory (default: ~/bucket_name)
            preset: Configuration preset (standard, performance, reliable, etc.)
            **kwargs: Additional configuration overrides

        Returns:
            S3MountConfig object
        """
        # Discover credentials
        access_key, secret_key, session_token = self.discover_credentials(profile)

        # Discover endpoint
        endpoint_url = self.discover_endpoint(profile, kwargs.get('region'))

        # Create mount point path
        if mount_point is None:
            mount_point = Path.home() / bucket_name
        elif isinstance(mount_point, str):
            mount_point = Path(mount_point).expanduser()

        # Start with preset configuration
        if preset in self.PRESETS:
            base_config = self.PRESETS[preset].copy()
        else:
            base_config = self.PRESETS["standard"].copy()

        # Apply kwargs overrides
        base_config.update(kwargs)

        # Create config object
        config = S3MountConfig(
            profile=profile,
            bucket_name=bucket_name,
            mount_point=mount_point,
            endpoint_url=endpoint_url,
            access_key=access_key,
            secret_key=secret_key,
            session_token=session_token,
            **base_config
        )

        return config

    def save_config(self, name: str, config: S3MountConfig):
        """
        Save configuration for later use

        Args:
            name: Configuration name
            config: S3MountConfig object
        """
        self.saved_configs[name] = config
        self._save_configs()
        logger.info(f"Saved configuration: {name}")

    def load_config(self, name: str) -> S3MountConfig:
        """
        Load saved configuration

        Args:
            name: Configuration name

        Returns:
            S3MountConfig object

        Raises:
            KeyError: If configuration not found
        """
        if name not in self.saved_configs:
            raise KeyError(f"Configuration '{name}' not found")
        return self.saved_configs[name]

    def is_mounted(self, mount_point: Union[str, Path]) -> Tuple[bool, Optional[Dict]]:
        """
        Check if path is mounted with detailed information

        Args:
            mount_point: Path to check

        Returns:
            Tuple of (is_mounted, mount_info)
        """
        mount_path = Path(mount_point) if isinstance(mount_point, str) else mount_point

        # Check with mountpoint command
        try:
            result = subprocess.run(
                ["mountpoint", "-q", str(mount_path)],
                capture_output=True
            )
            is_mountpoint = result.returncode == 0

            # Get detailed mount info
            mount_info = None
            if is_mountpoint:
                result = subprocess.run(
                    ["mount"], capture_output=True, text=True
                )
                for line in result.stdout.splitlines():
                    if str(mount_path) in line:
                        mount_info = self._parse_mount_line(line)
                        break

            return is_mountpoint, mount_info
        except Exception as e:
            logger.error(f"Failed to check mount status: {e}")
            return False, None

    def _parse_mount_line(self, line: str) -> Dict:
        """Parse mount command output line"""
        parts = line.split()
        info = {
            'device': parts[0],
            'mount_point': parts[2],
            'filesystem': parts[4],
            'options': parts[5].strip('()').split(',')
        }
        return info

    def check_mount_health(self, mount_point: Union[str, Path]) -> MountHealth:
        """
        Perform comprehensive health check on mount

        Args:
            mount_point: Mount point to check

        Returns:
            MountHealth object
        """
        mount_path = Path(mount_point) if isinstance(mount_point, str) else mount_point
        is_mounted, mount_info = self.is_mounted(mount_path)

        if not is_mounted:
            return MountHealth(status=MountStatus.NOT_MOUNTED)

        health = MountHealth(
            status=MountStatus.MOUNTED,
            mount_time=datetime.now()
        )

        # Perform I/O test
        try:
            # Read test
            test_file = mount_path / ".health_check_test"
            with open(test_file, 'w') as f:
                f.write("health_check")

            start = time.time()
            with open(test_file, 'r') as f:
                content = f.read()
            read_time = time.time() - start

            # Cleanup
            test_file.unlink()

            # Calculate speeds (simplified)
            if read_time > 0:
                health.read_speed = 0.001 / read_time # 1KB / time

        except Exception as e:
            health.status = MountStatus.ERROR
            health.errors.append(f"I/O test failed: {e}")

        # Check for stale mount (no recent access)
        if health.last_access and (datetime.now() - health.last_access).seconds > 300:
            health.warnings.append("Mount appears stale - no recent access")
            health.status = MountStatus.STALE

        return health

    def _create_passwd_file(self, config: S3MountConfig) -> Path:
        """
        Create s3fs password file

        Args:
            config: S3MountConfig object

        Returns:
            Path to password file
        """
        passwd_dir = self.config_dir / "credentials"
        passwd_dir.mkdir(exist_ok=True)

        passwd_file = passwd_dir / f"{config.profile}_{config.bucket_name}.passwd"

        # Write credentials
        with open(passwd_file, 'w') as f:
            if config.session_token:
                f.write(f"{config.access_key}:{config.secret_key}:{config.session_token}\n")
            else:
                f.write(f"{config.access_key}:{config.secret_key}\n")

        # Secure permissions
        os.chmod(passwd_file, 0o600)

        return passwd_file

    def _build_s3fs_command(self, config: S3MountConfig) -> List[str]:
        """
        Build s3fs command with all options

        Args:
            config: S3MountConfig object

        Returns:
            List of command arguments
        """
        # Create password file
        passwd_file = self._create_passwd_file(config)

        # Base command
        cmd = [
            "s3fs",
            config.bucket_name,
            str(config.mount_point),
            "-o", f"passwd_file={passwd_file}",
            "-o", f"url={config.endpoint_url}",
        ]

        # Add common options
        options = [
            ("retries", config.retries),
            ("connect_timeout", config.connect_timeout),
            ("readwrite_timeout", config.readwrite_timeout),
            ("parallel_count", config.parallel_count),
            ("multipart_size", config.multipart_size),
            ("max_stat_cache_size", config.max_stat_cache_size),
            ("stat_cache_expire", config.stat_cache_expire),
            ("ensure_diskfree", config.ensure_diskfree),
            ("umask", config.umask),
        ]

        for opt, value in options:
            if value is not None:
                cmd.extend(["-o", f"{opt}={value}"])

        # Conditional options
        if config.use_cache:
            cmd.extend(["-o", f"use_cache={config.cache_location}"])

        if config.allow_other:
            cmd.append("-o")
            cmd.append("allow_other")

        if config.allow_root:
            cmd.append("-o")
            cmd.append("allow_root")

        if config.uid is not None:
            cmd.extend(["-o", f"uid={config.uid}"])

        if config.gid is not None:
            cmd.extend(["-o", f"gid={config.gid}"])

        if config.use_path_request_style:
            cmd.append("-o")
            cmd.append("use_path_request_style")

        if not config.enable_noobj_cache:
            cmd.append("-o")
            cmd.append("enable_noobj_cache")

        if config.sse:
            cmd.append("-o")
            cmd.append("sse")
            if config.sse_kms_key_id:
                cmd.extend(["-o", f"sse_kms_key_id={config.sse_kms_key_id}"])

        # Debug logging
        cmd.append("-o")
        cmd.append("dbglevel=info")
        cmd.append("-f") # Run in foreground

        return cmd

    def mount(self, config: S3MountConfig, force: bool = False) -> bool:
        """
        Mount S3 bucket with comprehensive error handling

        Args:
            config: S3MountConfig object
            force: Force remount if already mounted

        Returns:
            True if successful, False otherwise
        """
        logger.info(f"Mounting bucket '{config.bucket_name}' to '{config.mount_point}'")

        # Check if already mounted
        is_mounted, mount_info = self.is_mounted(config.mount_point)

        if is_mounted:
            if not force:
                logger.warning(f"Mount point '{config.mount_point}' is already mounted")
                return True

            logger.info(f"Force unmounting existing mount at '{config.mount_point}'")
            if not self.unmount(config.mount_point):
                logger.error("Failed to unmount existing mount")
                return False

        # Create mount point
        try:
            config.mount_point.mkdir(parents=True, exist_ok=True)
            logger.debug(f"Created mount point: {config.mount_point}")
        except Exception as e:
            logger.error(f"Failed to create mount point: {e}")
            return False

        # Create cache directory if using cache
        if config.use_cache:
            try:
                config.cache_location.mkdir(parents=True, exist_ok=True)
            except Exception as e:
                logger.warning(f"Failed to create cache directory: {e}")

        # Check disk space
        try:
            stat = os.statvfs(config.mount_point)
            free_space = stat.f_bavail * stat.f_frsize / (1024 ** 3) # GB

            if free_space < (config.ensure_diskfree / 1024):
                logger.warning(
                    f"Low disk space: {free_space:.1f}GB free, "
                    f"recommended: {config.ensure_diskfree/1024:.1f}GB"
                )
        except Exception as e:
            logger.warning(f"Could not check disk space: {e}")

        # Build and execute mount command
        cmd = self._build_s3fs_command(config)
        logger.debug(f"Executing command: {' '.join(cmd)}")

        try:
            # Start s3fs as a subprocess
            process = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                bufsize=1,
                universal_newlines=True
            )

            # Wait for mount to complete (with timeout)
            timeout = 30
            start_time = time.time()

            while True:
                # Check if process is still running
                if process.poll() is not None:
                    stderr_output = process.stderr.read()
                    if process.returncode != 0:
                        logger.error(f"s3fs failed with code {process.returncode}: {stderr_output}")
                        return False
                    break

                # Check if mount point is now mounted
                is_mounted, _ = self.is_mounted(config.mount_point)
                if is_mounted:
                    logger.info(f"Successfully mounted {config.bucket_name}")
                    break

                # Check timeout
                if time.time() - start_time > timeout:
                    logger.error(f"Mount timeout after {timeout} seconds")
                    process.terminate()
                    return False

                time.sleep(0.5)

            # Track active mount
            self.active_mounts[config.mount_point] = config

            # Perform health check
            health = self.check_mount_health(config.mount_point)
            self.mount_health[config.mount_point] = health

            logger.info(f"Mount health: {health.status.value}")

            return True

        except Exception as e:
            logger.error(f"Failed to mount: {e}")
            return False

    def unmount(self, mount_point: Union[str, Path], force: bool = False) -> bool:
        """
        Unmount S3 bucket

        Args:
            mount_point: Mount point to unmount
            force: Force unmount if busy

        Returns:
            True if successful, False otherwise
        """
        mount_path = Path(mount_point) if isinstance(mount_point, str) else mount_point

        logger.info(f"Unmounting {mount_path}")

        # Check if mounted
        is_mounted, _ = self.is_mounted(mount_path)
        if not is_mounted:
            logger.info(f"{mount_path} is not mounted")
            return True

        # Try fusermount first
        try:
            if force:
                cmd = ["fusermount", "-u", "-z", str(mount_path)]
            else:
                cmd = ["fusermount", "-u", str(mount_path)]

            result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)

            if result.returncode == 0:
                logger.info(f"Successfully unmounted {mount_path}")
                # Remove from active mounts
                self.active_mounts.pop(mount_path, None)
                self.mount_health.pop(mount_path, None)
                return True
            else:
                logger.warning(f"fusermount failed: {result.stderr}")
        except Exception as e:
            logger.warning(f"fusermount error: {e}")

        # Try umount if fusermount failed
        try:
            if force:
                cmd = ["umount", "-f", "-l", str(mount_path)]
            else:
                cmd = ["umount", str(mount_path)]

            result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)

            if result.returncode == 0:
                logger.info(f"Successfully unmounted with umount: {mount_path}")
                self.active_mounts.pop(mount_path, None)
                self.mount_health.pop(mount_path, None)
                return True
            else:
                logger.error(f"umount failed: {result.stderr}")
                return False
        except Exception as e:
            logger.error(f"umount error: {e}")
            return False

    def unmount_all(self):
        """Unmount all active mounts"""
        logger.info("Unmounting all active mounts")
        for mount_point in list(self.active_mounts.keys()):
            self.unmount(mount_point, force=True)

    def update_fstab(self, config: S3MountConfig, comment: str = "") -> bool:
        """
        Add or update entry in /etc/fstab for persistent mount

        Args:
            config: S3MountConfig object
            comment: Optional comment for fstab entry

        Returns:
            True if successful, False otherwise
        """
        if os.geteuid() != 0:
            logger.error("Root privileges required to update /etc/fstab")
            return False

        passwd_file = self._create_passwd_file(config)

        # Build fstab options
        options = [
            "_netdev",
            f"passwd_file={passwd_file}",
            f"url={config.endpoint_url}",
            f"retries={config.retries}",
            f"connect_timeout={config.connect_timeout}",
            f"readwrite_timeout={config.readwrite_timeout}",
            f"parallel_count={config.parallel_count}",
            f"umask={config.umask}",
        ]

        if config.allow_other:
            options.append("allow_other")

        if config.use_path_request_style:
            options.append("use_path_request_style")

        if config.use_cache:
            options.append(f"use_cache={config.cache_location}")

        options_str = ",".join(options)

        # Create fstab entry
        fstab_entry = (
            f"s3fs#{config.bucket_name} {config.mount_point} "
            f"fuse {options_str} 0 0"
        )

        if comment:
            fstab_entry = f"# {comment}\n{fstab_entry}"

        try:
            # Read existing fstab
            with open("/etc/fstab", "r") as f:
                lines = f.readlines()

            # Check if entry already exists
            entry_exists = False
            new_lines = []

            for line in lines:
                if f"s3fs#{config.bucket_name}" in line:
                    # Replace existing entry
                    new_lines.append(fstab_entry + "\n")
                    entry_exists = True
                    logger.info(f"Updated existing fstab entry for {config.bucket_name}")
                else:
                    new_lines.append(line)

            # Add new entry if not exists
            if not entry_exists:
                new_lines.append(fstab_entry + "\n")
                logger.info(f"Added new fstab entry for {config.bucket_name}")

            # Write back fstab
            with open("/etc/fstab", "w") as f:
                f.writelines(new_lines)

            return True

        except Exception as e:
            logger.error(f"Failed to update /etc/fstab: {e}")
            return False

    def remove_fstab_entry(self, bucket_name: str) -> bool:
        """
        Remove entry from /etc/fstab

        Args:
            bucket_name: S3 bucket name

        Returns:
            True if successful, False otherwise
        """
        if os.geteuid() != 0:
            logger.error("Root privileges required to modify /etc/fstab")
            return False

        try:
            with open("/etc/fstab", "r") as f:
                lines = f.readlines()

            # Filter out entries for this bucket
            new_lines = [
                line for line in lines
                if f"s3fs#{bucket_name}" not in line
            ]

            # Only write if something changed
            if len(new_lines) != len(lines):
                with open("/etc/fstab", "w") as f:
                    f.writelines(new_lines)
                logger.info(f"Removed fstab entry for {bucket_name}")
                return True
            else:
                logger.info(f"No fstab entry found for {bucket_name}")
                return True

        except Exception as e:
            logger.error(f"Failed to remove fstab entry: {e}")
            return False

    def start_monitoring(self, interval: int = 60):
        """
        Start health monitoring for all active mounts

        Args:
            interval: Health check interval in seconds
        """
        if self.monitor_running:
            logger.warning("Monitoring already running")
            return

        self.monitor_running = True

        def monitor_loop():
            while self.monitor_running:
                try:
                    for mount_point, config in list(self.active_mounts.items()):
                        health = self.check_mount_health(mount_point)
                        self.mount_health[mount_point] = health

                        # Auto-remount on error
                        if (health.status == MountStatus.ERROR and
                                config.auto_remount):
                            logger.warning(f"Auto-remounting {mount_point}")
                            self.unmount(mount_point)
                            time.sleep(2)
                            self.mount(config)

                        # Log warnings
                        if health.warnings:
                            for warning in health.warnings:
                                logger.warning(f"{mount_point}: {warning}")

                    time.sleep(interval)
                except Exception as e:
                    logger.error(f"Monitoring error: {e}")
                    time.sleep(interval)

        self.monitor_thread = threading.Thread(target=monitor_loop, daemon=True)
        self.monitor_thread.start()
        logger.info(f"Started health monitoring with {interval}s interval")

    def stop_monitoring(self):
        """Stop health monitoring"""
        self.monitor_running = False
        if self.monitor_thread:
            self.monitor_thread.join(timeout=5)
        logger.info("Stopped health monitoring")

    def get_status_report(self) -> Dict:
        """
        Generate comprehensive status report

        Returns:
            Dictionary with status information
        """
        report = {
            "timestamp": datetime.now().isoformat(),
            "active_mounts": {},
            "saved_configs": list(self.saved_configs.keys()),
            "monitoring": self.monitor_running,
        }

        for mount_point, config in self.active_mounts.items():
            health = self.mount_health.get(mount_point)

            report["active_mounts"][str(mount_point)] = {
                "bucket": config.bucket_name,
                "profile": config.profile,
                "endpoint": config.endpoint_url,
                "health": {
                    "status": health.status.value if health else "unknown",
                    "errors": health.errors if health else [],
                    "warnings": health.warnings if health else [],
                    "read_speed": health.read_speed if health else None,
                } if health else None
            }

        return report

    def print_status(self):
        """Print formatted status report to console"""
        report = self.get_status_report()

        print("\n" + "=" * 80)
        print("S3FS MOUNT MANAGER STATUS")
        print("=" * 80)
        print(f"Timestamp: {report['timestamp']}")
        print(f"Monitoring: {'ACTIVE' if report['monitoring'] else 'INACTIVE'}")
        print(f"Saved Configurations: {', '.join(report['saved_configs'])}")

        if report['active_mounts']:
            print("\nACTIVE MOUNTS:")
            print("-" * 80)
            for mount_point, info in report['active_mounts'].items():
                print(f"Mount Point: {mount_point}")
                print(f" Bucket: {info['bucket']}")
                print(f" Profile: {info['profile']}")
                print(f" Endpoint: {info['endpoint']}")
                if info['health']:
                    print(f" Status: {info['health']['status'].upper()}")
                    if info['health']['read_speed']:
                        print(f" Read Speed: {info['health']['read_speed']:.2f} MB/s")
                    if info['health']['errors']:
                        print(f" Errors: {', '.join(info['health']['errors'])}")
                    if info['health']['warnings']:
                        print(f" Warnings: {', '.join(info['health']['warnings'])}")
                print()
        else:
            print("\nNo active mounts")

        print("=" * 80)

def main():
    """Command-line interface for S3FS Mount Manager"""
    parser = argparse.ArgumentParser(
        description="Ultimate S3FS Mount Manager",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Mount a bucket with default settings
  %(prog)s mount my-bucket

  # Mount with performance preset
  %(prog)s mount my-bucket --preset performance --mount-dir /mnt/s3

  # Mount using specific AWS profile
  %(prog)s mount my-bucket --profile production --save-as prod-bucket

  # Unmount a directory
  %(prog)s unmount /mnt/s3

  # Show status
  %(prog)s status

  # Add to fstab for persistence
  %(prog)s fstab my-bucket --comment "Production data"

  # Load and mount saved configuration
  %(prog)s load prod-bucket
"""
    )

    subparsers = parser.add_subparsers(dest="command", help="Command to execute")

    # Mount command
    mount_parser = subparsers.add_parser("mount", help="Mount S3 bucket")
    mount_parser.add_argument("bucket", help="S3 bucket name")
    mount_parser.add_argument("--profile", default="default", help="AWS profile name")
    mount_parser.add_argument("--mount-dir", help="Mount directory (default: ~/bucket)")
    mount_parser.add_argument("--preset", default="standard",
                              choices=["standard", "performance", "reliable", "low_memory", "high_latency"],
                              help="Configuration preset")
    mount_parser.add_argument("--save-as", help="Save configuration with this name")
    mount_parser.add_argument("--force", action="store_true", help="Force remount if already mounted")
    mount_parser.add_argument("--fstab", action="store_true", help="Add to /etc/fstab")
    mount_parser.add_argument("--comment", help="Comment for fstab entry")

    # Unmount command
    unmount_parser = subparsers.add_parser("unmount", help="Unmount directory")
    unmount_parser.add_argument("mount_point", help="Mount point to unmount")
    unmount_parser.add_argument("--force", action="store_true", help="Force unmount")

    # Status command
    status_parser = subparsers.add_parser("status", help="Show mount status")

    # Fstab command
    fstab_parser = subparsers.add_parser("fstab", help="Manage /etc/fstab entries")
    fstab_parser.add_argument("action", choices=["add", "remove"], help="Add or remove entry")
    fstab_parser.add_argument("bucket", help="S3 bucket name")
    fstab_parser.add_argument("--profile", default="default", help="AWS profile name")
    fstab_parser.add_argument("--mount-dir", help="Mount directory")
    fstab_parser.add_argument("--comment", help="Comment for fstab entry")

    # Load command
    load_parser = subparsers.add_parser("load", help="Load and mount saved configuration")
    load_parser.add_argument("config_name", help="Saved configuration name")
    load_parser.add_argument("--force", action="store_true", help="Force remount")

    # Save command
    save_parser = subparsers.add_parser("save", help="Save current configuration")
    save_parser.add_argument("name", help="Configuration name")
    save_parser.add_argument("--bucket", required=True, help="S3 bucket name")
    save_parser.add_argument("--profile", default="default", help="AWS profile name")
    save_parser.add_argument("--mount-dir", help="Mount directory")
    save_parser.add_argument("--preset", default="standard", help="Configuration preset")

    # List command
    list_parser = subparsers.add_parser("list", help="List saved configurations")

    # Monitor command
    monitor_parser = subparsers.add_parser("monitor", help="Monitor mounts")
    monitor_parser.add_argument("action", choices=["start", "stop", "status"],
                                help="Monitor action")
    monitor_parser.add_argument("--interval", type=int, default=60,
                                help="Health check interval in seconds")

    args = parser.parse_args()

    if not args.command:
        parser.print_help()
        sys.exit(1)

    # Initialize manager
    manager = S3FSMountManager()

    try:
        if args.command == "mount":
            # Create configuration
            config = manager.create_config(
                bucket_name=args.bucket,
                profile=args.profile,
                mount_point=args.mount_dir,
                preset=args.preset
            )

            # Save if requested
            if args.save_as:
                manager.save_config(args.save_as, config)

            # Mount
            success = manager.mount(config, force=args.force)

            if success and args.fstab:
                manager.update_fstab(config, comment=args.comment)

            sys.exit(0 if success else 1)

        elif args.command == "unmount":
            success = manager.unmount(args.mount_point, force=args.force)
            sys.exit(0 if success else 1)

        elif args.command == "status":
            manager.print_status()

        elif args.command == "fstab":
            if args.action == "add":
                config = manager.create_config(
                    bucket_name=args.bucket,
                    profile=args.profile,
                    mount_point=args.mount_dir
                )
                success = manager.update_fstab(config, comment=args.comment)
                sys.exit(0 if success else 1)
            else: # remove
                success = manager.remove_fstab_entry(args.bucket)
                sys.exit(0 if success else 1)

        elif args.command == "load":
            config = manager.load_config(args.config_name)
            success = manager.mount(config, force=args.force)
            sys.exit(0 if success else 1)

        elif args.command == "save":
            config = manager.create_config(
                bucket_name=args.bucket,
                profile=args.profile,
                mount_point=args.mount_dir,
                preset=args.preset
            )
            manager.save_config(args.name, config)
            print(f"Saved configuration: {args.name}")

        elif args.command == "list":
            configs = manager.saved_configs
            if configs:
                print("Saved Configurations:")
                for name in configs:
                    print(f" {name}: {configs[name].bucket_name}")
            else:
                print("No saved configurations")

        elif args.command == "monitor":
            if args.action == "start":
                manager.start_monitoring(args.interval)
                print(f"Monitoring started with {args.interval}s interval")
            elif args.action == "stop":
                manager.stop_monitoring()
                print("Monitoring stopped")
            else: # status
                if manager.monitor_running:
                    print("Monitoring is ACTIVE")
                else:
                    print("Monitoring is INACTIVE")

    except Exception as e:
        logger.error(f"Command failed: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
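For orientation, below is a minimal usage sketch of the Python API added in this file, based only on the classes and methods shown above (S3FSMountManager, MountStatus). It is not part of the package diff: the bucket name, AWS profile, and mount path are illustrative placeholders, and the sketch assumes s3fs-fuse is installed, AWS credentials are discoverable on the host, and the module is importable as py2ls.mount2ls (its path in this release).

# Illustrative sketch only; "example-bucket", "production", and /mnt/s3-data are placeholders.
from pathlib import Path

from py2ls.mount2ls import S3FSMountManager, MountStatus

manager = S3FSMountManager()

# Build a configuration from the "reliable" preset; credentials and the
# endpoint URL are discovered automatically inside create_config().
config = manager.create_config(
    bucket_name="example-bucket",
    profile="production",
    mount_point=Path("/mnt/s3-data"),
    preset="reliable",
)

# Mount, run a health check, then unmount.
if manager.mount(config):
    health = manager.check_mount_health(config.mount_point)
    print(f"Mount status: {health.status.value}")
    if health.status is not MountStatus.MOUNTED:
        print("Errors:", health.errors)
    manager.unmount(config.mount_point)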