py2ls 0.1.10.12__py3-none-any.whl → 0.2.7.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of py2ls might be problematic. Click here for more details.
- py2ls/.DS_Store +0 -0
- py2ls/.git/.DS_Store +0 -0
- py2ls/.git/index +0 -0
- py2ls/.git/logs/refs/remotes/origin/HEAD +1 -0
- py2ls/.git/objects/.DS_Store +0 -0
- py2ls/.git/refs/.DS_Store +0 -0
- py2ls/ImageLoader.py +621 -0
- py2ls/__init__.py +7 -5
- py2ls/apptainer2ls.py +3940 -0
- py2ls/batman.py +164 -42
- py2ls/bio.py +2595 -0
- py2ls/cell_image_clf.py +1632 -0
- py2ls/container2ls.py +4635 -0
- py2ls/corr.py +475 -0
- py2ls/data/.DS_Store +0 -0
- py2ls/data/email/email_html_template.html +88 -0
- py2ls/data/hyper_param_autogluon_zeroshot2024.json +2383 -0
- py2ls/data/hyper_param_tabrepo_2024.py +1753 -0
- py2ls/data/mygenes_fields_241022.txt +355 -0
- py2ls/data/re_common_pattern.json +173 -0
- py2ls/data/sns_info.json +74 -0
- py2ls/data/styles/.DS_Store +0 -0
- py2ls/data/styles/example/.DS_Store +0 -0
- py2ls/data/styles/stylelib/.DS_Store +0 -0
- py2ls/data/styles/stylelib/grid.mplstyle +15 -0
- py2ls/data/styles/stylelib/high-contrast.mplstyle +6 -0
- py2ls/data/styles/stylelib/high-vis.mplstyle +4 -0
- py2ls/data/styles/stylelib/ieee.mplstyle +15 -0
- py2ls/data/styles/stylelib/light.mplstyl +6 -0
- py2ls/data/styles/stylelib/muted.mplstyle +6 -0
- py2ls/data/styles/stylelib/nature-reviews-latex.mplstyle +616 -0
- py2ls/data/styles/stylelib/nature-reviews.mplstyle +616 -0
- py2ls/data/styles/stylelib/nature.mplstyle +31 -0
- py2ls/data/styles/stylelib/no-latex.mplstyle +10 -0
- py2ls/data/styles/stylelib/notebook.mplstyle +36 -0
- py2ls/data/styles/stylelib/paper.mplstyle +290 -0
- py2ls/data/styles/stylelib/paper2.mplstyle +305 -0
- py2ls/data/styles/stylelib/retro.mplstyle +4 -0
- py2ls/data/styles/stylelib/sans.mplstyle +10 -0
- py2ls/data/styles/stylelib/scatter.mplstyle +7 -0
- py2ls/data/styles/stylelib/science.mplstyle +48 -0
- py2ls/data/styles/stylelib/std-colors.mplstyle +4 -0
- py2ls/data/styles/stylelib/vibrant.mplstyle +6 -0
- py2ls/data/tiles.csv +146 -0
- py2ls/data/usages_pd.json +1417 -0
- py2ls/data/usages_sns.json +31 -0
- py2ls/docker2ls.py +5446 -0
- py2ls/ec2ls.py +61 -0
- py2ls/fetch_update.py +145 -0
- py2ls/ich2ls.py +1955 -296
- py2ls/im2.py +8242 -0
- py2ls/image_ml2ls.py +2100 -0
- py2ls/ips.py +33909 -3418
- py2ls/ml2ls.py +7700 -0
- py2ls/mol.py +289 -0
- py2ls/mount2ls.py +1307 -0
- py2ls/netfinder.py +873 -351
- py2ls/nl2ls.py +283 -0
- py2ls/ocr.py +1581 -458
- py2ls/plot.py +10394 -314
- py2ls/rna2ls.py +311 -0
- py2ls/ssh2ls.md +456 -0
- py2ls/ssh2ls.py +5933 -0
- py2ls/ssh2ls_v01.py +2204 -0
- py2ls/stats.py +66 -172
- py2ls/temp20251124.py +509 -0
- py2ls/translator.py +2 -0
- py2ls/utils/decorators.py +3564 -0
- py2ls/utils_bio.py +3453 -0
- {py2ls-0.1.10.12.dist-info → py2ls-0.2.7.10.dist-info}/METADATA +113 -224
- {py2ls-0.1.10.12.dist-info → py2ls-0.2.7.10.dist-info}/RECORD +72 -16
- {py2ls-0.1.10.12.dist-info → py2ls-0.2.7.10.dist-info}/WHEEL +0 -0
py2ls/apptainer2ls.py
ADDED
|
@@ -0,0 +1,3940 @@
|
|
|
1
|
+
"""
|
|
2
|
+
apptainer2ls.py - Ultimate Singularity/Apptainer Development Environment Tool
|
|
3
|
+
|
|
4
|
+
A comprehensive tool for creating, managing, and using Singularity/Apptainer
|
|
5
|
+
containers for development and HPC workflows.
|
|
6
|
+
|
|
7
|
+
Features:
|
|
8
|
+
- Create persistent development environments
|
|
9
|
+
- GPU support (CUDA, ROCm)
|
|
10
|
+
- HPC cluster compatibility
|
|
11
|
+
- SSH forwarding for remote execution
|
|
12
|
+
- X11 forwarding for GUI applications
|
|
13
|
+
- Batch job templates
|
|
14
|
+
- Backup and migration tools
|
|
15
|
+
- Performance monitoring
|
|
16
|
+
- Multi-architecture support
|
|
17
|
+
- Container security scanning
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
import os
|
|
21
|
+
import sys
|
|
22
|
+
import subprocess
|
|
23
|
+
import shutil
|
|
24
|
+
import json
|
|
25
|
+
import time
|
|
26
|
+
import tempfile
|
|
27
|
+
import getpass
|
|
28
|
+
import platform
|
|
29
|
+
import argparse
|
|
30
|
+
import textwrap
|
|
31
|
+
import hashlib
|
|
32
|
+
import re
|
|
33
|
+
import stat
|
|
34
|
+
from pathlib import Path
|
|
35
|
+
from datetime import datetime
|
|
36
|
+
from typing import List, Dict, Optional, Union, Tuple, Any
|
|
37
|
+
import warnings
|
|
38
|
+
import socket
|
|
39
|
+
import pwd
|
|
40
|
+
import grp
|
|
41
|
+
import shlex
|
|
42
|
+
import secrets
|
|
43
|
+
import string
|
|
44
|
+
import tarfile
|
|
45
|
+
import gzip
|
|
46
|
+
import zipfile
|
|
47
|
+
|
|
48
|
+
# Try to import optional dependencies
|
|
49
|
+
try:
|
|
50
|
+
import yaml
|
|
51
|
+
YAML_AVAILABLE = True
|
|
52
|
+
except ImportError:
|
|
53
|
+
YAML_AVAILABLE = False
|
|
54
|
+
|
|
55
|
+
try:
|
|
56
|
+
import paramiko
|
|
57
|
+
PARAMIKO_AVAILABLE = True
|
|
58
|
+
except ImportError:
|
|
59
|
+
PARAMIKO_AVAILABLE = False
|
|
60
|
+
|
|
61
|
+
try:
|
|
62
|
+
import psutil
|
|
63
|
+
PSUTIL_AVAILABLE = True
|
|
64
|
+
except ImportError:
|
|
65
|
+
PSUTIL_AVAILABLE = False
|
|
66
|
+
|
|
67
|
+
# Version
|
|
68
|
+
__version__ = "1.0.0"
|
|
69
|
+
__author__ = "Singularity/Apptainer Development Tools"
|
|
70
|
+
|
|
71
|
+
def check_apptainer_availability() -> Dict[str, Union[bool, str]]:
    """
    Check whether Singularity/Apptainer is installed and runnable.

    Tries the ``apptainer`` binary first (the project's current name) and
    falls back to the legacy ``singularity`` binary.

    Returns:
        Dictionary with a consistent schema for both outcomes:
        ``available`` (bool), ``command`` (str or None), ``version``
        (str or None), ``full_version`` (str or None), ``message`` (str).
    """
    # Prefer Apptainer (the renamed upstream project) over legacy Singularity.
    for cmd in ["apptainer", "singularity"]:
        try:
            result = subprocess.run(
                [cmd, "--version"],
                capture_output=True,
                text=True,
                timeout=5,
            )
        except (OSError, subprocess.TimeoutExpired):
            # OSError covers FileNotFoundError as well as permission and
            # exec-format errors; in every case just try the next binary.
            continue

        if result.returncode == 0:
            # Output looks like "apptainer version 1.2.0"; extract x.y.z.
            version_output = result.stdout.strip()
            version_match = re.search(r'(\d+\.\d+\.\d+)', version_output)
            version = version_match.group(1) if version_match else "unknown"

            return {
                "available": True,
                "command": cmd,
                "version": version,
                "full_version": version_output,
                "message": f"{cmd.capitalize()} is available",
            }

    return {
        "available": False,
        "command": None,
        "version": None,
        # Same keys as the success dict so callers can rely on the schema.
        "full_version": None,
        "message": "Neither Apptainer nor Singularity found. Please install one of them."
    }
|
|
110
|
+
|
|
111
|
+
def install_apptainer_tool() -> bool:
    """
    Print installation instructions for Singularity/Apptainer tailored to
    the detected operating system and (on Linux) the distribution.

    This function does not install anything itself; it only detects the
    platform and emits step-by-step guidance to stdout.

    Returns:
        True once instructions have been printed (always).
    """
    system = platform.system().lower()
    distro = None

    print("Installing Singularity/Apptainer...")
    print(f"Detected system: {system}")

    if system == "linux":
        # Best-effort distribution detection from /etc/os-release; any
        # read/permission failure simply leaves distro as None.
        try:
            if os.path.exists("/etc/os-release"):
                with open("/etc/os-release", "r") as f:
                    content = f.read().lower()

                if "ubuntu" in content or "debian" in content:
                    distro = "debian"
                elif "centos" in content or "rhel" in content or "rocky" in content:
                    distro = "rhel"
                elif "fedora" in content:
                    distro = "fedora"
                elif "arch" in content:
                    distro = "arch"
                elif "opensuse" in content or "suse" in content:
                    distro = "suse"
        except OSError:
            # Detection is optional; fall through with distro unknown.
            pass

        print(f"Detected distribution: {distro or 'unknown'}")

        # Per-distribution installation instructions.
        if distro == "debian":
            print("\nFor Ubuntu/Debian:")
            print("1. Install dependencies:")
            print(" sudo apt-get update")
            print(" sudo apt-get install -y \\")
            print(" build-essential \\")
            print(" libssl-dev \\")
            print(" uuid-dev \\")
            print(" libgpgme-dev \\")
            print(" squashfs-tools \\")
            print(" libseccomp-dev \\")
            print(" pkg-config")
            print("\n2. Install Go (required):")
            print(" wget https://go.dev/dl/go1.21.0.linux-amd64.tar.gz")
            print(" sudo tar -C /usr/local -xzf go1.21.0.linux-amd64.tar.gz")
            print(" echo 'export PATH=$PATH:/usr/local/go/bin' >> ~/.bashrc")
            print(" source ~/.bashrc")
            print("\n3. Install Apptainer (recommended):")
            print(" wget https://github.com/apptainer/apptainer/releases/download/v1.2.0/apptainer_1.2.0_amd64.deb")
            print(" sudo dpkg -i apptainer_1.2.0_amd64.deb")
            print("\n4. OR Install Singularity:")
            print(" See: https://docs.sylabs.io/guides/admin-guide/installation.html")

        elif distro == "rhel":
            print("\nFor RHEL/CentOS/Rocky:")
            print("1. Enable EPEL repository:")
            print(" sudo yum install -y epel-release")
            print("\n2. Install dependencies:")
            print(" sudo yum groupinstall -y 'Development Tools'")
            print(" sudo yum install -y \\")
            print(" openssl-devel \\")
            print(" libuuid-devel \\")
            print(" squashfs-tools \\")
            print(" libseccomp-devel")
            print("\n3. Install Go and Apptainer from source or use conda:")
            print(" conda install -c conda-forge apptainer")

        elif distro == "arch":
            print("\nFor Arch Linux:")
            print(" yay -S apptainer # or singularity from AUR")

        elif distro == "fedora":
            print("\nFor Fedora:")
            print(" sudo dnf install -y apptainer")

        else:
            print("\nGeneral installation instructions:")
            print("1. Check official documentation:")
            print(" Apptainer: https://apptainer.org/docs/admin/main/installation.html")
            print(" Singularity: https://docs.sylabs.io/guides/admin-guide/installation.html")
            print("\n2. Using conda (recommended for users):")
            print(" conda create -n apptainer -c conda-forge apptainer")
            print(" conda activate apptainer")

        print("\n3. Verify installation:")
        print(" apptainer --version # or singularity --version")

    elif system == "darwin":  # macOS
        print("\nFor macOS:")
        print("Using conda (recommended):")
        print(" conda install -c conda-forge apptainer")
        print("\nOr using Homebrew:")
        print(" brew install --cask docker # Required for VM")
        print(" brew install singularity")
        print("\nNote: On macOS, Singularity runs in a VM")

    else:
        print(f"\nUnsupported system: {system}")
        print("Please see:")
        print("- https://apptainer.org/docs/admin/main/installation.html")
        print("- https://docs.sylabs.io/guides/admin-guide/installation.html")

    print("\n⚠️ Note: Some installations require root privileges.")
    print(" Consider using conda installation for user-space setup.")

    return True
|
|
223
|
+
|
|
224
|
+
def create_apptainer_sandbox(
|
|
225
|
+
base_image: str,
|
|
226
|
+
sandbox_dir: str,
|
|
227
|
+
image_type: str = "sandbox",
|
|
228
|
+
force: bool = False,
|
|
229
|
+
pull_folder: str = None,
|
|
230
|
+
library_url: str = "https://library.sylabs.io",
|
|
231
|
+
auth_token: str = None,
|
|
232
|
+
keystore: str = None
|
|
233
|
+
) -> Dict[str, Any]:
|
|
234
|
+
"""
|
|
235
|
+
Create a sandbox/writable Singularity container from a base image.
|
|
236
|
+
|
|
237
|
+
Args:
|
|
238
|
+
base_image: Source image (docker://, library://, shub://, oras://, etc.)
|
|
239
|
+
sandbox_dir: Directory for the sandbox
|
|
240
|
+
image_type: 'sandbox' (writable) or 'sif' (read-only single file)
|
|
241
|
+
force: Overwrite existing sandbox
|
|
242
|
+
pull_folder: Cache directory for pulled images
|
|
243
|
+
library_url: Container library URL
|
|
244
|
+
auth_token: Authentication token for private images
|
|
245
|
+
keystore: GPG keystore for verified images
|
|
246
|
+
|
|
247
|
+
Returns:
|
|
248
|
+
Dictionary with operation results
|
|
249
|
+
"""
|
|
250
|
+
apptainer_info = check_apptainer_availability()
|
|
251
|
+
if not apptainer_info["available"]:
|
|
252
|
+
return {
|
|
253
|
+
"success": False,
|
|
254
|
+
"error": "Apptainer/Singularity not available",
|
|
255
|
+
"message": apptainer_info["message"]
|
|
256
|
+
}
|
|
257
|
+
|
|
258
|
+
cmd = apptainer_info["command"]
|
|
259
|
+
sandbox_dir = os.path.abspath(os.path.expanduser(sandbox_dir))
|
|
260
|
+
|
|
261
|
+
# Check if sandbox already exists
|
|
262
|
+
if os.path.exists(sandbox_dir) and not force:
|
|
263
|
+
return {
|
|
264
|
+
"success": True,
|
|
265
|
+
"sandbox_dir": sandbox_dir,
|
|
266
|
+
"message": f"Sandbox already exists at {sandbox_dir}",
|
|
267
|
+
"existing": True
|
|
268
|
+
}
|
|
269
|
+
|
|
270
|
+
# Remove existing if force=True
|
|
271
|
+
if force and os.path.exists(sandbox_dir):
|
|
272
|
+
try:
|
|
273
|
+
if os.path.isdir(sandbox_dir):
|
|
274
|
+
shutil.rmtree(sandbox_dir)
|
|
275
|
+
else:
|
|
276
|
+
os.remove(sandbox_dir)
|
|
277
|
+
except Exception as e:
|
|
278
|
+
return {
|
|
279
|
+
"success": False,
|
|
280
|
+
"error": f"Could not remove existing sandbox: {e}",
|
|
281
|
+
"sandbox_dir": sandbox_dir
|
|
282
|
+
}
|
|
283
|
+
|
|
284
|
+
# Create parent directory if it doesn't exist
|
|
285
|
+
os.makedirs(os.path.dirname(sandbox_dir), exist_ok=True)
|
|
286
|
+
|
|
287
|
+
# Build command
|
|
288
|
+
singularity_cmd = [cmd, "build"]
|
|
289
|
+
|
|
290
|
+
# Add options based on image type
|
|
291
|
+
if image_type == "sandbox":
|
|
292
|
+
singularity_cmd.append("--sandbox")
|
|
293
|
+
|
|
294
|
+
# Add authentication if provided
|
|
295
|
+
if auth_token:
|
|
296
|
+
singularity_cmd.extend(["--authfile", auth_token])
|
|
297
|
+
|
|
298
|
+
# Add keystore if provided
|
|
299
|
+
if keystore:
|
|
300
|
+
singularity_cmd.extend(["--keyring", keystore])
|
|
301
|
+
|
|
302
|
+
# Add library URL if provided
|
|
303
|
+
if library_url and not base_image.startswith(("docker://", "shub://", "oras://")):
|
|
304
|
+
singularity_cmd.extend(["--library", library_url])
|
|
305
|
+
|
|
306
|
+
# Add pull folder if provided
|
|
307
|
+
if pull_folder:
|
|
308
|
+
singularity_cmd.extend(["--pull-folder", pull_folder])
|
|
309
|
+
|
|
310
|
+
# Add target and source
|
|
311
|
+
singularity_cmd.append(sandbox_dir)
|
|
312
|
+
singularity_cmd.append(base_image)
|
|
313
|
+
|
|
314
|
+
# Execute command
|
|
315
|
+
print(f"Creating {image_type} from {base_image}...")
|
|
316
|
+
print(f"Command: {' '.join(singularity_cmd)}")
|
|
317
|
+
|
|
318
|
+
try:
|
|
319
|
+
result = subprocess.run(
|
|
320
|
+
singularity_cmd,
|
|
321
|
+
capture_output=True,
|
|
322
|
+
text=True,
|
|
323
|
+
check=True
|
|
324
|
+
)
|
|
325
|
+
|
|
326
|
+
print(f"✓ Successfully created {image_type} at {sandbox_dir}")
|
|
327
|
+
|
|
328
|
+
return {
|
|
329
|
+
"success": True,
|
|
330
|
+
"sandbox_dir": sandbox_dir,
|
|
331
|
+
"output": result.stdout,
|
|
332
|
+
"stderr": result.stderr,
|
|
333
|
+
"command": ' '.join(singularity_cmd)
|
|
334
|
+
}
|
|
335
|
+
|
|
336
|
+
except subprocess.CalledProcessError as e:
|
|
337
|
+
error_msg = f"Failed to create {image_type}: {e.stderr}"
|
|
338
|
+
print(f"✗ {error_msg}")
|
|
339
|
+
|
|
340
|
+
return {
|
|
341
|
+
"success": False,
|
|
342
|
+
"error": error_msg,
|
|
343
|
+
"sandbox_dir": sandbox_dir,
|
|
344
|
+
"stderr": e.stderr,
|
|
345
|
+
"stdout": e.stdout,
|
|
346
|
+
"returncode": e.returncode
|
|
347
|
+
}
|
|
348
|
+
|
|
349
|
+
except Exception as e:
|
|
350
|
+
error_msg = f"Unexpected error: {str(e)}"
|
|
351
|
+
print(f"✗ {error_msg}")
|
|
352
|
+
|
|
353
|
+
return {
|
|
354
|
+
"success": False,
|
|
355
|
+
"error": error_msg,
|
|
356
|
+
"sandbox_dir": sandbox_dir
|
|
357
|
+
}
|
|
358
|
+
|
|
359
|
+
def create_development_sandbox(
    base_image: str = "docker://ubuntu:22.04",
    sandbox_dir: str = "~/singularity_sandbox",
    packages: List[str] = None,
    pip_packages: List[str] = None,
    conda_packages: List[str] = None,
    environment_vars: Dict[str, str] = None,
    files_to_copy: Dict[str, str] = None,
    post_install_commands: List[str] = None,
    force: bool = False,
    gpu_support: bool = False,
    cuda_version: str = None,
    rocm_version: str = None,
    mpi_support: bool = False,
    mpi_flavor: str = "openmpi",
    workdir: str = "/workspace",
    shell: str = "/bin/bash"
) -> Dict[str, Any]:
    """
    Create a development sandbox with common development tools and configurations.

    Generates a Singularity definition file (via
    ``generate_singularity_definition``), writes it to a temporary ``.def``
    file, builds a sandbox from it, and removes the temporary file on every
    exit path.

    Args:
        base_image: Base container image
        sandbox_dir: Directory for the sandbox
        packages: System packages to install
        pip_packages: Python packages to install via pip
        conda_packages: Conda packages to install
        environment_vars: Environment variables to set
        files_to_copy: Files to copy into container (host_path: container_path)
        post_install_commands: Commands to run after installation
        force: Overwrite existing sandbox
        gpu_support: Enable GPU support
        cuda_version: CUDA version for GPU support
        rocm_version: ROCm version for AMD GPU support
        mpi_support: Enable MPI support
        mpi_flavor: MPI implementation (openmpi, mpich, intel-mpi)
        workdir: Default working directory
        shell: Default shell

    Returns:
        Dictionary with operation results (see ``create_apptainer_sandbox``)
    """
    # Normalize None defaults to empty containers (avoids mutable defaults).
    packages = packages or []
    pip_packages = pip_packages or []
    conda_packages = conda_packages or []
    environment_vars = environment_vars or {}
    files_to_copy = files_to_copy or {}
    post_install_commands = post_install_commands or []

    definition = generate_singularity_definition(
        base_image=base_image,
        packages=packages,
        pip_packages=pip_packages,
        conda_packages=conda_packages,
        environment_vars=environment_vars,
        files_to_copy=files_to_copy,
        post_install_commands=post_install_commands,
        gpu_support=gpu_support,
        cuda_version=cuda_version,
        rocm_version=rocm_version,
        mpi_support=mpi_support,
        mpi_flavor=mpi_flavor,
        workdir=workdir,
        shell=shell
    )

    # Write the definition to a temp file; delete=False so the build
    # subprocess can open it by name after we close it.
    def_file = tempfile.NamedTemporaryFile(mode='w', suffix='.def', delete=False)
    def_file_path = def_file.name
    try:
        with def_file:
            def_file.write(definition)

        # Build a writable sandbox from the definition file.
        return create_apptainer_sandbox(
            base_image=def_file_path,
            sandbox_dir=sandbox_dir,
            image_type="sandbox",
            force=force
        )
    finally:
        # Single cleanup path for success, failure, and raised exceptions.
        if os.path.exists(def_file_path):
            os.unlink(def_file_path)
|
|
460
|
+
|
|
461
|
+
def generate_singularity_definition(
    base_image: str = "docker://ubuntu:22.04",
    packages: List[str] = None,
    pip_packages: List[str] = None,
    conda_packages: List[str] = None,
    environment_vars: Dict[str, str] = None,
    files_to_copy: Dict[str, str] = None,
    post_install_commands: List[str] = None,
    gpu_support: bool = False,
    cuda_version: str = None,
    rocm_version: str = None,
    mpi_support: bool = False,
    mpi_flavor: str = "openmpi",
    workdir: str = "/workspace",
    shell: str = "/bin/bash"
) -> str:
    """
    Generate a Singularity definition file.

    The bootstrap agent is derived from the URI scheme of ``base_image``
    (docker://, library://, shub://, oras://); unprefixed images default to
    the docker agent.

    Returns:
        Singularity definition file content as a single string.
    """
    packages = packages or []
    pip_packages = pip_packages or []
    conda_packages = conda_packages or []
    environment_vars = environment_vars or {}
    files_to_copy = files_to_copy or {}
    post_install_commands = post_install_commands or []

    # Default packages for development
    default_packages = [
        # Core development
        "build-essential",
        "cmake",
        "pkg-config",
        "git",
        "curl",
        "wget",
        "vim",
        "nano",

        # Python
        "python3",
        "python3-pip",
        "python3-dev",
        "python3-venv",

        # System tools
        "htop",
        "tree",
        "rsync",
        "unzip",
        "ssh",

        # Network tools
        "net-tools",
        "iputils-ping",
        "dnsutils",

        # Compression
        "gzip",
        "bzip2",
        "xz-utils",
        "zip",

        # Editors
        "emacs-nox",
        "micro",
    ]

    all_packages = default_packages + packages

    # GPU support packages (assumes a Debian-style base image — the apt
    # package names below will not resolve elsewhere; TODO confirm).
    gpu_packages = []
    if gpu_support:
        if cuda_version:
            cuda_major = cuda_version.split('.')[0] if cuda_version else "12"
            gpu_packages.extend([
                f"cuda-toolkit-{cuda_major}-0",
                "nvidia-cuda-dev",
                "nvidia-container-toolkit"
            ])
        elif rocm_version:
            gpu_packages.extend([
                "rocm-dev",
                "hip-dev",
                "rocblas",
                "rocsolver"
            ])

    # MPI support packages
    mpi_packages = []
    if mpi_support:
        if mpi_flavor == "openmpi":
            mpi_packages.extend(["openmpi-bin", "libopenmpi-dev"])
        elif mpi_flavor == "mpich":
            mpi_packages.extend(["mpich", "libmpich-dev"])
        elif mpi_flavor == "intel-mpi":
            mpi_packages.append("intel-mpi")  # Usually from Intel repos

    all_packages.extend(gpu_packages + mpi_packages)

    # Pick the bootstrap agent from the image URI scheme instead of
    # hard-coding docker (fixes broken headers for library://, shub://,
    # oras:// sources; docker:// output is unchanged).
    bootstrap_agents = {
        "docker://": "docker",
        "library://": "library",
        "shub://": "shub",
        "oras://": "oras",
    }
    bootstrap = "docker"
    from_image = base_image
    for prefix, agent in bootstrap_agents.items():
        if base_image.startswith(prefix):
            bootstrap = agent
            from_image = base_image[len(prefix):]
            break

    lines = [
        f"Bootstrap: {bootstrap}",
        f"From: {from_image}",
        "",
        "%post",
        "    # Update and install packages",
        "    apt-get update -y",
        "    apt-get install -y --no-install-recommends \\"
    ]

    # Emit packages in chunks of 10 to keep lines manageable; enumerate
    # (not value comparison) decides which chunk is first, so duplicate
    # chunk contents cannot break the line-continuation logic.
    package_chunks = [all_packages[i:i + 10] for i in range(0, len(all_packages), 10)]
    for idx, chunk in enumerate(package_chunks):
        if idx > 0:
            lines[-1] += " \\"
        lines.append("        " + " \\\n        ".join(chunk))

    lines.extend([
        "    ",
        "    # Clean up",
        "    apt-get clean",
        "    rm -rf /var/lib/apt/lists/*",
        "",
        "    # Install pip packages",
    ])

    if pip_packages:
        lines.append("    pip3 install --upgrade pip")
        lines.append(f"    pip3 install {' '.join(pip_packages)}")

    if conda_packages:
        lines.extend([
            "",
            "    # Install Miniconda",
            "    wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O /tmp/miniconda.sh",
            "    bash /tmp/miniconda.sh -b -p /opt/conda",
            "    rm /tmp/miniconda.sh",
            "    echo 'export PATH=/opt/conda/bin:$PATH' >> /etc/profile",
            "",
            "    # Install conda packages",
            f"    /opt/conda/bin/conda install -y {' '.join(conda_packages)}",
            "    /opt/conda/bin/conda clean -y --all"
        ])

    if post_install_commands:
        lines.append("")
        lines.append("    # Post-install commands")
        for cmd in post_install_commands:
            lines.append(f"    {cmd}")

    # Environment section
    lines.extend([
        "",
        "%environment",
        f"    export SHELL={shell}",
        f"    export WORKDIR={workdir}",
        "    export LC_ALL=C.UTF-8",
        "    export LANG=C.UTF-8",
        "    export PYTHONUNBUFFERED=1",
    ])

    for key, value in environment_vars.items():
        lines.append(f"    export {key}={value}")

    if conda_packages:
        lines.append("    export PATH=/opt/conda/bin:$PATH")

    if gpu_support:
        lines.append("    # GPU support")
        lines.append("    export NVIDIA_VISIBLE_DEVICES=all")
        lines.append("    export NVIDIA_DRIVER_CAPABILITIES=compute,utility")
        if cuda_version:
            lines.append(f"    export CUDA_VERSION={cuda_version}")
            lines.append("    export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH")
            lines.append("    export PATH=/usr/local/cuda/bin:$PATH")

    # Files section
    if files_to_copy:
        lines.extend(["", "%files"])
        for host_path, container_path in files_to_copy.items():
            lines.append(f"    {host_path} {container_path}")

    # Labels section
    lines.extend([
        "",
        "%labels",
        f"    Author {getpass.getuser()}",
        "    Version 1.0",
        "    Description Development sandbox",
        f"    Created {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
    ])

    # Runscript / startscript / test / help sections.  The startscript
    # echo uses double quotes so $(date) is expanded by the shell.
    lines.extend([
        "",
        "%runscript",
        f"    cd {workdir}",
        "    exec /bin/bash \"$@\"",
        "",
        "%startscript",
        "    # Commands to run when container starts",
        f"    cd {workdir}",
        "    echo \"Container started at $(date)\"",
        "",
        "%test",
        "    # Test commands",
        "    python3 --version",
        "    pip3 --version",
        "    git --version",
        "",
        "%help",
        "    This is a development sandbox container.",
        "    Mount your workspace to /workspace for persistent storage.",
        "    Example: singularity shell --bind /host/path:/workspace sandbox/",
    ])

    return "\n".join(lines)
|
|
702
|
+
|
|
703
|
+
def run_singularity_command(
    image_path: str,
    command: str,
    bind_mounts: Dict[str, str] = None,
    environment: Dict[str, str] = None,
    working_dir: str = None,
    home_dir: str = None,
    scratch_dir: str = "/tmp",
    gpu: bool = False,
    nv_gpu: bool = False,
    rocm_gpu: bool = False,
    enable_networking: bool = True,
    hostname: str = None,
    network: str = None,
    security_options: List[str] = None,
    cleanup: bool = True,
    writable_tmpfs: bool = False,
    fakeroot: bool = False,
    keep_privileges: bool = False,
    singularity_options: List[str] = None
) -> Dict[str, Any]:
    """
    Run a command inside a Singularity/Apptainer container.

    The command is executed as ``<cmd> exec [flags] <image> sh -c <command>``.

    Args:
        image_path: Path to Singularity image (SIF) or sandbox directory
        command: Shell command to run inside the container
        bind_mounts: Bind mounts (host_path -> container_path)
        environment: Environment variables to set inside the container
        working_dir: Working directory inside container (--pwd)
        home_dir: Home directory inside container (--home)
        scratch_dir: Scratch directory (--scratch); always passed
        gpu: Enable GPU support (alias for nv_gpu; maps to --nv)
        nv_gpu: Enable NVIDIA GPU support (--nv)
        rocm_gpu: Enable AMD ROCm GPU support (--rocm); ignored when an
            NVIDIA flag is also set
        enable_networking: If False, isolate networking (--net --network none)
        hostname: Set container hostname
        network: Network type (bridge, none, host, ...); only used when
            enable_networking is True
        security_options: Security options, each passed via --security
        cleanup: Run with a clean environment (--cleanenv).  Note: this
            resets environment variables inside the container; it does NOT
            delete temporary files.
        writable_tmpfs: Mount an ephemeral writable tmpfs overlay
        fakeroot: Use fakeroot
        keep_privileges: Keep user privileges (--keep-privs)
        singularity_options: Additional raw CLI options appended verbatim

    Returns:
        Dictionary with command results: on execution, keys ``success``,
        ``returncode``, ``stdout``, ``stderr``, ``command``, ``image_path``;
        on failure to start, ``success``/``error`` plus context keys.
    """
    apptainer_info = check_apptainer_availability()
    if not apptainer_info["available"]:
        return {
            "success": False,
            "error": "Apptainer/Singularity not available",
            "message": apptainer_info["message"]
        }

    cmd = apptainer_info["command"]
    image_path = os.path.expanduser(image_path)

    # Fail fast if the image/sandbox does not exist
    if not os.path.exists(image_path):
        return {
            "success": False,
            "error": f"Image not found: {image_path}",
            "image_path": image_path
        }

    # Build command
    singularity_cmd = [cmd, "exec"]

    # Bind mounts (host side may use ~)
    if bind_mounts:
        for host_path, container_path in bind_mounts.items():
            host_path = os.path.expanduser(host_path)
            singularity_cmd.extend(["--bind", f"{host_path}:{container_path}"])

    # Environment variables
    if environment:
        for key, value in environment.items():
            singularity_cmd.extend(["--env", f"{key}={value}"])

    # Working directory
    if working_dir:
        singularity_cmd.extend(["--pwd", working_dir])

    # Home directory
    if home_dir:
        singularity_cmd.extend(["--home", home_dir])

    # GPU support: NVIDIA takes precedence when both vendors are requested
    if gpu or nv_gpu:
        singularity_cmd.append("--nv")
    elif rocm_gpu:
        singularity_cmd.append("--rocm")

    # Networking: explicit isolation wins over a requested network type
    if not enable_networking:
        singularity_cmd.extend(["--net", "--network", "none"])
    elif network:
        singularity_cmd.extend(["--network", network])

    # Hostname
    if hostname:
        singularity_cmd.extend(["--hostname", hostname])

    # Security options
    if security_options:
        for opt in security_options:
            singularity_cmd.extend(["--security", opt])

    # Writable tmpfs overlay
    if writable_tmpfs:
        singularity_cmd.append("--writable-tmpfs")

    # Fakeroot
    if fakeroot:
        singularity_cmd.append("--fakeroot")

    # Keep privileges
    if keep_privileges:
        singularity_cmd.append("--keep-privs")

    # Additional raw options
    if singularity_options:
        singularity_cmd.extend(singularity_options)

    # Scratch directory (always passed)
    singularity_cmd.extend(["--scratch", scratch_dir])

    # --cleanenv resets the container environment; it does not clean files
    if cleanup:
        singularity_cmd.append("--cleanenv")

    # Image path must precede the in-container command
    singularity_cmd.append(image_path)
    singularity_cmd.extend(["sh", "-c", command])

    # Execute command
    print(f"Running command in container: {command}")
    print(f"Image: {image_path}")

    try:
        result = subprocess.run(
            singularity_cmd,
            capture_output=True,
            text=True,
            check=False  # Don't raise exception on non-zero return
        )

        output = {
            "success": result.returncode == 0,
            "returncode": result.returncode,
            "stdout": result.stdout,
            "stderr": result.stderr,
            "command": ' '.join(singularity_cmd),
            "image_path": image_path
        }

        if result.returncode == 0:
            print(f"✓ Command executed successfully")
        else:
            print(f"✗ Command failed with return code {result.returncode}")
            # Only append an ellipsis when stderr was actually truncated
            suffix = "..." if len(result.stderr) > 500 else ""
            print(f"Stderr: {result.stderr[:500]}{suffix}")

        return output

    except Exception as e:
        error_msg = f"Failed to execute command: {str(e)}"
        print(f"✗ {error_msg}")

        return {
            "success": False,
            "error": error_msg,
            "image_path": image_path,
            "command": command
        }
|
|
881
|
+
|
|
882
|
+
def shell_into_container(
    image_path: str,
    bind_mounts: Dict[str, str] = None,
    environment: Dict[str, str] = None,
    working_dir: str = None,
    home_dir: str = None,
    gpu: bool = False,
    nv_gpu: bool = False,
    rocm_gpu: bool = False,
    enable_networking: bool = True,
    hostname: str = None,
    fakeroot: bool = False,
    shell: str = "/bin/bash"
) -> None:
    """
    Start an interactive shell inside a Singularity/Apptainer container.

    Args:
        image_path: Path to Singularity image (SIF) or sandbox directory
        bind_mounts: Bind mounts (host_path -> container_path)
        environment: Environment variables to set inside the container
        working_dir: Working directory inside container (--pwd)
        home_dir: Home directory inside container (--home)
        gpu: Enable GPU support (alias for nv_gpu; maps to --nv)
        nv_gpu: Enable NVIDIA GPU support (--nv)
        rocm_gpu: Enable AMD ROCm GPU support (--rocm)
        enable_networking: If False, isolate networking (--net --network none)
        hostname: Set container hostname
        fakeroot: Use fakeroot
        shell: Shell program to run inside the container (--shell)

    Returns:
        None (blocks until the interactive shell exits)
    """
    apptainer_info = check_apptainer_availability()
    if not apptainer_info["available"]:
        print(f"✗ {apptainer_info['message']}")
        return

    cmd = apptainer_info["command"]
    image_path = os.path.expanduser(image_path)

    # Check if image exists
    if not os.path.exists(image_path):
        print(f"✗ Image not found: {image_path}")
        return

    # Build command
    singularity_cmd = [cmd, "shell"]

    # Bind mounts
    if bind_mounts:
        for host_path, container_path in bind_mounts.items():
            host_path = os.path.expanduser(host_path)
            singularity_cmd.extend(["--bind", f"{host_path}:{container_path}"])

    # Environment variables
    if environment:
        for key, value in environment.items():
            singularity_cmd.extend(["--env", f"{key}={value}"])

    # Working directory
    if working_dir:
        singularity_cmd.extend(["--pwd", working_dir])

    # Home directory
    if home_dir:
        singularity_cmd.extend(["--home", home_dir])

    # GPU support: NVIDIA takes precedence when both vendors are requested
    if gpu or nv_gpu:
        singularity_cmd.append("--nv")
    elif rocm_gpu:
        singularity_cmd.append("--rocm")

    # Network isolation
    if not enable_networking:
        singularity_cmd.extend(["--net", "--network", "none"])

    # Hostname
    if hostname:
        singularity_cmd.extend(["--hostname", hostname])

    # Fakeroot
    if fakeroot:
        singularity_cmd.append("--fakeroot")

    # Bug fix: `shell` used to be printed but never forwarded to the CLI,
    # so the container's default shell was always used.  Forward it.
    if shell:
        singularity_cmd.extend(["--shell", shell])

    # Image path goes last
    singularity_cmd.append(image_path)

    # Execute command (interactive)
    print(f"Starting interactive shell in container")
    print(f"Image: {image_path}")
    print(f"Shell: {shell}")
    print(f"Working directory: {working_dir or '/workspace'}")

    if bind_mounts:
        print("Bind mounts:")
        for host_path, container_path in bind_mounts.items():
            print(f"  {host_path} -> {container_path}")

    print("\nType 'exit' to leave the container")
    print("-" * 50)

    try:
        subprocess.run(singularity_cmd, check=True)
        print("\n" + "-" * 50)
        print("Exited container")

    except subprocess.CalledProcessError as e:
        print(f"\n✗ Failed to start shell: {e}")

    except KeyboardInterrupt:
        print("\n\nShell interrupted")
|
|
996
|
+
|
|
997
|
+
def convert_docker_to_singularity(
    docker_image: str,
    singularity_image: str,
    force: bool = False,
    sandbox: bool = False,
    library_url: str = "https://library.sylabs.io",
    auth_token: str = None,
    build_args: Dict[str, str] = None
) -> Dict[str, Any]:
    """
    Convert a Docker image to a Singularity image via ``build``.

    Args:
        docker_image: Docker image name (e.g. ubuntu:22.04); a ``docker://``
            prefix is added automatically when missing
        singularity_image: Output Singularity image path (SIF or sandbox dir)
        force: Overwrite an existing output image
        sandbox: Create a writable sandbox directory instead of a SIF
        library_url: Container library URL (--library)
        auth_token: Value passed to --authfile.
            NOTE(review): --authfile expects a credentials *file path*, not
            a raw token string — confirm what callers pass here.
        build_args: Build arguments passed via --build-arg

    Returns:
        Dictionary with operation results
    """
    apptainer_info = check_apptainer_availability()
    if not apptainer_info["available"]:
        return {
            "success": False,
            "error": "Apptainer/Singularity not available",
            "message": apptainer_info["message"]
        }

    cmd = apptainer_info["command"]
    singularity_image = os.path.expanduser(singularity_image)

    # Short-circuit when the output already exists and force is off
    if os.path.exists(singularity_image) and not force:
        return {
            "success": True,
            "singularity_image": singularity_image,
            "message": f"Image already exists at {singularity_image}",
            "existing": True
        }

    # Remove existing output when force=True (sandbox dirs need rmtree)
    if force and os.path.exists(singularity_image):
        try:
            if os.path.isdir(singularity_image):
                shutil.rmtree(singularity_image)
            else:
                os.remove(singularity_image)
        except Exception as e:
            return {
                "success": False,
                "error": f"Could not remove existing image: {e}",
                "singularity_image": singularity_image
            }

    # Ensure Docker image has docker:// prefix
    if not docker_image.startswith("docker://"):
        docker_image = f"docker://{docker_image}"

    # Create parent directory if it doesn't exist.  Bug fix: guard against
    # a bare filename, where dirname is "" and os.makedirs("") raises
    # FileNotFoundError.
    parent_dir = os.path.dirname(singularity_image)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)

    # Build command
    singularity_cmd = [cmd, "build"]

    if sandbox:
        singularity_cmd.append("--sandbox")

    # Authentication (see NOTE in docstring about --authfile semantics)
    if auth_token:
        singularity_cmd.extend(["--authfile", auth_token])

    if library_url:
        singularity_cmd.extend(["--library", library_url])

    if build_args:
        for key, value in build_args.items():
            singularity_cmd.extend(["--build-arg", f"{key}={value}"])

    # `build` takes the output path first, then the source image
    singularity_cmd.append(singularity_image)
    singularity_cmd.append(docker_image)

    # Execute command
    print(f"Converting Docker image {docker_image} to Singularity...")
    print(f"Output: {singularity_image}")
    print(f"Command: {' '.join(singularity_cmd)}")

    try:
        result = subprocess.run(
            singularity_cmd,
            capture_output=True,
            text=True,
            check=True
        )

        print(f"✓ Successfully converted Docker image to Singularity")

        return {
            "success": True,
            "singularity_image": singularity_image,
            "docker_image": docker_image,
            "output": result.stdout,
            "stderr": result.stderr,
            "command": ' '.join(singularity_cmd)
        }

    except subprocess.CalledProcessError as e:
        error_msg = f"Failed to convert Docker image: {e.stderr}"
        print(f"✗ {error_msg}")

        return {
            "success": False,
            "error": error_msg,
            "singularity_image": singularity_image,
            "docker_image": docker_image,
            "stderr": e.stderr,
            "stdout": e.stdout,
            "returncode": e.returncode
        }

    except Exception as e:
        error_msg = f"Unexpected error: {str(e)}"
        print(f"✗ {error_msg}")

        return {
            "success": False,
            "error": error_msg,
            "singularity_image": singularity_image,
            "docker_image": docker_image
        }
|
|
1134
|
+
|
|
1135
|
+
def create_singularity_workstation(
    work_dir: str = "~/singularity_workstation",
    base_image: str = "docker://ubuntu:22.04",
    image_name: str = "dev_sandbox",
    packages: List[str] = None,
    pip_packages: List[str] = None,
    conda_packages: List[str] = None,
    environment_vars: Dict[str, str] = None,
    bind_mounts: Dict[str, str] = None,
    force_rebuild: bool = False,
    gpu_support: bool = False,
    cuda_version: str = None,
    rocm_version: str = None,
    mpi_support: bool = False,
    create_wrapper_scripts: bool = True,
    create_config_file: bool = True,
    config_file: str = None
) -> Dict[str, Any]:
    """
    Create a comprehensive Singularity development workstation.

    Builds (or reuses) a writable sandbox, optionally packages it into a
    portable SIF file, writes helper wrapper scripts, and saves the
    effective configuration as JSON.

    Args:
        work_dir: Workspace directory
        base_image: Base container image
        image_name: Name for the Singularity image/sandbox
        packages: System packages to install
        pip_packages: Python packages to install
        conda_packages: Conda packages to install
        environment_vars: Environment variables to set
        bind_mounts: Default bind mounts
        force_rebuild: Force rebuild even if sandbox exists
        gpu_support: Enable GPU support
        cuda_version: CUDA version for GPU support
        rocm_version: ROCm version for AMD GPU support
        mpi_support: Enable MPI support
        create_wrapper_scripts: Create helper scripts
        create_config_file: Create configuration file
        config_file: Load configuration from file; values in the file
            override the corresponding keyword arguments

    Returns:
        Dictionary with operation results
    """
    # Load configuration from file if provided.
    # Bug fix: the previous implementation assigned into locals(), which is
    # a documented no-op in CPython, so config-file values were silently
    # ignored.  Rebind each known parameter explicitly instead.
    if config_file:
        config = load_apptainer_config(config_file)
        work_dir = config.get("work_dir", work_dir)
        base_image = config.get("base_image", base_image)
        image_name = config.get("image_name", image_name)
        packages = config.get("packages", packages)
        pip_packages = config.get("pip_packages", pip_packages)
        conda_packages = config.get("conda_packages", conda_packages)
        environment_vars = config.get("environment_vars", environment_vars)
        bind_mounts = config.get("bind_mounts", bind_mounts)
        force_rebuild = config.get("force_rebuild", force_rebuild)
        gpu_support = config.get("gpu_support", gpu_support)
        cuda_version = config.get("cuda_version", cuda_version)
        rocm_version = config.get("rocm_version", rocm_version)
        mpi_support = config.get("mpi_support", mpi_support)
        create_wrapper_scripts = config.get("create_wrapper_scripts", create_wrapper_scripts)
        create_config_file = config.get("create_config_file", create_config_file)

    # Initialize defaults (mutable defaults deliberately start as None)
    if packages is None:
        packages = []
    if pip_packages is None:
        pip_packages = []
    if conda_packages is None:
        conda_packages = []
    if environment_vars is None:
        environment_vars = {}
    if bind_mounts is None:
        bind_mounts = {}

    # Expand and normalize workspace path, then ensure it exists
    work_dir = os.path.abspath(os.path.expanduser(work_dir))
    os.makedirs(work_dir, exist_ok=True)

    # Paths for images and scripts
    sandbox_dir = os.path.join(work_dir, f"{image_name}_sandbox")
    sif_file = os.path.join(work_dir, f"{image_name}.sif")
    scripts_dir = os.path.join(work_dir, "scripts")
    config_dir = os.path.join(work_dir, "config")

    # Reuse an existing sandbox unless a rebuild was requested
    if os.path.exists(sandbox_dir) and not force_rebuild:
        print(f"✓ Sandbox already exists at {sandbox_dir}")
        print(f"  Use --force-rebuild to recreate")

        result = {
            "success": True,
            "sandbox_dir": sandbox_dir,
            "sif_file": sif_file if os.path.exists(sif_file) else None,
            "work_dir": work_dir,
            "existing": True
        }

    else:
        # Create sandbox
        print(f"Creating development sandbox from {base_image}...")

        result = create_development_sandbox(
            base_image=base_image,
            sandbox_dir=sandbox_dir,
            packages=packages,
            pip_packages=pip_packages,
            conda_packages=conda_packages,
            environment_vars=environment_vars,
            gpu_support=gpu_support,
            cuda_version=cuda_version,
            rocm_version=rocm_version,
            mpi_support=mpi_support,
            force=force_rebuild
        )

        if not result["success"]:
            return result

    # Create SIF file from sandbox (optional but recommended for portability)
    if result["success"] and not os.path.exists(sif_file):
        print(f"\nCreating SIF file for portability...")
        sif_result = create_apptainer_sandbox(
            base_image=sandbox_dir,
            sandbox_dir=sif_file,
            image_type="sif",
            force=True
        )

        if sif_result["success"]:
            print(f"✓ SIF file created: {sif_file}")
        else:
            print(f"⚠️ Could not create SIF file: {sif_result.get('error', 'Unknown error')}")

    # Create wrapper scripts
    if create_wrapper_scripts and result["success"]:
        print(f"\nCreating wrapper scripts...")
        create_wrapper_scripts_for_sandbox(
            sandbox_dir=sandbox_dir,
            sif_file=sif_file if os.path.exists(sif_file) else None,
            work_dir=work_dir,
            bind_mounts=bind_mounts,
            gpu_support=gpu_support,
            scripts_dir=scripts_dir
        )

    # Persist the effective configuration for later reuse via config_file
    if create_config_file:
        config_data = {
            "work_dir": work_dir,
            "sandbox_dir": sandbox_dir,
            "sif_file": sif_file if os.path.exists(sif_file) else None,
            "base_image": base_image,
            "image_name": image_name,
            "packages": packages,
            "pip_packages": pip_packages,
            "conda_packages": conda_packages,
            "environment_vars": environment_vars,
            "bind_mounts": bind_mounts,
            "gpu_support": gpu_support,
            "cuda_version": cuda_version,
            "rocm_version": rocm_version,
            "mpi_support": mpi_support,
            "created": datetime.now().isoformat()
        }

        config_path = os.path.join(config_dir, "apptainer_config.json")
        os.makedirs(config_dir, exist_ok=True)

        with open(config_path, "w") as f:
            json.dump(config_data, f, indent=2)

        print(f"✓ Configuration saved to {config_path}")

    # Display usage instructions
    if result["success"]:
        display_apptainer_instructions(
            work_dir=work_dir,
            sandbox_dir=sandbox_dir,
            sif_file=sif_file if os.path.exists(sif_file) else None,
            bind_mounts=bind_mounts,
            gpu_support=gpu_support,
            image_name=image_name
        )

    return result
|
|
1315
|
+
|
|
1316
|
+
def create_wrapper_scripts_for_sandbox(
    sandbox_dir: str,
    work_dir: str,
    bind_mounts: Dict[str, str] = None,
    gpu_support: bool = False,
    scripts_dir: str = "scripts",
    sif_file: str = None
) -> None:
    """
    Create wrapper scripts for easy interaction with the sandbox.

    Writes five executable shell scripts into ``<work_dir>/<scripts_dir>``:
    enter_sandbox.sh, run_in_sandbox.sh, start_jupyter.sh, python_env.sh
    and backup_sandbox.sh.  Each script locates apptainer/singularity on
    PATH at run time, so the scripts stay portable across hosts.

    Args:
        sandbox_dir: Path to sandbox directory
        work_dir: Workspace directory
        bind_mounts: Default bind mounts (host_path -> container_path)
        gpu_support: Enable GPU support (--nv) in the generated scripts
        scripts_dir: Directory for scripts, relative to work_dir
        sif_file: Optional SIF file path; preferred over the sandbox when
            it exists
    """
    if bind_mounts is None:
        bind_mounts = {}

    # Create scripts directory
    scripts_path = os.path.join(work_dir, scripts_dir)
    os.makedirs(scripts_path, exist_ok=True)

    # Determine which image to use: a SIF file wins over the sandbox dir
    image_path = sif_file if sif_file and os.path.exists(sif_file) else sandbox_dir

    # Common bind mounts.
    # NOTE(review): ~/.ssh and ~/.gitconfig are mapped into /root, which
    # assumes the container user is root — confirm for rootless setups.
    default_binds = {
        work_dir: "/workspace",
        os.path.expanduser("~/.ssh"): "/root/.ssh",
        os.path.expanduser("~/.gitconfig"): "/root/.gitconfig",
        "/tmp": "/tmp"
    }

    # Add custom bind mounts (caller-supplied mounts override defaults)
    if bind_mounts:
        default_binds.update(bind_mounts)

    # Build bind mount string for scripts; host paths that do not exist
    # are skipped so the generated scripts do not fail on missing mounts
    bind_args = []
    for host_path, container_path in default_binds.items():
        host_path = os.path.expanduser(host_path)
        if os.path.exists(host_path):
            bind_args.append(f"--bind {host_path}:{container_path}")

    bind_string = " ".join(bind_args)

    # GPU flag (empty string when GPU support is off)
    gpu_flag = "--nv" if gpu_support else ""

    # Script 1: Enter shell
    shell_script = os.path.join(scripts_path, "enter_sandbox.sh")
    shell_content = f"""#!/bin/bash
# Enter the development sandbox
SINGULARITY_CMD="$(which apptainer 2>/dev/null || which singularity 2>/dev/null)"

if [ -z "$SINGULARITY_CMD" ]; then
    echo "Error: Neither apptainer nor singularity found in PATH"
    exit 1
fi

echo "Entering development sandbox..."
echo "Sandbox: {sandbox_dir}"
echo "Working directory: /workspace"
echo ""

$SINGULARITY_CMD shell {gpu_flag} {bind_string} {image_path}
"""

    with open(shell_script, "w") as f:
        f.write(shell_content)

    # 0o755: owner rwx, group/other rx — scripts must be executable
    os.chmod(shell_script, 0o755)

    # Script 2: Run command
    run_script = os.path.join(scripts_path, "run_in_sandbox.sh")
    run_content = f"""#!/bin/bash
# Run a command in the development sandbox
SINGULARITY_CMD="$(which apptainer 2>/dev/null || which singularity 2>/dev/null)"

if [ -z "$SINGULARITY_CMD" ]; then
    echo "Error: Neither apptainer nor singularity found in PATH"
    exit 1
fi

if [ $# -eq 0 ]; then
    echo "Usage: $0 <command> [args...]"
    echo "Example: $0 python script.py"
    exit 1
fi

echo "Running command in sandbox: $@"
echo ""

$SINGULARITY_CMD exec {gpu_flag} {bind_string} {image_path} "$@"
"""

    with open(run_script, "w") as f:
        f.write(run_content)

    os.chmod(run_script, 0o755)

    # Script 3: Start Jupyter.
    # ${{1:-8888}} renders as ${1:-8888} in the script: positional arg 1
    # with a default, so `start_jupyter.sh [port] [notebook_dir]`.
    jupyter_script = os.path.join(scripts_path, "start_jupyter.sh")
    jupyter_content = f"""#!/bin/bash
# Start Jupyter server in the sandbox
SINGULARITY_CMD="$(which apptainer 2>/dev/null || which singularity 2>/dev/null)"

if [ -z "$SINGULARITY_CMD" ]; then
    echo "Error: Neither apptainer nor singularity found in PATH"
    exit 1
fi

PORT=${{1:-8888}}
NOTEBOOK_DIR=${{2:-/workspace}}

echo "Starting Jupyter on port $PORT..."
echo "Notebook directory: $NOTEBOOK_DIR"
echo ""
echo "Open in browser: http://localhost:$PORT"
echo ""

$SINGULARITY_CMD exec {gpu_flag} {bind_string} \\
    --env JUPYTER_PORT=$PORT \\
    --env JUPYTER_NOTEBOOK_DIR=$NOTEBOOK_DIR \\
    {image_path} \\
    jupyter lab --ip=0.0.0.0 --port=$PORT --no-browser --notebook-dir=$NOTEBOOK_DIR
"""

    with open(jupyter_script, "w") as f:
        f.write(jupyter_content)

    os.chmod(jupyter_script, 0o755)

    # Script 4: Python environment.
    # NOTE(review): the bash -c payload nests unescaped double quotes
    # (e.g. "$VENV_DIR" inside the outer "..."); the shell's word
    # concatenation keeps it one argument, but this breaks if any expanded
    # path contains spaces — TODO confirm / harden the quoting.
    python_script = os.path.join(scripts_path, "python_env.sh")
    python_content = f"""#!/bin/bash
# Set up Python virtual environment in sandbox
SINGULARITY_CMD="$(which apptainer 2>/dev/null || which singularity 2>/dev/null)"

if [ -z "$SINGULARITY_CMD" ]; then
    echo "Error: Neither apptainer nor singularity found in PATH"
    exit 1
fi

VENV_DIR="/workspace/venv"

echo "Setting up Python virtual environment..."
echo ""

$SINGULARITY_CMD exec {bind_string} {image_path} \\
    bash -c "
    if [ ! -d "$VENV_DIR" ]; then
        echo 'Creating virtual environment...'
        python3 -m venv $VENV_DIR
    fi

    echo 'Activating virtual environment...'
    source $VENV_DIR/bin/activate

    echo 'Upgrading pip...'
    pip install --upgrade pip

    echo 'Virtual environment ready at $VENV_DIR'
    echo 'To activate: source $VENV_DIR/bin/activate'
    "
"""

    with open(python_script, "w") as f:
        f.write(python_content)

    os.chmod(python_script, 0o755)

    # Script 5: Backup sandbox (tar from the sandbox's parent dir so the
    # archive contains a single top-level directory)
    backup_script = os.path.join(scripts_path, "backup_sandbox.sh")
    backup_content = f"""#!/bin/bash
# Backup the sandbox
BACKUP_DIR="{work_dir}/backups"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_NAME="sandbox_backup_$TIMESTAMP"

echo "Backing up sandbox..."
echo "Source: {sandbox_dir}"
echo "Destination: $BACKUP_DIR/$BACKUP_NAME.tar.gz"
echo ""

mkdir -p "$BACKUP_DIR"

# Create backup
tar -czf "$BACKUP_DIR/$BACKUP_NAME.tar.gz" -C "{os.path.dirname(sandbox_dir)}" "{os.path.basename(sandbox_dir)}"

if [ $? -eq 0 ]; then
    echo "✓ Backup created: $BACKUP_DIR/$BACKUP_NAME.tar.gz"
    echo "Size: $(du -h "$BACKUP_DIR/$BACKUP_NAME.tar.gz" | cut -f1)"
else
    echo "✗ Backup failed"
    exit 1
fi
"""

    with open(backup_script, "w") as f:
        f.write(backup_content)

    os.chmod(backup_script, 0o755)

    # Summarize what was written for the user
    print(f"Created wrapper scripts in {scripts_path}:")
    print(f"  enter_sandbox.sh    - Enter interactive shell")
    print(f"  run_in_sandbox.sh   - Run command in sandbox")
    print(f"  start_jupyter.sh    - Start Jupyter server")
    print(f"  python_env.sh       - Set up Python environment")
    print(f"  backup_sandbox.sh   - Backup sandbox")
|
|
1530
|
+
|
|
1531
|
+
def display_apptainer_instructions(
    work_dir: str,
    sandbox_dir: str,
    bind_mounts: Dict[str, str] = None,
    gpu_support: bool = False,
    image_name: str = "dev_sandbox",
    sif_file: str = None
) -> None:
    """
    Display comprehensive usage instructions for the Singularity workstation.

    Prints quick-start, advanced, conversion, maintenance, HPC and security
    guidance for working with the sandbox (or the SIF image, when one exists).

    Args:
        work_dir: Workspace directory
        sandbox_dir: Path to sandbox directory
        bind_mounts: Extra bind mounts merged over the defaults
        gpu_support: Whether GPU (--nv) support is enabled
        image_name: Image name used in the conversion examples
        sif_file: Optional SIF file path; preferred over the sandbox if it exists
    """
    if bind_mounts is None:
        bind_mounts = {}

    # Determine image to use: prefer the SIF file when it exists on disk.
    # (Hoisted so the existence check runs once instead of three times.)
    use_sif = sif_file and os.path.exists(sif_file)
    image_path = sif_file if use_sif else sandbox_dir
    image_type = "SIF" if use_sif else "sandbox"

    # Common bind mounts; caller-supplied mounts override/extend these.
    default_binds = {
        work_dir: "/workspace",
        "~/.ssh": "/root/.ssh",
        "~/.gitconfig": "/root/.gitconfig",
        "/tmp": "/tmp"
    }
    default_binds.update(bind_mounts)

    print("\n" + "="*60)
    print(" SINGULARITY/APPTAINER DEVELOPMENT WORKSTATION")
    print("="*60)

    print(f"\n📍 Workspace: {work_dir}")
    print(f"📦 Sandbox: {sandbox_dir}")
    if use_sif:
        print(f"📁 SIF File: {sif_file}")
    print(f"🖥️ Image Type: {image_type}")
    print(f"🎮 GPU Support: {'Yes' if gpu_support else 'No'}")

    print("\n📂 Default Bind Mounts:")
    for host_path, container_path in default_binds.items():
        print(f" {host_path} -> {container_path}")

    print("\n🚀 QUICK START COMMANDS:")
    print("-" * 40)

    # Build common bind arguments; only bind host paths that actually exist.
    bind_args = []
    for host_path, container_path in default_binds.items():
        expanded_host = os.path.expanduser(host_path)
        if os.path.exists(expanded_host):
            bind_args.append(f"--bind {expanded_host}:{container_path}")

    bind_string = " ".join(bind_args)
    gpu_string = "--nv" if gpu_support else ""

    print("\n1. Enter interactive shell:")
    print(f" singularity shell {gpu_string} {bind_string} {image_path}")

    print("\n2. Run a command:")
    print(f" singularity exec {gpu_string} {bind_string} {image_path} <command>")
    print(f" Example: singularity exec {bind_string} {image_path} python --version")

    print("\n3. Start Jupyter server:")
    print(f" singularity exec {gpu_string} {bind_string} \\")
    print(" --env JUPYTER_PORT=8888 \\")
    print(f" {image_path} \\")
    print(" jupyter lab --ip=0.0.0.0 --port=8888 --no-browser")

    print("\n4. Using wrapper scripts (if created):")
    print(f" cd {work_dir}")
    print(" ./scripts/enter_sandbox.sh")
    print(" ./scripts/run_in_sandbox.sh python script.py")
    print(" ./scripts/start_jupyter.sh")

    print("\n🔧 ADVANCED USAGE:")
    print("-" * 40)

    print("\n1. Create writable overlay:")
    print(f" singularity shell --overlay overlay.img {bind_string} {image_path}")

    print("\n2. Use fakeroot (unprivileged):")
    print(f" singularity shell --fakeroot {bind_string} {image_path}")

    print("\n3. Set working directory:")
    print(f" singularity shell --pwd /workspace {bind_string} {image_path}")

    print("\n4. Set environment variables:")
    print(f" singularity shell --env MY_VAR=value {bind_string} {image_path}")

    print("\n5. Mount additional directories:")
    print(f" singularity shell --bind /host/data:/data {bind_string} {image_path}")

    print("\n🔄 CONVERTING TO/FROM OTHER FORMATS:")
    print("-" * 40)

    print("\n1. Convert sandbox to SIF:")
    print(f" singularity build {image_name}.sif {sandbox_dir}")

    print("\n2. Convert Docker to Singularity:")
    print(f" singularity build {image_name}.sif docker://ubuntu:22.04")

    print("\n3. Convert SIF to sandbox:")
    print(f" singularity build --sandbox {image_name}_sandbox {image_name}.sif")

    print("\n📊 MONITORING AND MAINTENANCE:")
    print("-" * 40)

    print("\n1. Check container info:")
    print(f" singularity inspect {image_path}")

    print("\n2. Run container tests:")
    print(f" singularity test {image_path}")

    print("\n3. Verify container:")
    print(f" singularity verify {image_path}")

    print("\n4. Clean cache:")
    print(" singularity cache clean")

    print("\n🎯 HPC/CLUSTER USAGE:")
    print("-" * 40)

    print("\n1. Copy to cluster:")
    print(f" scp {image_path} user@cluster:/path/to/containers/")

    print("\n2. SSH with X11 forwarding:")
    print(" ssh -X user@cluster")
    print(f" singularity shell --bind /path/to/data {image_path}")

    print("\n3. Submit batch job (SLURM example):")
    print(f''' #!/bin/bash
    #SBATCH --job-name=singularity_job
    #SBATCH --nodes=1
    #SBATCH --ntasks=1
    #SBATCH --cpus-per-task=4
    #SBATCH --mem=8G
    #SBATCH --time=01:00:00

    module load singularity

    singularity exec {bind_string} {image_path} \\
    python /workspace/script.py
    ''')

    print("\n🛡️ SECURITY NOTES:")
    print("-" * 40)

    print("\n• Containers run with user privileges (more secure than Docker)")
    print("• Use --fakeroot for unprivileged operations")
    print("• Consider using --contain for better isolation")
    print("• Use --scratch for temporary files")
    print("• Verify images with 'singularity verify' when possible")

    print("\n🔗 HELPFUL LINKS:")
    print("-" * 40)

    print("\n• Apptainer documentation: https://apptainer.org/docs/")
    print("• Singularity documentation: https://docs.sylabs.io/")
    print("• Singularity containers library: https://cloud.sylabs.io/library")
    print("• HPC best practices: https://sylabs.io/guides/latest/user-guide/hpc.html")

    print("\n" + "="*60)
    print(" READY TO DEVELOP WITH SINGULARITY/APPTAINER!")
    print("="*60)
|
|
1703
|
+
|
|
1704
|
+
def setup_ssh_forwarding(
    container_image: str,
    ssh_config: Dict[str, Any] = None,
    local_port: int = 2222,
    container_port: int = 22,
    bind_host: str = "127.0.0.1",
    key_path: str = None
) -> Dict[str, Any]:
    """
    Set up SSH forwarding to a Singularity container.

    Generates a key pair if needed, ensures sshd is present in the container,
    installs the public key into the container's authorized_keys, and returns
    instructions for running the container instance with SSHD.

    Args:
        container_image: Path to Singularity image
        ssh_config: SSH configuration overrides (merged over defaults)
        local_port: Local port for SSH
        container_port: Container SSH port
        bind_host: Bind host address
        key_path: Path to SSH private key (default: ~/.ssh/id_rsa_singularity)

    Returns:
        Dictionary with SSH forwarding setup (success, instructions,
        ssh_config, key_path) or an error description.
    """
    if ssh_config is None:
        ssh_config = {}

    # Default SSH configuration; caller-supplied values take precedence.
    default_config = {
        "user": "root",
        "password": None,
        "key_auth": True,
        "port": container_port,
        "hostname": "localhost",
        "allow_agent": True,
        "compress": True
    }
    default_config.update(ssh_config)

    # Generate SSH key if not provided.
    if key_path is None:
        key_path = os.path.expanduser("~/.ssh/id_rsa_singularity")

    if not os.path.exists(key_path):
        print(f"Generating SSH key at {key_path}...")
        result = subprocess.run(
            ["ssh-keygen", "-t", "rsa", "-b", "4096", "-f", key_path, "-N", ""],
            capture_output=True,
            text=True
        )

        if result.returncode != 0:
            return {
                "success": False,
                "error": f"Failed to generate SSH key: {result.stderr}",
                "key_path": key_path
            }

    print("Starting SSH server in container...")

    # Check if an SSH daemon is available in the container.
    check_result = run_singularity_command(
        image_path=container_image,
        command="which sshd"
    )

    if not check_result["success"]:
        # Install SSH server (assumes a Debian/Ubuntu-based container image).
        print("Installing OpenSSH server in container...")
        install_result = run_singularity_command(
            image_path=container_image,
            command="apt-get update && apt-get install -y openssh-server"
        )

        if not install_result["success"]:
            return {
                "success": False,
                "error": f"Failed to install SSH server: {install_result.get('stderr', 'Unknown error')}",
                "container_image": container_image
            }

    # Configure SSH server.
    print("Configuring SSH server...")

    # BUGFIX: previously a literal "ssh-rsa ..." placeholder was written to
    # authorized_keys; read the real public key generated above and install it.
    pub_key = None
    pub_key_path = key_path + ".pub"
    if os.path.exists(pub_key_path):
        with open(pub_key_path) as f:
            pub_key = f.read().strip()

    # Create SSH runtime directory and configure the daemon.
    config_commands = [
        "mkdir -p /run/sshd",
        "echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config",
        "echo 'PasswordAuthentication yes' >> /etc/ssh/sshd_config",
        "echo 'PubkeyAuthentication yes' >> /etc/ssh/sshd_config",
        "mkdir -p /root/.ssh",
        "chmod 700 /root/.ssh",
    ]
    if pub_key:
        # sshd refuses keys with permissive modes, hence the chmod 600.
        config_commands.append(f"echo '{pub_key}' > /root/.ssh/authorized_keys")
        config_commands.append("chmod 600 /root/.ssh/authorized_keys")

    for cmd in config_commands:
        run_singularity_command(
            image_path=container_image,
            command=cmd
        )

    print(f"Starting SSH server on port {container_port}...")

    # Running sshd persistently requires a container instance (daemon mode),
    # which is out of scope here — return step-by-step instructions instead.
    instructions = {
        "manual_setup": "SSH forwarding requires running container in background with SSHD",
        "steps": [
            f"1. Start container with SSHD: singularity instance start --bind /path/to/ssh/key {container_image} ssh_instance",
            f"2. Connect to container: ssh -p {local_port} {default_config['user']}@{bind_host}",
            "3. Stop container: singularity instance stop ssh_instance"
        ],
        "ssh_config": default_config,
        "key_path": key_path
    }

    print("\n📝 SSH Forwarding Setup Instructions:")
    print("-" * 40)
    for step in instructions["steps"]:
        print(f" {step}")

    return {
        "success": True,
        "instructions": instructions,
        "ssh_config": default_config,
        "key_path": key_path
    }
|
|
1834
|
+
|
|
1835
|
+
def setup_x11_forwarding(
    container_image: str,
    display: str = None,
    xauth_path: str = None
) -> Dict[str, Any]:
    """
    Set up X11 forwarding for GUI applications in a Singularity container.

    Args:
        container_image: Path to Singularity image
        display: DISPLAY environment variable (default: host $DISPLAY or ":0")
        xauth_path: Path to Xauthority file (default: $XAUTHORITY or ~/.Xauthority)

    Returns:
        Dictionary with X11 forwarding setup details (bind mounts, environment,
        and a ready-to-run test command) or an error description.
    """
    # Resolve DISPLAY from the host environment when not given.
    if display is None:
        display = os.environ.get("DISPLAY", ":0")

    # Resolve the Xauthority file when not given.
    if xauth_path is None:
        xauth_path = os.environ.get("XAUTHORITY", os.path.expanduser("~/.Xauthority"))

    # Without an Xauthority cookie the container cannot authenticate to the X server.
    if not os.path.exists(xauth_path):
        return {
            "success": False,
            "error": f"Xauthority file not found: {xauth_path}",
            "display": display
        }

    # Bind the X11 socket directory and the auth cookie into the container.
    x11_binds = {
        "/tmp/.X11-unix": "/tmp/.X11-unix",
        xauth_path: "/root/.Xauthority"
    }

    # Test X11 forwarding.
    print("Testing X11 forwarding...")

    # BUGFIX: the previous probe ran "which xeyes || echo 'X11 apps not
    # installed'", which always exits 0, so the install branch below could
    # never trigger. Probe with a bare "which xeyes" so a missing binary
    # reports failure honestly.
    test_result = run_singularity_command(
        image_path=container_image,
        command="which xeyes",
        bind_mounts=x11_binds,
        environment={"DISPLAY": display}
    )

    if test_result["success"]:
        print("✓ X11 forwarding setup complete")

        return {
            "success": True,
            "display": display,
            "xauth_path": xauth_path,
            "bind_mounts": x11_binds,
            "environment": {"DISPLAY": display},
            "test_command": f"singularity exec --bind /tmp/.X11-unix:/tmp/.X11-unix --bind {xauth_path}:/root/.Xauthority --env DISPLAY={display} {container_image} xeyes"
        }

    # X11 apps might not be installed in the container — try installing them
    # (assumes a Debian/Ubuntu-based image with a writable container).
    print("⚠️ X11 apps not found in container. Installing...")

    install_result = run_singularity_command(
        image_path=container_image,
        command="apt-get update && apt-get install -y x11-apps"
    )

    if install_result["success"]:
        print("✓ X11 apps installed")

        return {
            "success": True,
            "display": display,
            "xauth_path": xauth_path,
            "bind_mounts": x11_binds,
            "environment": {"DISPLAY": display},
            "installed_x11": True
        }

    return {
        "success": False,
        "error": "Failed to install X11 apps",
        "display": display,
        "xauth_path": xauth_path,
        "install_stderr": install_result.get("stderr")
    }
|
|
1923
|
+
|
|
1924
|
+
def create_batch_job_template(
    job_name: str = "singularity_job",
    image_path: str = None,
    work_dir: str = "/workspace",
    bind_mounts: Dict[str, str] = None,
    gpu_support: bool = False,
    mpi_support: bool = False,
    slurm_config: Dict[str, Any] = None,
    pbs_config: Dict[str, Any] = None,
    lsf_config: Dict[str, Any] = None,
    output_dir: str = "."
) -> Dict[str, str]:
    """
    Create batch job templates for HPC schedulers (SLURM, PBS/Torque, LSF).

    Args:
        job_name: Name of the job
        image_path: Path to Singularity image (templates fall back to
            $SINGULARITY_IMAGE when None)
        work_dir: Working directory used inside the job scripts
        bind_mounts: Bind mounts (host path -> container path)
        gpu_support: Enable GPU support (adds --nv and defaults to 1 GPU)
        mpi_support: Enable MPI support (currently unused; kept for API
            compatibility)
        slurm_config: SLURM settings; missing keys are filled with defaults
        pbs_config: PBS settings; missing keys are filled with defaults
        lsf_config: LSF settings; missing keys are filled with defaults
        output_dir: Output directory for templates

    Returns:
        Dictionary with template paths ("slurm", "pbs", "lsf") plus the
        computed "bind_string" and "gpu_string".
    """
    if bind_mounts is None:
        bind_mounts = {}

    # BUGFIX: merge user-supplied scheduler configs over the defaults instead
    # of using them verbatim — a partial dict (e.g. {"mem": "16G"}) used to
    # raise KeyError on the lookups below.
    slurm_defaults = {
        "nodes": 1,
        "ntasks": 1,
        "cpus_per_task": 4,
        "mem": "8G",
        "time": "01:00:00",
        "partition": "normal",
        "gpus": 1 if gpu_support else 0,
        "account": None,
        "qos": None
    }
    slurm_defaults.update(slurm_config or {})
    slurm_config = slurm_defaults

    pbs_defaults = {
        "nodes": 1,
        "ppn": 4,
        "mem": "8gb",
        "walltime": "01:00:00",
        "queue": "normal",
        "gpus": 1 if gpu_support else 0
    }
    pbs_defaults.update(pbs_config or {})
    pbs_config = pbs_defaults

    lsf_defaults = {
        "cores": 4,
        "mem": 8000,
        "time": "01:00",
        "queue": "normal",
        "gpus": 1 if gpu_support else 0
    }
    lsf_defaults.update(lsf_config or {})
    lsf_config = lsf_defaults

    # Create output directory
    os.makedirs(output_dir, exist_ok=True)

    # Build the --bind argument string shared by all templates.
    bind_args = []
    for host_path, container_path in bind_mounts.items():
        host_path = os.path.expanduser(host_path)
        bind_args.append(f"--bind {host_path}:{container_path}")

    bind_string = " ".join(bind_args)
    gpu_string = "--nv" if gpu_support else ""

    # --- SLURM template ---
    slurm_template = f"""#!/bin/bash
#SBATCH --job-name={job_name}
#SBATCH --nodes={slurm_config['nodes']}
#SBATCH --ntasks={slurm_config['ntasks']}
#SBATCH --cpus-per-task={slurm_config['cpus_per_task']}
#SBATCH --mem={slurm_config['mem']}
#SBATCH --time={slurm_config['time']}
#SBATCH --partition={slurm_config['partition']}
"""

    if slurm_config['gpus'] > 0:
        slurm_template += f"#SBATCH --gpus={slurm_config['gpus']}\n"

    if slurm_config['account']:
        slurm_template += f"#SBATCH --account={slurm_config['account']}\n"

    if slurm_config['qos']:
        slurm_template += f"#SBATCH --qos={slurm_config['qos']}\n"

    slurm_template += f"""
#SBATCH --output={job_name}_%j.out
#SBATCH --error={job_name}_%j.err

# Load Singularity/Apptainer module
module load singularity

# Set working directory
cd {work_dir}

# Run container
singularity exec {gpu_string} {bind_string} {image_path or '$SINGULARITY_IMAGE'} \\
    python /workspace/script.py

echo "Job completed at $(date)"
"""

    slurm_path = os.path.join(output_dir, f"{job_name}.slurm")
    with open(slurm_path, "w") as f:
        f.write(slurm_template)

    # --- PBS template ---
    pbs_template = f"""#!/bin/bash
#PBS -N {job_name}
#PBS -l nodes={pbs_config['nodes']}:ppn={pbs_config['ppn']}
#PBS -l mem={pbs_config['mem']}
#PBS -l walltime={pbs_config['walltime']}
#PBS -q {pbs_config['queue']}
"""

    if pbs_config['gpus'] > 0:
        pbs_template += f"#PBS -l gpus={pbs_config['gpus']}\n"

    pbs_template += f"""
#PBS -j oe
#PBS -o {job_name}_${{PBS_JOBID}}.out

# Load Singularity/Apptainer module
module load singularity

# Set working directory
cd {work_dir}

# Run container
singularity exec {gpu_string} {bind_string} {image_path or '$SINGULARITY_IMAGE'} \\
    python /workspace/script.py

echo "Job completed at $(date)"
"""

    pbs_path = os.path.join(output_dir, f"{job_name}.pbs")
    with open(pbs_path, "w") as f:
        f.write(pbs_template)

    # --- LSF template ---
    lsf_template = f"""#!/bin/bash
#BSUB -J {job_name}
#BSUB -n {lsf_config['cores']}
#BSUB -R "rusage[mem={lsf_config['mem']}]"
#BSUB -W {lsf_config['time']}
#BSUB -q {lsf_config['queue']}
"""

    if lsf_config['gpus'] > 0:
        lsf_template += f"#BSUB -gpu num={lsf_config['gpus']}\n"

    lsf_template += f"""
#BSUB -o {job_name}_%J.out
#BSUB -e {job_name}_%J.err

# Load Singularity/Apptainer module
module load singularity

# Set working directory
cd {work_dir}

# Run container
singularity exec {gpu_string} {bind_string} {image_path or '$SINGULARITY_IMAGE'} \\
    python /workspace/script.py

echo "Job completed at $(date)"
"""

    lsf_path = os.path.join(output_dir, f"{job_name}.lsf")
    with open(lsf_path, "w") as f:
        f.write(lsf_template)

    # Make the generated scripts executable.
    for path in [slurm_path, pbs_path, lsf_path]:
        os.chmod(path, 0o755)

    print(f"Created batch job templates in {output_dir}:")
    print(f" {job_name}.slurm - SLURM job script")
    print(f" {job_name}.pbs - PBS/Torque job script")
    print(f" {job_name}.lsf - LSF job script")

    return {
        "slurm": slurm_path,
        "pbs": pbs_path,
        "lsf": lsf_path,
        "bind_string": bind_string,
        "gpu_string": gpu_string
    }
|
|
2124
|
+
|
|
2125
|
+
def backup_singularity_environment(
    sandbox_dir: str,
    backup_dir: str = None,
    backup_name: str = None,
    compression: str = "gzip",
    exclude_patterns: List[str] = None,
    include_patterns: List[str] = None,
    verify: bool = True
) -> Dict[str, Any]:
    """
    Backup a Singularity sandbox directory or SIF file.

    Args:
        sandbox_dir: Path to sandbox directory or SIF file
        backup_dir: Directory for backups (default: <parent of source>/backups)
        backup_name: Name for backup file (default: <base>_backup_<timestamp>)
        compression: Compression method ("gzip", "bzip2", "xz", "none")
        exclude_patterns: fnmatch patterns to exclude (sandbox backups only)
        include_patterns: fnmatch patterns; when given, only matching files
            are archived — directories are always kept (sandbox backups only)
        verify: Verify backup after creation (delete it if verification fails)

    Returns:
        Dictionary with backup results (success, backup_path, checksum_path,
        checksum, sizes, compression, verified) or an error description.
    """
    # Local imports mirror the file's style of importing codecs on demand.
    import bz2
    import fnmatch
    import lzma

    sandbox_dir = os.path.expanduser(sandbox_dir)

    if not os.path.exists(sandbox_dir):
        return {
            "success": False,
            "error": f"Sandbox/SIF not found: {sandbox_dir}",
            "sandbox_dir": sandbox_dir
        }

    # Set backup directory
    if backup_dir is None:
        backup_dir = os.path.join(os.path.dirname(sandbox_dir), "backups")

    backup_dir = os.path.expanduser(backup_dir)
    os.makedirs(backup_dir, exist_ok=True)

    # Set backup name
    if backup_name is None:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        base_name = os.path.basename(sandbox_dir.rstrip('/'))
        backup_name = f"{base_name}_backup_{timestamp}"

    # Determine if it's a directory (sandbox) or file (SIF)
    is_dir = os.path.isdir(sandbox_dir)

    backup_path = os.path.join(backup_dir, backup_name)

    print(f"Creating backup of {sandbox_dir}...")
    print(f"Backup destination: {backup_path}")
    print(f"Type: {'Directory' if is_dir else 'File'}")
    print(f"Compression: {compression}")

    def _matches(name, patterns):
        # Match against both the full archive path and the basename so
        # patterns like "*.log" and "cache/*" both work.
        return any(
            fnmatch.fnmatch(name, p) or fnmatch.fnmatch(os.path.basename(name), p)
            for p in patterns
        )

    def _tar_filter(member):
        # Apply exclude/include patterns (BUGFIX: these parameters were
        # previously accepted but silently ignored).
        if exclude_patterns and _matches(member.name, exclude_patterns):
            return None
        if include_patterns and not member.isdir() and not _matches(member.name, include_patterns):
            return None
        return member

    def _sha256_of(fileobj):
        # Stream a SHA-256 over an already-open binary file object.
        file_hash = hashlib.sha256()
        chunk = fileobj.read(8192)
        while chunk:
            file_hash.update(chunk)
            chunk = fileobj.read(8192)
        return file_hash.hexdigest()

    # compression -> (tarfile mode, extension) for sandbox backups,
    # and (opener, extension) for single-file (SIF) backups.
    tar_modes = {
        "gzip": ("w:gz", ".tar.gz"),
        "bzip2": ("w:bz2", ".tar.bz2"),
        "xz": ("w:xz", ".tar.xz"),
    }
    file_openers = {
        "gzip": (gzip.open, ".sif.gz"),
        "bzip2": (bz2.open, ".sif.bz2"),
        # BUGFIX: "xz" used to fall through to an uncompressed copy for SIFs.
        "xz": (lzma.open, ".sif.xz"),
    }

    try:
        if is_dir:
            # Backup directory (sandbox); unknown compression means plain tar.
            mode, ext = tar_modes.get(compression, ("w", ".tar"))
            backup_path += ext
            with tarfile.open(backup_path, mode) as tar:
                tar.add(sandbox_dir, arcname=os.path.basename(sandbox_dir), filter=_tar_filter)

        else:
            # Backup file (SIF); patterns do not apply to a single file.
            if compression in file_openers:
                opener, ext = file_openers[compression]
                backup_path += ext
                with open(sandbox_dir, "rb") as f_in, opener(backup_path, "wb") as f_out:
                    shutil.copyfileobj(f_in, f_out)
            else:  # "none" or unknown
                backup_path += ".sif"
                shutil.copy2(sandbox_dir, backup_path)

        # Get backup size
        backup_size = os.path.getsize(backup_path)
        backup_size_mb = backup_size / (1024 * 1024)

        print(f"✓ Backup created: {backup_path}")
        print(f" Size: {backup_size_mb:.2f} MB")

        # Verify backup
        verified = None
        if verify:
            print("Verifying backup...")
            if is_dir:
                # Verify the tar archive is readable end to end.
                try:
                    with tarfile.open(backup_path, "r") as tar:
                        tar.getmembers()  # Just try to read
                    print("✓ Backup verified (tar archive is readable)")
                    verified = True
                except Exception as e:
                    print(f"✗ Backup verification failed: {e}")
                    verified = False
            else:
                # BUGFIX: the old check compared the compressed backup size to
                # the source size, so every compressed SIF backup "failed"
                # verification and was deleted. Compare content hashes instead.
                with open(sandbox_dir, "rb") as f:
                    source_hash = _sha256_of(f)
                if compression in file_openers:
                    opener = file_openers[compression][0]
                    with opener(backup_path, "rb") as f:
                        restored_hash = _sha256_of(f)
                else:
                    with open(backup_path, "rb") as f:
                        restored_hash = _sha256_of(f)
                if restored_hash == source_hash:
                    print("✓ Backup verified (content hashes match)")
                    verified = True
                else:
                    print("✗ Backup verification failed (content mismatch)")
                    verified = False

            if not verified:
                os.remove(backup_path)
                return {
                    "success": False,
                    "error": "Backup verification failed",
                    "backup_path": backup_path,
                    "sandbox_dir": sandbox_dir
                }

        # Create checksum of the backup artifact itself.
        print("Creating checksum...")
        with open(backup_path, "rb") as f:
            checksum = _sha256_of(f)

        # Save checksum to file (sha256sum-compatible "hash  name" layout).
        checksum_path = backup_path + ".sha256"
        with open(checksum_path, "w") as f:
            f.write(f"{checksum} {os.path.basename(backup_path)}\n")

        print(f"✓ Checksum saved: {checksum_path}")
        print(f" SHA256: {checksum}")

        return {
            "success": True,
            "backup_path": backup_path,
            "checksum_path": checksum_path,
            "checksum": checksum,
            "size_bytes": backup_size,
            "size_mb": backup_size_mb,
            "sandbox_dir": sandbox_dir,
            "compression": compression,
            "verified": verified if verify else None
        }

    except Exception as e:
        error_msg = f"Failed to create backup: {str(e)}"
        print(f"✗ {error_msg}")

        # Clean up failed backup
        if os.path.exists(backup_path):
            os.remove(backup_path)

        return {
            "success": False,
            "error": error_msg,
            "backup_path": backup_path,
            "sandbox_dir": sandbox_dir
        }
|
|
2307
|
+
|
|
2308
|
+
def _file_sha256(path: str) -> str:
    """Return the SHA-256 hex digest of *path*, reading in 8 KiB chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest()

def restore_singularity_environment(
    backup_path: str,
    restore_dir: str = None,
    restore_name: str = None,
    overwrite: bool = False,
    verify_checksum: bool = True
) -> Dict[str, Any]:
    """
    Restore a Singularity environment from backup.

    Supports three backup formats, detected from the filename: tar archives
    (sandbox directories), gzip/bzip2-compressed SIF files, and plain SIF
    files.

    Args:
        backup_path: Path to backup file
        restore_dir: Directory to restore to (defaults to the backup's
            own directory)
        restore_name: Name for restored environment (derived from the
            backup filename when omitted)
        overwrite: Overwrite existing environment
        verify_checksum: Verify backup checksum (from a sibling ``.sha256``
            file) before restore; a missing checksum file is only a warning

    Returns:
        Dictionary with restore results; ``"success"`` is True on success.
    """
    backup_path = os.path.expanduser(backup_path)

    if not os.path.exists(backup_path):
        return {
            "success": False,
            "error": f"Backup file not found: {backup_path}",
            "backup_path": backup_path
        }

    # Determine restore directory: default to the backup's own directory.
    if restore_dir is None:
        restore_dir = os.path.dirname(backup_path)

    restore_dir = os.path.expanduser(restore_dir)

    # Determine restore name from the backup filename when not given.
    if restore_name is None:
        base_name = os.path.basename(backup_path)
        # Remove backup suffix and extensions
        restore_name = base_name.replace("_backup_", "_").split(".")[0]

    restore_path = os.path.join(restore_dir, restore_name)

    print(f"Restoring from backup: {backup_path}")
    print(f"Restore destination: {restore_path}")

    # Refuse to clobber an existing environment unless explicitly allowed.
    if os.path.exists(restore_path) and not overwrite:
        return {
            "success": False,
            "error": f"Restore path already exists: {restore_path}",
            "backup_path": backup_path,
            "restore_path": restore_path,
            "suggestion": "Use overwrite=True to overwrite"
        }

    # Verify checksum if requested
    if verify_checksum:
        checksum_path = backup_path + ".sha256"
        if os.path.exists(checksum_path):
            print("Verifying backup checksum...")

            with open(checksum_path, "r") as f:
                expected_checksum = f.read().split()[0]

            actual_checksum = _file_sha256(backup_path)

            if expected_checksum == actual_checksum:
                print(f"✓ Checksum verified")
            else:
                print(f"✗ Checksum mismatch!")
                print(f"  Expected: {expected_checksum}")
                print(f"  Actual: {actual_checksum}")
                return {
                    "success": False,
                    "error": "Checksum verification failed",
                    "backup_path": backup_path,
                    "expected_checksum": expected_checksum,
                    "actual_checksum": actual_checksum
                }
        else:
            print(f"⚠️ Checksum file not found: {checksum_path}")

    try:
        # Remove existing if overwriting
        if os.path.exists(restore_path):
            if os.path.isdir(restore_path):
                shutil.rmtree(restore_path)
            else:
                os.remove(restore_path)

        # Determine backup type from the filename and restore accordingly.
        if backup_path.endswith((".tar.gz", ".tar.bz2", ".tar.xz", ".tar")):
            # Directory backup (sandbox)
            print("Restoring directory (sandbox)...")

            # NOTE(review): extractall trusts the archive's member paths;
            # only restore backups created by this tool (Python 3.12+ could
            # pass filter="data" to harden this).
            with tarfile.open(backup_path, "r:*") as tar:
                tar.extractall(path=restore_dir)

                # Find the top-level directory the archive extracted to.
                # getnames() must be called while the archive is still
                # open; on a closed TarFile it raises OSError.
                extracted_members = tar.getnames()
                if extracted_members:
                    # The first member should be the directory
                    extracted_dir = extracted_members[0].split('/')[0]
                    actual_restore_path = os.path.join(restore_dir, extracted_dir)

                    # Rename to the requested name if it differs.
                    if actual_restore_path != restore_path:
                        os.rename(actual_restore_path, restore_path)

        elif backup_path.endswith((".sif.gz", ".sif.bz2")):
            # Compressed SIF file
            print("Restoring compressed SIF file...")

            if backup_path.endswith(".sif.gz"):
                with gzip.open(backup_path, "rb") as f_in:
                    with open(restore_path, "wb") as f_out:
                        shutil.copyfileobj(f_in, f_out)

            elif backup_path.endswith(".sif.bz2"):
                import bz2
                with bz2.open(backup_path, "rb") as f_in:
                    with open(restore_path, "wb") as f_out:
                        shutil.copyfileobj(f_in, f_out)

        elif backup_path.endswith(".sif"):
            # Plain SIF file: straight copy preserving metadata.
            print("Restoring SIF file...")
            shutil.copy2(backup_path, restore_path)

        else:
            return {
                "success": False,
                "error": f"Unsupported backup format: {backup_path}",
                "backup_path": backup_path
            }

        # Verify the restore actually produced the destination.
        if os.path.exists(restore_path):
            restore_size = os.path.getsize(restore_path) if os.path.isfile(restore_path) else None
            restore_size_mb = restore_size / (1024 * 1024) if restore_size else None

            print(f"✓ Restore successful: {restore_path}")
            if restore_size_mb:
                print(f"  Size: {restore_size_mb:.2f} MB")

            return {
                "success": True,
                "restore_path": restore_path,
                "backup_path": backup_path,
                "size_bytes": restore_size,
                "size_mb": restore_size_mb
            }
        else:
            return {
                "success": False,
                "error": "Restore failed - destination not created",
                "backup_path": backup_path,
                "restore_path": restore_path
            }

    except Exception as e:
        error_msg = f"Failed to restore: {str(e)}"
        print(f"✗ {error_msg}")

        # Clean up a partially-restored destination so retries start clean.
        if os.path.exists(restore_path):
            if os.path.isdir(restore_path):
                shutil.rmtree(restore_path)
            else:
                os.remove(restore_path)

        return {
            "success": False,
            "error": error_msg,
            "backup_path": backup_path,
            "restore_path": restore_path
        }
|
|
2496
|
+
def migrate_singularity_environment(
    source_path: str,
    destination_host: str,
    destination_path: str = None,
    ssh_user: str = None,
    ssh_key: str = None,
    compress: bool = True,
    exclude_patterns: List[str] = None,
    resume: bool = False
) -> Dict[str, Any]:
    """
    Migrate a Singularity environment to another host via rsync over SSH.

    Args:
        source_path: Path to sandbox or SIF on source
        destination_host: Destination host (user@hostname)
        destination_path: Path on destination host (defaults to
            ``/home/<user>/singularity_envs/<basename>``)
        ssh_user: SSH username (if not in destination_host; defaults to
            the current local user)
        ssh_key: Path to SSH key
        compress: Compress during transfer
        exclude_patterns: Patterns to exclude
        resume: Resume interrupted transfer

    Returns:
        Dictionary with migration results
    """
    source_path = os.path.expanduser(source_path)

    if not os.path.exists(source_path):
        return {
            "success": False,
            "error": f"Source not found: {source_path}",
            "source_path": source_path
        }

    # Parse destination host
    if "@" in destination_host:
        ssh_user, hostname = destination_host.split("@", 1)
    else:
        hostname = destination_host
        ssh_user = ssh_user or getpass.getuser()

    # Set destination path
    if destination_path is None:
        destination_path = f"/home/{ssh_user}/singularity_envs/{os.path.basename(source_path)}"

    print(f"Migrating Singularity environment to {destination_host}...")
    print(f"Source: {source_path}")
    print(f"Destination: {ssh_user}@{hostname}:{destination_path}")
    print(f"Compress: {compress}")
    print(f"Resume: {resume}")

    # Build rsync command. Fix: the original used "-avz", which forces
    # compression on regardless of the `compress` flag (and then appended
    # a redundant "--compress"); compression is now controlled solely by
    # the flag below.
    rsync_cmd = ["rsync", "-av", "--progress"]

    if resume:
        rsync_cmd.append("--partial")
        rsync_cmd.append("--append")

    if compress:
        rsync_cmd.append("--compress")

    # Add exclude patterns
    if exclude_patterns:
        for pattern in exclude_patterns:
            rsync_cmd.extend(["--exclude", pattern])

    # Add SSH options
    ssh_options = []
    if ssh_key:
        ssh_options.extend(["-i", ssh_key])

    if ssh_options:
        rsync_cmd.extend(["-e", f"ssh {' '.join(ssh_options)}"])

    # Add source and destination
    rsync_cmd.append(source_path)
    rsync_cmd.append(f"{ssh_user}@{hostname}:{destination_path}")

    print(f"\nCommand: {' '.join(rsync_cmd)}")
    print("-" * 60)

    try:
        # Execute rsync; check=True raises CalledProcessError on failure.
        result = subprocess.run(
            rsync_cmd,
            capture_output=True,
            text=True,
            check=True
        )

        print(f"\n✓ Migration successful")
        print(f"Destination: {ssh_user}@{hostname}:{destination_path}")

        # Test that destination is accessible (best-effort; a failure
        # here only produces a warning, not an error result).
        print("\nTesting destination...")
        test_cmd = ["ssh"]
        if ssh_key:
            test_cmd.extend(["-i", ssh_key])
        test_cmd.extend([f"{ssh_user}@{hostname}", f"ls -la {destination_path}"])

        test_result = subprocess.run(
            test_cmd,
            capture_output=True,
            text=True
        )

        if test_result.returncode == 0:
            print(f"✓ Destination verified")
        else:
            print(f"⚠️ Could not verify destination: {test_result.stderr}")

        return {
            "success": True,
            "source_path": source_path,
            "destination": f"{ssh_user}@{hostname}:{destination_path}",
            "destination_host": hostname,
            "destination_path": destination_path,
            "ssh_user": ssh_user,
            "output": result.stdout,
            "stderr": result.stderr
        }

    except subprocess.CalledProcessError as e:
        error_msg = f"Migration failed: {e.stderr}"
        print(f"\n✗ {error_msg}")

        return {
            "success": False,
            "error": error_msg,
            "source_path": source_path,
            "destination": f"{ssh_user}@{hostname}:{destination_path}",
            "stderr": e.stderr,
            "stdout": e.stdout,
            "returncode": e.returncode
        }

    except Exception as e:
        # e.g. FileNotFoundError when rsync/ssh is not installed.
        error_msg = f"Unexpected error during migration: {str(e)}"
        print(f"\n✗ {error_msg}")

        return {
            "success": False,
            "error": error_msg,
            "source_path": source_path,
            "destination": f"{ssh_user}@{hostname}:{destination_path}"
        }
|
|
2644
|
+
def scan_singularity_image(
    image_path: str,
    scan_type: str = "security",
    output_format: str = "text",
    output_file: str = None,
    check_vulnerabilities: bool = True,
    check_malware: bool = False,
    check_secrets: bool = True,
    check_config: bool = True
) -> Dict[str, Any]:
    """
    Scan a Singularity image for security issues and vulnerabilities.

    Vulnerability, malware, and secret scanning are placeholders that only
    record a recommendation — they require external tools (grype/trivy,
    trufflehog) that this function does not invoke.

    Args:
        image_path: Path to Singularity image
        scan_type: Type of scan (security, vulnerabilities, all)
        output_format: Output format (text, json, html)
        output_file: Output file path (report also returned in the result)
        check_vulnerabilities: Check for known vulnerabilities
        check_malware: Check for malware (requires external tools)
        check_secrets: Check for exposed secrets
        check_config: Check configuration issues

    Returns:
        Dictionary with scan results
    """
    image_path = os.path.expanduser(image_path)

    if not os.path.exists(image_path):
        return {
            "success": False,
            "error": f"Image not found: {image_path}",
            "image_path": image_path
        }

    print(f"Scanning Singularity image: {image_path}")
    print(f"Scan type: {scan_type}")
    print(f"Output format: {output_format}")

    # "success": True added for consistency with the error return above.
    results = {
        "success": True,
        "image_path": image_path,
        "scan_type": scan_type,
        "timestamp": datetime.now().isoformat(),
        "checks": {},
        "issues": [],
        "warnings": [],
        "recommendations": []
    }

    # Check 1: Basic image information
    print("\n1. Basic image information...")
    try:
        inspect_result = subprocess.run(
            ["singularity", "inspect", image_path],
            capture_output=True,
            text=True
        )

        if inspect_result.returncode == 0:
            results["checks"]["inspect"] = "success"
            results["image_info"] = inspect_result.stdout
        else:
            results["checks"]["inspect"] = "failed"
            results["warnings"].append("Could not inspect image")
    except Exception:
        # singularity binary missing or not executable; record and move on.
        results["checks"]["inspect"] = "failed"

    # Check 2: File permissions and ownership
    print("2. Checking file permissions...")
    try:
        # Run a simple check inside container
        perm_check = run_singularity_command(
            image_path=image_path,
            command="find / -type f -perm /o+w -ls 2>/dev/null | head -20"
        )

        if perm_check["success"] and perm_check["stdout"].strip():
            results["issues"].append({
                "type": "permissions",
                "severity": "medium",
                "description": "World-writable files found",
                "details": perm_check["stdout"].strip().split('\n')
            })

        results["checks"]["permissions"] = "completed"
    except Exception:
        results["checks"]["permissions"] = "failed"

    # Check 3: Exposed secrets (placeholder - would use trufflehog or similar)
    if check_secrets:
        print("3. Checking for exposed secrets...")
        # This would require external tools like trufflehog, gitleaks, etc.
        results["checks"]["secrets"] = "skipped (requires external tools)"
        results["recommendations"].append("Install and run trufflehog for secret scanning")

    # Check 4: Known vulnerabilities (placeholder)
    if check_vulnerabilities:
        print("4. Checking for known vulnerabilities...")
        # This would require external tools like grype, trivy, etc.
        results["checks"]["vulnerabilities"] = "skipped (requires external tools)"
        results["recommendations"].append("Install and run grype or trivy for vulnerability scanning")

    # Check 5: Configuration issues
    if check_config:
        print("5. Checking configuration...")
        try:
            # Check environment variables for credential-like names.
            env_check = run_singularity_command(
                image_path=image_path,
                command="env | grep -i 'pass\|key\|secret\|token' | head -10"
            )

            if env_check["success"] and env_check["stdout"].strip():
                results["issues"].append({
                    "type": "environment",
                    "severity": "high",
                    "description": "Potential secrets in environment variables",
                    "details": env_check["stdout"].strip().split('\n')
                })

            results["checks"]["configuration"] = "completed"
        except Exception:
            results["checks"]["configuration"] = "failed"

    # Summary
    total_issues = len(results["issues"])
    total_warnings = len(results["warnings"])

    print(f"\nScan completed: {total_issues} issues, {total_warnings} warnings")

    # Generate output
    if output_file:
        print(f"Writing output to: {output_file}")

        if output_format == "json":
            with open(output_file, "w") as f:
                json.dump(results, f, indent=2)
        elif output_format == "html":
            # Generate HTML report
            html_content = generate_html_report(results)
            with open(output_file, "w") as f:
                f.write(html_content)
        else:  # text
            with open(output_file, "w") as f:
                f.write(generate_text_report(results))

    return results
|
|
2792
|
+
def generate_html_report(results: Dict[str, Any]) -> str:
    """Generate an HTML report from scan results.

    All values sourced from the scan results are HTML-escaped before
    interpolation, so scan output (file listings, environment values)
    cannot inject markup into the report.
    """
    from html import escape

    def esc(value: Any) -> str:
        # Escape arbitrary scan-result values for safe HTML embedding.
        return escape(str(value))

    html = f"""<!DOCTYPE html>
<html>
<head>
    <title>Singularity Image Scan Report</title>
    <style>
        body {{ font-family: Arial, sans-serif; margin: 40px; }}
        .header {{ background: #f0f0f0; padding: 20px; border-radius: 5px; }}
        .section {{ margin: 20px 0; padding: 15px; border: 1px solid #ddd; border-radius: 5px; }}
        .issue {{ background: #ffe6e6; padding: 10px; margin: 10px 0; border-left: 4px solid #ff4444; }}
        .warning {{ background: #fff3cd; padding: 10px; margin: 10px 0; border-left: 4px solid #ffc107; }}
        .recommendation {{ background: #d4edda; padding: 10px; margin: 10px 0; border-left: 4px solid #28a745; }}
        .check {{ background: #e9ecef; padding: 5px; margin: 5px 0; }}
        .success {{ color: #28a745; }}
        .failed {{ color: #dc3545; }}
        .severity-high {{ color: #dc3545; font-weight: bold; }}
        .severity-medium {{ color: #ffc107; font-weight: bold; }}
        .severity-low {{ color: #17a2b8; font-weight: bold; }}
    </style>
</head>
<body>
    <div class="header">
        <h1>Singularity Image Scan Report</h1>
        <p><strong>Image:</strong> {esc(results.get('image_path', 'Unknown'))}</p>
        <p><strong>Scan Type:</strong> {esc(results.get('scan_type', 'Unknown'))}</p>
        <p><strong>Timestamp:</strong> {esc(results.get('timestamp', 'Unknown'))}</p>
    </div>

    <div class="section">
        <h2>Summary</h2>
        <p>Issues: {len(results.get('issues', []))}</p>
        <p>Warnings: {len(results.get('warnings', []))}</p>
        <p>Recommendations: {len(results.get('recommendations', []))}</p>
    </div>
"""

    # Issues
    if results.get('issues'):
        html += """
    <div class="section">
        <h2>Issues Found</h2>
"""
        for issue in results['issues']:
            severity = esc(issue.get('severity', 'medium'))
            html += f"""
        <div class="issue">
            <h3 class="severity-{severity}">{esc(issue.get('type', 'Unknown'))} - {severity.upper()}</h3>
            <p>{esc(issue.get('description', 'No description'))}</p>
"""
            if issue.get('details'):
                html += "<ul>"
                for detail in issue['details']:
                    html += f"<li>{esc(detail)}</li>"
                html += "</ul>"
            html += "</div>"
        html += "</div>"

    # Warnings
    if results.get('warnings'):
        html += """
    <div class="section">
        <h2>Warnings</h2>
"""
        for warning in results['warnings']:
            html += f'<div class="warning">{esc(warning)}</div>'
        html += "</div>"

    # Recommendations
    if results.get('recommendations'):
        html += """
    <div class="section">
        <h2>Recommendations</h2>
"""
        for recommendation in results['recommendations']:
            html += f'<div class="recommendation">{esc(recommendation)}</div>'
        html += "</div>"

    # Checks performed
    if results.get('checks'):
        html += """
    <div class="section">
        <h2>Checks Performed</h2>
"""
        for check, status in results['checks'].items():
            status_class = "success" if status == "success" or status == "completed" else "failed"
            html += f'<div class="check"><span class="{status_class}">●</span> {esc(check)}: {esc(status)}</div>'
        html += "</div>"

    html += """
</body>
</html>
"""

    return html
|
|
2887
|
+
def generate_text_report(results: Dict[str, Any]) -> str:
    """Render scan results as a plain-text report.

    Sections (issues, warnings, recommendations, checks) are emitted only
    when the corresponding key is present and non-empty.
    """
    divider = "=" * 50
    parts = [f"""Singularity Image Scan Report
{divider}

Image: {results.get('image_path', 'Unknown')}
Scan Type: {results.get('scan_type', 'Unknown')}
Timestamp: {results.get('timestamp', 'Unknown')}

SUMMARY
{divider}
Issues: {len(results.get('issues', []))}
Warnings: {len(results.get('warnings', []))}
Recommendations: {len(results.get('recommendations', []))}

"""]

    # Issues
    if results.get('issues'):
        parts.append("ISSUES FOUND\n")
        parts.append(divider + "\n")
        for idx, issue in enumerate(results['issues'], 1):
            severity = issue.get('severity', 'medium').upper()
            parts.append(f"\n{idx}. [{severity}] {issue.get('type', 'Unknown')}\n")
            parts.append(f"   {issue.get('description', 'No description')}\n")
            if issue.get('details'):
                for detail in issue['details']:
                    parts.append(f"   - {detail}\n")

    # Warnings
    if results.get('warnings'):
        parts.append("\nWARNINGS\n")
        parts.append(divider + "\n")
        for idx, warning in enumerate(results['warnings'], 1):
            parts.append(f"{idx}. {warning}\n")

    # Recommendations
    if results.get('recommendations'):
        parts.append("\nRECOMMENDATIONS\n")
        parts.append(divider + "\n")
        for idx, recommendation in enumerate(results['recommendations'], 1):
            parts.append(f"{idx}. {recommendation}\n")

    # Checks performed
    if results.get('checks'):
        parts.append("\nCHECKS PERFORMED\n")
        parts.append(divider + "\n")
        for check, status in results['checks'].items():
            marker = "✓" if status in ("success", "completed") else "✗"
            parts.append(f"{marker} {check}: {status}\n")

    return "".join(parts)
|
|
2939
|
+
def load_apptainer_config(config_file: str) -> Dict[str, Any]:
    """
    Load Apptainer/Singularity configuration from file.

    JSON files are always supported; YAML files require the optional
    PyYAML dependency (YAML_AVAILABLE). Any failure — missing file,
    unsupported format, parse error — is reported as a warning and an
    empty dict is returned.

    Args:
        config_file: Path to configuration file

    Returns:
        Dictionary with configuration (empty on any failure)
    """
    path = os.path.expanduser(config_file)

    if not os.path.exists(path):
        print(f"Warning: Config file not found: {path}")
        return {}

    try:
        with open(path, "r") as fh:
            if path.endswith(".json"):
                return json.load(fh)
            if path.endswith((".yaml", ".yml")) and YAML_AVAILABLE:
                return yaml.safe_load(fh)
            print(f"Warning: Unsupported config format: {path}")
            return {}
    except Exception as exc:
        print(f"Warning: Could not load config file {path}: {exc}")
        return {}
|
|
2968
|
+
def save_apptainer_config(
    config: Dict[str, Any],
    config_file: str,
    format: str = "json"
) -> bool:
    """
    Save Apptainer/Singularity configuration to file.

    Args:
        config: Configuration dictionary
        config_file: Path to configuration file
        format: File format (json, yaml); yaml requires the optional
            PyYAML dependency (YAML_AVAILABLE), otherwise falls back to JSON

    Returns:
        True if successful, False otherwise
    """
    config_file = os.path.expanduser(config_file)

    try:
        # Only create parent directories when the path actually has one:
        # os.makedirs("") raises FileNotFoundError for bare filenames,
        # which previously made saving to the current directory fail.
        parent_dir = os.path.dirname(config_file)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)

        with open(config_file, "w") as f:
            if format == "json":
                json.dump(config, f, indent=2)
            elif format == "yaml" and YAML_AVAILABLE:
                yaml.dump(config, f, default_flow_style=False)
            else:
                print(f"Warning: Unsupported format {format}, using JSON")
                json.dump(config, f, indent=2)

        print(f"✓ Configuration saved to {config_file}")
        return True

    except Exception as e:
        print(f"✗ Failed to save configuration: {e}")
        return False
|
|
3005
|
+
def list_singularity_images(
    directory: str = None,
    recursive: bool = False,
    show_details: bool = False,
    filter_type: str = None
) -> List[Dict[str, Any]]:
    """
    List Singularity images in a directory.

    Args:
        directory: Directory to search (defaults to the current directory)
        recursive: Search recursively
        show_details: Show detailed information (runs ``singularity inspect``
            per image; requires the singularity binary)
        filter_type: Filter by type (sif, sandbox, all; None means all)

    Returns:
        List of image information dictionaries with keys
        "path", "type", "size", "modified" (and "details" when requested)
    """
    if directory is None:
        directory = os.getcwd()

    directory = os.path.expanduser(directory)

    if not os.path.exists(directory):
        print(f"Directory not found: {directory}")
        return []

    images = []

    # Define what file patterns to look for.
    # Fix: the original left `patterns` unassigned when filter_type ==
    # "sandbox", raising NameError in the loop below.
    if filter_type == "sif":
        patterns = ["*.sif"]
    elif filter_type == "sandbox":
        # Sandboxes are directories, not files with extensions.
        patterns = []
    else:
        patterns = ["*.sif", "*.simg"]

    # Find SIF files
    for pattern in patterns:
        suffix = pattern.replace("*", "")
        if recursive:
            for root, dirs, files in os.walk(directory):
                for file in files:
                    if file.endswith(suffix):
                        filepath = os.path.join(root, file)
                        images.append({
                            "path": filepath,
                            "type": "sif",
                            "size": os.path.getsize(filepath),
                            "modified": os.path.getmtime(filepath)
                        })
        else:
            for file in os.listdir(directory):
                if file.endswith(suffix):
                    filepath = os.path.join(directory, file)
                    images.append({
                        "path": filepath,
                        "type": "sif",
                        "size": os.path.getsize(filepath),
                        "modified": os.path.getmtime(filepath)
                    })

    # Find sandboxes (directories that might be sandboxes).
    # Heuristic: a directory containing .singularity.d metadata, or one
    # that has both bin/ and etc/ (parenthesized to make the intended
    # A or (B and C) grouping explicit).
    if filter_type in [None, "sandbox", "all"]:
        if recursive:
            for root, dirs, files in os.walk(directory):
                for dir_name in dirs:
                    dirpath = os.path.join(root, dir_name)
                    if os.path.exists(os.path.join(dirpath, ".singularity.d")) or \
                       (os.path.exists(os.path.join(dirpath, "bin")) and
                        os.path.exists(os.path.join(dirpath, "etc"))):
                        images.append({
                            "path": dirpath,
                            "type": "sandbox",
                            "size": get_directory_size(dirpath),
                            "modified": os.path.getmtime(dirpath)
                        })
        else:
            for item in os.listdir(directory):
                itempath = os.path.join(directory, item)
                if os.path.isdir(itempath):
                    if os.path.exists(os.path.join(itempath, ".singularity.d")) or \
                       (os.path.exists(os.path.join(itempath, "bin")) and
                        os.path.exists(os.path.join(itempath, "etc"))):
                        images.append({
                            "path": itempath,
                            "type": "sandbox",
                            "size": get_directory_size(itempath),
                            "modified": os.path.getmtime(itempath)
                        })

    # Add details if requested
    if show_details:
        for img in images:
            try:
                inspect_result = subprocess.run(
                    ["singularity", "inspect", img["path"]],
                    capture_output=True,
                    text=True
                )
                if inspect_result.returncode == 0:
                    img["details"] = inspect_result.stdout
            except Exception:
                img["details"] = "Could not inspect"

    return images
|
|
3115
|
+
def get_directory_size(path: str) -> int:
    """
    Get total size of a directory.

    Symlinks are skipped; files that disappear or are unreadable while
    walking are ignored (best-effort total).

    Args:
        path: Directory path

    Returns:
        Size in bytes
    """
    total = 0
    for dirpath, _dirnames, filenames in os.walk(path):
        for name in filenames:
            fp = os.path.join(dirpath, name)
            # Skip symlinks so linked-to data is not double counted.
            if not os.path.islink(fp):
                try:
                    total += os.path.getsize(fp)
                except OSError:
                    # File vanished or is inaccessible: previously a bare
                    # except, which also swallowed KeyboardInterrupt etc.
                    pass
    return total
|
|
3137
|
+
def clean_singularity_cache(
    cache_type: str = "all",
    force: bool = False,
    dry_run: bool = False
) -> Dict[str, Any]:
    """
    Clean Singularity/Apptainer cache.

    Args:
        cache_type: Type of cache to clean (all, library, oci, blob, shub, net)
        force: Force cleanup without confirmation
        dry_run: Show what would be removed without actually removing

    Returns:
        Dictionary with cleanup results
    """
    availability = check_apptainer_availability()
    if not availability["available"]:
        return {
            "success": False,
            "error": "Apptainer/Singularity not available",
            "message": availability["message"]
        }

    runtime = availability["command"]

    # Assemble the "cache clean" invocation.
    clean_invocation = [runtime, "cache", "clean"]
    if cache_type != "all":
        clean_invocation.append(cache_type)
    if force:
        clean_invocation.append("--force")

    if dry_run:
        # "cache clean" offers no dry-run mode, so show the cache
        # listing instead of deleting anything.
        print("DRY RUN - Showing what would be cleaned:")
        listing = subprocess.run(
            [runtime, "cache", "list"], capture_output=True, text=True
        )
        if listing.returncode != 0:
            return {
                "success": False,
                "dry_run": True,
                "error": f"Failed to list cache: {listing.stderr}",
                "cache_type": cache_type
            }
        print(listing.stdout)
        return {
            "success": True,
            "dry_run": True,
            "output": listing.stdout,
            "cache_type": cache_type
        }

    print(f"Cleaning {cache_type} cache...")

    try:
        completed = subprocess.run(
            clean_invocation,
            capture_output=True,
            text=True,
            check=True
        )
    except subprocess.CalledProcessError as exc:
        failure = f"Failed to clean cache: {exc.stderr}"
        print(f"✗ {failure}")
        return {
            "success": False,
            "error": failure,
            "cache_type": cache_type,
            "stderr": exc.stderr,
            "stdout": exc.stdout,
            "returncode": exc.returncode
        }

    print(f"✓ Cache cleaned")
    if completed.stdout:
        print(completed.stdout)

    return {
        "success": True,
        "cache_type": cache_type,
        "output": completed.stdout,
        "stderr": completed.stderr
    }
|
|
3228
|
+
|
|
3229
|
+
def create_singularity_overlay(
    size_mb: int = 1024,
    overlay_file: str = "overlay.img",
    filesystem: str = "ext3",
    sparse: bool = True
) -> Dict[str, Any]:
    """
    Create a writable overlay filesystem for Singularity.

    Args:
        size_mb: Size of overlay in MB
        overlay_file: Path to overlay file
        filesystem: Filesystem type (ext3, ext4, xfs)
        sparse: Create sparse file (allocate on demand)

    Returns:
        Dictionary with overlay creation results
    """
    overlay_file = os.path.expanduser(overlay_file)

    # Refuse to clobber an existing overlay file.
    if os.path.exists(overlay_file):
        return {
            "success": False,
            "error": f"Overlay file already exists: {overlay_file}",
            "overlay_file": overlay_file,
            "suggestion": "Remove existing file or choose different name"
        }

    print(f"Creating overlay filesystem: {overlay_file}")
    print(f"Size: {size_mb} MB")
    print(f"Filesystem: {filesystem}")
    print(f"Sparse: {sparse}")

    byte_count = size_mb * 1024 * 1024

    try:
        with open(overlay_file, "wb") as handle:
            if sparse:
                # Seek past the end and write one byte: the hole stays
                # unallocated on disk until actually written to.
                handle.seek(byte_count - 1)
                handle.write(b'\0')
            else:
                # Fully allocate the backing file up front.
                handle.write(b'\0' * byte_count)

        # Lay down the requested filesystem inside the file.
        mkfs_result = subprocess.run(
            ["mkfs", "-t", filesystem, overlay_file],
            capture_output=True,
            text=True,
            check=True
        )

        print(f"✓ Overlay created: {overlay_file}")
        print(f" Size: {size_mb} MB")
        print(f" Filesystem: {filesystem}")

        return {
            "success": True,
            "overlay_file": overlay_file,
            "size_mb": size_mb,
            "filesystem": filesystem,
            "sparse": sparse,
            "output": mkfs_result.stdout
        }

    except subprocess.CalledProcessError as exc:
        failure = f"Failed to create overlay: {exc.stderr}"
        print(f"✗ {failure}")

        # Clean up the half-created file.
        if os.path.exists(overlay_file):
            os.remove(overlay_file)

        return {
            "success": False,
            "error": failure,
            "overlay_file": overlay_file,
            "stderr": exc.stderr
        }

    except Exception as exc:
        failure = f"Unexpected error: {str(exc)}"
        print(f"✗ {failure}")

        # Clean up the half-created file.
        if os.path.exists(overlay_file):
            os.remove(overlay_file)

        return {
            "success": False,
            "error": failure,
            "overlay_file": overlay_file
        }
|
|
3324
|
+
|
|
3325
|
+
def test_singularity_environment(
|
|
3326
|
+
image_path: str,
|
|
3327
|
+
test_commands: List[str] = None,
|
|
3328
|
+
bind_mounts: Dict[str, str] = None,
|
|
3329
|
+
working_dir: str = "/workspace",
|
|
3330
|
+
timeout: int = 30
|
|
3331
|
+
) -> Dict[str, Any]:
|
|
3332
|
+
"""
|
|
3333
|
+
Test a Singularity environment with various commands.
|
|
3334
|
+
|
|
3335
|
+
Args:
|
|
3336
|
+
image_path: Path to Singularity image
|
|
3337
|
+
test_commands: Commands to test
|
|
3338
|
+
bind_mounts: Bind mounts for testing
|
|
3339
|
+
working_dir: Working directory for tests
|
|
3340
|
+
timeout: Timeout for each command in seconds
|
|
3341
|
+
|
|
3342
|
+
Returns:
|
|
3343
|
+
Dictionary with test results
|
|
3344
|
+
"""
|
|
3345
|
+
if test_commands is None:
|
|
3346
|
+
test_commands = [
|
|
3347
|
+
"python3 --version",
|
|
3348
|
+
"pip3 --version",
|
|
3349
|
+
"git --version",
|
|
3350
|
+
"gcc --version",
|
|
3351
|
+
"make --version",
|
|
3352
|
+
"curl --version",
|
|
3353
|
+
"which bash",
|
|
3354
|
+
"ls -la /"
|
|
3355
|
+
]
|
|
3356
|
+
|
|
3357
|
+
if bind_mounts is None:
|
|
3358
|
+
bind_mounts = {}
|
|
3359
|
+
|
|
3360
|
+
image_path = os.path.expanduser(image_path)
|
|
3361
|
+
|
|
3362
|
+
if not os.path.exists(image_path):
|
|
3363
|
+
return {
|
|
3364
|
+
"success": False,
|
|
3365
|
+
"error": f"Image not found: {image_path}",
|
|
3366
|
+
"image_path": image_path
|
|
3367
|
+
}
|
|
3368
|
+
|
|
3369
|
+
print(f"Testing Singularity environment: {image_path}")
|
|
3370
|
+
print(f"Number of tests: {len(test_commands)}")
|
|
3371
|
+
print(f"Timeout: {timeout} seconds per test")
|
|
3372
|
+
|
|
3373
|
+
results = {
|
|
3374
|
+
"image_path": image_path,
|
|
3375
|
+
"tests": [],
|
|
3376
|
+
"passed": 0,
|
|
3377
|
+
"failed": 0,
|
|
3378
|
+
"total": len(test_commands)
|
|
3379
|
+
}
|
|
3380
|
+
|
|
3381
|
+
for i, cmd in enumerate(test_commands, 1):
|
|
3382
|
+
print(f"\nTest {i}/{len(test_commands)}: {cmd}")
|
|
3383
|
+
|
|
3384
|
+
try:
|
|
3385
|
+
test_result = run_singularity_command(
|
|
3386
|
+
image_path=image_path,
|
|
3387
|
+
command=cmd,
|
|
3388
|
+
bind_mounts=bind_mounts,
|
|
3389
|
+
working_dir=working_dir
|
|
3390
|
+
)
|
|
3391
|
+
|
|
3392
|
+
test_info = {
|
|
3393
|
+
"command": cmd,
|
|
3394
|
+
"success": test_result["success"],
|
|
3395
|
+
"returncode": test_result.get("returncode"),
|
|
3396
|
+
"stdout": test_result.get("stdout", "").strip(),
|
|
3397
|
+
"stderr": test_result.get("stderr", "").strip()
|
|
3398
|
+
}
|
|
3399
|
+
|
|
3400
|
+
if test_result["success"]:
|
|
3401
|
+
print(f" ✓ Passed")
|
|
3402
|
+
results["passed"] += 1
|
|
3403
|
+
else:
|
|
3404
|
+
print(f" ✗ Failed (return code: {test_result.get('returncode')})")
|
|
3405
|
+
if test_result.get("stderr"):
|
|
3406
|
+
print(f" Error: {test_result['stderr'][:100]}...")
|
|
3407
|
+
results["failed"] += 1
|
|
3408
|
+
|
|
3409
|
+
results["tests"].append(test_info)
|
|
3410
|
+
|
|
3411
|
+
except Exception as e:
|
|
3412
|
+
print(f" ✗ Error: {str(e)}")
|
|
3413
|
+
results["tests"].append({
|
|
3414
|
+
"command": cmd,
|
|
3415
|
+
"success": False,
|
|
3416
|
+
"error": str(e)
|
|
3417
|
+
})
|
|
3418
|
+
results["failed"] += 1
|
|
3419
|
+
|
|
3420
|
+
# Summary
|
|
3421
|
+
print(f"\n{'=' * 50}")
|
|
3422
|
+
print(f"TEST SUMMARY")
|
|
3423
|
+
print(f"{'=' * 50}")
|
|
3424
|
+
print(f"Total tests: {results['total']}")
|
|
3425
|
+
print(f"Passed: {results['passed']}")
|
|
3426
|
+
print(f"Failed: {results['failed']}")
|
|
3427
|
+
print(f"Success rate: {(results['passed'] / results['total'] * 100):.1f}%")
|
|
3428
|
+
|
|
3429
|
+
results["success_rate"] = results["passed"] / results["total"] * 100
|
|
3430
|
+
results["overall_success"] = results["failed"] == 0
|
|
3431
|
+
|
|
3432
|
+
if results["overall_success"]:
|
|
3433
|
+
print(f"\n✓ All tests passed!")
|
|
3434
|
+
else:
|
|
3435
|
+
print(f"\n⚠️ Some tests failed")
|
|
3436
|
+
|
|
3437
|
+
return results
|
|
3438
|
+
|
|
3439
|
+
def generate_singularity_usage_guide(
    image_path: str,
    output_file: str = None,
    format: str = "text"
) -> str:
    """
    Generate a usage guide for a Singularity image.

    Args:
        image_path: Path to Singularity image
        output_file: Output file path (guide is also returned either way)
        format: Output format (text, markdown, html).
            NOTE(review): "html" is not implemented and currently falls
            back to the text template -- confirm intended behavior.

    Returns:
        Usage guide content (or an "Error: ..." string if the image
        path does not exist)
    """
    image_path = os.path.expanduser(image_path)

    if not os.path.exists(image_path):
        return f"Error: Image not found: {image_path}"

    # Get image information
    # BUG FIX: was a bare "except:", which also swallowed
    # KeyboardInterrupt/SystemExit; narrowed to Exception.
    try:
        inspect_result = subprocess.run(
            ["singularity", "inspect", image_path],
            capture_output=True,
            text=True
        )

        if inspect_result.returncode != 0:
            image_info = "Could not inspect image"
        else:
            image_info = inspect_result.stdout
    except Exception:
        # Covers e.g. FileNotFoundError when singularity is not installed.
        image_info = "Could not inspect image"

    # Determine image type.
    # NOTE(review): for a sandbox directory os.path.getsize below returns
    # the directory entry size, not the content size -- confirm whether
    # get_directory_size() should be used instead.
    is_sandbox = os.path.isdir(image_path)
    image_type = "sandbox" if is_sandbox else "SIF"
    image_name = os.path.basename(image_path)

    # Generate guide
    if format == "markdown":
        guide = f"""# Singularity Image Usage Guide

## Image Information
- **Name**: {image_name}
- **Type**: {image_type}
- **Path**: {image_path}
- **Size**: {os.path.getsize(image_path) / (1024*1024):.1f} MB

## Basic Commands

### 1. Interactive Shell
```bash
singularity shell {image_path}
### 2. Run a Command
singularity exec {image_path} <command>
#### Example:
singularity exec {image_path} python --version
### 3. With Bind Mounts
singularity shell --bind /host/path:/container/path {image_path}
### 4. With GPU Support (NVIDIA)
singularity shell --nv {image_path}
####Advanced Usage
##### Environment Variables
singularity exec --env MY_VAR=value {image_path} env
##### Working Directory
singularity exec --pwd /workspace {image_path} pwd
#### Fakeroot (Unprivileged)
#### HPC/Cluster Usage
#### Copy to Cluster
scp {image_path} user@cluster:/path/to/containers/
#### SLURM Job Script
#!/bin/bash
#SBATCH --job-name=singularity_job
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
#SBATCH --mem=8G

module load singularity

singularity exec {image_path} python /workspace/script.py
#### Image Information
{image_info}
#### Notes
1. Use `singularity inspect {image_path}` for detailed information
2. Use `singularity run {image_path}` if the image has a runscript
3. Use `singularity test {image_path}` to run container tests
#### Help
Singularity documentation: https://docs.sylabs.io/
Apptainer documentation: https://apptainer.org/docs/
"""
    else:  # text format
        guide = f"""Singularity Image Usage Guide
{'=' * 60}

IMAGE INFORMATION
{'=' * 60}
Name: {image_name}
Type: {image_type}
Path: {image_path}
Size: {os.path.getsize(image_path) / (1024*1024):.1f} MB

BASIC COMMANDS
{'=' * 60}

Interactive Shell
singularity shell {image_path}
Run a Command
singularity exec {image_path} <command>
Example: singularity exec {image_path} python --version
With Bind Mounts
singularity shell --bind /host/path:/container/path {image_path}
With GPU Support (NVIDIA)
singularity shell --nv {image_path}
ADVANCED USAGE
{'=' * 60}

Environment Variables
singularity exec --env MY_VAR=value {image_path} env
Working Directory
singularity exec --pwd /workspace {image_path} pwd
Fakeroot (Unprivileged)
singularity shell --fakeroot {image_path}
HPC/CLUSTER USAGE
{'=' * 60}

Copy to Cluster
scp {image_path} user@cluster:/path/to/containers/
SLURM Job Script Example
#!/bin/bash
#SBATCH --job-name=singularity_job
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
#SBATCH --mem=8G
module load singularity
singularity exec {image_path} python /workspace/script.py
IMAGE INFORMATION
{'=' * 60}
{image_info}

NOTES
{'=' * 60}
• Use 'singularity inspect {image_path}' for detailed information
• Use 'singularity run {image_path}' if the image has a runscript
• Use 'singularity test {image_path}' to run container tests

HELP
{'=' * 60}
• Singularity documentation: https://docs.sylabs.io/
• Apptainer documentation: https://apptainer.org/docs/
"""
    # Save to file if requested
    if output_file:
        output_file = os.path.expanduser(output_file)
        with open(output_file, "w") as f:
            f.write(guide)
        print(f"✓ Usage guide saved to: {output_file}")

    return guide
|
|
3602
|
+
def _parse_bind_mounts(bind_args) -> Dict[str, str]:
    """Turn a list of 'host:container' strings into a bind-mount dict."""
    mounts = {}
    for bind in bind_args or []:
        if ":" in bind:
            host, container = bind.split(":", 1)
            mounts[host] = container
    return mounts


def _parse_env_vars(env_args) -> Dict[str, str]:
    """Turn a list of 'KEY=VALUE' strings into an environment dict."""
    env_vars = {}
    for item in env_args or []:
        if "=" in item:
            key, value = item.split("=", 1)
            env_vars[key] = value
    return env_vars


def main():
    """Main command-line interface.

    Builds the argument parser with one sub-parser per command, then
    dispatches to the corresponding implementation function. Exits with
    status 1 when the selected operation reports failure.
    """
    parser = argparse.ArgumentParser(
        description="Apptainer2LS - Ultimate Singularity/Apptainer Development Environment Tool",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s create --work-dir ~/singularity_ws --image-name dev_env
  %(prog)s shell --image ~/singularity_ws/dev_env_sandbox
  %(prog)s test --image ~/singularity_ws/dev_env.sif
  %(prog)s convert --docker ubuntu:22.04 --output ubuntu.sif
"""
    )
    parser.add_argument(
        "--version", "-v",
        action="version",
        version=f"Apptainer2LS v{__version__}"
    )

    subparsers = parser.add_subparsers(dest="command", help="Command to execute")

    # Create command
    create_parser = subparsers.add_parser("create", help="Create a development sandbox")
    create_parser.add_argument("--work-dir", default="~/singularity_workstation", help="Workspace directory")
    create_parser.add_argument("--base-image", default="docker://ubuntu:22.04", help="Base container image")
    create_parser.add_argument("--image-name", default="dev_sandbox", help="Name for the Singularity image")
    create_parser.add_argument("--packages", nargs="+", help="Additional system packages")
    create_parser.add_argument("--pip-packages", nargs="+", help="Python packages to install")
    create_parser.add_argument("--conda-packages", nargs="+", help="Conda packages to install")
    create_parser.add_argument("--gpu-support", action="store_true", help="Enable GPU support")
    create_parser.add_argument("--cuda-version", help="CUDA version for GPU support")
    create_parser.add_argument("--mpi-support", action="store_true", help="Enable MPI support")
    create_parser.add_argument("--force-rebuild", action="store_true", help="Force rebuild even if exists")
    create_parser.add_argument("--config", help="Configuration file")

    # Shell command
    shell_parser = subparsers.add_parser("shell", help="Start interactive shell in container")
    shell_parser.add_argument("--image", required=True, help="Path to Singularity image/sandbox")
    shell_parser.add_argument("--bind", nargs="+", help="Bind mounts (host:container)")
    shell_parser.add_argument("--gpu", action="store_true", help="Enable GPU support")
    shell_parser.add_argument("--workdir", help="Working directory in container")
    shell_parser.add_argument("--env", nargs="+", help="Environment variables (KEY=VALUE)")

    # Exec command
    exec_parser = subparsers.add_parser("exec", help="Execute command in container")
    exec_parser.add_argument("--image", required=True, help="Path to Singularity image/sandbox")
    exec_parser.add_argument("--bind", nargs="+", help="Bind mounts (host:container)")
    exec_parser.add_argument("--gpu", action="store_true", help="Enable GPU support")
    exec_parser.add_argument("--workdir", help="Working directory in container")
    exec_parser.add_argument("--env", nargs="+", help="Environment variables (KEY=VALUE)")
    # BUG FIX: the positional used to be named "command", which collided
    # with the sub-parser dest="command" above and overwrote it, so the
    # 'elif args.command == "exec"' dispatch below could never match.
    # Using a distinct dest (metavar keeps the help text unchanged).
    exec_parser.add_argument("exec_command", metavar="command",
                             nargs=argparse.REMAINDER, help="Command to execute")

    # Convert command
    convert_parser = subparsers.add_parser("convert", help="Convert Docker to Singularity")
    convert_parser.add_argument("--docker", required=True, help="Docker image name")
    convert_parser.add_argument("--output", required=True, help="Output Singularity image path")
    convert_parser.add_argument("--sandbox", action="store_true", help="Create sandbox instead of SIF")
    convert_parser.add_argument("--force", action="store_true", help="Overwrite existing")

    # Test command
    test_parser = subparsers.add_parser("test", help="Test container environment")
    test_parser.add_argument("--image", required=True, help="Path to Singularity image/sandbox")
    test_parser.add_argument("--bind", nargs="+", help="Bind mounts (host:container)")
    test_parser.add_argument("--workdir", default="/workspace", help="Working directory for tests")

    # Backup command
    backup_parser = subparsers.add_parser("backup", help="Backup Singularity environment")
    backup_parser.add_argument("--image", required=True, help="Path to Singularity image/sandbox")
    backup_parser.add_argument("--output", help="Output backup file path")
    backup_parser.add_argument("--compress", choices=["gzip", "bzip2", "xz", "none"], default="gzip", help="Compression method")
    backup_parser.add_argument("--verify", action="store_true", help="Verify backup after creation")

    # Restore command
    restore_parser = subparsers.add_parser("restore", help="Restore Singularity environment")
    restore_parser.add_argument("--backup", required=True, help="Path to backup file")
    restore_parser.add_argument("--output", help="Output path for restored environment")
    restore_parser.add_argument("--overwrite", action="store_true", help="Overwrite existing")
    restore_parser.add_argument("--verify", action="store_true", default=True, help="Verify checksum before restore")

    # Scan command
    scan_parser = subparsers.add_parser("scan", help="Scan image for security issues")
    scan_parser.add_argument("--image", required=True, help="Path to Singularity image/sandbox")
    scan_parser.add_argument("--output", help="Output file for report")
    scan_parser.add_argument("--format", choices=["text", "json", "html"], default="text", help="Output format")

    # Batch command
    batch_parser = subparsers.add_parser("batch", help="Create batch job templates")
    batch_parser.add_argument("--job-name", default="singularity_job", help="Job name")
    batch_parser.add_argument("--image", help="Path to Singularity image")
    batch_parser.add_argument("--gpu", action="store_true", help="Include GPU support")
    batch_parser.add_argument("--output-dir", default=".", help="Output directory")

    # Check command
    check_parser = subparsers.add_parser("check", help="Check Apptainer/Singularity availability")

    # Install command
    install_parser = subparsers.add_parser("install", help="Install Apptainer/Singularity")

    # Guide command
    guide_parser = subparsers.add_parser("guide", help="Generate usage guide")
    guide_parser.add_argument("--image", required=True, help="Path to Singularity image/sandbox")
    guide_parser.add_argument("--output", help="Output file")
    guide_parser.add_argument("--format", choices=["text", "markdown", "html"], default="text", help="Output format")

    # Parse arguments
    args = parser.parse_args()

    if not args.command:
        parser.print_help()
        sys.exit(1)

    # Execute command
    if args.command == "create":
        # "create" defines no --bind/--env options; getattr keeps this
        # tolerant (env_vars is parsed but unused by the create call,
        # matching the original behavior).
        bind_mounts = _parse_bind_mounts(getattr(args, "bind", None))
        env_vars = _parse_env_vars(getattr(args, "env", None))

        result = create_singularity_workstation(
            work_dir=args.work_dir,
            base_image=args.base_image,
            image_name=args.image_name,
            packages=args.packages,
            pip_packages=args.pip_packages,
            conda_packages=args.conda_packages,
            bind_mounts=bind_mounts,
            force_rebuild=args.force_rebuild,
            gpu_support=args.gpu_support,
            cuda_version=args.cuda_version,
            mpi_support=args.mpi_support,
            config_file=args.config
        )

        if not result.get("success", False):
            sys.exit(1)

    elif args.command == "shell":
        shell_into_container(
            image_path=args.image,
            bind_mounts=_parse_bind_mounts(args.bind),
            environment=_parse_env_vars(args.env),
            working_dir=args.workdir,
            gpu=args.gpu
        )

    elif args.command == "exec":
        # BUG FIX: this used to test "args.command" (always the truthy
        # string "exec") instead of the positional remainder.
        if not args.exec_command:
            print("Error: No command specified")
            sys.exit(1)

        result = run_singularity_command(
            image_path=args.image,
            command=" ".join(args.exec_command),
            bind_mounts=_parse_bind_mounts(args.bind),
            environment=_parse_env_vars(args.env),
            working_dir=args.workdir,
            gpu=args.gpu
        )

        if not result.get("success", False):
            sys.exit(1)

        # Print output
        if result.get("stdout"):
            print(result["stdout"])
        if result.get("stderr"):
            print(result["stderr"], file=sys.stderr)

    elif args.command == "convert":
        result = convert_docker_to_singularity(
            docker_image=args.docker,
            singularity_image=args.output,
            sandbox=args.sandbox,
            force=args.force
        )

        if not result.get("success", False):
            sys.exit(1)

    elif args.command == "test":
        result = test_singularity_environment(
            image_path=args.image,
            bind_mounts=_parse_bind_mounts(args.bind),
            working_dir=args.workdir
        )

        if not result.get("overall_success", False):
            sys.exit(1)

    elif args.command == "backup":
        result = backup_singularity_environment(
            sandbox_dir=args.image,
            backup_dir=os.path.dirname(args.output) if args.output else None,
            backup_name=os.path.basename(args.output) if args.output else None,
            compression=args.compress,
            verify=args.verify
        )

        if not result.get("success", False):
            sys.exit(1)

    elif args.command == "restore":
        result = restore_singularity_environment(
            backup_path=args.backup,
            restore_dir=os.path.dirname(args.output) if args.output else None,
            restore_name=os.path.basename(args.output) if args.output else None,
            overwrite=args.overwrite,
            verify_checksum=args.verify
        )

        if not result.get("success", False):
            sys.exit(1)

    elif args.command == "scan":
        result = scan_singularity_image(
            image_path=args.image,
            output_format=args.format,
            output_file=args.output
        )

        if args.output:
            print(f"Scan report saved to: {args.output}")
        else:
            print(generate_text_report(result))

    elif args.command == "batch":
        result = create_batch_job_template(
            job_name=args.job_name,
            image_path=args.image,
            gpu_support=args.gpu,
            output_dir=args.output_dir
        )

        print(f"Batch job templates created in: {args.output_dir}")

    elif args.command == "check":
        result = check_apptainer_availability()
        print(f"Apptainer/Singularity availability:")
        print(f" Available: {result['available']}")
        print(f" Command: {result['command']}")
        print(f" Version: {result['version']}")
        print(f" Message: {result['message']}")

        if not result["available"]:
            sys.exit(1)

    elif args.command == "install":
        install_apptainer_tool()

    elif args.command == "guide":
        guide = generate_singularity_usage_guide(
            image_path=args.image,
            output_file=args.output,
            format=args.format
        )

        if not args.output:
            print(guide)
|
|
3912
|
+
|
|
3913
|
+
# BUG FIX: the guard read 'if name == "main__":', which raises NameError
# at import time ('name' is undefined) and would never match anyway.
if __name__ == "__main__":
    # Example invocations (Chinese comments preserved verbatim); printed
    # after main() runs, matching the original order of statements.
    usage_str = """
    # 检查Apptainer/Singularity可用性
    python apptainer2ls.py check

    # 创建开发沙盒
    python apptainer2ls.py create --work-dir ~/dev_ws --image-name my_env

    # 进入交互式Shell
    python apptainer2ls.py shell --image ~/dev_ws/my_env_sandbox

    # 执行命令
    python apptainer2ls.py exec --image my_env.sif -- python script.py

    # Docker转换
    python apptainer2ls.py convert --docker tensorflow/tensorflow:latest --output tf.sif

    # 备份环境
    python apptainer2ls.py backup --image my_env_sandbox --output backup.tar.gz

    # 安全扫描
    python apptainer2ls.py scan --image my_env.sif --output scan_report.html

    # 生成批处理模板
    python apptainer2ls.py batch --job-name my_job --gpu --output-dir ./jobs
    """
    main()
    print(usage_str)
|