clonebox 1.1.4-py3-none-any.whl → 1.1.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
clonebox/cloner.py CHANGED
@@ -3,28 +3,40 @@
 SelectiveVMCloner - Creates isolated VMs with only selected apps/paths/services.
 """

+import base64
 import json
+import logging
 import os
+import secrets
+import shutil
+import string
 import subprocess
 import tempfile
+import time
 import urllib.request
 import uuid
 import xml.etree.ElementTree as ET
 from dataclasses import dataclass, field
+from datetime import datetime
 from pathlib import Path
-from typing import Optional
-
-try:
-    from dotenv import load_dotenv
-
-    load_dotenv()
-except ImportError:
-    pass  # dotenv is optional
+from typing import Any, Dict, List, Optional, Tuple

 try:
     import libvirt
 except ImportError:
     libvirt = None
+import yaml
+
+from clonebox.di import get_container
+from clonebox.interfaces.disk import DiskManager
+from clonebox.interfaces.hypervisor import HypervisorBackend
+from clonebox.interfaces.network import NetworkManager
+from clonebox.logging import get_logger, log_operation
+from clonebox.resources import ResourceLimits
+from clonebox.rollback import vm_creation_transaction
+from clonebox.secrets import SecretsManager, SSHKeyPair
+
+log = get_logger(__name__)

 SNAP_INTERFACES = {
     "pycharm-community": [
@@ -96,6 +108,10 @@ class VMConfig:
         default_factory=lambda: os.getenv("VM_AUTOSTART_APPS", "true").lower() == "true"
     )  # Auto-start GUI apps after login (desktop autostart)
     web_services: list = field(default_factory=list)  # Web services to start (uvicorn, etc.)
+    resources: dict = field(default_factory=dict)  # Resource limits (cpu, memory, disk, network)
+    auth_method: str = "ssh_key"  # ssh_key | one_time_password | password
+    ssh_public_key: Optional[str] = None
+    shutdown_after_setup: bool = False

     def to_dict(self) -> dict:
         return {
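
Illustration (editorial, not part of the diff): the four new VMConfig fields above might be populated like this. The field names come from the hunk; the concrete values, and the assumption that existing 1.1.4 fields such as name keep their signature, are mine.

    config = VMConfig(
        name="dev-box",                 # existing field, assumed unchanged
        resources={                     # consumed later via ResourceLimits.from_dict
            "cpu": {"vcpus": 4},
            "memory": {"limit": "8192M"},
        },
        auth_method="ssh_key",          # or "one_time_password" / "password"
        ssh_public_key="ssh-ed25519 AAAA... user@host",
        shutdown_after_setup=False,
    )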
@@ -111,12 +127,29 @@ class SelectiveVMCloner:
     Uses bind mounts instead of full disk cloning.
     """

-    def __init__(self, conn_uri: str = None, user_session: bool = False):
+    def __init__(
+        self,
+        conn_uri: str = None,
+        user_session: bool = False,
+        hypervisor: HypervisorBackend = None,
+        disk_manager: DiskManager = None,
+        network_manager: NetworkManager = None,
+        secrets_manager: SecretsManager = None,
+    ):
         self.user_session = user_session
+        container = get_container()
+
+        # Resolve dependencies
+        self.hypervisor = hypervisor or container.resolve(HypervisorBackend)
+        self.disk = disk_manager or container.resolve(DiskManager)
+        # self.network = network_manager or container.resolve(NetworkManager)
+        self.secrets = secrets_manager or container.resolve(SecretsManager)
+
         if conn_uri:
             self.conn_uri = conn_uri
         else:
             self.conn_uri = "qemu:///session" if user_session else "qemu:///system"
+
         self.conn = None
         self._connect()

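Illustration (editorial): the constructor now resolves HypervisorBackend, DiskManager and SecretsManager from the DI container unless they are passed in, so a test can hand in doubles directly. A minimal sketch, assuming the rest of __init__ (which still calls self._connect()) can reach libvirt or is patched:

    from unittest.mock import MagicMock
    from clonebox.cloner import SelectiveVMCloner

    cloner = SelectiveVMCloner(
        user_session=True,
        hypervisor=MagicMock(),       # stands in for a HypervisorBackend
        disk_manager=MagicMock(),     # stands in for a DiskManager
        secrets_manager=MagicMock(),  # stands in for a SecretsManager
    )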
@@ -176,52 +209,52 @@ class SelectiveVMCloner:
         return Path.home() / "Downloads"

     def _ensure_default_base_image(self, console=None) -> Path:
-        def log(msg):
-            if console:
-                console.print(msg)
-            else:
-                print(msg)
-
-        downloads_dir = self._get_downloads_dir()
-        downloads_dir.mkdir(parents=True, exist_ok=True)
-        cached_path = downloads_dir / self.DEFAULT_BASE_IMAGE_FILENAME
+        """Ensure a default Ubuntu 22.04 base image is available."""
+        with log_operation(log, "vm.ensure_base_image"):
+            downloads_dir = self._get_downloads_dir()
+            downloads_dir.mkdir(parents=True, exist_ok=True)
+            cached_path = downloads_dir / self.DEFAULT_BASE_IMAGE_FILENAME
+
+            if cached_path.exists() and cached_path.stat().st_size > 0:
+                return cached_path
+
+            log.info(
+                "Downloading base image (first run only). This will be cached in ~/Downloads...",
+                url=self.DEFAULT_BASE_IMAGE_URL,
+            )

-        if cached_path.exists() and cached_path.stat().st_size > 0:
-            return cached_path
+            try:
+                import urllib.request

-        log(
-            "[cyan]⬇️ Downloading base image (first run only). This will be cached in ~/Downloads...[/]"
-        )
+                with tempfile.NamedTemporaryFile(
+                    prefix=f"{self.DEFAULT_BASE_IMAGE_FILENAME}.",
+                    dir=str(downloads_dir),
+                    delete=False,
+                ) as tmp:
+                    tmp_path = Path(tmp.name)

-        try:
-            with tempfile.NamedTemporaryFile(
-                prefix=f"{self.DEFAULT_BASE_IMAGE_FILENAME}.",
-                dir=str(downloads_dir),
-                delete=False,
-            ) as tmp:
-                tmp_path = Path(tmp.name)
+                try:
+                    urllib.request.urlretrieve(self.DEFAULT_BASE_IMAGE_URL, tmp_path)
+                    tmp_path.replace(cached_path)
+                finally:
+                    if tmp_path.exists() and tmp_path != cached_path:
+                        try:
+                            tmp_path.unlink()
+                        except Exception:
+                            pass
+            except Exception as e:
+                log.error(f"Failed to download base image: {e}")
+                raise RuntimeError(
+                    "Failed to download a default base image.\n\n"
+                    "🔧 Solutions:\n"
+                    " 1. Provide a base image explicitly:\n"
+                    " clonebox clone . --base-image /path/to/image.qcow2\n"
+                    " 2. Download it manually and reuse it:\n"
+                    f" wget -O {cached_path} {self.DEFAULT_BASE_IMAGE_URL}\n\n"
+                    f"Original error: {e}"
+                ) from e

-            try:
-                urllib.request.urlretrieve(self.DEFAULT_BASE_IMAGE_URL, tmp_path)
-                tmp_path.replace(cached_path)
-            finally:
-                if tmp_path.exists() and tmp_path != cached_path:
-                    try:
-                        tmp_path.unlink()
-                    except Exception:
-                        pass
-        except Exception as e:
-            raise RuntimeError(
-                "Failed to download a default base image.\n\n"
-                "🔧 Solutions:\n"
-                " 1. Provide a base image explicitly:\n"
-                " clonebox clone . --base-image /path/to/image.qcow2\n"
-                " 2. Download it manually and reuse it:\n"
-                f" wget -O {cached_path} {self.DEFAULT_BASE_IMAGE_URL}\n\n"
-                f"Original error: {e}"
-            ) from e
-
-        return cached_path
+            return cached_path

     def _default_network_active(self) -> bool:
         """Check if libvirt default network is active."""
@@ -318,126 +351,115 @@ class SelectiveVMCloner:
         Returns:
             UUID of created VM
         """
-
-        def log(msg):
-            if console:
-                console.print(msg)
-            else:
-                print(msg)
-
-        # If VM already exists, optionally replace it
-        existing_vm = None
-        try:
-            candidate_vm = self.conn.lookupByName(config.name)
-            if candidate_vm is not None:
-                # libvirt returns a domain object whose .name() should match the requested name.
-                # In tests, an unconfigured MagicMock may be returned here; avoid treating that as
-                # a real existing domain unless we can confirm the name matches.
+        with log_operation(
+            log, "vm.create", vm_name=config.name, ram_mb=config.ram_mb
+        ):
+            with vm_creation_transaction(self, config, console) as ctx:
+                # If VM already exists, optionally replace it
+                existing_vm = None
                 try:
-                    if hasattr(candidate_vm, "name") and callable(candidate_vm.name):
-                        if candidate_vm.name() == config.name:
+                    candidate_vm = self.conn.lookupByName(config.name)
+                    if candidate_vm is not None:
+                        try:
+                            if hasattr(candidate_vm, "name") and callable(candidate_vm.name):
+                                if candidate_vm.name() == config.name:
+                                    existing_vm = candidate_vm
+                                else:
+                                    existing_vm = candidate_vm
+                        except Exception:
                             existing_vm = candidate_vm
-                    else:
-                        existing_vm = candidate_vm
                 except Exception:
-                    existing_vm = candidate_vm
-        except Exception:
-            existing_vm = None
-
-        if existing_vm is not None:
-            if not replace:
-                raise RuntimeError(
-                    f"VM '{config.name}' already exists.\n\n"
-                    f"🔧 Solutions:\n"
-                    f" 1. Reuse existing VM: clonebox start {config.name}\n"
-                    f" 2. Replace it: clonebox clone . --name {config.name} --replace\n"
-                    f" 3. Delete it: clonebox delete {config.name}\n"
-                )
-
-            log(f"[yellow]⚠️ VM '{config.name}' already exists - replacing...[/]")
-            self.delete_vm(config.name, delete_storage=True, console=console, ignore_not_found=True)
-
-        # Determine images directory
-        images_dir = self.get_images_dir()
-        vm_dir = images_dir / config.name
-
-        try:
-            vm_dir.mkdir(parents=True, exist_ok=True)
-        except PermissionError as e:
-            raise PermissionError(
-                f"Cannot create VM directory: {vm_dir}\n\n"
-                f"🔧 Solutions:\n"
-                f" 1. Use --user flag to run in user session (recommended):\n"
-                f" clonebox clone . --user\n\n"
-                f" 2. Run with sudo (not recommended):\n"
-                f" sudo clonebox clone .\n\n"
-                f" 3. Fix directory permissions:\n"
-                f" sudo mkdir -p {images_dir}\n"
-                f" sudo chown -R $USER:libvirt {images_dir}\n\n"
-                f"Original error: {e}"
-            ) from e
-
-        # Create root disk
-        root_disk = vm_dir / "root.qcow2"
-
-        if not config.base_image:
-            config.base_image = str(self._ensure_default_base_image(console=console))
-
-        if config.base_image and Path(config.base_image).exists():
-            # Use backing file for faster creation
-            log(f"[cyan]📀 Creating disk with backing file: {config.base_image}[/]")
-            cmd = [
-                "qemu-img",
-                "create",
-                "-f",
-                "qcow2",
-                "-b",
-                config.base_image,
-                "-F",
-                "qcow2",
-                str(root_disk),
-                f"{config.disk_size_gb}G",
-            ]
-        else:
-            # Create empty disk
-            log(f"[cyan]📀 Creating empty {config.disk_size_gb}GB disk...[/]")
-            cmd = ["qemu-img", "create", "-f", "qcow2", str(root_disk), f"{config.disk_size_gb}G"]
-
-        subprocess.run(cmd, check=True, capture_output=True)
+                    existing_vm = None
+
+                if existing_vm is not None:
+                    if not replace:
+                        raise RuntimeError(
+                            f"VM '{config.name}' already exists.\n\n"
+                            f"🔧 Solutions:\n"
+                            f" 1. Reuse existing VM: clonebox start {config.name}\n"
+                            f" 2. Replace it: clonebox clone . --name {config.name} --replace\n"
+                            f" 3. Delete it: clonebox delete {config.name}\n"
+                        )

-        # Create cloud-init ISO if packages/services specified
-        cloudinit_iso = None
-        if config.packages or config.services:
-            cloudinit_iso = self._create_cloudinit_iso(vm_dir, config)
-            log(f"[cyan]☁️ Created cloud-init ISO with {len(config.packages)} packages[/]")
+                    log.info(f"VM '{config.name}' already exists - replacing...")
+                    self.delete_vm(config.name, delete_storage=True, console=console, ignore_not_found=True)

-        # Resolve network mode
-        network_mode = self.resolve_network_mode(config)
-        if network_mode == "user":
-            log(
-                "[yellow]⚠️ Using user-mode networking (slirp) because default libvirt network is unavailable[/]"
-            )
-        else:
-            log(f"[dim]Network mode: {network_mode}[/]")
-
-        # Generate VM XML
-        vm_xml = self._generate_vm_xml(config, root_disk, cloudinit_iso)
+                # Determine images directory
+                images_dir = self.get_images_dir()
+                try:
+                    vm_dir = ctx.add_directory(images_dir / config.name)
+                    vm_dir.mkdir(parents=True, exist_ok=True)
+                except PermissionError as e:
+                    raise PermissionError(
+                        f"Cannot create VM directory: {images_dir / config.name}\n\n"
+                        f"🔧 Solutions:\n"
+                        f" 1. Use --user flag to run in user session (recommended):\n"
+                        f" clonebox clone . --user\n\n"
+                        f" 2. Run with sudo (not recommended):\n"
+                        f" sudo clonebox clone .\n\n"
+                        f" 3. Fix directory permissions:\n"
+                        f" sudo mkdir -p {images_dir}\n"
+                        f" sudo chown -R $USER:libvirt {images_dir}\n\n"
+                        f"Original error: {e}"
+                    ) from e
+
+                # Create root disk
+                root_disk = ctx.add_file(vm_dir / "root.qcow2")
+
+                if not config.base_image:
+                    config.base_image = str(self._ensure_default_base_image(console=console))
+
+                if config.base_image and Path(config.base_image).exists():
+                    # Use backing file for faster creation
+                    log.debug(f"Creating disk with backing file: {config.base_image}")
+                    cmd = [
+                        "qemu-img",
+                        "create",
+                        "-f",
+                        "qcow2",
+                        "-b",
+                        config.base_image,
+                        "-F",
+                        "qcow2",
+                        str(root_disk),
+                        f"{config.disk_size_gb}G",
+                    ]
+                else:
+                    # Create empty disk
+                    log.debug(f"Creating empty {config.disk_size_gb}GB disk...")
+                    cmd = ["qemu-img", "create", "-f", "qcow2", str(root_disk), f"{config.disk_size_gb}G"]
+
+                subprocess.run(cmd, check=True, capture_output=True)
+
+                # Create cloud-init ISO if packages/services specified
+                cloudinit_iso = None
+                if config.packages or config.services:
+                    cloudinit_iso = ctx.add_file(self._create_cloudinit_iso(vm_dir, config))
+                    log.info(f"Created cloud-init ISO with {len(config.packages)} packages")
+
+                # Generate VM XML
+                vm_xml = self._generate_vm_xml(config, root_disk, cloudinit_iso)
+                ctx.add_libvirt_domain(self.conn, config.name)
+
+                # Define VM
+                log.info(f"Defining VM '{config.name}'...")
+                try:
+                    vm = self.conn.defineXML(vm_xml)
+                except Exception as e:
+                    raise RuntimeError(
+                        f"Failed to define VM '{config.name}'.\n"
+                        f"Error: {e}\n\n"
+                        f"If the VM already exists, try: clonebox clone . --name {config.name} --replace\n"
+                    ) from e

-        # Define and create VM
-        log(f"[cyan]🔧 Defining VM '{config.name}'...[/]")
-        try:
-            vm = self.conn.defineXML(vm_xml)
-        except Exception as e:
-            raise RuntimeError(
-                f"Failed to define VM '{config.name}'.\n"
-                f"Error: {e}\n\n"
-                f"If the VM already exists, try: clonebox clone . --name {config.name} --replace\n"
-            ) from e
+                # Start if autostart requested
+                if getattr(config, "autostart", False):
+                    self.start_vm(config.name, open_viewer=True)

-        log(f"[green]✅ VM '{config.name}' created successfully![/]")
-        log(f"[dim] UUID: {vm.UUIDString()}[/]")
+                # All good - commit transaction
+                ctx.commit()

-        return vm.UUIDString()
+                return vm.UUIDString()

     def _generate_vm_xml(
         self, config: VMConfig = None, root_disk: Path = None, cloudinit_iso: Optional[Path] = None
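
Illustration (editorial): create_vm now registers everything it creates (the VM directory, root.qcow2, the cloud-init ISO, the defined domain) on the transaction context and only calls ctx.commit() at the end. The real implementation lives in clonebox.rollback and is not shown in this diff; the sketch below is only an assumption about the general shape of such a context manager, using hypothetical names:

    import contextlib
    import shutil
    from pathlib import Path

    class _CreationCtx:
        """Hypothetical stand-in for the object yielded by vm_creation_transaction."""

        def __init__(self):
            self._paths = []
            self._committed = False

        def add_file(self, path: Path) -> Path:
            self._paths.append(Path(path))   # remember artifact for cleanup on failure
            return Path(path)

        add_directory = add_file             # directories tracked the same way in this sketch

        def commit(self) -> None:
            self._committed = True           # success: keep all artifacts

        def rollback(self) -> None:
            for p in reversed(self._paths):
                if p.is_dir():
                    shutil.rmtree(p, ignore_errors=True)
                elif p.exists():
                    p.unlink()

    @contextlib.contextmanager
    def creation_transaction():
        ctx = _CreationCtx()
        try:
            yield ctx
        except Exception:
            if not ctx._committed:
                ctx.rollback()               # undo partial artifacts, then re-raise
            raise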
@@ -446,22 +468,34 @@ class SelectiveVMCloner:

         # Backward compatibility: if called without args, try to derive defaults
         if config is None:
-            # Create a default config for backward compatibility
             config = VMConfig()
         if root_disk is None:
-            # Use a default path for backward compatibility
             root_disk = Path("/var/lib/libvirt/images/default-disk.qcow2")
-        if cloudinit_iso is None:
-            cloudinit_iso = None
+
+        # Get resource limits from config or defaults
+        resource_data = getattr(config, "resources", {})
+        if not resource_data:
+            # Fallback to top-level fields
+            resource_data = {
+                "cpu": {"vcpus": config.vcpus},
+                "memory": {"limit": f"{config.ram_mb}M"},
+            }
+
+        limits = ResourceLimits.from_dict(resource_data)

         root = ET.Element("domain", type="kvm")

         # Basic metadata
         ET.SubElement(root, "name").text = config.name
         ET.SubElement(root, "uuid").text = str(uuid.uuid4())
-        ET.SubElement(root, "memory", unit="MiB").text = str(config.ram_mb)
-        ET.SubElement(root, "currentMemory", unit="MiB").text = str(config.ram_mb)
-        ET.SubElement(root, "vcpu", placement="static").text = str(config.vcpus)
+
+        # Memory configuration using limits
+        limit_kib = limits.memory.limit_bytes // 1024
+        ET.SubElement(root, "memory", unit="KiB").text = str(limit_kib)
+        ET.SubElement(root, "currentMemory", unit="KiB").text = str(limit_kib)
+
+        # CPU configuration
+        ET.SubElement(root, "vcpu", placement="static").text = str(limits.cpu.vcpus)

         # OS configuration
         os_elem = ET.SubElement(root, "os")
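
A quick worked example of the fallback path above, assuming ResourceLimits treats the "M" suffix as MiB (that parsing lives in clonebox.resources and is not shown in this diff):

    ram_mb = 4096                        # VMConfig.ram_mb
    limit_bytes = ram_mb * 1024 * 1024   # "4096M" -> 4294967296 bytes, if M means MiB
    limit_kib = limit_bytes // 1024      # 4194304, emitted as <memory unit="KiB">4194304</memory>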
@@ -473,6 +507,35 @@ class SelectiveVMCloner:
         ET.SubElement(features, "acpi")
         ET.SubElement(features, "apic")

+        # Resource tuning (CPU and Memory)
+        cputune_xml = limits.cpu.to_libvirt_xml()
+        if cputune_xml:
+            # We append pre-generated XML string later or use ET to parse it
+            # For simplicity with existing ET code, we'll use SubElement for basic ones
+            # and manual string insertion for complex tuning if needed,
+            # but let's try to stick to ET where possible.
+            pass
+
+        # CPU tuning element
+        if limits.cpu.shares or limits.cpu.quota or limits.cpu.pin:
+            cputune = ET.SubElement(root, "cputune")
+            ET.SubElement(cputune, "shares").text = str(limits.cpu.shares)
+            if limits.cpu.quota:
+                ET.SubElement(cputune, "period").text = str(limits.cpu.period)
+                ET.SubElement(cputune, "quota").text = str(limits.cpu.quota)
+            if limits.cpu.pin:
+                for idx, cpu in enumerate(limits.cpu.pin):
+                    ET.SubElement(cputune, "vcpupin", vcpu=str(idx), cpuset=str(cpu))
+
+        # Memory tuning element
+        if limits.memory.soft_limit or limits.memory.swap:
+            memtune = ET.SubElement(root, "memtune")
+            ET.SubElement(memtune, "hard_limit", unit="KiB").text = str(limit_kib)
+            if limits.memory.soft_limit_bytes:
+                ET.SubElement(memtune, "soft_limit", unit="KiB").text = str(limits.memory.soft_limit_bytes // 1024)
+            if limits.memory.swap_bytes:
+                ET.SubElement(memtune, "swap_hard_limit", unit="KiB").text = str(limits.memory.swap_bytes // 1024)
+
         # CPU
         ET.SubElement(root, "cpu", mode="host-passthrough", check="none")
@@ -487,6 +550,18 @@ class SelectiveVMCloner:
         ET.SubElement(disk, "driver", name="qemu", type="qcow2", cache="writeback")
         ET.SubElement(disk, "source", file=str(root_disk))
         ET.SubElement(disk, "target", dev="vda", bus="virtio")
+
+        # Disk I/O tuning
+        if limits.disk.read_bps or limits.disk.write_bps or limits.disk.read_iops or limits.disk.write_iops:
+            iotune = ET.SubElement(disk, "iotune")
+            if limits.disk.read_bps_bytes:
+                ET.SubElement(iotune, "read_bytes_sec").text = str(limits.disk.read_bps_bytes)
+            if limits.disk.write_bps_bytes:
+                ET.SubElement(iotune, "write_bytes_sec").text = str(limits.disk.write_bps_bytes)
+            if limits.disk.read_iops:
+                ET.SubElement(iotune, "read_iops_sec").text = str(limits.disk.read_iops)
+            if limits.disk.write_iops:
+                ET.SubElement(iotune, "write_iops_sec").text = str(limits.disk.write_iops)

         # Cloud-init ISO
         if cloudinit_iso:
@@ -516,6 +591,15 @@ class SelectiveVMCloner:
         iface = ET.SubElement(devices, "interface", type="network")
         ET.SubElement(iface, "source", network="default")
         ET.SubElement(iface, "model", type="virtio")
+
+        # Network bandwidth tuning
+        if limits.network.inbound or limits.network.outbound:
+            bandwidth = ET.SubElement(iface, "bandwidth")
+            if limits.network.inbound_kbps:
+                # average in KB/s
+                ET.SubElement(bandwidth, "inbound", average=str(limits.network.inbound_kbps // 8))
+            if limits.network.outbound_kbps:
+                ET.SubElement(bandwidth, "outbound", average=str(limits.network.outbound_kbps // 8))

         # Serial console
         serial = ET.SubElement(devices, "serial", type="pty")
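
Taken together, the last four hunks map a single resources mapping onto libvirt's cputune, memtune, disk iotune and interface bandwidth elements. A hypothetical value, inferred only from the attribute names read above (the exact keys and units ResourceLimits.from_dict accepts are not shown in this diff):

    resources = {
        "cpu": {"vcpus": 4, "shares": 1024, "period": 100000, "quota": 200000, "pin": [0, 1]},
        "memory": {"limit": "8192M", "soft_limit": "6144M", "swap": "8192M"},
        "disk": {"read_bps": "100M", "write_bps": "50M", "read_iops": 2000, "write_iops": 1000},
        "network": {"inbound": "100M", "outbound": "50M"},
    }
    # VMConfig(resources=resources) would then drive the tuning elements generated above.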
@@ -1114,7 +1198,50 @@ fi
         return encoded

     def _create_cloudinit_iso(self, vm_dir: Path, config: VMConfig) -> Path:
-        """Create cloud-init ISO with user-data and meta-data."""
+        """Create cloud-init ISO with secure credential handling."""
+        secrets_mgr = SecretsManager()
+
+        # Determine authentication method
+        auth_method = getattr(config, "auth_method", "ssh_key")
+
+        ssh_authorized_keys = []
+        chpasswd_config = ""
+        lock_passwd = "true"
+        ssh_pwauth = "false"
+        bootcmd_extra = []
+
+        if auth_method == "ssh_key":
+            ssh_key_path = vm_dir / "ssh_key"
+            provided_key = getattr(config, "ssh_public_key", None)
+
+            if provided_key:
+                ssh_authorized_keys = [provided_key]
+            else:
+                key_pair = SSHKeyPair.generate()
+                key_pair.save(ssh_key_path)
+                ssh_authorized_keys = [key_pair.public_key]
+                log.info(f"SSH key generated and saved to: {ssh_key_path}")
+
+        elif auth_method == "one_time_password":
+            otp, chpasswd_raw = SecretsManager.generate_one_time_password()
+            chpasswd_config = chpasswd_raw
+            bootcmd_extra = [
+                ' - echo "===================="',
+                f' - echo "ONE-TIME PASSWORD: {otp}"',
+                ' - echo "You MUST change this on first login!"',
+                ' - echo "===================="',
+            ]
+            lock_passwd = "false"
+            ssh_pwauth = "true"
+            log.warning("One-time password generated. It will be shown on VM console.")
+
+        else:
+            # Fallback to legacy password from environment/secrets
+            password = secrets_mgr.get("VM_PASSWORD") or getattr(config, "password", "ubuntu")
+            chpasswd_config = f"chpasswd:\n list: |\n {config.username}:{password}\n expire: False"
+            lock_passwd = "false"
+            ssh_pwauth = "true"
+            log.warning("DEPRECATED: Using password authentication. Switch to 'ssh_key' for better security.")

         cloudinit_dir = vm_dir / "cloud-init"
         cloudinit_dir.mkdir(exist_ok=True)
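
Illustration (editorial): how a caller might opt into each of the three auth_method branches above. Only SSHKeyPair.generate(), .save() and .public_key are taken from the hunk; reading an existing public key from disk is an assumption made for the example:

    from pathlib import Path

    # 1) Bring your own key: it lands in ssh_authorized_keys, password login stays off.
    config.auth_method = "ssh_key"
    config.ssh_public_key = Path("~/.ssh/id_ed25519.pub").expanduser().read_text().strip()

    # 2) Let clonebox generate one: leave ssh_public_key unset and the code above
    #    calls SSHKeyPair.generate() and saves the pair as vm_dir / "ssh_key".

    # 3) One-time password, echoed on the VM console at first boot via bootcmd:
    # config.auth_method = "one_time_password"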
@@ -1745,7 +1872,7 @@ WantedBy=default.target
             runcmd_lines.append(f" - echo '{service_b64}' | base64 -d > {service_path}")

         # Fix snap interfaces reconnection script to be more robust
-        snap_fix_script = r'''#!/bin/bash
+        snap_fix_script = r"""#!/bin/bash
 # Fix snap interfaces for GUI apps
 set -euo pipefail
 SNAP_LIST=$(snap list | awk 'NR>1 {print $1}')
@@ -1761,9 +1888,11 @@ for snap in $SNAP_LIST; do
     esac
 done
 systemctl restart snapd 2>/dev/null || true
-    '''
+    """
         snap_fix_b64 = base64.b64encode(snap_fix_script.encode()).decode()
-        runcmd_lines.append(f" - echo '{snap_fix_b64}' | base64 -d > /usr/local/bin/clonebox-fix-snaps")
+        runcmd_lines.append(
+            f" - echo '{snap_fix_b64}' | base64 -d > /usr/local/bin/clonebox-fix-snaps"
+        )
         runcmd_lines.append(" - chmod +x /usr/local/bin/clonebox-fix-snaps")
         runcmd_lines.append(" - /usr/local/bin/clonebox-fix-snaps || true")

@@ -2027,30 +2156,40 @@ if __name__ == "__main__":
         runcmd_lines.append(" - sleep 10 && reboot")

         runcmd_yaml = "\n".join(runcmd_lines) if runcmd_lines else ""
-        bootcmd_yaml = "\n".join(mount_commands) if mount_commands else ""
-        bootcmd_block = f"\nbootcmd:\n{bootcmd_yaml}\n" if bootcmd_yaml else ""
-
-        # Remove power_state - using shutdown -r instead
-        power_state_yaml = ""
+
+        # Build bootcmd combining mount commands and extra security bootcmds
+        bootcmd_lines = list(mount_commands) if mount_commands else []
+        if bootcmd_extra:
+            bootcmd_lines.extend(bootcmd_extra)
+
+        bootcmd_block = ""
+        if bootcmd_lines:
+            bootcmd_block = "\nbootcmd:\n" + "\n".join(bootcmd_lines) + "\n"

-        user_data = f"""#cloud-config
+        # User-data components
+        user_data_header = f"""#cloud-config
 hostname: {config.name}
 manage_etc_hosts: true

-# Default user
 users:
   - name: {config.username}
     sudo: ALL=(ALL) NOPASSWD:ALL
     shell: /bin/bash
-    lock_passwd: false
     groups: sudo,adm,dialout,cdrom,floppy,audio,dip,video,plugdev,netdev,docker
-    plain_text_passwd: {config.password}
-
-# Allow password authentication
-ssh_pwauth: true
-chpasswd:
-  expire: false
+    lock_passwd: {lock_passwd}
+"""
+        if ssh_authorized_keys:
+            user_data_header += " ssh_authorized_keys:\n"
+            for key in ssh_authorized_keys:
+                user_data_header += f" - {key}\n"
+
+        if chpasswd_config:
+            user_data_header += f"\n{chpasswd_config}\n"
+
+        user_data_header += f"ssh_pwauth: {ssh_pwauth}\n"

+        # Assemble final user-data
+        user_data = f"""{user_data_header}
 # Make sure root partition + filesystem grows to fill the qcow2 disk size
 growpart:
   mode: auto
@@ -2070,7 +2209,6 @@
 # Run after packages are installed
 runcmd:
 {runcmd_yaml}
-{power_state_yaml}

 final_message: "CloneBox VM is ready after $UPTIME seconds"
 """