virtui-manager 1.1.6__py3-none-any.whl → 1.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {virtui_manager-1.1.6.dist-info → virtui_manager-1.4.0.dist-info}/METADATA +1 -1
- virtui_manager-1.4.0.dist-info/RECORD +76 -0
- vmanager/constants.py +739 -108
- vmanager/dialog.css +24 -0
- vmanager/firmware_manager.py +4 -1
- vmanager/i18n.py +32 -0
- vmanager/libvirt_utils.py +132 -3
- vmanager/locales/de/LC_MESSAGES/virtui-manager.mo +0 -0
- vmanager/locales/de/LC_MESSAGES/virtui-manager.po +3158 -0
- vmanager/locales/fr/LC_MESSAGES/virtui-manager.mo +0 -0
- vmanager/locales/fr/LC_MESSAGES/virtui-manager.po +3155 -0
- vmanager/locales/it/LC_MESSAGES/virtui-manager.mo +0 -0
- vmanager/locales/it/LC_MESSAGES/virtui-manager.po +3132 -0
- vmanager/locales/virtui-manager.pot +3033 -0
- vmanager/modals/bulk_modals.py +13 -12
- vmanager/modals/cache_stats_modal.py +6 -5
- vmanager/modals/capabilities_modal.py +133 -0
- vmanager/modals/config_modal.py +25 -24
- vmanager/modals/cpu_mem_pc_modals.py +22 -21
- vmanager/modals/custom_migration_modal.py +10 -9
- vmanager/modals/disk_pool_modals.py +60 -59
- vmanager/modals/host_dashboard_modal.py +137 -0
- vmanager/modals/host_stats.py +199 -0
- vmanager/modals/howto_disk_modal.py +2 -1
- vmanager/modals/howto_network_modal.py +2 -1
- vmanager/modals/howto_overlay_modal.py +2 -1
- vmanager/modals/howto_ssh_modal.py +2 -1
- vmanager/modals/howto_virtiofs_modal.py +2 -1
- vmanager/modals/input_modals.py +11 -10
- vmanager/modals/log_modal.py +2 -1
- vmanager/modals/migration_modals.py +21 -19
- vmanager/modals/network_modals.py +45 -36
- vmanager/modals/provisioning_modals.py +56 -56
- vmanager/modals/select_server_modals.py +8 -7
- vmanager/modals/selection_modals.py +7 -6
- vmanager/modals/server_modals.py +24 -23
- vmanager/modals/server_prefs_modals.py +78 -71
- vmanager/modals/utils_modals.py +10 -9
- vmanager/modals/virsh_modals.py +3 -2
- vmanager/modals/virtiofs_modals.py +6 -5
- vmanager/modals/vm_type_info_modal.py +2 -1
- vmanager/modals/vmanager_modals.py +19 -19
- vmanager/modals/vmcard_dialog.py +57 -57
- vmanager/modals/vmdetails_modals.py +115 -123
- vmanager/modals/xml_modals.py +3 -2
- vmanager/network_manager.py +4 -1
- vmanager/storage_manager.py +157 -39
- vmanager/utils.py +54 -7
- vmanager/vm_actions.py +48 -24
- vmanager/vm_migration.py +4 -1
- vmanager/vm_queries.py +67 -25
- vmanager/vm_service.py +8 -5
- vmanager/vmanager.css +55 -1
- vmanager/vmanager.py +247 -120
- vmanager/vmcard.css +3 -1
- vmanager/vmcard.py +270 -205
- vmanager/webconsole_manager.py +22 -22
- virtui_manager-1.1.6.dist-info/RECORD +0 -65
- {virtui_manager-1.1.6.dist-info → virtui_manager-1.4.0.dist-info}/WHEEL +0 -0
- {virtui_manager-1.1.6.dist-info → virtui_manager-1.4.0.dist-info}/entry_points.txt +0 -0
- {virtui_manager-1.1.6.dist-info → virtui_manager-1.4.0.dist-info}/licenses/LICENSE +0 -0
- {virtui_manager-1.1.6.dist-info → virtui_manager-1.4.0.dist-info}/top_level.txt +0 -0
vmanager/modals/xml_modals.py
CHANGED

@@ -6,6 +6,7 @@ from textual.widgets import Button, TextArea
 from textual.widgets.text_area import LanguageDoesNotExist
 from textual.containers import Vertical, Horizontal
 from .base_modals import BaseModal
+from ..constants import ButtonLabels
 
 class XMLDisplayModal(BaseModal[str | None]):
     """A modal screen for displaying and editing XML."""
@@ -32,8 +33,8 @@ class XMLDisplayModal(BaseModal[str | None]):
         with Vertical(id="dialog-buttons"):
             with Horizontal():
                 if not self.read_only:
-                    yield Button(
-                yield Button(
+                    yield Button(ButtonLabels.SAVE, variant="primary", id="save-btn")
+                yield Button(ButtonLabels.CLOSE, id="close-btn")
 
     def on_mount(self) -> None:
         self.query_one(TextArea).focus()
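The hunk above swaps hard-coded button text for constants from `vmanager/constants.py`, which presumably feed the new `i18n.py` translation layer. A minimal sketch of the pattern with illustrative label values; this is not the package's actual `ButtonLabels` class:

```python
# Illustrative only -- stand-in for the ButtonLabels constants referenced above.
class ButtonLabels:
    SAVE = "Save"
    CLOSE = "Close"

def compose_buttons(read_only: bool) -> list[str]:
    """Labels a modal like XMLDisplayModal would yield, per the hunk above."""
    labels = [] if read_only else [ButtonLabels.SAVE]
    labels.append(ButtonLabels.CLOSE)
    return labels

print(compose_buttons(read_only=True))   # ['Close']
print(compose_buttons(read_only=False))  # ['Save', 'Close']
```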
vmanager/network_manager.py
CHANGED

@@ -9,6 +9,7 @@ import xml.etree.ElementTree as ET
 from functools import lru_cache
 import libvirt
 from .utils import log_function_call
+from .libvirt_utils import get_host_domain_capabilities
 
 
 @lru_cache(maxsize=16)
@@ -246,7 +247,9 @@ def get_host_network_info(conn: libvirt.virConnect):
     """
     networks = []
    try:
-        caps_xml = conn
+        caps_xml = get_host_domain_capabilities(conn)
+        if not caps_xml:
+            return networks
         root = ET.fromstring(caps_xml)
         for interface in root.findall(".//interface"):
             ip_elem = interface.find("ip")
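Both this file and `vm_actions.py` now route capabilities lookups through `get_host_domain_capabilities` from `libvirt_utils.py` and tolerate an empty result. The helper itself is not shown in this diff; the following is only an assumed sketch of what such a wrapper could look like, guarding and caching `conn.getDomainCapabilities()`:

```python
# Assumed sketch of a capabilities wrapper; the real implementation in
# vmanager/libvirt_utils.py may differ (different libvirt call, cache policy, ...).
from functools import lru_cache
import logging
import libvirt

@lru_cache(maxsize=16)
def get_host_domain_capabilities(conn: libvirt.virConnect) -> str | None:
    """Return the host's domain capabilities XML, or None if it cannot be fetched."""
    try:
        return conn.getDomainCapabilities()
    except libvirt.libvirtError as e:
        logging.debug(f"Could not fetch domain capabilities: {e}")
        return None
```

Callers such as `get_host_network_info()` then treat a falsy return value as "no data" and bail out early instead of letting a libvirt error propagate into the UI.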
vmanager/storage_manager.py
CHANGED

@@ -16,6 +16,119 @@ from .libvirt_utils import (
 )
 from .vm_queries import get_vm_disks_info
 
+
+def _safe_is_pool_active(pool: libvirt.virStoragePool) -> bool:
+    """
+    Safely check if a storage pool is active without blocking the UI.
+    Returns False if the check fails or times out.
+    """
+    try:
+        return pool.isActive()
+    except libvirt.libvirtError as e:
+        logging.debug(f"Failed to check if pool '{pool.name()}' is active: {e}")
+        return False
+    except Exception as e:
+        logging.debug(f"Unexpected error checking pool '{pool.name()}' status: {e}")
+        return False
+
+
+def _ensure_pool_active(pool: libvirt.virStoragePool) -> bool:
+    """
+    Ensure a storage pool is active. If not active, try to activate it.
+    Returns True if pool is active (or was successfully activated), False otherwise.
+    """
+    if _safe_is_pool_active(pool):
+        return True
+
+    try:
+        logging.info(f"Pool '{pool.name()}' is not active, attempting to activate...")
+        pool.create(0)
+        return True
+    except libvirt.libvirtError as e:
+        logging.error(f"Failed to activate pool '{pool.name()}': {e}")
+        return False
+    except Exception as e:
+        logging.error(f"Unexpected error activating pool '{pool.name()}': {e}")
+        return False
+
+
+def _safe_get_pool_info(pool: libvirt.virStoragePool) -> tuple:
+    """
+    Safely get pool info without blocking the UI.
+    Returns (capacity, allocation, available) or (0, 0, 0) on failure.
+    """
+    try:
+        info = pool.info()
+        return info[1], info[2], info[3]  # capacity, allocation, available
+    except libvirt.libvirtError as e:
+        logging.debug(f"Failed to get info for pool '{pool.name()}': {e}")
+        return 0, 0, 0
+    except Exception as e:
+        logging.debug(f"Unexpected error getting info for pool '{pool.name()}': {e}")
+        return 0, 0, 0
+
+
+def _safe_get_pool_autostart(pool: libvirt.virStoragePool) -> bool:
+    """
+    Safely get pool autostart setting without blocking the UI.
+    Returns False on failure.
+    """
+    try:
+        return pool.autostart() == 1
+    except libvirt.libvirtError as e:
+        logging.debug(f"Failed to get autostart for pool '{pool.name()}': {e}")
+        return False
+    except Exception as e:
+        logging.debug(f"Unexpected error getting autostart for pool '{pool.name()}': {e}")
+        return False
+
+
+def _safe_refresh_pool(pool: libvirt.virStoragePool) -> bool:
+    """
+    Safely refresh a storage pool without blocking the UI.
+    Returns True on success, False on failure.
+    """
+    try:
+        pool.refresh(0)
+        return True
+    except libvirt.libvirtError as e:
+        logging.debug(f"Failed to refresh pool '{pool.name()}': {e}")
+        return False
+    except Exception as e:
+        logging.debug(f"Unexpected error refreshing pool '{pool.name()}': {e}")
+        return False
+
+
+def _safe_get_volume_info(vol: libvirt.virStorageVol) -> tuple:
+    """
+    Safely get volume info without blocking the UI.
+    Returns (type, capacity, allocation) or (0, 0, 0) on failure.
+    """
+    try:
+        vol_type, capacity, allocation = vol.info()
+        return vol_type, capacity, allocation
+    except libvirt.libvirtError as e:
+        logging.debug(f"Failed to get info for volume '{vol.name()}': {e}")
+        return 0, 0, 0
+    except Exception as e:
+        logging.debug(f"Unexpected error getting info for volume '{vol.name()}': {e}")
+        return 0, 0, 0
+
+
+def _safe_get_volume_path(vol: libvirt.virStorageVol) -> str:
+    """
+    Safely get volume path without blocking the UI.
+    Returns empty string on failure.
+    """
+    try:
+        return vol.path()
+    except libvirt.libvirtError as e:
+        logging.debug(f"Failed to get path for volume '{vol.name()}': {e}")
+        return ""
+    except Exception as e:
+        logging.debug(f"Unexpected error getting path for volume '{vol.name()}': {e}")
+        return ""
+
 @lru_cache(maxsize=16)
 def list_storage_pools(conn: libvirt.virConnect) -> List[Dict[str, Any]]:
     """
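These `_safe_*` wrappers convert libvirt errors into inert defaults so a flaky backend (comments later in this file mention an NFS server being down) degrades a listing instead of crashing it. An illustrative composition of the helpers above; `summarize_pool` is a hypothetical function, not part of `storage_manager.py`:

```python
# Hypothetical caller built only from the _safe_* helpers added above.
import libvirt

def summarize_pool(pool: libvirt.virStoragePool) -> dict:
    """Return a UI-friendly snapshot of a pool using the failure-tolerant helpers."""
    capacity, allocation, available = _safe_get_pool_info(pool)
    return {
        "name": pool.name(),
        "status": "active" if _safe_is_pool_active(pool) else "inactive",
        "autostart": _safe_get_pool_autostart(pool),
        "capacity": capacity,
        "allocation": allocation,
        "available": available,
    }
```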
@@ -35,15 +148,17 @@ def list_storage_pools(conn: libvirt.virConnect) -> List[Dict[str, Any]]:
            except libvirt.libvirtError:
                name = "Unknown Pool"
 
-            is_active = pool
-
+            is_active = _safe_is_pool_active(pool)
+            capacity, allocation, _ = _safe_get_pool_info(pool)
+            autostart = _safe_get_pool_autostart(pool)
+
            pools_info.append({
                'name': name,
                'pool': pool,
                'status': 'active' if is_active else 'inactive',
-                'autostart':
-                'capacity':
-                'allocation':
+                'autostart': autostart,
+                'capacity': capacity,
+                'allocation': allocation,
            })
    except libvirt.libvirtError as e:
        # If we fail to get details (e.g. NFS down), still list the pool but as unavailable
@@ -74,7 +189,7 @@ def list_storage_volumes(pool: libvirt.virStoragePool) -> List[Dict[str, Any]]:
    Lists all storage volumes in a given pool.
    """
    volumes_info = []
-    if not pool or not pool
+    if not pool or not _safe_is_pool_active(pool):
        return volumes_info
 
    try:
@@ -82,13 +197,13 @@ def list_storage_volumes(pool: libvirt.virStoragePool) -> List[Dict[str, Any]]:
        for name in vol_names:
            try:
                vol = pool.storageVolLookupByName(name)
-
+                vol_type, capacity, allocation = _safe_get_volume_info(vol)
                volumes_info.append({
                    'name': name,
                    'volume': vol,
-                    'type':
-                    'capacity':
-                    'allocation':
+                    'type': vol_type,
+                    'capacity': capacity,
+                    'allocation': allocation,
                })
            except libvirt.libvirtError:
                continue
@@ -178,8 +293,8 @@ def create_volume(pool: libvirt.virStoragePool, name: str, size_gb: int, vol_for
    """
    Creates a new storage volume in a pool.
    """
-    if not pool
-        msg = f"Pool '{pool.name()}' is not active."
+    if not _ensure_pool_active(pool):
+        msg = f"Pool '{pool.name()}' is not active and could not be activated."
        logging.error(msg)
        raise Exception(msg)
 
@@ -252,8 +367,8 @@ def attach_volume(pool: libvirt.virStoragePool, name: str, path: str, vol_format
    """
    Attaches an existing file as a storage volume in a pool.
    """
-    if not pool
-        msg = f"Pool '{pool.name()}' is not active."
+    if not _ensure_pool_active(pool):
+        msg = f"Pool '{pool.name()}' is not active and could not be activated."
        logging.error(msg)
        raise Exception(msg)
 
@@ -309,7 +424,7 @@ def attach_volume(pool: libvirt.virStoragePool, name: str, path: str, vol_format
 
    try:
        # Refresh the pool to make sure libvirt knows about the file if it was just copied.
-        pool
+        _safe_refresh_pool(pool)
        vol = pool.storageVolLookupByName(name)
        if vol:
            logging.warning(f"Volume '{name}' already exists in pool '{pool.name()}'. Not creating.")
@@ -320,7 +435,7 @@ def attach_volume(pool: libvirt.virStoragePool, name: str, path: str, vol_format
    try:
        pool.createXML(vol_xml, 0)
        # Refresh again after creating the volume from XML
-        pool
+        _safe_refresh_pool(pool)
    except libvirt.libvirtError as e:
        # If creation fails, attempt to clean up the copied file
        if pool_type == 'dir' and 'dest_path' in locals() and os.path.exists(dest_path):
@@ -336,8 +451,8 @@ def create_overlay_volume(pool: libvirt.virStoragePool, name: str, backing_vol_p
    Creates a qcow2 overlay volume backed by another volume (backing file).
    The new volume will record changes, while the backing file remains untouched.
    """
-    if not pool
-        msg = f"Pool '{pool.name()}' is not active."
+    if not _ensure_pool_active(pool):
+        msg = f"Pool '{pool.name()}' is not active and could not be activated."
        logging.error(msg)
        raise Exception(msg)
 
@@ -347,7 +462,7 @@ def create_overlay_volume(pool: libvirt.virStoragePool, name: str, backing_vol_p
    if not backing_vol:
        raise Exception(f"Could not find backing volume for path '{backing_vol_path}' to determine capacity.")
 
-    capacity = backing_vol
+    _, capacity, _ = _safe_get_volume_info(backing_vol)
 
    vol_xml = f"""
    <volume>
@@ -418,7 +533,7 @@ def find_vms_using_volume(conn: libvirt.virConnect, vol_path: str, vol_name: str
            try:
                p = conn.storagePoolLookupByName(pool_name)
                v = p.storageVolLookupByName(volume_name_from_xml)
-                if v
+                if _safe_get_volume_path(v) == vol_path:
                    vms_using_volume.append(domain)
                    break  # Found it, move to the next domain
            except libvirt.libvirtError:
@@ -486,11 +601,11 @@ def move_volume(conn: libvirt.virConnect, source_pool_name: str, dest_pool_name:
        source_vol = source_pool.storageVolLookupByName(volume_name)
 
        # Check for available space before starting the move
-
-        source_capacity = source_info[1]  # in bytes
+        _, source_capacity, _ = _safe_get_volume_info(source_vol)  # in bytes
 
        # Check if the volume is in use by any running VMs before starting the move
-
+        source_path = _safe_get_volume_path(source_vol)
+        vms_using_volume = find_vms_using_volume(conn, source_path, source_vol.name())
        running_vms = [vm.name() for vm in vms_using_volume if vm.state()[0] == libvirt.VIR_DOMAIN_RUNNING]
 
        if running_vms:
@@ -501,8 +616,7 @@ def move_volume(conn: libvirt.virConnect, source_pool_name: str, dest_pool_name:
        if vms_using_volume:
            log_and_callback(f"Volume is used by offline VM(s):\n{[vm.name() for vm in vms_using_volume]}.\nTheir configuration will be updated after the move.\nWait Until the process is finished (can take a lot of time).")
 
-
-        source_capacity = source_info[1]
+        _, source_capacity, _ = _safe_get_volume_info(source_vol)
        source_format = "qcow2"  # Default
        try:
            source_format = ET.fromstring(source_vol.XMLDesc(0)).findtext("target/format[@type]", "qcow2")
@@ -626,11 +740,11 @@ def move_volume(conn: libvirt.virConnect, source_pool_name: str, dest_pool_name:
 
        # Refresh destination pool to make the new volume visible
        log_and_callback(f"Refreshing destination pool '{dest_pool.name()}'...")
-        dest_pool
+        _safe_refresh_pool(dest_pool)
 
        # Update any VM configurations that use this volume
-        old_path = source_vol
-        new_path = new_vol
+        old_path = _safe_get_volume_path(source_vol)
+        new_path = _safe_get_volume_path(new_vol)
        old_pool_name = source_pool.name()
        new_pool_name = dest_pool.name()
 
@@ -673,7 +787,7 @@ def move_volume(conn: libvirt.virConnect, source_pool_name: str, dest_pool_name:
 
        # Refresh source pool to remove the old volume from listings
        log_and_callback(f"Refreshing source pool '{source_pool.name()}'...")
-        source_pool
+        _safe_refresh_pool(source_pool)
        log_and_callback("\nMove Finished, you can close this window")
 
    except Exception as e:
@@ -708,8 +822,12 @@ def delete_storage_pool(pool: libvirt.virStoragePool):
    """
    try:
        # If pool is active, destroy it first (make it inactive)
-        if pool
-
+        if _safe_is_pool_active(pool):
+            try:
+                pool.destroy()
+            except libvirt.libvirtError as e:
+                logging.warning(f"Failed to destroy active pool '{pool.name()}': {e}")
+                # Continue with undefine even if destroy fails
        # Undefine the pool (delete it)
        pool.undefine()
    except libvirt.libvirtError as e:
@@ -729,7 +847,7 @@ def get_all_storage_volumes(conn: libvirt.virConnect) -> List[libvirt.virStorage
    pools_info = list_storage_pools(conn)
    for pool_info in pools_info:
        pool = pool_info['pool']
-        if pool
+        if _safe_is_pool_active(pool):
            try:
                all_volumes.extend(pool.listAllVolumes())
            except libvirt.libvirtError:
@@ -749,7 +867,7 @@ def list_unused_volumes(conn: libvirt.virConnect, pool_name: str = None) -> List
    if pool_name:
        try:
            pool = conn.storagePoolLookupByName(pool_name)
-            if not pool
+            if not _safe_is_pool_active(pool):
                return []
            all_volumes = pool.listAllVolumes()
        except libvirt.libvirtError:
@@ -795,7 +913,8 @@ def list_unused_volumes(conn: libvirt.virConnect, pool_name: str = None) -> List
 
    unused_volumes = []
    for vol in all_volumes:
-
+        vol_path = _safe_get_volume_path(vol)
+        if vol_path and vol_path not in used_disk_paths:
            unused_volumes.append(vol)
 
    return unused_volumes
@@ -916,8 +1035,7 @@ def copy_volume_across_hosts(source_conn: libvirt.virConnect, dest_conn: libvirt
        log_and_callback(f"[red]ERROR:[/ ] Could not find source/destination resources: {e}")
        raise
 
-
-    source_capacity = source_info[1]
+    _, source_capacity, _ = _safe_get_volume_info(source_vol)
    source_format = "qcow2"
    try:
        source_format = ET.fromstring(source_vol.XMLDesc(0)).findtext("target/format[@type]", "qcow2")
@@ -1093,13 +1211,13 @@ def copy_volume_across_hosts(source_conn: libvirt.virConnect, dest_conn: libvirt
        if upload_error: raise upload_error
 
        log_and_callback("Transfer complete.")
-        dest_pool
+        _safe_refresh_pool(dest_pool)
 
        return {
-            "old_disk_path": source_vol
+            "old_disk_path": _safe_get_volume_path(source_vol),
            "new_pool_name": dest_pool.name(),
            "new_volume_name": dest_vol.name(),
-            "new_disk_path": dest_vol
+            "new_disk_path": _safe_get_volume_path(dest_vol),
        }
 
    except Exception as e:
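`move_volume()` and `copy_volume_across_hosts()` now read the source capacity through `_safe_get_volume_info()` before transferring data. A hedged sketch of the kind of pre-flight space check this enables; `has_room_for_volume` is hypothetical and not part of the package:

```python
# Hypothetical pre-flight check combining the _safe_* helpers added in this file.
import libvirt

def has_room_for_volume(dest_pool: libvirt.virStoragePool,
                        source_vol: libvirt.virStorageVol) -> bool:
    """True only when the destination pool reports enough free space."""
    _, source_capacity, _ = _safe_get_volume_info(source_vol)   # bytes
    _, _, dest_available = _safe_get_pool_info(dest_pool)       # bytes
    return source_capacity > 0 and dest_available >= source_capacity
```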
vmanager/utils.py
CHANGED

@@ -268,6 +268,20 @@ def check_websockify() -> bool:
    return False
 
 
+def check_tmux() -> bool:
+    """
+    Checks if running inside a tmux session and tmux command is available.
+
+    Returns:
+        bool: True if inside tmux and tmux is installed, False otherwise
+    """
+    try:
+        return os.environ.get("TMUX") is not None and shutil.which("tmux") is not None
+    except Exception as e:
+        logging.error(f"Error checking tmux: {e}")
+        return False
+
+
 def check_is_firewalld_running() -> Union[str, bool]:
    """
    Check if firewalld is running.
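`check_tmux()` only reports True when the process is already inside a tmux session (the `TMUX` environment variable is set) and the `tmux` binary is on the PATH. A hedged example of how a caller might gate a feature on it; the function name and the exact tmux invocation below are illustrative, not taken from the package:

```python
# Hypothetical caller of check_tmux(); virtui-manager's real integration may differ.
import shlex
import subprocess

def open_in_tmux_window(command: str) -> bool:
    """Run `command` in a new tmux window if we are inside tmux; otherwise do nothing."""
    if not check_tmux():
        return False
    # tmux joins the remaining arguments into the shell command for the new window.
    subprocess.run(["tmux", "new-window", *shlex.split(command)], check=True)
    return True
```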
@@ -428,9 +442,9 @@ class CacheMonitor:
    def log_stats(self) -> None:
        """Log cache statistics."""
        stats = self.get_all_stats()
-        logging.
+        logging.debug("=== Cache Statistics ===")
        for name, data in stats.items():
-            logging.
+            logging.debug(
                f"{name}: {data['hit_rate']:.1f}% hit rate "
                f"({data['hits']} hits, {data['misses']} misses, "
                f"{data['current_size']}/{data['max_size']} entries)"
@@ -464,7 +478,7 @@ def generate_tooltip_markdown(
) -> str:
    """Generate tooltip markdown (pure function, cacheable)."""
    mem_display = format_memory_display(memory)
-    cpu_display = f"{cpu}
+    cpu_display = f"{cpu} {cpu_model}" if cpu_model else str(cpu)
 
    return (
        f"`{uuid}` \n"
@@ -482,10 +496,10 @@ def setup_cache_monitoring(enable: bool = True):
    cache_monitor = CacheMonitor()
    cache_monitor.tracked_functions.clear()
    if not enable:
-        logging.
+        logging.debug("Cache monitoring disabled.")
        return
 
-    logging.
+    logging.debug("Cache monitoring enabled.")
    cache_monitor.track(format_server_names)
    cache_monitor.track(extract_server_name_from_uri)
    cache_monitor.track(get_server_color_cached)
@@ -543,5 +557,38 @@ def setup_cache_monitoring(enable: bool = True):
 
    return cache_monitor
 
-
-
+
+def setup_logging():
+    """Configures the logging for the application."""
+    from .config import load_config, get_log_path
+    config = load_config()
+    log_level_str = config.get("LOG_LEVEL", "INFO").upper()
+    log_level = getattr(logging, log_level_str, logging.INFO)
+    log_path = get_log_path()
+
+    # Ensure directory exists
+    log_path.parent.mkdir(parents=True, exist_ok=True)
+    root_logger = logging.getLogger()
+
+    for handler in root_logger.handlers[:]:
+        if isinstance(handler, logging.StreamHandler) and not isinstance(handler, logging.FileHandler):
+            root_logger.removeHandler(handler)
+
+    # Check if we already added a FileHandler to this path
+    has_file_handler = False
+    for handler in root_logger.handlers:
+        if isinstance(handler, logging.FileHandler) and handler.baseFilename == str(log_path.absolute()):
+            has_file_handler = True
+            break
+
+    if not has_file_handler:
+        file_handler = logging.FileHandler(log_path)
+        file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
+        root_logger.addHandler(file_handler)
+
+    root_logger.setLevel(log_level)
+
+    if not has_file_handler:
+        logging.info("--- Logging initialized ---")
+
+setup_cache_monitoring(enable=False)
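`setup_logging()` strips plain stream handlers (so nothing is printed underneath the TUI), attaches a single `FileHandler` for the configured path, and the `has_file_handler` check keeps repeated calls from duplicating it. A short usage sketch; where and how the application actually calls it is an assumption:

```python
# Usage sketch: calling setup_logging() twice must not duplicate the file handler.
import logging
from vmanager.utils import setup_logging

setup_logging()
setup_logging()  # second call adds no extra handler thanks to has_file_handler
logging.getLogger(__name__).info("virtui-manager starting")
```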
vmanager/vm_actions.py
CHANGED

@@ -13,7 +13,8 @@ from .libvirt_utils import (
    _get_disabled_disks_elem,
    _get_backing_chain_elem,
    get_overlay_backing_path,
-    get_internal_id
+    get_internal_id,
+    get_host_domain_capabilities
)
from .utils import log_function_call
from .vm_queries import get_vm_disks_info, get_vm_tpm_info, _get_domain_root, get_vm_snapshots
@@ -1872,6 +1873,18 @@ def stop_vm(domain: libvirt.virDomain):
    invalidate_cache(get_internal_id(domain))
    domain.shutdown()
 
+def hibernate_vm(domain: libvirt.virDomain):
+    """
+    Saves (hibernates) the VM state to disk and stops it.
+    """
+    if not domain:
+        raise ValueError("Invalid domain object.")
+    if not domain.isActive():
+        raise libvirt.libvirtError(f"VM '{domain.name()}' is not active, cannot save.")
+
+    invalidate_cache(get_internal_id(domain))
+    domain.managedSave(0)
+
def pause_vm(domain: libvirt.virDomain):
    """
    Pauses the execution of the VM.
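The new `hibernate_vm()` is a thin wrapper around libvirt's managed save: `domain.managedSave(0)` writes the guest's memory state to a host-managed file and stops the guest, and a later start transparently resumes from that image. A standalone sketch of the round trip using plain libvirt calls; the URI and VM name are placeholders:

```python
# Round-trip sketch of the managed-save behaviour behind hibernate_vm().
import libvirt

def hibernate_and_resume(uri: str, vm_name: str) -> None:
    conn = libvirt.open(uri)
    try:
        dom = conn.lookupByName(vm_name)
        if dom.isActive():
            dom.managedSave(0)          # what hibernate_vm() calls
        if dom.hasManagedSaveImage(0):
            dom.create()                # starting the VM restores the saved state
    finally:
        conn.close()

# hibernate_and_resume("qemu:///system", "my-vm")  # placeholder values
```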
@@ -2186,30 +2199,33 @@ def check_server_migration_compatibility(source_conn: libvirt.virConnect, dest_c
    source_tpm_info = get_vm_tpm_info(source_root)
    if source_tpm_info:
        try:
-            dest_caps_xml = dest_conn
-
-
-
-
-
-
-
-
+            dest_caps_xml = get_host_domain_capabilities(dest_conn)
+            if dest_caps_xml:
+                dest_caps_root = ET.fromstring(dest_caps_xml)
+
+                # Check if destination host supports TPM devices at all
+                if not dest_caps_root.find(".//devices/tpm"):
+                    issues.append({
+                        'severity': 'ERROR',
+                        'message': f"Source VM '{domain_name}' uses TPM, but destination host '{dest_conn.getURI()}' does not appear to support TPM devices."
+                    })
+                else:
+                    for tpm_dev in source_tpm_info:
+                        if tpm_dev['type'] == 'passthrough':
+                            # More specific check for passthrough TPM
+                            issues.append({
+                                'severity': 'WARNING',
+                                'message': f"Source VM '{domain_name}' uses passthrough TPM ({tpm_dev['model']}). Passthrough TPM migration is often problematic due to hardware dependencies. Manual verification on destination host '{dest_conn.getURI()}' recommended."
+                            })
+                        elif tpm_dev['type'] == 'emulated' and is_live:
+                            # Emulated TPM should generally be fine for cold migration.
+                            # Live migration of emulated TPM might be tricky.
+                            issues.append({
+                                'severity': 'WARNING',
+                                'message': f"Source VM '{domain_name}' uses emulated TPM. Live migration with TPM can sometimes have issues; cold migration is safer."
+                            })
            else:
-                for
-                    if tpm_dev['type'] == 'passthrough':
-                        # More specific check for passthrough TPM
-                        issues.append({
-                            'severity': 'WARNING',
-                            'message': f"Source VM '{domain_name}' uses passthrough TPM ({tpm_dev['model']}). Passthrough TPM migration is often problematic due to hardware dependencies. Manual verification on destination host '{dest_conn.getURI()}' recommended."
-                        })
-                    elif tpm_dev['type'] == 'emulated' and is_live:
-                        # Emulated TPM should generally be fine for cold migration.
-                        # Live migration of emulated TPM might be tricky.
-                        issues.append({
-                            'severity': 'WARNING',
-                            'message': f"Source VM '{domain_name}' uses emulated TPM. Live migration with TPM can sometimes have issues; cold migration is safer."
-                        })
+                issues.append({'severity': 'WARNING', 'message': f"Could not retrieve destination host capabilities for TPM check."})
 
        except libvirt.libvirtError as e:
            issues.append({'severity': 'WARNING', 'message': f"Could not retrieve destination host capabilities for TPM check: {e}"})
@@ -2227,6 +2243,14 @@ def check_vm_migration_compatibility(domain: libvirt.virDomain, dest_conn: libvi
    """
    issues = []
 
+    # Check for name collision
+    try:
+        dest_conn.lookupByName(domain.name())
+        issues.append({'severity': 'ERROR', 'message': f"A VM with the name '{domain.name()}' already exists on the destination host."})
+    except libvirt.libvirtError as e:
+        if e.get_error_code() != libvirt.VIR_ERR_NO_DOMAIN:
+            issues.append({'severity': 'WARNING', 'message': f"Could not check for name collision on destination: {e}"})
+
    try:
        xml_desc = domain.XMLDesc(0)
        root = ET.fromstring(xml_desc)
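The name-collision check added to `check_vm_migration_compatibility()` relies on `lookupByName()` raising `VIR_ERR_NO_DOMAIN` when the name is free, and treats any other error as "unknown". The same pattern in isolation; `vm_name_exists` is an illustrative helper, not part of `vm_actions.py`:

```python
# Illustrative helper showing the lookupByName / VIR_ERR_NO_DOMAIN pattern.
import libvirt

def vm_name_exists(conn: libvirt.virConnect, name: str) -> bool | None:
    """True if a domain with this name exists, False if not, None if undeterminable."""
    try:
        conn.lookupByName(name)
        return True
    except libvirt.libvirtError as e:
        if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
            return False
        return None  # connection problem or other error -- collision state unknown
```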
vmanager/vm_migration.py
CHANGED

@@ -286,7 +286,10 @@ def custom_migrate_vm(source_conn: libvirt.virConnect, dest_conn: libvirt.virCon
        raise
 
    # 2. Analyze storage and propose move actions
-    actions = [
+    actions = [{
+        "type": "vm_metadata",
+        "vm_name": domain.name()
+    }]
    root = ET.fromstring(xml_desc)
    disks = get_vm_disks_info(source_conn, root)
 
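The migration plan now starts with an explicit `vm_metadata` action before any storage actions derived from `get_vm_disks_info()`. How such a plan might be consumed is sketched below; `apply_actions` and the print statements are illustrative, and the real handling stays inside `vm_migration.py`:

```python
# Illustrative consumer of a migration action list shaped like the one above.
def apply_actions(actions: list[dict]) -> None:
    for action in actions:
        if action.get("type") == "vm_metadata":
            print(f"Migrate definition of VM '{action['vm_name']}'")
        else:
            print(f"Handle storage action: {action}")

apply_actions([{"type": "vm_metadata", "vm_name": "example-vm"}])
```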