virtui-manager 1.1.5-py3-none-any.whl → 1.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their public registry.
- {virtui_manager-1.1.5.dist-info → virtui_manager-1.3.0.dist-info}/METADATA +1 -1
- virtui_manager-1.3.0.dist-info/RECORD +73 -0
- vmanager/constants.py +737 -108
- vmanager/dialog.css +24 -0
- vmanager/firmware_manager.py +4 -1
- vmanager/i18n.py +32 -0
- vmanager/libvirt_utils.py +132 -3
- vmanager/locales/de/LC_MESSAGES/virtui-manager.po +3012 -0
- vmanager/locales/fr/LC_MESSAGES/virtui-manager.mo +0 -0
- vmanager/locales/fr/LC_MESSAGES/virtui-manager.po +3124 -0
- vmanager/locales/it/LC_MESSAGES/virtui-manager.po +3012 -0
- vmanager/locales/virtui-manager.pot +3012 -0
- vmanager/modals/bulk_modals.py +13 -12
- vmanager/modals/cache_stats_modal.py +6 -5
- vmanager/modals/capabilities_modal.py +133 -0
- vmanager/modals/config_modal.py +25 -24
- vmanager/modals/cpu_mem_pc_modals.py +22 -21
- vmanager/modals/custom_migration_modal.py +10 -9
- vmanager/modals/disk_pool_modals.py +60 -59
- vmanager/modals/host_dashboard_modal.py +137 -0
- vmanager/modals/howto_disk_modal.py +13 -72
- vmanager/modals/howto_network_modal.py +13 -39
- vmanager/modals/howto_overlay_modal.py +13 -52
- vmanager/modals/howto_ssh_modal.py +12 -67
- vmanager/modals/howto_virtiofs_modal.py +13 -64
- vmanager/modals/input_modals.py +11 -10
- vmanager/modals/log_modal.py +2 -1
- vmanager/modals/migration_modals.py +20 -18
- vmanager/modals/network_modals.py +45 -36
- vmanager/modals/provisioning_modals.py +56 -56
- vmanager/modals/select_server_modals.py +8 -7
- vmanager/modals/selection_modals.py +7 -6
- vmanager/modals/server_modals.py +24 -23
- vmanager/modals/server_prefs_modals.py +103 -87
- vmanager/modals/utils_modals.py +10 -9
- vmanager/modals/virsh_modals.py +3 -2
- vmanager/modals/virtiofs_modals.py +6 -5
- vmanager/modals/vm_type_info_modal.py +2 -1
- vmanager/modals/vmanager_modals.py +19 -19
- vmanager/modals/vmcard_dialog.py +57 -57
- vmanager/modals/vmdetails_modals.py +115 -123
- vmanager/modals/xml_modals.py +3 -2
- vmanager/network_manager.py +4 -1
- vmanager/storage_manager.py +182 -42
- vmanager/utils.py +39 -6
- vmanager/vm_actions.py +28 -24
- vmanager/vm_queries.py +67 -25
- vmanager/vm_service.py +8 -5
- vmanager/vmanager.css +46 -0
- vmanager/vmanager.py +178 -112
- vmanager/vmcard.py +161 -159
- vmanager/webconsole_manager.py +21 -21
- virtui_manager-1.1.5.dist-info/RECORD +0 -65
- {virtui_manager-1.1.5.dist-info → virtui_manager-1.3.0.dist-info}/WHEEL +0 -0
- {virtui_manager-1.1.5.dist-info → virtui_manager-1.3.0.dist-info}/entry_points.txt +0 -0
- {virtui_manager-1.1.5.dist-info → virtui_manager-1.3.0.dist-info}/licenses/LICENSE +0 -0
- {virtui_manager-1.1.5.dist-info → virtui_manager-1.3.0.dist-info}/top_level.txt +0 -0
vmanager/vm_queries.py
CHANGED
@@ -13,8 +13,9 @@ from .libvirt_utils import (
     _get_disabled_disks_elem,
     get_overlay_backing_path,
     _get_backing_chain_elem,
+    get_host_domain_capabilities,
 )
-
+from .constants import StatusText
 
 def _parse_domain_xml_by_hash(xml_hash: str, xml_content: str) -> ET.Element | None:
     """
@@ -108,18 +109,18 @@ def get_status(domain, state=None):
     try:
         state, _ = domain.state()
     except libvirt.libvirtError:
-        return
+        return StatusText.UNKNOWN
 
     if state == libvirt.VIR_DOMAIN_RUNNING:
-        return
+        return StatusText.RUNNING
     elif state == libvirt.VIR_DOMAIN_PAUSED:
-        return
+        return StatusText.PAUSED
     elif state == libvirt.VIR_DOMAIN_PMSUSPENDED:
-        return
+        return StatusText.PMSUSPENDED
     elif state == libvirt.VIR_DOMAIN_BLOCKED:
-        return
+        return StatusText.BLOCKED
     else:
-        return
+        return StatusText.STOPPED
 
 @lru_cache(maxsize=16)
 def get_vm_description(domain):
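
Note: StatusText is imported from vmanager/constants.py (+737 in this release), which isn't excerpted here. A minimal sketch of the shape get_status() now relies on, assuming Python 3.11's StrEnum; the real class may define different strings:

    from enum import StrEnum

    class StatusText(StrEnum):
        # Hypothetical values; the actual strings live in vmanager/constants.py.
        RUNNING = "Running"
        PAUSED = "Paused"
        PMSUSPENDED = "Suspended"
        BLOCKED = "Blocked"
        STOPPED = "Stopped"
        UNKNOWN = "Unknown"

Returning named members instead of a bare return (which yields None) gives callers stable values to compare against and one place to localize the labels.
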
@@ -565,6 +566,7 @@ def get_all_vm_overlay_usage(conn: libvirt.virConnect) -> dict[str, list[str]]:
     """
     Scans all VMs and returns a mapping of backing file path to a list of VM names
     that use it via an overlay (checked via metadata).
+    Optimized to fetch VM XMLs in parallel.
     """
     backing_to_vms_map = {}
     if not conn:
@@ -575,30 +577,43 @@ def get_all_vm_overlay_usage(conn: libvirt.virConnect) -> dict[str, list[str]]:
     except libvirt.libvirtError:
         return backing_to_vms_map
 
-
+    def process_domain_overlay_usage(domain):
+        """Helper to process a single domain for overlay usage."""
         try:
             _, root = _get_domain_root(domain)
             if root is not None:
                 # Check all disks to see if they are overlays in metadata
                 disks = get_vm_disks_info(conn, root)
                 vm_name = domain.name()
+                overlay_mappings = []
                 for disk in disks:
                     path = disk.get('path')
                     if path:
                         backing_path = get_overlay_backing_path(root, path)
                         if backing_path:
-
-
-                            if vm_name not in backing_to_vms_map[backing_path]:
-                                backing_to_vms_map[backing_path].append(vm_name)
+                            overlay_mappings.append((backing_path, vm_name))
+                return overlay_mappings
         except Exception:
-
+            pass
+        return []
+
+    # Use ThreadPoolExecutor for parallel processing
+    with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
+        results = executor.map(process_domain_overlay_usage, domains)
+
+    for overlay_mappings in results:
+        for backing_path, vm_name in overlay_mappings:
+            if backing_path not in backing_to_vms_map:
+                backing_to_vms_map[backing_path] = []
+            if vm_name not in backing_to_vms_map[backing_path]:
+                backing_to_vms_map[backing_path].append(vm_name)
 
     return backing_to_vms_map
 
 def get_all_vm_nvram_usage(conn: libvirt.virConnect) -> dict[str, list[str]]:
     """
     Scans all VMs and returns a mapping of NVRAM file path to a list of VM names.
+    Optimized to fetch VM XMLs in parallel.
     """
     nvram_to_vms_map = {}
     if not conn:
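
The hunk above replaces a sequential per-domain loop with a fan-out/merge: workers only read, and the shared map is mutated solely in the main thread, so no lock is needed. A standalone sketch of the same pattern, assuming fetch(item) is any I/O-bound callable (libvirt XML lookups mostly wait on the hypervisor, so threads help despite the GIL):

    import concurrent.futures

    def merge_usage(items, fetch, max_workers=20):
        """Run fetch() over items in parallel, then merge results single-threaded."""
        usage: dict[str, list[str]] = {}
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            # map() preserves input order; exiting the with-block waits for all workers.
            results = list(executor.map(fetch, items))
        for mappings in results:
            for key, name in mappings:
                usage.setdefault(key, [])
                if name not in usage[key]:
                    usage[key].append(name)
        return usage
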
@@ -609,7 +624,8 @@ def get_all_vm_nvram_usage(conn: libvirt.virConnect) -> dict[str, list[str]]:
     except libvirt.libvirtError:
         return nvram_to_vms_map
 
-
+    def process_domain_nvram_usage(domain):
+        """Helper to process a single domain for NVRAM usage."""
         try:
             _, root = _get_domain_root(domain)
             if root is not None:
@@ -617,13 +633,22 @@ def get_all_vm_nvram_usage(conn: libvirt.virConnect) -> dict[str, list[str]]:
                 if nvram_elem is not None:
                     nvram_path = nvram_elem.text
                     if nvram_path:
-
-                        if nvram_path not in nvram_to_vms_map:
-                            nvram_to_vms_map[nvram_path] = []
-                        if vm_name not in nvram_to_vms_map[nvram_path]:
-                            nvram_to_vms_map[nvram_path].append(vm_name)
+                        return nvram_path, domain.name()
         except Exception:
-
+            pass
+        return None, None
+
+    # Use ThreadPoolExecutor for parallel processing
+    with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
+        results = executor.map(process_domain_nvram_usage, domains)
+
+    for nvram_path, vm_name in results:
+        if nvram_path and vm_name:
+            if nvram_path not in nvram_to_vms_map:
+                nvram_to_vms_map[nvram_path] = []
+            if vm_name not in nvram_to_vms_map[nvram_path]:
+                nvram_to_vms_map[nvram_path].append(vm_name)
+
     return nvram_to_vms_map
 
 
@@ -644,7 +669,9 @@ def get_supported_machine_types(conn, domain):
     arch = arch_elem.get('arch') if arch_elem is not None else 'x86_64'  # default
 
     # Get capabilities
-    caps_xml = conn
+    caps_xml = get_host_domain_capabilities(conn)
+    if not caps_xml:
+        return []
     caps_root = ET.fromstring(caps_xml)
 
     # Find machines for that arch
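
get_host_domain_capabilities() is new in vmanager/libvirt_utils.py (+132 in the file list); its body isn't shown in this diff. A plausible minimal sketch, assuming it wraps virConnect.getDomainCapabilities() and returns None on failure so callers can bail out on a falsy result, as the hunk above does:

    import libvirt

    def get_host_domain_capabilities(conn: libvirt.virConnect) -> str | None:
        """Return the host's domain-capabilities XML, or None on error."""
        try:
            # Default arguments query the host's default emulator/arch/machine/virt type.
            return conn.getDomainCapabilities(None, None, None, None, 0)
        except libvirt.libvirtError:
            return None
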
@@ -1016,19 +1043,34 @@ def check_for_spice_vms(conn):
     """
     Checks if any VM uses Spice graphics.
     Returns a message if a Spice VM is found, otherwise None.
+    Optimized to fetch VM XMLs in parallel.
     """
     if not conn:
         return None
     try:
         all_domains = conn.listAllDomains(0) or []
-
+    except libvirt.libvirtError:
+        return None
+
+    def check_domain_for_spice(domain):
+        """Helper to check a single domain for Spice graphics."""
+        try:
             _, root = _get_domain_root(domain)
             if root is not None:
                 graphics_info = get_vm_graphics_info(root)
                 if graphics_info.get("type") == "spice":
-                    return
-
-
+                    return True
+        except Exception:
+            pass
+        return False
+
+    # Use ThreadPoolExecutor for parallel processing
+    with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
+        results = executor.map(check_domain_for_spice, all_domains)
+        # Check if any domain uses Spice
+        if any(results):
+            return "Some VMs use Spice graphics. 'Web Console' is only available for VNC."
+
     return None
 
 def get_all_network_usage(conn: libvirt.virConnect) -> dict[str, list[str]]:

vmanager/vm_service.py
CHANGED
@@ -1297,7 +1297,7 @@ class VMService:
             pass
         return None
 
-    def find_domains_by_uuids(self, active_uris: list[str], vm_uuids: list[str]) -> dict[str, libvirt.virDomain]:
+    def find_domains_by_uuids(self, active_uris: list[str], vm_uuids: list[str], check_validity: bool = True) -> dict[str, libvirt.virDomain]:
        """Finds and returns a dictionary of domain objects from a list of UUIDs."""
         self._update_target_uris(active_uris)
 
@@ -1327,11 +1327,14 @@ class VMService:
 
             valid = False
             if domain:
-
-
+                if check_validity:
+                    try:
+                        domain.info()  # Check if domain is still valid
+                        valid = True
+                    except libvirt.libvirtError:
+                        valid = False
+                else:
                     valid = True
-                except libvirt.libvirtError:
-                    valid = False
 
             if valid:
                 found_domains[uuid] = domain

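
The new check_validity flag lets bulk callers skip the per-domain validity probe: each domain.info() is a round trip to libvirtd, which adds up over many UUIDs. A usage sketch (vm_service stands in for an existing VMService instance):

    # Fast path for bulk lookups: accept possibly-stale handles and let the
    # next real libvirt call surface any invalid domain instead.
    domains = vm_service.find_domains_by_uuids(active_uris, vm_uuids, check_validity=False)
    for uuid, dom in domains.items():
        print(uuid, dom.name())
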
vmanager/vmanager.css
CHANGED
@@ -28,6 +28,7 @@ Select:focus {
 Toast.-inprogress {
     background: $primary;
     color: $text;
+    border-left: tall $primary;
 }
 
 /* --- Base Dialog & Container Styles --- */
@@ -635,3 +636,48 @@ NetworkListItem { height: auto; margin-bottom: 0; }
 .net-mode { width: 15%; }
 #net-active-check { width: 25%; }
 #net-autostart-check { width: 25%; }
+
+/* Host Dashboard Styles */
+#host-dashboard-dialog {
+    width: 80;
+    height: auto;
+    border: round $primary;
+    background: $surface;
+    padding: 1 2;
+}
+
+.info-section-dashboard {
+    background: $panel;
+    margin-bottom: 1;
+    border: round $primary;
+    padding: 0 1;
+    height: auto;
+}
+
+.section-title-dashboard {
+    text-style: bold;
+    background: $primary;
+    color: $text;
+    width: 100%;
+    margin-bottom: 1;
+}
+
+.host-info-grid {
+    grid-size-columns: 2;
+    grid-gutter: 1;
+    height: auto;
+}
+
+.usage-row {
+    height: 3;
+    align: center middle;
+}
+
+.usage-row Label {
+    width: auto;
+    margin-right: 2;
+}
+
+#mem-bar, #cpu-alloc-bar, #mem-alloc-bar {
+    width: 1fr;
+}
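
These selectors back the new host dashboard (vmanager/modals/host_dashboard_modal.py, +137 in the file list). A minimal Textual sketch of a modal that would pick up these styles; the widget tree is illustrative, not the actual implementation:

    from textual.app import ComposeResult
    from textual.containers import Horizontal, Vertical
    from textual.screen import ModalScreen
    from textual.widgets import Label, ProgressBar

    class HostDashboardModal(ModalScreen):
        """Hypothetical layout; IDs and classes match the CSS added above."""

        def compose(self) -> ComposeResult:
            with Vertical(id="host-dashboard-dialog"):
                with Vertical(classes="info-section-dashboard"):
                    yield Label("Memory", classes="section-title-dashboard")
                    with Horizontal(classes="usage-row"):
                        yield Label("Used:")
                        yield ProgressBar(total=100, id="mem-bar")  # stretches via width: 1fr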