virtui-manager 1.1.6-py3-none-any.whl → 1.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. {virtui_manager-1.1.6.dist-info → virtui_manager-1.3.0.dist-info}/METADATA +1 -1
  2. virtui_manager-1.3.0.dist-info/RECORD +73 -0
  3. vmanager/constants.py +737 -108
  4. vmanager/dialog.css +24 -0
  5. vmanager/firmware_manager.py +4 -1
  6. vmanager/i18n.py +32 -0
  7. vmanager/libvirt_utils.py +132 -3
  8. vmanager/locales/de/LC_MESSAGES/virtui-manager.po +3012 -0
  9. vmanager/locales/fr/LC_MESSAGES/virtui-manager.mo +0 -0
  10. vmanager/locales/fr/LC_MESSAGES/virtui-manager.po +3124 -0
  11. vmanager/locales/it/LC_MESSAGES/virtui-manager.po +3012 -0
  12. vmanager/locales/virtui-manager.pot +3012 -0
  13. vmanager/modals/bulk_modals.py +13 -12
  14. vmanager/modals/cache_stats_modal.py +6 -5
  15. vmanager/modals/capabilities_modal.py +133 -0
  16. vmanager/modals/config_modal.py +25 -24
  17. vmanager/modals/cpu_mem_pc_modals.py +22 -21
  18. vmanager/modals/custom_migration_modal.py +10 -9
  19. vmanager/modals/disk_pool_modals.py +60 -59
  20. vmanager/modals/host_dashboard_modal.py +137 -0
  21. vmanager/modals/howto_disk_modal.py +2 -1
  22. vmanager/modals/howto_network_modal.py +2 -1
  23. vmanager/modals/howto_overlay_modal.py +2 -1
  24. vmanager/modals/howto_ssh_modal.py +2 -1
  25. vmanager/modals/howto_virtiofs_modal.py +2 -1
  26. vmanager/modals/input_modals.py +11 -10
  27. vmanager/modals/log_modal.py +2 -1
  28. vmanager/modals/migration_modals.py +20 -18
  29. vmanager/modals/network_modals.py +45 -36
  30. vmanager/modals/provisioning_modals.py +56 -56
  31. vmanager/modals/select_server_modals.py +8 -7
  32. vmanager/modals/selection_modals.py +7 -6
  33. vmanager/modals/server_modals.py +24 -23
  34. vmanager/modals/server_prefs_modals.py +78 -71
  35. vmanager/modals/utils_modals.py +10 -9
  36. vmanager/modals/virsh_modals.py +3 -2
  37. vmanager/modals/virtiofs_modals.py +6 -5
  38. vmanager/modals/vm_type_info_modal.py +2 -1
  39. vmanager/modals/vmanager_modals.py +19 -19
  40. vmanager/modals/vmcard_dialog.py +57 -57
  41. vmanager/modals/vmdetails_modals.py +115 -123
  42. vmanager/modals/xml_modals.py +3 -2
  43. vmanager/network_manager.py +4 -1
  44. vmanager/storage_manager.py +157 -39
  45. vmanager/utils.py +39 -6
  46. vmanager/vm_actions.py +28 -24
  47. vmanager/vm_queries.py +67 -25
  48. vmanager/vm_service.py +8 -5
  49. vmanager/vmanager.css +46 -0
  50. vmanager/vmanager.py +178 -112
  51. vmanager/vmcard.py +161 -159
  52. vmanager/webconsole_manager.py +21 -21
  53. virtui_manager-1.1.6.dist-info/RECORD +0 -65
  54. {virtui_manager-1.1.6.dist-info → virtui_manager-1.3.0.dist-info}/WHEEL +0 -0
  55. {virtui_manager-1.1.6.dist-info → virtui_manager-1.3.0.dist-info}/entry_points.txt +0 -0
  56. {virtui_manager-1.1.6.dist-info → virtui_manager-1.3.0.dist-info}/licenses/LICENSE +0 -0
  57. {virtui_manager-1.1.6.dist-info → virtui_manager-1.3.0.dist-info}/top_level.txt +0 -0
vmanager/modals/xml_modals.py CHANGED
@@ -6,6 +6,7 @@ from textual.widgets import Button, TextArea
  from textual.widgets.text_area import LanguageDoesNotExist
  from textual.containers import Vertical, Horizontal
  from .base_modals import BaseModal
+ from ..constants import ButtonLabels

  class XMLDisplayModal(BaseModal[str | None]):
      """A modal screen for displaying and editing XML."""
@@ -32,8 +33,8 @@ class XMLDisplayModal(BaseModal[str | None]):
          with Vertical(id="dialog-buttons"):
              with Horizontal():
                  if not self.read_only:
-                     yield Button("Save", variant="primary", id="save-btn")
-                 yield Button("Close", id="close-btn")
+                     yield Button(ButtonLabels.SAVE, variant="primary", id="save-btn")
+                 yield Button(ButtonLabels.CLOSE, id="close-btn")

      def on_mount(self) -> None:
          self.query_one(TextArea).focus()
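The two hunks above replace hard-coded button strings with entries from a shared ButtonLabels class in vmanager/constants.py. That class is not shown in this diff, but given the new i18n.py module and the de/fr/it gettext catalogs added in 1.3.0, it presumably centralizes translatable labels along these lines (a sketch using stdlib gettext; the package may route through its own i18n helper instead):

# Hypothetical sketch; the real ButtonLabels lives in vmanager/constants.py
# and may resolve strings through the package's own i18n helper instead.
from gettext import gettext as _

class ButtonLabels:
    # One place to translate every button label via the new locale catalogs.
    SAVE = _("Save")
    CLOSE = _("Close")
    CANCEL = _("Cancel")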
vmanager/network_manager.py CHANGED
@@ -9,6 +9,7 @@ import xml.etree.ElementTree as ET
  from functools import lru_cache
  import libvirt
  from .utils import log_function_call
+ from .libvirt_utils import get_host_domain_capabilities


  @lru_cache(maxsize=16)
@@ -246,7 +247,9 @@ def get_host_network_info(conn: libvirt.virConnect):
      """
      networks = []
      try:
-         caps_xml = conn.getCapabilities()
+         caps_xml = get_host_domain_capabilities(conn)
+         if not caps_xml:
+             return networks
          root = ET.fromstring(caps_xml)
          for interface in root.findall(".//interface"):
              ip_elem = interface.find("ip")
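Here the direct conn.getCapabilities() call gives way to get_host_domain_capabilities() from libvirt_utils.py (a file that grows by 132 lines in this release but is not included above). Judging by the new `if not caps_xml` guard, the helper returns None instead of raising when the host cannot be queried; a minimal sketch under that assumption (the real helper may also cache the XML or consult conn.getDomainCapabilities()):

# Hypothetical sketch; the real helper is defined in vmanager/libvirt_utils.py.
import logging
import libvirt

def get_host_domain_capabilities(conn: libvirt.virConnect) -> str | None:
    """Return the host's capabilities XML, or None if it cannot be fetched."""
    try:
        return conn.getCapabilities()
    except libvirt.libvirtError as e:
        logging.debug(f"Could not fetch capabilities from {conn.getURI()}: {e}")
        return None

Callers then guard with `if not caps_xml:`, which is exactly the pattern this hunk introduces.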
vmanager/storage_manager.py CHANGED
@@ -16,6 +16,119 @@ from .libvirt_utils import (
  )
  from .vm_queries import get_vm_disks_info

+
+ def _safe_is_pool_active(pool: libvirt.virStoragePool) -> bool:
+     """
+     Safely check if a storage pool is active without blocking the UI.
+     Returns False if the check fails or times out.
+     """
+     try:
+         return pool.isActive()
+     except libvirt.libvirtError as e:
+         logging.debug(f"Failed to check if pool '{pool.name()}' is active: {e}")
+         return False
+     except Exception as e:
+         logging.debug(f"Unexpected error checking pool '{pool.name()}' status: {e}")
+         return False
+
+
+ def _ensure_pool_active(pool: libvirt.virStoragePool) -> bool:
+     """
+     Ensure a storage pool is active. If not active, try to activate it.
+     Returns True if pool is active (or was successfully activated), False otherwise.
+     """
+     if _safe_is_pool_active(pool):
+         return True
+
+     try:
+         logging.info(f"Pool '{pool.name()}' is not active, attempting to activate...")
+         pool.create(0)
+         return True
+     except libvirt.libvirtError as e:
+         logging.error(f"Failed to activate pool '{pool.name()}': {e}")
+         return False
+     except Exception as e:
+         logging.error(f"Unexpected error activating pool '{pool.name()}': {e}")
+         return False
+
+
+ def _safe_get_pool_info(pool: libvirt.virStoragePool) -> tuple:
+     """
+     Safely get pool info without blocking the UI.
+     Returns (capacity, allocation, available) or (0, 0, 0) on failure.
+     """
+     try:
+         info = pool.info()
+         return info[1], info[2], info[3]  # capacity, allocation, available
+     except libvirt.libvirtError as e:
+         logging.debug(f"Failed to get info for pool '{pool.name()}': {e}")
+         return 0, 0, 0
+     except Exception as e:
+         logging.debug(f"Unexpected error getting info for pool '{pool.name()}': {e}")
+         return 0, 0, 0
+
+
+ def _safe_get_pool_autostart(pool: libvirt.virStoragePool) -> bool:
+     """
+     Safely get pool autostart setting without blocking the UI.
+     Returns False on failure.
+     """
+     try:
+         return pool.autostart() == 1
+     except libvirt.libvirtError as e:
+         logging.debug(f"Failed to get autostart for pool '{pool.name()}': {e}")
+         return False
+     except Exception as e:
+         logging.debug(f"Unexpected error getting autostart for pool '{pool.name()}': {e}")
+         return False
+
+
+ def _safe_refresh_pool(pool: libvirt.virStoragePool) -> bool:
+     """
+     Safely refresh a storage pool without blocking the UI.
+     Returns True on success, False on failure.
+     """
+     try:
+         pool.refresh(0)
+         return True
+     except libvirt.libvirtError as e:
+         logging.debug(f"Failed to refresh pool '{pool.name()}': {e}")
+         return False
+     except Exception as e:
+         logging.debug(f"Unexpected error refreshing pool '{pool.name()}': {e}")
+         return False
+
+
+ def _safe_get_volume_info(vol: libvirt.virStorageVol) -> tuple:
+     """
+     Safely get volume info without blocking the UI.
+     Returns (type, capacity, allocation) or (0, 0, 0) on failure.
+     """
+     try:
+         info = vol.info()  # type, capacity, allocation
+         return info[0], info[1], info[2]
+     except libvirt.libvirtError as e:
+         logging.debug(f"Failed to get info for volume '{vol.name()}': {e}")
+         return 0, 0, 0
+     except Exception as e:
+         logging.debug(f"Unexpected error getting info for volume '{vol.name()}': {e}")
+         return 0, 0, 0
+
+
+ def _safe_get_volume_path(vol: libvirt.virStorageVol) -> str:
+     """
+     Safely get volume path without blocking the UI.
+     Returns empty string on failure.
+     """
+     try:
+         return vol.path()
+     except libvirt.libvirtError as e:
+         logging.debug(f"Failed to get path for volume '{vol.name()}': {e}")
+         return ""
+     except Exception as e:
+         logging.debug(f"Unexpected error getting path for volume '{vol.name()}': {e}")
+         return ""
+
  @lru_cache(maxsize=16)
  def list_storage_pools(conn: libvirt.virConnect) -> List[Dict[str, Any]]:
      """
@@ -35,15 +148,17 @@ def list_storage_pools(conn: libvirt.virConnect) -> List[Dict[str, Any]]:
              except libvirt.libvirtError:
                  name = "Unknown Pool"

-             is_active = pool.isActive()
-             info = pool.info()
+             is_active = _safe_is_pool_active(pool)
+             capacity, allocation, _ = _safe_get_pool_info(pool)
+             autostart = _safe_get_pool_autostart(pool)
+
              pools_info.append({
                  'name': name,
                  'pool': pool,
                  'status': 'active' if is_active else 'inactive',
-                 'autostart': pool.autostart() == 1,
-                 'capacity': info[1],
-                 'allocation': info[2],
+                 'autostart': autostart,
+                 'capacity': capacity,
+                 'allocation': allocation,
              })
          except libvirt.libvirtError as e:
              # If we fail to get details (e.g. NFS down), still list the pool but as unavailable
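With the wrappers in place, an unreachable pool is reported as inactive with zeroed sizes rather than raising into the UI. A caller can render the result directly; the following consumer is illustrative (it relies only on the dictionary keys visible in the hunk, with capacity in bytes as reported by libvirt):

# Illustrative consumer; assumes an open libvirt connection `conn`.
def format_pool_summary(conn) -> list[str]:
    lines = []
    for info in list_storage_pools(conn):
        size_gib = info['capacity'] / (1024 ** 3)  # capacity is in bytes
        lines.append(
            f"{info['name']}: {info['status']}, "
            f"autostart={'yes' if info['autostart'] else 'no'}, "
            f"{size_gib:.1f} GiB"
        )
    return lines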
@@ -74,7 +189,7 @@ def list_storage_volumes(pool: libvirt.virStoragePool) -> List[Dict[str, Any]]:
      Lists all storage volumes in a given pool.
      """
      volumes_info = []
-     if not pool or not pool.isActive():
+     if not pool or not _safe_is_pool_active(pool):
          return volumes_info

      try:
@@ -82,13 +197,13 @@ def list_storage_volumes(pool: libvirt.virStoragePool) -> List[Dict[str, Any]]:
          for name in vol_names:
              try:
                  vol = pool.storageVolLookupByName(name)
-                 info = vol.info() # type, capacity, allocation
+                 vol_type, capacity, allocation = _safe_get_volume_info(vol)
                  volumes_info.append({
                      'name': name,
                      'volume': vol,
-                     'type': info[0],
-                     'capacity': info[1],
-                     'allocation': info[2],
+                     'type': vol_type,
+                     'capacity': capacity,
+                     'allocation': allocation,
                  })
              except libvirt.libvirtError:
                  continue
@@ -178,8 +293,8 @@ def create_volume(pool: libvirt.virStoragePool, name: str, size_gb: int, vol_for
      """
      Creates a new storage volume in a pool.
      """
-     if not pool.isActive():
-         msg = f"Pool '{pool.name()}' is not active."
+     if not _ensure_pool_active(pool):
+         msg = f"Pool '{pool.name()}' is not active and could not be activated."
          logging.error(msg)
          raise Exception(msg)

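create_volume's body beyond this guard is not shown, but it ultimately passes a volume definition to pool.createXML(). For orientation, a minimal qcow2 volume XML of the kind such a function typically builds looks like this (illustrative values, not the package's exact template):

# Illustrative qcow2 volume XML for pool.createXML(); values are examples.
name = "disk01.qcow2"
size_gb = 20
vol_xml = f"""
<volume>
  <name>{name}</name>
  <capacity unit='GiB'>{size_gb}</capacity>
  <target>
    <format type='qcow2'/>
  </target>
</volume>
"""
# new_vol = pool.createXML(vol_xml, 0)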
@@ -252,8 +367,8 @@ def attach_volume(pool: libvirt.virStoragePool, name: str, path: str, vol_format
      """
      Attaches an existing file as a storage volume in a pool.
      """
-     if not pool.isActive():
-         msg = f"Pool '{pool.name()}' is not active."
+     if not _ensure_pool_active(pool):
+         msg = f"Pool '{pool.name()}' is not active and could not be activated."
          logging.error(msg)
          raise Exception(msg)

@@ -309,7 +424,7 @@ def attach_volume(pool: libvirt.virStoragePool, name: str, path: str, vol_format

      try:
          # Refresh the pool to make sure libvirt knows about the file if it was just copied.
-         pool.refresh(0)
+         _safe_refresh_pool(pool)
          vol = pool.storageVolLookupByName(name)
          if vol:
              logging.warning(f"Volume '{name}' already exists in pool '{pool.name()}'. Not creating.")
@@ -320,7 +435,7 @@ def attach_volume(pool: libvirt.virStoragePool, name: str, path: str, vol_format
      try:
          pool.createXML(vol_xml, 0)
          # Refresh again after creating the volume from XML
-         pool.refresh(0)
+         _safe_refresh_pool(pool)
      except libvirt.libvirtError as e:
          # If creation fails, attempt to clean up the copied file
          if pool_type == 'dir' and 'dest_path' in locals() and os.path.exists(dest_path):
@@ -336,8 +451,8 @@ def create_overlay_volume(pool: libvirt.virStoragePool, name: str, backing_vol_p
      Creates a qcow2 overlay volume backed by another volume (backing file).
      The new volume will record changes, while the backing file remains untouched.
      """
-     if not pool.isActive():
-         msg = f"Pool '{pool.name()}' is not active."
+     if not _ensure_pool_active(pool):
+         msg = f"Pool '{pool.name()}' is not active and could not be activated."
          logging.error(msg)
          raise Exception(msg)

@@ -347,7 +462,7 @@ def create_overlay_volume(pool: libvirt.virStoragePool, name: str, backing_vol_p
      if not backing_vol:
          raise Exception(f"Could not find backing volume for path '{backing_vol_path}' to determine capacity.")

-     capacity = backing_vol.info()[1]
+     _, capacity, _ = _safe_get_volume_info(backing_vol)

      vol_xml = f"""
      <volume>
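The hunk stops just as create_overlay_volume begins building its volume XML. In libvirt, an overlay is declared by adding a <backingStore> element pointing at the untouched base image; a representative definition, with example paths, might be:

# Illustrative overlay volume XML; paths and sizes are examples.
name = "vm-test-overlay.qcow2"
backing_vol_path = "/var/lib/libvirt/images/base.qcow2"
capacity = 21474836480  # bytes, taken from the backing volume

vol_xml = f"""
<volume>
  <name>{name}</name>
  <capacity unit='bytes'>{capacity}</capacity>
  <target>
    <format type='qcow2'/>
  </target>
  <backingStore>
    <path>{backing_vol_path}</path>
    <format type='qcow2'/>
  </backingStore>
</volume>
"""
# pool.createXML(vol_xml, 0)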
@@ -418,7 +533,7 @@ def find_vms_using_volume(conn: libvirt.virConnect, vol_path: str, vol_name: str
              try:
                  p = conn.storagePoolLookupByName(pool_name)
                  v = p.storageVolLookupByName(volume_name_from_xml)
-                 if v.path() == vol_path:
+                 if _safe_get_volume_path(v) == vol_path:
                      vms_using_volume.append(domain)
                      break # Found it, move to the next domain
              except libvirt.libvirtError:
@@ -486,11 +601,11 @@ def move_volume(conn: libvirt.virConnect, source_pool_name: str, dest_pool_name:
      source_vol = source_pool.storageVolLookupByName(volume_name)

      # Check for available space before starting the move
-     source_info = source_vol.info()
-     source_capacity = source_info[1] # in bytes
+     _, source_capacity, _ = _safe_get_volume_info(source_vol) # in bytes

      # Check if the volume is in use by any running VMs before starting the move
-     vms_using_volume = find_vms_using_volume(conn, source_vol.path(), source_vol.name())
+     source_path = _safe_get_volume_path(source_vol)
+     vms_using_volume = find_vms_using_volume(conn, source_path, source_vol.name())
      running_vms = [vm.name() for vm in vms_using_volume if vm.state()[0] == libvirt.VIR_DOMAIN_RUNNING]

      if running_vms:
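The space check mentioned above takes the source volume's capacity from _safe_get_volume_info(); the destination side of such a check would come from the pool's info triple, whose last field is the available byte count. The package's own implementation of that step is not shown; a hedged sketch:

# Illustrative destination-side space check; not the package's implementation.
import libvirt

def has_room_for(dest_pool: libvirt.virStoragePool, required_bytes: int) -> bool:
    # virStoragePool.info() returns [state, capacity, allocation, available].
    try:
        available = dest_pool.info()[3]
    except libvirt.libvirtError:
        return False
    return available >= required_bytes

# if not has_room_for(dest_pool, source_capacity):
#     raise Exception("Not enough free space in destination pool.")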
@@ -501,8 +616,7 @@ def move_volume(conn: libvirt.virConnect, source_pool_name: str, dest_pool_name:
      if vms_using_volume:
          log_and_callback(f"Volume is used by offline VM(s):\n{[vm.name() for vm in vms_using_volume]}.\nTheir configuration will be updated after the move.\nWait Until the process is finished (can take a lot of time).")

-     source_info = source_vol.info()
-     source_capacity = source_info[1]
+     _, source_capacity, _ = _safe_get_volume_info(source_vol)
      source_format = "qcow2" # Default
      try:
          source_format = ET.fromstring(source_vol.XMLDesc(0)).findtext("target/format[@type]", "qcow2")
@@ -626,11 +740,11 @@ def move_volume(conn: libvirt.virConnect, source_pool_name: str, dest_pool_name:

          # Refresh destination pool to make the new volume visible
          log_and_callback(f"Refreshing destination pool '{dest_pool.name()}'...")
-         dest_pool.refresh(0)
+         _safe_refresh_pool(dest_pool)

          # Update any VM configurations that use this volume
-         old_path = source_vol.path()
-         new_path = new_vol.path()
+         old_path = _safe_get_volume_path(source_vol)
+         new_path = _safe_get_volume_path(new_vol)
          old_pool_name = source_pool.name()
          new_pool_name = dest_pool.name()

@@ -673,7 +787,7 @@ def move_volume(conn: libvirt.virConnect, source_pool_name: str, dest_pool_name:

          # Refresh source pool to remove the old volume from listings
          log_and_callback(f"Refreshing source pool '{source_pool.name()}'...")
-         source_pool.refresh(0)
+         _safe_refresh_pool(source_pool)
          log_and_callback("\nMove Finished, you can close this window")

      except Exception as e:
@@ -708,8 +822,12 @@ def delete_storage_pool(pool: libvirt.virStoragePool):
      """
      try:
          # If pool is active, destroy it first (make it inactive)
-         if pool.isActive():
-             pool.destroy()
+         if _safe_is_pool_active(pool):
+             try:
+                 pool.destroy()
+             except libvirt.libvirtError as e:
+                 logging.warning(f"Failed to destroy active pool '{pool.name()}': {e}")
+                 # Continue with undefine even if destroy fails
          # Undefine the pool (delete it)
          pool.undefine()
      except libvirt.libvirtError as e:
@@ -729,7 +847,7 @@ def get_all_storage_volumes(conn: libvirt.virConnect) -> List[libvirt.virStorage
      pools_info = list_storage_pools(conn)
      for pool_info in pools_info:
          pool = pool_info['pool']
-         if pool.isActive():
+         if _safe_is_pool_active(pool):
              try:
                  all_volumes.extend(pool.listAllVolumes())
              except libvirt.libvirtError:
@@ -749,7 +867,7 @@ def list_unused_volumes(conn: libvirt.virConnect, pool_name: str = None) -> List
      if pool_name:
          try:
              pool = conn.storagePoolLookupByName(pool_name)
-             if not pool.isActive():
+             if not _safe_is_pool_active(pool):
                  return []
              all_volumes = pool.listAllVolumes()
          except libvirt.libvirtError:
@@ -795,7 +913,8 @@ def list_unused_volumes(conn: libvirt.virConnect, pool_name: str = None) -> List

      unused_volumes = []
      for vol in all_volumes:
-         if vol.path() not in used_disk_paths:
+         vol_path = _safe_get_volume_path(vol)
+         if vol_path and vol_path not in used_disk_paths:
              unused_volumes.append(vol)

      return unused_volumes
@@ -916,8 +1035,7 @@ def copy_volume_across_hosts(source_conn: libvirt.virConnect, dest_conn: libvirt
          log_and_callback(f"[red]ERROR:[/ ] Could not find source/destination resources: {e}")
          raise

-     source_info = source_vol.info()
-     source_capacity = source_info[1]
+     _, source_capacity, _ = _safe_get_volume_info(source_vol)
      source_format = "qcow2"
      try:
          source_format = ET.fromstring(source_vol.XMLDesc(0)).findtext("target/format[@type]", "qcow2")
@@ -1093,13 +1211,13 @@ def copy_volume_across_hosts(source_conn: libvirt.virConnect, dest_conn: libvirt
          if upload_error: raise upload_error

          log_and_callback("Transfer complete.")
-         dest_pool.refresh(0)
+         _safe_refresh_pool(dest_pool)

          return {
-             "old_disk_path": source_vol.path(),
+             "old_disk_path": _safe_get_volume_path(source_vol),
              "new_pool_name": dest_pool.name(),
              "new_volume_name": dest_vol.name(),
-             "new_disk_path": dest_vol.path(),
+             "new_disk_path": _safe_get_volume_path(dest_vol),
          }

      except Exception as e:
vmanager/utils.py CHANGED
@@ -428,9 +428,9 @@ class CacheMonitor:
      def log_stats(self) -> None:
          """Log cache statistics."""
          stats = self.get_all_stats()
-         logging.info("=== Cache Statistics ===")
+         logging.debug("=== Cache Statistics ===")
          for name, data in stats.items():
-             logging.info(
+             logging.debug(
                  f"{name}: {data['hit_rate']:.1f}% hit rate "
                  f"({data['hits']} hits, {data['misses']} misses, "
                  f"{data['current_size']}/{data['max_size']} entries)"
@@ -482,10 +482,10 @@ def setup_cache_monitoring(enable: bool = True):
      cache_monitor = CacheMonitor()
      cache_monitor.tracked_functions.clear()
      if not enable:
-         logging.info("Cache monitoring disabled.")
+         logging.debug("Cache monitoring disabled.")
          return

-     logging.info("Cache monitoring enabled.")
+     logging.debug("Cache monitoring enabled.")
      cache_monitor.track(format_server_names)
      cache_monitor.track(extract_server_name_from_uri)
      cache_monitor.track(get_server_color_cached)
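Since the statistics now log at DEBUG, they only appear when the logger is lowered to that level. Based on the functions visible in this file, enabling and reading the monitor would look roughly like this (call sites are assumed, not shown in the diff):

# Illustrative usage of the monitor shown above; call sites are assumed.
import logging

logging.getLogger().setLevel(logging.DEBUG)    # cache stats now emit at DEBUG
monitor = setup_cache_monitoring(enable=True)  # returns the CacheMonitor instance
# ... exercise the application ...
monitor.log_stats()                            # hit rate, hits/misses, sizes per cache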
@@ -543,5 +543,38 @@ def setup_cache_monitoring(enable: bool = True):

      return cache_monitor

- # Disabled by default
- #setup_cache_monitoring(enable=False)
+
+ def setup_logging():
+     """Configures the logging for the application."""
+     from .config import load_config, get_log_path
+     config = load_config()
+     log_level_str = config.get("LOG_LEVEL", "INFO").upper()
+     log_level = getattr(logging, log_level_str, logging.INFO)
+     log_path = get_log_path()
+
+     # Ensure directory exists
+     log_path.parent.mkdir(parents=True, exist_ok=True)
+     root_logger = logging.getLogger()
+
+     for handler in root_logger.handlers[:]:
+         if isinstance(handler, logging.StreamHandler) and not isinstance(handler, logging.FileHandler):
+             root_logger.removeHandler(handler)
+
+     # Check if we already added a FileHandler to this path
+     has_file_handler = False
+     for handler in root_logger.handlers:
+         if isinstance(handler, logging.FileHandler) and handler.baseFilename == str(log_path.absolute()):
+             has_file_handler = True
+             break
+
+     if not has_file_handler:
+         file_handler = logging.FileHandler(log_path)
+         file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
+         root_logger.addHandler(file_handler)
+
+     root_logger.setLevel(log_level)
+
+     if not has_file_handler:
+         logging.info("--- Logging initialized ---")
+
+ setup_cache_monitoring(enable=False)
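The new setup_logging() reads LOG_LEVEL from the user config, drops any console StreamHandler, and attaches a single FileHandler, which keeps log output from interleaving with the Textual UI. A plausible startup call, assuming the entry point in vmanager/vmanager.py wires it up (that file's changes are not shown here):

# Hypothetical startup wiring; the real entry point is in vmanager/vmanager.py.
from vmanager.utils import setup_logging

def main() -> None:
    setup_logging()        # send all log records to the configured file
    # app = VManagerApp()  # Textual app class (name assumed, not shown here)
    # app.run()

if __name__ == "__main__":
    main()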
vmanager/vm_actions.py CHANGED
@@ -13,7 +13,8 @@ from .libvirt_utils import (
      _get_disabled_disks_elem,
      _get_backing_chain_elem,
      get_overlay_backing_path,
-     get_internal_id
+     get_internal_id,
+     get_host_domain_capabilities
  )
  from .utils import log_function_call
  from .vm_queries import get_vm_disks_info, get_vm_tpm_info, _get_domain_root, get_vm_snapshots
@@ -2186,30 +2187,33 @@ def check_server_migration_compatibility(source_conn: libvirt.virConnect, dest_c
      source_tpm_info = get_vm_tpm_info(source_root)
      if source_tpm_info:
          try:
-             dest_caps_xml = dest_conn.getCapabilities()
-             dest_caps_root = ET.fromstring(dest_caps_xml)
-
-             # Check if destination host supports TPM devices at all
-             if not dest_caps_root.find(".//devices/tpm"):
-                 issues.append({
-                     'severity': 'ERROR',
-                     'message': f"Source VM '{domain_name}' uses TPM, but destination host '{dest_conn.getURI()}' does not appear to support TPM devices."
-                 })
+             dest_caps_xml = get_host_domain_capabilities(dest_conn)
+             if dest_caps_xml:
+                 dest_caps_root = ET.fromstring(dest_caps_xml)
+
+                 # Check if destination host supports TPM devices at all
+                 if not dest_caps_root.find(".//devices/tpm"):
+                     issues.append({
+                         'severity': 'ERROR',
+                         'message': f"Source VM '{domain_name}' uses TPM, but destination host '{dest_conn.getURI()}' does not appear to support TPM devices."
+                     })
+                 else:
+                     for tpm_dev in source_tpm_info:
+                         if tpm_dev['type'] == 'passthrough':
+                             # More specific check for passthrough TPM
+                             issues.append({
+                                 'severity': 'WARNING',
+                                 'message': f"Source VM '{domain_name}' uses passthrough TPM ({tpm_dev['model']}). Passthrough TPM migration is often problematic due to hardware dependencies. Manual verification on destination host '{dest_conn.getURI()}' recommended."
+                             })
+                         elif tpm_dev['type'] == 'emulated' and is_live:
+                             # Emulated TPM should generally be fine for cold migration.
+                             # Live migration of emulated TPM might be tricky.
+                             issues.append({
+                                 'severity': 'WARNING',
+                                 'message': f"Source VM '{domain_name}' uses emulated TPM. Live migration with TPM can sometimes have issues; cold migration is safer."
+                             })
              else:
-                 for tpm_dev in source_tpm_info:
-                     if tpm_dev['type'] == 'passthrough':
-                         # More specific check for passthrough TPM
-                         issues.append({
-                             'severity': 'WARNING',
-                             'message': f"Source VM '{domain_name}' uses passthrough TPM ({tpm_dev['model']}). Passthrough TPM migration is often problematic due to hardware dependencies. Manual verification on destination host '{dest_conn.getURI()}' recommended."
-                         })
-                     elif tpm_dev['type'] == 'emulated' and is_live:
-                         # Emulated TPM should generally be fine for cold migration.
-                         # Live migration of emulated TPM might be tricky.
-                         issues.append({
-                             'severity': 'WARNING',
-                             'message': f"Source VM '{domain_name}' uses emulated TPM. Live migration with TPM can sometimes have issues; cold migration is safer."
-                         })
+                 issues.append({'severity': 'WARNING', 'message': f"Could not retrieve destination host capabilities for TPM check."})

          except libvirt.libvirtError as e:
              issues.append({'severity': 'WARNING', 'message': f"Could not retrieve destination host capabilities for TPM check: {e}"})
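The rewritten block degrades to a warning when the capabilities document cannot be fetched instead of letting the exception escape. For reference, libvirt advertises TPM support in its domain capabilities XML under <devices><tpm supported='yes'>; an equivalent stand-alone probe, offered only as a sketch, could be:

# Illustrative stand-alone probe; not the package's code.
import xml.etree.ElementTree as ET
import libvirt

def host_supports_tpm(conn: libvirt.virConnect) -> bool:
    try:
        caps_xml = conn.getDomainCapabilities()  # domain capabilities of the default emulator
    except libvirt.libvirtError:
        return False
    tpm = ET.fromstring(caps_xml).find(".//devices/tpm")
    return tpm is not None and tpm.get("supported") == "yes"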