atex-0.8-py3-none-any.whl → atex-0.9-py3-none-any.whl

@@ -0,0 +1,465 @@
+ import re
+ import time
+ import uuid
+ import shlex
+ import socket
+ import random
+ import tempfile
+ import threading
+ import subprocess
+ import urllib.parse
+ import xml.etree.ElementTree as ET
+ from pathlib import Path
+
+ from ... import connection, util
+ from .. import Provisioner, Remote
+ from . import locking
+
+ libvirt = util.import_libvirt()
+
+ # thread-safe bool
+ libvirt_needs_setup = threading.Semaphore(1)
+
+
+ def setup_event_loop():
+     if not libvirt_needs_setup.acquire(blocking=False):
+         return
+
+     # register and run the default event loop
+     libvirt.virEventRegisterDefaultImpl()
+
+     def loop():
+         while True:
+             time.sleep(0.5)
+             libvirt.virEventRunDefaultImpl()
+
+     util.debug("starting libvirt event loop")
+     thread = threading.Thread(target=loop, name="libvirt_event_loop", daemon=True)
+     thread.start()
+
+
+ class LibvirtCloningRemote(Remote, connection.ssh.ManagedSSHConn):
+     """
+     ssh-based Remote to a reserved libvirt domain, connected through
+     an ssh port forward on the libvirt host.
+     """
+
+     def __init__(self, ssh_options, host, domain, source_image, *, release_hook):
+         """
+         'ssh_options' are a dict, passed to ManagedSSHConn __init__().
+
+         'host' is a str of libvirt host name (used for repr()).
+
+         'domain' is a str of libvirt domain name (used for repr()).
+
+         'source_image' is a str of libvirt volume name that was cloned
+         for the domain to boot from (used for repr()).
+
+         'release_hook' is a callable called on .release() in addition
+         to disconnecting the connection.
+         """
+         # NOTE: self.lock inherited from ManagedSSHConn
+         super().__init__(options=ssh_options)
+         self.host = host
+         self.domain = domain
+         self.source_image = source_image
+         self.release_called = False
+         self.release_hook = release_hook
+
+     def release(self):
+         with self.lock:
+             if self.release_called:
+                 return
+             else:
+                 self.release_called = True
+             self.release_hook(self)
+             self.disconnect()
+
+     # not /technically/ a valid repr(), but meh
+     def __repr__(self):
+         class_name = self.__class__.__name__
+         return f"{class_name}({self.host}, {self.domain}, {self.source_image})"
+
+
+ # needs ManagedSSHConn due to .forward()
+ def reliable_ssh_local_fwd(conn, dest, retries=10):
+     for _ in range(retries):
+         # let the kernel give us a free port
+         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+             s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+             s.bind(("127.0.0.1", 0))
+             port = s.getsockname()[1]
+         # and try to quickly use it for forwarding
+         try:
+             conn.forward("LocalForward", f"127.0.0.1:{port} {dest}")
+             return port
+         except subprocess.CalledProcessError:
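+             # the kernel may hand the probed port to someone else between
+             # us closing the socket and ssh binding it, hence the retry loop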
+             pass
+     raise ConnectionError("could not add LocalForward / find a free port")
+
+
+ class LibvirtCloningProvisioner(Provisioner):
+     """
+     Provisioning done via pre-created libvirt domains on the libvirt VM host,
+     which are left (largely) untouched, except for their disk images, which are
+     swapped for fresh clones of a user-specified image name.
+     (This image name is presumably a fresh OS install made by 3rd party logic.)
+
+     This allows concurrent access by multiple users (no domains are created
+     or removed, just taken/released) and fast provisioning times (volume cloning
+     is much faster than Anaconda installs).
+
+     Access to the libvirt host is via ssh, but the remote user does not need to
+     have shell access, only TCP forwarding and libvirt socket access, ie.
+
+         Match User libvirtuser
+             AllowTcpForwarding yes
+             ForceCommand /usr/bin/virt-ssh-helper qemu:///system
+             #ForceCommand /usr/bin/nc -U /var/run/libvirt/libvirt-sock  # older
+
+     Note that eligible domains must also have a pre-existing disk image defined
+     as a volume (<disk type='volume' ...>) NOT as a file path (<disk type='file' ...>)
+     since only a volume has a pool association that can be matched up with the
+     would-be-cloned image name.
+     """
+
+     def __init__(
+         self, host, image, *, pool="default", domain_filter=".*",
+         domain_user="root", domain_sshkey,
+         reserve_delay=3, reserve_time=3600, start_event_loop=True,
+     ):
+         """
+         'host' is a ManagedSSHConn class instance, connected to a libvirt host.
+
+         'image' is a string with a libvirt storage volume name inside the
+         given storage 'pool' that should be used as the source for cloning.
+
+         'pool' is a libvirt storage pool used by all relevant domains on the
+         libvirt host **as well as** the would-be-cloned images.
+
+         'domain_filter' is a regex string matching libvirt domain names to
+         attempt reservation on. Useful for including only ie. 'auto-.*' domains
+         while leaving other domains on the same libvirt host untouched.
+
+         'domain_user' and 'domain_sshkey' (strings) specify how to connect to
+         an OS booted from the pre-installed 'image', as these credentials are
+         known only to the logic that created the 'image' in the first place.
+
+         'reserve_delay' is an int of how many seconds to wait between trying to
+         lock libvirt domains, after every unsuccessful locking attempt.
+         Ie. with delay=5 and 20 domains, the code will try to lock every domain
+         in 5*20=100 seconds before looping back to the first.
+
+         'reserve_time' is an int of maximum seconds to reserve a libvirt domain
+         for before other users can steal it for themselves. Note that there is
+         no automatic timeout release logic, it's just a hint for others.
+
+         'start_event_loop' set to True starts a global default libvirt event
+         loop as part of .start() (or context manager enter) in a background
+         daemon thread.
+         This is necessary to maintain connection keep-alives, but if you plan
+         on managing the loop yourself (have custom uses for the libvirt module),
+         setting False here avoids any meddling by this class.
+         """
+         self.lock = threading.RLock()
+         self.host = host
+         self.image = image
+         self.pool = pool
+         self.domain_filter = domain_filter
+         self.domain_user = domain_user
+         self.domain_sshkey = domain_sshkey
+         self.reserve_delay = reserve_delay
+         self.reserve_time = reserve_time
+         self.start_event_loop = start_event_loop
+
+         self.signature = uuid.uuid4()
+         self.reserve_end = None
+         self.queue = util.ThreadQueue(daemon=True)
+
+         # use two libvirt connections - one to handle reservations and cloning,
+         # and another for management and cleanup;
+         # the idea is to neuter the reserving thread on exit simply by closing
+         # its connection, so we can run cleanup from the other one
+         self.reserve_conn = None
+         self.manage_conn = None
+
+         # domain names we successfully locked, but which are still in the
+         # process of being set up (image cloning, OS booting, waiting for ssh
+         # etc.)
+         self.reserving = set()
+
+         # all active Remotes we managed to reserve and return to the user
+         self.remotes = []
+
+     def _reserve_one(self):
+         with self.lock:
+             conn = self.reserve_conn
+
+         # find the to-be-cloned image in the specified pool
+         pool = conn.storagePoolLookupByName(self.pool)
+         source_vol = pool.storageVolLookupByName(self.image)
+
+         # find the to-be-cloned image format
+         xml_root = ET.fromstring(source_vol.XMLDesc())
+         source_format = xml_root.find("target").find("format").get("type")
+
+         util.debug(
+             f"found volume {source_vol.name()} (format:{source_format}) in pool {pool.name()}",
+         )
+
+         # translate domain names to virDomain objects
+         with self.lock:
+             already_reserving = self.reserving
+             already_reserving = {conn.lookupByName(name) for name in already_reserving}
+
+         # acquire (lock) a domain on the libvirt host
+         util.debug("attempting to acquire a domain")
+         acquired = None
+         while not acquired:
+             domains = []
+             for domain in conn.listAllDomains():
+                 if not re.match(self.domain_filter, domain.name()):
+                     continue
+                 if domain in already_reserving:
+                     continue
+                 domains.append(domain)
+
+             random.shuffle(domains)
+             for domain in domains:
+                 if locking.lock(domain, self.signature, self.reserve_end):
+                     acquired = domain
+                     util.debug(f"acquired domain {acquired.name()}")
+                     break
+                 time.sleep(self.reserve_delay)
+
+         with self.lock:
+             self.reserving.add(acquired.name())
+
+         # shutdown the domain so we can work with its volumes
+         try:
+             acquired.destroy()
+         except libvirt.libvirtError as e:
+             if "domain is not running" not in str(e):
+                 raise
+
+         # parse XML definition of the domain
+         xmldesc = acquired.XMLDesc()
+         util.debug(f"domain {acquired.name()} XML:\n{xmldesc}")  # TODO: EXTRADEBUG log level
+         xml_root = ET.fromstring(xmldesc)
+         nvram_vol = nvram_path = None
+
+         # if it looks like UEFI/SecureBoot, try to find its nvram image in
+         # any one of the storage pools and delete it, freeing any previous
+         # OS installation metadata
+         if (xml_os := xml_root.find("os")) is not None:
+             if (xml_nvram := xml_os.find("nvram")) is not None:
+                 nvram_path = xml_nvram.text
+         if nvram_path:
+             # the file might be in any storage pool and is not refreshed
+             # by libvirt natively (because treating nvram as a storage pool
+             # is a user hack)
+             for p in conn.listAllStoragePools():
+                 p.refresh()
+             try:
+                 nvram_vol = conn.storageVolLookupByPath(nvram_path)
+             except libvirt.libvirtError as e:
+                 if "Storage volume not found" not in str(e):
+                     raise
+         if nvram_vol:
+             util.debug(f"deleting nvram volume {nvram_vol.name()}")
+             nvram_vol.delete()
+
+         # try to find a disk that is a volume in the specified storage pool
+         # that we could replace by cloning from the provided image
+         xml_devices = xml_root.find("devices")
+         if xml_devices is None:
+             raise RuntimeError(f"could not find <devices> for domain '{acquired.name()}'")
+
+         disk_vol_name = None
+         for xml_disk in xml_devices.findall("disk"):
+             if xml_disk.get("type") != "volume":
+                 continue
+             xml_disk_source = xml_disk.find("source")
+             if xml_disk_source is None:
+                 continue
+             if xml_disk_source.get("pool") != pool.name():
+                 continue
+             disk_vol_name = xml_disk_source.get("volume")
+             util.debug(f"found a domain disk in XML: {disk_vol_name} for pool {pool.name()}")
+             break
+         else:
+             raise RuntimeError("could not find any <disk> in <devices>")
+
+         # clone the to-be-cloned image under the same name as the original
+         # domain volume
+         new_volume = util.dedent(fr"""
+             <volume>
+               <name>{disk_vol_name}</name>
+               <target>
+                 <format type='{source_format}'/>
+               </target>
+             </volume>
+         """)
+         try:
+             disk_vol = pool.storageVolLookupByName(disk_vol_name)
+             disk_vol.delete()
+         except libvirt.libvirtError as e:
+             if "Storage volume not found" not in str(e):
+                 raise
+         pool.createXMLFrom(new_volume, source_vol)
+
+         # start the domain up
+         util.debug(f"starting up {acquired.name()}")
+         acquired.create()  # like 'virsh start' NOT 'virsh create'
+
+         # wait for an IP address leased by libvirt host
+         addrs = {}
+         while not addrs:
+             addrs = acquired.interfaceAddresses(
+                 libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE,
+             )
+             time.sleep(1)
+         util.debug(f"found iface addrs: {addrs}")
+         first_iface = next(iter(addrs.values()))
+         first_addr = next(iter(first_iface.values()))[0]["addr"]
+
+         # set up ssh LocalForward to it
+         port = reliable_ssh_local_fwd(self.host, f"{first_addr}:22")
+
+         # create a remote and connect it
+         def release_hook(remote):
+             # un-forward the libvirt host ssh-forwarded port
+             self.host.forward("LocalForward", f"127.0.0.1:{port} {first_addr}:22", cancel=True)
+
+             # keep this entire block in a lock because the Provisioner can
+             # swap out self.manage_conn and close the previous one at any time,
+             # ie. between us reading self.manage_conn and using it
+             with self.lock:
+                 # unlock the domain on the libvirt host
+                 if self.manage_conn:
+                     try:
+                         domain = self.manage_conn.lookupByName(remote.domain)
+                         locking.unlock(domain, self.signature)
+                     except libvirt.libvirtError as e:
+                         if "Domain not found" not in str(e):
+                             raise
+                 # remove from the list of remotes inside this Provisioner
+                 try:
+                     self.remotes.remove(remote)
+                 except ValueError:
+                     pass
+
+         ssh_options = {
+             "Hostname": "127.0.0.1",
+             "User": self.domain_user,
+             "Port": str(port),
+             "IdentityFile": self.domain_sshkey,
+             "ConnectionAttempts": "1000",
+             "Compression": "yes",
+         }
+         remote = LibvirtCloningRemote(
+             ssh_options=ssh_options,
+             host=self.host.options["Hostname"],  # TODO: something more reliable?
+             domain=acquired.name(),
+             source_image=self.image,
+             release_hook=release_hook,
+         )
+         # LocalForward-ed connection is prone to failing with
+         # 'read: Connection reset by peer' instead of a timeout,
+         # so retry a few times
+         for _ in range(100):
+             try:
+                 remote.connect()
+                 break
+             except ConnectionError:
+                 time.sleep(0.5)
+
+         with self.lock:
+             self.remotes.append(remote)
+             self.reserving.remove(acquired.name())
+
+         return remote
+
+     def _open_libvirt_conn(self):
+         # trick .cmd() to not run anything, but just return the ssh CLI
+         cli_args = self.host.cmd(
+             ("virt-ssh-helper", "qemu:///system"),
+             func=lambda *args, **_: args[0],
+         )
+         # to make libvirt connect via our ManagedSSHConn, we need to give it
+         # a specific ssh CLI, but libvirt URI command= takes only one argv[0]
+         # and cannot pass arguments - we work around this by creating a temp
+         # arg-less executable
+         with tempfile.NamedTemporaryFile("w+t", delete_on_close=False) as f:
+             f.write("#!/bin/bash\n")
+             f.write("exec ")
+             f.write(shlex.join(cli_args))
+             f.write("\n")
+             f.close()
+             name = Path(f.name)
+             name.chmod(0o0500)  # r-x------
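+             # qemu+ext:// makes libvirt spawn the command= executable and
+             # tunnel its RPC protocol over the command's stdin/stdout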
+             uri = f"qemu+ext:///system?command={urllib.parse.quote(str(name.absolute()))}"
+             util.debug(f"opening libvirt conn to {uri}")
+             conn = libvirt.open(uri)
+             conn.setKeepAlive(5, 3)
+             return conn
+
+     def start(self):
+         if self.start_event_loop:
+             setup_event_loop()
+         with self.lock:
+             self.reserve_conn = self._open_libvirt_conn()
+             self.manage_conn = self.reserve_conn  # for now
+             self.reserve_end = int(time.time()) + self.reserve_time
+             # get an initial first remote
+             self.queue.start_thread(target=self._reserve_one)
+
+     def stop(self):
+         with self.lock:
+             #util.debug(f"SELF.RESERVING: {self.reserving} // SELF.REMOTES: {self.remotes}")
+             # close reserving libvirt host connection
+             # - this stops _reserve_one() from doing anything bad
+             if self.reserve_conn:
+                 self.reserve_conn.close()
+                 self.reserve_conn = None
+
+             # reopen managing connection here (because we closed reserve_conn)
+             # - note that we can't open this in .start() because libvirt conns
+             #   can break on signals/interrupts, resulting in "Cannot recv data"
+             self.manage_conn = self._open_libvirt_conn()
+             # abort reservations in progress
+             while self.reserving:
+                 try:
+                     domain = self.manage_conn.lookupByName(self.reserving.pop())
+                     locking.unlock(domain, self.signature)
+                 except libvirt.libvirtError as e:
+                     util.debug(f"GOT ERROR: {str(e)}")
+             # cancel/release all Remotes ever created by us
+             while self.remotes:
+                 self.remotes.pop().release()
+             self.manage_conn.close()
+             self.manage_conn = None
+
+             self.reserve_end = None
+         # TODO: wait for threadqueue threads to join?
+
+     def get_remote(self, block=True):
+         # if the reservation thread is not running, start one
+         with self.lock:
+             if not self.queue.threads:
+                 self.queue.start_thread(target=self._reserve_one)
+         try:
+             return self.queue.get(block=block)
+         except util.ThreadQueue.Empty:
+             # always non-blocking
+             return None
+
+     # not /technically/ a valid repr(), but meh
+     def __repr__(self):
+         class_name = self.__class__.__name__
+         remotes = len(self.remotes)
+         host_name = self.host.options["Hostname"]
+         return (
+             f"{class_name}({host_name}, {self.domain_filter}, {self.signature}, "
+             f"{remotes} remotes, {hex(id(self))})"
+         )
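
For orientation, a minimal usage sketch of the provisioner above. The import
paths, the ManagedSSHConn setup, the volume name and the key path are
assumptions inferred from the code, not a documented API:

    from atex import connection
    from atex.provision.libvirt import LibvirtCloningProvisioner  # assumed path

    # ssh connection to the libvirt host (connection setup details omitted)
    host = connection.ssh.ManagedSSHConn(options={"Hostname": "virthost.example.com"})

    prov = LibvirtCloningProvisioner(
        host, "installed-os-image",               # hypothetical volume name
        domain_filter="auto-.*",
        domain_sshkey="/home/user/.ssh/id_atex",  # hypothetical key path
    )
    prov.start()
    try:
        remote = prov.get_remote()  # blocks until a domain is cloned and booted
        ...                         # use the connected Remote
        remote.release()            # unlock the domain for other users
    finally:
        prov.stop()
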
@@ -0,0 +1,168 @@
+ """
+ Helpers for "reserving" (locking) libvirt domains using the <metadata> tag.
+
+ The idea is for each user to generate some "signature" (ie. UUIDv4) and
+ attempt to lock eligible domains on a libvirt host in a random order,
+ hopefully eventually succeeding (as others release the domain locks).
+
+ Lock safety is ensured by libvirt retaining <metadata> content (tag) order,
+ so each user can re-check (after locking) whether the race was won or lost,
+ depending on whether the user's signature is on top of <metadata>.
+
+ A timestamp (meaning now()+duration) is used as the lock tag value/text,
+ and any expired timestamps (now() > timestamp) are ignored by the locking
+ logic.
+ """
+
+ import re
+ import time
+ import random
+ import xml.etree.ElementTree as ET
+
+ import libvirt
+
+
+ def get_locks(domain, expired=False):
+     """
+     Yield (signature,timestamp) tuples of atex-lock entries for a 'domain'.
+
+     If 'expired' is True, yield also locks with an expired timestamp.
+
+     If a timestamp is missing, it is substituted with 0.
+     """
+     xml_dump = ET.fromstring(domain.XMLDesc(libvirt.VIR_DOMAIN_XML_INACTIVE))
+     metadata = xml_dump.find("metadata")
+     # no <metadata> - no locks possible
+     if metadata is None:
+         return
+     now = int(time.time())
+     for elem in metadata:
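+         # setMetadata() stores each lock under a private XML namespace whose
+         # URI is the owner signature, so ElementTree renders the tag as
+         # '{signature}atex-lock'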
+         if match := re.fullmatch(r"{(.+)}atex-lock", elem.tag):
+             timestamp = int(elem.text) if elem.text else 0
+             if not expired and timestamp <= now:
+                 continue
+             signature = match.group(1)
+             yield (signature, timestamp)
+
+
+ def unlock(domain, signature):
+     """
+     Unlock a domain previously locked by lock().
+     """
+     domain.setMetadata(
+         libvirt.VIR_DOMAIN_METADATA_ELEMENT,
+         None,
+         "atex-lock",
+         str(signature),
+         libvirt.VIR_DOMAIN_AFFECT_CONFIG,
+     )
+
+
+ def lock(domain, signature, timestamp):
+     """
+     Attempt to lock a domain under 'signature' ownership,
+     writing out 'timestamp' as the lock tag content.
+
+     Returns True if the domain was successfully locked, False otherwise.
+     """
+     signature = str(signature)
+     timestamp = int(timestamp)
+
+     # if there are any existing locks held on the domain
+     if any(get_locks(domain)):
+         return False
+
+     # try locking it
+     domain.setMetadata(
+         libvirt.VIR_DOMAIN_METADATA_ELEMENT,
+         f"<atex-lock>{timestamp}</atex-lock>",
+         "atex-lock",
+         signature,
+         libvirt.VIR_DOMAIN_AFFECT_CONFIG,
+     )
+
+     # get fresh XML and verify we won the race
+     try:
+         first = next(get_locks(domain))
+     except StopIteration:
+         raise RuntimeError(
+             "failed to verify lock signature, was timestamp already expired?",
+         ) from None
+
+     first_sig, first_stamp = first
+     if first_sig == signature and first_stamp == timestamp:
+         return True
+     else:
+         # we lost
+         unlock(domain, signature)
+         return False
+
+
+ def lock_any(connection, signature, duration, filter_domains=lambda _: True):
+     """
+     Given a libvirt 'connection', attempt to lock (reserve) any one
+     domain under 'signature' ownership for 'duration' seconds.
+
+     If 'filter_domains' is given as a callable, it is used to filter
+     domains considered for locking. It takes one argument (libvirt
+     domain object) and must return True (domain is eligible for locking)
+     or False (domain should be skipped).
+     For example: lambda dom: dom.name().startswith("foo-")
+
+     Returns a libvirt domain object of a successfully locked domain,
+     or None if no domain could be locked.
+     """
+     domains = connection.listAllDomains(libvirt.VIR_CONNECT_LIST_DOMAINS_PERSISTENT)
+     # try to avoid lock conflicts
+     random.shuffle(domains)
+
+     timestamp = int(time.time() + duration)
+     for domain in filter(filter_domains, domains):
+         if lock(domain, signature, timestamp):
+             return domain
+     return None
+
+
+ def unlock_all(connection, signature=None, shutdown=False, filter_domains=lambda _: True):
+     """
+     Remove all locks for all domains.
+
+     If 'signature' is given, remove only locks matching the signature.
+
+     If 'shutdown' is True, also forcibly shutdown (destroy) all domains.
+
+     If 'filter_domains' is given, it behaves like for lock_any().
+     """
+     domains = connection.listAllDomains(libvirt.VIR_CONNECT_LIST_DOMAINS_PERSISTENT)
+     for domain in filter(filter_domains, domains):
+         for lock_sig, _ in get_locks(domain, expired=True):
+             if signature:
+                 if str(signature) == lock_sig:
+                     unlock(domain, lock_sig)
+             else:
+                 unlock(domain, lock_sig)
+         if shutdown:
+             domain.destroy()
+
+
+ def cleanup_expired(connection, timestamp=None, filter_domains=lambda _: True):
+     """
+     Clean up expired locks for all domains.
+
+     Useful when a client terminates without releasing the lock, which later
+     expires (making the domain available), but as no other user is responsible
+     for the given signature, it is never removed, unless this function is used
+     (by some maintenance service).
+
+     Note that unlock_all() cleans up all locks, incl. expired ones.
+
+     If 'timestamp' is given, it is used instead of the now() UTC timestamp.
+
+     If 'filter_domains' is given, it behaves like for lock_any().
+     """
+     now = int(timestamp) if timestamp is not None else int(time.time())
+     domains = connection.listAllDomains(libvirt.VIR_CONNECT_LIST_DOMAINS_PERSISTENT)
+     for domain in filter(filter_domains, domains):
+         for signature, stamp in get_locks(domain, expired=True):
+             if stamp <= now:
+                 unlock(domain, signature)
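
The helpers above compose into a simple reservation flow. A sketch, assuming
the module is importable as atex.provision.libvirt.locking and that a libvirt
connection is available (the URI is illustrative):

    import uuid
    import libvirt

    from atex.provision.libvirt import locking  # assumed import path

    conn = libvirt.open("qemu:///system")
    me = uuid.uuid4()

    # try to reserve any eligible domain for up to an hour
    dom = locking.lock_any(
        conn, me, duration=3600,
        filter_domains=lambda d: d.name().startswith("auto-"),
    )
    if dom is not None:
        try:
            ...  # use the reserved domain
        finally:
            locking.unlock(dom, me)
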
@@ -52,7 +52,7 @@ virsh net-define "$tmpfile"
  virsh net-autostart default
  virsh net-start default
 
- # set up a default network
+ # set up a default storage pool
  if virsh -q pool-list --name | grep -q '^default *$'; then
      virsh pool-destroy default
      virsh pool-undefine default
@@ -70,3 +70,23 @@ EOF
  virsh pool-define "$tmpfile"
  virsh pool-autostart default
  virsh pool-start default
+
+ # create another storage pool for nvram .vars files,
+ # so they can be easily removed using just a libvirt connection
+ if virsh -q pool-list --name | grep -q '^nvram *$'; then
+     virsh pool-destroy nvram
+     virsh pool-undefine nvram
+ elif virsh -q pool-list --name --inactive | grep -q '^nvram *$'; then
+     virsh pool-undefine nvram
+ fi
+ cat > "$tmpfile" <<EOF
+ <pool type='dir'>
+   <name>nvram</name>
+   <target>
+     <path>/var/lib/libvirt/qemu/nvram</path>
+   </target>
+ </pool>
+ EOF
+ virsh pool-define "$tmpfile"
+ virsh pool-autostart nvram
+ virsh pool-start nvram
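
The extra 'nvram' pool exists so that leftover EFI vars files can be located
and deleted over a plain libvirt connection, as the provisioner above does.
A sketch (the vars file path is hypothetical, the URI illustrative):

    import libvirt

    conn = libvirt.open("qemu:///system")
    for pool in conn.listAllStoragePools():
        pool.refresh()  # dir pools do not notice new/removed files on their own
    vol = conn.storageVolLookupByPath("/var/lib/libvirt/qemu/nvram/example_VARS.fd")
    vol.delete()
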
@@ -0,0 +1 @@
+ from .podman import PodmanProvisioner, PodmanRemote  # noqa: F401