atex-0.8-py3-none-any.whl → atex-0.10-py3-none-any.whl

Files changed (44)
  1. atex/aggregator/__init__.py +60 -0
  2. atex/aggregator/json.py +96 -0
  3. atex/cli/__init__.py +11 -1
  4. atex/cli/fmf.py +73 -23
  5. atex/cli/libvirt.py +128 -0
  6. atex/cli/testingfarm.py +60 -3
  7. atex/connection/__init__.py +13 -11
  8. atex/connection/podman.py +61 -0
  9. atex/connection/ssh.py +38 -47
  10. atex/executor/executor.py +144 -119
  11. atex/executor/reporter.py +66 -71
  12. atex/executor/scripts.py +13 -5
  13. atex/executor/testcontrol.py +43 -30
  14. atex/fmf.py +94 -74
  15. atex/orchestrator/__init__.py +76 -2
  16. atex/orchestrator/adhoc.py +465 -0
  17. atex/{provision → provisioner}/__init__.py +54 -42
  18. atex/provisioner/libvirt/__init__.py +2 -0
  19. atex/provisioner/libvirt/libvirt.py +472 -0
  20. atex/provisioner/libvirt/locking.py +170 -0
  21. atex/{provision → provisioner}/libvirt/setup-libvirt.sh +21 -1
  22. atex/provisioner/podman/__init__.py +2 -0
  23. atex/provisioner/podman/podman.py +169 -0
  24. atex/{provision → provisioner}/testingfarm/api.py +121 -69
  25. atex/{provision → provisioner}/testingfarm/testingfarm.py +44 -52
  26. atex/util/libvirt.py +18 -0
  27. atex/util/log.py +53 -43
  28. atex/util/named_mapping.py +158 -0
  29. atex/util/subprocess.py +46 -12
  30. atex/util/threads.py +71 -20
  31. atex-0.10.dist-info/METADATA +86 -0
  32. atex-0.10.dist-info/RECORD +44 -0
  33. atex/orchestrator/aggregator.py +0 -106
  34. atex/orchestrator/orchestrator.py +0 -324
  35. atex/provision/libvirt/__init__.py +0 -24
  36. atex/provision/podman/README +0 -59
  37. atex/provision/podman/host_container.sh +0 -74
  38. atex-0.8.dist-info/METADATA +0 -197
  39. atex-0.8.dist-info/RECORD +0 -37
  40. /atex/{provision → provisioner}/libvirt/VM_PROVISION +0 -0
  41. /atex/{provision → provisioner}/testingfarm/__init__.py +0 -0
  42. {atex-0.8.dist-info → atex-0.10.dist-info}/WHEEL +0 -0
  43. {atex-0.8.dist-info → atex-0.10.dist-info}/entry_points.txt +0 -0
  44. {atex-0.8.dist-info → atex-0.10.dist-info}/licenses/COPYING.txt +0 -0
atex/provisioner/libvirt/libvirt.py (new file)
@@ -0,0 +1,472 @@
+ import re
+ import time
+ import uuid
+ import shlex
+ import socket
+ import random
+ import textwrap
+ import tempfile
+ import threading
+ import subprocess
+ import urllib.parse
+ import xml.etree.ElementTree as ET
+ from pathlib import Path
+
+ from ... import connection, util
+ from .. import Provisioner, Remote
+ from . import locking
+
+ libvirt = util.import_libvirt()
+
+ # thread-safe bool
+ libvirt_needs_setup = threading.Semaphore(1)
+
+
+ def setup_event_loop():
+     if not libvirt_needs_setup.acquire(blocking=False):
+         return
+
+     # register and run the default event loop
+     libvirt.virEventRegisterDefaultImpl()
+
+     def loop():
+         while True:
+             time.sleep(0.5)
+             libvirt.virEventRunDefaultImpl()
+
+     util.debug("starting libvirt event loop")
+     thread = threading.Thread(target=loop, name="libvirt_event_loop", daemon=True)
+     thread.start()
+
+
+ class LibvirtCloningRemote(Remote, connection.ssh.ManagedSSHConnection):
+     """
+     TODO
+     """
+
+     def __init__(self, ssh_options, host, domain, source_image, *, release_hook):
+         """
+         'ssh_options' is a dict, passed to ManagedSSHConnection __init__().
+
+         'host' is a str of the libvirt host name (used for repr()).
+
+         'domain' is a str of the libvirt domain name (used for repr()).
+
+         'source_image' is a str of the libvirt volume name that was cloned
+         for the domain to boot from (used for repr()).
+
+         'release_hook' is a callable called on .release() in addition
+         to disconnecting the connection.
+         """
+         # NOTE: self.lock inherited from ManagedSSHConnection
+         super().__init__(options=ssh_options)
+         self.host = host
+         self.domain = domain
+         self.source_image = source_image
+         self.release_called = False
+         self.release_hook = release_hook
+
+     def release(self):
+         with self.lock:
+             if self.release_called:
+                 return
+             else:
+                 self.release_called = True
+             self.release_hook(self)
+             self.disconnect()
+
+     # not /technically/ a valid repr(), but meh
+     def __repr__(self):
+         class_name = self.__class__.__name__
+         return f"{class_name}({self.host}, {self.domain}, {self.source_image})"
+
+
+ # needs ManagedSSHConnection due to .forward()
+ def reliable_ssh_local_fwd(conn, dest, retries=10):
+     for _ in range(retries):
+         # let the kernel give us a free port
+         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+             s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+             s.bind(("127.0.0.1", 0))
+             port = s.getsockname()[1]
+         # and try to quickly use it for forwarding
+         try:
+             conn.forward("LocalForward", f"127.0.0.1:{port} {dest}")
+             return port
+         except subprocess.CalledProcessError:
+             pass
+     raise ConnectionError("could not add LocalForward / find a free port")
+
+
+ class LibvirtCloningProvisioner(Provisioner):
+     """
+     Provisioning done via pre-created libvirt domains on the libvirt VM host,
+     which are left (largely) untouched, except for their disk images, which are
+     swapped out for fresh clones of a user-specified image name.
+     (This image is presumably a fresh OS install made by 3rd party logic.)
+
+     This allows concurrent access by multiple users (no domains are created
+     or removed, just taken/released) and fast provisioning times (volume cloning
+     is much faster than Anaconda installs).
+
+     Access to the libvirt host is via ssh, but the remote user does not need to
+     have shell access, only TCP forwarding and libvirt socket access, e.g.
+
+         Match User libvirtuser
+             AllowTcpForwarding yes
+             ForceCommand /usr/bin/virt-ssh-helper qemu:///system
+             #ForceCommand /usr/bin/nc -U /var/run/libvirt/libvirt-sock  # older
+
+     Note that eligible domains must also have a pre-existing disk image defined
+     as a volume (<disk type='volume' ...>), NOT as a plain file path
+     (<disk type='file' ...>), since only a volume has a pool association that
+     can be matched up with the would-be-cloned image name.
+     """
+
+     def __init__(
+         self, host, image, *, pool="default", domain_filter=".*",
+         domain_user="root", domain_sshkey,
+         reserve_delay=3, reserve_time=3600, start_event_loop=True,
+     ):
+         """
+         'host' is a ManagedSSHConnection class instance, connected to a libvirt host.
+
+         'image' is a string with a libvirt storage volume name inside the
+         given storage 'pool' that should be used as the source for cloning.
+
+         'pool' is a libvirt storage pool used by all relevant domains on the
+         libvirt host **as well as** the would-be-cloned images.
+
+         'domain_filter' is a regex string matching libvirt domain names to
+         attempt reservation on. Useful for including only e.g. 'auto-.*' domains
+         while leaving other domains on the same libvirt host untouched.
+
+         'domain_user' and 'domain_sshkey' (strings) specify how to connect to
+         an OS booted from the pre-installed 'image', as these credentials are
+         known only to the logic that created the 'image' in the first place.
+
+         'reserve_delay' is an int of how many seconds to wait between trying to
+         lock libvirt domains, after every unsuccessful locking attempt.
+         E.g. with delay=5 and 20 domains, the code will try to lock every domain
+         in 5*20=100 seconds before looping back to the first.
+
+         'reserve_time' is an int of maximum seconds to reserve a libvirt domain
+         for before other users can steal it for themselves. Note that there is
+         no automatic timeout release logic, it's just a hint for others.
+
+         'start_event_loop' set to True starts a global default libvirt event
+         loop as part of .start() (or context manager enter) in a background
+         daemon thread.
+         This is necessary to maintain connection keep-alives, but if you plan
+         on managing the loop yourself (have custom uses for the libvirt module),
+         setting False here avoids any meddling by this class.
+         """
+         self.lock = threading.RLock()
+         self.host = host
+         self.image = image
+         self.pool = pool
+         self.domain_filter = domain_filter
+         self.domain_user = domain_user
+         self.domain_sshkey = domain_sshkey
+         self.reserve_delay = reserve_delay
+         self.reserve_time = reserve_time
+         self.start_event_loop = start_event_loop
+
+         self.signature = uuid.uuid4()
+         self.reserve_end = None
+         self.queue = util.ThreadQueue(daemon=True)
+         self.to_reserve = 0
+
+         # use two libvirt connections - one to handle reservations and cloning,
+         # and another for management and cleanup;
+         # the idea is to neuter the reserving thread on exit simply by closing
+         # its connection, so we can run cleanup from the other one
+         self.reserve_conn = None
+         self.manage_conn = None
+
+         # domain names we successfully locked, but which are still in the
+         # process of being set up (image cloning, OS booting, waiting for ssh,
+         # etc.)
+         self.reserving = set()
+
+         # all active Remotes we managed to reserve and return to the user
+         self.remotes = []
+
+     def _reserve_one(self):
+         with self.lock:
+             conn = self.reserve_conn
+
+         # find the to-be-cloned image in the specified pool
+         pool = conn.storagePoolLookupByName(self.pool)
+         source_vol = pool.storageVolLookupByName(self.image)
+
+         # find the to-be-cloned image format
+         xml_root = ET.fromstring(source_vol.XMLDesc())
+         source_format = xml_root.find("target").find("format").get("type")
+
+         util.debug(
+             f"found volume {source_vol.name()} (format:{source_format}) in pool {pool.name()}",
+         )
+
+         # translate domain names to virDomain objects
+         with self.lock:
+             already_reserving = self.reserving
+         already_reserving = {conn.lookupByName(name) for name in already_reserving}
+
+         # acquire (lock) a domain on the libvirt host
+         util.debug("attempting to acquire a domain")
+         acquired = None
+         while not acquired:
+             domains = []
+             for domain in conn.listAllDomains():
+                 if not re.match(self.domain_filter, domain.name()):
+                     continue
+                 if domain in already_reserving:
+                     continue
+                 domains.append(domain)
+
+             random.shuffle(domains)
+             for domain in domains:
+                 if locking.lock(domain, self.signature, self.reserve_end):
+                     acquired = domain
+                     util.debug(f"acquired domain {acquired.name()}")
+                     break
+                 time.sleep(self.reserve_delay)
+
+         with self.lock:
+             self.reserving.add(acquired.name())
+
+         # shutdown the domain so we can work with its volumes
+         try:
+             acquired.destroy()
+         except libvirt.libvirtError as e:
+             if "domain is not running" not in str(e):
+                 raise
+
+         # parse XML definition of the domain
+         xmldesc = acquired.XMLDesc().rstrip("\n")
+         util.extradebug(f"domain {acquired.name()} XML:\n{textwrap.indent(xmldesc, ' ')}")
+         xml_root = ET.fromstring(xmldesc)
+         nvram_vol = nvram_path = None
+
+         # if it looks like UEFI/SecureBoot, try to find its nvram image in
+         # any one of the storage pools and delete it, freeing any previous
+         # OS installation metadata
+         if (xml_os := xml_root.find("os")) is not None:
+             if (xml_nvram := xml_os.find("nvram")) is not None:
+                 nvram_path = xml_nvram.text
+         if nvram_path:
+             # the file might be in any storage pool and is not refreshed
+             # by libvirt natively (because treating nvram as a storage pool
+             # is a user hack)
+             for p in conn.listAllStoragePools():
+                 p.refresh()
+             try:
+                 nvram_vol = conn.storageVolLookupByPath(nvram_path)
+             except libvirt.libvirtError as e:
+                 if "Storage volume not found" not in str(e):
+                     raise
+         if nvram_vol:
+             util.debug(f"deleting nvram volume {nvram_vol.name()}")
+             nvram_vol.delete()
+
+         # try to find a disk that is a volume in the specified storage pool
+         # that we could replace by cloning from the provided image
+         xml_devices = xml_root.find("devices")
+         if xml_devices is None:
+             raise RuntimeError(f"could not find <devices> for domain '{acquired.name()}'")
+
+         disk_vol_name = None
+         for xml_disk in xml_devices.findall("disk"):
+             if xml_disk.get("type") != "volume":
+                 continue
+             xml_disk_source = xml_disk.find("source")
+             if xml_disk_source is None:
+                 continue
+             if xml_disk_source.get("pool") != pool.name():
+                 continue
+             disk_vol_name = xml_disk_source.get("volume")
+             util.debug(f"found a domain disk in XML: {disk_vol_name} for pool {pool.name()}")
+             break
+         else:
+             raise RuntimeError("could not find any <disk> in <devices>")
+
+         # clone the to-be-cloned image under the same name as the original
+         # domain volume
+         new_volume = util.dedent(fr"""
+             <volume>
+                 <name>{disk_vol_name}</name>
+                 <target>
+                     <format type='{source_format}'/>
+                 </target>
+             </volume>
+         """)
+         try:
+             disk_vol = pool.storageVolLookupByName(disk_vol_name)
+             disk_vol.delete()
+         except libvirt.libvirtError as e:
+             if "Storage volume not found" not in str(e):
+                 raise
+         pool.createXMLFrom(new_volume, source_vol)
+
+         # start the domain up
+         util.debug(f"starting up {acquired.name()}")
+         acquired.create()  # like 'virsh start' NOT 'virsh create'
+
+         # wait for an IP address leased by libvirt host
+         addrs = {}
+         while not addrs:
+             addrs = acquired.interfaceAddresses(
+                 libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE,
+             )
+             time.sleep(1)
+         util.debug(f"found iface addrs: {addrs}")
+         first_iface = next(iter(addrs.values()))
+         first_addr = next(iter(first_iface.values()))[0]["addr"]
+
+         # set up ssh LocalForward to it
+         port = reliable_ssh_local_fwd(self.host, f"{first_addr}:22")
+
+         # prepare release using variables from this scope
+         def release_hook(remote):
+             # un-forward the libvirt host ssh-forwarded port
+             self.host.forward("LocalForward", f"127.0.0.1:{port} {first_addr}:22", cancel=True)
+
+             # keep this entire block in a lock because the Provisioner can
+             # swap out self.manage_conn and close the previous one at any time,
+             # i.e. between us reading self.manage_conn and using it
+             with self.lock:
+                 # unlock the domain on the libvirt host
+                 if self.manage_conn:
+                     try:
+                         domain = self.manage_conn.lookupByName(remote.domain)
+                         locking.unlock(domain, self.signature)
+                         domain.destroy()
+                     except libvirt.libvirtError as e:
+                         if "Domain not found" not in str(e):
+                             raise
+                 # remove from the list of remotes inside this Provisioner
+                 try:
+                     self.remotes.remove(remote)
+                 except ValueError:
+                     pass
+
+         # create a remote and connect it
+         ssh_options = {
+             "Hostname": "127.0.0.1",
+             "User": self.domain_user,
+             "Port": str(port),
+             "IdentityFile": str(Path(self.domain_sshkey).absolute()),
+             "ConnectionAttempts": "1000",
+             "Compression": "yes",
+         }
+         remote = LibvirtCloningRemote(
+             ssh_options=ssh_options,
+             host=self.host.options["Hostname"],  # TODO: something more reliable?
+             domain=acquired.name(),
+             source_image=self.image,
+             release_hook=release_hook,
+         )
+         # LocalForward-ed connection is prone to failing with
+         # 'read: Connection reset by peer' instead of a timeout,
+         # so retry a few times
+         for _ in range(100):
+             try:
+                 remote.connect()
+                 break
+             except ConnectionError:
+                 time.sleep(0.5)
+
+         with self.lock:
+             self.remotes.append(remote)
+             self.reserving.remove(acquired.name())
+
+         return remote
+
+     def _open_libvirt_conn(self):
+         # trick .cmd() to not run anything, but just return the ssh CLI
+         cli_args = self.host.cmd(
+             ("virt-ssh-helper", "qemu:///system"),
+             func=lambda *args, **_: args[0],
+         )
+         # to make libvirt connect via our ManagedSSHConnection, we need to give it
+         # a specific ssh CLI, but libvirt URI command= takes only one argv[0]
+         # and cannot pass arguments - we work around this by creating a temp
+         # arg-less executable
+         with tempfile.NamedTemporaryFile("w+t", delete_on_close=False) as f:
+             f.write("#!/bin/bash\n")
+             f.write("exec ")
+             f.write(shlex.join(cli_args))
+             f.write("\n")
+             f.close()
+             name = Path(f.name)
+             name.chmod(0o0500)  # r-x------
+             uri = f"qemu+ext:///system?command={urllib.parse.quote(str(name.absolute()))}"
+             util.debug(f"opening libvirt conn to {uri}")
+             conn = libvirt.open(uri)
+         conn.setKeepAlive(5, 3)
+         return conn
+
+     def start(self):
+         if self.start_event_loop:
+             setup_event_loop()
+         with self.lock:
+             self.reserve_conn = self._open_libvirt_conn()
+             self.manage_conn = self.reserve_conn  # for now
+             self.reserve_end = int(time.time()) + self.reserve_time
+
+     def stop(self):
+         with self.lock:
+             #util.debug(f"SELF.RESERVING: {self.reserving} // SELF.REMOTES: {self.remotes}")
+             # close reserving libvirt host connection
+             # - this stops _reserve_one() from doing anything bad
+             if self.reserve_conn:
+                 self.reserve_conn.close()
+                 self.reserve_conn = None
+
+             # reopen managing connection here (because we closed reserve_conn)
+             # - note that we can't open this in .start() because libvirt conns
+             #   can break on signals/interrupts, resulting in "Cannot recv data"
+             self.manage_conn = self._open_libvirt_conn()
+             # abort reservations in progress
+             while self.reserving:
+                 try:
+                     domain = self.manage_conn.lookupByName(self.reserving.pop())
+                     locking.unlock(domain, self.signature)
+                 except libvirt.libvirtError as e:
+                     util.debug(f"GOT ERROR: {str(e)}")
+                     pass
+             # cancel/release all Remotes ever created by us
+             while self.remotes:
+                 self.remotes.pop().release()
+             self.manage_conn.close()
+             self.manage_conn = None
+
+             self.reserve_end = None
+             # TODO: wait for threadqueue threads to join?
+
+     def provision(self, count=1):
+         with self.lock:
+             self.to_reserve += count
+
+     def get_remote(self, block=True):
+         with self.lock:
+             # if the reservation thread is not running, start one
+             if not self.queue.threads and self.to_reserve > 0:
+                 self.queue.start_thread(target=self._reserve_one)
+                 self.to_reserve -= 1
+         try:
+             return self.queue.get(block=block)
+         except util.ThreadQueue.Empty:
+             # always non-blocking
+             return None
+
+     # not /technically/ a valid repr(), but meh
+     def __repr__(self):
+         class_name = self.__class__.__name__
+         remotes = len(self.remotes)
+         host_name = self.host.options["Hostname"]
+         return (
+             f"{class_name}({host_name}, {self.domain_filter}, {self.signature}, "
+             f"{remotes} remotes, {hex(id(self))})"
+         )
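
The provisioner above is driven through the common Provisioner interface: start(), provision(), get_remote() and stop() (the __init__ docstring also mentions a context manager equivalent). Below is a minimal usage sketch; the host name, image name and key paths are made up, and the exact ManagedSSHConnection constructor and its connect()/disconnect() calls are assumptions inferred from how LibvirtCloningRemote uses its base class, not something this diff confirms.

    from atex import connection
    from atex.provisioner.libvirt import LibvirtCloningProvisioner  # assumed re-export

    # ssh connection to the libvirt host; assumes the base class accepts the same
    # 'options=' mapping that LibvirtCloningRemote passes to it
    host = connection.ssh.ManagedSSHConnection(options={
        "Hostname": "virthost.example.com",       # hypothetical libvirt host
        "User": "libvirtuser",
        "IdentityFile": "/home/me/.ssh/id_ed25519",
    })
    host.connect()

    provisioner = LibvirtCloningProvisioner(
        host, "fedora-42-installed",              # hypothetical source volume to clone
        pool="default", domain_filter=r"auto-.*",
        domain_user="root", domain_sshkey="/home/me/.ssh/guest_key",
    )
    provisioner.start()
    provisioner.provision(count=2)                # queue two reservations
    try:
        remote = provisioner.get_remote(block=True)   # cloned, booted, ssh-forwarded
        remote.cmd(("uname", "-r"))                   # assumed: Remote exposes .cmd() via the ssh base class
        remote.release()                              # unlock the domain, cancel the LocalForward
    finally:
        provisioner.stop()                            # abort pending reservations, release leftover remotes
        host.disconnect()

Note that stop() deliberately closes the reserving libvirt connection before cleaning up through a second one, so it is safe to call even while a reservation is still in flight.
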
atex/provisioner/libvirt/locking.py (new file)
@@ -0,0 +1,170 @@
+ """
+ Helpers for "reserving" (locking) libvirt domains using the <metadata> tag.
+
+ The idea is for each user to generate some "signature" (e.g. a UUIDv4) and
+ attempt to lock eligible domains on a libvirt host in a random order,
+ hopefully eventually succeeding (as others release the domain locks).
+
+ Lock safety is ensured by libvirt retaining <metadata> content (tag) order,
+ so each user can re-check (after locking) whether the race was won or lost,
+ depending on whether the user's signature is on top of <metadata>.
+
+ A timestamp (meaning now()+duration) is used as the lock tag value/text,
+ and any expired timestamps (now() > timestamp) are ignored by the locking
+ logic.
+ """
+
+ import re
+ import time
+ import random
+ import xml.etree.ElementTree as ET
+
+ from ... import util
+
+ libvirt = util.import_libvirt()
+
+
+ def get_locks(domain, expired=False):
+     """
+     Yield (signature, timestamp) tuples of atex-lock entries for a 'domain'.
+
+     If 'expired' is True, also yield locks with an expired timestamp.
+
+     If a timestamp is missing, it is substituted with 0.
+     """
+     xml_dump = ET.fromstring(domain.XMLDesc(libvirt.VIR_DOMAIN_XML_INACTIVE))
+     metadata = xml_dump.find("metadata")
+     # no <metadata> - no locks possible
+     if metadata is None:
+         return
+     now = int(time.time())
+     for elem in metadata:
+         if match := re.fullmatch(r"{(.+)}atex-lock", elem.tag):
+             timestamp = int(elem.text) if elem.text else 0
+             if not expired and timestamp <= now:
+                 continue
+             signature = match.group(1)
+             yield (signature, timestamp)
+
+
+ def unlock(domain, signature):
+     """
+     Unlock a domain previously locked by lock().
+     """
+     domain.setMetadata(
+         libvirt.VIR_DOMAIN_METADATA_ELEMENT,
+         None,
+         "atex-lock",
+         str(signature),
+         libvirt.VIR_DOMAIN_AFFECT_CONFIG,
+     )
+
+
+ def lock(domain, signature, timestamp):
+     """
+     Attempt to lock a domain under 'signature' ownership,
+     writing out 'timestamp' as the lock tag content.
+
+     Returns True if the domain was successfully locked, False otherwise.
+     """
+     signature = str(signature)
+     timestamp = int(timestamp)
+
+     # if there are any existing locks held on the domain
+     if any(get_locks(domain)):
+         return False
+
+     # try locking it
+     domain.setMetadata(
+         libvirt.VIR_DOMAIN_METADATA_ELEMENT,
+         f"<atex-lock>{timestamp}</atex-lock>",
+         "atex-lock",
+         signature,
+         libvirt.VIR_DOMAIN_AFFECT_CONFIG,
+     )
+
+     # get fresh XML and verify we won the race
+     try:
+         first = next(get_locks(domain))
+     except StopIteration:
+         raise RuntimeError(
+             "failed to verify lock signature, was timestamp already expired?",
+         ) from None
+
+     first_sig, first_stamp = first
+     if first_sig == signature and first_stamp == timestamp:
+         return True
+     else:
+         # we lost
+         unlock(domain, signature)
+         return False
+
+
+ def lock_any(connection, signature, duration, filter_domains=lambda _: True):
+     """
+     Given a libvirt 'connection', attempt to lock (reserve) any one
+     domain under 'signature' ownership for 'duration' seconds.
+
+     If 'filter_domains' is given as a callable, it is used to filter
+     domains considered for locking. It takes one argument (a libvirt
+     domain object) and must return True (domain is eligible for locking)
+     or False (domain should be skipped).
+     For example: lambda dom: dom.name().startswith("foo-")
+
+     Returns a libvirt domain object of a successfully locked domain,
+     or None if no domain could be locked.
+     """
+     domains = connection.listAllDomains(libvirt.VIR_CONNECT_LIST_DOMAINS_PERSISTENT)
+     # try to avoid lock conflicts
+     random.shuffle(domains)
+
+     timestamp = int(time.time() + duration)
+     for domain in filter(filter_domains, domains):
+         if lock(domain, signature, timestamp):
+             return domain
+     return None
+
+
+ def unlock_all(connection, signature=None, shutdown=False, filter_domains=lambda _: True):
+     """
+     Remove all locks for all domains.
+
+     If 'signature' is given, remove only locks matching the signature.
+
+     If 'shutdown' is True, also forcibly shut down (destroy) all domains.
+
+     If 'filter_domains' is given, it behaves like for lock_any().
+     """
+     domains = connection.listAllDomains(libvirt.VIR_CONNECT_LIST_DOMAINS_PERSISTENT)
+     for domain in filter(filter_domains, domains):
+         for lock, _ in get_locks(domain, expired=True):
+             if signature:
+                 if str(signature) == lock:
+                     unlock(domain, lock)
+             else:
+                 unlock(domain, lock)
+         if shutdown:
+             domain.destroy()
+
+
+ def cleanup_expired(connection, timestamp=None, filter_domains=lambda _: True):
+     """
+     Clean up expired locks for all domains.
+
+     Useful when a client terminates without releasing the lock, which later
+     expires (making the domain available), but as no other user is responsible
+     for the given signature, it is never removed, unless this function is used
+     (by some maintenance service).
+
+     Note that unlock_all() cleans up all locks, incl. expired ones.
+
+     If 'timestamp' is given, it is used instead of the now() UTC timestamp.
+
+     If 'filter_domains' is given, it behaves like for lock_any().
+     """
+     now = int(timestamp) if timestamp is not None else int(time.time())
+     domains = connection.listAllDomains(libvirt.VIR_CONNECT_LIST_DOMAINS_PERSISTENT)
+     for domain in filter(filter_domains, domains):
+         for signature, stamp in get_locks(domain, expired=True):
+             if stamp <= now:
+                 unlock(domain, signature)
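
locking.py is self-contained and can also be used directly against any libvirt connection; the provisioner above drives lock()/unlock() itself, but the convenience helpers cover the same metadata-based protocol. A rough sketch follows, with the connection URI and the domain-name prefix purely illustrative (plain 'import libvirt' stands in for util.import_libvirt()):

    import uuid
    import libvirt

    from atex.provisioner.libvirt import locking

    conn = libvirt.open("qemu:///system")     # any libvirt connection works
    signature = uuid.uuid4()                  # this user's lock identity

    # try to reserve any "auto-*" domain for one hour; returns None if all are taken
    domain = locking.lock_any(
        conn, signature, duration=3600,
        filter_domains=lambda dom: dom.name().startswith("auto-"),
    )
    if domain is not None:
        try:
            ...  # use the domain
        finally:
            locking.unlock(domain, signature)

    # maintenance duty (e.g. a cron job): drop locks whose timestamps have expired
    locking.cleanup_expired(conn)
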
atex/provisioner/libvirt/setup-libvirt.sh
@@ -52,7 +52,7 @@ virsh net-define "$tmpfile"
  virsh net-autostart default
  virsh net-start default

- # set up a default network
+ # set up a default storage pool
  if virsh -q pool-list --name | grep -q '^default *$'; then
      virsh pool-destroy default
      virsh pool-undefine default
@@ -70,3 +70,23 @@ EOF
  virsh pool-define "$tmpfile"
  virsh pool-autostart default
  virsh pool-start default
+
+ # create another storage pool for nvram .vars files,
+ # so they can be easily removed using just a libvirt connection
+ if virsh -q pool-list --name | grep -q '^nvram *$'; then
+     virsh pool-destroy nvram
+     virsh pool-undefine nvram
+ elif virsh -q pool-list --name --inactive | grep -q '^nvram *$'; then
+     virsh pool-undefine nvram
+ fi
+ cat > "$tmpfile" <<EOF
+ <pool type='dir'>
+   <name>nvram</name>
+   <target>
+     <path>/var/lib/libvirt/qemu/nvram</path>
+   </target>
+ </pool>
+ EOF
+ virsh pool-define "$tmpfile"
+ virsh pool-autostart nvram
+ virsh pool-start nvram
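
The extra 'nvram' pool exists so that guest UEFI .vars files show up as libvirt storage volumes; that is what lets _reserve_one() above locate and delete them via storageVolLookupByPath() without any shell access to the host. A quick way to check that behaviour (the .vars filename is hypothetical, the directory is the one the pool above points at):

    import libvirt

    conn = libvirt.open("qemu:///system")

    # refresh all pools first - libvirt does not rescan the nvram directory on its own
    for pool in conn.listAllStoragePools():
        pool.refresh()

    # any file under /var/lib/libvirt/qemu/nvram should now resolve to a volume
    vol = conn.storageVolLookupByPath("/var/lib/libvirt/qemu/nvram/auto-1_VARS.fd")
    print(vol.name(), vol.path())
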
atex/provisioner/podman/__init__.py (new file)
@@ -0,0 +1,2 @@
+ from .podman import PodmanProvisioner, PodmanRemote  # noqa: F401
+ from .podman import pull_image, build_container_with_deps  # noqa: F401