atex-0.9-py3-none-any.whl → atex-0.10-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. atex/aggregator/__init__.py +60 -0
  2. atex/{orchestrator/aggregator.py → aggregator/json.py} +6 -21
  3. atex/cli/__init__.py +11 -1
  4. atex/cli/libvirt.py +3 -2
  5. atex/cli/testingfarm.py +48 -3
  6. atex/connection/podman.py +2 -4
  7. atex/connection/ssh.py +7 -14
  8. atex/executor/executor.py +18 -17
  9. atex/executor/scripts.py +5 -3
  10. atex/executor/testcontrol.py +1 -1
  11. atex/orchestrator/__init__.py +76 -3
  12. atex/orchestrator/{orchestrator.py → adhoc.py} +183 -103
  13. atex/{provision → provisioner}/__init__.py +49 -37
  14. atex/{provision → provisioner}/libvirt/libvirt.py +21 -14
  15. atex/{provision → provisioner}/libvirt/locking.py +3 -1
  16. atex/provisioner/podman/__init__.py +2 -0
  17. atex/provisioner/podman/podman.py +169 -0
  18. atex/{provision → provisioner}/testingfarm/api.py +53 -44
  19. atex/{provision → provisioner}/testingfarm/testingfarm.py +17 -23
  20. atex/util/log.py +62 -67
  21. atex/util/subprocess.py +46 -12
  22. atex/util/threads.py +7 -0
  23. atex-0.10.dist-info/METADATA +86 -0
  24. atex-0.10.dist-info/RECORD +44 -0
  25. atex/provision/podman/__init__.py +0 -1
  26. atex/provision/podman/podman.py +0 -274
  27. atex-0.9.dist-info/METADATA +0 -178
  28. atex-0.9.dist-info/RECORD +0 -43
  29. /atex/{provision → provisioner}/libvirt/VM_PROVISION +0 -0
  30. /atex/{provision → provisioner}/libvirt/__init__.py +0 -0
  31. /atex/{provision → provisioner}/libvirt/setup-libvirt.sh +0 -0
  32. /atex/{provision → provisioner}/testingfarm/__init__.py +0 -0
  33. {atex-0.9.dist-info → atex-0.10.dist-info}/WHEEL +0 -0
  34. {atex-0.9.dist-info → atex-0.10.dist-info}/entry_points.txt +0 -0
  35. {atex-0.9.dist-info → atex-0.10.dist-info}/licenses/COPYING.txt +0 -0
@@ -4,6 +4,7 @@ import uuid
  import shlex
  import socket
  import random
+ import textwrap
  import tempfile
  import threading
  import subprocess
@@ -38,14 +39,14 @@ def setup_event_loop():
  thread.start()


- class LibvirtCloningRemote(Remote, connection.ssh.ManagedSSHConn):
+ class LibvirtCloningRemote(Remote, connection.ssh.ManagedSSHConnection):
  """
  TODO
  """

  def __init__(self, ssh_options, host, domain, source_image, *, release_hook):
  """
- 'ssh_options' are a dict, passed to ManagedSSHConn __init__().
+ 'ssh_options' are a dict, passed to ManagedSSHConnection __init__().

  'host' is a str of libvirt host name (used for repr()).

@@ -57,7 +58,7 @@ class LibvirtCloningRemote(Remote, connection.ssh.ManagedSSHConn):
  'release_hook' is a callable called on .release() in addition
  to disconnecting the connection.
  """
- # NOTE: self.lock inherited from ManagedSSHConn
+ # NOTE: self.lock inherited from ManagedSSHConnection
  super().__init__(options=ssh_options)
  self.host = host
  self.domain = domain
@@ -80,7 +81,7 @@ class LibvirtCloningRemote(Remote, connection.ssh.ManagedSSHConn):
  return f"{class_name}({self.host}, {self.domain}, {self.source_image})"


- # needs ManagedSSHConn due to .forward()
+ # needs ManagedSSHConnection due to .forward()
  def reliable_ssh_local_fwd(conn, dest, retries=10):
  for _ in range(retries):
  # let the kernel give us a free port
@@ -128,7 +129,7 @@ class LibvirtCloningProvisioner(Provisioner):
  reserve_delay=3, reserve_time=3600, start_event_loop=True,
  ):
  """
- 'host' is a ManagedSSHConn class instance, connected to a libvirt host.
+ 'host' is a ManagedSSHConnection class instance, connected to a libvirt host.

  'image' is a string with a libvirt storage volume name inside the
  given storage 'pool' that should be used as the source for cloning.
@@ -174,6 +175,7 @@ class LibvirtCloningProvisioner(Provisioner):
  self.signature = uuid.uuid4()
  self.reserve_end = None
  self.queue = util.ThreadQueue(daemon=True)
+ self.to_reserve = 0

  # use two libvirt connections - one to handle reservations and cloning,
  # and another for management and cleanup;
@@ -242,8 +244,8 @@ class LibvirtCloningProvisioner(Provisioner):
  raise

  # parse XML definition of the domain
- xmldesc = acquired.XMLDesc()
- util.debug(f"domain {acquired.name()} XML:\n{xmldesc}") # TODO: EXTRADEBUG log level
+ xmldesc = acquired.XMLDesc().rstrip("\n")
+ util.extradebug(f"domain {acquired.name()} XML:\n{textwrap.indent(xmldesc, ' ')}")
  xml_root = ET.fromstring(xmldesc)
  nvram_vol = nvram_path = None

@@ -325,7 +327,7 @@ class LibvirtCloningProvisioner(Provisioner):
  # set up ssh LocalForward to it
  port = reliable_ssh_local_fwd(self.host, f"{first_addr}:22")

- # create a remote and connect it
+ # prepare release using variables from this scope
  def release_hook(remote):
  # un-forward the libvirt host ssh-forwarded port
  self.host.forward("LocalForward", f"127.0.0.1:{port} {first_addr}:22", cancel=True)
@@ -339,6 +341,7 @@ class LibvirtCloningProvisioner(Provisioner):
  try:
  domain = self.manage_conn.lookupByName(remote.domain)
  locking.unlock(domain, self.signature)
+ domain.destroy()
  except libvirt.libvirtError as e:
  if "Domain not found" not in str(e):
  raise
@@ -348,11 +351,12 @@ class LibvirtCloningProvisioner(Provisioner):
  except ValueError:
  pass

+ # create a remote and connect it
  ssh_options = {
  "Hostname": "127.0.0.1",
  "User": self.domain_user,
  "Port": str(port),
- "IdentityFile": self.domain_sshkey,
+ "IdentityFile": str(Path(self.domain_sshkey).absolute()),
  "ConnectionAttempts": "1000",
  "Compression": "yes",
  }
@@ -385,7 +389,7 @@ class LibvirtCloningProvisioner(Provisioner):
  ("virt-ssh-helper", "qemu:///system"),
  func=lambda *args, **_: args[0],
  )
- # to make libvirt connect via our ManagedSSHConn, we need to give it
+ # to make libvirt connect via our ManagedSSHConnection, we need to give it
  # a specific ssh CLI, but libvirt URI command= takes only one argv[0]
  # and cannot pass arguments - we work around this by creating a temp
  # arg-less executable
@@ -410,8 +414,6 @@ class LibvirtCloningProvisioner(Provisioner):
  self.reserve_conn = self._open_libvirt_conn()
  self.manage_conn = self.reserve_conn # for now
  self.reserve_end = int(time.time()) + self.reserve_time
- # get an initial first remote
- self.queue.start_thread(target=self._reserve_one)

  def stop(self):
  with self.lock:
@@ -443,11 +445,16 @@ class LibvirtCloningProvisioner(Provisioner):
  self.reserve_end = None
  # TODO: wait for threadqueue threads to join?

+ def provision(self, count=1):
+ with self.lock:
+ self.to_reserve += count
+
  def get_remote(self, block=True):
- # if the reservation thread is not running, start one
  with self.lock:
- if not self.queue.threads:
+ # if the reservation thread is not running, start one
+ if not self.queue.threads and self.to_reserve > 0:
  self.queue.start_thread(target=self._reserve_one)
+ self.to_reserve -= 1
  try:
  return self.queue.get(block=block)
  except util.ThreadQueue.Empty:
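
Under 0.10 the libvirt provisioner no longer auto-reserves on first use; capacity has to be requested explicitly. A minimal usage sketch, not part of the package, assuming an already-constructed LibvirtCloningProvisioner named prov:

    prov.start()
    prov.provision(count=2)        # queue up two clone reservations
    remote = prov.get_remote()     # blocks until a clone is reserved and reachable
    try:
        pass                       # drive the machine via the Remote/Connection API
    finally:
        remote.release()           # cancels the LocalForward, unlocks and destroys the domain
        prov.stop()
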
@@ -19,7 +19,9 @@ import time
  import random
  import xml.etree.ElementTree as ET

- import libvirt
+ from ... import util
+
+ libvirt = util.import_libvirt()


  def get_locks(domain, expired=False):
@@ -0,0 +1,2 @@
+ from .podman import PodmanProvisioner, PodmanRemote # noqa: F401
+ from .podman import pull_image, build_container_with_deps # noqa: F401
@@ -0,0 +1,169 @@
+ import tempfile
+ import threading
+ import subprocess
+
+ from ... import connection, util
+ from .. import Provisioner, Remote
+
+
+ class PodmanRemote(Remote, connection.podman.PodmanConnection):
+ """
+ Built on the official Remote API, pulling in the Connection API
+ as implemented by ManagedSSHConnection.
+ """
+
+ def __init__(self, image, container, *, release_hook):
+ """
+ 'image' is an image tag (used for repr()).
+
+ 'container' is a podman container id / name.
+
+ 'release_hook' is a callable called on .release() in addition
+ to disconnecting the connection.
+ """
+ super().__init__(container=container)
+ self.lock = threading.RLock()
+ self.image = image
+ self.container = container
+ self.release_called = False
+ self.release_hook = release_hook
+
+ def release(self):
+ with self.lock:
+ if self.release_called:
+ return
+ else:
+ self.release_called = True
+ self.release_hook(self)
+ self.disconnect()
+ util.subprocess_run(
+ ("podman", "container", "rm", "-f", "-t", "0", self.container),
+ check=False, # ignore if it fails
+ stdout=subprocess.DEVNULL,
+ )
+
+ # not /technically/ a valid repr(), but meh
+ def __repr__(self):
+ class_name = self.__class__.__name__
+
+ if "/" in self.image:
+ image = self.image.rsplit("/",1)[1]
+ elif len(self.image) > 20:
+ image = f"{self.image[:17]}..."
+ else:
+ image = self.image
+
+ name = f"{self.container[:17]}..." if len(self.container) > 20 else self.container
+
+ return f"{class_name}({image}, {name})"
+
+
+ class PodmanProvisioner(Provisioner):
+ def __init__(self, image, run_options=None):
+ """
+ 'image' is a string of image tag/id to create containers from.
+ It can be a local identifier or an URL.
+
+ 'run_options' is an iterable with additional CLI options passed
+ to 'podman container run'.
+ """
+ self.lock = threading.RLock()
+ self.image = image
+ self.run_options = run_options or ()
+
+ # created PodmanRemote instances, ready to be handed over to the user,
+ # or already in use by the user
+ self.remotes = []
+ self.to_create = 0
+
+ def start(self):
+ if not self.image:
+ raise ValueError("image cannot be empty")
+
+ def stop(self):
+ with self.lock:
+ while self.remotes:
+ self.remotes.pop().release()
+
+ def provision(self, count=1):
+ with self.lock:
+ self.to_create += count
+
+ def get_remote(self, block=True):
+ if self.to_create <= 0:
+ if block:
+ raise RuntimeError("no .provision() requested, would block forever")
+ else:
+ return None
+
+ proc = util.subprocess_run(
+ (
+ "podman", "container", "run", "--quiet", "--detach", "--pull", "never",
+ *self.run_options, self.image, "sleep", "inf",
+ ),
+ check=True,
+ text=True,
+ stdout=subprocess.PIPE,
+ )
+ container_id = proc.stdout.rstrip("\n")
+
+ def release_hook(remote):
+ # remove from the list of remotes inside this Provisioner
+ with self.lock:
+ try:
+ self.remotes.remove(remote)
+ except ValueError:
+ pass
+
+ remote = PodmanRemote(
+ self.image,
+ container_id,
+ release_hook=release_hook,
+ )
+
+ with self.lock:
+ self.remotes.append(remote)
+ self.to_create -= 1
+
+ return remote
+
+ # not /technically/ a valid repr(), but meh
+ def __repr__(self):
+ class_name = self.__class__.__name__
+ return (
+ f"{class_name}({self.image}, {len(self.remotes)} remotes, {hex(id(self))})"
+ )
+
+
+ def pull_image(origin):
+ proc = util.subprocess_run(
+ ("podman", "image", "pull", "-q", origin),
+ check=True,
+ text=True,
+ stdout=subprocess.PIPE,
+ )
+ return proc.stdout.rstrip("\n")
+
+
+ def build_container_with_deps(origin, tag=None, *, extra_pkgs=None):
+ tag_args = ("-t", tag) if tag else ()
+
+ pkgs = ["rsync"]
+ if extra_pkgs:
+ pkgs += extra_pkgs
+ pkgs_str = " ".join(pkgs)
+
+ with tempfile.NamedTemporaryFile("w+t", delete_on_close=False) as tmpf:
+ tmpf.write(util.dedent(fr"""
+ FROM {origin}
+ RUN dnf -y -q --setopt=install_weak_deps=False install {pkgs_str} >/dev/null
+ RUN dnf -y -q clean packages >/dev/null
+ """))
+ tmpf.close()
+ proc = util.subprocess_run(
+ ("podman", "image", "build", "-q", "-f", tmpf.name, *tag_args, "."),
+ check=True,
+ text=True,
+ stdout=subprocess.PIPE,
+ )
+ return proc.stdout.rstrip("\n")
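
A minimal end-to-end sketch of the new podman provisioner, not part of the package; the image reference is only an example and podman must be available locally:

    base = pull_image("registry.fedoraproject.org/fedora:latest")
    image = build_container_with_deps(base, tag="atex-fedora")  # layers rsync on top via dnf
    prov = PodmanProvisioner(image)
    prov.start()
    prov.provision(count=1)        # required; get_remote() refuses to block otherwise
    remote = prov.get_remote()     # runs 'podman container run ... sleep inf'
    try:
        pass                       # use the PodmanConnection-backed Remote here
    finally:
        remote.release()           # 'podman container rm -f -t 0 <container>'
        prov.stop()
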
@@ -16,12 +16,9 @@ import urllib3

  DEFAULT_API_URL = "https://api.testing-farm.io/v0.1"

- # how many seconds to sleep for during API polling
- API_QUERY_DELAY = 30
-
  DEFAULT_RESERVE_TEST = {
  "url": "https://github.com/RHSecurityCompliance/atex-reserve",
- "ref": "v0.9",
+ "ref": "v0.10",
  "path": ".",
  "name": "/plans/reserve",
  }
@@ -88,12 +85,13 @@ class TestingFarmAPI:
  self.api_url = url
  self.api_token = token or os.environ.get("TESTING_FARM_API_TOKEN")

- def _query(self, method, path, *args, headers=None, **kwargs):
+ def _query(self, method, path, *args, headers=None, auth=True, **kwargs):
  url = f"{self.api_url}{path}"
- if headers is not None:
- headers["Authorization"] = f"Bearer {self.api_token}"
- else:
- headers = {"Authorization": f"Bearer {self.api_token}"}
+ if self.api_token and auth:
+ if headers is not None:
+ headers["Authorization"] = f"Bearer {self.api_token}"
+ else:
+ headers = {"Authorization": f"Bearer {self.api_token}"}

  reply = _http.request(method, url, *args, headers=headers, preload_content=False, **kwargs)

@@ -180,7 +178,7 @@ class TestingFarmAPI:
  fields["token_id"] = self.whoami()["token"]["id"]
  fields["user_id"] = self.whoami()["user"]["id"]

- return self._query("GET", "/requests", fields=fields)
+ return self._query("GET", "/requests", fields=fields, auth=mine)

  def get_request(self, request_id):
  """
@@ -210,19 +208,22 @@ class Request:
  request.
  """

- # TODO: maintain internal time.monotonic() clock and call .update() from
- # functions like .alive() if last update is > API_QUERY_DELAY
+ # actually query the TestingFarm API at most every X seconds,
+ # re-using cached state between updates
+ api_query_limit = 30

  def __init__(self, id=None, api=None, initial_data=None):
  """
  'id' is a Testing Farm request UUID
+
  'api' is a TestingFarmAPI instance - if unspecified, a sensible default
- 'initial_data' (dict) can be used to pre-fill an initial Request state
- will be used.
+
+ 'initial_data' (dict) can be used to pre-fill an initial Request state.
  """
  self.id = id
  self.api = api or TestingFarmAPI()
  self.data = initial_data or {}
+ self.next_query = 0

  def submit(self, spec):
  """
@@ -234,16 +235,12 @@ class Request:
  self.data = self.api.submit_request(spec)
  self.id = self.data["id"]

- def update(self):
- """
- Query Testing Farm API to get a more up-to-date version of the request
- metadata. Do not call too frequently.
- This function is also used internally by others, you do not need to
- always call it manually.
- """
- self.data = self.api.get_request(self.id)
- # TODO: refresh internal time.monotonic() timer
- return self.data
+ def _refresh(self):
+ if not self.id:
+ return
+ if time.monotonic() > self.next_query:
+ self.data = self.api.get_request(self.id)
+ self.next_query = time.monotonic() + self.api_query_limit

  def cancel(self):
  if not self.id:
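
The public update() method is gone; _refresh() now caches the request data and only hits the API once a monotonic deadline has passed. The same pattern in isolation, as a generic sketch rather than package code:

    import time

    class ThrottledFetcher:
        query_limit = 30                 # seconds between real API calls

        def __init__(self, fetch):
            self.fetch = fetch           # the expensive call, e.g. an HTTP GET
            self.data = {}
            self.next_query = 0          # 0 guarantees the first call goes through

        def refresh(self):
            if time.monotonic() > self.next_query:
                self.data = self.fetch()
                self.next_query = time.monotonic() + self.query_limit
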
@@ -254,35 +251,44 @@ class Request:
  return data

  def alive(self):
- if "state" not in self.data:
- self.update()
+ if not self.id:
+ return False
+ self._refresh()
  return self.data["state"] not in END_STATES

  def assert_alive(self):
  if not self.alive():
  state = self.data["state"]
- raise GoneAwayError(f"request {self.data['id']} not alive anymore, entered: {state}")
+ raise GoneAwayError(f"request {self.id} not alive anymore, entered: {state}")

  def wait_for_state(self, state):
- if "state" not in self.data:
- self.update()
- self.assert_alive()
- while self.data["state"] != state:
- time.sleep(API_QUERY_DELAY)
- self.update()
- self.assert_alive()
+ """
+ 'state' is a str or a tuple of states to wait for.
+ """
+ watched = (state,) if isinstance(state, str) else state
+ while True:
+ self._refresh()
+ if self.data["state"] in watched:
+ break
+ # if the request ended in one of END_STATES and the above condition
+ # did not catch it, the wait will never end
+ if self.data["state"] in END_STATES:
+ raise GoneAwayError(f"request {self.id} ended with {self.data['state']}")

  def __repr__(self):
  return f"Request(id={self.id})"

  def __str__(self):
+ self._refresh()
  # python has no better dict-pretty-printing logic
  return json.dumps(self.data, sort_keys=True, indent=4)

  def __contains__(self, item):
+ self._refresh()
  return item in self.data

  def __getitem__(self, key):
+ self._refresh()
  return self.data[key]

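wait_for_state() now accepts either a single state name or a tuple, and raises GoneAwayError if the request reaches an end state it was not told to wait for. A hedged example, not from the package; the UUID is a placeholder and the state names are shown only for illustration:

    req = Request("00000000-0000-0000-0000-000000000000")  # placeholder request UUID
    req.wait_for_state("running")                # single state, as in 0.9
    req.wait_for_state(("complete", "error"))    # returns on whichever is reached first
    print(req["state"])                          # __getitem__ refreshes lazily via _refresh()
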
@@ -292,6 +298,10 @@ class PipelineLogStreamer:
  to "stream" its contents over time (over many requests), never having to
  re-read old pipeline.log content.
  """
+
+ # how frequently to check for pipeline.log updates (seconds)
+ pipeline_query_limit = 30
+
  def __init__(self, request):
  self.request = request

@@ -324,8 +334,7 @@ class PipelineLogStreamer:
  return log

  finally:
- time.sleep(API_QUERY_DELAY)
- self.request.update()
+ time.sleep(self.pipeline_query_limit)

  def __iter__(self):
  url = self._wait_for_entry()
@@ -356,8 +365,7 @@ class PipelineLogStreamer:
  buffer = buffer[index+1:]

  finally:
- time.sleep(API_QUERY_DELAY)
- self.request.update()
+ time.sleep(self.pipeline_query_limit)


  class Reserve:
@@ -532,14 +540,17 @@ class Reserve:
  with self.lock:
  self.request = Request(api=self.api)
  self.request.submit(spec)
- util.debug(f"submitted request:\n{textwrap.indent(str(self.request), ' ')}")
+ util.debug(f"submitted request {self.request.id}")
+ util.extradebug(
+ f"request {self.request.id}:\n{textwrap.indent(str(self.request), ' ')}",
+ )

  # wait for user/host to ssh to
  ssh_user = ssh_host = None
  for line in PipelineLogStreamer(self.request):
  # the '\033[0m' is to reset colors sometimes left in a bad
  # state by pipeline.log
- util.debug(f"pipeline: {line}\033[0m")
+ util.extradebug(f"{line}\033[0m")
  # find hidden login details
  m = re.search(
  # host address can be an IP address or a hostname
@@ -559,14 +570,12 @@ class Reserve:
  # (it will be failing to login for a while, until the reserve test
  # installs our ssh pubkey into authorized_keys)
  ssh_attempt_cmd = (
- "ssh", "-q", "-i", ssh_key, f"-oConnectionAttempts={API_QUERY_DELAY}",
+ "ssh", "-q", "-i", ssh_key.absolute(), "-oConnectionAttempts=60",
  "-oStrictHostKeyChecking=no", "-oUserKnownHostsFile=/dev/null",
  f"{ssh_user}@{ssh_host}", "exit 123",
  )
  while True:
- # wait for API_QUERY_DELAY between ssh retries, seems like GEFN sleep time
- time.sleep(API_QUERY_DELAY)
- self.request.update()
+ time.sleep(1)
  self.request.assert_alive()

  proc = util.subprocess_run(
@@ -8,22 +8,22 @@ from .. import Provisioner, Remote
  from . import api


- class TestingFarmRemote(Remote, connection.ssh.ManagedSSHConn):
+ class TestingFarmRemote(Remote, connection.ssh.ManagedSSHConnection):
  """
  Built on the official Remote API, pulling in the Connection API
- as implemented by ManagedSSHConn.
+ as implemented by ManagedSSHConnection.
  """

  def __init__(self, request_id, ssh_options, *, release_hook):
  """
  'request_id' is a string with Testing Farm request UUID (for printouts).

- 'ssh_options' are a dict, passed to ManagedSSHConn __init__().
+ 'ssh_options' are a dict, passed to ManagedSSHConnection __init__().

  'release_hook' is a callable called on .release() in addition
  to disconnecting the connection.
  """
- # NOTE: self.lock inherited from ManagedSSHConn
+ # NOTE: self.lock inherited from ManagedSSHConnection
  super().__init__(options=ssh_options)
  self.request_id = request_id
  self.release_hook = release_hook
@@ -35,8 +35,8 @@ class TestingFarmRemote(Remote, connection.ssh.ManagedSSHConn):
  return
  else:
  self.release_called = True
- self.release_hook(self)
  self.disconnect()
+ self.release_hook(self)

  # not /technically/ a valid repr(), but meh
  def __repr__(self):
@@ -49,25 +49,20 @@ class TestingFarmRemote(Remote, connection.ssh.ManagedSSHConn):


  class TestingFarmProvisioner(Provisioner):
- # TODO: have max_systems as (min,default,max) tuple; have an algorithm that
- # starts at default and scales up/down as needed
+ absolute_max_remotes = 100

- def __init__(self, compose, arch="x86_64", *, max_systems=1, max_retries=10, **reserve_kwargs):
+ def __init__(self, compose, arch="x86_64", *, max_retries=10, **reserve_kwargs):
  """
  'compose' is a Testing Farm compose to prepare.

  'arch' is an architecture associated with the compose.

- 'max_systems' is an int of how many systems to reserve (and keep
- reserved) in an internal pool.
-
  'max_retries' is a maximum number of provisioning (Testing Farm) errors
  that will be reprovisioned before giving up.
  """
  self.lock = threading.RLock()
  self.compose = compose
  self.arch = arch
- self.max_systems = max_systems
  self.reserve_kwargs = reserve_kwargs
  self.retries = max_retries

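With max_systems removed, the 0.10 provisioner sizes its pool through provision() calls instead of a constructor argument. A migration sketch, not from the package; the compose name and counts are illustrative:

    prov = TestingFarmProvisioner("CentOS-Stream-9", arch="x86_64", max_retries=5)
    prov.start()
    prov.provision(count=3)    # roughly what max_systems=3 used to request in 0.9
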
@@ -100,7 +95,7 @@ class TestingFarmProvisioner(Provisioner):
  "Hostname": machine.host,
  "User": machine.user,
  "Port": machine.port,
- "IdentityFile": machine.ssh_key,
+ "IdentityFile": machine.ssh_key.absolute(),
  "ConnectionAttempts": "1000",
  "Compression": "yes",
  }
@@ -157,11 +152,6 @@ class TestingFarmProvisioner(Provisioner):
  with self.lock:
  self._tmpdir = tempfile.TemporaryDirectory()
  self.ssh_key, self.ssh_pubkey = util.ssh_keygen(self._tmpdir.name)
- # start up all initial reservations
- for i in range(self.max_systems):
- delay = (api.API_QUERY_DELAY / self.max_systems) * i
- #self.queue.start_thread(target=self._schedule_one_reservation, args=(delay,))
- self._schedule_one_reservation(delay)

  def stop(self):
  with self.lock:
@@ -188,14 +178,18 @@ class TestingFarmProvisioner(Provisioner):
  self._tmpdir = None
  return callables

- def get_remote(self, block=True):
- # fill .release()d remotes back up with reservations
+ def provision(self, count=1):
  with self.lock:
- deficit = self.max_systems - len(self.remotes) - len(self.reserving)
- for i in range(deficit):
- delay = (api.API_QUERY_DELAY / deficit) * i
+ reservations = len(self.remotes) + len(self.reserving)
+ # clamp count to absolute_max_remotes
+ if count + reservations > self.absolute_max_remotes:
+ count = self.absolute_max_remotes - reservations
+ # spread out the request submissions
+ for i in range(count):
+ delay = (api.Request.api_query_limit / count) * i
  self._schedule_one_reservation(delay)

+ def get_remote(self, block=True):
  while True:
  # otherwise wait on a queue of Remotes being provisioned
  try:
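
The new provision() staggers its request submissions across one api_query_limit window. Plain arithmetic, not package code:

    delays = [(30 / 4) * i for i in range(4)]   # api_query_limit=30, count=4
    # -> [0.0, 7.5, 15.0, 22.5]; each reservation is scheduled with that delay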