teuthology 1.0.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
Files changed (172)
  1. scripts/describe.py +1 -0
  2. scripts/dispatcher.py +62 -0
  3. scripts/exporter.py +18 -0
  4. scripts/lock.py +1 -1
  5. scripts/node_cleanup.py +58 -0
  6. scripts/openstack.py +9 -9
  7. scripts/results.py +12 -11
  8. scripts/run.py +4 -0
  9. scripts/schedule.py +4 -0
  10. scripts/suite.py +61 -16
  11. scripts/supervisor.py +44 -0
  12. scripts/update_inventory.py +10 -4
  13. scripts/wait.py +31 -0
  14. teuthology/__init__.py +24 -21
  15. teuthology/beanstalk.py +4 -3
  16. teuthology/config.py +17 -6
  17. teuthology/contextutil.py +18 -14
  18. teuthology/describe_tests.py +25 -18
  19. teuthology/dispatcher/__init__.py +365 -0
  20. teuthology/dispatcher/supervisor.py +374 -0
  21. teuthology/exceptions.py +54 -0
  22. teuthology/exporter.py +347 -0
  23. teuthology/kill.py +76 -75
  24. teuthology/lock/cli.py +16 -7
  25. teuthology/lock/ops.py +276 -70
  26. teuthology/lock/query.py +61 -44
  27. teuthology/ls.py +9 -18
  28. teuthology/misc.py +152 -137
  29. teuthology/nuke/__init__.py +12 -351
  30. teuthology/openstack/__init__.py +4 -3
  31. teuthology/openstack/openstack-centos-7.0-user-data.txt +1 -1
  32. teuthology/openstack/openstack-centos-7.1-user-data.txt +1 -1
  33. teuthology/openstack/openstack-centos-7.2-user-data.txt +1 -1
  34. teuthology/openstack/openstack-debian-8.0-user-data.txt +1 -1
  35. teuthology/openstack/openstack-opensuse-42.1-user-data.txt +1 -1
  36. teuthology/openstack/openstack-teuthology.cron +0 -1
  37. teuthology/orchestra/cluster.py +51 -9
  38. teuthology/orchestra/connection.py +23 -16
  39. teuthology/orchestra/console.py +111 -50
  40. teuthology/orchestra/daemon/cephadmunit.py +23 -5
  41. teuthology/orchestra/daemon/state.py +10 -3
  42. teuthology/orchestra/daemon/systemd.py +10 -8
  43. teuthology/orchestra/opsys.py +32 -11
  44. teuthology/orchestra/remote.py +369 -152
  45. teuthology/orchestra/run.py +21 -12
  46. teuthology/packaging.py +54 -15
  47. teuthology/provision/__init__.py +30 -10
  48. teuthology/provision/cloud/openstack.py +12 -6
  49. teuthology/provision/cloud/util.py +1 -2
  50. teuthology/provision/downburst.py +83 -29
  51. teuthology/provision/fog.py +68 -20
  52. teuthology/provision/openstack.py +5 -4
  53. teuthology/provision/pelagos.py +13 -5
  54. teuthology/repo_utils.py +91 -44
  55. teuthology/report.py +57 -35
  56. teuthology/results.py +5 -3
  57. teuthology/run.py +21 -15
  58. teuthology/run_tasks.py +114 -40
  59. teuthology/schedule.py +4 -3
  60. teuthology/scrape.py +28 -22
  61. teuthology/suite/__init__.py +75 -46
  62. teuthology/suite/build_matrix.py +34 -24
  63. teuthology/suite/fragment-merge.lua +105 -0
  64. teuthology/suite/matrix.py +31 -2
  65. teuthology/suite/merge.py +175 -0
  66. teuthology/suite/placeholder.py +8 -8
  67. teuthology/suite/run.py +204 -102
  68. teuthology/suite/util.py +67 -211
  69. teuthology/task/__init__.py +1 -1
  70. teuthology/task/ansible.py +101 -31
  71. teuthology/task/buildpackages.py +2 -2
  72. teuthology/task/ceph_ansible.py +13 -6
  73. teuthology/task/cephmetrics.py +2 -1
  74. teuthology/task/clock.py +33 -14
  75. teuthology/task/exec.py +18 -0
  76. teuthology/task/hadoop.py +2 -2
  77. teuthology/task/install/__init__.py +51 -22
  78. teuthology/task/install/bin/adjust-ulimits +16 -0
  79. teuthology/task/install/bin/daemon-helper +114 -0
  80. teuthology/task/install/bin/stdin-killer +263 -0
  81. teuthology/task/install/deb.py +24 -4
  82. teuthology/task/install/redhat.py +36 -32
  83. teuthology/task/install/rpm.py +41 -14
  84. teuthology/task/install/util.py +48 -22
  85. teuthology/task/internal/__init__.py +69 -11
  86. teuthology/task/internal/edit_sudoers.sh +10 -0
  87. teuthology/task/internal/lock_machines.py +3 -133
  88. teuthology/task/internal/redhat.py +48 -28
  89. teuthology/task/internal/syslog.py +31 -8
  90. teuthology/task/kernel.py +155 -147
  91. teuthology/task/lockfile.py +1 -1
  92. teuthology/task/mpi.py +10 -10
  93. teuthology/task/pcp.py +1 -1
  94. teuthology/task/selinux.py +17 -8
  95. teuthology/task/ssh_keys.py +6 -6
  96. teuthology/task/tests/__init__.py +137 -77
  97. teuthology/task/tests/test_fetch_coredumps.py +116 -0
  98. teuthology/task/tests/test_run.py +4 -4
  99. teuthology/timer.py +3 -3
  100. teuthology/util/loggerfile.py +19 -0
  101. teuthology/util/scanner.py +159 -0
  102. teuthology/util/sentry.py +52 -0
  103. teuthology/util/time.py +52 -0
  104. teuthology-1.2.0.data/scripts/adjust-ulimits +16 -0
  105. teuthology-1.2.0.data/scripts/daemon-helper +114 -0
  106. teuthology-1.2.0.data/scripts/stdin-killer +263 -0
  107. teuthology-1.2.0.dist-info/METADATA +89 -0
  108. teuthology-1.2.0.dist-info/RECORD +174 -0
  109. {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/WHEEL +1 -1
  110. {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/entry_points.txt +5 -2
  111. scripts/nuke.py +0 -45
  112. scripts/worker.py +0 -37
  113. teuthology/nuke/actions.py +0 -456
  114. teuthology/openstack/test/__init__.py +0 -0
  115. teuthology/openstack/test/openstack-integration.py +0 -286
  116. teuthology/openstack/test/test_config.py +0 -35
  117. teuthology/openstack/test/test_openstack.py +0 -1695
  118. teuthology/orchestra/test/__init__.py +0 -0
  119. teuthology/orchestra/test/integration/__init__.py +0 -0
  120. teuthology/orchestra/test/integration/test_integration.py +0 -94
  121. teuthology/orchestra/test/test_cluster.py +0 -240
  122. teuthology/orchestra/test/test_connection.py +0 -106
  123. teuthology/orchestra/test/test_console.py +0 -217
  124. teuthology/orchestra/test/test_opsys.py +0 -404
  125. teuthology/orchestra/test/test_remote.py +0 -185
  126. teuthology/orchestra/test/test_run.py +0 -286
  127. teuthology/orchestra/test/test_systemd.py +0 -54
  128. teuthology/orchestra/test/util.py +0 -12
  129. teuthology/sentry.py +0 -18
  130. teuthology/test/__init__.py +0 -0
  131. teuthology/test/fake_archive.py +0 -107
  132. teuthology/test/fake_fs.py +0 -92
  133. teuthology/test/integration/__init__.py +0 -0
  134. teuthology/test/integration/test_suite.py +0 -86
  135. teuthology/test/task/__init__.py +0 -205
  136. teuthology/test/task/test_ansible.py +0 -624
  137. teuthology/test/task/test_ceph_ansible.py +0 -176
  138. teuthology/test/task/test_console_log.py +0 -88
  139. teuthology/test/task/test_install.py +0 -337
  140. teuthology/test/task/test_internal.py +0 -57
  141. teuthology/test/task/test_kernel.py +0 -243
  142. teuthology/test/task/test_pcp.py +0 -379
  143. teuthology/test/task/test_selinux.py +0 -35
  144. teuthology/test/test_config.py +0 -189
  145. teuthology/test/test_contextutil.py +0 -68
  146. teuthology/test/test_describe_tests.py +0 -316
  147. teuthology/test/test_email_sleep_before_teardown.py +0 -81
  148. teuthology/test/test_exit.py +0 -97
  149. teuthology/test/test_get_distro.py +0 -47
  150. teuthology/test/test_get_distro_version.py +0 -47
  151. teuthology/test/test_get_multi_machine_types.py +0 -27
  152. teuthology/test/test_job_status.py +0 -60
  153. teuthology/test/test_ls.py +0 -48
  154. teuthology/test/test_misc.py +0 -368
  155. teuthology/test/test_nuke.py +0 -232
  156. teuthology/test/test_packaging.py +0 -763
  157. teuthology/test/test_parallel.py +0 -28
  158. teuthology/test/test_repo_utils.py +0 -204
  159. teuthology/test/test_report.py +0 -77
  160. teuthology/test/test_results.py +0 -155
  161. teuthology/test/test_run.py +0 -238
  162. teuthology/test/test_safepath.py +0 -55
  163. teuthology/test/test_schedule.py +0 -45
  164. teuthology/test/test_scrape.py +0 -167
  165. teuthology/test/test_timer.py +0 -80
  166. teuthology/test/test_vps_os_vers_parameter_checking.py +0 -84
  167. teuthology/test/test_worker.py +0 -303
  168. teuthology/worker.py +0 -339
  169. teuthology-1.0.0.dist-info/METADATA +0 -76
  170. teuthology-1.0.0.dist-info/RECORD +0 -210
  171. {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/LICENSE +0 -0
  172. {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/top_level.txt +0 -0
teuthology/task/internal/__init__.py

@@ -4,22 +4,29 @@ Note that there is no corresponding task defined for this module. All of
 the calls are made from other modules, most notably teuthology/run.py
 """
 import contextlib
+import functools
+import gzip
 import logging
 import os
+import shutil
 import time
 import yaml
 import subprocess
+import tempfile
+import re
+import humanfriendly
 
 import teuthology.lock.ops
-from teuthology import misc
-from teuthology.packaging import get_builder_project
+from teuthology import misc, packaging
 from teuthology import report
 from teuthology.config import config as teuth_config
-from teuthology.exceptions import VersionNotFoundError
+from teuthology.exceptions import ConfigError, VersionNotFoundError
 from teuthology.job_status import get_status, set_status
 from teuthology.orchestra import cluster, remote, run
 # the below import with noqa is to workaround run.py which does not support multilevel submodule import
-from teuthology.task.internal.redhat import setup_cdn_repo, setup_base_repo, setup_additional_repo, setup_stage_cdn # noqa
+from teuthology.task.internal.redhat import (setup_cdn_repo, setup_base_repo, # noqa
+                                             setup_additional_repo, # noqa
+                                             setup_stage_cdn, setup_container_registry) # noqa
 
 log = logging.getLogger(__name__)
 
@@ -81,7 +88,7 @@ def check_packages(ctx, config):
     # We can only do this check if there are a defined sha1 and os_type
     # in the job config.
     if os_type and sha1:
-        package = get_builder_project()("ceph", ctx.config)
+        package = packaging.get_builder_project()("ceph", ctx.config)
         template = "Checking packages for os_type '{os}', " \
                    "flavor '{flav}' and ceph hash '{ver}'"
         log.info(
@@ -265,7 +272,7 @@ def check_ceph_data(ctx, config):
         try:
             proc.wait()
         except run.CommandFailedError:
-            log.error('Host %s has stale /var/lib/ceph, check lock and nuke/cleanup.', proc.remote.shortname)
+            log.error('Host %s has stale /var/lib/ceph!', proc.remote.shortname)
             failed = True
     if failed:
         raise RuntimeError('Stale /var/lib/ceph detected, aborting.')
@@ -310,7 +317,29 @@ def fetch_binaries_for_coredumps(path, remote):
         # Parse file output to get program, Example output:
         # 1422917770.7450.core: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, \
         # from 'radosgw --rgw-socket-path /home/ubuntu/cephtest/apache/tmp.client.0/fastcgi_soc'
-        dump_program = dump_out.split("from '")[1].split(' ')[0]
+        log.info(f' core looks like: {dump_out}')
+
+        if 'gzip' in dump_out:
+            try:
+                log.info("core is compressed, try accessing gzip file ...")
+                with gzip.open(dump_path, 'rb') as f_in, \
+                        tempfile.NamedTemporaryFile(mode='w+b') as f_out:
+                    shutil.copyfileobj(f_in, f_out)
+                    dump_info = subprocess.Popen(['file', f_out.name],
+                                                 stdout=subprocess.PIPE)
+                    dump_out = dump_info.communicate()[0].decode()
+                    log.info(f' core looks like: {dump_out}')
+            except Exception as e:
+                log.info('Something went wrong while opening the compressed file')
+                log.error(e)
+                continue
+        try:
+            dump_program = re.findall("from '([^']+)'", dump_out)[0]
+            log.info(f' dump_program: {dump_program}')
+        except Exception as e:
+            log.info("core doesn't have the desired format, moving on ...")
+            log.error(e)
+            continue
 
         # Find path on remote server:
         remote_path = remote.sh(['which', dump_program]).rstrip()
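
For orientation, here is a minimal, standalone sketch of the new program-name extraction, run against the sample `file` output quoted in the comment above (nothing below is part of the package):

    import re

    # Sample `file` output, copied from the comment in the hunk above.
    dump_out = (
        "1422917770.7450.core: ELF 64-bit LSB core file x86-64, version 1 (SYSV), "
        "SVR4-style, from 'radosgw --rgw-socket-path "
        "/home/ubuntu/cephtest/apache/tmp.client.0/fastcgi_soc'"
    )

    # The new code takes everything between the quotes after "from", i.e. the
    # full command line of the crashed process, instead of splitting on spaces.
    matches = re.findall("from '([^']+)'", dump_out)
    dump_program = matches[0] if matches else None
    print(dump_program)
    # radosgw --rgw-socket-path /home/ubuntu/cephtest/apache/tmp.client.0/fastcgi_soc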
@@ -334,6 +363,14 @@ def fetch_binaries_for_coredumps(path, remote):
             remote.get_file(debug_path, coredump_path)
 
 
+def gzip_if_too_large(compress_min_size, src, tarinfo, local_path):
+    if tarinfo.size >= compress_min_size:
+        with gzip.open(local_path + '.gz', 'wb') as dest:
+            shutil.copyfileobj(src, dest)
+    else:
+        misc.copy_fileobj(src, tarinfo, local_path)
+
+
 @contextlib.contextmanager
 def archive(ctx, config):
     """
@@ -348,6 +385,9 @@ def archive(ctx, config):
             )
         )
 
+    # Add logs directory to job's info log file
+    misc.add_remote_path(ctx, 'init', archive_dir)
+
     try:
         yield
     except Exception:
@@ -364,7 +404,18 @@ def archive(ctx, config):
             os.mkdir(logdir)
             for rem in ctx.cluster.remotes.keys():
                 path = os.path.join(logdir, rem.shortname)
-                misc.pull_directory(rem, archive_dir, path)
+                min_size_option = ctx.config.get('log-compress-min-size',
+                                                 '128MB')
+                try:
+                    compress_min_size_bytes = \
+                        humanfriendly.parse_size(min_size_option)
+                except humanfriendly.InvalidSize:
+                    msg = 'invalid "log-compress-min-size": {}'.format(min_size_option)
+                    log.error(msg)
+                    raise ConfigError(msg)
+                maybe_compress = functools.partial(gzip_if_too_large,
+                                                   compress_min_size_bytes)
+                misc.pull_directory(rem, archive_dir, path, maybe_compress)
                 # Check for coredumps and pull binaries
                 fetch_binaries_for_coredumps(path, rem)
 
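
As a rough illustration of the threshold handling introduced above (a sketch only; humanfriendly is the library the hunk imports, and '128MB' is the default it falls back to):

    import humanfriendly

    # parse_size() converts a human-readable size string into a byte count.
    threshold = humanfriendly.parse_size('128MB')
    assert isinstance(threshold, int)

    # Values that cannot be parsed raise InvalidSize; the archive task above
    # turns that into a teuthology ConfigError.
    try:
        humanfriendly.parse_size('not-a-size')
    except humanfriendly.InvalidSize:
        pass

A job's YAML can override the threshold with any humanfriendly-style size string via the log-compress-min-size key.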
@@ -414,9 +465,10 @@ def coredump(ctx, config):
     Stash a coredump of this system if an error occurs.
     """
     log.info('Enabling coredump saving...')
+    cluster = ctx.cluster.filter(lambda r: not r.is_container)
     archive_dir = misc.get_archive_dir(ctx)
     run.wait(
-        ctx.cluster.run(
+        cluster.run(
             args=[
                 'install', '-d', '-m0755', '--',
                 '{adir}/coredump'.format(adir=archive_dir),
@@ -435,11 +487,17 @@ def coredump(ctx, config):
     try:
         yield
     finally:
+        cluster = ctx.cluster.filter(lambda r: not r.is_container)
         run.wait(
-            ctx.cluster.run(
+            cluster.run(
                 args=[
                     'sudo', 'sysctl', '-w', 'kernel.core_pattern=core',
                     run.Raw('&&'),
+                    'sudo', 'bash', '-c',
+                    (f'for f in `find {archive_dir}/coredump -type f`; do '
+                     'file $f | grep -q systemd-sysusers && rm $f || true ; '
+                     'done'),
+                    run.Raw('&&'),
                     # don't litter the archive dir if there were no cores dumped
                     'rmdir',
                     '--ignore-fail-on-non-empty',
@@ -452,7 +510,7 @@
 
         # set status = 'fail' if the dir is still there = coredumps were
        # seen
-        for rem in ctx.cluster.remotes.keys():
+        for rem in cluster.remotes.keys():
            try:
                rem.sh("test -e " + archive_dir + "/coredump")
            except run.CommandFailedError:
teuthology/task/internal/edit_sudoers.sh (new file)

@@ -0,0 +1,10 @@
+#! /bin/sh
+
+sudo vi -e /etc/sudoers <<EOF
+g/ requiretty/s// !requiretty/
+g/ !visiblepw/s// visiblepw/
+w!
+q
+EOF
+exit
+
teuthology/task/internal/lock_machines.py

@@ -1,17 +1,9 @@
 import contextlib
 import logging
-import time
-import yaml
 
 import teuthology.lock.ops
 import teuthology.lock.query
 import teuthology.lock.util
-from teuthology import misc
-from teuthology import provision
-from teuthology import report
-
-from teuthology.config import config as teuth_config
-from teuthology.job_status import get_status, set_status
 
 log = logging.getLogger(__name__)
 
@@ -23,137 +15,15 @@ def lock_machines(ctx, config):
     new machines. This is not called if the one has teuthology-locked
     machines and placed those keys in the Targets section of a yaml file.
     """
-    # It's OK for os_type and os_version to be None here. If we're trying
-    # to lock a bare metal machine, we'll take whatever is available. If
-    # we want a vps, defaults will be provided by misc.get_distro and
-    # misc.get_distro_version in provision.create_if_vm
-    os_type = ctx.config.get("os_type")
-    os_version = ctx.config.get("os_version")
-    arch = ctx.config.get('arch')
-    log.info('Locking machines...')
     assert isinstance(config[0], int), 'config[0] must be an integer'
     machine_type = config[1]
     total_requested = config[0]
     # We want to make sure there are always this many machines available
-    reserved = teuth_config.reserve_machines
-    assert isinstance(reserved, int), 'reserve_machines must be integer'
-    assert (reserved >= 0), 'reserve_machines should >= 0'
-
-    # change the status during the locking process
-    report.try_push_job_info(ctx.config, dict(status='waiting'))
-
-    all_locked = dict()
-    requested = total_requested
-    while True:
-        # get a candidate list of machines
-        machines = teuthology.lock.query.list_locks(machine_type=machine_type, up=True,
-                                                    locked=False, count=requested + reserved)
-        if machines is None:
-            if ctx.block:
-                log.error('Error listing machines, trying again')
-                time.sleep(20)
-                continue
-            else:
-                raise RuntimeError('Error listing machines')
-
-        # make sure there are machines for non-automated jobs to run
-        if len(machines) < reserved + requested and ctx.owner.startswith('scheduled'):
-            if ctx.block:
-                log.info(
-                    'waiting for more %s machines to be free (need %s + %s, have %s)...',
-                    machine_type,
-                    reserved,
-                    requested,
-                    len(machines),
-                )
-                time.sleep(10)
-                continue
-            else:
-                assert 0, ('not enough machines free; need %s + %s, have %s' %
-                           (reserved, requested, len(machines)))
-
-        try:
-            newly_locked = teuthology.lock.ops.lock_many(ctx, requested, machine_type,
-                                                         ctx.owner, ctx.archive, os_type,
-                                                         os_version, arch)
-        except Exception:
-            # Lock failures should map to the 'dead' status instead of 'fail'
-            set_status(ctx.summary, 'dead')
-            raise
-        all_locked.update(newly_locked)
-        log.info(
-            '{newly_locked} {mtype} machines locked this try, '
-            '{total_locked}/{total_requested} locked so far'.format(
-                newly_locked=len(newly_locked),
-                mtype=machine_type,
-                total_locked=len(all_locked),
-                total_requested=total_requested,
-            )
-        )
-        if len(all_locked) == total_requested:
-            vmlist = []
-            for lmach in all_locked:
-                if teuthology.lock.query.is_vm(lmach):
-                    vmlist.append(lmach)
-            if vmlist:
-                log.info('Waiting for virtual machines to come up')
-                keys_dict = dict()
-                loopcount = 0
-                while len(keys_dict) != len(vmlist):
-                    loopcount += 1
-                    time.sleep(10)
-                    keys_dict = misc.ssh_keyscan(vmlist)
-                    log.info('virtual machine is still unavailable')
-                    if loopcount == 40:
-                        loopcount = 0
-                        log.info('virtual machine(s) still not up, ' +
-                                 'recreating unresponsive ones.')
-                        for guest in vmlist:
-                            if guest not in keys_dict.keys():
-                                log.info('recreating: ' + guest)
-                                full_name = misc.canonicalize_hostname(guest)
-                                provision.destroy_if_vm(ctx, full_name)
-                                provision.create_if_vm(ctx, full_name)
-                if teuthology.lock.ops.do_update_keys(keys_dict)[0]:
-                    log.info("Error in virtual machine keys")
-                newscandict = {}
-                for dkey in all_locked.keys():
-                    stats = teuthology.lock.query.get_status(dkey)
-                    newscandict[dkey] = stats['ssh_pub_key']
-                ctx.config['targets'] = newscandict
-            else:
-                ctx.config['targets'] = all_locked
-            locked_targets = yaml.safe_dump(
-                ctx.config['targets'],
-                default_flow_style=False
-            ).splitlines()
-            log.info('\n '.join(['Locked targets:', ] + locked_targets))
-            # successfully locked machines, change status back to running
-            report.try_push_job_info(ctx.config, dict(status='running'))
-            break
-        elif not ctx.block:
-            assert 0, 'not enough machines are available'
-        else:
-            requested = requested - len(newly_locked)
-            assert requested > 0, "lock_machines: requested counter went" \
-                                  "negative, this shouldn't happen"
-
-            log.info(
-                "{total} machines locked ({new} new); need {more} more".format(
-                    total=len(all_locked), new=len(newly_locked), more=requested)
-            )
-            log.warn('Could not lock enough machines, waiting...')
-            time.sleep(10)
+    teuthology.lock.ops.block_and_lock_machines(ctx, total_requested, machine_type)
     try:
         yield
     finally:
-        # If both unlock_on_failure and nuke-on-error are set, don't unlock now
-        # because we're just going to nuke (and unlock) later.
-        unlock_on_failure = (
-            ctx.config.get('unlock_on_failure', False)
-            and not ctx.config.get('nuke-on-error', False)
-        )
-        if get_status(ctx.summary) == 'pass' or unlock_on_failure:
+        if ctx.config.get("unlock_on_failure", True):
             log.info('Unlocking machines...')
             for machine in ctx.config['targets'].keys():
-                teuthology.lock.ops.unlock_one(ctx, machine, ctx.owner, ctx.archive)
+                teuthology.lock.ops.unlock_one(machine, ctx.owner, ctx.archive)
teuthology/task/internal/redhat.py

@@ -4,6 +4,8 @@ Internal tasks for redhat downstream builds
 import contextlib
 import logging
 import requests
+import yaml
+import os
 from tempfile import NamedTemporaryFile
 from teuthology.config import config as teuthconfig
 from teuthology.parallel import parallel
@@ -19,10 +21,16 @@ def setup_stage_cdn(ctx, config):
     """
     Configure internal stage cdn
     """
+    suite_path = ctx.config.get('suite_path')
+    if not suite_path:
+        raise ConfigError("suite_path missing")
+    teuthconfig.suite_path = suite_path
+
     rhbuild = ctx.config.get('redhat').get('rhbuild')
     if not rhbuild:
-        raise ConfigError("Provide rhbuild attribute")
+        raise ConfigError('Provide rhbuild attribute')
     teuthconfig.rhbuild = str(rhbuild)
+
     with parallel() as p:
         for remote in ctx.cluster.remotes.keys():
             if remote.os.name == 'rhel':
@@ -41,7 +49,7 @@ def _subscribe_stage_cdn(remote):
     cdn_config = teuthconfig.get('cdn-config', dict())
     server_url = cdn_config.get('server-url', 'subscription.rhsm.stage.redhat.com:443/subscription')
     base_url = cdn_config.get('base-url', 'https://cdn.stage.redhat.com')
-    username = cdn_config.get('username', 'cephuser')
+    username = cdn_config.get('username')
     password = cdn_config.get('password')
     remote.run(
         args=[
@@ -80,6 +88,36 @@ def setup_cdn_repo(ctx, config):
     set_cdn_repo(ctx, config)
     yield
 
+@contextlib.contextmanager
+def setup_container_registry(ctx, config):
+    """
+    setup container registry if setup_container_registry in config
+
+    redhat:
+      setup_container_registry: <registry.io> # registry-name
+    """
+    if ctx.config.get('redhat').get('setup_container_registry', None):
+        registry = ctx.config['redhat']['setup_container_registry']
+
+        # fetch credentials from teuth_config
+        creds = teuthconfig.get('registries', dict()).get(registry)
+        if not creds:
+            raise ConfigError("Registry not found....")
+
+        # container-tool login
+        for remote in ctx.cluster.remotes.keys():
+            container_tool = "podman"
+            if remote.os.version.startswith('7'):
+                container_tool = "docker"
+
+            remote.run(args=[
+                'sudo', container_tool,
+                'login', registry,
+                '--username', creds['username'],
+                '--password', creds['password'],
+                ]
+            )
+    yield
 
 @contextlib.contextmanager
 def setup_additional_repo(ctx, config):
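
The credentials for the registry named under redhat.setup_container_registry are looked up in the teuthology config under registries. A sketch of the shape implied by that lookup, with placeholder values (the username/password key names come from the hunk above; the registry name here is hypothetical):

    # What teuthconfig.get('registries', dict()) is expected to return,
    # expressed as a plain dict with placeholder values:
    registries = {
        'registry.example.io': {    # must match redhat: setup_container_registry
            'username': 'someuser',
            'password': 'secret',
        },
    }

    creds = registries.get('registry.example.io')
    login_cmd = ['sudo', 'podman', 'login', 'registry.example.io',
                 '--username', creds['username'],
                 '--password', creds['password']]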
@@ -101,29 +139,20 @@ def setup_additional_repo(ctx, config):
 
 
 def _enable_rhel_repos(remote):
-    rhel_7_rpms = ['rhel-7-server-rpms',
-                   'rhel-7-server-optional-rpms',
-                   'rhel-7-server-extras-rpms']
-
-    rhel_8_rpms = ['rhel-8-for-x86_64-appstream-rpms',
-                   'rhel-8-for-x86_64-baseos-rpms',
-                   'ansible-2.8-for-rhel-8-x86_64-rpms']
 
-    if teuthconfig.rhbuild.startswith("3"):
-        rhel_7_rpms.append('rhel-7-server-ansible-2.6-rpms')
-    elif teuthconfig.rhbuild.startswith("4"):
-        rhel_7_rpms.append('rhel-7-server-ansible-2.8-rpms')
+    # Look for rh specific repos
+    ds_yaml = os.path.join(
+        teuthconfig.get('ds_yaml_dir'),
+        teuthconfig.rhbuild + ".yaml"
+    )
 
-    repos_to_subscribe = {'7': rhel_7_rpms,
-                          '8': rhel_8_rpms}
+    rhel_repos = yaml.safe_load(open(ds_yaml))
+    repos_to_subscribe = rhel_repos.get('rhel_repos').get(remote.os.version[0])
 
-    for repo in repos_to_subscribe.get(remote.os.version[0]):
+    for repo in repos_to_subscribe:
         remote.run(args=['sudo', 'subscription-manager',
                          'repos', '--enable={r}'.format(r=repo)])
 
-    if remote.os.version.startswith('8'):
-        workaround(remote)
-
 
 @contextlib.contextmanager
 def setup_base_repo(ctx, config):
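
The repo lists that used to be hard-coded are now read from <ds_yaml_dir>/<rhbuild>.yaml. Below is a sketch of the layout implied by the new lookup (an 'rhel_repos' mapping keyed by major OS version), reusing the previously hard-coded repo names; the exact schema is inferred from this hunk rather than taken from documentation:

    # A sketch of what <ds_yaml_dir>/<rhbuild>.yaml would need to contain,
    # shown as the Python structure yaml.safe_load() would return from it.
    rhel_repos = {
        'rhel_repos': {
            '7': [
                'rhel-7-server-rpms',
                'rhel-7-server-optional-rpms',
                'rhel-7-server-extras-rpms',
            ],
            '8': [
                'rhel-8-for-x86_64-appstream-rpms',
                'rhel-8-for-x86_64-baseos-rpms',
                'ansible-2.8-for-rhel-8-x86_64-rpms',
            ],
        },
    }

    # Mirrors the new lookup in _enable_rhel_repos (major OS version as key):
    repos_to_subscribe = rhel_repos.get('rhel_repos').get('8')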
@@ -236,12 +265,3 @@ def _create_temp_repo_file(repos, repo_file):
     repo_file.write(gpgcheck)
     repo_file.write(enabled)
     repo_file.close()
-
-
-def workaround(remote):
-    log.info('temporary workaround')
-    remote.run(args=['sudo',
-                     'yum',
-                     'install', '-y',
-                     'http://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/d/dbench-4.0-10.el7.x86_64.rpm'])
-
teuthology/task/internal/syslog.py

@@ -22,12 +22,17 @@ def syslog(ctx, config):
         yield
         return
 
+    cluster = ctx.cluster.filter(lambda r: not r.is_container)
+    if not len(cluster.remotes.keys()):
+        yield
+        return
+
     log.info('Starting syslog monitoring...')
 
     archive_dir = misc.get_archive_dir(ctx)
     log_dir = '{adir}/syslog'.format(adir=archive_dir)
     run.wait(
-        ctx.cluster.run(
+        cluster.run(
             args=['mkdir', '-p', '-m0755', '--', log_dir],
             wait=False,
         )
@@ -43,7 +48,7 @@ def syslog(ctx, config):
     ]
     conf_fp = BytesIO('\n'.join(conf_lines).encode())
     try:
-        for rem in ctx.cluster.remotes.keys():
+        for rem in cluster.remotes.keys():
            log_context = 'system_u:object_r:var_log_t:s0'
            for log_path in (kern_log, misc_log):
                rem.run(args=['install', '-m', '666', '/dev/null', log_path])
@@ -55,7 +60,7 @@ def syslog(ctx, config):
                )
        conf_fp.seek(0)
        run.wait(
-            ctx.cluster.run(
+            cluster.run(
                args=[
                    'sudo',
                    'service',
@@ -70,10 +75,14 @@ def syslog(ctx, config):
 
        yield
    finally:
+        cluster = ctx.cluster.filter(lambda r: not r.is_container)
+        if not len(cluster.remotes.keys()):
+            return
+
        log.info('Shutting down syslog monitoring...')
 
        run.wait(
-            ctx.cluster.run(
+            cluster.run(
                args=[
                    'sudo',
                    'rm',
@@ -93,13 +102,13 @@ def syslog(ctx, config):
        # flush the file fully. oh well.
 
        log.info('Checking logs for errors...')
-        for rem in ctx.cluster.remotes.keys():
+        for rem in cluster.remotes.keys():
            log.debug('Checking %s', rem.name)
            stdout = rem.sh(
                [
                    'egrep', '--binary-files=text',
                    '\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b',
-                    run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)),
+                    run.Raw(f'{archive_dir}/syslog/kern.log'),
                    run.Raw('|'),
                    'grep', '-v', 'task .* blocked for more than .* seconds',
                    run.Raw('|'),
@@ -152,7 +161,7 @@
 
        log.info('Compressing syslogs...')
        run.wait(
-            ctx.cluster.run(
+            cluster.run(
                args=[
                    'find',
                    '{adir}/syslog'.format(adir=archive_dir),
@@ -169,5 +178,19 @@ def syslog(ctx, config):
                    '--',
                ],
                wait=False,
-            ),
+            )
+        )
+
+        log.info('Gathering journactl -b0...')
+        run.wait(
+            cluster.run(
+                args=[
+                    'sudo', 'journalctl', '-b0',
+                    run.Raw('|'),
+                    'gzip', '-9',
+                    run.Raw('>'),
+                    f'{archive_dir}/syslog/journalctl-b0.gz',
+                ],
+                wait=False,
+            )
        )