teuthology 1.0.0__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scripts/describe.py +1 -0
- scripts/dispatcher.py +62 -0
- scripts/exporter.py +18 -0
- scripts/lock.py +1 -1
- scripts/node_cleanup.py +58 -0
- scripts/openstack.py +9 -9
- scripts/results.py +12 -11
- scripts/run.py +4 -0
- scripts/schedule.py +4 -0
- scripts/suite.py +61 -16
- scripts/supervisor.py +44 -0
- scripts/update_inventory.py +10 -4
- scripts/wait.py +31 -0
- teuthology/__init__.py +24 -21
- teuthology/beanstalk.py +4 -3
- teuthology/config.py +17 -6
- teuthology/contextutil.py +18 -14
- teuthology/describe_tests.py +25 -18
- teuthology/dispatcher/__init__.py +365 -0
- teuthology/dispatcher/supervisor.py +374 -0
- teuthology/exceptions.py +54 -0
- teuthology/exporter.py +347 -0
- teuthology/kill.py +76 -75
- teuthology/lock/cli.py +16 -7
- teuthology/lock/ops.py +276 -70
- teuthology/lock/query.py +61 -44
- teuthology/ls.py +9 -18
- teuthology/misc.py +152 -137
- teuthology/nuke/__init__.py +12 -351
- teuthology/openstack/__init__.py +4 -3
- teuthology/openstack/openstack-centos-7.0-user-data.txt +1 -1
- teuthology/openstack/openstack-centos-7.1-user-data.txt +1 -1
- teuthology/openstack/openstack-centos-7.2-user-data.txt +1 -1
- teuthology/openstack/openstack-debian-8.0-user-data.txt +1 -1
- teuthology/openstack/openstack-opensuse-42.1-user-data.txt +1 -1
- teuthology/openstack/openstack-teuthology.cron +0 -1
- teuthology/orchestra/cluster.py +51 -9
- teuthology/orchestra/connection.py +23 -16
- teuthology/orchestra/console.py +111 -50
- teuthology/orchestra/daemon/cephadmunit.py +23 -5
- teuthology/orchestra/daemon/state.py +10 -3
- teuthology/orchestra/daemon/systemd.py +10 -8
- teuthology/orchestra/opsys.py +32 -11
- teuthology/orchestra/remote.py +369 -152
- teuthology/orchestra/run.py +21 -12
- teuthology/packaging.py +54 -15
- teuthology/provision/__init__.py +30 -10
- teuthology/provision/cloud/openstack.py +12 -6
- teuthology/provision/cloud/util.py +1 -2
- teuthology/provision/downburst.py +83 -29
- teuthology/provision/fog.py +68 -20
- teuthology/provision/openstack.py +5 -4
- teuthology/provision/pelagos.py +13 -5
- teuthology/repo_utils.py +91 -44
- teuthology/report.py +57 -35
- teuthology/results.py +5 -3
- teuthology/run.py +21 -15
- teuthology/run_tasks.py +114 -40
- teuthology/schedule.py +4 -3
- teuthology/scrape.py +28 -22
- teuthology/suite/__init__.py +75 -46
- teuthology/suite/build_matrix.py +34 -24
- teuthology/suite/fragment-merge.lua +105 -0
- teuthology/suite/matrix.py +31 -2
- teuthology/suite/merge.py +175 -0
- teuthology/suite/placeholder.py +8 -8
- teuthology/suite/run.py +204 -102
- teuthology/suite/util.py +67 -211
- teuthology/task/__init__.py +1 -1
- teuthology/task/ansible.py +101 -31
- teuthology/task/buildpackages.py +2 -2
- teuthology/task/ceph_ansible.py +13 -6
- teuthology/task/cephmetrics.py +2 -1
- teuthology/task/clock.py +33 -14
- teuthology/task/exec.py +18 -0
- teuthology/task/hadoop.py +2 -2
- teuthology/task/install/__init__.py +51 -22
- teuthology/task/install/bin/adjust-ulimits +16 -0
- teuthology/task/install/bin/daemon-helper +114 -0
- teuthology/task/install/bin/stdin-killer +263 -0
- teuthology/task/install/deb.py +24 -4
- teuthology/task/install/redhat.py +36 -32
- teuthology/task/install/rpm.py +41 -14
- teuthology/task/install/util.py +48 -22
- teuthology/task/internal/__init__.py +69 -11
- teuthology/task/internal/edit_sudoers.sh +10 -0
- teuthology/task/internal/lock_machines.py +3 -133
- teuthology/task/internal/redhat.py +48 -28
- teuthology/task/internal/syslog.py +31 -8
- teuthology/task/kernel.py +155 -147
- teuthology/task/lockfile.py +1 -1
- teuthology/task/mpi.py +10 -10
- teuthology/task/pcp.py +1 -1
- teuthology/task/selinux.py +17 -8
- teuthology/task/ssh_keys.py +6 -6
- teuthology/task/tests/__init__.py +137 -77
- teuthology/task/tests/test_fetch_coredumps.py +116 -0
- teuthology/task/tests/test_run.py +4 -4
- teuthology/timer.py +3 -3
- teuthology/util/loggerfile.py +19 -0
- teuthology/util/scanner.py +159 -0
- teuthology/util/sentry.py +52 -0
- teuthology/util/time.py +52 -0
- teuthology-1.2.0.data/scripts/adjust-ulimits +16 -0
- teuthology-1.2.0.data/scripts/daemon-helper +114 -0
- teuthology-1.2.0.data/scripts/stdin-killer +263 -0
- teuthology-1.2.0.dist-info/METADATA +89 -0
- teuthology-1.2.0.dist-info/RECORD +174 -0
- {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/WHEEL +1 -1
- {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/entry_points.txt +5 -2
- scripts/nuke.py +0 -45
- scripts/worker.py +0 -37
- teuthology/nuke/actions.py +0 -456
- teuthology/openstack/test/__init__.py +0 -0
- teuthology/openstack/test/openstack-integration.py +0 -286
- teuthology/openstack/test/test_config.py +0 -35
- teuthology/openstack/test/test_openstack.py +0 -1695
- teuthology/orchestra/test/__init__.py +0 -0
- teuthology/orchestra/test/integration/__init__.py +0 -0
- teuthology/orchestra/test/integration/test_integration.py +0 -94
- teuthology/orchestra/test/test_cluster.py +0 -240
- teuthology/orchestra/test/test_connection.py +0 -106
- teuthology/orchestra/test/test_console.py +0 -217
- teuthology/orchestra/test/test_opsys.py +0 -404
- teuthology/orchestra/test/test_remote.py +0 -185
- teuthology/orchestra/test/test_run.py +0 -286
- teuthology/orchestra/test/test_systemd.py +0 -54
- teuthology/orchestra/test/util.py +0 -12
- teuthology/sentry.py +0 -18
- teuthology/test/__init__.py +0 -0
- teuthology/test/fake_archive.py +0 -107
- teuthology/test/fake_fs.py +0 -92
- teuthology/test/integration/__init__.py +0 -0
- teuthology/test/integration/test_suite.py +0 -86
- teuthology/test/task/__init__.py +0 -205
- teuthology/test/task/test_ansible.py +0 -624
- teuthology/test/task/test_ceph_ansible.py +0 -176
- teuthology/test/task/test_console_log.py +0 -88
- teuthology/test/task/test_install.py +0 -337
- teuthology/test/task/test_internal.py +0 -57
- teuthology/test/task/test_kernel.py +0 -243
- teuthology/test/task/test_pcp.py +0 -379
- teuthology/test/task/test_selinux.py +0 -35
- teuthology/test/test_config.py +0 -189
- teuthology/test/test_contextutil.py +0 -68
- teuthology/test/test_describe_tests.py +0 -316
- teuthology/test/test_email_sleep_before_teardown.py +0 -81
- teuthology/test/test_exit.py +0 -97
- teuthology/test/test_get_distro.py +0 -47
- teuthology/test/test_get_distro_version.py +0 -47
- teuthology/test/test_get_multi_machine_types.py +0 -27
- teuthology/test/test_job_status.py +0 -60
- teuthology/test/test_ls.py +0 -48
- teuthology/test/test_misc.py +0 -368
- teuthology/test/test_nuke.py +0 -232
- teuthology/test/test_packaging.py +0 -763
- teuthology/test/test_parallel.py +0 -28
- teuthology/test/test_repo_utils.py +0 -204
- teuthology/test/test_report.py +0 -77
- teuthology/test/test_results.py +0 -155
- teuthology/test/test_run.py +0 -238
- teuthology/test/test_safepath.py +0 -55
- teuthology/test/test_schedule.py +0 -45
- teuthology/test/test_scrape.py +0 -167
- teuthology/test/test_timer.py +0 -80
- teuthology/test/test_vps_os_vers_parameter_checking.py +0 -84
- teuthology/test/test_worker.py +0 -303
- teuthology/worker.py +0 -339
- teuthology-1.0.0.dist-info/METADATA +0 -76
- teuthology-1.0.0.dist-info/RECORD +0 -210
- {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/LICENSE +0 -0
- {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/top_level.txt +0 -0
teuthology/task/kernel.py
CHANGED
@@ -2,6 +2,7 @@
 Kernel installation task
 """
 
+import contextlib
 import logging
 import os
 import re
@@ -28,10 +29,11 @@ from teuthology.packaging import (
     get_koji_task_result,
     get_builder_project,
 )
+from teuthology.task.install.deb import install_dep_packages
 
 log = logging.getLogger(__name__)
 
-CONFIG_DEFAULT = {'branch': '
+CONFIG_DEFAULT = {'branch': 'main'}
 TIMEOUT_DEFAULT = 300
 
 VERSION_KEYS = ['branch', 'tag', 'sha1', 'deb', 'rpm', 'koji', 'koji_task']
@@ -76,9 +78,10 @@ def normalize_config(ctx, config):
     :param ctx: Context
     :param config: Configuration
     """
+    log.info(f'normalize config orig: {config}')
     if not config or \
             len([x for x in config.keys() if x in
-                 VERSION_KEYS + ['kdb', 'flavor']]) == len(config.keys()):
+                 VERSION_KEYS + ['kdb', 'flavor', 'hwe']]) == len(config.keys()):
         new_config = {}
         if not config:
             config = CONFIG_DEFAULT
@@ -98,6 +101,7 @@ def normalize_config(ctx, config):
             # specific overrides generic
             if name not in config:
                 new_config[name] = role_config.copy()
+    log.info(f'normalize config final: {new_config}')
     return new_config
 
 
@@ -117,7 +121,6 @@ def normalize_and_apply_overrides(ctx, config, overrides):
     if 'timeout' in config:
         timeout = config.pop('timeout')
     config = normalize_config(ctx, config)
-    log.debug('normalized config %s' % config)
 
     if 'timeout' in overrides:
         timeout = overrides.pop('timeout')
@@ -286,7 +289,7 @@ def install_firmware(ctx, config):
                 run.Raw('&&'),
                 'sudo', 'git', 'fetch', 'origin',
                 run.Raw('&&'),
-                'sudo', 'git', 'reset', '--hard', 'origin/
+                'sudo', 'git', 'reset', '--hard', 'origin/main'
             ],
         )
 
@@ -479,7 +482,7 @@ def update_rh_kernel(remote):
         log.info("Latest version already installed on %s", remote.shortname)
 
 
-def install_and_reboot(ctx, config):
+def install_and_reboot(ctx, need_install, config):
     """
     Install and reboot the kernel. This mostly performs remote
     installation operations. The code does check for Arm images
@@ -489,15 +492,16 @@ def install_and_reboot(ctx, config):
     it expects kernel entries to be present under submenu entries.
 
     :param ctx: Context
+    :param need_install: map from caller
     :param config: Configuration
     """
     procs = {}
     kernel_title = ''
-    for role, src in
+    for role, src in need_install.items():
         (role_remote,) = ctx.cluster.only(role).remotes.keys()
         if isinstance(src, str) and src.find('distro') >= 0:
             log.info('Installing distro kernel on {role}...'.format(role=role))
-            install_kernel(role_remote, version=src)
+            install_kernel(role_remote, config[role], version=src)
             continue
 
         log.info('Installing kernel {src} on {role}...'.format(src=src,
@@ -514,7 +518,7 @@ def install_and_reboot(ctx, config):
                     '--replacepkgs',
                     remote_pkg_path(role_remote),
                 ])
-            install_kernel(role_remote, remote_pkg_path(role_remote))
+            install_kernel(role_remote, config[role], path=remote_pkg_path(role_remote))
             continue
 
         # TODO: Refactor this into install_kernel() so that it handles all
@@ -658,7 +662,7 @@ def enable_disable_kdb(ctx, config):
                         'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc'
                     ])
             except run.CommandFailedError:
-                log.
+                log.warning('Kernel does not support kdb')
         else:
             log.info('Disabling kdb on {role}...'.format(role=role))
             # Add true pipe so command doesn't fail on kernel without kdb support.
@@ -672,10 +676,10 @@ def enable_disable_kdb(ctx, config):
                         'true',
                     ])
             except run.CommandFailedError:
-                log.
+                log.warning('Kernel does not support kdb')
 
 
-def wait_for_reboot(ctx, need_install, timeout, distro=False):
+def wait_for_reboot(ctx, need_install, timeout, config, distro=False):
     """
     Loop reconnecting and checking kernel versions until
     they're all correct or the timeout is exceeded.
@@ -691,15 +695,15 @@ def wait_for_reboot(ctx, need_install, timeout, distro=False):
     time.sleep(30)
     starttime = time.time()
     while need_install:
-        teuthology.reconnect(ctx, timeout)
         for client in list(need_install.keys()):
            if 'distro' in str(need_install[client]):
                distro = True
            log.info('Checking client {client} for new kernel version...'.format(client=client))
            try:
+                (remote,) = ctx.cluster.only(client).remotes.keys()
+                remote.reconnect(timeout=timeout)
                if distro:
-                    (remote,
-                    assert not need_to_install_distro(remote), \
+                    assert not need_to_install_distro(remote, config[client]), \
                        'failed to install new distro kernel version within timeout'
 
                else:
@@ -733,7 +737,7 @@ def get_version_of_running_kernel(remote):
     return current
 
 
-def need_to_install_distro(remote):
+def need_to_install_distro(remote, role_config):
     """
     Installing kernels on rpm won't setup grub/boot into them. This installs
     the newest kernel package and checks its version and compares against
@@ -783,7 +787,7 @@ def need_to_install_distro(remote):
         newest = get_latest_image_version_rpm(remote)
 
     if package_type == 'deb':
-        newest = get_latest_image_version_deb(remote, dist_release)
+        newest = get_latest_image_version_deb(remote, dist_release, role_config)
 
     if current in newest or current.replace('-', '_') in newest:
         log.info('Newest distro kernel installed and running')
@@ -820,7 +824,7 @@ def maybe_generate_initrd_rpm(remote, path, version):
         ])
 
 
-def install_kernel(remote, path=None, version=None):
+def install_kernel(remote, role_config, path=None, version=None):
     """
     A bit of misnomer perhaps - the actual kernel package is installed
     elsewhere, this function deals with initrd and grub. Currently the
@@ -855,7 +859,7 @@ def install_kernel(remote, path=None, version=None):
         return
 
     if package_type == 'deb':
-        newversion = get_latest_image_version_deb(remote, dist_release)
+        newversion = get_latest_image_version_deb(remote, dist_release, role_config)
         if 'ubuntu' in dist_release:
             grub2conf = teuthology.get_file(remote,
                 '/boot/grub/grub.cfg', sudo=True).decode()
@@ -979,14 +983,14 @@ def generate_legacy_grub_entry(remote, newversion):
         if re.match('^title', line):
             titleline = line
             titlelinenum = linenum
-        if re.match('(^\s+)root', line):
+        if re.match(r'(^\s+)root', line):
             rootline = line
-        if re.match('(^\s+)kernel', line):
+        if re.match(r'(^\s+)kernel', line):
             kernelline = line
             for word in line.split(' '):
                 if 'vmlinuz' in word:
                     kernelversion = word.split('vmlinuz-')[-1]
-        if re.match('(^\s+)initrd', line):
+        if re.match(r'(^\s+)initrd', line):
             initline = line
         if (kernelline != '') and (initline != ''):
             break
@@ -1029,8 +1033,9 @@ def get_image_version(remote, path):
         raise UnsupportedPackageTypeError(remote)
 
     for file in files.split('\n'):
-
-
+        match = re.search(r'/lib/modules/(.*)/modules\.order$', file)
+        if match:
+            version = match.group(1)
             break
 
     log.debug("get_image_version: %s", version)
@@ -1049,9 +1054,9 @@ def get_latest_image_version_rpm(remote):
         kernel_pkg_name = "kernel-default"
     else:
         kernel_pkg_name = "kernel"
-    # get tip of package list ordered by
+    # get tip of package list ordered by descending version
     newest_package = remote.sh(
-        'rpm -q %s
+        'rpm -q %s | sort -rV | head -n 1' % kernel_pkg_name).strip()
     for kernel in newest_package.split():
         if kernel.startswith('kernel'):
             if 'ceph' not in kernel:
@@ -1062,7 +1067,7 @@ def get_latest_image_version_rpm(remote):
     return version
 
 
-def get_latest_image_version_deb(remote, ostype):
+def get_latest_image_version_deb(remote, ostype, role_config):
     """
     Get kernel image version of the newest kernel deb package.
     Used for distro case.
@@ -1078,8 +1083,8 @@ def get_latest_image_version_deb(remote, ostype):
     # Note that a dependency list may have multiple comma-separated entries,
     # but also each entry may be an alternative (pkg1 | pkg2)
     if 'debian' in ostype:
-
-
+        args=['sudo', 'apt-get', '-y', 'install', 'linux-image-amd64']
+        install_dep_packages(remote, args)
         remote.run(args=['dpkg', '-s', 'linux-image-amd64'], stdout=output)
         for line in output.getvalue().split('\n'):
             if 'Depends:' in line:
@@ -1088,26 +1093,16 @@ def get_latest_image_version_deb(remote, ostype):
                 return newest
     # Ubuntu is a depend in a depend.
     if 'ubuntu' in ostype:
-
-
-
-
-
-
-
-
-
-
-                             depends])
-            remote.run(args=['dpkg', '-s', depends], stdout=output)
-        except run.CommandFailedError:
-            # Non precise ubuntu machines (like trusty) don't have
-            # linux-image-current-generic so use linux-image-generic instead.
-            remote.run(args=['sudo', 'DEBIAN_FRONTEND=noninteractive',
-                             'apt-get', '-y', 'install',
-                             'linux-image-generic'], stdout=output)
-            remote.run(args=['dpkg', '-s', 'linux-image-generic'],
-                       stdout=output)
+        name = 'linux-image-generic'
+        if role_config.get('hwe'):
+            name = f'linux-image-generic-hwe-{remote.os.version}'
+
+        args=['sudo', 'DEBIAN_FRONTEND=noninteractive',
+              'apt-get', '-y', 'install', name]
+        install_dep_packages(remote, args)
+        remote.run(args=['dpkg', '-s', name],
+                   stdout=output)
+
         for line in output.getvalue().split('\n'):
             if 'Depends:' in line:
                 newest = line.split('linux-image-')[1]
@@ -1137,13 +1132,14 @@ def get_sha1_from_pkg_name(path):
     return sha1
 
 
+@contextlib.contextmanager
 def task(ctx, config):
     """
     Make sure the specified kernel is installed.
     This can be a branch, tag, or sha1 of ceph-client.git or a local
     kernel package.
 
-    To install ceph-client.git branch (default:
+    To install ceph-client.git branch (default: main)::
 
         kernel:
           branch: testing
@@ -1212,7 +1208,7 @@ def task(ctx, config):
          client.1:
            branch: more_specific
          osd.3:
-            branch:
+            branch: main
 
    To wait 3 minutes for hosts to reboot (default: 300)::
 
@@ -1237,105 +1233,117 @@ def task(ctx, config):
     validate_config(ctx, config)
     log.info('config %s, timeout %d' % (config, timeout))
 
-
-
-
+    with parallel() as p:
+        for role, role_config in config.items():
+            p.spawn(process_role, ctx, config, timeout, role, role_config)
 
-
-
-
-
-        if role_config.get('rpm') or role_config.get('deb'):
-            # We only care about path - deb: vs rpm: is meaningless,
-            # rpm: just happens to be parsed first. Nothing is stopping
-            # 'deb: /path/to/foo.rpm' and it will work provided remote's
-            # os.package_type is 'rpm' and vice versa.
-            path = role_config.get('rpm')
-            if not path:
-                path = role_config.get('deb')
-            sha1 = get_sha1_from_pkg_name(path)
-            assert sha1, "failed to extract commit hash from path %s" % path
-            if need_to_install(ctx, role, sha1):
-                need_install[role] = path
-                need_version[role] = sha1
-        elif role_config.get('sha1') == 'distro':
-            version = need_to_install_distro(role_remote)
-            if version:
-                need_install[role] = 'distro'
-                need_version[role] = version
-        elif role_config.get("koji") or role_config.get('koji_task'):
-            # installing a kernel from koji
-            build_id = role_config.get("koji")
-            task_id = role_config.get("koji_task")
-            if role_remote.os.package_type != "rpm":
-                msg = (
-                    "Installing a kernel from koji is only supported "
-                    "on rpm based systems. System type is {system_type}."
-                )
-                msg = msg.format(system_type=system_type)
-                log.error(msg)
-                ctx.summary["failure_reason"] = msg
-                ctx.summary["status"] = "dead"
-                raise ConfigError(msg)
-
-            # FIXME: this install should probably happen somewhere else
-            # but I'm not sure where, so we'll leave it here for now.
-            install_package('koji', role_remote)
-
-            if build_id:
-                # get information about this build from koji
-                build_info = get_koji_build_info(build_id, role_remote, ctx)
-                version = "{ver}-{rel}.x86_64".format(
-                    ver=build_info["version"],
-                    rel=build_info["release"]
-                )
-            elif task_id:
-                # get information about results of this task from koji
-                task_result = get_koji_task_result(task_id, role_remote, ctx)
-                # this is not really 'build_info', it's a dict of information
-                # about the kernel rpm from the task results, but for the sake
-                # of reusing the code below I'll still call it that.
-                build_info = get_koji_task_rpm_info(
-                    'kernel',
-                    task_result['rpms']
-                )
-                # add task_id so we can know later that we're installing
-                # from a task and not a build.
-                build_info["task_id"] = task_id
-            version = build_info["version"]
-
-            if need_to_install(ctx, role, version):
-                need_install[role] = build_info
-                need_version[role] = version
-        else:
-            builder = get_builder_project()(
-                "kernel",
-                role_config,
-                ctx=ctx,
-                remote=role_remote,
-            )
-            sha1 = builder.sha1
-            log.debug('sha1 for {role} is {sha1}'.format(role=role, sha1=sha1))
-            ctx.summary['{role}-kernel-sha1'.format(role=role)] = sha1
+    try:
+        yield
+    finally:
+        pass
 
-            if need_to_install(ctx, role, sha1):
-                if teuth_config.use_shaman:
-                    version = builder.scm_version
-                else:
-                    version = builder.version
-                if not version:
-                    raise VersionNotFoundError(builder.base_url)
-                need_install[role] = sha1
-                need_version[role] = version
 
-
-
-
+def process_role(ctx, config, timeout, role, role_config):
+    need_install = None # sha1 to dl, or path to rpm or deb
+    need_version = None # utsrelease or sha1
 
-
-
-
-
-
+    # gather information about this remote
+    (role_remote,) = ctx.cluster.only(role).remotes.keys()
+    system_type = role_remote.os.name
+    if role_remote.is_container:
+        log.info(f"Remote f{role_remote.shortname} is a container; skipping kernel installation")
+        return
+    if role_config.get('rpm') or role_config.get('deb'):
+        # We only care about path - deb: vs rpm: is meaningless,
+        # rpm: just happens to be parsed first. Nothing is stopping
+        # 'deb: /path/to/foo.rpm' and it will work provided remote's
+        # os.package_type is 'rpm' and vice versa.
+        path = role_config.get('rpm')
+        if not path:
+            path = role_config.get('deb')
+        sha1 = get_sha1_from_pkg_name(path)
+        assert sha1, "failed to extract commit hash from path %s" % path
+        if need_to_install(ctx, role, sha1):
+            need_install = path
+            need_version = sha1
+    elif role_config.get('sha1') == 'distro':
+        version = need_to_install_distro(role_remote, role_config)
+        if version:
+            need_install = 'distro'
+            need_version = version
+    elif role_config.get("koji") or role_config.get('koji_task'):
+        # installing a kernel from koji
+        build_id = role_config.get("koji")
+        task_id = role_config.get("koji_task")
+        if role_remote.os.package_type != "rpm":
+            msg = (
+                "Installing a kernel from koji is only supported "
+                "on rpm based systems. System type is {system_type}."
+            )
+            msg = msg.format(system_type=system_type)
+            log.error(msg)
+            ctx.summary["failure_reason"] = msg
+            ctx.summary["status"] = "dead"
+            raise ConfigError(msg)
+
+        # FIXME: this install should probably happen somewhere else
+        # but I'm not sure where, so we'll leave it here for now.
+        install_package('koji', role_remote)
+
+        if build_id:
+            # get information about this build from koji
+            build_info = get_koji_build_info(build_id, role_remote, ctx)
+            version = "{ver}-{rel}.x86_64".format(
+                ver=build_info["version"],
+                rel=build_info["release"]
+            )
+        elif task_id:
+            # get information about results of this task from koji
+            task_result = get_koji_task_result(task_id, role_remote, ctx)
+            # this is not really 'build_info', it's a dict of information
+            # about the kernel rpm from the task results, but for the sake
+            # of reusing the code below I'll still call it that.
+            build_info = get_koji_task_rpm_info(
+                'kernel',
+                task_result['rpms']
+            )
+            # add task_id so we can know later that we're installing
+            # from a task and not a build.
+            build_info["task_id"] = task_id
+        version = build_info["version"]
+
+        if need_to_install(ctx, role, version):
+            need_install = build_info
+            need_version = version
+    else:
+        builder = get_builder_project()(
+            "kernel",
+            role_config,
+            ctx=ctx,
+            remote=role_remote,
+        )
+        sha1 = builder.sha1
+        log.debug('sha1 for {role} is {sha1}'.format(role=role, sha1=sha1))
+        ctx.summary['{role}-kernel-sha1'.format(role=role)] = sha1
 
-
+        if need_to_install(ctx, role, sha1):
+            if teuth_config.use_shaman:
+                version = builder.scm_version
+            else:
+                version = builder.version
+            if not version:
+                raise VersionNotFoundError(builder.base_url)
+            need_install = sha1
+            need_version = version
+
+    if need_install:
+        install_firmware(ctx, {role: need_install})
+        download_kernel(ctx, {role: need_install})
+        install_and_reboot(ctx, {role: need_install}, config)
+        wait_for_reboot(ctx, {role: need_version}, timeout, config)
+
+    # enable or disable kdb if specified, otherwise do not touch
+    if role_config.get('kdb') is not None:
+        kdb = role_config.get('kdb')
+        enable_disable_kdb(ctx, {role: kdb})
+
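The kernel.py change above replaces the long per-role loop inside task() with a new process_role() helper fanned out through teuthology's parallel helper, so each role's kernel is prepared concurrently and the per-role config (including the new 'hwe' key) travels with it. Below is a minimal sketch of that dispatch pattern, assuming teuthology.parallel is importable; handle_role and the example role configs are hypothetical stand-ins for the real process_role():

from teuthology.parallel import parallel

def handle_role(role, role_config):
    # Hypothetical worker: in the real task this is process_role(), which
    # picks between rpm/deb paths, 'distro', koji, or a builder sha1.
    print(f"{role}: would install kernel per {role_config}")

config = {
    'client.0': {'branch': 'main'},
    'client.1': {'sha1': 'distro', 'hwe': True},  # 'hwe' is the new per-role flag
}

with parallel() as p:
    for role, role_config in config.items():
        p.spawn(handle_role, role, role_config)
# Leaving the with-block joins all spawned calls and re-raises any failure.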
teuthology/task/lockfile.py
CHANGED
@@ -94,7 +94,7 @@ def task(ctx, config):
                     'wget',
                     '-nv',
                     '--no-check-certificate',
-                    'https://raw.github.com/gregsfortytwo/FileLocker/
+                    'https://raw.github.com/gregsfortytwo/FileLocker/main/sclockandhold.cpp',
                     '-O', '{tdir}/lockfile/sclockandhold.cpp'.format(tdir=testdir),
                     run.Raw('&&'),
                     'g++', '{tdir}/lockfile/sclockandhold.cpp'.format(tdir=testdir),
teuthology/task/mpi.py
CHANGED
@@ -18,7 +18,7 @@ def _check_mpi_version(remotes):
     for remote in remotes:
         version_str = remote.sh("mpiexec --version")
         try:
-            version = re.search("^\s+Version:\s+(.+)$", version_str, re.MULTILINE).group(1)
+            version = re.search(r"^\s+Version:\s+(.+)$", version_str, re.MULTILINE).group(1)
         except AttributeError:
             raise RuntimeError("Malformed MPI version output: {0}".format(version_str))
         else:
@@ -89,7 +89,7 @@ def task(ctx, config):
     mpiexec = config['exec'].replace('$TESTDIR', testdir)
     hosts = []
     remotes = []
-
+    main_remote = None
     if 'nodes' in config:
         if isinstance(config['nodes'], str) and config['nodes'] == 'all':
             for role in teuthology.all_roles(ctx.cluster):
@@ -97,17 +97,17 @@ def task(ctx, config):
                 ip,port = remote.ssh.get_transport().getpeername()
                 hosts.append(ip)
                 remotes.append(remote)
-            (
+            (main_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.keys()
         elif isinstance(config['nodes'], list):
             for role in config['nodes']:
                 (remote,) = ctx.cluster.only(role).remotes.keys()
                 ip,port = remote.ssh.get_transport().getpeername()
                 hosts.append(ip)
                 remotes.append(remote)
-            (
+            (main_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.keys()
     else:
         roles = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
-        (
+        (main_remote,) = ctx.cluster.only(roles[0]).remotes.keys()
         for role in roles:
             (remote,) = ctx.cluster.only(role).remotes.keys()
             ip,port = remote.ssh.get_transport().getpeername()
@@ -121,17 +121,17 @@ def task(ctx, config):
     if 'workdir' in config:
         workdir = ['-wdir', config['workdir'].replace('$TESTDIR', testdir) ]
 
-    log.info('mpi rank 0 is: {name}'.format(name=
+    log.info('mpi rank 0 is: {name}'.format(name=main_remote.name))
 
     # write out the mpi hosts file
     log.info('mpi nodes: [%s]' % (', '.join(hosts)))
-    teuthology.write_file(remote=
+    teuthology.write_file(remote=main_remote,
                           path='{tdir}/mpi-hosts'.format(tdir=testdir),
                           data='\n'.join(hosts))
-    log.info('mpiexec on {name}: {cmd}'.format(name=
+    log.info('mpiexec on {name}: {cmd}'.format(name=main_remote.name, cmd=mpiexec))
     args=['mpiexec', '-f', '{tdir}/mpi-hosts'.format(tdir=testdir)]
     args.extend(workdir)
     args.extend(mpiexec.split(' '))
-
+    main_remote.run(args=args, )
     log.info('mpi task completed')
-
+    main_remote.run(args=['rm', '{tdir}/mpi-hosts'.format(tdir=testdir)])
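The mpi.py hunks above mostly rename the rank-0 remote to main_remote and switch the version regex to a raw string. A small standalone sketch of that regex follows; the sample mpiexec output is made up purely for illustration and varies between MPI builds:

import re

# Illustrative output only; real `mpiexec --version` text differs per MPI build.
version_str = (
    "HYDRA build details:\n"
    "    Version:                                 3.4.2\n"
    "    Release Date:                            unreleased development copy\n"
)

# Same pattern as _check_mpi_version(); the r'' prefix keeps '\s' from being
# treated as an (invalid) string escape in newer Python.
match = re.search(r"^\s+Version:\s+(.+)$", version_str, re.MULTILINE)
print(match.group(1))  # -> 3.4.2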
teuthology/task/pcp.py
CHANGED
teuthology/task/selinux.py
CHANGED
@@ -24,7 +24,7 @@ class SELinux(Task):
     fail for other denials one can add the overrides with appropriate escapes
     overrides:
       selinux:
-
+        allowlist:
           - 'name="cephtest"'
           - 'dmidecode'
           - 'comm="logrotate"'
@@ -53,6 +53,9 @@ class SELinux(Task):
             if remote.is_vm:
                 msg = "Excluding {host}: VMs are not yet supported"
                 log.info(msg.format(host=remote.shortname))
+            elif remote.is_container:
+                msg = "Excluding {host}: containers are not yet supported"
+                log.info(msg.format(host=remote.shortname))
             elif remote.os.name in ['opensuse', 'sle']:
                 msg = "Excluding {host}: \
                       SELinux is not supported for '{os}' os_type yet"
@@ -130,16 +133,22 @@ class SELinux(Task):
             'comm="setroubleshootd"',
             'comm="rpm"',
             'tcontext=system_u:object_r:container_runtime_exec_t:s0',
+            'comm="ksmtuned"',
+            'comm="sssd"',
+            'comm="sss_cache"',
+            'context=system_u:system_r:NetworkManager_dispatcher_t:s0',
+            'context=system_u:system_r:getty_t:s0',
         ]
-
-        if
-            known_denials.extend(
-
+        se_allowlist = self.config.get('allowlist', [])
+        if se_allowlist:
+            known_denials.extend(se_allowlist)
+        get_denials_cmd = ['sudo', 'grep', '-a', 'avc: .*denied', '/var/log/audit/audit.log']
+        filter_denials_cmd = ['grep', '-av']
+        for known_denial in known_denials:
+            filter_denials_cmd.extend(['-e', known_denial])
         for remote in self.cluster.remotes.keys():
             proc = remote.run(
-                args=
-                    '/var/log/audit/audit.log', run.Raw('|'), 'grep', '-av',
-                    run.Raw(ignore_known_denials)],
+                args = get_denials_cmd + [run.Raw('|')] + filter_denials_cmd,
                 stdout=StringIO(),
                 check_status=False,
             )
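The selinux.py change above drops the single run.Raw(ignore_known_denials) filter and instead builds two explicit argument lists: one grep that pulls AVC denials out of audit.log and one grep -av with a -e flag per known or allowlisted pattern. A minimal sketch of that construction, assuming Python 3.8+ for shlex.join; the sample patterns and the printed pipeline are illustrative only (teuthology itself joins the two lists with run.Raw('|') inside remote.run()):

import shlex

known_denials = ['comm="logrotate"', 'comm="rpm"']
allowlist = ['name="cephtest"']  # e.g. from overrides: selinux: allowlist:
known_denials.extend(allowlist)

get_denials_cmd = ['sudo', 'grep', '-a', 'avc: .*denied', '/var/log/audit/audit.log']
filter_denials_cmd = ['grep', '-av']
for known_denial in known_denials:
    filter_denials_cmd.extend(['-e', known_denial])

# Render as a shell pipeline for inspection.
print(shlex.join(get_denials_cmd) + ' | ' + shlex.join(filter_denials_cmd))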