teuthology 1.1.0-py3-none-any.whl → 1.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scripts/describe.py +1 -0
- scripts/dispatcher.py +55 -26
- scripts/exporter.py +18 -0
- scripts/lock.py +1 -1
- scripts/node_cleanup.py +58 -0
- scripts/openstack.py +9 -9
- scripts/results.py +12 -11
- scripts/schedule.py +4 -0
- scripts/suite.py +57 -16
- scripts/supervisor.py +44 -0
- scripts/update_inventory.py +10 -4
- teuthology/__init__.py +24 -26
- teuthology/beanstalk.py +4 -3
- teuthology/config.py +16 -6
- teuthology/contextutil.py +18 -14
- teuthology/describe_tests.py +25 -18
- teuthology/dispatcher/__init__.py +210 -35
- teuthology/dispatcher/supervisor.py +140 -58
- teuthology/exceptions.py +43 -0
- teuthology/exporter.py +347 -0
- teuthology/kill.py +76 -81
- teuthology/lock/cli.py +3 -3
- teuthology/lock/ops.py +135 -61
- teuthology/lock/query.py +61 -44
- teuthology/ls.py +1 -1
- teuthology/misc.py +61 -75
- teuthology/nuke/__init__.py +12 -353
- teuthology/openstack/__init__.py +4 -3
- teuthology/openstack/openstack-centos-7.0-user-data.txt +1 -1
- teuthology/openstack/openstack-centos-7.1-user-data.txt +1 -1
- teuthology/openstack/openstack-centos-7.2-user-data.txt +1 -1
- teuthology/openstack/openstack-debian-8.0-user-data.txt +1 -1
- teuthology/openstack/openstack-opensuse-42.1-user-data.txt +1 -1
- teuthology/openstack/openstack-teuthology.cron +0 -1
- teuthology/orchestra/cluster.py +49 -7
- teuthology/orchestra/connection.py +16 -5
- teuthology/orchestra/console.py +111 -50
- teuthology/orchestra/daemon/cephadmunit.py +17 -4
- teuthology/orchestra/daemon/state.py +8 -1
- teuthology/orchestra/daemon/systemd.py +4 -4
- teuthology/orchestra/opsys.py +30 -11
- teuthology/orchestra/remote.py +405 -338
- teuthology/orchestra/run.py +3 -3
- teuthology/packaging.py +19 -16
- teuthology/provision/__init__.py +30 -10
- teuthology/provision/cloud/openstack.py +12 -6
- teuthology/provision/cloud/util.py +1 -2
- teuthology/provision/downburst.py +4 -3
- teuthology/provision/fog.py +68 -20
- teuthology/provision/openstack.py +5 -4
- teuthology/provision/pelagos.py +1 -1
- teuthology/repo_utils.py +43 -13
- teuthology/report.py +57 -35
- teuthology/results.py +5 -3
- teuthology/run.py +13 -14
- teuthology/run_tasks.py +27 -43
- teuthology/schedule.py +4 -3
- teuthology/scrape.py +28 -22
- teuthology/suite/__init__.py +74 -45
- teuthology/suite/build_matrix.py +34 -24
- teuthology/suite/fragment-merge.lua +105 -0
- teuthology/suite/matrix.py +31 -2
- teuthology/suite/merge.py +175 -0
- teuthology/suite/placeholder.py +6 -9
- teuthology/suite/run.py +175 -100
- teuthology/suite/util.py +64 -218
- teuthology/task/__init__.py +1 -1
- teuthology/task/ansible.py +101 -32
- teuthology/task/buildpackages.py +2 -2
- teuthology/task/ceph_ansible.py +13 -6
- teuthology/task/cephmetrics.py +2 -1
- teuthology/task/clock.py +33 -14
- teuthology/task/exec.py +18 -0
- teuthology/task/hadoop.py +2 -2
- teuthology/task/install/__init__.py +29 -7
- teuthology/task/install/bin/adjust-ulimits +16 -0
- teuthology/task/install/bin/daemon-helper +114 -0
- teuthology/task/install/bin/stdin-killer +263 -0
- teuthology/task/install/deb.py +1 -1
- teuthology/task/install/rpm.py +17 -5
- teuthology/task/install/util.py +3 -3
- teuthology/task/internal/__init__.py +41 -10
- teuthology/task/internal/edit_sudoers.sh +10 -0
- teuthology/task/internal/lock_machines.py +2 -9
- teuthology/task/internal/redhat.py +31 -1
- teuthology/task/internal/syslog.py +31 -8
- teuthology/task/kernel.py +152 -145
- teuthology/task/lockfile.py +1 -1
- teuthology/task/mpi.py +10 -10
- teuthology/task/pcp.py +1 -1
- teuthology/task/selinux.py +16 -8
- teuthology/task/ssh_keys.py +4 -4
- teuthology/task/tests/__init__.py +137 -77
- teuthology/task/tests/test_fetch_coredumps.py +116 -0
- teuthology/task/tests/test_run.py +4 -4
- teuthology/timer.py +3 -3
- teuthology/util/loggerfile.py +19 -0
- teuthology/util/scanner.py +159 -0
- teuthology/util/sentry.py +52 -0
- teuthology/util/time.py +52 -0
- teuthology-1.2.0.data/scripts/adjust-ulimits +16 -0
- teuthology-1.2.0.data/scripts/daemon-helper +114 -0
- teuthology-1.2.0.data/scripts/stdin-killer +263 -0
- teuthology-1.2.0.dist-info/METADATA +89 -0
- teuthology-1.2.0.dist-info/RECORD +174 -0
- {teuthology-1.1.0.dist-info → teuthology-1.2.0.dist-info}/WHEEL +1 -1
- {teuthology-1.1.0.dist-info → teuthology-1.2.0.dist-info}/entry_points.txt +3 -2
- scripts/nuke.py +0 -47
- scripts/worker.py +0 -37
- teuthology/nuke/actions.py +0 -456
- teuthology/openstack/test/__init__.py +0 -0
- teuthology/openstack/test/openstack-integration.py +0 -286
- teuthology/openstack/test/test_config.py +0 -35
- teuthology/openstack/test/test_openstack.py +0 -1695
- teuthology/orchestra/test/__init__.py +0 -0
- teuthology/orchestra/test/integration/__init__.py +0 -0
- teuthology/orchestra/test/integration/test_integration.py +0 -94
- teuthology/orchestra/test/test_cluster.py +0 -240
- teuthology/orchestra/test/test_connection.py +0 -106
- teuthology/orchestra/test/test_console.py +0 -217
- teuthology/orchestra/test/test_opsys.py +0 -404
- teuthology/orchestra/test/test_remote.py +0 -185
- teuthology/orchestra/test/test_run.py +0 -286
- teuthology/orchestra/test/test_systemd.py +0 -54
- teuthology/orchestra/test/util.py +0 -12
- teuthology/test/__init__.py +0 -0
- teuthology/test/fake_archive.py +0 -107
- teuthology/test/fake_fs.py +0 -92
- teuthology/test/integration/__init__.py +0 -0
- teuthology/test/integration/test_suite.py +0 -86
- teuthology/test/task/__init__.py +0 -205
- teuthology/test/task/test_ansible.py +0 -624
- teuthology/test/task/test_ceph_ansible.py +0 -176
- teuthology/test/task/test_console_log.py +0 -88
- teuthology/test/task/test_install.py +0 -337
- teuthology/test/task/test_internal.py +0 -57
- teuthology/test/task/test_kernel.py +0 -243
- teuthology/test/task/test_pcp.py +0 -379
- teuthology/test/task/test_selinux.py +0 -35
- teuthology/test/test_config.py +0 -189
- teuthology/test/test_contextutil.py +0 -68
- teuthology/test/test_describe_tests.py +0 -316
- teuthology/test/test_email_sleep_before_teardown.py +0 -81
- teuthology/test/test_exit.py +0 -97
- teuthology/test/test_get_distro.py +0 -47
- teuthology/test/test_get_distro_version.py +0 -47
- teuthology/test/test_get_multi_machine_types.py +0 -27
- teuthology/test/test_job_status.py +0 -60
- teuthology/test/test_ls.py +0 -48
- teuthology/test/test_misc.py +0 -391
- teuthology/test/test_nuke.py +0 -290
- teuthology/test/test_packaging.py +0 -763
- teuthology/test/test_parallel.py +0 -28
- teuthology/test/test_repo_utils.py +0 -225
- teuthology/test/test_report.py +0 -77
- teuthology/test/test_results.py +0 -155
- teuthology/test/test_run.py +0 -239
- teuthology/test/test_safepath.py +0 -55
- teuthology/test/test_schedule.py +0 -45
- teuthology/test/test_scrape.py +0 -167
- teuthology/test/test_timer.py +0 -80
- teuthology/test/test_vps_os_vers_parameter_checking.py +0 -84
- teuthology/test/test_worker.py +0 -303
- teuthology/worker.py +0 -354
- teuthology-1.1.0.dist-info/METADATA +0 -76
- teuthology-1.1.0.dist-info/RECORD +0 -213
- {teuthology-1.1.0.dist-info → teuthology-1.2.0.dist-info}/LICENSE +0 -0
- {teuthology-1.1.0.dist-info → teuthology-1.2.0.dist-info}/top_level.txt +0 -0
teuthology/suite/placeholder.py
CHANGED
@@ -45,13 +45,13 @@ def substitute_placeholders(input_dict, values_dict):
 # Template for the config that becomes the base for each generated job config
 dict_templ = {
     'branch': Placeholder('ceph_branch'),
+    'expire': Placeholder('expire'),
     'sha1': Placeholder('ceph_hash'),
     'teuthology_branch': Placeholder('teuthology_branch'),
     'teuthology_sha1': Placeholder('teuthology_sha1'),
     'archive_upload': Placeholder('archive_upload'),
     'archive_upload_key': Placeholder('archive_upload_key'),
     'machine_type': Placeholder('machine_type'),
-    'nuke-on-error': True,
     'os_type': Placeholder('distro'),
     'os_version': Placeholder('distro_version'),
     'overrides': {
@@ -68,16 +68,13 @@ dict_templ = {
                     'debug mgr': 20,
                     'debug ms': 1},
                 'osd': {
-                    'debug filestore': 20,
-                    'debug journal': 20,
                     'debug ms': 1,
-                    'debug osd':
+                    'debug osd': 20
                 }
             },
-            '
-
-
-                '\(MDS_UP_LESS_THAN_MAX\)'],
+            'flavor': Placeholder('flavor'),
+            'log-ignorelist': [r'\(MDS_ALL_DOWN\)',
+                               r'\(MDS_UP_LESS_THAN_MAX\)'],
             'sha1': Placeholder('ceph_hash'),
         },
         'ceph-deploy': {
@@ -86,13 +83,13 @@ dict_templ = {
                     'log file': '/var/log/ceph/ceph-$name.$pid.log'
                 },
                 'mon': {
-                    'osd default pool size': 2
                 }
             }
         },
         'install': {
             'ceph': {
                 'sha1': Placeholder('ceph_hash'),
+                'flavor': Placeholder('flavor'),
             }
         },
         'workunit': {
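The new `expire` and `flavor` placeholders above are filled in from the `config_input` dict that `Run.create_initial_config` builds (see the run.py diff below). As a rough illustration only, and not the packaged implementation, the sketch below shows how a `Placeholder`-based template of this shape can be resolved, assuming each `Placeholder` names a key in `values_dict` and that keys resolving to `None` are dropped:

# Illustrative stand-in for teuthology.suite.placeholder; the real module's
# behavior may differ (the None-dropping rule here is an assumption).
import copy


class Placeholder:
    """Marks a template value to be looked up in values_dict by name."""
    def __init__(self, name):
        self.name = name


def substitute_placeholders(input_dict, values_dict):
    """Recursively replace Placeholder markers with concrete values."""
    def _resolve(node):
        if isinstance(node, dict):
            out = {}
            for key, value in node.items():
                resolved = _resolve(value)
                if resolved is not None:
                    out[key] = resolved
            return out
        if isinstance(node, Placeholder):
            return values_dict.get(node.name)
        return copy.deepcopy(node)
    return _resolve(input_dict)


# A toy template mirroring the keys added in 1.2.0; values are illustrative.
template = {
    'branch': Placeholder('ceph_branch'),
    'expire': Placeholder('expire'),
    'overrides': {'ceph': {'flavor': Placeholder('flavor')}},
}
print(substitute_placeholders(
    template,
    dict(ceph_branch='main', expire='2024-01-01_00:00:00', flavor='default'),
))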
teuthology/suite/run.py
CHANGED
@@ -1,27 +1,30 @@
 import copy
+import datetime
 import logging
 import os
 import pwd
+import yaml
 import re
 import time
-import yaml
 
 from humanfriendly import format_timespan
 
-from datetime import datetime
 from tempfile import NamedTemporaryFile
+from teuthology import repo_utils
 
 from teuthology.config import config, JobConfig
 from teuthology.exceptions import (
-    BranchNotFoundError, CommitNotFoundError,
+    BranchMismatchError, BranchNotFoundError, CommitNotFoundError,
 )
 from teuthology.misc import deep_merge, get_results_url
 from teuthology.orchestra.opsys import OS
 from teuthology.repo_utils import build_git_url
 
 from teuthology.suite import util
+from teuthology.suite.merge import config_merge
 from teuthology.suite.build_matrix import build_matrix
 from teuthology.suite.placeholder import substitute_placeholders, dict_templ
+from teuthology.util.time import parse_offset, parse_timestamp, TIMESTAMP_FMT
 
 log = logging.getLogger(__name__)
 
@@ -31,8 +34,7 @@ class Run(object):
     WAIT_PAUSE = 5 * 60
     __slots__ = (
         'args', 'name', 'base_config', 'suite_repo_path', 'base_yaml_paths',
-        'base_args', '
-        'timestamp', 'user',
+        'base_args', 'kernel_dict', 'config_input', 'timestamp', 'user', 'os',
     )
 
     def __init__(self, args):
@@ -42,7 +44,7 @@ class Run(object):
         self.args = args
         # We assume timestamp is a datetime.datetime object
         self.timestamp = self.args.timestamp or \
-            datetime.now().strftime(
+            datetime.datetime.now().strftime(TIMESTAMP_FMT)
         self.user = self.args.user or pwd.getpwuid(os.getuid()).pw_name
 
         self.name = self.make_run_name()
@@ -53,8 +55,6 @@ class Run(object):
             config.ceph_qa_suite_git_url = self.args.suite_repo
 
         self.base_config = self.create_initial_config()
-        # caches package versions to minimize requests to gbs
-        self.package_versions = dict()
 
         # Interpret any relative paths as being relative to ceph-qa-suite
         # (absolute paths are unchanged by this)
@@ -74,7 +74,7 @@ class Run(object):
                 self.args.suite,
                 self.args.ceph_branch,
                 self.args.kernel_branch or '-',
-                self.args.
+                self.args.flavor, worker
             ]
         ).replace('/', ':')
 
@@ -87,6 +87,16 @@ class Run(object):
 
         :returns: A JobConfig object
         """
+        now = datetime.datetime.now(datetime.timezone.utc)
+        expires = self.get_expiration()
+        if expires:
+            if now > expires:
+                util.schedule_fail(
+                    f"Refusing to schedule because the expiration date is in the past: {self.args.expire}",
+                    dry_run=self.args.dry_run,
+                )
+
+        self.os = self.choose_os()
         self.kernel_dict = self.choose_kernel()
         ceph_hash = self.choose_ceph_hash()
         # We don't store ceph_version because we don't use it yet outside of
@@ -98,7 +108,7 @@ class Run(object):
             self.suite_repo_path = self.args.suite_dir
         else:
             self.suite_repo_path = util.fetch_repos(
-                suite_branch, test_name=self.name)
+                suite_branch, test_name=self.name, dry_run=self.args.dry_run)
         teuthology_branch, teuthology_sha1 = self.choose_teuthology_branch()
 
 
@@ -115,15 +125,46 @@ class Run(object):
             teuthology_branch=teuthology_branch,
             teuthology_sha1=teuthology_sha1,
             machine_type=self.args.machine_type,
-            distro=self.
-            distro_version=self.
+            distro=self.os.name,
+            distro_version=self.os.version,
             archive_upload=config.archive_upload,
             archive_upload_key=config.archive_upload_key,
             suite_repo=config.get_ceph_qa_suite_git_url(),
             suite_relpath=self.args.suite_relpath,
+            flavor=self.args.flavor,
+            expire=expires.strftime(TIMESTAMP_FMT) if expires else None,
         )
         return self.build_base_config()
 
+    def get_expiration(self, _base_time: datetime.datetime | None = None) -> datetime.datetime | None:
+        """
+        _base_time: For testing, calculate relative offsets from this base time
+
+        :returns: True if the job should run; False if it has expired
+        """
+        log.info(f"Checking for expiration ({self.args.expire})")
+        expires_str = self.args.expire
+        if expires_str is None:
+            return None
+        now = datetime.datetime.now(datetime.timezone.utc)
+        if _base_time is None:
+            _base_time = now
+        try:
+            expires = parse_timestamp(expires_str)
+        except ValueError:
+            expires = _base_time + parse_offset(expires_str)
+        return expires
+
+    def choose_os(self):
+        os_type = self.args.distro
+        os_version = self.args.distro_version
+        if not (os_type and os_version):
+            os_ = util.get_distro_defaults(
+                self.args.distro, self.args.machine_type)[2]
+        else:
+            os_ = OS(os_type, os_version)
+        return os_
+
     def choose_kernel(self):
         # Put together a stanza specifying the kernel hash
         if self.args.kernel_branch == 'distro':
@@ -134,20 +175,25 @@ class Run(object):
             kernel_hash = None
         else:
             kernel_hash = util.get_gitbuilder_hash(
-                'kernel', self.args.kernel_branch,
+                'kernel', self.args.kernel_branch, 'default',
                 self.args.machine_type, self.args.distro,
                 self.args.distro_version,
             )
             if not kernel_hash:
                 util.schedule_fail(
                     "Kernel branch '{branch}' not found".format(
-                        branch=self.args.kernel_branch)
+                        branch=self.args.kernel_branch),
+                    dry_run=self.args.dry_run,
                 )
+        kdb = True
+        if self.args.kdb is not None:
+            kdb = self.args.kdb
+
         if kernel_hash:
             log.info("kernel sha1: {hash}".format(hash=kernel_hash))
-            kernel_dict = dict(kernel=dict(kdb=
+            kernel_dict = dict(kernel=dict(kdb=kdb, sha1=kernel_hash))
             if kernel_hash != 'distro':
-                kernel_dict['kernel']['flavor'] =
+                kernel_dict['kernel']['flavor'] = 'default'
         else:
             kernel_dict = dict()
         return kernel_dict
@@ -160,6 +206,7 @@ class Run(object):
         """
         repo_name = self.ceph_repo_name
 
+        ceph_hash = None
         if self.args.ceph_sha1:
             ceph_hash = self.args.ceph_sha1
             if self.args.validate_sha1:
@@ -169,17 +216,18 @@ class Run(object):
                     self.args.ceph_sha1,
                     '%s.git' % repo_name
                 )
-                util.schedule_fail(message=str(exc), name=self.name)
+                util.schedule_fail(message=str(exc), name=self.name, dry_run=self.args.dry_run)
             log.info("ceph sha1 explicitly supplied")
 
         elif self.args.ceph_branch:
-            ceph_hash = util.git_ls_remote(
+            ceph_hash = util.git_ls_remote(
+                self.args.ceph_repo, self.args.ceph_branch)
             if not ceph_hash:
                 exc = BranchNotFoundError(
                     self.args.ceph_branch,
                     '%s.git' % repo_name
                 )
-                util.schedule_fail(message=str(exc), name=self.name)
+                util.schedule_fail(message=str(exc), name=self.name, dry_run=self.args.dry_run)
 
         log.info("ceph sha1: {hash}".format(hash=ceph_hash))
         return ceph_hash
@@ -188,13 +236,14 @@ class Run(object):
         if config.suite_verify_ceph_hash and not self.args.newest:
             # don't bother if newest; we'll search for an older one
             # Get the ceph package version
-
-
-
-
-
-
-
+            ceph_version = util.package_version_for_hash(
+                ceph_hash, self.args.flavor, self.os.name,
+                self.os.version, self.args.machine_type,
+            )
+            if not ceph_version:
+                msg = f"Packages for os_type '{self.os.name}', flavor " \
+                    f"{self.args.flavor} and ceph hash '{ceph_hash}' not found"
+                util.schedule_fail(msg, self.name, dry_run=self.args.dry_run)
             log.info("ceph version: {ver}".format(ver=ceph_version))
             return ceph_version
         else:
@@ -219,7 +268,7 @@ class Run(object):
         of the teuthology config files ``$HOME/teuthology.yaml``
         or ``/etc/teuthology.yaml`` correspondingly.
 
-        Use ``
+        Use ``main``.
 
         Generate exception if the branch is not present in the repo.
 
@@ -243,15 +292,32 @@ class Run(object):
             log.warning(
                 'The teuthology branch config is empty, skipping')
         if not teuthology_branch:
-            teuthology_branch = config.get('teuthology_branch'
-
-
-
-            teuthology_branch
-
+            teuthology_branch = config.get('teuthology_branch')
+
+        if config.teuthology_path:
+            actual_branch = repo_utils.current_branch(config.teuthology_path)
+            if teuthology_branch and actual_branch != teuthology_branch:
+                raise BranchMismatchError(
+                    teuthology_branch,
+                    config.teuthology_path,
+                    "config.teuthology_path is set",
+                )
+            if not teuthology_branch:
+                teuthology_branch = actual_branch
+            teuthology_sha1 = util.git_ls_remote(
+                f"file://{config.teuthology_path}",
+                teuthology_branch
+            )
+        else:
+            if not teuthology_branch:
+                teuthology_branch = 'main'
+            teuthology_sha1 = util.git_ls_remote(
+                'teuthology',
+                teuthology_branch
+            )
         if not teuthology_sha1:
             exc = BranchNotFoundError(teuthology_branch, build_git_url('teuthology'))
-            util.schedule_fail(message=str(exc), name=self.name)
+            util.schedule_fail(message=str(exc), name=self.name, dry_run=self.args.dry_run)
         log.info("teuthology branch: %s %s", teuthology_branch, teuthology_sha1)
         return teuthology_branch, teuthology_sha1
 
@@ -271,32 +337,32 @@ class Run(object):
 
     @staticmethod
     def _repo_name(url):
-        return re.sub('\.git$', '', url.split('/')[-1])
+        return re.sub(r'\.git$', '', url.split('/')[-1])
 
     def choose_suite_branch(self):
         suite_repo_name = self.suite_repo_name
         suite_repo_project_or_url = self.args.suite_repo or 'ceph-qa-suite'
         suite_branch = self.args.suite_branch
         ceph_branch = self.args.ceph_branch
-        if suite_branch and suite_branch != '
+        if suite_branch and suite_branch != 'main':
             if not util.git_branch_exists(
                 suite_repo_project_or_url,
                 suite_branch
             ):
                 exc = BranchNotFoundError(suite_branch, suite_repo_name)
-                util.schedule_fail(message=str(exc), name=self.name)
+                util.schedule_fail(message=str(exc), name=self.name, dry_run=self.args.dry_run)
         elif not suite_branch:
            # Decide what branch of the suite repo to use
            if util.git_branch_exists(suite_repo_project_or_url, ceph_branch):
                suite_branch = ceph_branch
            else:
                log.info(
-                    "branch {0} not in {1}; will use
+                    "branch {0} not in {1}; will use main for"
                     " ceph-qa-suite".format(
                         ceph_branch,
                         suite_repo_name
                     ))
-                suite_branch = '
+                suite_branch = 'main'
         return suite_branch
 
     def choose_suite_hash(self, suite_branch):
@@ -308,7 +374,7 @@ class Run(object):
         )
         if not suite_hash:
             exc = BranchNotFoundError(suite_branch, suite_repo_name)
-            util.schedule_fail(message=str(exc), name=self.name)
+            util.schedule_fail(message=str(exc), name=self.name, dry_run=self.args.dry_run)
         log.info("%s branch: %s %s", suite_repo_name, suite_branch, suite_hash)
         return suite_hash
 
@@ -320,6 +386,9 @@ class Run(object):
         job_config.user = self.user
         job_config.timestamp = self.timestamp
         job_config.priority = self.args.priority
+        job_config.seed = self.args.seed
+        if self.args.subset:
+            job_config.subset = '/'.join(str(i) for i in self.args.subset)
         if self.args.email:
             job_config.email = self.args.email
         if self.args.owner:
@@ -354,6 +423,8 @@ class Run(object):
         if self.args.subset:
             subset = '/'.join(str(i) for i in self.args.subset)
             args.extend(['--subset', subset])
+        if self.args.no_nested_subset:
+            args.extend(['--no-nested-subset'])
         args.extend(['--seed', str(self.args.seed)])
         util.teuthology_schedule(
             args=args,
@@ -402,16 +473,13 @@ class Run(object):
     def collect_jobs(self, arch, configs, newest=False, limit=0):
         jobs_to_schedule = []
         jobs_missing_packages = []
-        for description, fragment_paths in configs:
+        for description, fragment_paths, parsed_yaml in configs:
            if limit > 0 and len(jobs_to_schedule) >= limit:
                log.info(
                    'Stopped after {limit} jobs due to --limit={limit}'.format(
                        limit=limit))
                break

-            raw_yaml = '\n'.join([open(a, 'r').read() for a in fragment_paths])
-
-            parsed_yaml = yaml.safe_load(raw_yaml)
            os_type = parsed_yaml.get('os_type') or self.base_config.os_type
            os_version = parsed_yaml.get('os_version') or self.base_config.os_version
            exclude_arch = parsed_yaml.get('exclude_arch')
@@ -433,13 +501,16 @@ class Run(object):
                '--',
            ])
            arg.extend(self.base_yaml_paths)
-
+
+            parsed_yaml_txt = yaml.dump(parsed_yaml)
+            arg.append('-')

            job = dict(
                yaml=parsed_yaml,
                desc=description,
                sha1=self.base_config.sha1,
-                args=arg
+                args=arg,
+                stdin=parsed_yaml_txt,
            )

            sha1 = self.base_config.sha1
@@ -448,31 +519,16 @@ class Run(object):
                full_job_config = copy.deepcopy(self.base_config.to_dict())
                deep_merge(full_job_config, parsed_yaml)
                flavor = util.get_install_task_flavor(full_job_config)
-
-
-
-                # again for them.
-                try:
-                    self.package_versions = util.get_package_versions(
-                        sha1,
-                        os_type,
-                        os_version,
-                        flavor,
-                        self.package_versions
-                    )
-                except VersionNotFoundError:
-                    pass
-                if not util.has_packages_for_distro(
-                    sha1, os_type, os_version, flavor, self.package_versions
-                ):
-                    m = "Packages for os_type '{os}', flavor {flavor} and " + \
-                        "ceph hash '{ver}' not found"
-                    log.error(m.format(os=os_type, flavor=flavor, ver=sha1))
+                version = util.package_version_for_hash(sha1, flavor, os_type,
+                                                        os_version, self.args.machine_type)
+                if not version:
                    jobs_missing_packages.append(job)
+                    log.error(f"Packages for os_type '{os_type}', flavor {flavor} and "
+                              f"ceph hash '{sha1}' not found")
                    # optimization: one missing package causes backtrack in newest mode;
                    # no point in continuing the search
                    if newest:
-                        return jobs_missing_packages,
+                        return jobs_missing_packages, []

            jobs_to_schedule.append(job)
        return jobs_missing_packages, jobs_to_schedule
@@ -486,20 +542,19 @@ class Run(object):
            log_prefix = ''
            if job in jobs_missing_packages:
                log_prefix = "Missing Packages: "
-                if
-                    not self.args.dry_run and
-                    not config.suite_allow_missing_packages
-                ):
+                if not config.suite_allow_missing_packages:
                    util.schedule_fail(
-                        "At least one job needs packages that don't exist
-                        "hash {
+                        "At least one job needs packages that don't exist "
+                        f"for hash {self.base_config.sha1}.",
                        name,
+                        dry_run=self.args.dry_run,
                    )
            util.teuthology_schedule(
                args=job['args'],
                dry_run=self.args.dry_run,
                verbose=self.args.verbose,
                log_prefix=log_prefix,
+                stdin=job['stdin'],
            )
            throttle = self.args.throttle
            if not self.args.dry_run and throttle:
@@ -513,17 +568,28 @@ class Run(object):
 Use the following testing priority
 10 to 49: Tests which are urgent and blocking other important development.
 50 to 74: Testing a particular feature/fix with less than 25 jobs and can also be used for urgent release testing.
-75 to 99: Tech Leads usually schedule integration tests with this priority to verify pull requests against
+75 to 99: Tech Leads usually schedule integration tests with this priority to verify pull requests against main.
 100 to 149: QE validation of point releases.
 150 to 199: Testing a particular feature/fix with less than 100 jobs and results will be available in a day or so.
 200 to 1000: Large test runs that can be done over the course of a week.
 Note: To force run, use --force-priority'''
         if priority < 50:
-            util.schedule_fail(msg)
+            util.schedule_fail(msg, dry_run=self.args.dry_run)
         elif priority < 75 and jobs_to_schedule > 25:
-            util.schedule_fail(msg)
+            util.schedule_fail(msg, dry_run=self.args.dry_run)
         elif priority < 150 and jobs_to_schedule > 100:
-            util.schedule_fail(msg)
+            util.schedule_fail(msg, dry_run=self.args.dry_run)
+
+    def check_num_jobs(self, jobs_to_schedule):
+        """
+        Fail schedule if number of jobs exceeds job threshold.
+        """
+        threshold = self.args.job_threshold
+        msg=f'''Unable to schedule {jobs_to_schedule} jobs, too many jobs, when maximum {threshold} jobs allowed.
+
+Note: If you still want to go ahead, use --job-threshold 0'''
+        if threshold and jobs_to_schedule > threshold:
+            util.schedule_fail(msg, dry_run=self.args.dry_run)
 
     def schedule_suite(self):
         """
@@ -543,11 +609,21 @@ Note: To force run, use --force-priority'''
            self.base_config.suite.replace(':', '/'),
        ))
        log.debug('Suite %s in %s' % (suite_name, suite_path))
+        log.debug(f"subset = {self.args.subset}")
+        log.debug(f"no_nested_subset = {self.args.no_nested_subset}")
        configs = build_matrix(suite_path,
                               subset=self.args.subset,
+                               no_nested_subset=self.args.no_nested_subset,
                               seed=self.args.seed)
-
-
+        generated = len(configs)
+        log.info(f'Suite {suite_name} in {suite_path} generated {generated} jobs (not yet filtered or merged)')
+        configs = list(config_merge(configs,
+                                    filter_in=self.args.filter_in,
+                                    filter_out=self.args.filter_out,
+                                    filter_all=self.args.filter_all,
+                                    filter_fragments=self.args.filter_fragments,
+                                    seed=self.args.seed,
+                                    suite_name=suite_name))

        if self.args.dry_run:
            log.debug("Base job config:\n%s" % self.base_config)
@@ -584,7 +660,7 @@ Note: To force run, use --force-priority'''
                              'this run for {that_long}? (y/N):'
                              .format(
                                  that_long=format_timespan(sleep_before_teardown),
-                                  total=
+                                  total=generated,
                                  maximum=job_limit))
            while True:
                insane=(input(are_you_insane) or 'n').lower()
@@ -597,23 +673,18 @@ Note: To force run, use --force-priority'''
        # if not, do it once
        backtrack = 0
        limit = self.args.newest
+        sha1s = []
+        jobs_to_schedule = []
+        jobs_missing_packages = []
        while backtrack <= limit:
            jobs_missing_packages, jobs_to_schedule = \
-                self.collect_jobs(arch,
-                    util.filter_configs(configs,
-                                        filter_in=self.args.filter_in,
-                                        filter_out=self.args.filter_out,
-                                        filter_all=self.args.filter_all,
-                                        filter_fragments=self.args.filter_fragments,
-                                        suite_name=suite_name),
-                    self.args.newest, job_limit)
+                self.collect_jobs(arch, configs, self.args.newest, job_limit)
            if jobs_missing_packages and self.args.newest:
-
-                util.
-                if
-                    util.schedule_fail('Backtrack for --newest failed', name)
-
-                self.config_input['ceph_hash'] = new_sha1
+                if not sha1s:
+                    sha1s = util.find_git_parents('ceph', str(self.base_config.sha1), self.args.newest)
+                if not sha1s:
+                    util.schedule_fail('Backtrack for --newest failed', name, dry_run=self.args.dry_run)
+                self.config_input['ceph_hash'] = sha1s.pop(0)
                self.base_config = self.build_base_config()
                backtrack += 1
                continue
@@ -626,11 +697,9 @@ Note: To force run, use --force-priority'''
                util.schedule_fail(
                    'Exceeded %d backtracks; raise --newest value' % limit,
                    name,
+                    dry_run=self.args.dry_run,
                )

-        if self.args.dry_run:
-            log.debug("Base job config:\n%s" % self.base_config)
-
        with open(base_yaml_path, 'w+b') as base_yaml:
            base_yaml.write(str(self.base_config).encode())

@@ -641,20 +710,26 @@ Note: To force run, use --force-priority'''
        if self.args.priority and jobs_to_schedule and not self.args.force_priority:
            self.check_priority(len(jobs_to_schedule))

+        self.check_num_jobs(len(jobs_to_schedule))
+
        self.schedule_jobs(jobs_missing_packages, jobs_to_schedule, name)

        os.remove(base_yaml_path)

        count = len(jobs_to_schedule)
        missing_count = len(jobs_missing_packages)
+        total_count = count
+        if self.args.num:
+            total_count *= self.args.num
        log.info(
            'Suite %s in %s scheduled %d jobs.' %
            (suite_name, suite_path, count)
        )
        log.info('%d/%d jobs were filtered out.',
-                 (
-
+                 (generated - count),
+                 generated)
        if missing_count:
-            log.
+            log.warning('Scheduled %d/%d jobs that are missing packages!',
                        missing_count, count)
+        log.info('Scheduled %d jobs in total.', total_count)
        return count