teuthology 1.1.0__py3-none-any.whl → 1.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scripts/describe.py +1 -0
- scripts/dispatcher.py +55 -26
- scripts/exporter.py +18 -0
- scripts/lock.py +1 -1
- scripts/node_cleanup.py +58 -0
- scripts/openstack.py +9 -9
- scripts/results.py +12 -11
- scripts/schedule.py +4 -0
- scripts/suite.py +57 -16
- scripts/supervisor.py +44 -0
- scripts/update_inventory.py +10 -4
- teuthology/__init__.py +24 -26
- teuthology/beanstalk.py +4 -3
- teuthology/config.py +16 -6
- teuthology/contextutil.py +18 -14
- teuthology/describe_tests.py +25 -18
- teuthology/dispatcher/__init__.py +210 -35
- teuthology/dispatcher/supervisor.py +140 -58
- teuthology/exceptions.py +43 -0
- teuthology/exporter.py +347 -0
- teuthology/kill.py +76 -81
- teuthology/lock/cli.py +3 -3
- teuthology/lock/ops.py +135 -61
- teuthology/lock/query.py +61 -44
- teuthology/ls.py +1 -1
- teuthology/misc.py +61 -75
- teuthology/nuke/__init__.py +12 -353
- teuthology/openstack/__init__.py +4 -3
- teuthology/openstack/openstack-centos-7.0-user-data.txt +1 -1
- teuthology/openstack/openstack-centos-7.1-user-data.txt +1 -1
- teuthology/openstack/openstack-centos-7.2-user-data.txt +1 -1
- teuthology/openstack/openstack-debian-8.0-user-data.txt +1 -1
- teuthology/openstack/openstack-opensuse-42.1-user-data.txt +1 -1
- teuthology/openstack/openstack-teuthology.cron +0 -1
- teuthology/orchestra/cluster.py +49 -7
- teuthology/orchestra/connection.py +17 -4
- teuthology/orchestra/console.py +111 -50
- teuthology/orchestra/daemon/cephadmunit.py +15 -2
- teuthology/orchestra/daemon/state.py +8 -1
- teuthology/orchestra/daemon/systemd.py +4 -4
- teuthology/orchestra/opsys.py +30 -11
- teuthology/orchestra/remote.py +405 -338
- teuthology/orchestra/run.py +3 -3
- teuthology/packaging.py +19 -16
- teuthology/provision/__init__.py +30 -10
- teuthology/provision/cloud/openstack.py +12 -6
- teuthology/provision/cloud/util.py +1 -2
- teuthology/provision/downburst.py +4 -3
- teuthology/provision/fog.py +68 -20
- teuthology/provision/openstack.py +5 -4
- teuthology/provision/pelagos.py +1 -1
- teuthology/repo_utils.py +43 -13
- teuthology/report.py +57 -35
- teuthology/results.py +5 -3
- teuthology/run.py +13 -14
- teuthology/run_tasks.py +27 -43
- teuthology/schedule.py +4 -3
- teuthology/scrape.py +28 -22
- teuthology/suite/__init__.py +74 -45
- teuthology/suite/build_matrix.py +34 -24
- teuthology/suite/fragment-merge.lua +105 -0
- teuthology/suite/matrix.py +31 -2
- teuthology/suite/merge.py +175 -0
- teuthology/suite/placeholder.py +6 -9
- teuthology/suite/run.py +175 -100
- teuthology/suite/util.py +64 -218
- teuthology/task/__init__.py +1 -1
- teuthology/task/ansible.py +101 -32
- teuthology/task/buildpackages.py +2 -2
- teuthology/task/ceph_ansible.py +13 -6
- teuthology/task/cephmetrics.py +2 -1
- teuthology/task/clock.py +33 -14
- teuthology/task/exec.py +18 -0
- teuthology/task/hadoop.py +2 -2
- teuthology/task/install/__init__.py +29 -7
- teuthology/task/install/bin/adjust-ulimits +16 -0
- teuthology/task/install/bin/daemon-helper +114 -0
- teuthology/task/install/bin/stdin-killer +263 -0
- teuthology/task/install/deb.py +1 -1
- teuthology/task/install/rpm.py +17 -5
- teuthology/task/install/util.py +3 -3
- teuthology/task/internal/__init__.py +41 -10
- teuthology/task/internal/edit_sudoers.sh +10 -0
- teuthology/task/internal/lock_machines.py +2 -9
- teuthology/task/internal/redhat.py +31 -1
- teuthology/task/internal/syslog.py +31 -8
- teuthology/task/kernel.py +152 -145
- teuthology/task/lockfile.py +1 -1
- teuthology/task/mpi.py +10 -10
- teuthology/task/pcp.py +1 -1
- teuthology/task/selinux.py +16 -8
- teuthology/task/ssh_keys.py +4 -4
- teuthology/timer.py +3 -3
- teuthology/util/loggerfile.py +19 -0
- teuthology/util/scanner.py +159 -0
- teuthology/util/sentry.py +52 -0
- teuthology/util/time.py +52 -0
- teuthology-1.2.1.data/scripts/adjust-ulimits +16 -0
- teuthology-1.2.1.data/scripts/daemon-helper +114 -0
- teuthology-1.2.1.data/scripts/stdin-killer +263 -0
- teuthology-1.2.1.dist-info/METADATA +88 -0
- teuthology-1.2.1.dist-info/RECORD +168 -0
- {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/WHEEL +1 -1
- {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/entry_points.txt +3 -2
- scripts/nuke.py +0 -47
- scripts/worker.py +0 -37
- teuthology/lock/test/__init__.py +0 -0
- teuthology/lock/test/test_lock.py +0 -7
- teuthology/nuke/actions.py +0 -456
- teuthology/openstack/test/__init__.py +0 -0
- teuthology/openstack/test/openstack-integration.py +0 -286
- teuthology/openstack/test/test_config.py +0 -35
- teuthology/openstack/test/test_openstack.py +0 -1695
- teuthology/orchestra/test/__init__.py +0 -0
- teuthology/orchestra/test/integration/__init__.py +0 -0
- teuthology/orchestra/test/integration/test_integration.py +0 -94
- teuthology/orchestra/test/test_cluster.py +0 -240
- teuthology/orchestra/test/test_connection.py +0 -106
- teuthology/orchestra/test/test_console.py +0 -217
- teuthology/orchestra/test/test_opsys.py +0 -404
- teuthology/orchestra/test/test_remote.py +0 -185
- teuthology/orchestra/test/test_run.py +0 -286
- teuthology/orchestra/test/test_systemd.py +0 -54
- teuthology/orchestra/test/util.py +0 -12
- teuthology/task/tests/__init__.py +0 -110
- teuthology/task/tests/test_locking.py +0 -25
- teuthology/task/tests/test_run.py +0 -40
- teuthology/test/__init__.py +0 -0
- teuthology/test/fake_archive.py +0 -107
- teuthology/test/fake_fs.py +0 -92
- teuthology/test/integration/__init__.py +0 -0
- teuthology/test/integration/test_suite.py +0 -86
- teuthology/test/task/__init__.py +0 -205
- teuthology/test/task/test_ansible.py +0 -624
- teuthology/test/task/test_ceph_ansible.py +0 -176
- teuthology/test/task/test_console_log.py +0 -88
- teuthology/test/task/test_install.py +0 -337
- teuthology/test/task/test_internal.py +0 -57
- teuthology/test/task/test_kernel.py +0 -243
- teuthology/test/task/test_pcp.py +0 -379
- teuthology/test/task/test_selinux.py +0 -35
- teuthology/test/test_config.py +0 -189
- teuthology/test/test_contextutil.py +0 -68
- teuthology/test/test_describe_tests.py +0 -316
- teuthology/test/test_email_sleep_before_teardown.py +0 -81
- teuthology/test/test_exit.py +0 -97
- teuthology/test/test_get_distro.py +0 -47
- teuthology/test/test_get_distro_version.py +0 -47
- teuthology/test/test_get_multi_machine_types.py +0 -27
- teuthology/test/test_job_status.py +0 -60
- teuthology/test/test_ls.py +0 -48
- teuthology/test/test_misc.py +0 -391
- teuthology/test/test_nuke.py +0 -290
- teuthology/test/test_packaging.py +0 -763
- teuthology/test/test_parallel.py +0 -28
- teuthology/test/test_repo_utils.py +0 -225
- teuthology/test/test_report.py +0 -77
- teuthology/test/test_results.py +0 -155
- teuthology/test/test_run.py +0 -239
- teuthology/test/test_safepath.py +0 -55
- teuthology/test/test_schedule.py +0 -45
- teuthology/test/test_scrape.py +0 -167
- teuthology/test/test_timer.py +0 -80
- teuthology/test/test_vps_os_vers_parameter_checking.py +0 -84
- teuthology/test/test_worker.py +0 -303
- teuthology/worker.py +0 -354
- teuthology-1.1.0.dist-info/METADATA +0 -76
- teuthology-1.1.0.dist-info/RECORD +0 -213
- {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/LICENSE +0 -0
- {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/top_level.txt +0 -0
teuthology/suite/__init__.py
CHANGED
@@ -5,6 +5,7 @@
 import logging
 import os
 import random
+import sys
 import time
 from distutils.util import strtobool
 
@@ -43,8 +44,6 @@ def process_args(args):
         'ceph': 'ceph_branch',
         'sha1': 'ceph_sha1',
         'kernel': 'kernel_branch',
-        # FIXME: ceph flavor and kernel flavor are separate things
-        'flavor': 'kernel_flavor',
         '<config_yaml>': 'base_yaml_paths',
         'filter': 'filter_in',
     }
@@ -59,11 +58,14 @@ def process_args(args):
             value = normalize_suite_name(value)
         if key == 'suite_relpath' and value is None:
             value = ''
-        elif key in ('limit', 'priority', 'num', 'newest', 'seed'):
+        elif key in ('limit', 'priority', 'num', 'newest', 'seed', 'job_threshold'):
             value = int(value)
         elif key == 'subset' and value is not None:
             # take input string '2/3' and turn into (2, 3)
             value = tuple(map(int, value.split('/')))
+        elif key == 'expire' and value is None:
+            # Skip empty 'expire' values
+            continue
         elif key in ('filter_all', 'filter_in', 'filter_out', 'rerun_statuses'):
             if not value:
                 value = []
@@ -77,7 +79,7 @@ def process_args(args):
             value = expand_short_repo_name(
                 value,
                 config.get_ceph_qa_suite_git_url())
-        elif key in ('validate_sha1', 'filter_fragments'):
+        elif key in ('validate_sha1', 'filter_fragments', 'kdb'):
             value = strtobool(value)
         conf[key] = value
     return conf
@@ -111,11 +113,15 @@ def main(args):
     if conf.verbose:
         teuthology.log.setLevel(logging.DEBUG)
 
+    dry_run = conf.dry_run
     if not conf.machine_type or conf.machine_type == 'None':
-        schedule_fail("Must specify a machine_type")
+        if not config.default_machine_type or config.default_machine_type == 'None':
+            schedule_fail("Must specify a machine_type", dry_run=dry_run)
+        else:
+            conf.machine_type = config.default_machine_type
     elif 'multi' in conf.machine_type:
         schedule_fail("'multi' is not a valid machine_type. " +
-                      "Maybe you want '
+                      "Maybe you want 'gibba,smithi,mira' or similar", dry_run=dry_run)
 
     if conf.email:
         config.results_email = conf.email
@@ -124,16 +130,7 @@ def main(args):
         log.info('Will upload archives to ' + conf.archive_upload)
 
     if conf.rerun:
-        rerun_filters = get_rerun_filters(conf.rerun, conf.rerun_statuses)
-        if len(rerun_filters['descriptions']) == 0:
-            log.warn(
-                "No jobs matched the status filters: %s",
-                conf.rerun_statuses,
-            )
-            return
-        conf.filter_in.extend(rerun_filters['descriptions'])
-        conf.suite = normalize_suite_name(rerun_filters['suite'])
-        conf.subset, conf.seed = get_rerun_conf(conf)
+        get_rerun_conf_overrides(conf)
     if conf.seed < 0:
         conf.seed = random.randint(0, 9999)
         log.info('Using random seed=%s', conf.seed)
@@ -146,11 +143,68 @@
                     conf.archive_upload_url)
 
 
-def get_rerun_filters(name, statuses):
+def get_rerun_conf_overrides(conf):
     reporter = ResultsReporter()
-    run = reporter.get_run(
+    run = reporter.get_run(conf.rerun)
+
+    conf.suite = normalize_suite_name(run['suite'])
+
+    try:
+        job0 = run['jobs'][0]
+    except IndexError:
+        job0 = None
+
+    seed = None if job0 is None else job0.get('seed')
+    if conf.seed >= 0 and conf.seed != seed:
+        log.error('--seed %s does not match with rerun seed: %s',
+                  conf.seed, seed)
+        sys.exit(1)
+    else:
+        log.info('Using rerun seed=%s', seed)
+        conf.seed = seed
+
+    if job0 is not None:
+        subset = job0.get('subset', '1/1')
+        if subset is None:
+            subset = '1/1'
+        subset = tuple(map(int, subset.split('/')))
+    else:
+        subset = None
+    if conf.subset is not None and conf.subset != subset:
+        log.error('--subset %s does not match with '
+                  'rerun subset: %s',
+                  conf.subset, subset)
+        sys.exit(1)
+    else:
+        if subset == (1, 1):
+            conf.subset = None
+        else:
+            log.info('Using rerun subset=%s', subset)
+            conf.subset = subset
+
+    no_nested_subset = False if job0 is None else job0.get('no_nested_subset', False)
+    if conf.no_nested_subset is not None and conf.no_nested_subset != no_nested_subset:
+        log.error('--no-nested-subset specified but does not match with '
+                  'rerun --no-nested-subset: %s',
+                  no_nested_subset)
+        sys.exit(1)
+    else:
+        log.info('Using rerun no_nested_subset=%s', no_nested_subset)
+        conf.no_nested_subset = no_nested_subset
+
+    rerun_filters = get_rerun_filters(run, conf.rerun_statuses)
+    if len(rerun_filters['descriptions']) == 0:
+        log.warning(
+            "No jobs matched the status filters: %s",
+            conf.rerun_statuses,
+        )
+        return
+
+    conf.filter_in.extend(rerun_filters['descriptions'])
+
+
+def get_rerun_filters(run, statuses):
     filters = dict()
-    filters['suite'] = run['suite']
     jobs = []
     for job in run['jobs']:
         if job['status'] in statuses:
@@ -159,31 +213,6 @@ def get_rerun_filters(name, statuses):
     return filters
 
 
-def get_rerun_conf(conf):
-    reporter = ResultsReporter()
-    try:
-        subset, seed = reporter.get_rerun_conf(conf.rerun)
-    except IOError:
-        return conf.subset, conf.seed
-    if seed is None:
-        return conf.subset, conf.seed
-    if conf.seed < 0:
-        log.info('Using stored seed=%s', seed)
-    elif conf.seed != seed:
-        log.error('--seed {conf_seed} does not match with ' +
-                  'stored seed: {stored_seed}',
-                  conf_seed=conf.seed,
-                  stored_seed=seed)
-    if conf.subset is None:
-        log.info('Using stored subset=%s', subset)
-    elif conf.subset != subset:
-        log.error('--subset {conf_subset} does not match with ' +
-                  'stored subset: {stored_subset}',
-                  conf_subset=conf.subset,
-                  stored_subset=subset)
-    return subset, seed
-
-
 class WaitException(Exception):
     pass
 
@@ -228,5 +257,5 @@ def wait(name, max_job_time, upload_url):
             url = os.path.join(upload_url, name, job['job_id'])
         else:
            url = job['log_href']
-        log.info(job['status']
+        log.info(f"{job['status']} {url} {job['description']}")
     return exit_code
teuthology/suite/build_matrix.py
CHANGED
@@ -7,7 +7,7 @@ from teuthology.suite import matrix
 log = logging.getLogger(__name__)
 
 
-def build_matrix(path, subset=None, seed=None):
+def build_matrix(path, subset=None, no_nested_subset=False, seed=None):
     """
     Return a list of items descibed by path such that if the list of
     items is chunked into mincyclicity pieces, each piece is still a
@@ -33,7 +33,9 @@ def build_matrix(path, subset=None, seed=None):
 
     For a directory with a magic '%' file, we generate a result set
     for each item in the directory, and then do a product to generate
-    a result list with all combinations (A Product).
+    a result list with all combinations (A Product). If the file
+    contains an integer, it is used as the divisor for a random
+    subset.
 
     For a directory with a magic '$' file, or for a directory whose name
     ends in '$', we generate a list of all items that we will randomly
@@ -46,6 +48,7 @@ def build_matrix(path, subset=None, seed=None):
 
     :param path: The path to search for yaml fragments
     :param subset: (index, outof)
+    :param no_nested_subset: disable nested subsets
     :param seed: The seed for repeatable random test
     """
     if subset:
@@ -53,31 +56,24 @@ def build_matrix(path, subset=None, seed=None):
             'Subset=%s/%s' %
             (str(subset[0]), str(subset[1]))
         )
+    if no_nested_subset:
+        log.info("no_nested_subset")
     random.seed(seed)
-    mat, first, matlimit = _get_matrix(path, subset)
+    mat, first, matlimit = _get_matrix(path, subset, no_nested_subset)
     return generate_combinations(path, mat, first, matlimit)
 
 
-def _get_matrix(path, subset=None):
-    mat = None
-    first = None
-    matlimit = None
-    if subset:
-        (index, outof) = subset
-        mat = _build_matrix(path, mincyclicity=outof)
-        first = (mat.size() // outof) * index
-        if index == outof or index == outof - 1:
-            matlimit = mat.size()
-        else:
-            matlimit = (mat.size() // outof) * (index + 1)
+def _get_matrix(path, subset=None, no_nested_subset=False):
+    (which, divisions) = (0,1) if subset is None else subset
+    if divisions > 1:
+        mat = _build_matrix(path, mincyclicity=divisions, no_nested_subset=no_nested_subset)
+        mat = matrix.Subset(mat, divisions, which=which)
     else:
-        first = 0
-        mat = _build_matrix(path)
-        matlimit = mat.size()
-    return mat, first, matlimit
+        mat = _build_matrix(path, no_nested_subset=no_nested_subset)
+    return mat, 0, mat.size()
 
 
-def _build_matrix(path, mincyclicity=0, item=''):
+def _build_matrix(path, mincyclicity=0, no_nested_subset=False, item=''):
     if os.path.basename(path)[0] == '.':
         return None
     if not os.path.exists(path):
@@ -100,6 +96,7 @@ def _build_matrix(path, mincyclicity=0, item=''):
             submat = _build_matrix(
                 os.path.join(path, fn),
                 mincyclicity,
+                no_nested_subset,
                 fn)
             if submat is not None:
                 submats.append(submat)
@@ -115,6 +112,7 @@ def _build_matrix(path, mincyclicity=0, item=''):
             submat = _build_matrix(
                 os.path.join(path, fn),
                 mincyclicity,
+                no_nested_subset,
                 fn)
             if submat is not None:
                 submats.append(submat)
@@ -122,19 +120,30 @@ def _build_matrix(path, mincyclicity=0, item=''):
     elif '%' in files:
         # convolve items
         files.remove('%')
+        with open(os.path.join(path, '%')) as f:
+            divisions = f.read()
+        if no_nested_subset or len(divisions) == 0:
+            divisions = 1
+        else:
+            divisions = int(divisions)
+        assert divisions > 0
         submats = []
         for fn in sorted(files):
             submat = _build_matrix(
                 os.path.join(path, fn),
-                mincyclicity=0,
-                item=fn)
+                0,
+                no_nested_subset,
+                fn)
             if submat is not None:
                 submats.append(submat)
         mat = matrix.Product(item, submats)
-        if mat and mat.cyclicity() < mincyclicity:
+        minc = mincyclicity * divisions
+        if mat and mat.cyclicity() < minc:
             mat = matrix.Cycle(
-                (
+                (minc + mat.cyclicity() - 1) // mat.cyclicity(), mat
             )
+        if divisions > 1:
+            mat = matrix.Subset(mat, divisions)
         return mat
     else:
         # list items
@@ -143,6 +152,7 @@ def _build_matrix(path, mincyclicity=0, item=''):
             submat = _build_matrix(
                 os.path.join(path, fn),
                 mincyclicity,
+                no_nested_subset,
                 fn)
             if submat is None:
                 continue
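A note on the nested-subset change above: a '%' file whose content is an integer N now wraps that directory's product in a random subset with N divisions, and the new no_nested_subset argument forces the divisor back to 1. A minimal sketch of the two modes, using a hypothetical suite path that is not taken from this diff:

    from teuthology.suite.build_matrix import build_matrix

    # Nested subsets honoured: any '%' file carrying a divisor > 1
    # adds a matrix.Subset(..., divisions) layer around that product.
    nested = build_matrix('qa/suites/example', seed=7)

    # Nested subsets disabled: every '%' divisor is treated as 1, so the
    # full product for those directories is kept.
    flat = build_matrix('qa/suites/example', no_nested_subset=True, seed=7)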
teuthology/suite/fragment-merge.lua
ADDED
@@ -0,0 +1,105 @@
+-- allow only some Lua (and lunatic) builtins for use by scripts
+local lua_allowlist = {
+    assert = assert,
+    error = error,
+    ipairs = ipairs,
+    next = next,
+    pairs = pairs,
+    tonumber = tonumber,
+    tostring = tostring,
+    py_attrgetter = python.as_attrgetter,
+    py_dict = python.builtins.dict,
+    py_len = python.builtins.len,
+    py_list = python.builtins.list,
+    py_tuple = python.builtins.tuple,
+    py_enumerate = python.enumerate,
+    py_iterex = python.iterex,
+    py_itemgetter = python.as_itemgetter,
+    math = math,
+}
+lua_allowlist.__index = lua_allowlist
+
+-- accept a fragment/config (or just return true from the script!)
+local function accept()
+    coroutine.yield(true)
+end
+-- reject a fragment/config (or just return false from the script!)
+local function reject()
+    coroutine.yield(false)
+end
+-- this implements logic for filtering (via teuthology-suite CLI flags)
+local function matches(_ENV, f)
+    if description:find(f, 1, true) then
+        return true
+    end
+    if filter_fragments then
+        for i,path in py_enumerate(base_frag_paths) do
+            if path:find(f) then
+                return true
+            end
+        end
+    end
+end
+
+local function check_filters(_ENV)
+    if filter_all then
+        for i,f in py_enumerate(filter_all) do
+            if not matches(_ENV, f) then
+                reject()
+            end
+        end
+    end
+    if filter_in then
+        local found, tried = false, false
+        for i,f in py_enumerate(filter_in) do
+            tried = true
+            if matches(_ENV, f) then
+                found = true
+                break
+            end
+        end
+        if tried and not found then
+            reject()
+        end
+    end
+    if filter_out then
+        for i,f in py_enumerate(filter_out) do
+            if matches(_ENV, f) then
+                reject()
+            end
+        end
+    end
+end
+
+function new_script(script, log, deep_merge, yaml_load)
+    -- create a restricted sandbox for the script:
+    local env = setmetatable({
+        accept = accept,
+        deep_merge = deep_merge,
+        log = log,
+        reject = reject,
+        yaml_load = yaml_load,
+    }, lua_allowlist)
+
+    -- avoid putting check_filters in _ENV
+    -- try to keep line numbers correct:
+    local header = [[do local check_filters = ...; accept(); check_filters(_ENV) end local function main() do ]]
+    local footer = [[ end return true end return main()]]
+    local function chunks()
+        coroutine.yield(header)
+        if #script > 0 then
+            coroutine.yield(script)
+        end
+        coroutine.yield(footer)
+    end
+
+    -- put the script in a coroutine so we can yield success/failure from
+    -- anywhere in the script, including in nested function calls.
+    local f, err = load(coroutine.wrap(chunks), 'teuthology', 't', env)
+    if f == nil then
+        error("failure to load script: "..err)
+    end
+    f = coroutine.wrap(f)
+    f(check_filters)
+    return env, f
+end
teuthology/suite/matrix.py
CHANGED
@@ -1,7 +1,7 @@
 import os
 import random
 import heapq
-from fractions import gcd
+from math import gcd
 from functools import reduce
 
 def lcm(a, b):
@@ -75,6 +75,35 @@ class Cycle(Matrix):
     def tostr(self, depth):
         return '\t'*depth + "Cycle({num}):\n".format(num=self.num) + self.mat.tostr(depth + 1)
 
+# Logically, inverse of Cycle
+class Subset(Matrix):
+    """
+    Run a matrix subset.
+    """
+    def __init__(self, mat, divisions, which=None):
+        self.mat = mat
+        self.divisions = divisions
+        if which is None:
+            self.which = random.randint(0, divisions-1)
+        else:
+            assert which < divisions
+            self.which = which
+
+    def size(self):
+        return self.mat.size() // self.divisions
+
+    def index(self, i):
+        i += self.which * self.size()
+        assert i < self.mat.size()
+        return self.mat.index(i)
+
+    def minscanlen(self):
+        return self.mat.minscanlen()
+
+    def tostr(self, depth):
+        return '\t'*depth + "Subset({num}, {index}):\n".format(num=self.num, index=self.index) + self.mat.tostr(depth + 1)
+
+
 class Base(Matrix):
     """
     Just a single item.
@@ -248,7 +277,7 @@ class Sum(Matrix):
     """
     def __init__(self, item, _submats):
         assert len(_submats) > 0, \
-            "Sum requires non-empty _submats"
+            f"Sum requires non-empty _submats: {item}"
         self.item = item
 
         self._pseudo_size = lcml((i.size() for i in _submats)) * len(_submats)
teuthology/suite/merge.py
ADDED
@@ -0,0 +1,175 @@
+import copy
+import logging
+import lupa
+import os
+from types import MappingProxyType
+import yaml
+
+from teuthology.suite.build_matrix import combine_path
+from teuthology.suite.util import strip_fragment_path
+from teuthology.misc import deep_merge
+
+log = logging.getLogger(__name__)
+
+TEUTHOLOGY_TEMPLATE = MappingProxyType({
+  "teuthology": {
+    "fragments_dropped": [],
+    "meta": {},
+    "postmerge": [],
+  }
+})
+
+L = lupa.LuaRuntime()
+FRAGMENT_MERGE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fragment-merge.lua")
+with open(FRAGMENT_MERGE) as f:
+    L.execute(f.read())
+
+def config_merge(configs, suite_name=None, **kwargs):
+    """
+    This procedure selects and merges YAML fragments for each job in the
+    configs array generated for the matrix of jobs.
+
+    The primary task here is to run premerge and postmerge scripts specified
+    with the YAML fragments as part of filtering out jobs or individual YAML
+    fragments. This is done with Lua scripting (via "lupa", a "lunatic"
+    derivative).
+
+    A premerge script looks like:
+
+    <foo.yaml>
+    teuthology:
+      premerge: |
+        if yaml.os_type == 'ubuntu' then reject() end
+    </foo.yaml>
+
+    This script runs prior to a YAML fragment merging into the complete YAML
+    specification for a job. The script has access to the complete YAML
+    description generated so far as part of merging earlier fragments
+    (remember: fragments are ordered lexicographically). In the above case, the
+    os_type is checked with the foo.yaml fragment dropped if the job is
+    configured to run on Ubuntu (note: this does not account for a jobs'
+    default os_type which is not yet known).
+
+    The postmerge scripts look like:
+
+    <bar.yaml>
+    teuthology:
+      postmerge:
+        - if yaml.os_type == "ubuntu" then reject() end
+    </bar.yaml>
+
+    This script is the same but has a different effect: if, after combining all
+    the YAML fragments for a job, the os_type is "ubuntu", then the entire job
+    is dropped (filtered out / rejected). postmerge scripts are also specified
+    as a list of strings in the teuthology.postmerge array. All of these
+    strings are concatenated and then executed as a single script. So,
+    postmerge scripts from multiple fragments are all combined. You may use
+    this to define variables, functions, or anything else you need.
+
+    Scripts have access to the entire yaml object and may do any desired advanced
+    checks. It is also possible to programatically change the YAML definition:
+
+    <foo.yaml>
+    teuthology:
+      postmerge:
+        - |
+          local attr = py_attrgetter
+          local tasks = py_list()
+          for i = 1, 3 do
+            local task = py_dict(
+              exec = py_dict(py_list(
+                py_tuple("mon.a", py_list(
+                  "echo "..i
+                )
+              ))
+            )
+            attr(tasks).append(task)
+          end
+          deep_merge(yaml.tasks, tasks)
+    </foo.yaml>
+
+    This will be as if the yaml file contained:
+
+    <foo.yaml>
+    tasks:
+      exec:
+        mon.a:
+          - echo 1
+      exec:
+        mon.a:
+          - echo 2
+      exec:
+        mon.a:
+          - echo 3
+    </foo.yaml>
+
+    Which will be merged normally (via deep_merge) after the script is run.
+
+    Scripts are well sandboxed with access to a small selection of the Lua
+    builtin libraries. There is also access to some python/lupa specific
+    functions which are prefixed with "py_". No I/O or other system functions
+    permitted.
+
+    The teuthology-suite filtering options are now implemented via builtin
+    postmerge scripts. Logically, if a filter matches then reject will drop
+    the entire job (config) from the list.
+    """
+    seed = kwargs.setdefault('seed', 1)
+    if not isinstance(seed, int):
+        log.debug("no valid seed input: using 1")
+        seed = 1
+    log.debug("configuring Lua randomseed to %d", seed)
+    L.execute(f'local math = require"math"; math.randomseed({seed});')
+    new_script = L.eval('new_script')
+    yaml_cache = {}
+    for desc, paths in configs:
+        log.debug("merging config %s", desc)
+
+        if suite_name is not None:
+            desc = combine_path(suite_name, desc)
+
+        yaml_complete_obj = {}
+        deep_merge(yaml_complete_obj, dict(TEUTHOLOGY_TEMPLATE))
+        for path in paths:
+            if path not in yaml_cache:
+                with open(path) as f:
+                    txt = f.read()
+                    yaml_cache[path] = (txt, yaml.safe_load(txt))
+
+            yaml_fragment_txt, yaml_fragment_obj = yaml_cache[path]
+            if yaml_fragment_obj is None:
+                continue
+            yaml_fragment_obj = copy.deepcopy(yaml_fragment_obj)
+            premerge = yaml_fragment_obj.get('teuthology', {}).pop('premerge', '')
+            if premerge:
+                log.debug("premerge script running:\n%s", premerge)
+                env, script = new_script(premerge, log, deep_merge, yaml.safe_load)
+                env['base_frag_paths'] = [strip_fragment_path(x) for x in paths]
+                env['description'] = desc
+                env['frag_paths'] = paths
+                env['suite_name'] = suite_name
+                env['yaml'] = yaml_complete_obj
+                env['yaml_fragment'] = yaml_fragment_obj
+                for k,v in kwargs.items():
+                    env[k] = v
+                if not script():
+                    log.debug("skipping merge of fragment %s due to premerge filter", path)
+                    yaml_complete_obj['teuthology']['fragments_dropped'].append(path)
+                    continue
+            deep_merge(yaml_complete_obj, yaml_fragment_obj)
+
+        postmerge = yaml_complete_obj.get('teuthology', {}).get('postmerge', [])
+        postmerge = "\n".join(postmerge)
+        log.debug("postmerge script running:\n%s", postmerge)
+        env, script = new_script(postmerge, log, deep_merge, yaml.safe_load)
+        env['base_frag_paths'] = [strip_fragment_path(x) for x in paths]
+        env['description'] = desc
+        env['frag_paths'] = paths
+        env['suite_name'] = suite_name
+        env['yaml'] = yaml_complete_obj
+        for k,v in kwargs.items():
+            env[k] = v
+        if not script():
+            log.debug("skipping config %s due to postmerge filter", desc)
+            continue
+        yield desc, paths, yaml_complete_obj