teuthology 1.0.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (172)
  1. scripts/describe.py +1 -0
  2. scripts/dispatcher.py +62 -0
  3. scripts/exporter.py +18 -0
  4. scripts/lock.py +1 -1
  5. scripts/node_cleanup.py +58 -0
  6. scripts/openstack.py +9 -9
  7. scripts/results.py +12 -11
  8. scripts/run.py +4 -0
  9. scripts/schedule.py +4 -0
  10. scripts/suite.py +61 -16
  11. scripts/supervisor.py +44 -0
  12. scripts/update_inventory.py +10 -4
  13. scripts/wait.py +31 -0
  14. teuthology/__init__.py +24 -21
  15. teuthology/beanstalk.py +4 -3
  16. teuthology/config.py +17 -6
  17. teuthology/contextutil.py +18 -14
  18. teuthology/describe_tests.py +25 -18
  19. teuthology/dispatcher/__init__.py +365 -0
  20. teuthology/dispatcher/supervisor.py +374 -0
  21. teuthology/exceptions.py +54 -0
  22. teuthology/exporter.py +347 -0
  23. teuthology/kill.py +76 -75
  24. teuthology/lock/cli.py +16 -7
  25. teuthology/lock/ops.py +276 -70
  26. teuthology/lock/query.py +61 -44
  27. teuthology/ls.py +9 -18
  28. teuthology/misc.py +152 -137
  29. teuthology/nuke/__init__.py +12 -351
  30. teuthology/openstack/__init__.py +4 -3
  31. teuthology/openstack/openstack-centos-7.0-user-data.txt +1 -1
  32. teuthology/openstack/openstack-centos-7.1-user-data.txt +1 -1
  33. teuthology/openstack/openstack-centos-7.2-user-data.txt +1 -1
  34. teuthology/openstack/openstack-debian-8.0-user-data.txt +1 -1
  35. teuthology/openstack/openstack-opensuse-42.1-user-data.txt +1 -1
  36. teuthology/openstack/openstack-teuthology.cron +0 -1
  37. teuthology/orchestra/cluster.py +51 -9
  38. teuthology/orchestra/connection.py +23 -16
  39. teuthology/orchestra/console.py +111 -50
  40. teuthology/orchestra/daemon/cephadmunit.py +23 -5
  41. teuthology/orchestra/daemon/state.py +10 -3
  42. teuthology/orchestra/daemon/systemd.py +10 -8
  43. teuthology/orchestra/opsys.py +32 -11
  44. teuthology/orchestra/remote.py +369 -152
  45. teuthology/orchestra/run.py +21 -12
  46. teuthology/packaging.py +54 -15
  47. teuthology/provision/__init__.py +30 -10
  48. teuthology/provision/cloud/openstack.py +12 -6
  49. teuthology/provision/cloud/util.py +1 -2
  50. teuthology/provision/downburst.py +83 -29
  51. teuthology/provision/fog.py +68 -20
  52. teuthology/provision/openstack.py +5 -4
  53. teuthology/provision/pelagos.py +13 -5
  54. teuthology/repo_utils.py +91 -44
  55. teuthology/report.py +57 -35
  56. teuthology/results.py +5 -3
  57. teuthology/run.py +21 -15
  58. teuthology/run_tasks.py +114 -40
  59. teuthology/schedule.py +4 -3
  60. teuthology/scrape.py +28 -22
  61. teuthology/suite/__init__.py +75 -46
  62. teuthology/suite/build_matrix.py +34 -24
  63. teuthology/suite/fragment-merge.lua +105 -0
  64. teuthology/suite/matrix.py +31 -2
  65. teuthology/suite/merge.py +175 -0
  66. teuthology/suite/placeholder.py +8 -8
  67. teuthology/suite/run.py +204 -102
  68. teuthology/suite/util.py +67 -211
  69. teuthology/task/__init__.py +1 -1
  70. teuthology/task/ansible.py +101 -31
  71. teuthology/task/buildpackages.py +2 -2
  72. teuthology/task/ceph_ansible.py +13 -6
  73. teuthology/task/cephmetrics.py +2 -1
  74. teuthology/task/clock.py +33 -14
  75. teuthology/task/exec.py +18 -0
  76. teuthology/task/hadoop.py +2 -2
  77. teuthology/task/install/__init__.py +51 -22
  78. teuthology/task/install/bin/adjust-ulimits +16 -0
  79. teuthology/task/install/bin/daemon-helper +114 -0
  80. teuthology/task/install/bin/stdin-killer +263 -0
  81. teuthology/task/install/deb.py +24 -4
  82. teuthology/task/install/redhat.py +36 -32
  83. teuthology/task/install/rpm.py +41 -14
  84. teuthology/task/install/util.py +48 -22
  85. teuthology/task/internal/__init__.py +69 -11
  86. teuthology/task/internal/edit_sudoers.sh +10 -0
  87. teuthology/task/internal/lock_machines.py +3 -133
  88. teuthology/task/internal/redhat.py +48 -28
  89. teuthology/task/internal/syslog.py +31 -8
  90. teuthology/task/kernel.py +155 -147
  91. teuthology/task/lockfile.py +1 -1
  92. teuthology/task/mpi.py +10 -10
  93. teuthology/task/pcp.py +1 -1
  94. teuthology/task/selinux.py +17 -8
  95. teuthology/task/ssh_keys.py +6 -6
  96. teuthology/task/tests/__init__.py +137 -77
  97. teuthology/task/tests/test_fetch_coredumps.py +116 -0
  98. teuthology/task/tests/test_run.py +4 -4
  99. teuthology/timer.py +3 -3
  100. teuthology/util/loggerfile.py +19 -0
  101. teuthology/util/scanner.py +159 -0
  102. teuthology/util/sentry.py +52 -0
  103. teuthology/util/time.py +52 -0
  104. teuthology-1.2.0.data/scripts/adjust-ulimits +16 -0
  105. teuthology-1.2.0.data/scripts/daemon-helper +114 -0
  106. teuthology-1.2.0.data/scripts/stdin-killer +263 -0
  107. teuthology-1.2.0.dist-info/METADATA +89 -0
  108. teuthology-1.2.0.dist-info/RECORD +174 -0
  109. {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/WHEEL +1 -1
  110. {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/entry_points.txt +5 -2
  111. scripts/nuke.py +0 -45
  112. scripts/worker.py +0 -37
  113. teuthology/nuke/actions.py +0 -456
  114. teuthology/openstack/test/__init__.py +0 -0
  115. teuthology/openstack/test/openstack-integration.py +0 -286
  116. teuthology/openstack/test/test_config.py +0 -35
  117. teuthology/openstack/test/test_openstack.py +0 -1695
  118. teuthology/orchestra/test/__init__.py +0 -0
  119. teuthology/orchestra/test/integration/__init__.py +0 -0
  120. teuthology/orchestra/test/integration/test_integration.py +0 -94
  121. teuthology/orchestra/test/test_cluster.py +0 -240
  122. teuthology/orchestra/test/test_connection.py +0 -106
  123. teuthology/orchestra/test/test_console.py +0 -217
  124. teuthology/orchestra/test/test_opsys.py +0 -404
  125. teuthology/orchestra/test/test_remote.py +0 -185
  126. teuthology/orchestra/test/test_run.py +0 -286
  127. teuthology/orchestra/test/test_systemd.py +0 -54
  128. teuthology/orchestra/test/util.py +0 -12
  129. teuthology/sentry.py +0 -18
  130. teuthology/test/__init__.py +0 -0
  131. teuthology/test/fake_archive.py +0 -107
  132. teuthology/test/fake_fs.py +0 -92
  133. teuthology/test/integration/__init__.py +0 -0
  134. teuthology/test/integration/test_suite.py +0 -86
  135. teuthology/test/task/__init__.py +0 -205
  136. teuthology/test/task/test_ansible.py +0 -624
  137. teuthology/test/task/test_ceph_ansible.py +0 -176
  138. teuthology/test/task/test_console_log.py +0 -88
  139. teuthology/test/task/test_install.py +0 -337
  140. teuthology/test/task/test_internal.py +0 -57
  141. teuthology/test/task/test_kernel.py +0 -243
  142. teuthology/test/task/test_pcp.py +0 -379
  143. teuthology/test/task/test_selinux.py +0 -35
  144. teuthology/test/test_config.py +0 -189
  145. teuthology/test/test_contextutil.py +0 -68
  146. teuthology/test/test_describe_tests.py +0 -316
  147. teuthology/test/test_email_sleep_before_teardown.py +0 -81
  148. teuthology/test/test_exit.py +0 -97
  149. teuthology/test/test_get_distro.py +0 -47
  150. teuthology/test/test_get_distro_version.py +0 -47
  151. teuthology/test/test_get_multi_machine_types.py +0 -27
  152. teuthology/test/test_job_status.py +0 -60
  153. teuthology/test/test_ls.py +0 -48
  154. teuthology/test/test_misc.py +0 -368
  155. teuthology/test/test_nuke.py +0 -232
  156. teuthology/test/test_packaging.py +0 -763
  157. teuthology/test/test_parallel.py +0 -28
  158. teuthology/test/test_repo_utils.py +0 -204
  159. teuthology/test/test_report.py +0 -77
  160. teuthology/test/test_results.py +0 -155
  161. teuthology/test/test_run.py +0 -238
  162. teuthology/test/test_safepath.py +0 -55
  163. teuthology/test/test_schedule.py +0 -45
  164. teuthology/test/test_scrape.py +0 -167
  165. teuthology/test/test_timer.py +0 -80
  166. teuthology/test/test_vps_os_vers_parameter_checking.py +0 -84
  167. teuthology/test/test_worker.py +0 -303
  168. teuthology/worker.py +0 -339
  169. teuthology-1.0.0.dist-info/METADATA +0 -76
  170. teuthology-1.0.0.dist-info/RECORD +0 -210
  171. {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/LICENSE +0 -0
  172. {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/top_level.txt +0 -0
teuthology/misc.py CHANGED
@@ -7,6 +7,7 @@ import os
  import logging
  import configobj
  import getpass
+ import shutil
  import socket
  import subprocess
  import tarfile
@@ -14,7 +15,13 @@ import time
  import yaml
  import json
  import re
+ from sys import stdin
  import pprint
+ import datetime
+
+ from tarfile import ReadError
+
+ from typing import Optional, TypeVar

  from teuthology.util.compat import urljoin, urlopen, HTTPError

@@ -31,13 +38,12 @@ from teuthology.orchestra.opsys import DEFAULT_OS_VERSION

  log = logging.getLogger(__name__)

- import datetime
  stamp = datetime.datetime.now().strftime("%y%m%d%H%M")

  is_arm = lambda x: x.startswith('tala') or x.startswith(
      'ubuntu@tala') or x.startswith('saya') or x.startswith('ubuntu@saya')

- hostname_expr_templ = '(?P<user>.*@)?(?P<shortname>.*)\.{lab_domain}'
+ hostname_expr_templ = '(?P<user>.*@)?(?P<shortname>.*){lab_domain}'

  def host_shortname(hostname):
      if _is_ipv4(hostname) or _is_ipv6(hostname):
@@ -45,9 +51,9 @@ def host_shortname(hostname):
      else:
          return hostname.split('.', 1)[0]

- def canonicalize_hostname(hostname, user='ubuntu'):
+ def canonicalize_hostname(hostname, user: Optional[str] ='ubuntu'):
      hostname_expr = hostname_expr_templ.format(
-         lab_domain=config.lab_domain.replace('.', '\.'))
+         lab_domain=config.lab_domain.replace('.', r'\.'))
      match = re.match(hostname_expr, hostname)
      if _is_ipv4(hostname) or _is_ipv6(hostname):
          return "%s@%s" % (user, hostname)
@@ -63,18 +69,22 @@ def canonicalize_hostname(hostname, user='ubuntu'):
          user_ = user

      user_at = user_.strip('@') + '@' if user_ else ''
-
-     ret = '{user_at}{short}.{lab_domain}'.format(
+     domain = config.lab_domain
+     if domain and not shortname.endswith('.'):
+         domain = '.' + domain
+     ret = '{user_at}{short}{domain}'.format(
          user_at=user_at,
          short=shortname,
-         lab_domain=config.lab_domain,
+         domain=domain,
      )
      return ret


  def decanonicalize_hostname(hostname):
-     hostname_expr = hostname_expr_templ.format(
-         lab_domain=config.lab_domain.replace('.', '\.'))
+     lab_domain = ''
+     if config.lab_domain:
+         lab_domain=r'\.' + config.lab_domain.replace('.', r'\.')
+     hostname_expr = hostname_expr_templ.format(lab_domain=lab_domain)
      match = re.match(hostname_expr, hostname)
      if match:
          hostname = match.groupdict()['shortname']
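The two hunks above change how the lab domain is joined onto short hostnames: an empty `teuthology.config.lab_domain` no longer produces a trailing dot, and `decanonicalize_hostname` only strips a domain when one is configured. A minimal sketch of the intended round trip, assuming an illustrative `lab_domain` of `example.com` (not a value shipped with the package):

```python
# Sketch only; the expected values follow from the regex and formatting above.
from teuthology.config import config
from teuthology.misc import canonicalize_hostname, decanonicalize_hostname

config.lab_domain = 'example.com'                        # assumed for the sketch
canonicalize_hostname('smithi001')                       # -> 'ubuntu@smithi001.example.com'
decanonicalize_hostname('ubuntu@smithi001.example.com')  # -> 'smithi001'

config.lab_domain = ''                                   # no lab domain configured
canonicalize_hostname('smithi001')                       # -> 'ubuntu@smithi001' (no trailing dot)
```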
@@ -99,31 +109,20 @@ def config_file(string):
      return config_dict


- class MergeConfig(argparse.Action):
-     """
-     Used by scripts to mergeg configurations. (nuke, run, and
-     schedule, for example)
-     """
-     def __call__(self, parser, namespace, values, option_string=None):
-         """
-         Perform merges of all the day in the config dictionaries.
-         """
-         config_dict = getattr(namespace, self.dest)
-         for new in values:
-             deep_merge(config_dict, new)
-
-
- def merge_configs(config_paths):
+ def merge_configs(config_paths) -> dict:
      """ Takes one or many paths to yaml config files and merges them
      together, returning the result.
      """
      conf_dict = dict()
      for conf_path in config_paths:
-         if not os.path.exists(conf_path):
+         if conf_path == "-":
+             partial_dict = yaml.safe_load(stdin)
+         elif not os.path.exists(conf_path):
              log.debug("The config path {0} does not exist, skipping.".format(conf_path))
              continue
-         with open(conf_path) as partial_file:
-             partial_dict = yaml.safe_load(partial_file)
+         else:
+             with open(conf_path) as partial_file:
+                 partial_dict: dict = yaml.safe_load(partial_file)
          try:
              conf_dict = deep_merge(conf_dict, partial_dict)
          except Exception:
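`merge_configs` now accepts the pseudo-path `-`, which reads a YAML fragment from stdin and deep-merges it over the fragments loaded from real files. A small sketch of the call; the file name and piped content are illustrative:

```python
# e.g.  echo "overrides: {verbose: true}" | <some scheduling command> base.yaml -
from teuthology.misc import merge_configs

conf = merge_configs(["base.yaml", "-"])   # "-" pulls YAML from sys.stdin
# Later fragments are deep-merged into earlier ones, so scalar keys read from
# stdin override what came from base.yaml, while lists are extended.
```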
@@ -220,13 +219,13 @@ def get_ceph_binary_url(package=None,
          assert tag is None, "cannot set both sha1 and tag"
      else:
          # gitbuilder uses remote-style ref names for branches, mangled to
-         # have underscores instead of slashes; e.g. origin_master
+         # have underscores instead of slashes; e.g. origin_main
          if tag is not None:
              ref = tag
              assert branch is None, "cannot set both branch and tag"
          else:
              if branch is None:
-                 branch = 'master'
+                 branch = 'main'
              ref = branch

          sha1_url = urljoin(BASE, 'ref/{ref}/sha1'.format(ref=ref))
@@ -521,14 +520,7 @@ def write_file(remote, path, data):
      :param path: Path on the remote being written to.
      :param data: Data to be written.
      """
-     remote.run(
-         args=[
-             'cat',
-             run.Raw('>'),
-             path,
-         ],
-         stdin=data,
-     )
+     remote.write_file(path, data)


  def sudo_write_file(remote, path, data, perms=None, owner=None):
@@ -543,21 +535,7 @@ def sudo_write_file(remote, path, data, perms=None, owner=None):

      Both perms and owner are passed directly to chmod.
      """
-     permargs = []
-     if perms:
-         permargs = [run.Raw('&&'), 'sudo', 'chmod', perms, path]
-     owner_args = []
-     if owner:
-         owner_args = [run.Raw('&&'), 'sudo', 'chown', owner, path]
-     remote.run(
-         args=[
-             'sudo',
-             'sh',
-             '-c',
-             'cat > ' + path,
-         ] + owner_args + permargs,
-         stdin=data,
-     )
+     remote.sudo_write_file(path, data, mode=perms, owner=owner)


  def copy_file(from_remote, from_path, to_remote, to_path=None):
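Both helpers above are now thin wrappers around the corresponding `Remote` methods. Callers going through `teuthology.misc` keep the old signatures, but the same effect can be had directly on the remote object; a sketch with placeholder path, contents, mode and owner:

```python
# Placeholder values; perms/owner are passed through to chmod/chown as before.
remote.write_file('/tmp/example.conf', 'key = value\n')
remote.sudo_write_file('/etc/example.conf', 'key = value\n',
                       mode='0644', owner='root:root')
```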
@@ -645,7 +623,7 @@ def remove_lines_from_file(remote, path, line_is_valid_test,
      on when the main site goes up and down.
      """
      # read in the specified file
-     in_data = get_file(remote, path, False).decode()
+     in_data = remote.read_file(path, False).decode()
      out_data = ""

      first_line = True
@@ -677,22 +655,8 @@ def remove_lines_from_file(remote, path, line_is_valid_test,
  def append_lines_to_file(remote, path, lines, sudo=False):
      """
      Append lines to a file.
-     An intermediate file is used in the same manner as in
-     Remove_lines_from_list.
      """
-
-     temp_file_path = remote.mktemp()
-
-     data = get_file(remote, path, sudo).decode()
-
-     # add the additional data and write it back out, using a temp file
-     # in case of connectivity of loss, and then mv it to the
-     # actual desired location
-     data += lines
-     write_file(remote, temp_file_path, data)
-
-     # then do a 'mv' to the actual file location
-     move_file(remote, temp_file_path, path, sudo)
+     remote.write_file(path, lines, append=True, sudo=sudo)

  def prepend_lines_to_file(remote, path, lines, sudo=False):
      """
@@ -702,17 +666,9 @@ def prepend_lines_to_file(remote, path, lines, sudo=False):
      """

      temp_file_path = remote.mktemp()
-
-     data = get_file(remote, path, sudo).decode()
-
-     # add the additional data and write it back out, using a temp file
-     # in case of connectivity of loss, and then mv it to the
-     # actual desired location
-     data = lines + data
-     write_file(remote, temp_file_path, data)
-
-     # then do a 'mv' to the actual file location
-     move_file(remote, temp_file_path, path, sudo)
+     remote.write_file(temp_file_path, lines)
+     remote.copy_file(path, temp_file_path, append=True, sudo=sudo)
+     remote.move_file(temp_file_path, path, sudo=sudo)


  def create_file(remote, path, data="", permissions=str(644), sudo=False):
@@ -753,16 +709,31 @@ def get_file(remote, path, sudo=False, dest_dir='/tmp'):
      return file_data


- def pull_directory(remote, remotedir, localdir):
+ def copy_fileobj(src, tarinfo, local_path):
+     with open(local_path, 'wb') as dest:
+         shutil.copyfileobj(src, dest)
+
+
+ def pull_directory(remote, remotedir, localdir, write_to=copy_fileobj):
      """
      Copy a remote directory to a local directory.
+
+     :param remote: the remote object representing the remote host from where
+                    the specified directory is pulled
+     :param remotedir: the source directory on remote host
+     :param localdir: the destination directory on localhost
+     :param write_to: optional function to write the file to localdir.
+                      its signature should be:
+                      func(src: fileobj,
+                           tarinfo: tarfile.TarInfo,
+                           local_path: str)
      """
      log.debug('Transferring archived files from %s:%s to %s',
                remote.shortname, remotedir, localdir)
      if not os.path.exists(localdir):
          os.mkdir(localdir)
-     r = remote.get_tar_stream(remotedir, sudo=True)
-     tar = tarfile.open(mode='r|gz', fileobj=r.stdout)
+     r = remote.get_tar_stream(remotedir, sudo=True, compress=False)
+     tar = tarfile.open(mode='r|', fileobj=r.stdout)
      while True:
          ti = tar.next()
          if ti is None:
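`pull_directory` now streams an uncompressed tar and hands each regular file to the `write_to` callback (defaulting to the plain `copy_fileobj` above). A hedged sketch of a custom callback matching the documented `func(src, tarinfo, local_path)` signature; the gzip-on-the-fly behaviour is purely illustrative, not something the package ships:

```python
import gzip
import shutil

def gzip_fileobj(src, tarinfo, local_path):
    """Illustrative write_to callback: store each pulled file gzip-compressed."""
    with gzip.open(local_path + '.gz', 'wb') as dest:
        shutil.copyfileobj(src, dest)

# pull_directory(remote, '/var/log/ceph', '/tmp/archive', write_to=gzip_fileobj)
```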
@@ -775,7 +746,8 @@ def pull_directory(remote, remotedir, localdir):
          elif ti.isfile():
              sub = safepath.munge(ti.name)
              safepath.makedirs(root=localdir, path=os.path.dirname(sub))
-             tar.makefile(ti, targetpath=os.path.join(localdir, sub))
+             with tar.extractfile(ti) as src:
+                 write_to(src, ti, os.path.join(localdir, sub))
          else:
              if ti.isdev():
                  type_ = 'device'
@@ -798,7 +770,7 @@ def pull_directory_tarball(remote, remotedir, localfile):


  def get_wwn_id_map(remote, devs):
-     log.warn("Entering get_wwn_id_map, a deprecated function that will be removed")
+     log.warning("Entering get_wwn_id_map, a deprecated function that will be removed")
      return dict((d, d) for d in devs)


@@ -808,7 +780,7 @@ def get_scratch_devices(remote):
      """
      devs = []
      try:
-         file_data = get_file(remote, "/scratch_devs").decode()
+         file_data = remote.read_file("/scratch_devs").decode()
          devs = file_data.split()
      except Exception:
          devs = remote.sh('ls /dev/[sv]d?').strip().split('\n')
@@ -817,33 +789,31 @@ def get_scratch_devices(remote):
      for dev in devs:
          if 'vda' in dev:
              devs.remove(dev)
-             log.warn("Removing root device: %s from device list" % dev)
+             log.warning("Removing root device: %s from device list" % dev)

      log.debug('devs={d}'.format(d=devs))

      retval = []
      for dev in devs:
-         try:
-             # FIXME: Split this into multiple calls.
-             remote.run(
-                 args=[
-                     # node exists
-                     'stat',
-                     dev,
-                     run.Raw('&&'),
-                     # readable
-                     'sudo', 'dd', 'if=%s' % dev, 'of=/dev/null', 'count=1',
-                     run.Raw('&&'),
-                     # not mounted
-                     run.Raw('!'),
-                     'mount',
-                     run.Raw('|'),
-                     'grep', '-q', dev,
-                 ]
-             )
+         dev_checks = [
+             [['stat', dev], "does not exist"],
+             [['sudo', 'dd', 'if=%s' % dev, 'of=/dev/null', 'count=1'], "is not readable"],
+             [
+                 [run.Raw('!'), 'mount', run.Raw('|'), 'grep', '-v', 'devtmpfs', run.Raw('|'),
+                  'grep', '-q', dev],
+                 "is in use"
+             ],
+         ]
+         for args, msg in dev_checks:
+             try:
+                 remote.run(args=args)
+             except CommandFailedError:
+                 log.debug(f"get_scratch_devices: {dev} {msg}")
+                 break
+         else:
              retval.append(dev)
-         except CommandFailedError:
-             log.debug("get_scratch_devices: %s is in use" % dev)
+             continue
+         break
      return retval


@@ -1015,7 +985,8 @@ def replace_all_with_clients(cluster, config):
      return norm_config


- def deep_merge(a, b):
+ DeepMerge = TypeVar('DeepMerge')
+ def deep_merge(a: DeepMerge, b: DeepMerge) -> DeepMerge:
      """
      Deep Merge. If a and b are both lists, all elements in b are
      added into a. If a and b are both dictionaries, elements in b are
@@ -1023,10 +994,10 @@ def deep_merge(a, b):
      :param a: object items will be merged into
      :param b: object items will be merged from
      """
-     if a is None:
-         return b
      if b is None:
          return a
+     if a is None:
+         return deep_merge(b.__class__(), b)
      if isinstance(a, list):
          assert isinstance(b, list)
          a.extend(b)
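A behavioural consequence of swapping the `None` checks: `deep_merge(None, b)` now recurses into a fresh `b.__class__()` instead of returning `b` itself, so the result no longer aliases the caller's data. A short sketch of the expected semantics:

```python
from teuthology.misc import deep_merge

b = {'roles': ['mon.a'], 'overrides': {'verbose': True}}
merged = deep_merge(None, b)
assert merged == b and merged is not b   # equal contents, distinct containers
merged['roles'].append('osd.0')          # does not touch b['roles']
```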
@@ -1034,15 +1005,12 @@ def deep_merge(a, b):
      if isinstance(a, dict):
          assert isinstance(b, dict)
          for (k, v) in b.items():
-             if k in a:
-                 a[k] = deep_merge(a[k], v)
-             else:
-                 a[k] = v
+             a[k] = deep_merge(a.get(k), v)
          return a
      return b


- def get_valgrind_args(testdir, name, preamble, v):
+ def get_valgrind_args(testdir, name, preamble, v, exit_on_first_error=True):
      """
      Build a command line for running valgrind.

@@ -1067,28 +1035,31 @@ def get_valgrind_args(testdir, name, preamble, v):
              'valgrind',
              '--trace-children=no',
              '--child-silent-after-fork=yes',
+             '--soname-synonyms=somalloc=*tcmalloc*',
              '--num-callers=50',
              '--suppressions={tdir}/valgrind.supp'.format(tdir=testdir),
              '--xml=yes',
              '--xml-file={vdir}/{n}.log'.format(vdir=val_path, n=name),
              '--time-stamp=yes',
              '--vgdb=yes',
-             # at least Valgrind 3.14 is required
-             '--exit-on-first-error=yes',
-             '--error-exitcode=42',
          ]
      else:
          extra_args = [
              'valgrind',
              '--trace-children=no',
              '--child-silent-after-fork=yes',
+             '--soname-synonyms=somalloc=*tcmalloc*',
              '--suppressions={tdir}/valgrind.supp'.format(tdir=testdir),
              '--log-file={vdir}/{n}.log'.format(vdir=val_path, n=name),
              '--time-stamp=yes',
              '--vgdb=yes',
+         ]
+     if exit_on_first_error:
+         extra_args.extend([
+             # at least Valgrind 3.14 is required
              '--exit-on-first-error=yes',
              '--error-exitcode=42',
-         ]
+         ])
      args = [
          'cd', testdir,
          run.Raw('&&'),
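`get_valgrind_args` keeps its old behaviour by default (`exit_on_first_error=True`); passing `False` drops the `--exit-on-first-error`/`--error-exitcode` flags so a daemon keeps running after the first detected error. A sketch of a call; the testdir, daemon name and preamble values are placeholders:

```python
args = get_valgrind_args(
    testdir='/home/ubuntu/cephtest',      # placeholder
    name='osd.0',                         # placeholder
    preamble=['daemon-helper', 'kill'],   # placeholder
    v=['--tool=memcheck'],
    exit_on_first_error=False,            # omit --exit-on-first-error/--error-exitcode
)
```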
@@ -1113,7 +1084,8 @@ def ssh_keyscan(hostnames, _raise=True):
      for hostname in hostnames:
          with safe_while(
              sleep=1,
-             tries=5 if _raise else 1,
+             tries=15 if _raise else 1,
+             increment=1,
              _raise=_raise,
              action="ssh_keyscan " + hostname,
          ) as proceed:
@@ -1126,7 +1098,7 @@ def ssh_keyscan(hostnames, _raise=True):
      missing = set(hostnames) - set(keys_dict.keys())
      msg = "Unable to scan these host keys: %s" % ' '.join(missing)
      if not _raise:
-         log.warn(msg)
+         log.warning(msg)
      else:
          raise RuntimeError(msg)
      return keys_dict
@@ -1139,7 +1111,7 @@ def _ssh_keyscan(hostname):
      :param hostname: The hostname
      :returns: The host key
      """
-     args = ['ssh-keyscan', '-T', '1', '-t', 'rsa', hostname]
+     args = ['ssh-keyscan', '-T', '1', hostname]
      p = subprocess.Popen(
          args=args,
          stdout=subprocess.PIPE,
@@ -1151,9 +1123,12 @@ def _ssh_keyscan(hostname):
          line = line.strip()
          if line and not line.startswith('#'):
              log.error(line)
+     keys = list()
      for line in p.stdout:
          host, key = line.strip().decode().split(' ', 1)
-         return key
+         keys.append(key)
+     if len(keys) > 0:
+         return sorted(keys)[0]


  def ssh_keyscan_wait(hostname):
@@ -1194,28 +1169,19 @@ def stop_daemons_of_type(ctx, type_, cluster='ceph'):
  def get_system_type(remote, distro=False, version=False):
      """
      If distro, return distro.
-     If version, return version (lsb_release -rs)
+     If version, return version
      If both, return both.
      If neither, return 'deb' or 'rpm' if distro is known to be one of those
-     Finally, if unknown, return the unfiltered distro (from lsb_release -is)
      """
-     system_value = remote.sh('sudo lsb_release -is').strip()
-     log.debug("System to be installed: %s" % system_value)
      if version:
-         version = remote.sh('sudo lsb_release -rs').strip()
+         version = remote.os.version
      if distro and version:
-         return system_value.lower(), version
+         return remote.os.name, version
      if distro:
-         return system_value.lower()
+         return remote.os.name
      if version:
          return version
-     if system_value in ['Ubuntu', 'Debian']:
-         return "deb"
-     if system_value in ['CentOS', 'Fedora', 'RedHatEnterpriseServer',
-                         'RedHatEnterprise',
-                         'openSUSE', 'openSUSE project', 'SUSE', 'SUSE LINUX']:
-         return "rpm"
-     return system_value
+     return remote.os.package_type

  def get_pkg_type(os_type):
      if os_type in ('centos', 'fedora', 'opensuse', 'rhel', 'sle'):
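`get_system_type` now answers from the cached `remote.os` object instead of running `lsb_release` on the node. A sketch of the possible return shapes; the concrete values are examples, not guaranteed output:

```python
get_system_type(remote)                              # -> 'deb' or 'rpm' (package type)
get_system_type(remote, distro=True)                 # -> e.g. 'ubuntu'
get_system_type(remote, version=True)                # -> e.g. '20.04'
get_system_type(remote, distro=True, version=True)   # -> e.g. ('ubuntu', '20.04')
```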
@@ -1324,7 +1290,7 @@ def sh(command, log_limit=1024, cwd=None, env=None):
      for line in proc.stdout:
          line = line.decode()
          lines.append(line)
-         line = line.strip()
+         line = line.rstrip()
          if len(line) > log_limit:
              truncated = True
              log.debug(line[:log_limit] +
@@ -1345,3 +1311,52 @@ def sh(command, log_limit=1024, cwd=None, env=None):
              output=output
          )
      return output
+
+
+ def add_remote_path(ctx, local_dir, remote_dir):
+     """
+     Add key/value pair (local_dir: remote_dir) to job's info.yaml.
+     These key/value pairs are read to archive them in case of job timeout.
+     """
+     if ctx.archive is None:
+         return
+     with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
+         info_yaml = yaml.safe_load(info_file)
+         info_file.seek(0)
+         if 'archive' in info_yaml:
+             info_yaml['archive'][local_dir] = remote_dir
+         else:
+             info_yaml['archive'] = {local_dir: remote_dir}
+         yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
+
+
+ def archive_logs(ctx, remote_path, log_path):
+     """
+     Archive directories from all nodes in a cliuster. It pulls all files in
+     remote_path dir to job's archive dir under log_path dir.
+     """
+     if ctx.archive is None:
+         return
+     path = os.path.join(ctx.archive, 'remote')
+     os.makedirs(path, exist_ok=True)
+     for remote in ctx.cluster.remotes.keys():
+         sub = os.path.join(path, remote.shortname)
+         os.makedirs(sub, exist_ok=True)
+         try:
+             pull_directory(remote, remote_path, os.path.join(sub, log_path))
+         except ReadError:
+             pass
+
+
+ def compress_logs(ctx, remote_dir):
+     """
+     Compress all files in remote_dir from all nodes in a cluster.
+     """
+     log.info('Compressing logs...')
+     run.wait(
+         ctx.cluster.run(
+             args=(f"sudo find {remote_dir} -name *.log -print0 | "
+                   f"sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose --"),
+             wait=False,
+         ),
+     )
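The three new helpers are meant to be used together by tasks that leave logs on the test nodes: record the local/remote mapping up front, then compress and pull the directory during teardown. A hedged sketch of that flow inside a generator-style task; the tool and directory names are hypothetical:

```python
import contextlib
from teuthology import misc

@contextlib.contextmanager
def task(ctx, config):
    remote_dir = '/var/log/mytool'                     # hypothetical log dir on the nodes
    misc.add_remote_path(ctx, 'mytool', remote_dir)    # record mapping in info.yaml
    try:
        yield
    finally:
        misc.compress_logs(ctx, remote_dir)            # gzip every *.log on every node
        misc.archive_logs(ctx, remote_dir, 'mytool')   # pull into <archive>/remote/<node>/mytool
```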