toil 6.1.0__py3-none-any.whl → 7.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. toil/__init__.py +1 -232
  2. toil/batchSystems/abstractBatchSystem.py +22 -13
  3. toil/batchSystems/abstractGridEngineBatchSystem.py +59 -45
  4. toil/batchSystems/awsBatch.py +8 -8
  5. toil/batchSystems/contained_executor.py +4 -5
  6. toil/batchSystems/gridengine.py +1 -1
  7. toil/batchSystems/htcondor.py +5 -5
  8. toil/batchSystems/kubernetes.py +25 -11
  9. toil/batchSystems/local_support.py +3 -3
  10. toil/batchSystems/lsf.py +2 -2
  11. toil/batchSystems/mesos/batchSystem.py +4 -4
  12. toil/batchSystems/mesos/executor.py +3 -2
  13. toil/batchSystems/options.py +9 -0
  14. toil/batchSystems/singleMachine.py +11 -10
  15. toil/batchSystems/slurm.py +64 -22
  16. toil/batchSystems/torque.py +1 -1
  17. toil/bus.py +7 -3
  18. toil/common.py +36 -13
  19. toil/cwl/cwltoil.py +365 -312
  20. toil/deferred.py +1 -1
  21. toil/fileStores/abstractFileStore.py +17 -17
  22. toil/fileStores/cachingFileStore.py +2 -2
  23. toil/fileStores/nonCachingFileStore.py +1 -1
  24. toil/job.py +228 -60
  25. toil/jobStores/abstractJobStore.py +18 -10
  26. toil/jobStores/aws/jobStore.py +280 -218
  27. toil/jobStores/aws/utils.py +57 -29
  28. toil/jobStores/conftest.py +2 -2
  29. toil/jobStores/fileJobStore.py +2 -2
  30. toil/jobStores/googleJobStore.py +3 -4
  31. toil/leader.py +72 -24
  32. toil/lib/aws/__init__.py +26 -10
  33. toil/lib/aws/iam.py +2 -2
  34. toil/lib/aws/session.py +62 -22
  35. toil/lib/aws/utils.py +73 -37
  36. toil/lib/conversions.py +5 -1
  37. toil/lib/ec2.py +118 -69
  38. toil/lib/expando.py +1 -1
  39. toil/lib/io.py +14 -2
  40. toil/lib/misc.py +1 -3
  41. toil/lib/resources.py +55 -21
  42. toil/lib/retry.py +12 -5
  43. toil/lib/threading.py +2 -2
  44. toil/lib/throttle.py +1 -1
  45. toil/options/common.py +27 -24
  46. toil/provisioners/__init__.py +9 -3
  47. toil/provisioners/abstractProvisioner.py +9 -7
  48. toil/provisioners/aws/__init__.py +20 -15
  49. toil/provisioners/aws/awsProvisioner.py +406 -329
  50. toil/provisioners/gceProvisioner.py +2 -2
  51. toil/provisioners/node.py +13 -5
  52. toil/server/app.py +1 -1
  53. toil/statsAndLogging.py +58 -16
  54. toil/test/__init__.py +27 -12
  55. toil/test/batchSystems/batchSystemTest.py +40 -33
  56. toil/test/batchSystems/batch_system_plugin_test.py +79 -0
  57. toil/test/batchSystems/test_slurm.py +1 -1
  58. toil/test/cwl/cwlTest.py +8 -91
  59. toil/test/cwl/seqtk_seq.cwl +1 -1
  60. toil/test/docs/scriptsTest.py +10 -13
  61. toil/test/jobStores/jobStoreTest.py +33 -49
  62. toil/test/lib/aws/test_iam.py +2 -2
  63. toil/test/provisioners/aws/awsProvisionerTest.py +51 -34
  64. toil/test/provisioners/clusterTest.py +90 -8
  65. toil/test/server/serverTest.py +2 -2
  66. toil/test/src/autoDeploymentTest.py +1 -1
  67. toil/test/src/dockerCheckTest.py +2 -1
  68. toil/test/src/environmentTest.py +125 -0
  69. toil/test/src/fileStoreTest.py +1 -1
  70. toil/test/src/jobDescriptionTest.py +18 -8
  71. toil/test/src/jobTest.py +1 -1
  72. toil/test/src/realtimeLoggerTest.py +4 -0
  73. toil/test/src/workerTest.py +52 -19
  74. toil/test/utils/toilDebugTest.py +61 -3
  75. toil/test/utils/utilsTest.py +20 -18
  76. toil/test/wdl/wdltoil_test.py +24 -71
  77. toil/test/wdl/wdltoil_test_kubernetes.py +77 -0
  78. toil/toilState.py +68 -9
  79. toil/utils/toilDebugJob.py +153 -26
  80. toil/utils/toilLaunchCluster.py +12 -2
  81. toil/utils/toilRsyncCluster.py +7 -2
  82. toil/utils/toilSshCluster.py +7 -3
  83. toil/utils/toilStats.py +2 -1
  84. toil/utils/toilStatus.py +97 -51
  85. toil/version.py +10 -10
  86. toil/wdl/wdltoil.py +318 -51
  87. toil/worker.py +96 -69
  88. {toil-6.1.0.dist-info → toil-7.0.0.dist-info}/LICENSE +25 -0
  89. {toil-6.1.0.dist-info → toil-7.0.0.dist-info}/METADATA +55 -21
  90. {toil-6.1.0.dist-info → toil-7.0.0.dist-info}/RECORD +93 -90
  91. {toil-6.1.0.dist-info → toil-7.0.0.dist-info}/WHEEL +1 -1
  92. {toil-6.1.0.dist-info → toil-7.0.0.dist-info}/entry_points.txt +0 -0
  93. {toil-6.1.0.dist-info → toil-7.0.0.dist-info}/top_level.txt +0 -0
toil/test/cwl/cwlTest.py CHANGED
@@ -23,15 +23,14 @@ import sys
23
23
  import unittest
24
24
  import uuid
25
25
  import zipfile
26
+
26
27
  from functools import partial
27
28
  from io import StringIO
28
29
  from pathlib import Path
29
30
  from typing import (TYPE_CHECKING,
30
- Any,
31
31
  Callable,
32
32
  Dict,
33
33
  List,
34
- MutableMapping,
35
34
  Optional,
36
35
  cast)
37
36
  from unittest.mock import Mock, call
@@ -51,7 +50,6 @@ from toil.cwl.utils import (DirectoryStructure,
51
50
  download_structure,
52
51
  visit_cwl_class_and_reduce,
53
52
  visit_top_cwl_class)
54
- from toil.exceptions import FailedJobsException
55
53
  from toil.fileStores import FileID
56
54
  from toil.fileStores.abstractFileStore import AbstractFileStore
57
55
  from toil.lib.threading import cpu_count
@@ -74,7 +72,6 @@ from toil.test import (ToilTest,
74
72
  needs_torque,
75
73
  needs_wes_server,
76
74
  slow)
77
- from toil.test.provisioners.clusterTest import AbstractClusterTest
78
75
 
79
76
  log = logging.getLogger(__name__)
80
77
  CONFORMANCE_TEST_TIMEOUT = 10000
@@ -505,7 +502,7 @@ class CWLWorkflowTest(ToilTest):
505
502
  "src/toil/test/cwl/seqtk_seq.cwl",
506
503
  "src/toil/test/cwl/seqtk_seq_job.json",
507
504
  self._expected_seqtk_output(self.outDir),
508
- main_args=["--default-container", "quay.io/biocontainers/seqtk:r93--0"],
505
+ main_args=["--default-container", "quay.io/biocontainers/seqtk:1.4--he4a0461_1"],
509
506
  out_name="output1",
510
507
  )
511
508
 
@@ -540,7 +537,6 @@ class CWLWorkflowTest(ToilTest):
540
537
  """
541
538
  log.info("Running CWL Test Restart. Expecting failure, then success.")
542
539
  from toil.cwl import cwltoil
543
- from toil.jobStores.abstractJobStore import NoSuchJobStoreException
544
540
 
545
541
  outDir = self._createTempDir()
546
542
  cwlDir = os.path.join(self._projectRootPath(), "src", "toil", "test", "cwl")
@@ -570,18 +566,19 @@ class CWLWorkflowTest(ToilTest):
570
566
  # Force a failure by trying to use an incorrect version of `rev` from the PATH
571
567
  os.environ["PATH"] = path_with_bogus_rev()
572
568
  try:
573
- cwltoil.main(cmd)
569
+ subprocess.check_output(["toil-cwl-runner"] + cmd, env=os.environ.copy(), stderr=subprocess.STDOUT)
574
570
  self.fail("Expected problem job with incorrect PATH did not fail")
575
- except FailedJobsException:
571
+ except subprocess.CalledProcessError:
576
572
  pass
577
573
  # Finish the job with a correct PATH
578
574
  os.environ["PATH"] = orig_path
579
- cwltoil.main(["--restart"] + cmd)
575
+ cmd.insert(0, "--restart")
576
+ cwltoil.main(cmd)
580
577
  # Should fail because previous job completed successfully
581
578
  try:
582
- cwltoil.main(["--restart"] + cmd)
579
+ subprocess.check_output(["toil-cwl-runner"] + cmd, env=os.environ.copy(), stderr=subprocess.STDOUT)
583
580
  self.fail("Restart with missing directory did not fail")
584
- except NoSuchJobStoreException:
581
+ except subprocess.CalledProcessError:
585
582
  pass
586
583
 
587
584
  @needs_aws_s3
@@ -1168,86 +1165,6 @@ class CWLv12Test(ToilTest):
1168
1165
  )
1169
1166
 
1170
1167
 
1171
- @needs_aws_ec2
1172
- @needs_fetchable_appliance
1173
- @slow
1174
- class CWLOnARMTest(AbstractClusterTest):
1175
- """
1176
- Run the CWL 1.2 conformance tests on ARM specifically.
1177
- """
1178
-
1179
- def __init__(self, methodName: str) -> None:
1180
- super().__init__(methodName=methodName)
1181
- self.clusterName = "cwl-test-" + str(uuid.uuid4())
1182
- self.leaderNodeType = "t4g.2xlarge"
1183
- self.clusterType = "kubernetes"
1184
- # We need to be running in a directory which Flatcar and the Toil Appliance both have
1185
- self.cwl_test_dir = "/tmp/toil/cwlTests"
1186
-
1187
- def setUp(self) -> None:
1188
- super().setUp()
1189
- self.jobStore = f"aws:{self.awsRegion()}:cluster-{uuid.uuid4()}"
1190
-
1191
- @needs_env_var("CI_COMMIT_SHA", "a git commit sha")
1192
- def test_cwl_on_arm(self) -> None:
1193
- # Make a cluster
1194
- self.launchCluster()
1195
- # get the leader so we know the IP address - we don't need to wait since create cluster
1196
- # already ensures the leader is running
1197
- self.cluster = cluster_factory(
1198
- provisioner="aws", zone=self.zone, clusterName=self.clusterName
1199
- )
1200
- self.leader = self.cluster.getLeader()
1201
-
1202
- commit = os.environ["CI_COMMIT_SHA"]
1203
- self.sshUtil(
1204
- [
1205
- "bash",
1206
- "-c",
1207
- f"mkdir -p {self.cwl_test_dir} && cd {self.cwl_test_dir} && git clone https://github.com/DataBiosphere/toil.git",
1208
- ]
1209
- )
1210
-
1211
- # We use CI_COMMIT_SHA to retrieve the Toil version needed to run the CWL tests
1212
- self.sshUtil(
1213
- ["bash", "-c", f"cd {self.cwl_test_dir}/toil && git checkout {commit}"]
1214
- )
1215
-
1216
- # --never-download prevents silent upgrades to pip, wheel and setuptools
1217
- self.sshUtil(
1218
- [
1219
- "bash",
1220
- "-c",
1221
- f"virtualenv --system-site-packages --never-download {self.venvDir}",
1222
- ]
1223
- )
1224
- self.sshUtil(
1225
- [
1226
- "bash",
1227
- "-c",
1228
- f". .{self.venvDir}/bin/activate && cd {self.cwl_test_dir}/toil && make prepare && make develop extras=[all]",
1229
- ]
1230
- )
1231
-
1232
- # Runs the CWLv12Test on an ARM instance
1233
- self.sshUtil(
1234
- [
1235
- "bash",
1236
- "-c",
1237
- f". .{self.venvDir}/bin/activate && cd {self.cwl_test_dir}/toil && pytest --log-cli-level DEBUG -r s src/toil/test/cwl/cwlTest.py::CWLv12Test::test_run_conformance",
1238
- ]
1239
- )
1240
-
1241
- # We know if it succeeds it should save a junit XML for us to read.
1242
- # Bring it back to be an artifact.
1243
- self.rsync_util(
1244
- f":{self.cwl_test_dir}/toil/conformance-1.2.junit.xml",
1245
- os.path.join(
1246
- self._projectRootPath(),
1247
- "arm-conformance-1.2.junit.xml"
1248
- )
1249
- )
1250
-
1251
1168
  @needs_cwl
1252
1169
  @pytest.mark.cwl_small_log_dir
1253
1170
  def test_workflow_echo_string_scatter_stderr_log_dir(tmp_path: Path) -> None:
@@ -21,4 +21,4 @@ hints:
21
21
  packages:
22
22
  - package: seqtk
23
23
  version:
24
- - r93
24
+ - '1.4'
@@ -4,6 +4,9 @@ import shutil
4
4
  import subprocess
5
5
  import sys
6
6
  import unittest
7
+ import pytest
8
+
9
+ from typing import List
7
10
 
8
11
  pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # noqa
9
12
  sys.path.insert(0, pkg_root) # noqa
@@ -21,12 +24,6 @@ class ToilDocumentationTest(ToilTest):
21
24
 
22
25
  def tearDown(self) -> None:
23
26
  super(ToilTest, self).tearDown()
24
- # src/toil/test/docs/scripts/cwlExampleFiles/sample_1_output.txt
25
- output_files = ["sample_1_output.txt", "sample_2_output.txt", "sample_3_output.txt"]
26
- for output in output_files:
27
- output_file = os.path.join(self.directory, 'scripts/cwlExampleFiles', output)
28
- if os.path.exists(output_file):
29
- os.remove(output_file)
30
27
 
31
28
  jobstores = ['/mnt/ephemeral/workspace/toil-pull-requests/toilWorkflowRun']
32
29
  for jobstore in jobstores:
@@ -36,9 +33,9 @@ class ToilDocumentationTest(ToilTest):
36
33
  unittest.TestCase.tearDown(self)
37
34
 
38
35
  """Just check the exit code"""
39
- def checkExitCode(self, script):
36
+ def checkExitCode(self, script, extra_args: List[str] = []):
40
37
  program = os.path.join(self.directory, "scripts", script)
41
- process = subprocess.Popen([python, program, "file:my-jobstore", "--clean=always"],
38
+ process = subprocess.Popen([python, program, "file:my-jobstore", "--clean=always"] + extra_args,
42
39
  stdout=subprocess.PIPE, stderr=subprocess.PIPE)
43
40
  stdout, stderr = process.communicate()
44
41
  if isinstance(stdout, bytes):
@@ -65,13 +62,11 @@ class ToilDocumentationTest(ToilTest):
65
62
  n = re.search(pattern, outerr)
66
63
  self.assertNotEqual(n, None, f"Pattern:\n{expectedPattern}\nOutput:\n{outerr}")
67
64
 
68
- @needs_cwl
69
- def testCwlexample(self):
70
- self.checkExitCode("tutorial_cwlexample.py")
71
-
72
65
  def testStats(self):
73
- self.checkExitCode("tutorial_stats.py")
66
+ # This script asks for 4 cores but we might need to run the tests in only 3.
67
+ self.checkExitCode("tutorial_stats.py", ["--scale=0.5"])
74
68
 
69
+ @pytest.mark.timeout(1200)
75
70
  def testDynamic(self):
76
71
  self.checkExitCode("tutorial_dynamic.py")
77
72
 
@@ -117,6 +112,7 @@ class ToilDocumentationTest(ToilTest):
117
112
  "second or third.*Hello world, I have a message: second or third.*Hello world,"
118
113
  " I have a message: last")
119
114
 
115
+ @pytest.mark.timeout(1200)
120
116
  def testPromises2(self):
121
117
  self.checkExpectedOut("tutorial_promises2.py",
122
118
  "['00000', '00001', '00010', '00011', '00100', '00101', '00110', '00111',"
@@ -146,5 +142,6 @@ class ToilDocumentationTest(ToilTest):
146
142
  def testStaging(self):
147
143
  self.checkExitCode("tutorial_staging.py")
148
144
 
145
+
149
146
  if __name__ == "__main__":
150
147
  unittest.main()
@@ -129,8 +129,7 @@ class AbstractJobStoreTest:
129
129
  # Requirements for jobs to be created.
130
130
  self.arbitraryRequirements = {'memory': 1, 'disk': 2, 'cores': 1, 'preemptible': False}
131
131
  # Function to make an arbitrary new job
132
- self.arbitraryJob = lambda: JobDescription(command='command',
133
- jobName='arbitrary',
132
+ self.arbitraryJob = lambda: JobDescription(jobName='arbitrary',
134
133
  requirements=self.arbitraryRequirements)
135
134
 
136
135
  self.parentJobReqs = dict(memory=12, cores=34, disk=35, preemptible=True)
@@ -143,9 +142,9 @@ class AbstractJobStoreTest:
143
142
  super().tearDown()
144
143
 
145
144
  def testInitialState(self):
146
- """Ensure proper handling of nonexistant files."""
147
- self.assertFalse(self.jobstore_initialized.job_exists('nonexistantFile'))
148
- self.assertRaises(NoSuchJobException, self.jobstore_initialized.load_job, 'nonexistantFile')
145
+ """Ensure proper handling of nonexistent files."""
146
+ self.assertFalse(self.jobstore_initialized.job_exists('nonexistentFile'))
147
+ self.assertRaises(NoSuchJobException, self.jobstore_initialized.load_job, 'nonexistentFile')
149
148
 
150
149
  def testJobCreation(self):
151
150
  """
@@ -158,8 +157,7 @@ class AbstractJobStoreTest:
158
157
  jobstore = self.jobstore_initialized
159
158
 
160
159
  # Create a job and verify its existence/properties
161
- job = JobDescription(command='parent1',
162
- requirements=self.parentJobReqs,
160
+ job = JobDescription(requirements=self.parentJobReqs,
163
161
  jobName='test1', unitName='onParent')
164
162
  self.assertTrue(isinstance(job.jobStoreID, TemporaryID))
165
163
  jobstore.assign_job_id(job)
@@ -169,7 +167,6 @@ class AbstractJobStoreTest:
169
167
  self.assertEqual(created, job)
170
168
 
171
169
  self.assertTrue(jobstore.job_exists(job.jobStoreID))
172
- self.assertEqual(job.command, 'parent1')
173
170
  self.assertEqual(job.memory, self.parentJobReqs['memory'])
174
171
  self.assertEqual(job.cores, self.parentJobReqs['cores'])
175
172
  self.assertEqual(job.disk, self.parentJobReqs['disk'])
@@ -194,8 +191,7 @@ class AbstractJobStoreTest:
194
191
  """Tests that a job created via one JobStore instance can be loaded from another."""
195
192
 
196
193
  # Create a job on the first jobstore.
197
- jobDesc1 = JobDescription(command='jobstore1',
198
- requirements=self.parentJobReqs,
194
+ jobDesc1 = JobDescription(requirements=self.parentJobReqs,
199
195
  jobName='test1', unitName='onJS1')
200
196
  self.jobstore_initialized.assign_job_id(jobDesc1)
201
197
  self.jobstore_initialized.create_job(jobDesc1)
@@ -203,16 +199,14 @@ class AbstractJobStoreTest:
203
199
  # Load it from the second jobstore
204
200
  jobDesc2 = self.jobstore_resumed_noconfig.load_job(jobDesc1.jobStoreID)
205
201
 
206
- self.assertEqual(jobDesc1.command, jobDesc2.command)
202
+ self.assertEqual(jobDesc1._body, jobDesc2._body)
207
203
 
208
204
  def testChildLoadingEquality(self):
209
205
  """Test that loading a child job operates as expected."""
210
- job = JobDescription(command='parent1',
211
- requirements=self.parentJobReqs,
206
+ job = JobDescription(requirements=self.parentJobReqs,
212
207
  jobName='test1', unitName='onParent')
213
208
 
214
- childJob = JobDescription(command='child1',
215
- requirements=self.childJobReqs1,
209
+ childJob = JobDescription(requirements=self.childJobReqs1,
216
210
  jobName='test2', unitName='onChild1')
217
211
  self.jobstore_initialized.assign_job_id(job)
218
212
  self.jobstore_initialized.assign_job_id(childJob)
@@ -221,7 +215,7 @@ class AbstractJobStoreTest:
221
215
  job.addChild(childJob.jobStoreID)
222
216
  self.jobstore_initialized.update_job(job)
223
217
 
224
- self.assertEqual(self.jobstore_initialized.load_job(list(job.allSuccessors())[0]).command, childJob.command)
218
+ self.assertEqual(self.jobstore_initialized.load_job(list(job.allSuccessors())[0])._body, childJob._body)
225
219
 
226
220
  def testPersistantFilesToDelete(self):
227
221
  """
@@ -236,8 +230,7 @@ class AbstractJobStoreTest:
236
230
  """
237
231
 
238
232
  # Create a job.
239
- job = JobDescription(command='job1',
240
- requirements=self.parentJobReqs,
233
+ job = JobDescription(requirements=self.parentJobReqs,
241
234
  jobName='test1', unitName='onJS1')
242
235
 
243
236
  self.jobstore_initialized.assign_job_id(job)
@@ -251,16 +244,13 @@ class AbstractJobStoreTest:
251
244
  jobstore1 = self.jobstore_initialized
252
245
  jobstore2 = self.jobstore_resumed_noconfig
253
246
 
254
- job1 = JobDescription(command='parent1',
255
- requirements=self.parentJobReqs,
247
+ job1 = JobDescription(requirements=self.parentJobReqs,
256
248
  jobName='test1', unitName='onParent')
257
249
 
258
- childJob1 = JobDescription(command='child1',
259
- requirements=self.childJobReqs1,
250
+ childJob1 = JobDescription(requirements=self.childJobReqs1,
260
251
  jobName='test2', unitName='onChild1')
261
252
 
262
- childJob2 = JobDescription(command='child2',
263
- requirements=self.childJobReqs2,
253
+ childJob2 = JobDescription(requirements=self.childJobReqs2,
264
254
  jobName='test3', unitName='onChild2')
265
255
 
266
256
  jobstore1.assign_job_id(job1)
@@ -297,20 +287,17 @@ class AbstractJobStoreTest:
297
287
  """Tests the consequences of deleting jobs."""
298
288
  # A local jobstore object for testing.
299
289
  jobstore = self.jobstore_initialized
300
- job = JobDescription(command='job1',
301
- requirements=self.parentJobReqs,
290
+ job = JobDescription(requirements=self.parentJobReqs,
302
291
  jobName='test1', unitName='onJob')
303
292
  # Create job
304
293
  jobstore.assign_job_id(job)
305
294
  jobstore.create_job(job)
306
295
 
307
296
  # Create child Jobs
308
- child1 = JobDescription(command='child1',
309
- requirements=self.childJobReqs1,
297
+ child1 = JobDescription(requirements=self.childJobReqs1,
310
298
  jobName='test2', unitName='onChild1')
311
299
 
312
- child2 = JobDescription(command='job1',
313
- requirements=self.childJobReqs2,
300
+ child2 = JobDescription(requirements=self.childJobReqs2,
314
301
  jobName='test3', unitName='onChild2')
315
302
 
316
303
  # Add children to parent.
@@ -322,9 +309,8 @@ class AbstractJobStoreTest:
322
309
  job.addChild(child2.jobStoreID)
323
310
  jobstore.update_job(job)
324
311
 
325
- # Get it ready to run children
326
- job.command = None
327
- jobstore.update_job(job)
312
+ # Parent must have no body to start on children
313
+ assert not job.has_body()
328
314
 
329
315
  # Go get the children
330
316
  childJobs = [jobstore.load_job(childID) for childID in job.nextSuccessors()]
@@ -421,8 +407,7 @@ class AbstractJobStoreTest:
421
407
  jobstore2 = self.jobstore_resumed_noconfig
422
408
 
423
409
  # Create jobNodeOnJS1
424
- jobOnJobStore1 = JobDescription(command='job1',
425
- requirements=self.parentJobReqs,
410
+ jobOnJobStore1 = JobDescription(requirements=self.parentJobReqs,
426
411
  jobName='test1', unitName='onJobStore1')
427
412
 
428
413
  # First recreate job
@@ -489,12 +474,11 @@ class AbstractJobStoreTest:
489
474
  pass
490
475
 
491
476
  def testStatsAndLogging(self):
492
- """Tests behavior of reading and writting stats and logging."""
477
+ """Tests behavior of reading and writing stats and logging."""
493
478
  jobstore1 = self.jobstore_initialized
494
479
  jobstore2 = self.jobstore_resumed_noconfig
495
480
 
496
- jobOnJobStore1 = JobDescription(command='job1',
497
- requirements=self.parentJobReqs,
481
+ jobOnJobStore1 = JobDescription(requirements=self.parentJobReqs,
498
482
  jobName='test1', unitName='onJobStore1')
499
483
 
500
484
  jobstore1.assign_job_id(jobOnJobStore1)
@@ -565,8 +549,7 @@ class AbstractJobStoreTest:
565
549
  jobs = []
566
550
  with jobstore.batch():
567
551
  for i in range(100):
568
- overlargeJob = JobDescription(command='overlarge',
569
- requirements=jobRequirements,
552
+ overlargeJob = JobDescription(requirements=jobRequirements,
570
553
  jobName='test-overlarge', unitName='onJobStore')
571
554
  jobstore.assign_job_id(overlargeJob)
572
555
  jobstore.create_job(overlargeJob)
@@ -668,7 +651,7 @@ class AbstractJobStoreTest:
668
651
  :param int size: the size of the file to test importing/exporting with
669
652
  """
670
653
  # Prepare test file in other job store
671
- self.jobstore_initialized.partSize = cls.mpTestPartSize
654
+ self.jobstore_initialized.part_size = cls.mpTestPartSize
672
655
  self.jobstore_initialized.moveExports = moveExports
673
656
 
674
657
  # Test assumes imports are not linked
@@ -724,7 +707,7 @@ class AbstractJobStoreTest:
724
707
  to import from or export to
725
708
  """
726
709
  # Prepare test file in other job store
727
- self.jobstore_initialized.partSize = cls.mpTestPartSize
710
+ self.jobstore_initialized.part_size = cls.mpTestPartSize
728
711
  other = otherCls('testSharedFiles')
729
712
  store = other._externalStore()
730
713
 
@@ -1312,7 +1295,6 @@ class AWSJobStoreTest(AbstractJobStoreTest.Test):
1312
1295
  failed to be created. We simulate a failed jobstore bucket creation by using a bucket in a
1313
1296
  different region with the same name.
1314
1297
  """
1315
- from boto.sdb import connect_to_region
1316
1298
  from botocore.exceptions import ClientError
1317
1299
 
1318
1300
  from toil.jobStores.aws.jobStore import BucketLocationConflictException
@@ -1352,13 +1334,16 @@ class AWSJobStoreTest(AbstractJobStoreTest.Test):
1352
1334
  except BucketLocationConflictException:
1353
1335
  # Catch the expected BucketLocationConflictException and ensure that the bound
1354
1336
  # domains don't exist in SDB.
1355
- sdb = connect_to_region(self.awsRegion())
1337
+ sdb = establish_boto3_session().client(region_name=self.awsRegion(), service_name="sdb")
1356
1338
  next_token = None
1357
1339
  allDomainNames = []
1358
1340
  while True:
1359
- domains = sdb.get_all_domains(max_domains=100, next_token=next_token)
1360
- allDomainNames.extend([x.name for x in domains])
1361
- next_token = domains.next_token
1341
+ if next_token is None:
1342
+ domains = sdb.list_domains(MaxNumberOfDomains=100)
1343
+ else:
1344
+ domains = sdb.list_domains(MaxNumberOfDomains=100, NextToken=next_token)
1345
+ allDomainNames.extend(domains["DomainNames"])
1346
+ next_token = domains.get("NextToken")
1362
1347
  if next_token is None:
1363
1348
  break
1364
1349
  self.assertFalse([d for d in allDomainNames if testJobStoreUUID in d])
@@ -1394,8 +1379,7 @@ class AWSJobStoreTest(AbstractJobStoreTest.Test):
1394
1379
  def testOverlargeJob(self):
1395
1380
  jobstore = self.jobstore_initialized
1396
1381
  jobRequirements = dict(memory=12, cores=34, disk=35, preemptible=True)
1397
- overlargeJob = JobDescription(command='overlarge',
1398
- requirements=jobRequirements,
1382
+ overlargeJob = JobDescription(requirements=jobRequirements,
1399
1383
  jobName='test-overlarge', unitName='onJobStore')
1400
1384
 
1401
1385
  # Make the pickled size of the job larger than 256K
@@ -15,7 +15,7 @@ import json
15
15
  import logging
16
16
 
17
17
  import boto3
18
- from moto import mock_iam
18
+ from moto import mock_aws
19
19
 
20
20
  from toil.lib.aws import iam
21
21
  from toil.test import ToilTest
@@ -46,7 +46,7 @@ class IAMTest(ToilTest):
46
46
  assert iam.permission_matches_any("iam:*", ["*"]) is True
47
47
  assert iam.permission_matches_any("ec2:*", ['iam:*']) is False
48
48
 
49
- @mock_iam
49
+ @mock_aws
50
50
  def test_get_policy_permissions(self):
51
51
  mock_iam = boto3.client("iam")
52
52
 
@@ -19,9 +19,13 @@ import time
19
19
  from abc import abstractmethod
20
20
  from inspect import getsource
21
21
  from textwrap import dedent
22
+ from typing import Optional, List
22
23
  from uuid import uuid4
23
24
 
25
+ import botocore.exceptions
24
26
  import pytest
27
+ from mypy_boto3_ec2 import EC2Client
28
+ from mypy_boto3_ec2.type_defs import EbsInstanceBlockDeviceTypeDef, InstanceTypeDef, InstanceBlockDeviceMappingTypeDef, FilterTypeDef, DescribeVolumesResultTypeDef, VolumeTypeDef
25
29
 
26
30
  from toil.provisioners import cluster_factory
27
31
  from toil.provisioners.aws.awsProvisioner import AWSProvisioner
@@ -113,15 +117,20 @@ class AbstractAWSAutoscaleTest(AbstractClusterTest):
113
117
  def rsyncUtil(self, src, dest):
114
118
  subprocess.check_call(['toil', 'rsync-cluster', '--insecure', '-p=aws', '-z', self.zone, self.clusterName] + [src, dest])
115
119
 
116
- def getRootVolID(self):
117
- instances = self.cluster._get_nodes_in_cluster()
118
- instances.sort(key=lambda x: x.launch_time)
119
- leader = instances[0] # assume leader was launched first
120
-
121
- from boto.ec2.blockdevicemapping import BlockDeviceType
122
- rootBlockDevice = leader.block_device_mapping["/dev/xvda"]
123
- assert isinstance(rootBlockDevice, BlockDeviceType)
124
- return rootBlockDevice.volume_id
120
+ def getRootVolID(self) -> str:
121
+ instances: List[InstanceTypeDef] = self.cluster._get_nodes_in_cluster_boto3()
122
+ instances.sort(key=lambda x: x.get("LaunchTime"))
123
+ leader: InstanceTypeDef = instances[0] # assume leader was launched first
124
+
125
+ bdm: Optional[List[InstanceBlockDeviceMappingTypeDef]] = leader.get("BlockDeviceMappings")
126
+ assert bdm is not None
127
+ root_block_device: Optional[EbsInstanceBlockDeviceTypeDef] = None
128
+ for device in bdm:
129
+ if device["DeviceName"] == "/dev/xvda":
130
+ root_block_device = device["Ebs"]
131
+ assert root_block_device is not None # There should be a device named "/dev/xvda"
132
+ assert root_block_device.get("VolumeId") is not None
133
+ return root_block_device["VolumeId"]
125
134
 
126
135
  @abstractmethod
127
136
  def _getScript(self):
@@ -191,21 +200,20 @@ class AbstractAWSAutoscaleTest(AbstractClusterTest):
191
200
 
192
201
  assert len(self.cluster._getRoleNames()) == 1
193
202
 
194
- from boto.exception import EC2ResponseError
195
203
  volumeID = self.getRootVolID()
196
204
  self.cluster.destroyCluster()
205
+ boto3_ec2: EC2Client = self.aws.client(region=self.region, service_name="ec2")
206
+ volume_filter: FilterTypeDef = {"Name": "volume-id", "Values": [volumeID]}
207
+ volumes: Optional[List[VolumeTypeDef]] = None
197
208
  for attempt in range(6):
198
209
  # https://github.com/BD2KGenomics/toil/issues/1567
199
210
  # retry this for up to 1 minute until the volume disappears
200
- try:
201
- self.boto2_ec2.get_all_volumes(volume_ids=[volumeID])
202
- time.sleep(10)
203
- except EC2ResponseError as e:
204
- if e.status == 400 and 'InvalidVolume.NotFound' in e.code:
205
- break
206
- else:
207
- raise
208
- else:
211
+ volumes = boto3_ec2.describe_volumes(Filters=[volume_filter])["Volumes"]
212
+ if len(volumes) == 0:
213
+ # None are left, so they have been properly deleted
214
+ break
215
+ time.sleep(10)
216
+ if volumes is None or len(volumes) > 0:
209
217
  self.fail('Volume with ID %s was not cleaned up properly' % volumeID)
210
218
 
211
219
  assert len(self.cluster._getRoleNames()) == 0
@@ -246,16 +254,19 @@ class AWSAutoscaleTest(AbstractAWSAutoscaleTest):
246
254
  # add arguments to test that we can specify leader storage
247
255
  self.createClusterUtil(args=['--leaderStorage', str(self.requestedLeaderStorage)])
248
256
 
249
- def getRootVolID(self):
257
+ def getRootVolID(self) -> str:
250
258
  """
251
259
  Adds in test to check that EBS volume is build with adequate size.
252
260
  Otherwise is functionally equivalent to parent.
253
261
  :return: volumeID
254
262
  """
255
263
  volumeID = super().getRootVolID()
256
- rootVolume = self.boto2_ec2.get_all_volumes(volume_ids=[volumeID])[0]
264
+ boto3_ec2: EC2Client = self.aws.client(region=self.region, service_name="ec2")
265
+ volume_filter: FilterTypeDef = {"Name": "volume-id", "Values": [volumeID]}
266
+ volumes: DescribeVolumesResultTypeDef = boto3_ec2.describe_volumes(Filters=[volume_filter])
267
+ root_volume: VolumeTypeDef = volumes["Volumes"][0] # should be first
257
268
  # test that the leader is given adequate storage
258
- self.assertGreaterEqual(rootVolume.size, self.requestedLeaderStorage)
269
+ self.assertGreaterEqual(root_volume["Size"], self.requestedLeaderStorage)
259
270
  return volumeID
260
271
 
261
272
  @integrative
@@ -290,8 +301,6 @@ class AWSStaticAutoscaleTest(AWSAutoscaleTest):
290
301
  self.requestedNodeStorage = 20
291
302
 
292
303
  def launchCluster(self):
293
- from boto.ec2.blockdevicemapping import BlockDeviceType
294
-
295
304
  from toil.lib.ec2 import wait_instances_running
296
305
  self.createClusterUtil(args=['--leaderStorage', str(self.requestedLeaderStorage),
297
306
  '--nodeTypes', ",".join(self.instanceTypes),
@@ -303,8 +312,8 @@ class AWSStaticAutoscaleTest(AWSAutoscaleTest):
303
312
  # visible to EC2 read requests immediately after the create returns,
304
313
  # which is the last thing that starting the cluster does.
305
314
  time.sleep(10)
306
- nodes = self.cluster._get_nodes_in_cluster()
307
- nodes.sort(key=lambda x: x.launch_time)
315
+ nodes: List[InstanceTypeDef] = self.cluster._get_nodes_in_cluster_boto3()
316
+ nodes.sort(key=lambda x: x.get("LaunchTime"))
308
317
  # assuming that leader is first
309
318
  workers = nodes[1:]
310
319
  # test that two worker nodes were created
@@ -312,11 +321,22 @@ class AWSStaticAutoscaleTest(AWSAutoscaleTest):
312
321
  # test that workers have expected storage size
313
322
  # just use the first worker
314
323
  worker = workers[0]
315
- worker = next(wait_instances_running(self.boto2_ec2, [worker]))
316
- rootBlockDevice = worker.block_device_mapping["/dev/xvda"]
317
- self.assertTrue(isinstance(rootBlockDevice, BlockDeviceType))
318
- rootVolume = self.boto2_ec2.get_all_volumes(volume_ids=[rootBlockDevice.volume_id])[0]
319
- self.assertGreaterEqual(rootVolume.size, self.requestedNodeStorage)
324
+ boto3_ec2: EC2Client = self.aws.client(region=self.region, service_name="ec2")
325
+
326
+ worker: InstanceTypeDef = next(wait_instances_running(boto3_ec2, [worker]))
327
+
328
+ bdm: Optional[List[InstanceBlockDeviceMappingTypeDef]] = worker.get("BlockDeviceMappings")
329
+ assert bdm is not None
330
+ root_block_device: Optional[EbsInstanceBlockDeviceTypeDef] = None
331
+ for device in bdm:
332
+ if device["DeviceName"] == "/dev/xvda":
333
+ root_block_device = device["Ebs"]
334
+ assert root_block_device is not None
335
+ assert root_block_device.get("VolumeId") is not None # TypedDicts cannot have runtime type checks
336
+
337
+ volume_filter: FilterTypeDef = {"Name": "volume-id", "Values": [root_block_device["VolumeId"]]}
338
+ root_volume: VolumeTypeDef = boto3_ec2.describe_volumes(Filters=[volume_filter])["Volumes"][0] # should be first
339
+ self.assertGreaterEqual(root_volume.get("Size"), self.requestedNodeStorage)
320
340
 
321
341
  def _runScript(self, toilOptions):
322
342
  # Autoscale even though we have static nodes
@@ -337,9 +357,6 @@ class AWSManagedAutoscaleTest(AWSAutoscaleTest):
337
357
  self.requestedNodeStorage = 20
338
358
 
339
359
  def launchCluster(self):
340
- from boto.ec2.blockdevicemapping import BlockDeviceType # noqa
341
-
342
- from toil.lib.ec2 import wait_instances_running # noqa
343
360
  self.createClusterUtil(args=['--leaderStorage', str(self.requestedLeaderStorage),
344
361
  '--nodeTypes', ",".join(self.instanceTypes),
345
362
  '--workers', ",".join([f'0-{c}' for c in self.numWorkers]),