toil 6.1.0a1__py3-none-any.whl → 7.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104) hide show
  1. toil/__init__.py +1 -232
  2. toil/batchSystems/abstractBatchSystem.py +41 -17
  3. toil/batchSystems/abstractGridEngineBatchSystem.py +79 -65
  4. toil/batchSystems/awsBatch.py +8 -8
  5. toil/batchSystems/cleanup_support.py +7 -3
  6. toil/batchSystems/contained_executor.py +4 -5
  7. toil/batchSystems/gridengine.py +1 -1
  8. toil/batchSystems/htcondor.py +5 -5
  9. toil/batchSystems/kubernetes.py +25 -11
  10. toil/batchSystems/local_support.py +3 -3
  11. toil/batchSystems/lsf.py +9 -9
  12. toil/batchSystems/mesos/batchSystem.py +4 -4
  13. toil/batchSystems/mesos/executor.py +3 -2
  14. toil/batchSystems/options.py +9 -0
  15. toil/batchSystems/singleMachine.py +11 -10
  16. toil/batchSystems/slurm.py +129 -16
  17. toil/batchSystems/torque.py +1 -1
  18. toil/bus.py +45 -3
  19. toil/common.py +56 -31
  20. toil/cwl/cwltoil.py +442 -371
  21. toil/deferred.py +1 -1
  22. toil/exceptions.py +1 -1
  23. toil/fileStores/abstractFileStore.py +69 -20
  24. toil/fileStores/cachingFileStore.py +6 -22
  25. toil/fileStores/nonCachingFileStore.py +6 -15
  26. toil/job.py +270 -86
  27. toil/jobStores/abstractJobStore.py +37 -31
  28. toil/jobStores/aws/jobStore.py +280 -218
  29. toil/jobStores/aws/utils.py +60 -31
  30. toil/jobStores/conftest.py +2 -2
  31. toil/jobStores/fileJobStore.py +3 -3
  32. toil/jobStores/googleJobStore.py +3 -4
  33. toil/leader.py +89 -38
  34. toil/lib/aws/__init__.py +26 -10
  35. toil/lib/aws/iam.py +2 -2
  36. toil/lib/aws/session.py +62 -22
  37. toil/lib/aws/utils.py +73 -37
  38. toil/lib/conversions.py +24 -1
  39. toil/lib/ec2.py +118 -69
  40. toil/lib/expando.py +1 -1
  41. toil/lib/generatedEC2Lists.py +8 -8
  42. toil/lib/io.py +42 -4
  43. toil/lib/misc.py +1 -3
  44. toil/lib/resources.py +57 -16
  45. toil/lib/retry.py +12 -5
  46. toil/lib/threading.py +29 -14
  47. toil/lib/throttle.py +1 -1
  48. toil/options/common.py +31 -30
  49. toil/options/wdl.py +5 -0
  50. toil/provisioners/__init__.py +9 -3
  51. toil/provisioners/abstractProvisioner.py +12 -2
  52. toil/provisioners/aws/__init__.py +20 -15
  53. toil/provisioners/aws/awsProvisioner.py +406 -329
  54. toil/provisioners/gceProvisioner.py +2 -2
  55. toil/provisioners/node.py +13 -5
  56. toil/server/app.py +1 -1
  57. toil/statsAndLogging.py +93 -23
  58. toil/test/__init__.py +27 -12
  59. toil/test/batchSystems/batchSystemTest.py +40 -33
  60. toil/test/batchSystems/batch_system_plugin_test.py +79 -0
  61. toil/test/batchSystems/test_slurm.py +22 -7
  62. toil/test/cactus/__init__.py +0 -0
  63. toil/test/cactus/test_cactus_integration.py +58 -0
  64. toil/test/cwl/cwlTest.py +245 -236
  65. toil/test/cwl/seqtk_seq.cwl +1 -1
  66. toil/test/docs/scriptsTest.py +11 -14
  67. toil/test/jobStores/jobStoreTest.py +40 -54
  68. toil/test/lib/aws/test_iam.py +2 -2
  69. toil/test/lib/test_ec2.py +1 -1
  70. toil/test/options/__init__.py +13 -0
  71. toil/test/options/options.py +37 -0
  72. toil/test/provisioners/aws/awsProvisionerTest.py +51 -34
  73. toil/test/provisioners/clusterTest.py +99 -16
  74. toil/test/server/serverTest.py +2 -2
  75. toil/test/src/autoDeploymentTest.py +1 -1
  76. toil/test/src/dockerCheckTest.py +2 -1
  77. toil/test/src/environmentTest.py +125 -0
  78. toil/test/src/fileStoreTest.py +1 -1
  79. toil/test/src/jobDescriptionTest.py +18 -8
  80. toil/test/src/jobTest.py +1 -1
  81. toil/test/src/realtimeLoggerTest.py +4 -0
  82. toil/test/src/workerTest.py +52 -19
  83. toil/test/utils/toilDebugTest.py +62 -4
  84. toil/test/utils/utilsTest.py +23 -21
  85. toil/test/wdl/wdltoil_test.py +49 -21
  86. toil/test/wdl/wdltoil_test_kubernetes.py +77 -0
  87. toil/toilState.py +68 -9
  88. toil/utils/toilDebugFile.py +1 -1
  89. toil/utils/toilDebugJob.py +153 -26
  90. toil/utils/toilLaunchCluster.py +12 -2
  91. toil/utils/toilRsyncCluster.py +7 -2
  92. toil/utils/toilSshCluster.py +7 -3
  93. toil/utils/toilStats.py +310 -266
  94. toil/utils/toilStatus.py +98 -52
  95. toil/version.py +11 -11
  96. toil/wdl/wdltoil.py +644 -225
  97. toil/worker.py +125 -83
  98. {toil-6.1.0a1.dist-info → toil-7.0.0.dist-info}/LICENSE +25 -0
  99. toil-7.0.0.dist-info/METADATA +158 -0
  100. {toil-6.1.0a1.dist-info → toil-7.0.0.dist-info}/RECORD +103 -96
  101. {toil-6.1.0a1.dist-info → toil-7.0.0.dist-info}/WHEEL +1 -1
  102. toil-6.1.0a1.dist-info/METADATA +0 -125
  103. {toil-6.1.0a1.dist-info → toil-7.0.0.dist-info}/entry_points.txt +0 -0
  104. {toil-6.1.0a1.dist-info → toil-7.0.0.dist-info}/top_level.txt +0 -0
@@ -21,4 +21,4 @@ hints:
21
21
  packages:
22
22
  - package: seqtk
23
23
  version:
24
- - r93
24
+ - '1.4'
@@ -4,6 +4,9 @@ import shutil
4
4
  import subprocess
5
5
  import sys
6
6
  import unittest
7
+ import pytest
8
+
9
+ from typing import List
7
10
 
8
11
  pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # noqa
9
12
  sys.path.insert(0, pkg_root) # noqa
@@ -21,12 +24,6 @@ class ToilDocumentationTest(ToilTest):
21
24
 
22
25
  def tearDown(self) -> None:
23
26
  super(ToilTest, self).tearDown()
24
- # src/toil/test/docs/scripts/cwlExampleFiles/sample_1_output.txt
25
- output_files = ["sample_1_output.txt", "sample_2_output.txt", "sample_3_output.txt"]
26
- for output in output_files:
27
- output_file = os.path.join(self.directory, 'scripts/cwlExampleFiles', output)
28
- if os.path.exists(output_file):
29
- os.remove(output_file)
30
27
 
31
28
  jobstores = ['/mnt/ephemeral/workspace/toil-pull-requests/toilWorkflowRun']
32
29
  for jobstore in jobstores:
@@ -36,9 +33,9 @@ class ToilDocumentationTest(ToilTest):
36
33
  unittest.TestCase.tearDown(self)
37
34
 
38
35
  """Just check the exit code"""
39
- def checkExitCode(self, script):
36
+ def checkExitCode(self, script, extra_args: List[str] = []):
40
37
  program = os.path.join(self.directory, "scripts", script)
41
- process = subprocess.Popen([python, program, "file:my-jobstore", "--clean=always"],
38
+ process = subprocess.Popen([python, program, "file:my-jobstore", "--clean=always"] + extra_args,
42
39
  stdout=subprocess.PIPE, stderr=subprocess.PIPE)
43
40
  stdout, stderr = process.communicate()
44
41
  if isinstance(stdout, bytes):
@@ -65,13 +62,11 @@ class ToilDocumentationTest(ToilTest):
65
62
  n = re.search(pattern, outerr)
66
63
  self.assertNotEqual(n, None, f"Pattern:\n{expectedPattern}\nOutput:\n{outerr}")
67
64
 
68
- @needs_cwl
69
- def testCwlexample(self):
70
- self.checkExitCode("tutorial_cwlexample.py")
71
-
72
- def testDiscoverfiles(self):
73
- self.checkExitCode("tutorial_discoverfiles.py")
65
+ def testStats(self):
66
+ # This script asks for 4 cores but we might need to run the tests in only 3.
67
+ self.checkExitCode("tutorial_stats.py", ["--scale=0.5"])
74
68
 
69
+ @pytest.mark.timeout(1200)
75
70
  def testDynamic(self):
76
71
  self.checkExitCode("tutorial_dynamic.py")
77
72
 
@@ -117,6 +112,7 @@ class ToilDocumentationTest(ToilTest):
117
112
  "second or third.*Hello world, I have a message: second or third.*Hello world,"
118
113
  " I have a message: last")
119
114
 
115
+ @pytest.mark.timeout(1200)
120
116
  def testPromises2(self):
121
117
  self.checkExpectedOut("tutorial_promises2.py",
122
118
  "['00000', '00001', '00010', '00011', '00100', '00101', '00110', '00111',"
@@ -146,5 +142,6 @@ class ToilDocumentationTest(ToilTest):
146
142
  def testStaging(self):
147
143
  self.checkExitCode("tutorial_staging.py")
148
144
 
145
+
149
146
  if __name__ == "__main__":
150
147
  unittest.main()
@@ -40,7 +40,6 @@ from toil.job import Job, JobDescription, TemporaryID
40
40
  from toil.jobStores.abstractJobStore import (NoSuchFileException,
41
41
  NoSuchJobException)
42
42
  from toil.jobStores.fileJobStore import FileJobStore
43
- from toil.lib.aws.utils import create_s3_bucket, get_object_for_url
44
43
  from toil.lib.io import mkdtemp
45
44
  from toil.lib.memoize import memoize
46
45
  from toil.lib.retry import retry
@@ -130,8 +129,7 @@ class AbstractJobStoreTest:
130
129
  # Requirements for jobs to be created.
131
130
  self.arbitraryRequirements = {'memory': 1, 'disk': 2, 'cores': 1, 'preemptible': False}
132
131
  # Function to make an arbitrary new job
133
- self.arbitraryJob = lambda: JobDescription(command='command',
134
- jobName='arbitrary',
132
+ self.arbitraryJob = lambda: JobDescription(jobName='arbitrary',
135
133
  requirements=self.arbitraryRequirements)
136
134
 
137
135
  self.parentJobReqs = dict(memory=12, cores=34, disk=35, preemptible=True)
@@ -144,9 +142,9 @@ class AbstractJobStoreTest:
144
142
  super().tearDown()
145
143
 
146
144
  def testInitialState(self):
147
- """Ensure proper handling of nonexistant files."""
148
- self.assertFalse(self.jobstore_initialized.job_exists('nonexistantFile'))
149
- self.assertRaises(NoSuchJobException, self.jobstore_initialized.load_job, 'nonexistantFile')
145
+ """Ensure proper handling of nonexistent files."""
146
+ self.assertFalse(self.jobstore_initialized.job_exists('nonexistentFile'))
147
+ self.assertRaises(NoSuchJobException, self.jobstore_initialized.load_job, 'nonexistentFile')
150
148
 
151
149
  def testJobCreation(self):
152
150
  """
@@ -159,8 +157,7 @@ class AbstractJobStoreTest:
159
157
  jobstore = self.jobstore_initialized
160
158
 
161
159
  # Create a job and verify its existence/properties
162
- job = JobDescription(command='parent1',
163
- requirements=self.parentJobReqs,
160
+ job = JobDescription(requirements=self.parentJobReqs,
164
161
  jobName='test1', unitName='onParent')
165
162
  self.assertTrue(isinstance(job.jobStoreID, TemporaryID))
166
163
  jobstore.assign_job_id(job)
@@ -170,7 +167,6 @@ class AbstractJobStoreTest:
170
167
  self.assertEqual(created, job)
171
168
 
172
169
  self.assertTrue(jobstore.job_exists(job.jobStoreID))
173
- self.assertEqual(job.command, 'parent1')
174
170
  self.assertEqual(job.memory, self.parentJobReqs['memory'])
175
171
  self.assertEqual(job.cores, self.parentJobReqs['cores'])
176
172
  self.assertEqual(job.disk, self.parentJobReqs['disk'])
@@ -195,8 +191,7 @@ class AbstractJobStoreTest:
195
191
  """Tests that a job created via one JobStore instance can be loaded from another."""
196
192
 
197
193
  # Create a job on the first jobstore.
198
- jobDesc1 = JobDescription(command='jobstore1',
199
- requirements=self.parentJobReqs,
194
+ jobDesc1 = JobDescription(requirements=self.parentJobReqs,
200
195
  jobName='test1', unitName='onJS1')
201
196
  self.jobstore_initialized.assign_job_id(jobDesc1)
202
197
  self.jobstore_initialized.create_job(jobDesc1)
@@ -204,16 +199,14 @@ class AbstractJobStoreTest:
204
199
  # Load it from the second jobstore
205
200
  jobDesc2 = self.jobstore_resumed_noconfig.load_job(jobDesc1.jobStoreID)
206
201
 
207
- self.assertEqual(jobDesc1.command, jobDesc2.command)
202
+ self.assertEqual(jobDesc1._body, jobDesc2._body)
208
203
 
209
204
  def testChildLoadingEquality(self):
210
205
  """Test that loading a child job operates as expected."""
211
- job = JobDescription(command='parent1',
212
- requirements=self.parentJobReqs,
206
+ job = JobDescription(requirements=self.parentJobReqs,
213
207
  jobName='test1', unitName='onParent')
214
208
 
215
- childJob = JobDescription(command='child1',
216
- requirements=self.childJobReqs1,
209
+ childJob = JobDescription(requirements=self.childJobReqs1,
217
210
  jobName='test2', unitName='onChild1')
218
211
  self.jobstore_initialized.assign_job_id(job)
219
212
  self.jobstore_initialized.assign_job_id(childJob)
@@ -222,7 +215,7 @@ class AbstractJobStoreTest:
222
215
  job.addChild(childJob.jobStoreID)
223
216
  self.jobstore_initialized.update_job(job)
224
217
 
225
- self.assertEqual(self.jobstore_initialized.load_job(list(job.allSuccessors())[0]).command, childJob.command)
218
+ self.assertEqual(self.jobstore_initialized.load_job(list(job.allSuccessors())[0])._body, childJob._body)
226
219
 
227
220
  def testPersistantFilesToDelete(self):
228
221
  """
@@ -237,8 +230,7 @@ class AbstractJobStoreTest:
237
230
  """
238
231
 
239
232
  # Create a job.
240
- job = JobDescription(command='job1',
241
- requirements=self.parentJobReqs,
233
+ job = JobDescription(requirements=self.parentJobReqs,
242
234
  jobName='test1', unitName='onJS1')
243
235
 
244
236
  self.jobstore_initialized.assign_job_id(job)
@@ -252,16 +244,13 @@ class AbstractJobStoreTest:
252
244
  jobstore1 = self.jobstore_initialized
253
245
  jobstore2 = self.jobstore_resumed_noconfig
254
246
 
255
- job1 = JobDescription(command='parent1',
256
- requirements=self.parentJobReqs,
247
+ job1 = JobDescription(requirements=self.parentJobReqs,
257
248
  jobName='test1', unitName='onParent')
258
249
 
259
- childJob1 = JobDescription(command='child1',
260
- requirements=self.childJobReqs1,
250
+ childJob1 = JobDescription(requirements=self.childJobReqs1,
261
251
  jobName='test2', unitName='onChild1')
262
252
 
263
- childJob2 = JobDescription(command='child2',
264
- requirements=self.childJobReqs2,
253
+ childJob2 = JobDescription(requirements=self.childJobReqs2,
265
254
  jobName='test3', unitName='onChild2')
266
255
 
267
256
  jobstore1.assign_job_id(job1)
@@ -298,20 +287,17 @@ class AbstractJobStoreTest:
298
287
  """Tests the consequences of deleting jobs."""
299
288
  # A local jobstore object for testing.
300
289
  jobstore = self.jobstore_initialized
301
- job = JobDescription(command='job1',
302
- requirements=self.parentJobReqs,
290
+ job = JobDescription(requirements=self.parentJobReqs,
303
291
  jobName='test1', unitName='onJob')
304
292
  # Create job
305
293
  jobstore.assign_job_id(job)
306
294
  jobstore.create_job(job)
307
295
 
308
296
  # Create child Jobs
309
- child1 = JobDescription(command='child1',
310
- requirements=self.childJobReqs1,
297
+ child1 = JobDescription(requirements=self.childJobReqs1,
311
298
  jobName='test2', unitName='onChild1')
312
299
 
313
- child2 = JobDescription(command='job1',
314
- requirements=self.childJobReqs2,
300
+ child2 = JobDescription(requirements=self.childJobReqs2,
315
301
  jobName='test3', unitName='onChild2')
316
302
 
317
303
  # Add children to parent.
@@ -323,9 +309,8 @@ class AbstractJobStoreTest:
323
309
  job.addChild(child2.jobStoreID)
324
310
  jobstore.update_job(job)
325
311
 
326
- # Get it ready to run children
327
- job.command = None
328
- jobstore.update_job(job)
312
+ # Parent must have no body to start on children
313
+ assert not job.has_body()
329
314
 
330
315
  # Go get the children
331
316
  childJobs = [jobstore.load_job(childID) for childID in job.nextSuccessors()]
@@ -422,8 +407,7 @@ class AbstractJobStoreTest:
422
407
  jobstore2 = self.jobstore_resumed_noconfig
423
408
 
424
409
  # Create jobNodeOnJS1
425
- jobOnJobStore1 = JobDescription(command='job1',
426
- requirements=self.parentJobReqs,
410
+ jobOnJobStore1 = JobDescription(requirements=self.parentJobReqs,
427
411
  jobName='test1', unitName='onJobStore1')
428
412
 
429
413
  # First recreate job
@@ -490,12 +474,11 @@ class AbstractJobStoreTest:
490
474
  pass
491
475
 
492
476
  def testStatsAndLogging(self):
493
- """Tests behavior of reading and writting stats and logging."""
477
+ """Tests behavior of reading and writing stats and logging."""
494
478
  jobstore1 = self.jobstore_initialized
495
479
  jobstore2 = self.jobstore_resumed_noconfig
496
480
 
497
- jobOnJobStore1 = JobDescription(command='job1',
498
- requirements=self.parentJobReqs,
481
+ jobOnJobStore1 = JobDescription(requirements=self.parentJobReqs,
499
482
  jobName='test1', unitName='onJobStore1')
500
483
 
501
484
  jobstore1.assign_job_id(jobOnJobStore1)
@@ -548,14 +531,16 @@ class AbstractJobStoreTest:
548
531
  jobNames = ['testStatsAndLogging_writeLogFiles']
549
532
  jobLogList = ['string', b'bytes', '', b'newline\n']
550
533
  config = self._createConfig()
551
- setattr(config, 'writeLogs', '.')
534
+ setattr(config, 'writeLogs', self._createTempDir())
552
535
  setattr(config, 'writeLogsGzip', None)
553
536
  StatsAndLogging.writeLogFiles(jobNames, jobLogList, config)
554
- jobLogFile = os.path.join(config.writeLogs, jobNames[0] + '000.log')
537
+ jobLogFile = os.path.join(config.writeLogs, jobNames[0] + '_000.log')
538
+ # The log directory should get exactly one file, named after this
539
+ # easy job name with no replacements needed.
540
+ self.assertEqual(os.listdir(config.writeLogs), [os.path.basename(jobLogFile)])
555
541
  self.assertTrue(os.path.isfile(jobLogFile))
556
542
  with open(jobLogFile) as f:
557
543
  self.assertEqual(f.read(), 'string\nbytes\n\nnewline\n')
558
- os.remove(jobLogFile)
559
544
 
560
545
  def testBatchCreate(self):
561
546
  """Test creation of many jobs."""
@@ -564,8 +549,7 @@ class AbstractJobStoreTest:
564
549
  jobs = []
565
550
  with jobstore.batch():
566
551
  for i in range(100):
567
- overlargeJob = JobDescription(command='overlarge',
568
- requirements=jobRequirements,
552
+ overlargeJob = JobDescription(requirements=jobRequirements,
569
553
  jobName='test-overlarge', unitName='onJobStore')
570
554
  jobstore.assign_job_id(overlargeJob)
571
555
  jobstore.create_job(overlargeJob)
@@ -667,7 +651,7 @@ class AbstractJobStoreTest:
667
651
  :param int size: the size of the file to test importing/exporting with
668
652
  """
669
653
  # Prepare test file in other job store
670
- self.jobstore_initialized.partSize = cls.mpTestPartSize
654
+ self.jobstore_initialized.part_size = cls.mpTestPartSize
671
655
  self.jobstore_initialized.moveExports = moveExports
672
656
 
673
657
  # Test assumes imports are not linked
@@ -723,7 +707,7 @@ class AbstractJobStoreTest:
723
707
  to import from or export to
724
708
  """
725
709
  # Prepare test file in other job store
726
- self.jobstore_initialized.partSize = cls.mpTestPartSize
710
+ self.jobstore_initialized.part_size = cls.mpTestPartSize
727
711
  other = otherCls('testSharedFiles')
728
712
  store = other._externalStore()
729
713
 
@@ -1311,7 +1295,6 @@ class AWSJobStoreTest(AbstractJobStoreTest.Test):
1311
1295
  failed to be created. We simulate a failed jobstore bucket creation by using a bucket in a
1312
1296
  different region with the same name.
1313
1297
  """
1314
- from boto.sdb import connect_to_region
1315
1298
  from botocore.exceptions import ClientError
1316
1299
 
1317
1300
  from toil.jobStores.aws.jobStore import BucketLocationConflictException
@@ -1351,13 +1334,16 @@ class AWSJobStoreTest(AbstractJobStoreTest.Test):
1351
1334
  except BucketLocationConflictException:
1352
1335
  # Catch the expected BucketLocationConflictException and ensure that the bound
1353
1336
  # domains don't exist in SDB.
1354
- sdb = connect_to_region(self.awsRegion())
1337
+ sdb = establish_boto3_session().client(region_name=self.awsRegion(), service_name="sdb")
1355
1338
  next_token = None
1356
1339
  allDomainNames = []
1357
1340
  while True:
1358
- domains = sdb.get_all_domains(max_domains=100, next_token=next_token)
1359
- allDomainNames.extend([x.name for x in domains])
1360
- next_token = domains.next_token
1341
+ if next_token is None:
1342
+ domains = sdb.list_domains(MaxNumberOfDomains=100)
1343
+ else:
1344
+ domains = sdb.list_domains(MaxNumberOfDomains=100, NextToken=next_token)
1345
+ allDomainNames.extend(domains["DomainNames"])
1346
+ next_token = domains.get("NextToken")
1361
1347
  if next_token is None:
1362
1348
  break
1363
1349
  self.assertFalse([d for d in allDomainNames if testJobStoreUUID in d])
@@ -1393,8 +1379,7 @@ class AWSJobStoreTest(AbstractJobStoreTest.Test):
1393
1379
  def testOverlargeJob(self):
1394
1380
  jobstore = self.jobstore_initialized
1395
1381
  jobRequirements = dict(memory=12, cores=34, disk=35, preemptible=True)
1396
- overlargeJob = JobDescription(command='overlarge',
1397
- requirements=jobRequirements,
1382
+ overlargeJob = JobDescription(requirements=jobRequirements,
1398
1383
  jobName='test-overlarge', unitName='onJobStore')
1399
1384
 
1400
1385
  # Make the pickled size of the job larger than 256K
@@ -1463,6 +1448,7 @@ class AWSJobStoreTest(AbstractJobStoreTest.Test):
1463
1448
 
1464
1449
  def _hashTestFile(self, url: str) -> str:
1465
1450
  from toil.jobStores.aws.jobStore import AWSJobStore
1451
+ from toil.lib.aws.utils import get_object_for_url
1466
1452
  str(AWSJobStore) # to prevent removal of that import
1467
1453
  key = get_object_for_url(urlparse.urlparse(url), existing=True)
1468
1454
  contents = key.get().get('Body').read()
@@ -1471,7 +1457,7 @@ class AWSJobStoreTest(AbstractJobStoreTest.Test):
1471
1457
  def _createExternalStore(self):
1472
1458
  """A S3.Bucket instance is returned"""
1473
1459
  from toil.jobStores.aws.jobStore import establish_boto3_session
1474
- from toil.lib.aws.utils import retry_s3
1460
+ from toil.lib.aws.utils import retry_s3, create_s3_bucket
1475
1461
 
1476
1462
  resource = establish_boto3_session().resource(
1477
1463
  "s3", region_name=self.awsRegion()
@@ -15,7 +15,7 @@ import json
15
15
  import logging
16
16
 
17
17
  import boto3
18
- from moto import mock_iam
18
+ from moto import mock_aws
19
19
 
20
20
  from toil.lib.aws import iam
21
21
  from toil.test import ToilTest
@@ -46,7 +46,7 @@ class IAMTest(ToilTest):
46
46
  assert iam.permission_matches_any("iam:*", ["*"]) is True
47
47
  assert iam.permission_matches_any("ec2:*", ['iam:*']) is False
48
48
 
49
- @mock_iam
49
+ @mock_aws
50
50
  def test_get_policy_permissions(self):
51
51
  mock_iam = boto3.client("iam")
52
52
 
toil/test/lib/test_ec2.py CHANGED
@@ -20,7 +20,6 @@ from toil.lib.aws.ami import (aws_marketplace_flatcar_ami_search,
20
20
  feed_flatcar_ami_release,
21
21
  flatcar_release_feed_amis,
22
22
  get_flatcar_ami)
23
- from toil.lib.aws.session import establish_boto3_session
24
23
  from toil.test import ToilTest, needs_aws_ec2, needs_online
25
24
 
26
25
  logger = logging.getLogger(__name__)
@@ -59,6 +58,7 @@ class FlatcarFeedTest(ToilTest):
59
58
  class AMITest(ToilTest):
60
59
  @classmethod
61
60
  def setUpClass(cls):
61
+ from toil.lib.aws.session import establish_boto3_session
62
62
  session = establish_boto3_session(region_name='us-west-2')
63
63
  cls.ec2_client = session.client('ec2')
64
64
 
@@ -0,0 +1,13 @@
1
+ # Copyright (C) 2015-2021 Regents of the University of California
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
@@ -0,0 +1,37 @@
1
+ from configargparse import ArgParser
2
+
3
+ from toil.common import addOptions, Toil
4
+ from toil.test import ToilTest
5
+
6
+
7
+ class OptionsTest(ToilTest):
8
+ """
9
+ Class to test functionality of all Toil options
10
+ """
11
+ def test_default_caching_slurm(self):
12
+ """
13
+ Test to ensure that caching will be set to false when running on Slurm
14
+ :return:
15
+ """
16
+ parser = ArgParser()
17
+ addOptions(parser, jobstore_as_flag=True, wdl=False, cwl=False)
18
+ test_args = ["--jobstore=example-jobstore", "--batchSystem=slurm"]
19
+ options = parser.parse_args(test_args)
20
+ with Toil(options) as toil:
21
+ caching_value = toil.config.caching
22
+ self.assertEqual(caching_value, False)
23
+
24
+ def test_caching_option_priority(self):
25
+ """
26
+ Test to ensure that the --caching option takes priority over the default_caching() return value
27
+ :return:
28
+ """
29
+ parser = ArgParser()
30
+ addOptions(parser, jobstore_as_flag=True, wdl=False, cwl=False)
31
+ # the kubernetes batchsystem (and I think all batchsystems including singlemachine) return False
32
+ # for default_caching
33
+ test_args = ["--jobstore=example-jobstore", "--batchSystem=kubernetes", "--caching=True"]
34
+ options = parser.parse_args(test_args)
35
+ with Toil(options) as toil:
36
+ caching_value = toil.config.caching
37
+ self.assertEqual(caching_value, True)
@@ -19,9 +19,13 @@ import time
19
19
  from abc import abstractmethod
20
20
  from inspect import getsource
21
21
  from textwrap import dedent
22
+ from typing import Optional, List
22
23
  from uuid import uuid4
23
24
 
25
+ import botocore.exceptions
24
26
  import pytest
27
+ from mypy_boto3_ec2 import EC2Client
28
+ from mypy_boto3_ec2.type_defs import EbsInstanceBlockDeviceTypeDef, InstanceTypeDef, InstanceBlockDeviceMappingTypeDef, FilterTypeDef, DescribeVolumesResultTypeDef, VolumeTypeDef
25
29
 
26
30
  from toil.provisioners import cluster_factory
27
31
  from toil.provisioners.aws.awsProvisioner import AWSProvisioner
@@ -113,15 +117,20 @@ class AbstractAWSAutoscaleTest(AbstractClusterTest):
113
117
  def rsyncUtil(self, src, dest):
114
118
  subprocess.check_call(['toil', 'rsync-cluster', '--insecure', '-p=aws', '-z', self.zone, self.clusterName] + [src, dest])
115
119
 
116
- def getRootVolID(self):
117
- instances = self.cluster._get_nodes_in_cluster()
118
- instances.sort(key=lambda x: x.launch_time)
119
- leader = instances[0] # assume leader was launched first
120
-
121
- from boto.ec2.blockdevicemapping import BlockDeviceType
122
- rootBlockDevice = leader.block_device_mapping["/dev/xvda"]
123
- assert isinstance(rootBlockDevice, BlockDeviceType)
124
- return rootBlockDevice.volume_id
120
+ def getRootVolID(self) -> str:
121
+ instances: List[InstanceTypeDef] = self.cluster._get_nodes_in_cluster_boto3()
122
+ instances.sort(key=lambda x: x.get("LaunchTime"))
123
+ leader: InstanceTypeDef = instances[0] # assume leader was launched first
124
+
125
+ bdm: Optional[List[InstanceBlockDeviceMappingTypeDef]] = leader.get("BlockDeviceMappings")
126
+ assert bdm is not None
127
+ root_block_device: Optional[EbsInstanceBlockDeviceTypeDef] = None
128
+ for device in bdm:
129
+ if device["DeviceName"] == "/dev/xvda":
130
+ root_block_device = device["Ebs"]
131
+ assert root_block_device is not None # There should be a device named "/dev/xvda"
132
+ assert root_block_device.get("VolumeId") is not None
133
+ return root_block_device["VolumeId"]
125
134
 
126
135
  @abstractmethod
127
136
  def _getScript(self):
@@ -191,21 +200,20 @@ class AbstractAWSAutoscaleTest(AbstractClusterTest):
191
200
 
192
201
  assert len(self.cluster._getRoleNames()) == 1
193
202
 
194
- from boto.exception import EC2ResponseError
195
203
  volumeID = self.getRootVolID()
196
204
  self.cluster.destroyCluster()
205
+ boto3_ec2: EC2Client = self.aws.client(region=self.region, service_name="ec2")
206
+ volume_filter: FilterTypeDef = {"Name": "volume-id", "Values": [volumeID]}
207
+ volumes: Optional[List[VolumeTypeDef]] = None
197
208
  for attempt in range(6):
198
209
  # https://github.com/BD2KGenomics/toil/issues/1567
199
210
  # retry this for up to 1 minute until the volume disappears
200
- try:
201
- self.boto2_ec2.get_all_volumes(volume_ids=[volumeID])
202
- time.sleep(10)
203
- except EC2ResponseError as e:
204
- if e.status == 400 and 'InvalidVolume.NotFound' in e.code:
205
- break
206
- else:
207
- raise
208
- else:
211
+ volumes = boto3_ec2.describe_volumes(Filters=[volume_filter])["Volumes"]
212
+ if len(volumes) == 0:
213
+ # None are left, so they have been properly deleted
214
+ break
215
+ time.sleep(10)
216
+ if volumes is None or len(volumes) > 0:
209
217
  self.fail('Volume with ID %s was not cleaned up properly' % volumeID)
210
218
 
211
219
  assert len(self.cluster._getRoleNames()) == 0
@@ -246,16 +254,19 @@ class AWSAutoscaleTest(AbstractAWSAutoscaleTest):
246
254
  # add arguments to test that we can specify leader storage
247
255
  self.createClusterUtil(args=['--leaderStorage', str(self.requestedLeaderStorage)])
248
256
 
249
- def getRootVolID(self):
257
+ def getRootVolID(self) -> str:
250
258
  """
251
259
  Adds in test to check that EBS volume is build with adequate size.
252
260
  Otherwise is functionally equivalent to parent.
253
261
  :return: volumeID
254
262
  """
255
263
  volumeID = super().getRootVolID()
256
- rootVolume = self.boto2_ec2.get_all_volumes(volume_ids=[volumeID])[0]
264
+ boto3_ec2: EC2Client = self.aws.client(region=self.region, service_name="ec2")
265
+ volume_filter: FilterTypeDef = {"Name": "volume-id", "Values": [volumeID]}
266
+ volumes: DescribeVolumesResultTypeDef = boto3_ec2.describe_volumes(Filters=[volume_filter])
267
+ root_volume: VolumeTypeDef = volumes["Volumes"][0] # should be first
257
268
  # test that the leader is given adequate storage
258
- self.assertGreaterEqual(rootVolume.size, self.requestedLeaderStorage)
269
+ self.assertGreaterEqual(root_volume["Size"], self.requestedLeaderStorage)
259
270
  return volumeID
260
271
 
261
272
  @integrative
@@ -290,8 +301,6 @@ class AWSStaticAutoscaleTest(AWSAutoscaleTest):
290
301
  self.requestedNodeStorage = 20
291
302
 
292
303
  def launchCluster(self):
293
- from boto.ec2.blockdevicemapping import BlockDeviceType
294
-
295
304
  from toil.lib.ec2 import wait_instances_running
296
305
  self.createClusterUtil(args=['--leaderStorage', str(self.requestedLeaderStorage),
297
306
  '--nodeTypes', ",".join(self.instanceTypes),
@@ -303,8 +312,8 @@ class AWSStaticAutoscaleTest(AWSAutoscaleTest):
303
312
  # visible to EC2 read requests immediately after the create returns,
304
313
  # which is the last thing that starting the cluster does.
305
314
  time.sleep(10)
306
- nodes = self.cluster._get_nodes_in_cluster()
307
- nodes.sort(key=lambda x: x.launch_time)
315
+ nodes: List[InstanceTypeDef] = self.cluster._get_nodes_in_cluster_boto3()
316
+ nodes.sort(key=lambda x: x.get("LaunchTime"))
308
317
  # assuming that leader is first
309
318
  workers = nodes[1:]
310
319
  # test that two worker nodes were created
@@ -312,11 +321,22 @@ class AWSStaticAutoscaleTest(AWSAutoscaleTest):
312
321
  # test that workers have expected storage size
313
322
  # just use the first worker
314
323
  worker = workers[0]
315
- worker = next(wait_instances_running(self.boto2_ec2, [worker]))
316
- rootBlockDevice = worker.block_device_mapping["/dev/xvda"]
317
- self.assertTrue(isinstance(rootBlockDevice, BlockDeviceType))
318
- rootVolume = self.boto2_ec2.get_all_volumes(volume_ids=[rootBlockDevice.volume_id])[0]
319
- self.assertGreaterEqual(rootVolume.size, self.requestedNodeStorage)
324
+ boto3_ec2: EC2Client = self.aws.client(region=self.region, service_name="ec2")
325
+
326
+ worker: InstanceTypeDef = next(wait_instances_running(boto3_ec2, [worker]))
327
+
328
+ bdm: Optional[List[InstanceBlockDeviceMappingTypeDef]] = worker.get("BlockDeviceMappings")
329
+ assert bdm is not None
330
+ root_block_device: Optional[EbsInstanceBlockDeviceTypeDef] = None
331
+ for device in bdm:
332
+ if device["DeviceName"] == "/dev/xvda":
333
+ root_block_device = device["Ebs"]
334
+ assert root_block_device is not None
335
+ assert root_block_device.get("VolumeId") is not None # TypedDicts cannot have runtime type checks
336
+
337
+ volume_filter: FilterTypeDef = {"Name": "volume-id", "Values": [root_block_device["VolumeId"]]}
338
+ root_volume: VolumeTypeDef = boto3_ec2.describe_volumes(Filters=[volume_filter])["Volumes"][0] # should be first
339
+ self.assertGreaterEqual(root_volume.get("Size"), self.requestedNodeStorage)
320
340
 
321
341
  def _runScript(self, toilOptions):
322
342
  # Autoscale even though we have static nodes
@@ -337,9 +357,6 @@ class AWSManagedAutoscaleTest(AWSAutoscaleTest):
337
357
  self.requestedNodeStorage = 20
338
358
 
339
359
  def launchCluster(self):
340
- from boto.ec2.blockdevicemapping import BlockDeviceType # noqa
341
-
342
- from toil.lib.ec2 import wait_instances_running # noqa
343
360
  self.createClusterUtil(args=['--leaderStorage', str(self.requestedLeaderStorage),
344
361
  '--nodeTypes', ",".join(self.instanceTypes),
345
362
  '--workers', ",".join([f'0-{c}' for c in self.numWorkers]),