toil 8.2.0__py3-none-any.whl → 9.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. toil/batchSystems/abstractBatchSystem.py +13 -5
  2. toil/batchSystems/abstractGridEngineBatchSystem.py +17 -5
  3. toil/batchSystems/kubernetes.py +13 -2
  4. toil/batchSystems/mesos/batchSystem.py +33 -2
  5. toil/batchSystems/registry.py +15 -118
  6. toil/batchSystems/slurm.py +191 -16
  7. toil/common.py +20 -1
  8. toil/cwl/cwltoil.py +97 -119
  9. toil/cwl/utils.py +103 -3
  10. toil/fileStores/__init__.py +1 -1
  11. toil/fileStores/abstractFileStore.py +5 -2
  12. toil/fileStores/cachingFileStore.py +1 -1
  13. toil/job.py +30 -14
  14. toil/jobStores/abstractJobStore.py +35 -255
  15. toil/jobStores/aws/jobStore.py +864 -1964
  16. toil/jobStores/aws/utils.py +24 -270
  17. toil/jobStores/fileJobStore.py +2 -1
  18. toil/jobStores/googleJobStore.py +32 -13
  19. toil/jobStores/utils.py +0 -327
  20. toil/leader.py +27 -22
  21. toil/lib/accelerators.py +1 -1
  22. toil/lib/aws/config.py +22 -0
  23. toil/lib/aws/s3.py +477 -9
  24. toil/lib/aws/utils.py +22 -33
  25. toil/lib/checksum.py +88 -0
  26. toil/lib/conversions.py +33 -31
  27. toil/lib/directory.py +217 -0
  28. toil/lib/ec2.py +97 -29
  29. toil/lib/exceptions.py +2 -1
  30. toil/lib/expando.py +2 -2
  31. toil/lib/generatedEC2Lists.py +138 -19
  32. toil/lib/io.py +33 -2
  33. toil/lib/memoize.py +21 -7
  34. toil/lib/misc.py +1 -1
  35. toil/lib/pipes.py +385 -0
  36. toil/lib/plugins.py +106 -0
  37. toil/lib/retry.py +1 -1
  38. toil/lib/threading.py +1 -1
  39. toil/lib/url.py +320 -0
  40. toil/lib/web.py +4 -5
  41. toil/options/cwl.py +13 -1
  42. toil/options/runner.py +17 -10
  43. toil/options/wdl.py +12 -1
  44. toil/provisioners/__init__.py +5 -2
  45. toil/provisioners/aws/__init__.py +43 -36
  46. toil/provisioners/aws/awsProvisioner.py +47 -15
  47. toil/provisioners/node.py +60 -12
  48. toil/resource.py +3 -13
  49. toil/server/app.py +12 -6
  50. toil/server/cli/wes_cwl_runner.py +2 -2
  51. toil/server/wes/abstract_backend.py +21 -43
  52. toil/server/wes/toil_backend.py +2 -2
  53. toil/test/__init__.py +16 -18
  54. toil/test/batchSystems/batchSystemTest.py +2 -9
  55. toil/test/batchSystems/batch_system_plugin_test.py +7 -0
  56. toil/test/batchSystems/test_slurm.py +103 -14
  57. toil/test/cwl/cwlTest.py +181 -8
  58. toil/test/cwl/staging_cat.cwl +27 -0
  59. toil/test/cwl/staging_make_file.cwl +25 -0
  60. toil/test/cwl/staging_workflow.cwl +43 -0
  61. toil/test/cwl/zero_default.cwl +61 -0
  62. toil/test/docs/scripts/tutorial_staging.py +17 -8
  63. toil/test/docs/scriptsTest.py +2 -1
  64. toil/test/jobStores/jobStoreTest.py +23 -133
  65. toil/test/lib/aws/test_iam.py +7 -7
  66. toil/test/lib/aws/test_s3.py +30 -33
  67. toil/test/lib/aws/test_utils.py +9 -9
  68. toil/test/lib/test_url.py +69 -0
  69. toil/test/lib/url_plugin_test.py +105 -0
  70. toil/test/provisioners/aws/awsProvisionerTest.py +60 -7
  71. toil/test/provisioners/clusterTest.py +15 -2
  72. toil/test/provisioners/gceProvisionerTest.py +1 -1
  73. toil/test/server/serverTest.py +78 -36
  74. toil/test/src/autoDeploymentTest.py +2 -3
  75. toil/test/src/fileStoreTest.py +89 -87
  76. toil/test/utils/ABCWorkflowDebug/ABC.txt +1 -0
  77. toil/test/utils/ABCWorkflowDebug/debugWorkflow.py +4 -4
  78. toil/test/utils/toilKillTest.py +35 -28
  79. toil/test/wdl/md5sum/md5sum-gs.json +1 -1
  80. toil/test/wdl/md5sum/md5sum.json +1 -1
  81. toil/test/wdl/testfiles/read_file.wdl +18 -0
  82. toil/test/wdl/testfiles/url_to_optional_file.wdl +2 -1
  83. toil/test/wdl/wdltoil_test.py +171 -162
  84. toil/test/wdl/wdltoil_test_kubernetes.py +9 -0
  85. toil/utils/toilDebugFile.py +6 -3
  86. toil/utils/toilSshCluster.py +23 -0
  87. toil/utils/toilStats.py +17 -2
  88. toil/utils/toilUpdateEC2Instances.py +1 -0
  89. toil/version.py +10 -10
  90. toil/wdl/wdltoil.py +1179 -825
  91. toil/worker.py +16 -8
  92. {toil-8.2.0.dist-info → toil-9.1.0.dist-info}/METADATA +32 -32
  93. {toil-8.2.0.dist-info → toil-9.1.0.dist-info}/RECORD +97 -85
  94. {toil-8.2.0.dist-info → toil-9.1.0.dist-info}/WHEEL +1 -1
  95. toil/lib/iterables.py +0 -112
  96. toil/test/docs/scripts/stagingExampleFiles/in.txt +0 -1
  97. {toil-8.2.0.dist-info → toil-9.1.0.dist-info}/entry_points.txt +0 -0
  98. {toil-8.2.0.dist-info → toil-9.1.0.dist-info}/licenses/LICENSE +0 -0
  99. {toil-8.2.0.dist-info → toil-9.1.0.dist-info}/top_level.txt +0 -0

toil/test/src/fileStoreTest.py

@@ -22,6 +22,7 @@ import os
 import random
 import signal
 import stat
+import tempfile
 import time
 from abc import ABCMeta
 from struct import pack, unpack
@@ -83,15 +84,21 @@ class hidden:
         def setUp(self):
             super().setUp()
             self.work_dir = self._createTempDir()
-            self.options = Job.Runner.getDefaultOptions(self._getTestJobStore())
-            self.options.logLevel = "DEBUG"
-            self.options.realTimeLogging = True
-            self.options.workDir = self.work_dir
-            self.options.clean = "always"
-            self.options.logFile = os.path.join(self.work_dir, "logFile")
-
             self.tmp_dir = self._createTempDir()

+        def options(self):
+            """
+            Mint a fresh options object with default settings and a fresh jobstore.
+            """
+            options = Job.Runner.getDefaultOptions(self._getTestJobStore())
+            options.logLevel = "DEBUG"
+            options.realTimeLogging = True
+            options.workDir = self.work_dir
+            options.clean = "always"
+            log_fd, options.logFile = tempfile.mkstemp(dir=self.work_dir, prefix="logFile")
+            os.close(log_fd)
+            return options
+
         def create_file(self, content, executable=False):
             file_path = f"{self.tmp_dir}/{uuid4()}"

@@ -124,7 +131,7 @@ class hidden:
             A.addChild(C)
             B.addChild(D)
             C.addChild(D)
-            Job.Runner.startToil(A, self.options)
+            Job.Runner.startToil(A, self.options())

         @slow
         def testFileStoreLogging(self):
@@ -154,7 +161,7 @@ class hidden:

             F = Job.wrapJobFn(self._accessAndFail, disk="100M")
             try:
-                Job.Runner.startToil(F, self.options)
+                Job.Runner.startToil(F, self.options())
             except FailedJobsException:
                 # We expect this.
                 pass
@@ -190,7 +197,7 @@ class hidden:
                 numIters=30,
                 disk="2G",
             )
-            Job.Runner.startToil(F, self.options)
+            Job.Runner.startToil(F, self.options())

         @staticmethod
         def _testFileStoreOperations(job, nonLocalDir, numIters=100):
@@ -300,12 +307,13 @@ class hidden:
                 f"[executable: {executable}]\n"
                 f"[caching: {caching}]\n"
             ):
-                self.options.caching = caching
+                options = self.options()
+                options.caching = caching
                 read_write_job = Job.wrapJobFn(
                     self._testWriteReadGlobalFilePermissions,
                     executable=executable,
                 )
-                Job.Runner.startToil(read_write_job, self.options)
+                Job.Runner.startToil(read_write_job, options)

         @staticmethod
         def _testWriteReadGlobalFilePermissions(job, executable):
@@ -341,7 +349,7 @@ class hidden:
             export_file_job = Job.wrapJobFn(
                 self._testWriteExportFileCompatibility, executable=executable
             )
-            with Toil(self.options) as toil:
+            with Toil(self.options()) as toil:
                 initialPermissions, fileID = toil.start(export_file_job)
                 dstFile = os.path.join(self._createTempDir(), str(uuid4()))
                 toil.exportFile(fileID, "file://" + dstFile)
@@ -367,16 +375,17 @@ class hidden:
             Ensures that files imported to the leader preserve their executable permissions
             when they are read by the fileStore.
             """
-            with Toil(self.options) as toil:
-                for executable in True, False:
-                    file_path = self.create_file(content="Hello", executable=executable)
-                    initial_permissions = os.stat(file_path).st_mode & stat.S_IXUSR
-                    file_id = toil.importFile(f"file://{file_path}")
-                    for mutable in True, False:
-                        for symlink in True, False:
-                            with self.subTest(
-                                f"Now testing readGlobalFileWith: mutable={mutable} symlink={symlink}"
-                            ):
+
+            for executable in True, False:
+                file_path = self.create_file(content="Hello", executable=executable)
+                initial_permissions = os.stat(file_path).st_mode & stat.S_IXUSR
+                for mutable in True, False:
+                    for symlink in True, False:
+                        with self.subTest(
+                            f"Now testing readGlobalFileWith: mutable={mutable} symlink={symlink}"
+                        ):
+                            with Toil(self.options()) as toil:
+                                file_id = toil.importFile(f"file://{file_path}")
                                 A = Job.wrapJobFn(
                                     self._testImportReadFileCompatibility,
                                     fileID=file_id,
@@ -401,7 +410,7 @@ class hidden:
             """
             Checks if text mode is compatible with file streams.
             """
-            with Toil(self.options) as toil:
+            with Toil(self.options()) as toil:
                 A = Job.wrapJobFn(self._testReadWriteFileStreamTextMode)
                 toil.start(A)

@@ -446,9 +455,10 @@ class hidden:
             :class:toil.fileStores.CachingFileStore.
             """

-        def setUp(self):
-            super().setUp()
-            self.options.caching = False
+        def options(self):
+            options = super().options()
+            options.caching = False
+            return options

     class AbstractCachingFileStoreTest(AbstractFileStoreTest, metaclass=ABCMeta):
         """
@@ -456,9 +466,10 @@ class hidden:
             :class:toil.fileStores.cachingFileStore.CachingFileStore.
             """

-        def setUp(self):
-            super().setUp()
-            self.options.caching = True
+        def options(self):
+            options = super().options()
+            options.caching = True
+            return options

         @slow
         @pytest.mark.xfail(reason="Cannot succeed in time on small CI runners")
@@ -470,9 +481,6 @@ class hidden:
             """
             if testingIsAutomatic and self.jobStoreType != "file":
                 self.skipTest("To save time")
-            self.options.retryCount = 10
-            self.options.badWorker = 0.25
-            self.options.badWorkerFailInterval = 0.2
             for test in range(0, 20):
                 E = Job.wrapJobFn(self._uselessFunc)
                 F = Job.wrapJobFn(self._uselessFunc)
@@ -481,7 +489,11 @@ class hidden:
                 jobs[i] = Job.wrapJobFn(self._uselessFunc)
                 E.addChild(jobs[i])
                 jobs[i].addChild(F)
-            Job.Runner.startToil(E, self.options)
+            options = self.options()
+            options.retryCount = 10
+            options.badWorker = 0.25
+            options.badWorkerFailInterval = 0.2
+            Job.Runner.startToil(E, options)

         @slow
         def testCacheEvictionPartialEvict(self):
@@ -491,12 +503,6 @@ class hidden:
             A Third Job requests 10MB of disk requiring eviction of the 1st file. Ensure that the
             behavior is as expected.
             """
-            self._testValidityOfCacheEvictTest()
-
-            # Explicitly set clean to always so even the failed cases get cleaned (This will
-            # overwrite the value set in setUp if it is ever changed in the future)
-            self.options.clean = "always"
-
             self._testCacheEviction(file1MB=20, file2MB=30, diskRequestMB=10)

         @slow
@@ -507,12 +513,6 @@ class hidden:
             A Third Job requests 10MB of disk requiring eviction of the 1st file. Ensure that the
             behavior is as expected.
             """
-            self._testValidityOfCacheEvictTest()
-
-            # Explicitly set clean to always so even the failed cases get cleaned (This will
-            # overwrite the value set in setUp if it is ever changed in the future)
-            self.options.clean = "always"
-
             self._testCacheEviction(file1MB=20, file2MB=30, diskRequestMB=30)

         @slow
@@ -523,31 +523,25 @@ class hidden:
             A Third Job requests 10MB of disk requiring eviction of the 1st file. Ensure that the
             behavior is as expected.
             """
-            self._testValidityOfCacheEvictTest()
-
-            # Explicitly set clean to always so even the failed cases get cleaned (This will
-            # overwrite the value set in setUp if it is ever changed in the future)
-            self.options.clean = "always"
-
             self._testCacheEviction(file1MB=20, file2MB=30, diskRequestMB=60)

-        def _testValidityOfCacheEvictTest(self):
+        def _testValidityOfCacheEvictTest(self, options):
             # If the job store and cache are on the same file system, file
             # sizes are accounted for by the job store and are not reflected in
             # the cache hence this test is redundant (caching will be free).
-            if not self.options.jobStore.startswith(("aws", "google")):
-                workDirDev = os.stat(self.options.workDir).st_dev
-                if self.options.jobStore.startswith("file:"):
+            if not options.jobStore.startswith(("aws", "google")):
+                workDirDev = os.stat(options.workDir).st_dev
+                if options.jobStore.startswith("file:"):
                     # Before #4538, options.jobStore would have the raw path while the Config object would prepend the
                     # filesystem to the path (/path/to/file vs file:/path/to/file)
                     # The options namespace and the Config object now have the exact same behavior
                     # which means parse_jobstore will be called with argparse rather than with the config object
                     # so remove the prepended file: scheme
                     jobStoreDev = os.stat(
-                        os.path.dirname(self.options.jobStore[5:])
+                        os.path.dirname(options.jobStore[5:])
                     ).st_dev
                 else:
-                    jobStoreDev = os.stat(os.path.dirname(self.options.jobStore)).st_dev
+                    jobStoreDev = os.stat(os.path.dirname(options.jobStore)).st_dev
                 if workDirDev == jobStoreDev:
                     self.skipTest(
                         "Job store and working directory are on the same filesystem."
@@ -561,7 +555,9 @@ class hidden:
             or results in an error due to lack of space, respectively. Ensure that the behavior is
             as expected.
             """
-            self.options.retryCount = 0
+            options = self.options()
+            options.retryCount = 0
+            self._testValidityOfCacheEvictTest(options)
             if diskRequestMB > 50:
                 # This can be non int as it will never reach _probeJobReqs
                 expectedResult = "Fail"
@@ -599,9 +595,9 @@ class hidden:
                 D.addChild(E)
                 E.addChild(F)
                 F.addChild(G)
-                Job.Runner.startToil(A, self.options)
+                Job.Runner.startToil(A, options)
             except FailedJobsException as err:
-                with open(self.options.logFile) as f:
+                with open(options.logFile) as f:
                     logContents = f.read()
                 if CacheUnbalancedError.message in logContents:
                     self.assertEqual(expectedResult, "Fail")
@@ -742,8 +738,9 @@ class hidden:
             """
             print("Testing")
             logger.debug("Testing testing 123")
-            self.options.retryCount = 0
-            self.options.logLevel = "DEBUG"
+            options = self.options()
+            options.retryCount = 0
+            options.logLevel = "DEBUG"
             A = Job.wrapJobFn(self._adjustCacheLimit, newTotalMB=1024, disk="1G")
             B = Job.wrapJobFn(self._doubleWriteFileToJobStore, fileMB=850, disk="900M")
             C = Job.wrapJobFn(
@@ -754,7 +751,7 @@ class hidden:
             A.addChild(B)
             B.addChild(C)
             C.addChild(D)
-            Job.Runner.startToil(A, self.options)
+            Job.Runner.startToil(A, options)

         @staticmethod
         def _doubleWriteFileToJobStore(job, fileMB):
@@ -809,7 +806,7 @@ class hidden:
                 isLocalFile=False,
                 nonLocalDir=workdir,
             )
-            Job.Runner.startToil(A, self.options)
+            Job.Runner.startToil(A, self.options())

         def testWriteLocalFileToJobStore(self):
             """
@@ -817,7 +814,7 @@ class hidden:
             default. Ensure the file is cached.
             """
             A = Job.wrapJobFn(self._writeFileToJobStoreWithAsserts, isLocalFile=True)
-            Job.Runner.startToil(A, self.options)
+            Job.Runner.startToil(A, self.options())

         # readGlobalFile tests

@@ -854,7 +851,7 @@ class hidden:
                 fsID=A.rv(),
             )
             A.addChild(B)
-            Job.Runner.startToil(A, self.options)
+            Job.Runner.startToil(A, self.options())

         @staticmethod
         def _readFromJobStore(job, isCachedFile, cacheReadFile, fsID, isTest=True):
@@ -927,7 +924,7 @@ class hidden:
                 fsID=A.rv(),
             )
             A.addChild(B)
-            Job.Runner.startToil(A, self.options)
+            Job.Runner.startToil(A, self.options())

         @slow
         def testMultipleJobsReadSameCacheHitGlobalFile(self):
@@ -980,7 +977,7 @@ class hidden:
                 )
                 A.addChild(jobs[i])
                 jobs[i].addChild(B)
-            Job.Runner.startToil(A, self.options)
+            Job.Runner.startToil(A, self.options())
             with open(file_name) as y:
                 # At least one job at a time should have been observed.
                 # We can't actually guarantee that any of our jobs will
@@ -1065,7 +1062,7 @@ class hidden:
             # Tests that files written to job store can be immediately exported
             # motivated by https://github.com/BD2KGenomics/toil/issues/1469
             root = Job.wrapJobFn(self._writeExportGlobalFile)
-            Job.Runner.startToil(root, self.options)
+            Job.Runner.startToil(root, self.options())

         # Testing for the return of file sizes to the sigma job pool.
         @slow
@@ -1083,7 +1080,7 @@ class hidden:
                 nonLocalDir=workdir,
                 disk="2Gi",
             )
-            Job.Runner.startToil(F, self.options)
+            Job.Runner.startToil(F, self.options())

         @slow
         def testReturnFileSizesWithBadWorker(self):
@@ -1092,9 +1089,10 @@ class hidden:
             Read back written and locally deleted files. Ensure that after
             every step that the cache is in a valid state.
             """
-            self.options.retryCount = 20
-            self.options.badWorker = 0.5
-            self.options.badWorkerFailInterval = 0.1
+            options = self.options()
+            options.retryCount = 20
+            options.badWorker = 0.5
+            options.badWorkerFailInterval = 0.1
             workdir = self._createTempDir(purpose="nonLocalDir")
             F = Job.wrapJobFn(
                 self._returnFileTestFn,
@@ -1104,7 +1102,7 @@ class hidden:
                 numIters=30,
                 disk="2Gi",
             )
-            Job.Runner.startToil(F, self.options)
+            Job.Runner.startToil(F, options)

         @staticmethod
         def _returnFileTestFn(
@@ -1350,7 +1348,8 @@ class hidden:
             tracking values in the cache state file appropriately.
             """
             workdir = self._createTempDir(purpose="nonLocalDir")
-            self.options.retryCount = 1
+            options = self.options()
+            options.retryCount = 1
             jobDiskBytes = 2 * 1024 * 1024 * 1024
             F = Job.wrapJobFn(
                 self._controlledFailTestFn,
@@ -1360,7 +1359,7 @@ class hidden:
             )
             G = Job.wrapJobFn(self._probeJobReqs, sigmaJob=100, disk="100Mi")
             F.addChild(G)
-            Job.Runner.startToil(F, self.options)
+            Job.Runner.startToil(F, options)

         @staticmethod
         def _controlledFailTestFn(job, jobDisk, testDir):
@@ -1420,7 +1419,8 @@ class hidden:
             self._deleteLocallyReadFilesFn(readAsMutable=False)

         def _deleteLocallyReadFilesFn(self, readAsMutable):
-            self.options.retryCount = 0
+            options = self.options()
+            options.retryCount = 0
             A = Job.wrapJobFn(
                 self._writeFileToJobStoreWithAsserts, isLocalFile=True, memory="10M"
             )
@@ -1431,7 +1431,7 @@ class hidden:
                 memory="20M",
             )
             A.addChild(B)
-            Job.Runner.startToil(A, self.options)
+            Job.Runner.startToil(A, options)

         @staticmethod
         def _removeReadFileFn(job, fileToDelete, readAsMutable):
@@ -1450,20 +1450,20 @@ class hidden:
             outfile = job.fileStore.readGlobalFile(
                 fileToDelete, os.path.join(work_dir, "temp"), mutable=readAsMutable
             )
-            tempfile = os.path.join(work_dir, "tmp.tmp")
+            temp_file = os.path.join(work_dir, "tmp.tmp")
             # The first time we run this loop, processsingReadFile is True and fileToDelete is the
             # file read from the job store. The second time, processsingReadFile is False and
             # fileToDelete is one that was just written in to the job store. Ensure the correct
             # behaviour is seen in both conditions.
             while True:
-                os.rename(outfile, tempfile)
+                os.rename(outfile, temp_file)
                 try:
                     job.fileStore.deleteLocalFile(fileToDelete)
                 except IllegalDeletionCacheError:
                     job.fileStore.log_to_leader(
                         "Detected a deleted file %s." % fileToDelete
                     )
-                    os.rename(tempfile, outfile)
+                    os.rename(temp_file, outfile)
                 else:
                     # If we are processing the write test, or if we are testing the immutably read
                     # file, we should not reach here.
@@ -1482,10 +1482,11 @@ class hidden:
             """
             Test the deletion capabilities of deleteLocalFile
             """
-            self.options.retryCount = 0
+            options = self.options()
+            options.retryCount = 0
             workdir = self._createTempDir(purpose="nonLocalDir")
             A = Job.wrapJobFn(self._deleteLocalFileFn, nonLocalDir=workdir)
-            Job.Runner.startToil(A, self.options)
+            Job.Runner.startToil(A, options)

         @staticmethod
         def _deleteLocalFileFn(job, nonLocalDir):
@@ -1549,8 +1550,9 @@ class hidden:
             Test many simultaneous read attempts on a file created via a stream
             directly to the job store.
             """
-            self.options.retryCount = 0
-            self.options.disableChaining = True
+            options = self.options()
+            options.retryCount = 0
+            options.disableChaining = True

             # Make a file
             parent = Job.wrapJobFn(self._createUncachedFileStream)
@@ -1558,7 +1560,7 @@ class hidden:
             for i in range(30):
                 parent.addChildJobFn(self._readFileWithDelay, parent.rv())

-            Job.Runner.startToil(parent, self.options)
+            Job.Runner.startToil(parent, options)

         @staticmethod
         def _createUncachedFileStream(job):
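
The fileStoreTest.py changes above all follow one refactor: the shared self.options namespace built in setUp() (and then mutated by individual tests) is replaced by an options() factory that mints a fresh options object, with a unique log file, for every workflow run, so tests can no longer leak settings into each other. A minimal standalone sketch of the pattern, with illustrative names of our own (argparse.Namespace stands in for Job.Runner.getDefaultOptions()):

    import argparse
    import os
    import tempfile
    import unittest


    class WorkflowTestCase(unittest.TestCase):
        """Illustrative only; not a Toil API."""

        def setUp(self):
            # setUp keeps only state that is safe to share between tests.
            self.work_dir = tempfile.mkdtemp(prefix="work_")

        def options(self):
            # Every call returns an independent namespace, so a test that
            # mutates e.g. retryCount can no longer leak into another test.
            options = argparse.Namespace()
            options.logLevel = "DEBUG"
            options.workDir = self.work_dir
            # A unique log file per run avoids collisions when one test
            # starts several workflows, as the diff does with mkstemp().
            log_fd, options.logFile = tempfile.mkstemp(
                dir=self.work_dir, prefix="logFile"
            )
            os.close(log_fd)
            return options

        def test_runs_do_not_share_state(self):
            a, b = self.options(), self.options()
            a.retryCount = 10
            self.assertFalse(hasattr(b, "retryCount"))
            self.assertNotEqual(a.logFile, b.logFile)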

toil/test/utils/ABCWorkflowDebug/ABC.txt

@@ -0,0 +1 @@
+ABC

toil/test/utils/ABCWorkflowDebug/debugWorkflow.py

@@ -158,6 +158,7 @@ def broken_job(job, num):

 if __name__ == "__main__":
     jobStorePath = sys.argv[1] if len(sys.argv) > 1 else mkdtemp("debugWorkflow")
+    tmp: str = mkdtemp("debugWorkflow_tmp")
     options = Job.Runner.getDefaultOptions(jobStorePath)
     options.clean = "never"
     options.stats = True
@@ -178,10 +179,9 @@ if __name__ == "__main__":
     job1 = Job.wrapJobFn(writeA, file_maker)
     job2 = Job.wrapJobFn(writeB, file_maker, B_file)
     job3 = Job.wrapJobFn(writeC)
-    with get_data("test/utils/ABCWorkflowDebug/ABC.txt") as filepath:
-        job4 = Job.wrapJobFn(
-            writeABC, job1.rv(), job2.rv(), job3.rv(), str(filepath)
-        )
+    job4 = Job.wrapJobFn(
+        writeABC, job1.rv(), job2.rv(), job3.rv(), os.path.join(tmp, "ABC.txt")
+    )
     job5 = Job.wrapJobFn(finalize_jobs, 1)
     job6 = Job.wrapJobFn(finalize_jobs, 2)
     job7 = Job.wrapJobFn(finalize_jobs, 3)

toil/test/utils/toilKillTest.py

@@ -47,34 +47,40 @@ class _ToilKillTest:
             str(input_file),
         ]
         kill_cmd = ["toil", "kill", job_store]
-
-        # run the sleep workflow
-        logger.info("Running workflow: %s", " ".join(run_cmd))
-        cwl_process = subprocess.Popen(run_cmd)
-
-        # wait until workflow starts running
-        while True:
-            assert (
-                cwl_process.poll() is None
-            ), "toil-cwl-runner finished too soon"
-            try:
-                job_store_real = Toil.resumeJobStore(job_store)
-                job_store_real.read_leader_pid()
-                # pid file exists, now wait for the kill flag to exist
-                if not job_store_real.read_kill_flag():
-                    # kill flag exists to be deleted to kill the leader
-                    break
-                else:
-                    logger.info("Waiting for kill flag...")
-            except (NoSuchJobStoreException, NoSuchFileException):
-                logger.info("Waiting for job store to be openable...")
-            time.sleep(2)
-
-        # run toil kill
-        subprocess.check_call(kill_cmd)
-
-        # after toil kill succeeds, the workflow should've exited
-        assert cwl_process.poll() is None
+        clean_cmd = ["toil", "clean", job_store]
+
+        try:
+            # run the sleep workflow
+            logger.info("Running workflow: %s", " ".join(run_cmd))
+            cwl_process = subprocess.Popen(run_cmd)
+
+            # wait until workflow starts running
+            while True:
+                assert (
+                    cwl_process.poll() is None
+                ), "toil-cwl-runner finished too soon"
+                try:
+                    job_store_real = Toil.resumeJobStore(job_store)
+                    job_store_real.read_leader_pid()
+                    # pid file exists, now wait for the kill flag to exist
+                    if not job_store_real.read_kill_flag():
+                        # kill flag exists to be deleted to kill the leader
+                        break
+                    else:
+                        logger.info("Waiting for kill flag...")
+                except (NoSuchJobStoreException, NoSuchFileException):
+                    logger.info("Waiting for job store to be openable...")
+                time.sleep(2)
+
+            # run toil kill
+            subprocess.check_call(kill_cmd)
+
+            # after toil kill succeeds, the workflow should've exited
+            assert cwl_process.poll() is None
+        finally:
+            # Clean up the job store since the workflow won't do it
+            # since it got killed.
+            subprocess.check_call(clean_cmd)


 class TestToilKill(_ToilKillTest):
@@ -91,6 +97,7 @@ class TestToilKillWithAWSJobStore(_ToilKillTest):
     """A set of test cases for "toil kill" using the AWS job store."""

     @needs_cwl
+    @needs_aws_s3
     def test_cwl_toil_kill(self) -> None:
         """Test "toil kill" on a CWL workflow with a 100 second sleep."""
         self._test_cwl_toil_kill(generate_locator("aws", decoration="testkill"))
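
The toilKillTest.py rewrite moves the whole run-then-kill sequence into a try/finally: a killed workflow never reaches its own cleanup, so without the finally block a failing assertion would leak the job store (costly for the AWS-backed case now gated by @needs_aws_s3). Reduced to its essentials, the shape is roughly this; the polling helper below is a hypothetical stand-in for the Toil.resumeJobStore() loop above:

    import subprocess
    import time


    def wait_until_killable(job_store: str) -> None:
        # Hypothetical stand-in for the loop above, which polls
        # Toil.resumeJobStore() until the leader PID and kill flag exist.
        time.sleep(2)


    def run_and_kill(run_cmd: list[str], job_store: str) -> None:
        workflow = subprocess.Popen(run_cmd)
        try:
            wait_until_killable(job_store)
            # "toil kill" and "toil clean" are real Toil subcommands,
            # used exactly this way in the diff above.
            subprocess.check_call(["toil", "kill", job_store])
            workflow.wait()
        finally:
            # A killed workflow never runs its own cleanup, so always remove
            # the job store, even when an assertion earlier in the test fails.
            subprocess.check_call(["toil", "clean", job_store])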

toil/test/wdl/md5sum/md5sum-gs.json

@@ -1 +1 @@
-{"ga4ghMd5.inputFile": "gs://broad-public-datasets/NA12878/NA12878.cram.crai"}
+{"ga4ghMd5.inputFile": "gs://gcp-public-data-landsat/LT04/01/003/027/LT04_L1TP_003027_19830202_20170220_01_T1/README.GTF"}

toil/test/wdl/md5sum/md5sum.json

@@ -1 +1 @@
-{"ga4ghMd5.inputFile": "src/toil/test/wdl/md5sum/md5sum.input"}
+{"ga4ghMd5.inputFile": "md5sum.input"}

toil/test/wdl/testfiles/read_file.wdl

@@ -0,0 +1,18 @@
+version 1.0
+
+# Workflow to read a file from a string path
+
+workflow read_file {
+
+    input {
+        String input_string
+    }
+
+    Array[String] the_lines = read_lines(input_string)
+
+    output {
+        Array[String] lines = the_lines
+        File remade_file = write_lines(the_lines)
+    }
+
+}
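
read_file.wdl is a new test fixture that feeds a plain String path to read_lines(). One plausible way to drive it by hand, assuming the usual workflow_name.input_name inputs convention (the same one md5sum.json uses above) and invented temporary file paths:

    import json
    import subprocess
    import tempfile

    # Write a small line file and a matching inputs JSON (paths invented).
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as lines:
        lines.write("first line\nsecond line\n")
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as inputs:
        json.dump({"read_file.input_string": lines.name}, inputs)

    # toil-wdl-runner takes the WDL file and an inputs JSON as arguments.
    subprocess.check_call(
        ["toil-wdl-runner", "toil/test/wdl/testfiles/read_file.wdl", inputs.name]
    )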

toil/test/wdl/testfiles/url_to_optional_file.wdl

@@ -3,9 +3,10 @@ version 1.0
 workflow url_to_optional_file {
     input {
         Int http_code = 404
+        String base_url = "https://httpstat.us/"
     }

-    File? the_file = "https://httpstat.us/" + http_code
+    File? the_file = base_url + http_code

     output {
         File? out_file = the_file
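
Making base_url an input (with the old httpstat.us literal as its default) keeps existing behavior while letting a test point the workflow at any server that returns the desired status code, e.g. with inputs such as {"url_to_optional_file.base_url": "http://localhost:8080/", "url_to_optional_file.http_code": 200} (host and port invented), avoiding a hard dependency on an external service.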