toil 8.1.0b1__py3-none-any.whl → 9.0.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
Files changed (275)
  1. toil/__init__.py +0 -35
  2. toil/batchSystems/abstractBatchSystem.py +1 -1
  3. toil/batchSystems/abstractGridEngineBatchSystem.py +1 -1
  4. toil/batchSystems/awsBatch.py +1 -1
  5. toil/batchSystems/cleanup_support.py +1 -1
  6. toil/batchSystems/kubernetes.py +53 -7
  7. toil/batchSystems/local_support.py +1 -1
  8. toil/batchSystems/mesos/batchSystem.py +13 -8
  9. toil/batchSystems/mesos/test/__init__.py +3 -2
  10. toil/batchSystems/registry.py +15 -118
  11. toil/batchSystems/singleMachine.py +1 -1
  12. toil/batchSystems/slurm.py +27 -26
  13. toil/bus.py +5 -3
  14. toil/common.py +59 -12
  15. toil/cwl/cwltoil.py +81 -38
  16. toil/cwl/utils.py +103 -3
  17. toil/job.py +64 -49
  18. toil/jobStores/abstractJobStore.py +35 -239
  19. toil/jobStores/aws/jobStore.py +2 -1
  20. toil/jobStores/fileJobStore.py +27 -2
  21. toil/jobStores/googleJobStore.py +110 -33
  22. toil/leader.py +9 -0
  23. toil/lib/accelerators.py +4 -2
  24. toil/lib/aws/utils.py.orig +504 -0
  25. toil/lib/bioio.py +1 -1
  26. toil/lib/docker.py +252 -91
  27. toil/lib/dockstore.py +11 -3
  28. toil/lib/exceptions.py +5 -3
  29. toil/lib/generatedEC2Lists.py +81 -19
  30. toil/lib/history.py +87 -13
  31. toil/lib/history_submission.py +23 -9
  32. toil/lib/io.py +34 -22
  33. toil/lib/misc.py +8 -2
  34. toil/lib/plugins.py +106 -0
  35. toil/lib/resources.py +2 -1
  36. toil/lib/threading.py +11 -10
  37. toil/lib/url.py +320 -0
  38. toil/options/common.py +8 -0
  39. toil/options/cwl.py +13 -1
  40. toil/options/runner.py +17 -10
  41. toil/options/wdl.py +22 -0
  42. toil/provisioners/aws/awsProvisioner.py +25 -2
  43. toil/server/api_spec/LICENSE +201 -0
  44. toil/server/api_spec/README.rst +5 -0
  45. toil/server/app.py +12 -6
  46. toil/server/cli/wes_cwl_runner.py +3 -2
  47. toil/server/wes/abstract_backend.py +21 -43
  48. toil/server/wes/toil_backend.py +2 -2
  49. toil/test/__init__.py +275 -115
  50. toil/test/batchSystems/batchSystemTest.py +228 -213
  51. toil/test/batchSystems/batch_system_plugin_test.py +7 -0
  52. toil/test/batchSystems/test_slurm.py +27 -0
  53. toil/test/cactus/pestis.tar.gz +0 -0
  54. toil/test/conftest.py +7 -0
  55. toil/test/cwl/2.fasta +11 -0
  56. toil/test/cwl/2.fastq +12 -0
  57. toil/test/cwl/conftest.py +1 -1
  58. toil/test/cwl/cwlTest.py +1175 -870
  59. toil/test/cwl/directory/directory/file.txt +15 -0
  60. toil/test/cwl/download_directory_file.json +4 -0
  61. toil/test/cwl/download_directory_s3.json +4 -0
  62. toil/test/cwl/download_file.json +6 -0
  63. toil/test/cwl/download_http.json +6 -0
  64. toil/test/cwl/download_https.json +6 -0
  65. toil/test/cwl/download_s3.json +6 -0
  66. toil/test/cwl/download_subdirectory_file.json +5 -0
  67. toil/test/cwl/download_subdirectory_s3.json +5 -0
  68. toil/test/cwl/empty.json +1 -0
  69. toil/test/cwl/mock_mpi/fake_mpi.yml +8 -0
  70. toil/test/cwl/mock_mpi/fake_mpi_run.py +42 -0
  71. toil/test/cwl/optional-file-exists.json +6 -0
  72. toil/test/cwl/optional-file-missing.json +6 -0
  73. toil/test/cwl/preemptible_expression.json +1 -0
  74. toil/test/cwl/revsort-job-missing.json +6 -0
  75. toil/test/cwl/revsort-job.json +6 -0
  76. toil/test/cwl/s3_secondary_file.json +16 -0
  77. toil/test/cwl/seqtk_seq_job.json +6 -0
  78. toil/test/cwl/stream.json +6 -0
  79. toil/test/cwl/test_filename_conflict_resolution.ms/table.dat +0 -0
  80. toil/test/cwl/test_filename_conflict_resolution.ms/table.f0 +0 -0
  81. toil/test/cwl/test_filename_conflict_resolution.ms/table.f1 +0 -0
  82. toil/test/cwl/test_filename_conflict_resolution.ms/table.f1i +0 -0
  83. toil/test/cwl/test_filename_conflict_resolution.ms/table.f2 +0 -0
  84. toil/test/cwl/test_filename_conflict_resolution.ms/table.f2_TSM0 +0 -0
  85. toil/test/cwl/test_filename_conflict_resolution.ms/table.f3 +0 -0
  86. toil/test/cwl/test_filename_conflict_resolution.ms/table.f3_TSM0 +0 -0
  87. toil/test/cwl/test_filename_conflict_resolution.ms/table.f4 +0 -0
  88. toil/test/cwl/test_filename_conflict_resolution.ms/table.f4_TSM0 +0 -0
  89. toil/test/cwl/test_filename_conflict_resolution.ms/table.f5 +0 -0
  90. toil/test/cwl/test_filename_conflict_resolution.ms/table.info +0 -0
  91. toil/test/cwl/test_filename_conflict_resolution.ms/table.lock +0 -0
  92. toil/test/cwl/whale.txt +16 -0
  93. toil/test/docs/scripts/example_alwaysfail.py +38 -0
  94. toil/test/docs/scripts/example_alwaysfail_with_files.wdl +33 -0
  95. toil/test/docs/scripts/example_cachingbenchmark.py +117 -0
  96. toil/test/docs/scripts/stagingExampleFiles/in.txt +1 -0
  97. toil/test/docs/scripts/stagingExampleFiles/out.txt +2 -0
  98. toil/test/docs/scripts/tutorial_arguments.py +23 -0
  99. toil/test/docs/scripts/tutorial_debugging.patch +12 -0
  100. toil/test/docs/scripts/tutorial_debugging_hangs.wdl +126 -0
  101. toil/test/docs/scripts/tutorial_debugging_works.wdl +129 -0
  102. toil/test/docs/scripts/tutorial_docker.py +20 -0
  103. toil/test/docs/scripts/tutorial_dynamic.py +24 -0
  104. toil/test/docs/scripts/tutorial_encapsulation.py +28 -0
  105. toil/test/docs/scripts/tutorial_encapsulation2.py +29 -0
  106. toil/test/docs/scripts/tutorial_helloworld.py +15 -0
  107. toil/test/docs/scripts/tutorial_invokeworkflow.py +27 -0
  108. toil/test/docs/scripts/tutorial_invokeworkflow2.py +30 -0
  109. toil/test/docs/scripts/tutorial_jobfunctions.py +22 -0
  110. toil/test/docs/scripts/tutorial_managing.py +29 -0
  111. toil/test/docs/scripts/tutorial_managing2.py +56 -0
  112. toil/test/docs/scripts/tutorial_multiplejobs.py +25 -0
  113. toil/test/docs/scripts/tutorial_multiplejobs2.py +21 -0
  114. toil/test/docs/scripts/tutorial_multiplejobs3.py +22 -0
  115. toil/test/docs/scripts/tutorial_promises.py +25 -0
  116. toil/test/docs/scripts/tutorial_promises2.py +30 -0
  117. toil/test/docs/scripts/tutorial_quickstart.py +22 -0
  118. toil/test/docs/scripts/tutorial_requirements.py +44 -0
  119. toil/test/docs/scripts/tutorial_services.py +45 -0
  120. toil/test/docs/scripts/tutorial_staging.py +45 -0
  121. toil/test/docs/scripts/tutorial_stats.py +64 -0
  122. toil/test/docs/scriptsTest.py +2 -1
  123. toil/test/lib/aws/test_iam.py +3 -1
  124. toil/test/lib/dockerTest.py +205 -122
  125. toil/test/lib/test_history.py +101 -77
  126. toil/test/lib/test_url.py +69 -0
  127. toil/test/lib/url_plugin_test.py +105 -0
  128. toil/test/provisioners/aws/awsProvisionerTest.py +13 -10
  129. toil/test/provisioners/clusterTest.py +17 -4
  130. toil/test/provisioners/gceProvisionerTest.py +17 -15
  131. toil/test/server/serverTest.py +78 -36
  132. toil/test/sort/sort.py +4 -1
  133. toil/test/src/busTest.py +17 -17
  134. toil/test/src/deferredFunctionTest.py +145 -132
  135. toil/test/src/importExportFileTest.py +71 -63
  136. toil/test/src/jobEncapsulationTest.py +27 -28
  137. toil/test/src/jobServiceTest.py +149 -133
  138. toil/test/src/jobTest.py +219 -211
  139. toil/test/src/miscTests.py +66 -60
  140. toil/test/src/promisedRequirementTest.py +163 -169
  141. toil/test/src/regularLogTest.py +24 -24
  142. toil/test/src/resourceTest.py +82 -76
  143. toil/test/src/restartDAGTest.py +51 -47
  144. toil/test/src/resumabilityTest.py +24 -19
  145. toil/test/src/retainTempDirTest.py +60 -57
  146. toil/test/src/systemTest.py +17 -13
  147. toil/test/src/threadingTest.py +29 -32
  148. toil/test/utils/ABCWorkflowDebug/B_file.txt +1 -0
  149. toil/test/utils/ABCWorkflowDebug/debugWorkflow.py +204 -0
  150. toil/test/utils/ABCWorkflowDebug/mkFile.py +16 -0
  151. toil/test/utils/ABCWorkflowDebug/sleep.cwl +12 -0
  152. toil/test/utils/ABCWorkflowDebug/sleep.yaml +1 -0
  153. toil/test/utils/toilDebugTest.py +117 -102
  154. toil/test/utils/toilKillTest.py +54 -53
  155. toil/test/utils/utilsTest.py +303 -229
  156. toil/test/wdl/lint_error.wdl +9 -0
  157. toil/test/wdl/md5sum/empty_file.json +1 -0
  158. toil/test/wdl/md5sum/md5sum-gs.json +1 -0
  159. toil/test/wdl/md5sum/md5sum.1.0.wdl +32 -0
  160. toil/test/wdl/md5sum/md5sum.input +1 -0
  161. toil/test/wdl/md5sum/md5sum.json +1 -0
  162. toil/test/wdl/md5sum/md5sum.wdl +25 -0
  163. toil/test/wdl/miniwdl_self_test/inputs-namespaced.json +1 -0
  164. toil/test/wdl/miniwdl_self_test/inputs.json +1 -0
  165. toil/test/wdl/miniwdl_self_test/self_test.wdl +40 -0
  166. toil/test/wdl/standard_library/as_map.json +16 -0
  167. toil/test/wdl/standard_library/as_map_as_input.wdl +23 -0
  168. toil/test/wdl/standard_library/as_pairs.json +7 -0
  169. toil/test/wdl/standard_library/as_pairs_as_input.wdl +23 -0
  170. toil/test/wdl/standard_library/ceil.json +3 -0
  171. toil/test/wdl/standard_library/ceil_as_command.wdl +16 -0
  172. toil/test/wdl/standard_library/ceil_as_input.wdl +16 -0
  173. toil/test/wdl/standard_library/collect_by_key.json +1 -0
  174. toil/test/wdl/standard_library/collect_by_key_as_input.wdl +23 -0
  175. toil/test/wdl/standard_library/cross.json +11 -0
  176. toil/test/wdl/standard_library/cross_as_input.wdl +19 -0
  177. toil/test/wdl/standard_library/flatten.json +7 -0
  178. toil/test/wdl/standard_library/flatten_as_input.wdl +18 -0
  179. toil/test/wdl/standard_library/floor.json +3 -0
  180. toil/test/wdl/standard_library/floor_as_command.wdl +16 -0
  181. toil/test/wdl/standard_library/floor_as_input.wdl +16 -0
  182. toil/test/wdl/standard_library/keys.json +8 -0
  183. toil/test/wdl/standard_library/keys_as_input.wdl +24 -0
  184. toil/test/wdl/standard_library/length.json +7 -0
  185. toil/test/wdl/standard_library/length_as_input.wdl +16 -0
  186. toil/test/wdl/standard_library/length_as_input_with_map.json +7 -0
  187. toil/test/wdl/standard_library/length_as_input_with_map.wdl +17 -0
  188. toil/test/wdl/standard_library/length_invalid.json +3 -0
  189. toil/test/wdl/standard_library/range.json +3 -0
  190. toil/test/wdl/standard_library/range_0.json +3 -0
  191. toil/test/wdl/standard_library/range_as_input.wdl +17 -0
  192. toil/test/wdl/standard_library/range_invalid.json +3 -0
  193. toil/test/wdl/standard_library/read_boolean.json +3 -0
  194. toil/test/wdl/standard_library/read_boolean_as_command.wdl +17 -0
  195. toil/test/wdl/standard_library/read_float.json +3 -0
  196. toil/test/wdl/standard_library/read_float_as_command.wdl +17 -0
  197. toil/test/wdl/standard_library/read_int.json +3 -0
  198. toil/test/wdl/standard_library/read_int_as_command.wdl +17 -0
  199. toil/test/wdl/standard_library/read_json.json +3 -0
  200. toil/test/wdl/standard_library/read_json_as_output.wdl +31 -0
  201. toil/test/wdl/standard_library/read_lines.json +3 -0
  202. toil/test/wdl/standard_library/read_lines_as_output.wdl +31 -0
  203. toil/test/wdl/standard_library/read_map.json +3 -0
  204. toil/test/wdl/standard_library/read_map_as_output.wdl +31 -0
  205. toil/test/wdl/standard_library/read_string.json +3 -0
  206. toil/test/wdl/standard_library/read_string_as_command.wdl +17 -0
  207. toil/test/wdl/standard_library/read_tsv.json +3 -0
  208. toil/test/wdl/standard_library/read_tsv_as_output.wdl +31 -0
  209. toil/test/wdl/standard_library/round.json +3 -0
  210. toil/test/wdl/standard_library/round_as_command.wdl +16 -0
  211. toil/test/wdl/standard_library/round_as_input.wdl +16 -0
  212. toil/test/wdl/standard_library/size.json +3 -0
  213. toil/test/wdl/standard_library/size_as_command.wdl +17 -0
  214. toil/test/wdl/standard_library/size_as_output.wdl +36 -0
  215. toil/test/wdl/standard_library/stderr.json +3 -0
  216. toil/test/wdl/standard_library/stderr_as_output.wdl +30 -0
  217. toil/test/wdl/standard_library/stdout.json +3 -0
  218. toil/test/wdl/standard_library/stdout_as_output.wdl +30 -0
  219. toil/test/wdl/standard_library/sub.json +3 -0
  220. toil/test/wdl/standard_library/sub_as_input.wdl +17 -0
  221. toil/test/wdl/standard_library/sub_as_input_with_file.wdl +17 -0
  222. toil/test/wdl/standard_library/transpose.json +6 -0
  223. toil/test/wdl/standard_library/transpose_as_input.wdl +18 -0
  224. toil/test/wdl/standard_library/write_json.json +6 -0
  225. toil/test/wdl/standard_library/write_json_as_command.wdl +17 -0
  226. toil/test/wdl/standard_library/write_lines.json +7 -0
  227. toil/test/wdl/standard_library/write_lines_as_command.wdl +17 -0
  228. toil/test/wdl/standard_library/write_map.json +6 -0
  229. toil/test/wdl/standard_library/write_map_as_command.wdl +17 -0
  230. toil/test/wdl/standard_library/write_tsv.json +6 -0
  231. toil/test/wdl/standard_library/write_tsv_as_command.wdl +17 -0
  232. toil/test/wdl/standard_library/zip.json +12 -0
  233. toil/test/wdl/standard_library/zip_as_input.wdl +19 -0
  234. toil/test/wdl/test.csv +3 -0
  235. toil/test/wdl/test.tsv +3 -0
  236. toil/test/wdl/testfiles/croo.wdl +38 -0
  237. toil/test/wdl/testfiles/drop_files.wdl +62 -0
  238. toil/test/wdl/testfiles/drop_files_subworkflow.wdl +13 -0
  239. toil/test/wdl/testfiles/empty.txt +0 -0
  240. toil/test/wdl/testfiles/not_enough_outputs.wdl +33 -0
  241. toil/test/wdl/testfiles/random.wdl +66 -0
  242. toil/test/wdl/testfiles/read_file.wdl +18 -0
  243. toil/test/wdl/testfiles/string_file_coercion.json +1 -0
  244. toil/test/wdl/testfiles/string_file_coercion.wdl +35 -0
  245. toil/test/wdl/testfiles/test.json +4 -0
  246. toil/test/wdl/testfiles/test_boolean.txt +1 -0
  247. toil/test/wdl/testfiles/test_float.txt +1 -0
  248. toil/test/wdl/testfiles/test_int.txt +1 -0
  249. toil/test/wdl/testfiles/test_lines.txt +5 -0
  250. toil/test/wdl/testfiles/test_map.txt +2 -0
  251. toil/test/wdl/testfiles/test_string.txt +1 -0
  252. toil/test/wdl/testfiles/url_to_file.wdl +13 -0
  253. toil/test/wdl/testfiles/url_to_optional_file.wdl +14 -0
  254. toil/test/wdl/testfiles/vocab.json +1 -0
  255. toil/test/wdl/testfiles/vocab.wdl +66 -0
  256. toil/test/wdl/testfiles/wait.wdl +34 -0
  257. toil/test/wdl/wdl_specification/type_pair.json +23 -0
  258. toil/test/wdl/wdl_specification/type_pair_basic.wdl +36 -0
  259. toil/test/wdl/wdl_specification/type_pair_with_files.wdl +36 -0
  260. toil/test/wdl/wdl_specification/v1_spec.json +1 -0
  261. toil/test/wdl/wdl_specification/v1_spec_declaration.wdl +39 -0
  262. toil/test/wdl/wdltoil_test.py +751 -529
  263. toil/test/wdl/wdltoil_test_kubernetes.py +2 -2
  264. toil/utils/toilSshCluster.py +23 -0
  265. toil/utils/toilUpdateEC2Instances.py +1 -0
  266. toil/version.py +5 -5
  267. toil/wdl/wdltoil.py +518 -437
  268. toil/worker.py +11 -6
  269. {toil-8.1.0b1.dist-info → toil-9.0.0.dist-info}/METADATA +25 -24
  270. toil-9.0.0.dist-info/RECORD +444 -0
  271. {toil-8.1.0b1.dist-info → toil-9.0.0.dist-info}/WHEEL +1 -1
  272. toil-8.1.0b1.dist-info/RECORD +0 -259
  273. {toil-8.1.0b1.dist-info → toil-9.0.0.dist-info}/entry_points.txt +0 -0
  274. {toil-8.1.0b1.dist-info → toil-9.0.0.dist-info/licenses}/LICENSE +0 -0
  275. {toil-8.1.0b1.dist-info → toil-9.0.0.dist-info}/top_level.txt +0 -0
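
The hunk-level diff that follows appears to come from a single file in this listing, toil/test/batchSystems/batchSystemTest.py (entry 50, +228 -213), judging by the class and function names in the hunks. If you want to reproduce the file-level comparison above yourself, the following is a rough sketch using only the Python standard library; the local wheel filenames are assumptions, and a real diff service may categorize changes differently.

    # Sketch: compare the member lists of two downloaded wheels.
    # The filenames below are assumed, not taken from this page.
    import zipfile

    old_wheel = "toil-8.1.0b1-py3-none-any.whl"
    new_wheel = "toil-9.0.0-py3-none-any.whl"

    with zipfile.ZipFile(old_wheel) as old, zipfile.ZipFile(new_wheel) as new:
        old_names = set(old.namelist())
        new_names = set(new.namelist())

    for name in sorted(new_names - old_names):
        print("added:", name)
    for name in sorted(old_names - new_names):
        print("removed:", name)
    # Files present in both wheels may still differ in content; extract and
    # diff those separately to get per-file +/- line counts like the table above.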
@@ -11,10 +11,13 @@
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
+ from collections.abc import Iterable, Generator
+ import argparse
  import fcntl
  import itertools
  import logging
  import os
+ from pathlib import Path
  import subprocess
  import sys
  import tempfile
@@ -23,6 +26,7 @@ import time
  from abc import ABCMeta, abstractmethod
  from fractions import Fraction
  from unittest import skipIf
+ from typing import Optional, Any, TYPE_CHECKING
 
  from toil.batchSystems.abstractBatchSystem import (
  AbstractBatchSystem,
@@ -38,12 +42,12 @@ from toil.batchSystems.registry import (
  add_batch_system_factory,
  get_batch_system,
  get_batch_systems,
- restore_batch_system_plugin_state,
- save_batch_system_plugin_state,
  )
  from toil.batchSystems.singleMachine import SingleMachineBatchSystem
  from toil.common import Config, Toil
- from toil.job import Job, JobDescription, Requirer
+ from toil.fileStores.abstractFileStore import AbstractFileStore
+ from toil.job import Job, JobDescription, Requirer, ServiceHostJob
+ from toil.lib.misc import StrPath
  from toil.lib.retry import retry_flaky_test
  from toil.lib.threading import cpu_count
  from toil.test import (
@@ -60,7 +64,16 @@ from toil.test import (
  needs_slurm,
  needs_torque,
  slow,
+ pslow,
+ pneeds_mesos,
  )
+ from toil.lib.plugins import remove_plugin
+
+ import pytest
+
+ if TYPE_CHECKING:
+ from toil.batchSystems.mesos.batchSystem import MesosBatchSystem
+
 
  logger = logging.getLogger(__name__)
 
@@ -83,19 +96,13 @@ class BatchSystemPluginTest(ToilTest):
  Class for testing batch system plugin functionality.
  """
 
- def setUp(self):
- # Save plugin state so our plugin doesn't stick around after the test
- # (and create duplicate options)
- self.__state = save_batch_system_plugin_state()
- super().setUp()
-
- def tearDown(self):
+ def tearDown(self) -> None:
  # Restore plugin state
- restore_batch_system_plugin_state(self.__state)
+ remove_plugin("batch_system", "testBatchSystem")
  super().tearDown()
 
- def test_add_batch_system_factory(self):
- def test_batch_system_factory():
+ def test_add_batch_system_factory(self) -> None:
+ def test_batch_system_factory() -> type[SingleMachineBatchSystem]:
  # TODO: Adding the same batch system under multiple names means we
  # can't actually create Toil options, because each version tries to
  # add its arguments.
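
The hunk above replaces the old save/restore of batch-system plugin state with the new remove_plugin helper from toil.lib.plugins, alongside the existing add_batch_system_factory registry hook. A minimal sketch of how that registration round-trip appears to work, based only on the imports and calls visible in this diff (the argument order of add_batch_system_factory and the exact return value of get_batch_systems are assumptions):

    # Assumed usage, inferred from the imports and calls shown in this diff.
    from toil.batchSystems.registry import add_batch_system_factory, get_batch_systems
    from toil.batchSystems.singleMachine import SingleMachineBatchSystem
    from toil.lib.plugins import remove_plugin

    def test_batch_system_factory() -> type[SingleMachineBatchSystem]:
        # The test above registers a factory that returns an existing batch system class.
        return SingleMachineBatchSystem

    add_batch_system_factory("testBatchSystem", test_batch_system_factory)
    assert "testBatchSystem" in get_batch_systems()

    # Clean up so the plugin does not leak into later tests, as the new tearDown() does.
    remove_plugin("batch_system", "testBatchSystem")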
@@ -112,7 +119,6 @@ class hidden:
 
  http://stackoverflow.com/questions/1323455/python-unit-test-with-base-and-sub-class#answer-25695512
  """
-
  class AbstractBatchSystemTest(ToilTest, metaclass=ABCMeta):
  """
  A base test case with generic tests that every batch system should pass.
@@ -124,11 +130,11 @@ class hidden:
  def createBatchSystem(self) -> AbstractBatchSystem:
  raise NotImplementedError
 
- def supportsWallTime(self):
+ def supportsWallTime(self) -> bool:
  return False
 
  @classmethod
- def createConfig(cls):
+ def createConfig(cls) -> Config:
  """
  Returns a dummy config for the batch system tests. We need a workflowID to be set up
  since we are running tests without setting up a jobstore. This is the class version
@@ -143,7 +149,7 @@ class hidden:
  config.cleanWorkDir = "always"
  return config
 
- def _createConfig(self):
+ def _createConfig(self) -> Config:
  """
  Returns a dummy config for the batch system tests. We need a workflowID to be set up
  since we are running tests without setting up a jobstore.
@@ -152,7 +158,9 @@ class hidden:
  """
  return self.createConfig()
 
- def _mockJobDescription(self, jobStoreID=None, **kwargs):
+ def _mockJobDescription(
+ self, jobStoreID: Optional[str] = None, **kwargs: Any
+ ) -> JobDescription:
  """
  Create a mock-up JobDescription with the given ID and other parameters.
  """
@@ -168,17 +176,17 @@ class hidden:
  return desc
 
  @classmethod
- def setUpClass(cls):
+ def setUpClass(cls) -> None:
  super().setUpClass()
  logging.basicConfig(level=logging.DEBUG)
 
- def setUp(self):
+ def setUp(self) -> None:
  super().setUp()
  self.config = self._createConfig()
  self.batchSystem = self.createBatchSystem()
  self.tempDir = self._createTempDir("testFiles")
 
- def tearDown(self):
+ def tearDown(self) -> None:
  self.batchSystem.shutdown()
  super().tearDown()
 
@@ -189,11 +197,11 @@ class hidden:
  """
  return 120
 
- def test_available_cores(self):
+ def test_available_cores(self) -> None:
  self.assertTrue(cpu_count() >= numCores)
 
  @retry_flaky_test(prepare=[tearDown, setUp])
- def test_run_jobs(self):
+ def test_run_jobs(self) -> None:
  jobDesc1 = self._mockJobDescription(
  jobName="test1",
  unitName=None,
@@ -247,6 +255,7 @@ class hidden:
  job3 = self.batchSystem.issueBatchJob("mktemp -d", jobDesc3)
 
  jobUpdateInfo = self.batchSystem.getUpdatedBatchJob(maxWait=1000)
+ assert jobUpdateInfo is not None
  jobID, exitStatus, wallTime = (
  jobUpdateInfo.jobID,
  jobUpdateInfo.exitStatus,
@@ -260,6 +269,7 @@ class hidden:
  self.assertEqual(jobID, job3)
  self.assertEqual(exitStatus, 0)
  if self.supportsWallTime():
+ assert wallTime is not None
  self.assertTrue(wallTime > 0)
  else:
  self.assertIsNone(wallTime)
@@ -270,7 +280,7 @@ class hidden:
  # Make sure killBatchJobs can handle jobs that don't exist
  self.batchSystem.killBatchJobs([10])
 
- def test_set_env(self):
+ def test_set_env(self) -> None:
  # Start with a relatively safe script
  script_shell = (
  'if [ "x${FOO}" == "xbar" ] ; then exit 23 ; else exit 42 ; fi'
@@ -289,6 +299,7 @@ class hidden:
  )
  job4 = self.batchSystem.issueBatchJob(command, jobDesc4)
  jobUpdateInfo = self.batchSystem.getUpdatedBatchJob(maxWait=1000)
+ assert jobUpdateInfo is not None
  jobID, exitStatus, wallTime = (
  jobUpdateInfo.jobID,
  jobUpdateInfo.exitStatus,
@@ -305,11 +316,12 @@ class hidden:
  requirements=defaultRequirements,
  )
  job5 = self.batchSystem.issueBatchJob(command, jobDesc5)
- jobUpdateInfo = self.batchSystem.getUpdatedBatchJob(maxWait=1000)
- self.assertEqual(jobUpdateInfo.exitStatus, 23)
- self.assertEqual(jobUpdateInfo.jobID, job5)
+ jobUpdateInfo2 = self.batchSystem.getUpdatedBatchJob(maxWait=1000)
+ assert jobUpdateInfo2 is not None
+ self.assertEqual(jobUpdateInfo2.exitStatus, 23)
+ self.assertEqual(jobUpdateInfo2.jobID, job5)
 
- def test_set_job_env(self):
+ def test_set_job_env(self) -> None:
  """Test the mechanism for setting per-job environment variables to batch system jobs."""
  script = 'if [ "x${FOO}" == "xbar" ] ; then exit 23 ; else exit 42 ; fi'
  command = 'bash -c "\\${@}" bash eval ' + script.replace(";", r"\;")
@@ -325,6 +337,7 @@ class hidden:
  command, job_desc_6, job_environment={"FOO": "bar"}
  )
  job_update_info = self.batchSystem.getUpdatedBatchJob(maxWait=1000)
+ assert job_update_info is not None
  self.assertEqual(job_update_info.exitStatus, 23) # this should succeed
  self.assertEqual(job_update_info.jobID, job6)
  # Now check that the environment variable doesn't exist for other jobs
@@ -335,11 +348,12 @@ class hidden:
  requirements=defaultRequirements,
  )
  job7 = self.batchSystem.issueBatchJob(command, job_desc_7)
- job_update_info = self.batchSystem.getUpdatedBatchJob(maxWait=1000)
- self.assertEqual(job_update_info.exitStatus, 42)
- self.assertEqual(job_update_info.jobID, job7)
+ job_update_info2 = self.batchSystem.getUpdatedBatchJob(maxWait=1000)
+ assert job_update_info2 is not None
+ self.assertEqual(job_update_info2.exitStatus, 42)
+ self.assertEqual(job_update_info2.jobID, job7)
 
- def testCheckResourceRequest(self):
+ def testCheckResourceRequest(self) -> None:
  if isinstance(self.batchSystem, BatchSystemSupport):
  check_resource_request = self.batchSystem.check_resource_request
  # Assuming we have <2000 cores, this should be too many cores
@@ -405,11 +419,11 @@ class hidden:
  Requirer(dict(memory=10, cores=1, disk=100, accelerators=[]))
  )
 
- def testScalableBatchSystem(self):
+ def testScalableBatchSystem(self) -> None:
  # If instance of scalable batch system
  pass
 
- def _waitForJobsToIssue(self, numJobs):
+ def _waitForJobsToIssue(self, numJobs: int) -> list[int]:
  issuedIDs = []
  for it in range(20):
  issuedIDs = self.batchSystem.getIssuedBatchJobIDs()
@@ -418,7 +432,7 @@ class hidden:
  time.sleep(1)
  return issuedIDs
 
- def _waitForJobsToStart(self, numJobs, tries=20):
+ def _waitForJobsToStart(self, numJobs: int, tries: int = 20) -> list[int]:
  """
  Loop until the given number of distinct jobs are in the
  running state, or until the given number of tries is exhausted
@@ -437,114 +451,109 @@ class hidden:
  time.sleep(1)
  return runningIDs
 
- class AbstractBatchSystemJobTest(ToilTest, metaclass=ABCMeta):
+ class AbstractGridEngineBatchSystemTest(AbstractBatchSystemTest):
  """
- An abstract base class for batch system tests that use a full Toil workflow rather
- than using the batch system directly.
+ An abstract class to reduce redundancy between Grid Engine, Slurm, and other similar batch
+ systems
  """
 
- cpuCount = cpu_count()
- allocatedCores = sorted({1, 2, cpuCount})
- sleepTime = 30
+ def _createConfig(self) -> Config:
+ config = super()._createConfig()
+ config.statePollingWait = 0.5 # Reduce polling wait so tests run faster
+ # can't use _getTestJobStorePath since that method removes the directory
+ config.jobStore = "file:" + self._createTempDir("jobStore")
+ return config
 
- @abstractmethod
- def getBatchSystemName(self):
- """
- :rtype: (str, AbstractBatchSystem)
- """
- raise NotImplementedError
 
- def getOptions(self, tempDir):
- """
- Configures options for Toil workflow and makes job store.
- :param str tempDir: path to test directory
- :return: Toil options object
- """
- options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
- options.logLevel = "DEBUG"
- options.batchSystem = self.batchSystemName
- options.workDir = tempDir
- options.maxCores = self.cpuCount
- return options
-
- def setUp(self):
- self.batchSystemName = self.getBatchSystemName()
- super().setUp()
+ class AbstractBatchSystemJobTest:
+ """
+ An abstract base class for batch system tests that use a full Toil workflow rather
+ than using the batch system directly.
+ """
 
- def tearDown(self):
- super().tearDown()
+ cpuCount = cpu_count() if cpu_count() < 4 else 4
+ allocatedCores = sorted({1, 2, cpuCount})
+ sleepTime = 30
 
- @slow
- def testJobConcurrency(self):
- """
- Tests that the batch system is allocating core resources properly for concurrent tasks.
- """
- for coresPerJob in self.allocatedCores:
- tempDir = self._createTempDir("testFiles")
- options = self.getOptions(tempDir)
-
- counterPath = os.path.join(tempDir, "counter")
- resetCounters(counterPath)
- value, maxValue = getCounters(counterPath)
- assert (value, maxValue) == (0, 0)
-
- root = Job()
- for _ in range(self.cpuCount):
- root.addFollowOn(
- Job.wrapFn(
- measureConcurrency,
- counterPath,
- self.sleepTime,
- cores=coresPerJob,
- memory="1M",
- disk="1Mi",
- )
- )
- with Toil(options) as toil:
- toil.start(root)
- _, maxValue = getCounters(counterPath)
- self.assertEqual(maxValue, self.cpuCount // coresPerJob)
+ @abstractmethod
+ def getBatchSystemName(self) -> str:
+ """
+ :rtype: (str, AbstractBatchSystem)
+ """
+ raise NotImplementedError
 
- def test_omp_threads(self):
- """
- Test if the OMP_NUM_THREADS env var is set correctly based on jobs.cores.
- """
- test_cases = {
- # mapping of the number of cores to the OMP_NUM_THREADS value
- 0.1: "1",
- 1: "1",
- 2: "2",
- }
-
- temp_dir = self._createTempDir()
- options = self.getOptions(temp_dir)
-
- for cores, expected_omp_threads in test_cases.items():
- if os.environ.get("OMP_NUM_THREADS"):
- expected_omp_threads = os.environ.get("OMP_NUM_THREADS")
- logger.info(
- f"OMP_NUM_THREADS is set. Using OMP_NUM_THREADS={expected_omp_threads} instead."
- )
- with Toil(options) as toil:
- output = toil.start(
- Job.wrapFn(
- get_omp_threads, memory="1Mi", cores=cores, disk="1Mi"
- )
- )
- self.assertEqual(output, expected_omp_threads)
+ def getOptions(self, tempDir: Path) -> argparse.Namespace:
+ """
+ Configures options for Toil workflow and makes job store.
+ :param str tempDir: path to test directory
+ :return: Toil options object
+ """
+ workdir = tempDir / "workdir"
+ workdir.mkdir()
+ options = Job.Runner.getDefaultOptions(tempDir / "jobstore")
+ options.logLevel = "DEBUG"
+ options.batchSystem = self.getBatchSystemName()
+ options.workDir = str(workdir)
+ options.maxCores = self.cpuCount
+ return options
 
- class AbstractGridEngineBatchSystemTest(AbstractBatchSystemTest):
+ @pslow
+ @pytest.mark.slow
+ def testJobConcurrency(self, tmp_path: Path) -> None:
  """
- An abstract class to reduce redundancy between Grid Engine, Slurm, and other similar batch
- systems
+ Tests that the batch system is allocating core resources properly for concurrent tasks.
  """
+ for coresPerJob in self.allocatedCores:
+ tempDir = tmp_path / f"testFiles_{coresPerJob}"
+ tempDir.mkdir()
+ options = self.getOptions(tempDir)
+
+ counterPath = tempDir / "counter"
+ resetCounters(counterPath)
+ value, maxValue = getCounters(counterPath)
+ assert (value, maxValue) == (0, 0)
+
+ root = Job()
+ for _ in range(self.cpuCount):
+ root.addFollowOn(
+ Job.wrapFn(
+ measureConcurrency,
+ counterPath,
+ self.sleepTime,
+ cores=coresPerJob,
+ memory="1M",
+ disk="1Mi",
+ )
+ )
+ with Toil(options) as toil:
+ toil.start(root)
+ _, maxValue = getCounters(counterPath)
+ assert maxValue == (self.cpuCount // coresPerJob)
 
- def _createConfig(self):
- config = super()._createConfig()
- config.statePollingWait = 0.5 # Reduce polling wait so tests run faster
- # can't use _getTestJobStorePath since that method removes the directory
- config.jobStore = "file:" + self._createTempDir("jobStore")
- return config
+ def test_omp_threads(self, tmp_path: Path) -> None:
+ """
+ Test if the OMP_NUM_THREADS env var is set correctly based on jobs.cores.
+ """
+ test_cases = {
+ # mapping of the number of cores to the OMP_NUM_THREADS value
+ 0.1: "1",
+ 1: "1",
+ 2: "2",
+ }
+
+ options = self.getOptions(tmp_path)
+
+ for cores, expected_omp_threads in test_cases.items():
+ if eont := os.environ.get("OMP_NUM_THREADS"):
+ expected_omp_threads = eont
+ logger.info(
+ f"OMP_NUM_THREADS is set. Using OMP_NUM_THREADS={expected_omp_threads} instead."
+ )
+ with Toil(options) as toil:
+ output = toil.start(
+ Job.wrapFn(get_omp_threads, memory="1Mi", cores=cores, disk="1Mi")
+ )
+ assert output == expected_omp_threads
 
 
  @needs_kubernetes
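
The restructuring above is part of a wider unittest-to-pytest migration in this test module: class-level setUp/tearDown, self._createTempDir and self.assertEqual give way to pytest's tmp_path fixture, autouse fixtures and plain assert statements. A generic sketch of that pattern, not specific to Toil, for readers who have not used it:

    # Illustrative only: the unittest-to-pytest shape used in the hunks above.
    from collections.abc import Generator
    from pathlib import Path

    import pytest

    class TestCounterFile:
        @pytest.fixture(autouse=True)
        def resource(self) -> Generator[None, None, None]:
            # Replaces setUp/tearDown: acquire, yield to the test, then release.
            self.ready = True
            try:
                yield
            finally:
                self.ready = False

        @pytest.mark.slow  # custom marker, assumed to be registered in pytest config
        def test_counter(self, tmp_path: Path) -> None:
            # tmp_path replaces self._createTempDir(); pytest cleans it up later.
            counter = tmp_path / "counter"
            counter.write_text("0,0")
            assert counter.read_text() == "0,0"  # plain assert replaces assertEqual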
@@ -555,10 +564,10 @@ class KubernetesBatchSystemTest(hidden.AbstractBatchSystemTest):
  Tests against the Kubernetes batch system
  """
 
- def supportsWallTime(self):
+ def supportsWallTime(self) -> bool:
  return True
 
- def createBatchSystem(self):
+ def createBatchSystem(self) -> AbstractBatchSystem:
  # We know we have Kubernetes so we can import the batch system
  from toil.batchSystems.kubernetes import KubernetesBatchSystem
 
@@ -573,7 +582,7 @@ class KubernetesBatchSystemBenchTest(ToilTest):
  Kubernetes batch system unit tests that don't need to actually talk to a cluster.
  """
 
- def test_preemptability_constraints(self):
+ def test_preemptability_constraints(self) -> None:
  """
  Make sure we generate the right preemptability constraints.
  """
@@ -645,7 +654,7 @@ class KubernetesBatchSystemBenchTest(ToilTest):
  str(spot_spec.tolerations),
  )
 
- def test_label_constraints(self):
+ def test_label_constraints(self) -> None:
  """
  Make sure we generate the right preemptability constraints.
  """
@@ -694,10 +703,10 @@ class AWSBatchBatchSystemTest(hidden.AbstractBatchSystemTest):
  Tests against the AWS Batch batch system
  """
 
- def supportsWallTime(self):
+ def supportsWallTime(self) -> bool:
  return True
 
- def createBatchSystem(self):
+ def createBatchSystem(self) -> AbstractBatchSystem:
  from toil.batchSystems.awsBatch import AWSBatchBatchSystem
 
  return AWSBatchBatchSystem(
@@ -716,8 +725,10 @@ class MesosBatchSystemTest(hidden.AbstractBatchSystemTest, MesosTestSupport):
  Tests against the Mesos batch system
  """
 
+ batchSystem: "MesosBatchSystem"
+
  @classmethod
- def createConfig(cls):
+ def createConfig(cls) -> Config:
  """
  needs to set mesos_endpoint to localhost for testing since the default is now the
  private IP address
@@ -726,10 +737,10 @@ class MesosBatchSystemTest(hidden.AbstractBatchSystemTest, MesosTestSupport):
  config.mesos_endpoint = "localhost:5050"
  return config
 
- def supportsWallTime(self):
+ def supportsWallTime(self) -> bool:
  return True
 
- def createBatchSystem(self):
+ def createBatchSystem(self) -> "MesosBatchSystem":
  # We know we have Mesos so we can import the batch system
  from toil.batchSystems.mesos.batchSystem import MesosBatchSystem
 
@@ -738,11 +749,11 @@ class MesosBatchSystemTest(hidden.AbstractBatchSystemTest, MesosTestSupport):
  config=self.config, maxCores=numCores, maxMemory=1e9, maxDisk=1001
  )
 
- def tearDown(self):
+ def tearDown(self) -> None:
  self._stopMesos()
  super().tearDown()
 
- def testIgnoreNode(self):
+ def testIgnoreNode(self) -> None:
  self.batchSystem.ignoreNode("localhost")
  jobDesc = self._mockJobDescription(
  jobName="test2",
@@ -807,14 +818,14 @@ class SingleMachineBatchSystemTest(hidden.AbstractBatchSystemTest):
  import signal
  import sys
  import time
- from typing import Any
+ from typing import Any, Iterable
 
  def handle_signal(sig: Any, frame: Any) -> None:
  sys.stderr.write(f"{os.getpid()} ignoring signal {sig}\n")
 
  if hasattr(signal, "valid_signals"):
  # We can just ask about the signals
- all_signals = signal.valid_signals()
+ all_signals: Iterable[signal.Signals] = signal.valid_signals()
  else:
  # Fish them out by name
  all_signals = [
@@ -901,7 +912,7 @@ class SingleMachineBatchSystemTest(hidden.AbstractBatchSystemTest):
  os.unlink(script_path)
  os.unlink(lockable_path)
 
- def testHidingProcessEscape(self):
+ def testHidingProcessEscape(self) -> None:
  """
  Test to make sure that child processes and their descendants go away
  when the Toil workflow stops, even if the job process stops and leaves children.
@@ -980,7 +991,7 @@ class MaxCoresSingleMachineBatchSystemTest(ToilTest):
  return " ".join([sys.executable, self.scriptPath, self.counterPath])
 
  @retry_flaky_test(prepare=[tearDown, setUp])
- def test(self):
+ def test(self) -> None:
  # We'll use fractions to avoid rounding errors. Remember that not every fraction can be
  # represented as a floating point number.
  F = Fraction
@@ -1025,7 +1036,7 @@ class MaxCoresSingleMachineBatchSystemTest(ToilTest):
  self.assertEqual(len(jobIds), jobs)
  while jobIds:
  job = bs.getUpdatedBatchJob(maxWait=10)
- self.assertIsNotNone(job)
+ assert job is not None
  jobId, status, wallTime = (
  job.jobID,
  job.exitStatus,
@@ -1057,7 +1068,7 @@ class MaxCoresSingleMachineBatchSystemTest(ToilTest):
  SingleMachineBatchSystem.numCores < 3,
  "Need at least three cores to run this test",
  )
- def testServices(self):
+ def testServices(self) -> None:
  options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
  options.logLevel = "DEBUG"
  options.maxCores = 3
@@ -1072,38 +1083,38 @@ class MaxCoresSingleMachineBatchSystemTest(ToilTest):
  # Toil can use only top-level functions so we have to add them here:
 
 
- def parentJob(job, cmd):
+ def parentJob(job: Job, cmd: str) -> None:
  job.addChildJobFn(childJob, cmd)
 
 
- def childJob(job, cmd):
+ def childJob(job: Job, cmd: str) -> None:
  job.addService(Service(cmd))
  job.addChildJobFn(grandChildJob, cmd)
  subprocess.check_call(cmd, shell=True)
 
 
- def grandChildJob(job, cmd):
+ def grandChildJob(job: Job, cmd: str) -> None:
  job.addService(Service(cmd))
  job.addChildFn(greatGrandChild, cmd)
  subprocess.check_call(cmd, shell=True)
 
 
- def greatGrandChild(cmd):
+ def greatGrandChild(cmd: str) -> None:
  subprocess.check_call(cmd, shell=True)
 
 
  class Service(Job.Service):
- def __init__(self, cmd):
+ def __init__(self, cmd: str) -> None:
  super().__init__()
  self.cmd = cmd
 
- def start(self, fileStore):
+ def start(self, job: ServiceHostJob) -> None:
  subprocess.check_call(self.cmd + " 1", shell=True)
 
- def check(self):
+ def check(self) -> bool:
  return True
 
- def stop(self, fileStore):
+ def stop(self, job: ServiceHostJob) -> None:
  subprocess.check_call(self.cmd + " -1", shell=True)
 
 
@@ -1121,7 +1132,7 @@ class GridEngineBatchSystemTest(hidden.AbstractGridEngineBatchSystemTest):
  config=self.config, maxCores=numCores, maxMemory=1000e9, maxDisk=1e9
  )
 
- def tearDown(self):
+ def tearDown(self) -> None:
  super().tearDown()
  # Cleanup GridEngine output log file from qsub
  from glob import glob
@@ -1144,7 +1155,7 @@ class SlurmBatchSystemTest(hidden.AbstractGridEngineBatchSystemTest):
  config=self.config, maxCores=numCores, maxMemory=1000e9, maxDisk=1e9
  )
 
- def tearDown(self):
+ def tearDown(self) -> None:
  super().tearDown()
  # Cleanup 'slurm-%j.out' produced by sbatch
  from glob import glob
@@ -1175,8 +1186,8 @@ class TorqueBatchSystemTest(hidden.AbstractGridEngineBatchSystemTest):
  Tests against the Torque batch system
  """
 
- def _createDummyConfig(self):
- config = super()._createDummyConfig()
+ def _createDummyConfig(self) -> Config:
+ config = super()._createConfig()
  # can't use _getTestJobStorePath since that method removes the directory
  config.jobStore = self._createTempDir("jobStore")
  return config
@@ -1188,7 +1199,7 @@ class TorqueBatchSystemTest(hidden.AbstractGridEngineBatchSystemTest):
  config=self.config, maxCores=numCores, maxMemory=1000e9, maxDisk=1e9
  )
 
- def tearDown(self):
+ def tearDown(self) -> None:
  super().tearDown()
  # Cleanup 'toil_job-%j.out' produced by sbatch
  from glob import glob
@@ -1211,42 +1222,39 @@ class HTCondorBatchSystemTest(hidden.AbstractGridEngineBatchSystemTest):
  config=self.config, maxCores=numCores, maxMemory=1000e9, maxDisk=1e9
  )
 
- def tearDown(self):
+ def tearDown(self) -> None:
  super().tearDown()
 
 
- class SingleMachineBatchSystemJobTest(hidden.AbstractBatchSystemJobTest):
+ class TestSingleMachineBatchSystemJob(AbstractBatchSystemJobTest):
  """
  Tests Toil workflow against the SingleMachine batch system
  """
 
- def getBatchSystemName(self):
+ def getBatchSystemName(self) -> str:
  return "single_machine"
 
- @slow
- @retry_flaky_test(
- prepare=[
- hidden.AbstractBatchSystemJobTest.tearDown,
- hidden.AbstractBatchSystemJobTest.setUp,
- ]
- )
- def testConcurrencyWithDisk(self):
+ @pslow
+ @pytest.mark.slow
+ @retry_flaky_test(prepare=[])
+ def testConcurrencyWithDisk(self, tmp_path: Path) -> None:
  """
  Tests that the batch system is allocating disk resources properly
  """
- tempDir = self._createTempDir("testFiles")
 
- options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
- options.workDir = tempDir
+ workdir = tmp_path / "workdir"
+ workdir.mkdir()
+ options = Job.Runner.getDefaultOptions(tmp_path / "jobstore")
+ options.workDir = str(workdir)
  from toil import physicalDisk
 
  availableDisk = physicalDisk(options.workDir)
  logger.info("Testing disk concurrency limits with %s disk space", availableDisk)
  # More disk might become available by the time Toil starts, so we limit it here
  options.maxDisk = availableDisk
- options.batchSystem = self.batchSystemName
+ options.batchSystem = self.getBatchSystemName()
 
- counterPath = os.path.join(tempDir, "counter")
+ counterPath = tmp_path / "counter"
  resetCounters(counterPath)
  value, maxValue = getCounters(counterPath)
  assert (value, maxValue) == (0, 0)
@@ -1283,31 +1291,32 @@ class SingleMachineBatchSystemJobTest(hidden.AbstractBatchSystemJobTest):
 
  logger.info("After run: %s disk space", physicalDisk(options.workDir))
 
- self.assertEqual(maxValue, 1)
+ assert maxValue == 1
 
- @skipIf(
+ @pytest.mark.skipif(
  SingleMachineBatchSystem.numCores < 4,
- "Need at least four cores to run this test",
+ reason="Need at least four cores to run this test",
  )
- @slow
- def testNestedResourcesDoNotBlock(self):
+ @pslow
+ @pytest.mark.slow
+ def testNestedResourcesDoNotBlock(self, tmp_path: Path) -> None:
  """
  Resources are requested in the order Memory > Cpu > Disk.
  Test that unavailability of cpus for one job that is scheduled does not block another job
  that can run.
  """
- tempDir = self._createTempDir("testFiles")
-
- options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
- options.workDir = tempDir
+ workdir = tmp_path / "workdir"
+ workdir.mkdir()
+ options = Job.Runner.getDefaultOptions(tmp_path / "jobstore")
+ options.workDir = str(workdir)
  options.maxCores = 4
  from toil import physicalMemory
 
  availableMemory = physicalMemory()
- options.batchSystem = self.batchSystemName
+ options.batchSystem = self.getBatchSystemName()
 
- outFile = os.path.join(tempDir, "counter")
- open(outFile, "w").close()
+ outFile = tmp_path / "counter"
+ outFile.open("w").close()
 
  root = Job()
 
@@ -1395,7 +1404,7 @@ class SingleMachineBatchSystemJobTest(hidden.AbstractBatchSystemJobTest):
  should not block them, and should only run after they finish.
  """
  Job.Runner.startToil(root, options)
- with open(outFile) as oFH:
+ with outFile.open() as oFH:
  outString = oFH.read()
  # The ordering of b, fJ and sJ is non-deterministic since they are scheduled at the same
  # time. We look for all possible permutations.
@@ -1406,12 +1415,12 @@ class SingleMachineBatchSystemJobTest(hidden.AbstractBatchSystemJobTest):
  assert outString.endswith("sJCsJGCfJC")
 
 
- def _resourceBlockTestAuxFn(outFile, sleepTime, writeVal):
+ def _resourceBlockTestAuxFn(outFile: StrPath, sleepTime: int, writeVal: str) -> None:
  """
  Write a value to the out file and then sleep for requested seconds.
- :param str outFile: File to write to
- :param int sleepTime: Time to sleep for
- :param str writeVal: Character to write
+ :param outFile: File to write to
+ :param sleepTime: Time to sleep for
+ :param writeVal: Character to write
  """
  with open(outFile, "a") as oFH:
  fcntl.flock(oFH, fcntl.LOCK_EX)
@@ -1419,33 +1428,38 @@ def _resourceBlockTestAuxFn(outFile, sleepTime, writeVal):
  time.sleep(sleepTime)
 
 
- @slow
- @needs_mesos
- class MesosBatchSystemJobTest(hidden.AbstractBatchSystemJobTest, MesosTestSupport):
+ @pslow
+ @pytest.mark.slow
+ @pneeds_mesos
+ class TestMesosBatchSystemJob(AbstractBatchSystemJobTest, MesosTestSupport):
  """
  Tests Toil workflow against the Mesos batch system
  """
 
- def getOptions(self, tempDir):
+ @pytest.fixture(autouse=True)
+ def mesos_support(self) -> Generator[None]:
+ try:
+ self._startMesos(self.cpuCount)
+ yield
+ finally:
+ self._stopMesos()
+
+ def getOptions(self, tempDir: Path) -> argparse.Namespace:
  options = super().getOptions(tempDir)
  options.mesos_endpoint = "localhost:5050"
  return options
 
- def getBatchSystemName(self):
- self._startMesos(self.cpuCount)
+ def getBatchSystemName(self) -> "str":
  return "mesos"
 
- def tearDown(self):
- self._stopMesos()
 
-
- def measureConcurrency(filepath, sleep_time=10):
+ def measureConcurrency(filepath: StrPath, sleep_time: int = 10) -> int:
  """
  Run in parallel to determine the number of concurrent tasks.
  This code was copied from toil.batchSystemTestMaxCoresSingleMachineBatchSystemTest
- :param str filepath: path to counter file
- :param int sleep_time: number of seconds to sleep before counting down
- :return int max concurrency value:
+ :param filepath: path to counter file
+ :param sleep_time: number of seconds to sleep before counting down
+ :return: max concurrency value
  """
  count(1, filepath)
  try:
@@ -1454,16 +1468,17 @@ def measureConcurrency(filepath, sleep_time=10):
  return count(-1, filepath)
 
 
- def count(delta, file_path):
+ def count(delta: int, file_path: StrPath) -> int:
  """
  Increments counter file and returns the max number of times the file
  has been modified. Counter data must be in the form:
  concurrent tasks, max concurrent tasks (counter should be initialized to 0,0)
 
- :param int delta: increment value
- :param str file_path: path to shared counter file
- :return int max concurrent tasks:
+ :param delta: increment value
+ :param file_path: path to shared counter file
+ :return: max concurrent tasks
  """
+
  fd = os.open(file_path, os.O_RDWR)
  try:
  fcntl.flock(fd, fcntl.LOCK_EX)
@@ -1483,13 +1498,13 @@ def count(delta, file_path):
  return maxValue
 
 
- def getCounters(path):
+ def getCounters(path: StrPath) -> tuple[int, int]:
  with open(path, "r+") as f:
  concurrentTasks, maxConcurrentTasks = (int(i) for i in f.read().split(","))
  return concurrentTasks, maxConcurrentTasks
 
 
- def resetCounters(path):
+ def resetCounters(path: StrPath) -> None:
  with open(path, "w") as f:
  f.write("0,0")
  f.close()