toil 7.0.0__py3-none-any.whl → 8.1.0b1__py3-none-any.whl

This diff compares the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (197)
  1. toil/__init__.py +124 -86
  2. toil/batchSystems/__init__.py +1 -0
  3. toil/batchSystems/abstractBatchSystem.py +137 -77
  4. toil/batchSystems/abstractGridEngineBatchSystem.py +211 -101
  5. toil/batchSystems/awsBatch.py +237 -128
  6. toil/batchSystems/cleanup_support.py +22 -16
  7. toil/batchSystems/contained_executor.py +30 -26
  8. toil/batchSystems/gridengine.py +85 -49
  9. toil/batchSystems/htcondor.py +164 -87
  10. toil/batchSystems/kubernetes.py +622 -386
  11. toil/batchSystems/local_support.py +17 -12
  12. toil/batchSystems/lsf.py +132 -79
  13. toil/batchSystems/lsfHelper.py +13 -11
  14. toil/batchSystems/mesos/__init__.py +41 -29
  15. toil/batchSystems/mesos/batchSystem.py +288 -149
  16. toil/batchSystems/mesos/executor.py +77 -49
  17. toil/batchSystems/mesos/test/__init__.py +31 -23
  18. toil/batchSystems/options.py +39 -29
  19. toil/batchSystems/registry.py +53 -19
  20. toil/batchSystems/singleMachine.py +293 -123
  21. toil/batchSystems/slurm.py +651 -155
  22. toil/batchSystems/torque.py +46 -32
  23. toil/bus.py +141 -73
  24. toil/common.py +784 -397
  25. toil/cwl/__init__.py +1 -1
  26. toil/cwl/cwltoil.py +1137 -534
  27. toil/cwl/utils.py +17 -22
  28. toil/deferred.py +62 -41
  29. toil/exceptions.py +5 -3
  30. toil/fileStores/__init__.py +5 -5
  31. toil/fileStores/abstractFileStore.py +88 -57
  32. toil/fileStores/cachingFileStore.py +711 -247
  33. toil/fileStores/nonCachingFileStore.py +113 -75
  34. toil/job.py +1031 -349
  35. toil/jobStores/abstractJobStore.py +387 -243
  36. toil/jobStores/aws/jobStore.py +772 -412
  37. toil/jobStores/aws/utils.py +161 -109
  38. toil/jobStores/conftest.py +1 -0
  39. toil/jobStores/fileJobStore.py +289 -151
  40. toil/jobStores/googleJobStore.py +137 -70
  41. toil/jobStores/utils.py +36 -15
  42. toil/leader.py +614 -269
  43. toil/lib/accelerators.py +115 -18
  44. toil/lib/aws/__init__.py +55 -28
  45. toil/lib/aws/ami.py +122 -87
  46. toil/lib/aws/iam.py +284 -108
  47. toil/lib/aws/s3.py +31 -0
  48. toil/lib/aws/session.py +204 -58
  49. toil/lib/aws/utils.py +290 -213
  50. toil/lib/bioio.py +13 -5
  51. toil/lib/compatibility.py +11 -6
  52. toil/lib/conversions.py +83 -49
  53. toil/lib/docker.py +131 -103
  54. toil/lib/dockstore.py +379 -0
  55. toil/lib/ec2.py +322 -209
  56. toil/lib/ec2nodes.py +174 -105
  57. toil/lib/encryption/_dummy.py +5 -3
  58. toil/lib/encryption/_nacl.py +10 -6
  59. toil/lib/encryption/conftest.py +1 -0
  60. toil/lib/exceptions.py +26 -7
  61. toil/lib/expando.py +4 -2
  62. toil/lib/ftp_utils.py +217 -0
  63. toil/lib/generatedEC2Lists.py +127 -19
  64. toil/lib/history.py +1271 -0
  65. toil/lib/history_submission.py +681 -0
  66. toil/lib/humanize.py +6 -2
  67. toil/lib/io.py +121 -12
  68. toil/lib/iterables.py +4 -2
  69. toil/lib/memoize.py +12 -8
  70. toil/lib/misc.py +83 -18
  71. toil/lib/objects.py +2 -2
  72. toil/lib/resources.py +19 -7
  73. toil/lib/retry.py +125 -87
  74. toil/lib/threading.py +282 -80
  75. toil/lib/throttle.py +15 -14
  76. toil/lib/trs.py +390 -0
  77. toil/lib/web.py +38 -0
  78. toil/options/common.py +850 -402
  79. toil/options/cwl.py +185 -90
  80. toil/options/runner.py +50 -0
  81. toil/options/wdl.py +70 -19
  82. toil/provisioners/__init__.py +111 -46
  83. toil/provisioners/abstractProvisioner.py +322 -157
  84. toil/provisioners/aws/__init__.py +62 -30
  85. toil/provisioners/aws/awsProvisioner.py +980 -627
  86. toil/provisioners/clusterScaler.py +541 -279
  87. toil/provisioners/gceProvisioner.py +283 -180
  88. toil/provisioners/node.py +147 -79
  89. toil/realtimeLogger.py +34 -22
  90. toil/resource.py +137 -75
  91. toil/server/app.py +127 -61
  92. toil/server/celery_app.py +3 -1
  93. toil/server/cli/wes_cwl_runner.py +84 -55
  94. toil/server/utils.py +56 -31
  95. toil/server/wes/abstract_backend.py +64 -26
  96. toil/server/wes/amazon_wes_utils.py +21 -15
  97. toil/server/wes/tasks.py +121 -63
  98. toil/server/wes/toil_backend.py +142 -107
  99. toil/server/wsgi_app.py +4 -3
  100. toil/serviceManager.py +58 -22
  101. toil/statsAndLogging.py +183 -65
  102. toil/test/__init__.py +263 -179
  103. toil/test/batchSystems/batchSystemTest.py +438 -195
  104. toil/test/batchSystems/batch_system_plugin_test.py +18 -7
  105. toil/test/batchSystems/test_gridengine.py +173 -0
  106. toil/test/batchSystems/test_lsf_helper.py +67 -58
  107. toil/test/batchSystems/test_slurm.py +265 -49
  108. toil/test/cactus/test_cactus_integration.py +20 -22
  109. toil/test/cwl/conftest.py +39 -0
  110. toil/test/cwl/cwlTest.py +375 -72
  111. toil/test/cwl/measure_default_memory.cwl +12 -0
  112. toil/test/cwl/not_run_required_input.cwl +29 -0
  113. toil/test/cwl/optional-file.cwl +18 -0
  114. toil/test/cwl/scatter_duplicate_outputs.cwl +40 -0
  115. toil/test/docs/scriptsTest.py +60 -34
  116. toil/test/jobStores/jobStoreTest.py +412 -235
  117. toil/test/lib/aws/test_iam.py +116 -48
  118. toil/test/lib/aws/test_s3.py +16 -9
  119. toil/test/lib/aws/test_utils.py +5 -6
  120. toil/test/lib/dockerTest.py +118 -141
  121. toil/test/lib/test_conversions.py +113 -115
  122. toil/test/lib/test_ec2.py +57 -49
  123. toil/test/lib/test_history.py +212 -0
  124. toil/test/lib/test_misc.py +12 -5
  125. toil/test/lib/test_trs.py +161 -0
  126. toil/test/mesos/MesosDataStructuresTest.py +23 -10
  127. toil/test/mesos/helloWorld.py +7 -6
  128. toil/test/mesos/stress.py +25 -20
  129. toil/test/options/options.py +7 -2
  130. toil/test/provisioners/aws/awsProvisionerTest.py +293 -140
  131. toil/test/provisioners/clusterScalerTest.py +440 -250
  132. toil/test/provisioners/clusterTest.py +81 -42
  133. toil/test/provisioners/gceProvisionerTest.py +174 -100
  134. toil/test/provisioners/provisionerTest.py +25 -13
  135. toil/test/provisioners/restartScript.py +5 -4
  136. toil/test/server/serverTest.py +188 -141
  137. toil/test/sort/restart_sort.py +137 -68
  138. toil/test/sort/sort.py +134 -66
  139. toil/test/sort/sortTest.py +91 -49
  140. toil/test/src/autoDeploymentTest.py +140 -100
  141. toil/test/src/busTest.py +20 -18
  142. toil/test/src/checkpointTest.py +8 -2
  143. toil/test/src/deferredFunctionTest.py +49 -35
  144. toil/test/src/dockerCheckTest.py +33 -26
  145. toil/test/src/environmentTest.py +20 -10
  146. toil/test/src/fileStoreTest.py +538 -271
  147. toil/test/src/helloWorldTest.py +7 -4
  148. toil/test/src/importExportFileTest.py +61 -31
  149. toil/test/src/jobDescriptionTest.py +32 -17
  150. toil/test/src/jobEncapsulationTest.py +2 -0
  151. toil/test/src/jobFileStoreTest.py +74 -50
  152. toil/test/src/jobServiceTest.py +187 -73
  153. toil/test/src/jobTest.py +120 -70
  154. toil/test/src/miscTests.py +19 -18
  155. toil/test/src/promisedRequirementTest.py +82 -36
  156. toil/test/src/promisesTest.py +7 -6
  157. toil/test/src/realtimeLoggerTest.py +6 -6
  158. toil/test/src/regularLogTest.py +71 -37
  159. toil/test/src/resourceTest.py +80 -49
  160. toil/test/src/restartDAGTest.py +36 -22
  161. toil/test/src/resumabilityTest.py +9 -2
  162. toil/test/src/retainTempDirTest.py +45 -14
  163. toil/test/src/systemTest.py +12 -8
  164. toil/test/src/threadingTest.py +44 -25
  165. toil/test/src/toilContextManagerTest.py +10 -7
  166. toil/test/src/userDefinedJobArgTypeTest.py +8 -5
  167. toil/test/src/workerTest.py +33 -16
  168. toil/test/utils/toilDebugTest.py +70 -58
  169. toil/test/utils/toilKillTest.py +4 -5
  170. toil/test/utils/utilsTest.py +239 -102
  171. toil/test/wdl/wdltoil_test.py +789 -148
  172. toil/test/wdl/wdltoil_test_kubernetes.py +37 -23
  173. toil/toilState.py +52 -26
  174. toil/utils/toilConfig.py +13 -4
  175. toil/utils/toilDebugFile.py +44 -27
  176. toil/utils/toilDebugJob.py +85 -25
  177. toil/utils/toilDestroyCluster.py +11 -6
  178. toil/utils/toilKill.py +8 -3
  179. toil/utils/toilLaunchCluster.py +251 -145
  180. toil/utils/toilMain.py +37 -16
  181. toil/utils/toilRsyncCluster.py +27 -14
  182. toil/utils/toilSshCluster.py +45 -22
  183. toil/utils/toilStats.py +75 -36
  184. toil/utils/toilStatus.py +226 -119
  185. toil/utils/toilUpdateEC2Instances.py +3 -1
  186. toil/version.py +6 -6
  187. toil/wdl/utils.py +5 -5
  188. toil/wdl/wdltoil.py +3528 -1053
  189. toil/worker.py +370 -149
  190. toil-8.1.0b1.dist-info/METADATA +178 -0
  191. toil-8.1.0b1.dist-info/RECORD +259 -0
  192. {toil-7.0.0.dist-info → toil-8.1.0b1.dist-info}/WHEEL +1 -1
  193. toil-7.0.0.dist-info/METADATA +0 -158
  194. toil-7.0.0.dist-info/RECORD +0 -244
  195. {toil-7.0.0.dist-info → toil-8.1.0b1.dist-info}/LICENSE +0 -0
  196. {toil-7.0.0.dist-info → toil-8.1.0b1.dist-info}/entry_points.txt +0 -0
  197. {toil-7.0.0.dist-info → toil-8.1.0b1.dist-info}/top_level.txt +0 -0
toil/options/common.py CHANGED
@@ -1,17 +1,16 @@
+import logging
 import os
-from argparse import ArgumentParser, Action, _AppendAction
-from typing import Any, Optional, Union, Type, Callable, List, Dict, TYPE_CHECKING
+from argparse import Action, ArgumentParser, _AppendAction
+from typing import TYPE_CHECKING, Any, Callable, Optional, Union

 from configargparse import SUPPRESS
-import logging
-
 from ruamel.yaml import YAML

-from toil.lib.conversions import bytes2human, human2bytes, strtobool, opt_strtobool
-
 from toil.batchSystems.options import add_all_batchsystem_options
+from toil.lib.conversions import bytes2human, human2bytes, opt_strtobool, strtobool
 from toil.provisioners import parse_node_types
 from toil.statsAndLogging import add_logging_options
+
 if TYPE_CHECKING:
     from toil.job import AcceleratorRequirement

@@ -23,7 +22,8 @@ SYS_MAX_SIZE = 9223372036854775807
 # sys.max_size on 64 bit systems is 9223372036854775807, so that 32-bit systems
 # use the same number

-def parse_set_env(l: List[str]) -> Dict[str, Optional[str]]:
+
+def parse_set_env(l: list[str]) -> dict[str, Optional[str]]:
     """
     Parse a list of strings of the form "NAME=VALUE" or just "NAME" into a dictionary.

@@ -56,20 +56,20 @@ def parse_set_env(l: List[str]) -> Dict[str, Optional[str]]:
     v: Optional[str] = None
     for i in l:
         try:
-            k, v = i.split('=', 1)
+            k, v = i.split("=", 1)
         except ValueError:
             k, v = i, None
         if not k:
-            raise ValueError('Empty name')
+            raise ValueError("Empty name")
         d[k] = v
     return d


-def parse_str_list(s: str) -> List[str]:
+def parse_str_list(s: str) -> list[str]:
     return [str(x) for x in s.split(",")]


-def parse_int_list(s: str) -> List[int]:
+def parse_int_list(s: str) -> list[int]:
     return [int(x) for x in s.split(",")]

@@ -91,7 +91,7 @@ def fC(minValue: float, maxValue: Optional[float] = None) -> Callable[[float], b
     return lambda x: minValue <= x < maxValue


-def parse_accelerator_list(specs: Optional[str]) -> List['AcceleratorRequirement']:
+def parse_accelerator_list(specs: Optional[str]) -> list["AcceleratorRequirement"]:
     """
     Parse a string description of one or more accelerator requirements.
     """
@@ -102,20 +102,22 @@ def parse_accelerator_list(specs: Optional[str]) -> List['AcceleratorRequirement
     # Otherwise parse each requirement.
     from toil.job import parse_accelerator

-    return [parse_accelerator(r) for r in specs.split(',')]
+    return [parse_accelerator(r) for r in specs.split(",")]


 def parseBool(val: str) -> bool:
-    if val.lower() in ['true', 't', 'yes', 'y', 'on', '1']:
+    if val.lower() in ["true", "t", "yes", "y", "on", "1"]:
         return True
-    elif val.lower() in ['false', 'f', 'no', 'n', 'off', '0']:
+    elif val.lower() in ["false", "f", "no", "n", "off", "0"]:
         return False
     else:
-        raise RuntimeError("Could not interpret \"%s\" as a boolean value" % val)
+        raise RuntimeError('Could not interpret "%s" as a boolean value' % val)


 # This is kept in the outer scope as multiple batchsystem files use this
-def make_open_interval_action(min: Union[int, float], max: Optional[Union[int, float]] = None) -> Type[Action]:
+def make_open_interval_action(
+    min: Union[int, float], max: Optional[Union[int, float]] = None
+) -> type[Action]:
     """
     Returns an argparse action class to check if the input is within the given half-open interval.
     ex:
@@ -128,7 +130,9 @@ def make_open_interval_action(min: Union[int, float], max: Optional[Union[int, f
     """

     class IntOrFloatOpenAction(Action):
-        def __call__(self, parser: Any, namespace: Any, values: Any, option_string: Any = None) -> None:
+        def __call__(
+            self, parser: Any, namespace: Any, values: Any, option_string: Any = None
+        ) -> None:
             if isinstance(min, int):
                 if max is not None:  # for mypy
                     assert isinstance(max, int)
@@ -146,7 +150,9 @@ def make_open_interval_action(min: Union[int, float], max: Optional[Union[int, f
                         f"{option_string} ({values}) must be at least {min} and strictly less than {max})"
                     )
             except AssertionError:
-                raise RuntimeError(f"The {option_string} option has an invalid value: {values}")
+                raise RuntimeError(
+                    f"The {option_string} option has an invalid value: {values}"
+                )
             setattr(namespace, self.dest, values)

     return IntOrFloatOpenAction
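`make_open_interval_action` stays module-level because, per the comment above, multiple batch system modules use it. A minimal sketch of how the returned Action class plugs into argparse, assuming a stock `ArgumentParser` and a hypothetical `--widgets` flag:

```python
# Illustrative only: wiring make_open_interval_action into argparse.
from argparse import ArgumentParser

from toil.options.common import make_open_interval_action

parser = ArgumentParser()
# Accept values in the half-open interval [1, infinity); 0 is rejected via parser.error().
parser.add_argument("--widgets", type=int, action=make_open_interval_action(1))

print(parser.parse_args(["--widgets", "3"]).widgets)  # -> 3
```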
@@ -164,8 +170,9 @@ def parse_jobstore(jobstore_uri: str) -> str:
     :return: URI of the jobstore
     """
     from toil.common import Toil
+
     name, rest = Toil.parseLocator(jobstore_uri)
-    if name == 'file':
+    if name == "file":
         # We need to resolve relative paths early, on the leader, because the worker process
         # may have a different working directory than the leader, e.g. under Mesos.
         return Toil.buildLocator(name, os.path.abspath(rest))
@@ -173,22 +180,26 @@ def parse_jobstore(jobstore_uri: str) -> str:
         return jobstore_uri


-JOBSTORE_HELP = ("The location of the job store for the workflow. "
-                 "A job store holds persistent information about the jobs, stats, and files in a "
-                 "workflow. If the workflow is run with a distributed batch system, the job "
-                 "store must be accessible by all worker nodes. Depending on the desired "
-                 "job store implementation, the location should be formatted according to "
-                 "one of the following schemes:\n\n"
-                 "file:<path> where <path> points to a directory on the file system\n\n"
-                 "aws:<region>:<prefix> where <region> is the name of an AWS region like "
-                 "us-west-2 and <prefix> will be prepended to the names of any top-level "
-                 "AWS resources in use by job store, e.g. S3 buckets.\n\n "
-                 "google:<project_id>:<prefix> TODO: explain\n\n"
-                 "For backwards compatibility, you may also specify ./foo (equivalent to "
-                 "file:./foo or just file:foo) or /bar (equivalent to file:/bar).")
-
-
-def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False, cwl: bool = False) -> None:
+JOBSTORE_HELP = (
+    "The location of the job store for the workflow. "
+    "A job store holds persistent information about the jobs, stats, and files in a "
+    "workflow. If the workflow is run with a distributed batch system, the job "
+    "store must be accessible by all worker nodes. Depending on the desired "
+    "job store implementation, the location should be formatted according to "
+    "one of the following schemes:\n\n"
+    "file:<path> where <path> points to a directory on the file system\n\n"
+    "aws:<region>:<prefix> where <region> is the name of an AWS region like "
+    "us-west-2 and <prefix> will be prepended to the names of any top-level "
+    "AWS resources in use by job store, e.g. S3 buckets.\n\n "
+    "google:<project_id>:<prefix> TODO: explain\n\n"
+    "For backwards compatibility, you may also specify ./foo (equivalent to "
+    "file:./foo or just file:foo) or /bar (equivalent to file:/bar)."
+)


+def add_base_toil_options(
+    parser: ArgumentParser, jobstore_as_flag: bool = False, cwl: bool = False
+) -> None:
     """
     Add base Toil command line options to the parser.
     :param parser: Argument parser to add options to
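The locator semantics described by JOBSTORE_HELP are unchanged by the reflow. A short sketch of what `parse_jobstore` returns for each form; the paths here are made up:

```python
# Illustrative only: locator normalization in parse_jobstore, per the hunk above.
from toil.options.common import parse_jobstore

parse_jobstore("aws:us-west-2:my-prefix")  # non-file locators pass through unchanged
parse_jobstore("file:./foo")  # relative file paths are absolutized on the leader,
                              # yielding something like "file:/current/dir/foo"
# Per JOBSTORE_HELP, bare "./foo" and "/bar" are also accepted as file: locators.
```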
@@ -203,40 +214,58 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False

     # If using argparse instead of configargparse, this should just not parse when calling parse_args()
     # default config value is set to none as defaults should already be populated at config init
-    config.add_argument('--config', dest='config', is_config_file_arg=True, default=None, metavar="PATH",
-                        help="Get options from a config file.")
+    config.add_argument(
+        "--config",
+        dest="config",
+        is_config_file_arg=True,
+        default=None,
+        metavar="PATH",
+        help="Get options from a config file.",
+    )

     add_logging_options(parser)
     parser.register("type", "bool", parseBool)  # Custom type for arg=True/False.

     # Core options
     core_options = parser.add_argument_group(
-        title="Toil core options.",
+        title="Toil core options",
         description="Options to specify the location of the Toil workflow and "
-                    "turn on stats collation about the performance of jobs."
+        "turn on stats collation about the performance of jobs.",
     )
     if jobstore_as_flag:
-        core_options.add_argument('--jobstore', '--jobStore', dest='jobStore', type=parse_jobstore, default=None,
-                                  help=JOBSTORE_HELP)
+        core_options.add_argument(
+            "--jobstore",
+            "--jobStore",
+            dest="jobStore",
+            type=parse_jobstore,
+            default=None,
+            help=JOBSTORE_HELP,
+        )
     else:
-        core_options.add_argument('jobStore', type=parse_jobstore, help=JOBSTORE_HELP)
+        core_options.add_argument("jobStore", type=parse_jobstore, help=JOBSTORE_HELP)

     class WorkDirAction(Action):
         """
         Argparse action class to check that the provided --workDir exists
         """

-        def __call__(self, parser: Any, namespace: Any, values: Any, option_string: Any = None) -> None:
+        def __call__(
+            self, parser: Any, namespace: Any, values: Any, option_string: Any = None
+        ) -> None:
             workDir = values
             if workDir is not None:
                 workDir = os.path.abspath(workDir)
                 if not os.path.exists(workDir):
-                    raise RuntimeError(f"The path provided to --workDir ({workDir}) does not exist.")
+                    raise RuntimeError(
+                        f"The path provided to --workDir ({workDir}) does not exist."
+                    )

                 if len(workDir) > 80:
-                    logger.warning(f'Length of workDir path "{workDir}" is {len(workDir)} characters. '
-                                   f'Consider setting a shorter path with --workPath or setting TMPDIR to something '
-                                   f'like "/tmp" to avoid overly long paths.')
+                    logger.warning(
+                        f'Length of workDir path "{workDir}" is {len(workDir)} characters. '
+                        f"Consider setting a shorter path with --workPath or setting TMPDIR to something "
+                        f'like "/tmp" to avoid overly long paths.'
+                    )
             setattr(namespace, self.dest, workDir)

     class CoordinationDirAction(Action):
@@ -244,16 +273,21 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
         Argparse action class to check that the provided --coordinationDir exists
         """

-        def __call__(self, parser: Any, namespace: Any, values: Any, option_string: Any = None) -> None:
+        def __call__(
+            self, parser: Any, namespace: Any, values: Any, option_string: Any = None
+        ) -> None:
             coordination_dir = values
             if coordination_dir is not None:
                 coordination_dir = os.path.abspath(coordination_dir)
                 if not os.path.exists(coordination_dir):
                     raise RuntimeError(
-                        f"The path provided to --coordinationDir ({coordination_dir}) does not exist.")
+                        f"The path provided to --coordinationDir ({coordination_dir}) does not exist."
+                    )
             setattr(namespace, self.dest, coordination_dir)

-    def make_closed_interval_action(min: Union[int, float], max: Optional[Union[int, float]] = None) -> Type[Action]:
+    def make_closed_interval_action(
+        min: Union[int, float], max: Optional[Union[int, float]] = None
+    ) -> type[Action]:
         """
         Returns an argparse action class to check if the input is within the given half-open interval.
         ex:
265
299
  """
266
300
 
267
301
  class ClosedIntOrFloatAction(Action):
268
- def __call__(self, parser: Any, namespace: Any, values: Any, option_string: Any = None) -> None:
302
+ def __call__(
303
+ self,
304
+ parser: Any,
305
+ namespace: Any,
306
+ values: Any,
307
+ option_string: Any = None,
308
+ ) -> None:
269
309
  def is_within(x: Union[int, float]) -> bool:
270
310
  if max is None:
271
311
  return min <= x
@@ -275,132 +315,223 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
                 try:
                     if not is_within(values):
                         raise parser.error(
-                            f"{option_string} ({values}) must be within the range: [{min}, {'infinity' if max is None else max}]")
+                            f"{option_string} ({values}) must be within the range: [{min}, {'infinity' if max is None else max}]"
+                        )
                 except AssertionError:
-                    raise RuntimeError(f"The {option_string} option has an invalid value: {values}")
+                    raise RuntimeError(
+                        f"The {option_string} option has an invalid value: {values}"
+                    )
                 setattr(namespace, self.dest, values)

         return ClosedIntOrFloatAction

-    core_options.add_argument("--workDir", dest="workDir", default=None, env_var="TOIL_WORKDIR", action=WorkDirAction,
-                              metavar="PATH",
-                              help="Absolute path to directory where temporary files generated during the Toil "
-                                   "run should be placed. Standard output and error from batch system jobs "
-                                   "(unless --noStdOutErr is set) will be placed in this directory. A cache directory "
-                                   "may be placed in this directory. Temp files and folders will be placed in a "
-                                   "directory toil-<workflowID> within workDir. The workflowID is generated by "
-                                   "Toil and will be reported in the workflow logs. Default is determined by the "
-                                   "variables (TMPDIR, TEMP, TMP) via mkdtemp. This directory needs to exist on "
-                                   "all machines running jobs; if capturing standard output and error from batch "
-                                   "system jobs is desired, it will generally need to be on a shared file system. "
-                                   "When sharing a cache between containers on a host, this directory must be "
-                                   "shared between the containers.")
-    core_options.add_argument("--coordinationDir", dest="coordination_dir", default=None,
-                              env_var="TOIL_COORDINATION_DIR", action=CoordinationDirAction, metavar="PATH",
-                              help="Absolute path to directory where Toil will keep state and lock files."
-                                   "When sharing a cache between containers on a host, this directory must be "
-                                   "shared between the containers.")
-    core_options.add_argument("--noStdOutErr", dest="noStdOutErr", default=False, action="store_true",
-                              help="Do not capture standard output and error from batch system jobs.")
-    core_options.add_argument("--stats", dest="stats", default=False, action="store_true",
-                              help="Records statistics about the toil workflow to be used by 'toil stats'.")
-    clean_choices = ['always', 'onError', 'never', 'onSuccess']
-    core_options.add_argument("--clean", dest="clean", choices=clean_choices, default="onSuccess",
-                              help=f"Determines the deletion of the jobStore upon completion of the program. "
-                                   f"Choices: {clean_choices}. The --stats option requires information from the "
-                                   f"jobStore upon completion so the jobStore will never be deleted with that flag. "
-                                   f"If you wish to be able to restart the run, choose \'never\' or \'onSuccess\'. "
-                                   f"Default is \'never\' if stats is enabled, and \'onSuccess\' otherwise.")
-    core_options.add_argument("--cleanWorkDir", dest="cleanWorkDir", choices=clean_choices, default='always',
-                              help=f"Determines deletion of temporary worker directory upon completion of a job. "
-                                   f"Choices: {clean_choices}. Default = always. WARNING: This option should be "
-                                   f"changed for debugging only. Running a full pipeline with this option could "
-                                   f"fill your disk with excessive intermediate data.")
-    core_options.add_argument("--clusterStats", dest="clusterStats", nargs='?', action='store', default=None,
-                              metavar="OPT_PATH", const=os.getcwd(),
-                              help="If enabled, writes out JSON resource usage statistics to a file. "
-                                   "The default location for this file is the current working directory, but an "
-                                   "absolute path can also be passed to specify where this file should be written. "
-                                   "This options only applies when using scalable batch systems.")
+    core_options.add_argument(
+        "--workDir",
+        dest="workDir",
+        default=None,
+        env_var="TOIL_WORKDIR",
+        action=WorkDirAction,
+        metavar="PATH",
+        help="Absolute path to directory where temporary files generated during the Toil "
+        "run should be placed. Standard output and error from batch system jobs "
+        "(unless --noStdOutErr is set) will be placed in this directory. A cache directory "
+        "may be placed in this directory. Temp files and folders will be placed in a "
+        "directory toil-<workflowID> within workDir. The workflowID is generated by "
+        "Toil and will be reported in the workflow logs. Default is determined by the "
+        "variables (TMPDIR, TEMP, TMP) via mkdtemp. This directory needs to exist on "
+        "all machines running jobs; if capturing standard output and error from batch "
+        "system jobs is desired, it will generally need to be on a shared file system. "
+        "When sharing a cache between containers on a host, this directory must be "
+        "shared between the containers.",
+    )
+    core_options.add_argument(
+        "--coordinationDir",
+        dest="coordination_dir",
+        default=None,
+        env_var="TOIL_COORDINATION_DIR",
+        action=CoordinationDirAction,
+        metavar="PATH",
+        help="Absolute path to directory where Toil will keep state and lock files. "
+        "When sharing a cache between containers on a host, this directory must be "
+        "shared between the containers.",
+    )
+    core_options.add_argument(
+        "--noStdOutErr",
+        dest="noStdOutErr",
+        default=False,
+        action="store_true",
+        help="Do not capture standard output and error from batch system jobs.",
+    )
+    # TODO: Should this be deprecated since we always save stats now for history tracking?
+    core_options.add_argument(
+        "--stats",
+        dest="stats",
+        default=False,
+        action="store_true",
+        help="Keep statistics about the toil workflow to be used by 'toil stats'.",
+    )
+    clean_choices = ["always", "onError", "never", "onSuccess"]
+    core_options.add_argument(
+        "--clean",
+        dest="clean",
+        choices=clean_choices,
+        default="onSuccess",
+        help=f"Determines the deletion of the jobStore upon completion of the program. "
+        f"Choices: {clean_choices}. The --stats option requires information from the "
+        f"jobStore upon completion so the jobStore will never be deleted with that flag. "
+        f"If you wish to be able to restart the run, choose 'never' or 'onSuccess'. "
+        f"Default is 'never' if stats is enabled, and 'onSuccess' otherwise.",
+    )
+    core_options.add_argument(
+        "--cleanWorkDir",
+        dest="cleanWorkDir",
+        choices=clean_choices,
+        default="always",
+        help=f"Determines deletion of temporary worker directory upon completion of a job. "
+        f"Choices: {clean_choices}. Default = always. WARNING: This option should be "
+        f"changed for debugging only. Running a full pipeline with this option could "
+        f"fill your disk with excessive intermediate data.",
+    )
+    core_options.add_argument(
+        "--clusterStats",
+        dest="clusterStats",
+        nargs="?",
+        action="store",
+        default=None,
+        metavar="OPT_PATH",
+        const=os.getcwd(),
+        help="If enabled, writes out JSON resource usage statistics to a file. "
+        "The default location for this file is the current working directory, but an "
+        "absolute path can also be passed to specify where this file should be written. "
+        "This options only applies when using scalable batch systems.",
+    )

     # Restarting the workflow options
     restart_options = parser.add_argument_group(
-        title="Toil options for restarting an existing workflow.",
-        description="Allows the restart of an existing workflow"
+        title="Toil options for restarting an existing workflow",
+        description="Allows the restart of an existing workflow",
+    )
+    restart_options.add_argument(
+        "--restart",
+        dest="restart",
+        default=False,
+        action="store_true",
+        help="If --restart is specified then will attempt to restart existing workflow "
+        "at the location pointed to by the --jobStore option. Will raise an exception "
+        "if the workflow does not exist",
     )
-    restart_options.add_argument("--restart", dest="restart", default=False, action="store_true",
-                                 help="If --restart is specified then will attempt to restart existing workflow "
-                                      "at the location pointed to by the --jobStore option. Will raise an exception "
-                                      "if the workflow does not exist")

     # Batch system options
     batchsystem_options = parser.add_argument_group(
-        title="Toil options for specifying the batch system.",
-        description="Allows the specification of the batch system."
+        title="Toil options for specifying the batch system",
+        description="Allows the specification of the batch system.",
     )
     add_all_batchsystem_options(batchsystem_options)

     # File store options
     file_store_options = parser.add_argument_group(
-        title="Toil options for configuring storage.",
-        description="Allows configuring Toil's data storage."
+        title="Toil options for configuring storage",
+        description="Allows configuring Toil's data storage.",
     )
     link_imports = file_store_options.add_mutually_exclusive_group()
-    link_imports_help = ("When using a filesystem based job store, CWL input files are by default symlinked in. "
-                         "Setting this option to True instead copies the files into the job store, which may protect "
-                         "them from being modified externally. When set to False, as long as caching is enabled, "
-                         "Toil will protect the file automatically by changing the permissions to read-only."
-                         "default=%(default)s")
-    link_imports.add_argument("--symlinkImports", dest="symlinkImports", type=strtobool, default=True,
-                              metavar="BOOL", help=link_imports_help)
+    link_imports_help = (
+        "When using a filesystem based job store, CWL input files are by default symlinked in. "
+        "Setting this option to True instead copies the files into the job store, which may protect "
+        "them from being modified externally. When set to False, as long as caching is enabled, "
+        "Toil will protect the file automatically by changing the permissions to read-only. "
+        "default=%(default)s"
+    )
+    link_imports.add_argument(
+        "--symlinkImports",
+        dest="symlinkImports",
+        type=strtobool,
+        default=True,
+        metavar="BOOL",
+        help=link_imports_help,
+    )
     move_exports = file_store_options.add_mutually_exclusive_group()
-    move_exports_help = ('When using a filesystem based job store, output files are by default moved to the '
-                         'output directory, and a symlink to the moved exported file is created at the initial '
-                         'location. Setting this option to True instead copies the files into the output directory. '
-                         'Applies to filesystem-based job stores only.'
-                         'default=%(default)s')
-    move_exports.add_argument("--moveOutputs", dest="moveOutputs", type=strtobool, default=False, metavar="BOOL",
-                              help=move_exports_help)
+    move_exports_help = (
+        "When using a filesystem based job store, output files are by default moved to the "
+        "output directory, and a symlink to the moved exported file is created at the initial "
+        "location. Setting this option to True instead copies the files into the output directory. "
+        "Applies to filesystem-based job stores only. "
+        "default=%(default)s"
+    )
+    move_exports.add_argument(
+        "--moveOutputs",
+        dest="moveOutputs",
+        type=strtobool,
+        default=False,
+        metavar="BOOL",
+        help=move_exports_help,
+    )

     caching = file_store_options.add_mutually_exclusive_group()
-    caching_help = "Enable or disable caching for your workflow, specifying this overrides default from job store"
-    caching.add_argument('--caching', dest='caching', type=opt_strtobool, default=None, metavar="BOOL",
-                         help=caching_help)
+    caching_help = ("Enable or disable worker level file caching for your workflow, specifying this overrides default from batch system. "
+                    "Does not affect CWL or WDL task caching.")
+    caching.add_argument(
+        "--caching",
+        dest="caching",
+        type=opt_strtobool,
+        default=None,
+        metavar="BOOL",
+        help=caching_help,
+    )
     # default is None according to PR 4299, seems to be generated at runtime

+    file_store_options.add_argument(
+        "--symlinkJobStoreReads",
+        dest="symlink_job_store_reads",
+        type=strtobool,
+        default=True,
+        metavar="BOOL",
+        help="Allow reads and container mounts from a JobStore's shared filesystem directly "
+        "via symlink. default=%(default)s",
+    )
+
     # Auto scaling options
     autoscaling_options = parser.add_argument_group(
-        title="Toil options for autoscaling the cluster of worker nodes.",
+        title="Toil options for autoscaling the cluster of worker nodes",
         description="Allows the specification of the minimum and maximum number of nodes in an autoscaled cluster, "
-                    "as well as parameters to control the level of provisioning."
+        "as well as parameters to control the level of provisioning.",
     )
-    provisioner_choices = ['aws', 'gce', None]
+    provisioner_choices = ["aws", "gce", None]

     # TODO: Better consolidate this provisioner arg and the one in provisioners/__init__.py?
-    autoscaling_options.add_argument('--provisioner', '-p', dest="provisioner", choices=provisioner_choices,
-                                     default=None,
-                                     help=f"The provisioner for cluster auto-scaling. This is the main Toil "
-                                          f"'--provisioner' option, and defaults to None for running on single "
-                                          f"machine and non-auto-scaling batch systems. The currently supported "
-                                          f"choices are {provisioner_choices}. The default is %(default)s.")
-    autoscaling_options.add_argument('--nodeTypes', default=[], dest="nodeTypes", type=parse_node_types,
-                                     action="extend",
-                                     help="Specifies a list of comma-separated node types, each of which is "
-                                          "composed of slash-separated instance types, and an optional spot "
-                                          "bid set off by a colon, making the node type preemptible. Instance "
-                                          "types may appear in multiple node types, and the same node type "
-                                          "may appear as both preemptible and non-preemptible.\n"
-                                          "Valid argument specifying two node types:\n"
-                                          "\tc5.4xlarge/c5a.4xlarge:0.42,t2.large\n"
-                                          "Node types:\n"
-                                          "\tc5.4xlarge/c5a.4xlarge:0.42 and t2.large\n"
-                                          "Instance types:\n"
-                                          "\tc5.4xlarge, c5a.4xlarge, and t2.large\n"
-                                          "Semantics:\n"
-                                          "\tBid $0.42/hour for either c5.4xlarge or c5a.4xlarge instances,\n"
-                                          "\ttreated interchangeably, while they are available at that price,\n"
-                                          "\tand buy t2.large instances at full price.\n"
-                                          "default=%(default)s")
+    autoscaling_options.add_argument(
+        "--provisioner",
+        "-p",
+        dest="provisioner",
+        choices=provisioner_choices,
+        default=None,
+        help=f"The provisioner for cluster auto-scaling. This is the main Toil "
+        f"'--provisioner' option, and defaults to None for running on single "
+        f"machine and non-auto-scaling batch systems. The currently supported "
+        f"choices are {provisioner_choices}. The default is %(default)s.",
+    )
+    autoscaling_options.add_argument(
+        "--nodeTypes",
+        default=[],
+        dest="nodeTypes",
+        type=parse_node_types,
+        action="extend",
+        help="Specifies a list of comma-separated node types, each of which is "
+        "composed of slash-separated instance types, and an optional spot "
+        "bid set off by a colon, making the node type preemptible. Instance "
+        "types may appear in multiple node types, and the same node type "
+        "may appear as both preemptible and non-preemptible.\n"
+        "Valid argument specifying two node types:\n"
+        "\tc5.4xlarge/c5a.4xlarge:0.42,t2.large\n"
+        "Node types:\n"
+        "\tc5.4xlarge/c5a.4xlarge:0.42 and t2.large\n"
+        "Instance types:\n"
+        "\tc5.4xlarge, c5a.4xlarge, and t2.large\n"
+        "Semantics:\n"
+        "\tBid $0.42/hour for either c5.4xlarge or c5a.4xlarge instances,\n"
+        "\ttreated interchangeably, while they are available at that price,\n"
+        "\tand buy t2.large instances at full price.\n"
+        "default=%(default)s",
+    )

     class NodeExtendAction(_AppendAction):
         """
@@ -413,251 +544,495 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
413
544
  super().__init__(option_strings, dest, **kwargs)
414
545
  self.is_default = True
415
546
 
416
- def __call__(self, parser: Any, namespace: Any, values: Any, option_string: Any = None) -> None:
547
+ def __call__(
548
+ self, parser: Any, namespace: Any, values: Any, option_string: Any = None
549
+ ) -> None:
417
550
  if self.is_default:
418
551
  setattr(namespace, self.dest, values)
419
552
  self.is_default = False
420
553
  else:
421
554
  super().__call__(parser, namespace, values, option_string)
422
555
 
423
- autoscaling_options.add_argument('--maxNodes', default=[10], dest="maxNodes", type=parse_int_list,
424
- action=NodeExtendAction, metavar="INT[,INT...]",
425
- help=f"Maximum number of nodes of each type in the cluster, if using autoscaling, "
426
- f"provided as a comma-separated list. The first value is used as a default "
427
- f"if the list length is less than the number of nodeTypes. "
428
- f"default=%(default)s")
429
- autoscaling_options.add_argument('--minNodes', default=[0], dest="minNodes", type=parse_int_list,
430
- action=NodeExtendAction, metavar="INT[,INT...]",
431
- help="Mininum number of nodes of each type in the cluster, if using "
432
- "auto-scaling. This should be provided as a comma-separated list of the "
433
- "same length as the list of node types. default=%(default)s")
434
- autoscaling_options.add_argument("--targetTime", dest="targetTime", default=defaultTargetTime, type=int,
435
- action=make_closed_interval_action(0), metavar="INT",
436
- help=f"Sets how rapidly you aim to complete jobs in seconds. Shorter times mean "
437
- f"more aggressive parallelization. The autoscaler attempts to scale up/down "
438
- f"so that it expects all queued jobs will complete within targetTime "
439
- f"seconds. default=%(default)s")
440
- autoscaling_options.add_argument("--betaInertia", dest="betaInertia", default=0.1, type=float,
441
- action=make_closed_interval_action(0.0, 0.9), metavar="FLOAT",
442
- help=f"A smoothing parameter to prevent unnecessary oscillations in the number "
443
- f"of provisioned nodes. This controls an exponentially weighted moving "
444
- f"average of the estimated number of nodes. A value of 0.0 disables any "
445
- f"smoothing, and a value of 0.9 will smooth so much that few changes will "
446
- f"ever be made. Must be between 0.0 and 0.9. default=%(default)s")
447
- autoscaling_options.add_argument("--scaleInterval", dest="scaleInterval", default=60, type=int, metavar="INT",
448
- help=f"The interval (seconds) between assessing if the scale of "
449
- f"the cluster needs to change. default=%(default)s")
450
- autoscaling_options.add_argument("--preemptibleCompensation", "--preemptableCompensation",
451
- dest="preemptibleCompensation", default=0.0, type=float,
452
- action=make_closed_interval_action(0.0, 1.0), metavar="FLOAT",
453
- help=f"The preference of the autoscaler to replace preemptible nodes with "
454
- f"non-preemptible nodes, when preemptible nodes cannot be started for some "
455
- f"reason. This value must be between 0.0 and 1.0, inclusive. "
456
- f"A value of 0.0 disables such "
457
- f"compensation, a value of 0.5 compensates two missing preemptible nodes "
458
- f"with a non-preemptible one. A value of 1.0 replaces every missing "
459
- f"pre-emptable node with a non-preemptible one. default=%(default)s")
460
- autoscaling_options.add_argument("--nodeStorage", dest="nodeStorage", default=50, type=int, metavar="INT",
461
- help="Specify the size of the root volume of worker nodes when they are launched "
462
- "in gigabytes. You may want to set this if your jobs require a lot of disk "
463
- f"space. (default=%(default)s).")
464
- autoscaling_options.add_argument('--nodeStorageOverrides', dest="nodeStorageOverrides", default=[],
465
- type=parse_str_list, action="extend",
466
- metavar="NODETYPE:NODESTORAGE[,NODETYPE:NODESTORAGE...]",
467
- help="Comma-separated list of nodeType:nodeStorage that are used to override "
468
- "the default value from --nodeStorage for the specified nodeType(s). "
469
- "This is useful for heterogeneous jobs where some tasks require much more "
470
- "disk than others.")
471
-
472
- autoscaling_options.add_argument("--metrics", dest="metrics", default=False, type=strtobool, metavar="BOOL",
473
- help="Enable the prometheus/grafana dashboard for monitoring CPU/RAM usage, "
474
- "queue size, and issued jobs.")
475
- autoscaling_options.add_argument("--assumeZeroOverhead", dest="assume_zero_overhead", default=False,
476
- type=strtobool, metavar="BOOL",
477
- help="Ignore scheduler and OS overhead and assume jobs can use every last byte "
478
- "of memory and disk on a node when autoscaling.")
556
+ autoscaling_options.add_argument(
557
+ "--maxNodes",
558
+ default=[10],
559
+ dest="maxNodes",
560
+ type=parse_int_list,
561
+ action=NodeExtendAction,
562
+ metavar="INT[,INT...]",
563
+ help=f"Maximum number of nodes of each type in the cluster, if using autoscaling, "
564
+ f"provided as a comma-separated list. The first value is used as a default "
565
+ f"if the list length is less than the number of nodeTypes. "
566
+ f"default=%(default)s",
567
+ )
568
+ autoscaling_options.add_argument(
569
+ "--minNodes",
570
+ default=[0],
571
+ dest="minNodes",
572
+ type=parse_int_list,
573
+ action=NodeExtendAction,
574
+ metavar="INT[,INT...]",
575
+ help="Mininum number of nodes of each type in the cluster, if using "
576
+ "auto-scaling. This should be provided as a comma-separated list of the "
577
+ "same length as the list of node types. default=%(default)s",
578
+ )
579
+ autoscaling_options.add_argument(
580
+ "--targetTime",
581
+ dest="targetTime",
582
+ default=defaultTargetTime,
583
+ type=int,
584
+ action=make_closed_interval_action(0),
585
+ metavar="INT",
586
+ help=f"Sets how rapidly you aim to complete jobs in seconds. Shorter times mean "
587
+ f"more aggressive parallelization. The autoscaler attempts to scale up/down "
588
+ f"so that it expects all queued jobs will complete within targetTime "
589
+ f"seconds. default=%(default)s",
590
+ )
591
+ autoscaling_options.add_argument(
592
+ "--betaInertia",
593
+ dest="betaInertia",
594
+ default=0.1,
595
+ type=float,
596
+ action=make_closed_interval_action(0.0, 0.9),
597
+ metavar="FLOAT",
598
+ help=f"A smoothing parameter to prevent unnecessary oscillations in the number "
599
+ f"of provisioned nodes. This controls an exponentially weighted moving "
600
+ f"average of the estimated number of nodes. A value of 0.0 disables any "
601
+ f"smoothing, and a value of 0.9 will smooth so much that few changes will "
602
+ f"ever be made. Must be between 0.0 and 0.9. default=%(default)s",
603
+ )
604
+ autoscaling_options.add_argument(
605
+ "--scaleInterval",
606
+ dest="scaleInterval",
607
+ default=60,
608
+ type=int,
609
+ metavar="INT",
610
+ help=f"The interval (seconds) between assessing if the scale of "
611
+ f"the cluster needs to change. default=%(default)s",
612
+ )
613
+ autoscaling_options.add_argument(
614
+ "--preemptibleCompensation",
615
+ "--preemptableCompensation",
616
+ dest="preemptibleCompensation",
617
+ default=0.0,
618
+ type=float,
619
+ action=make_closed_interval_action(0.0, 1.0),
620
+ metavar="FLOAT",
621
+ help=f"The preference of the autoscaler to replace preemptible nodes with "
622
+ f"non-preemptible nodes, when preemptible nodes cannot be started for some "
623
+ f"reason. This value must be between 0.0 and 1.0, inclusive. "
624
+ f"A value of 0.0 disables such "
625
+ f"compensation, a value of 0.5 compensates two missing preemptible nodes "
626
+ f"with a non-preemptible one. A value of 1.0 replaces every missing "
627
+ f"pre-emptable node with a non-preemptible one. default=%(default)s",
628
+ )
629
+ autoscaling_options.add_argument(
630
+ "--nodeStorage",
631
+ dest="nodeStorage",
632
+ default=50,
633
+ type=int,
634
+ metavar="INT",
635
+ help="Specify the size of the root volume of worker nodes when they are launched "
636
+ "in gigabytes. You may want to set this if your jobs require a lot of disk "
637
+ f"space. (default=%(default)s).",
638
+ )
639
+ autoscaling_options.add_argument(
640
+ "--nodeStorageOverrides",
641
+ dest="nodeStorageOverrides",
642
+ default=[],
643
+ type=parse_str_list,
644
+ action="extend",
645
+ metavar="NODETYPE:NODESTORAGE[,NODETYPE:NODESTORAGE...]",
646
+ help="Comma-separated list of nodeType:nodeStorage that are used to override "
647
+ "the default value from --nodeStorage for the specified nodeType(s). "
648
+ "This is useful for heterogeneous jobs where some tasks require much more "
649
+ "disk than others.",
650
+ )
651
+
652
+ autoscaling_options.add_argument(
653
+ "--metrics",
654
+ dest="metrics",
655
+ default=False,
656
+ type=strtobool,
657
+ metavar="BOOL",
658
+ help="Enable the prometheus/grafana dashboard for monitoring CPU/RAM usage, "
659
+ "queue size, and issued jobs.",
660
+ )
661
+ autoscaling_options.add_argument(
662
+ "--assumeZeroOverhead",
663
+ dest="assume_zero_overhead",
664
+ default=False,
665
+ type=strtobool,
666
+ metavar="BOOL",
667
+ help="Ignore scheduler and OS overhead and assume jobs can use every last byte "
668
+ "of memory and disk on a node when autoscaling.",
669
+ )
479
670
 
480
671
  # Parameters to limit service jobs / detect service deadlocks
481
672
  service_options = parser.add_argument_group(
482
673
  title="Toil options for limiting the number of service jobs and detecting service deadlocks",
483
- description="Allows the specification of the maximum number of service jobs in a cluster. By keeping "
484
- "this limited we can avoid nodes occupied with services causing deadlocks."
485
- )
486
- service_options.add_argument("--maxServiceJobs", dest="maxServiceJobs", default=SYS_MAX_SIZE, type=int,
487
- metavar="INT",
488
- help=SUPPRESS if cwl else f"The maximum number of service jobs that can be run "
489
- f"concurrently, excluding service jobs running on "
490
- f"preemptible nodes. default=%(default)s")
491
- service_options.add_argument("--maxPreemptibleServiceJobs", dest="maxPreemptibleServiceJobs",
492
- default=SYS_MAX_SIZE,
493
- type=int, metavar="INT",
494
- help=SUPPRESS if cwl else "The maximum number of service jobs that can run "
495
- "concurrently on preemptible nodes. default=%(default)s")
496
- service_options.add_argument("--deadlockWait", dest="deadlockWait", default=60, type=int, metavar="INT",
497
- help=SUPPRESS if cwl else f"Time, in seconds, to tolerate the workflow running only "
498
- f"the same service jobs, with no jobs to use them, "
499
- f"before declaring the workflow to be deadlocked and "
500
- f"stopping. default=%(default)s")
501
- service_options.add_argument("--deadlockCheckInterval", dest="deadlockCheckInterval", default=30, type=int,
502
- metavar="INT",
503
- help=SUPPRESS if cwl else "Time, in seconds, to wait between checks to see if the "
504
- "workflow is stuck running only service jobs, with no jobs "
505
- "to use them. Should be shorter than --deadlockWait. May "
506
- "need to be increased if the batch system cannot enumerate "
507
- "running jobs quickly enough, or if polling for running "
508
- "jobs is placing an unacceptable load on a shared cluster."
509
- f"default=%(default)s")
674
+ description=(
675
+ SUPPRESS
676
+ if cwl
677
+ else "Allows the specification of the maximum number of service jobs in a cluster. "
678
+ "By keeping this limited we can avoid nodes occupied with services causing "
679
+ "deadlocks."
680
+ ),
681
+ )
682
+ service_options.add_argument(
683
+ "--maxServiceJobs",
684
+ dest="maxServiceJobs",
685
+ default=SYS_MAX_SIZE,
686
+ type=int,
687
+ metavar="INT",
688
+ help=(
689
+ SUPPRESS
690
+ if cwl
691
+ else f"The maximum number of service jobs that can be run "
692
+ f"concurrently, excluding service jobs running on "
693
+ f"preemptible nodes. default=%(default)s"
694
+ ),
695
+ )
696
+ service_options.add_argument(
697
+ "--maxPreemptibleServiceJobs",
698
+ dest="maxPreemptibleServiceJobs",
699
+ default=SYS_MAX_SIZE,
700
+ type=int,
701
+ metavar="INT",
702
+ help=(
703
+ SUPPRESS
704
+ if cwl
705
+ else "The maximum number of service jobs that can run "
706
+ "concurrently on preemptible nodes. default=%(default)s"
707
+ ),
708
+ )
709
+ service_options.add_argument(
710
+ "--deadlockWait",
711
+ dest="deadlockWait",
712
+ default=60,
713
+ type=int,
714
+ metavar="INT",
715
+ help=(
716
+ SUPPRESS
717
+ if cwl
718
+ else f"Time, in seconds, to tolerate the workflow running only "
719
+ f"the same service jobs, with no jobs to use them, "
720
+ f"before declaring the workflow to be deadlocked and "
721
+ f"stopping. default=%(default)s"
722
+ ),
723
+ )
724
+ service_options.add_argument(
725
+ "--deadlockCheckInterval",
726
+ dest="deadlockCheckInterval",
727
+ default=30,
728
+ type=int,
729
+ metavar="INT",
730
+ help=(
731
+ SUPPRESS
732
+ if cwl
733
+ else "Time, in seconds, to wait between checks to see if the "
734
+ "workflow is stuck running only service jobs, with no jobs "
735
+ "to use them. Should be shorter than --deadlockWait. May "
736
+ "need to be increased if the batch system cannot enumerate "
737
+ "running jobs quickly enough, or if polling for running "
738
+ "jobs is placing an unacceptable load on a shared cluster."
739
+ f"default=%(default)s"
740
+ ),
741
+ )
510
742
 
511
743
  # Resource requirements
512
744
  resource_options = parser.add_argument_group(
513
- title="Toil options for cores/memory requirements.",
745
+ title="Toil options for cores/memory requirements",
514
746
  description="The options to specify default cores/memory requirements (if not specified by the jobs "
515
- "themselves), and to limit the total amount of memory/cores requested from the batch system."
516
- )
517
- resource_help_msg = ('The {} amount of {} to request for a job. '
518
- 'Only applicable to jobs that do not specify an explicit value for this requirement. '
519
- '{}. '
520
- 'Default is {}.')
521
- cpu_note = 'Fractions of a core (for example 0.1) are supported on some batch systems [mesos, single_machine]'
522
- disk_mem_note = 'Standard suffixes like K, Ki, M, Mi, G or Gi are supported'
747
+ "themselves), and to limit the total amount of memory/cores requested from the batch system.",
748
+ )
749
+ resource_help_msg = (
750
+ "The {} amount of {} to request for a job. "
751
+ "Only applicable to jobs that do not specify an explicit value for this requirement. "
752
+ "{}. "
753
+ "Default is {}."
754
+ )
755
+ cpu_note = "Fractions of a core (for example 0.1) are supported on some batch systems [mesos, single_machine]"
756
+ disk_mem_note = "Standard suffixes like K, Ki, M, Mi, G or Gi are supported"
523
757
  accelerators_note = (
524
- 'Each accelerator specification can have a type (gpu [default], nvidia, amd, cuda, rocm, opencl, '
525
- 'or a specific model like nvidia-tesla-k80), and a count [default: 1]. If both a type and a count '
526
- 'are used, they must be separated by a colon. If multiple types of accelerators are '
527
- 'used, the specifications are separated by commas')
758
+ "Each accelerator specification can have a type (gpu [default], nvidia, amd, cuda, rocm, opencl, "
759
+ "or a specific model like nvidia-tesla-k80), and a count [default: 1]. If both a type and a count "
760
+ "are used, they must be separated by a colon. If multiple types of accelerators are "
761
+ "used, the specifications are separated by commas"
762
+ )
528
763
 
529
764
  h2b = lambda x: human2bytes(str(x))
530
765
 
531
-    resource_options.add_argument('--defaultMemory', dest='defaultMemory', default="2.0 Gi", type=h2b,
-                                  action=make_open_interval_action(1),
-                                  help=resource_help_msg.format('default', 'memory', disk_mem_note,
-                                                                bytes2human(2147483648)))
-    resource_options.add_argument('--defaultCores', dest='defaultCores', default=1, metavar='FLOAT', type=float,
-                                  action=make_open_interval_action(1.0),
-                                  help=resource_help_msg.format('default', 'cpu', cpu_note, str(1)))
-    resource_options.add_argument('--defaultDisk', dest='defaultDisk', default="2.0 Gi", metavar='INT', type=h2b,
-                                  action=make_open_interval_action(1),
-                                  help=resource_help_msg.format('default', 'disk', disk_mem_note,
-                                                                bytes2human(2147483648)))
-    resource_options.add_argument('--defaultAccelerators', dest='defaultAccelerators', default=[],
-                                  metavar='ACCELERATOR[,ACCELERATOR...]', type=parse_accelerator_list, action="extend",
-                                  help=resource_help_msg.format('default', 'accelerators', accelerators_note, []))
-    resource_options.add_argument('--defaultPreemptible', '--defaultPreemptable', dest='defaultPreemptible',
-                                  metavar='BOOL',
-                                  type=strtobool, nargs='?', const=True, default=False,
-                                  help='Make all jobs able to run on preemptible (spot) nodes by default.')
-    resource_options.add_argument('--maxCores', dest='maxCores', default=SYS_MAX_SIZE, metavar='INT', type=int,
-                                  action=make_open_interval_action(1),
-                                  help=resource_help_msg.format('max', 'cpu', cpu_note, str(SYS_MAX_SIZE)))
-    resource_options.add_argument('--maxMemory', dest='maxMemory', default=SYS_MAX_SIZE, metavar='INT', type=h2b,
-                                  action=make_open_interval_action(1),
-                                  help=resource_help_msg.format('max', 'memory', disk_mem_note,
-                                                                bytes2human(SYS_MAX_SIZE)))
-    resource_options.add_argument('--maxDisk', dest='maxDisk', default=SYS_MAX_SIZE, metavar='INT', type=h2b,
-                                  action=make_open_interval_action(1),
-                                  help=resource_help_msg.format('max', 'disk', disk_mem_note,
-                                                                bytes2human(SYS_MAX_SIZE)))
+    resource_options.add_argument(
+        "--defaultMemory",
+        dest="defaultMemory",
+        default="2.0 Gi",
+        type=h2b,
+        action=make_open_interval_action(1),
+        help=resource_help_msg.format(
+            "default", "memory", disk_mem_note, bytes2human(2147483648)
+        ),
+    )
+    resource_options.add_argument(
+        "--defaultCores",
+        dest="defaultCores",
+        default=1,
+        metavar="FLOAT",
+        type=float,
+        action=make_open_interval_action(1.0),
+        help=resource_help_msg.format("default", "cpu", cpu_note, str(1)),
+    )
+    resource_options.add_argument(
+        "--defaultDisk",
+        dest="defaultDisk",
+        default="2.0 Gi",
+        metavar="INT",
+        type=h2b,
+        action=make_open_interval_action(1),
+        help=resource_help_msg.format(
+            "default", "disk", disk_mem_note, bytes2human(2147483648)
+        ),
+    )
+    resource_options.add_argument(
+        "--defaultAccelerators",
+        dest="defaultAccelerators",
+        default=[],
+        metavar="ACCELERATOR[,ACCELERATOR...]",
+        type=parse_accelerator_list,
+        action="extend",
+        help=resource_help_msg.format("default", "accelerators", accelerators_note, []),
+    )
+    resource_options.add_argument(
+        "--defaultPreemptible",
+        "--defaultPreemptable",
+        dest="defaultPreemptible",
+        metavar="BOOL",
+        type=strtobool,
+        nargs="?",
+        const=True,
+        default=False,
+        help="Make all jobs able to run on preemptible (spot) nodes by default.",
+    )
+    resource_options.add_argument(
+        "--maxCores",
+        dest="maxCores",
+        default=SYS_MAX_SIZE,
+        metavar="INT",
+        type=int,
+        action=make_open_interval_action(1),
+        help=resource_help_msg.format("max", "cpu", cpu_note, str(SYS_MAX_SIZE)),
+    )
+    resource_options.add_argument(
+        "--maxMemory",
+        dest="maxMemory",
+        default=SYS_MAX_SIZE,
+        metavar="INT",
+        type=h2b,
+        action=make_open_interval_action(1),
+        help=resource_help_msg.format(
+            "max", "memory", disk_mem_note, bytes2human(SYS_MAX_SIZE)
+        ),
+    )
+    resource_options.add_argument(
+        "--maxDisk",
+        dest="maxDisk",
+        default=SYS_MAX_SIZE,
+        metavar="INT",
+        type=h2b,
+        action=make_open_interval_action(1),
+        help=resource_help_msg.format(
+            "max", "disk", disk_mem_note, bytes2human(SYS_MAX_SIZE)
+        ),
+    )
 
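Each numeric option above is wrapped in make_open_interval_action, so out-of-range values are rejected at parse time rather than deep inside a workflow. A rough sketch of what such a factory can look like; the real implementation lives elsewhere in Toil's options code, and whether the bound itself is accepted is an assumption here:

    from argparse import Action

    def make_open_interval_action(lower):
        # Build an argparse Action that rejects values below `lower`
        # (semantics inferred from the call sites above).
        class OpenIntervalAction(Action):
            def __call__(self, parser, namespace, values, option_string=None):
                if values < lower:
                    parser.error(f"{option_string} must be at least {lower}, got {values}")
                setattr(namespace, self.dest, values)

        return OpenIntervalAction
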
     # Retrying/rescuing jobs
     job_options = parser.add_argument_group(
-        title="Toil options for rescuing/killing/restarting jobs.",
-        description="The options for jobs that either run too long/fail or get lost (some batch systems have issues!)."
-    )
-    job_options.add_argument("--retryCount", dest="retryCount", default=1, type=int,
-                             action=make_open_interval_action(0), metavar="INT",
-                             help=f"Number of times to retry a failing job before giving up and "
-                                  f"labeling job failed. default={1}")
-    job_options.add_argument("--enableUnlimitedPreemptibleRetries", "--enableUnlimitedPreemptableRetries",
-                             dest="enableUnlimitedPreemptibleRetries",
-                             type=strtobool, default=False, metavar="BOOL",
-                             help="If set, preemptible failures (or any failure due to an instance getting "
-                                  "unexpectedly terminated) will not count towards job failures and --retryCount.")
-    job_options.add_argument("--doubleMem", dest="doubleMem", type=strtobool, default=False, metavar="BOOL",
-                             help="If set, batch jobs which die to reaching memory limit on batch schedulers "
-                                  "will have their memory doubled and they will be retried. The remaining "
-                                  "retry count will be reduced by 1. Currently supported by LSF.")
-    job_options.add_argument("--maxJobDuration", dest="maxJobDuration", default=SYS_MAX_SIZE, type=int,
-                             action=make_open_interval_action(1), metavar="INT",
-                             help=f"Maximum runtime of a job (in seconds) before we kill it (this is a lower bound, "
-                                  f"and the actual time before killing the job may be longer). "
-                                  f"default=%(default)s")
-    job_options.add_argument("--rescueJobsFrequency", dest="rescueJobsFrequency", default=60, type=int,
-                             action=make_open_interval_action(1), metavar="INT",
-                             help=f"Period of time to wait (in seconds) between checking for missing/overlong jobs, "
-                                  f"that is jobs which get lost by the batch system. Expert parameter. "
-                                  f"default=%(default)s")
-    job_options.add_argument("--jobStoreTimeout", dest="job_store_timeout", default=30, type=float,
-                             action=make_open_interval_action(0), metavar="FLOAT",
-                             help=f"Maximum time (in seconds) to wait for a job's update to the job store "
-                                  f"before declaring it failed. default=%(default)s")
-
+        title="Toil options for rescuing/killing/restarting jobs",
+        description="The options for jobs that either run too long/fail or get lost (some batch systems have issues!).",
+    )
+    job_options.add_argument(
+        "--retryCount",
+        dest="retryCount",
+        default=1,
+        type=int,
+        action=make_open_interval_action(0),
+        metavar="INT",
+        help=f"Number of times to retry a failing job before giving up and "
+        f"labeling the job as failed. default={1}",
+    )
+    job_options.add_argument(
+        "--enableUnlimitedPreemptibleRetries",
+        "--enableUnlimitedPreemptableRetries",
+        dest="enableUnlimitedPreemptibleRetries",
+        type=strtobool,
+        default=False,
+        metavar="BOOL",
+        help="If set, preemptible failures (or any failure due to an instance getting "
+        "unexpectedly terminated) will not count towards job failures and --retryCount.",
+    )
+    job_options.add_argument(
+        "--doubleMem",
+        dest="doubleMem",
+        type=strtobool,
+        default=False,
+        metavar="BOOL",
+        help="If set, batch jobs which die due to reaching the memory limit on batch schedulers "
+        "will have their memory doubled and they will be retried. The remaining "
+        "retry count will be reduced by 1. Currently supported by LSF.",
+    )
+    job_options.add_argument(
+        "--maxJobDuration",
+        dest="maxJobDuration",
+        default=SYS_MAX_SIZE,
+        type=int,
+        action=make_open_interval_action(1),
+        metavar="INT",
+        help=f"Maximum runtime of a job (in seconds) before we kill it (this is a lower bound, "
+        f"and the actual time before killing the job may be longer). "
+        f"default=%(default)s",
+    )
+    job_options.add_argument(
+        "--rescueJobsFrequency",
+        dest="rescueJobsFrequency",
+        default=60,
+        type=int,
+        action=make_open_interval_action(1),
+        metavar="INT",
+        help=f"Period of time to wait (in seconds) between checking for missing/overlong jobs, "
+        f"that is, jobs which get lost by the batch system. Expert parameter. "
+        f"default=%(default)s",
+    )
+    job_options.add_argument(
+        "--jobStoreTimeout",
+        dest="job_store_timeout",
+        default=30,
+        type=float,
+        action=make_open_interval_action(0),
+        metavar="FLOAT",
+        help=f"Maximum time (in seconds) to wait for a job's update to the job store "
+        f"before declaring it failed. default=%(default)s",
+    )
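Taken together, these options make retry behavior tunable entirely from the command line. A minimal sketch of how they parse, assuming Toil and its dependencies are installed and that add_base_toil_options is importable from toil.options.common as this diff suggests:

    from argparse import ArgumentParser
    from toil.options.common import add_base_toil_options

    parser = ArgumentParser()
    add_base_toil_options(parser)
    opts = parser.parse_args(["--retryCount", "3", "--maxJobDuration", "7200"])
    print(opts.retryCount, opts.maxJobDuration)  # 3 7200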
 
     # Log management options
     log_options = parser.add_argument_group(
-        title="Toil log management options.",
-        description="Options for how Toil should manage its logs."
-    )
-    log_options.add_argument("--maxLogFileSize", dest="maxLogFileSize", default=100 * 1024 * 1024, type=h2b,
-                             action=make_open_interval_action(1),
-                             help=f"The maximum size of a job log file to keep (in bytes), log files larger than "
-                                  f"this will be truncated to the last X bytes. Setting this option to zero will "
-                                  f"prevent any truncation. Setting this option to a negative value will truncate "
-                                  f"from the beginning. Default={bytes2human(100 * 1024 * 1024)}")
-    log_options.add_argument("--writeLogs", dest="writeLogs", nargs='?', action='store', default=None,
-                             const=os.getcwd(), metavar="OPT_PATH",
-                             help="Write worker logs received by the leader into their own files at the specified "
-                                  "path. Any non-empty standard output and error from failed batch system jobs will "
-                                  "also be written into files at this path. The current working directory will be "
-                                  "used if a path is not specified explicitly. Note: By default only the logs of "
-                                  "failed jobs are returned to leader. Set log level to 'debug' or enable "
-                                  "'--writeLogsFromAllJobs' to get logs back from successful jobs, and adjust "
-                                  "'maxLogFileSize' to control the truncation limit for worker logs.")
-    log_options.add_argument("--writeLogsGzip", dest="writeLogsGzip", nargs='?', action='store', default=None,
-                             const=os.getcwd(), metavar="OPT_PATH",
-                             help="Identical to --writeLogs except the logs files are gzipped on the leader.")
-    log_options.add_argument("--writeLogsFromAllJobs", dest="writeLogsFromAllJobs", type=strtobool,
-                             default=False, metavar="BOOL",
-                             help="Whether to write logs from all jobs (including the successful ones) without "
-                                  "necessarily setting the log level to 'debug'. Ensure that either --writeLogs "
-                                  "or --writeLogsGzip is set if enabling this option.")
-    log_options.add_argument("--writeMessages", dest="write_messages", default=None,
-                             type=lambda x: None if x is None else os.path.abspath(x), metavar="PATH",
-                             help="File to send messages from the leader's message bus to.")
-    log_options.add_argument("--realTimeLogging", dest="realTimeLogging", type=strtobool, default=False,
-                             help="Enable real-time logging from workers to leader")
+        title="Toil log management options",
+        description="Options for how Toil should manage its logs.",
+    )
+    log_options.add_argument(
+        "--maxLogFileSize",
+        dest="maxLogFileSize",
+        default=100 * 1024 * 1024,
+        type=h2b,
+        action=make_open_interval_action(1),
+        help=f"The maximum size of a job log file to keep (in bytes), log files larger than "
+        f"this will be truncated to the last X bytes. Setting this option to zero will "
+        f"prevent any truncation. Setting this option to a negative value will truncate "
+        f"from the beginning. Default={bytes2human(100 * 1024 * 1024)}",
+    )
+    log_options.add_argument(
+        "--writeLogs",
+        dest="writeLogs",
+        nargs="?",
+        action="store",
+        default=None,
+        const=os.getcwd(),
+        metavar="OPT_PATH",
+        help="Write worker logs received by the leader into their own files at the specified "
+        "path. Any non-empty standard output and error from failed batch system jobs will "
+        "also be written into files at this path. The current working directory will be "
+        "used if a path is not specified explicitly. Note: By default only the logs of "
+        "failed jobs are returned to the leader. Set log level to 'debug' or enable "
+        "'--writeLogsFromAllJobs' to get logs back from successful jobs, and adjust "
+        "'maxLogFileSize' to control the truncation limit for worker logs.",
+    )
+    log_options.add_argument(
+        "--writeLogsGzip",
+        dest="writeLogsGzip",
+        nargs="?",
+        action="store",
+        default=None,
+        const=os.getcwd(),
+        metavar="OPT_PATH",
+        help="Identical to --writeLogs except the log files are gzipped on the leader.",
+    )
+    log_options.add_argument(
+        "--writeLogsFromAllJobs",
+        dest="writeLogsFromAllJobs",
+        type=strtobool,
+        default=False,
+        metavar="BOOL",
+        help="Whether to write logs from all jobs (including the successful ones) without "
+        "necessarily setting the log level to 'debug'. Ensure that either --writeLogs "
+        "or --writeLogsGzip is set if enabling this option.",
+    )
+    log_options.add_argument(
+        "--writeMessages",
+        dest="write_messages",
+        default=None,
+        type=lambda x: None if x is None else os.path.abspath(x),
+        metavar="PATH",
+        help="File to send messages from the leader's message bus to.",
+    )
+    log_options.add_argument(
+        "--realTimeLogging",
+        dest="realTimeLogging",
+        type=strtobool,
+        default=False,
+        metavar="BOOL",
+        help="Enable real-time logging from workers to the leader.",
+    )
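Both --writeLogs and --writeLogsGzip rely on argparse's three-state nargs="?" pattern: flag omitted, flag given bare, or flag given a value. The behavior is plain argparse, not Toil-specific, and can be checked in isolation:

    import os
    from argparse import ArgumentParser

    p = ArgumentParser()
    p.add_argument("--writeLogs", nargs="?", const=os.getcwd(), default=None)
    print(p.parse_args([]).writeLogs)               # None: per-job log files stay off
    print(p.parse_args(["--writeLogs"]).writeLogs)  # the const value: os.getcwd()
    print(p.parse_args(["--writeLogs", "/tmp/wl"]).writeLogs)  # the explicit path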
 
     # Misc options
     misc_options = parser.add_argument_group(
-        title="Toil miscellaneous options.",
-        description="Everything else."
-    )
-    misc_options.add_argument('--disableChaining', dest='disableChaining', type=strtobool, default=False,
-                              metavar="BOOL",
-                              help="Disables chaining of jobs (chaining uses one job's resource allocation "
-                                   "for its successor job if possible).")
-    misc_options.add_argument("--disableJobStoreChecksumVerification", dest="disableJobStoreChecksumVerification",
-                              default=False, type=strtobool, metavar="BOOL",
-                              help="Disables checksum verification for files transferred to/from the job store. "
-                                   "Checksum verification is a safety check to ensure the data is not corrupted "
-                                   "during transfer. Currently only supported for non-streaming AWS files.")
+        title="Toil miscellaneous options", description="Everything else."
+    )
+    misc_options.add_argument(
+        "--disableChaining",
+        dest="disableChaining",
+        type=strtobool,
+        default=False,
+        metavar="BOOL",
+        help="Disables chaining of jobs (chaining uses one job's resource allocation "
+        "for its successor job if possible).",
+    )
+    misc_options.add_argument(
+        "--disableJobStoreChecksumVerification",
+        dest="disableJobStoreChecksumVerification",
+        default=False,
+        type=strtobool,
+        metavar="BOOL",
+        help="Disables checksum verification for files transferred to/from the job store. "
+        "Checksum verification is a safety check to ensure the data is not corrupted "
+        "during transfer. Currently only supported for non-streaming AWS files.",
+    )
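Most BOOL options in this file are parsed with strtobool. Toil ships its own implementation (the distutils version is deprecated); the stand-in below shows the conventional accepted spellings and is an assumption, not Toil's exact code:

    def strtobool(val: str) -> bool:
        # Conventional truthy/falsy spellings; the real helper may differ.
        v = val.lower()
        if v in ("y", "yes", "t", "true", "on", "1"):
            return True
        if v in ("n", "no", "f", "false", "off", "0"):
            return False
        raise ValueError(f"invalid truth value {val!r}")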
 
     class SSEKeyAction(Action):
-        def __call__(self, parser: Any, namespace: Any, values: Any, option_string: Any = None) -> None:
+        def __call__(
+            self, parser: Any, namespace: Any, values: Any, option_string: Any = None
+        ) -> None:
             if values is not None:
                 sse_key = values
                 if sse_key is None:
                     return
                 with open(sse_key) as f:
-                    assert len(f.readline().rstrip()) == 32, 'SSE key appears to be invalid.'
+                    assert (
+                        len(f.readline().rstrip()) == 32
+                    ), "SSE key appears to be invalid."
             setattr(namespace, self.dest, values)
 
-    misc_options.add_argument("--sseKey", dest="sseKey", default=None, action=SSEKeyAction, metavar="PATH",
-                              help="Path to file containing 32 character key to be used for server-side encryption on "
-                                   "awsJobStore or googleJobStore. SSE will not be used if this flag is not passed.")
+    misc_options.add_argument(
+        "--sseKey",
+        dest="sseKey",
+        default=None,
+        action=SSEKeyAction,
+        metavar="PATH",
+        help="Path to a file containing a 32-character key to be used for server-side encryption on "
+        "awsJobStore or googleJobStore. SSE will not be used if this flag is not passed.",
+    )
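SSEKeyAction only verifies that the first line of the key file is exactly 32 characters long. A key file that passes the check can be generated like this (illustrative only; choose key material appropriate for your deployment):

    import secrets
    import string

    alphabet = string.ascii_letters + string.digits
    key = "".join(secrets.choice(alphabet) for _ in range(32))
    with open("sse.key", "w") as f:
        f.write(key + "\n")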
 
     # yaml.safe_load is being deprecated, this is the suggested workaround
     def yaml_safe_load(stream: Any) -> Any:
-        yaml = YAML(typ='safe', pure=True)
+        yaml = YAML(typ="safe", pure=True)
         d = yaml.load(stream)
         if isinstance(d, dict):
             # this means the argument was a dictionary and is valid yaml (for configargparse)
@@ -671,69 +1046,142 @@ def add_base_toil_options(parser: ArgumentParser, jobstore_as_flag: bool = False
         Argparse action class to implement the action="extend" functionality on dictionaries
         """
 
-        def __call__(self, parser: Any, namespace: Any, values: Any, option_string: Any = None) -> None:
+        def __call__(
+            self, parser: Any, namespace: Any, values: Any, option_string: Any = None
+        ) -> None:
             items = getattr(namespace, self.dest, None)
-            assert items is not None  # for mypy. This should never be None, esp. if called in setEnv
+            assert (
+                items is not None
+            )  # for mypy. This should never be None, esp. if called in setEnv
             # note: this will overwrite existing entries
             items.update(values)
 
-    misc_options.add_argument("--setEnv", '-e', metavar='NAME=VALUE or NAME', dest="environment",
-                              default={}, type=yaml_safe_load, action=ExtendActionDict,
-                              help="Set an environment variable early on in the worker. If VALUE is null, it will "
-                                   "be looked up in the current environment. Independently of this option, the worker "
-                                   "will try to emulate the leader's environment before running a job, except for "
-                                   "some variables known to vary across systems. Using this option, a variable can "
-                                   "be injected into the worker process itself before it is started.")
-    misc_options.add_argument("--servicePollingInterval", dest="servicePollingInterval", default=60.0, type=float,
-                              action=make_open_interval_action(0.0), metavar="FLOAT",
-                              help=f"Interval of time service jobs wait between polling for the existence of the "
-                                   f"keep-alive flag. Default: {60.0}")
-    misc_options.add_argument('--forceDockerAppliance', dest='forceDockerAppliance', type=strtobool, default=False,
-                              metavar="BOOL",
-                              help='Disables sanity checking the existence of the docker image specified by '
-                                   'TOIL_APPLIANCE_SELF, which Toil uses to provision mesos for autoscaling.')
-    misc_options.add_argument('--statusWait', dest='statusWait', type=int, default=3600, metavar="INT",
-                              help="Seconds to wait between reports of running jobs.")
-    misc_options.add_argument('--disableProgress', dest='disableProgress', action="store_true", default=False,
-                              help="Disables the progress bar shown when standard error is a terminal.")
+    misc_options.add_argument(
+        "--setEnv",
+        "-e",
+        metavar="NAME=VALUE or NAME",
+        dest="environment",
+        default={},
+        type=yaml_safe_load,
+        action=ExtendActionDict,
+        help="Set an environment variable early on in the worker. If VALUE is null, it will "
+        "be looked up in the current environment. Independently of this option, the worker "
+        "will try to emulate the leader's environment before running a job, except for "
+        "some variables known to vary across systems. Using this option, a variable can "
+        "be injected into the worker process itself before it is started.",
+    )
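--setEnv combines yaml_safe_load with ExtendActionDict so that repeated flags merge into a single mapping, later keys overwriting earlier ones. A self-contained sketch of the merge behavior, with a simple NAME=VALUE parser standing in for yaml_safe_load:

    from argparse import Action, ArgumentParser

    class ExtendActionDict(Action):
        def __call__(self, parser, namespace, values, option_string=None):
            # Merge each parsed mapping into the accumulated dict.
            getattr(namespace, self.dest).update(values)

    def kv(text):
        # Stand-in parser: "NAME=VALUE" -> {NAME: VALUE}; bare "NAME" -> {NAME: None}.
        name, sep, value = text.partition("=")
        return {name: value if sep else None}

    p = ArgumentParser()
    p.add_argument("--setEnv", "-e", dest="environment", default={}, type=kv,
                   action=ExtendActionDict)
    print(p.parse_args(["-e", "FOO=bar", "-e", "BAZ"]).environment)
    # {'FOO': 'bar', 'BAZ': None}
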
+    misc_options.add_argument(
+        "--servicePollingInterval",
+        dest="servicePollingInterval",
+        default=60.0,
+        type=float,
+        action=make_open_interval_action(0.0),
+        metavar="FLOAT",
+        help=f"Interval of time service jobs wait between polling for the existence of the "
+        f"keep-alive flag. Default: {60.0}",
+    )
+    misc_options.add_argument(
+        "--forceDockerAppliance",
+        dest="forceDockerAppliance",
+        type=strtobool,
+        default=False,
+        metavar="BOOL",
+        help="Disables sanity-checking the existence of the docker image specified by "
+        "TOIL_APPLIANCE_SELF, which Toil uses to provision mesos for autoscaling.",
+    )
+    misc_options.add_argument(
+        "--statusWait",
+        dest="statusWait",
+        type=int,
+        default=3600,
+        metavar="INT",
+        help="Seconds to wait between reports of running jobs.",
+    )
+    misc_options.add_argument(
+        "--disableProgress",
+        dest="disableProgress",
+        action="store_true",
+        default=False,
+        help="Disables the progress bar shown when standard error is a terminal.",
+    )
+    misc_options.add_argument(
+        "--publishWorkflowMetrics",
+        dest="publish_workflow_metrics",
+        choices=["all", "current", "no"],
+        default=None,
+        help="Whether to publish workflow metrics reports (including unique workflow "
+        "and task run IDs, job names, and version and Toil feature use information) to "
+        "Dockstore when a workflow completes. Selecting \"current\" will publish metrics "
+        "for the current workflow. Selecting \"all\" will also publish prior workflow "
+        "runs from the Toil history database, even if they themselves were run with \"no\". "
+        "Note that once published, workflow metrics CANNOT be deleted or un-published; they "
+        "will stay published forever!",
+    )
 
     # Debug options
     debug_options = parser.add_argument_group(
-        title="Toil debug options.",
-        description="Debug options for finding problems or helping with testing."
-    )
-    debug_options.add_argument("--debugWorker", dest="debugWorker", default=False, action="store_true",
-                               help="Experimental no forking mode for local debugging. Specifically, workers "
-                                    "are not forked and stderr/stdout are not redirected to the log.")
-    debug_options.add_argument("--disableWorkerOutputCapture", dest="disableWorkerOutputCapture", default=False,
-                               action="store_true",
-                               help="Let worker output go to worker's standard out/error instead of per-job logs.")
-    debug_options.add_argument("--badWorker", dest="badWorker", default=0.0, type=float,
-                               action=make_closed_interval_action(0.0, 1.0), metavar="FLOAT",
-                               help=f"For testing purposes randomly kill --badWorker proportion of jobs using "
-                                    f"SIGKILL. default={0.0}")
-    debug_options.add_argument("--badWorkerFailInterval", dest="badWorkerFailInterval", default=0.01, type=float,
-                               action=make_open_interval_action(0.0), metavar="FLOAT",  # might be cyclical?
-                               help=f"When killing the job pick uniformly within the interval from 0.0 to "
-                                    f"--badWorkerFailInterval seconds after the worker starts. "
-                                    f"default={0.01}")
+        title="Toil debug options",
+        description="Debug options for finding problems or helping with testing.",
+    )
+    debug_options.add_argument(
+        "--debugWorker",
+        dest="debugWorker",
+        default=False,
+        action="store_true",
+        help="Experimental no-forking mode for local debugging. Specifically, workers "
+        "are not forked and stderr/stdout are not redirected to the log.",
+    )
+    debug_options.add_argument(
+        "--disableWorkerOutputCapture",
+        dest="disableWorkerOutputCapture",
+        default=False,
+        action="store_true",
+        help="Let worker output go to the worker's standard out/error instead of per-job logs.",
+    )
+    debug_options.add_argument(
+        "--badWorker",
+        dest="badWorker",
+        default=0.0,
+        type=float,
+        action=make_closed_interval_action(0.0, 1.0),
+        metavar="FLOAT",
+        help=f"For testing purposes, randomly kill a --badWorker proportion of jobs using "
+        f"SIGKILL. default={0.0}",
+    )
+    debug_options.add_argument(
+        "--badWorkerFailInterval",
+        dest="badWorkerFailInterval",
+        default=0.01,
+        type=float,
+        action=make_open_interval_action(0.0),
+        metavar="FLOAT",  # might be cyclical?
+        help=f"When killing the job, pick uniformly within the interval from 0.0 to "
+        f"--badWorkerFailInterval seconds after the worker starts. "
+        f"default={0.01}",
+    )
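--badWorker is bounded on both sides by make_closed_interval_action, the two-sided counterpart of the open-interval factory sketched earlier. Its assumed shape:

    from argparse import Action

    def make_closed_interval_action(lo, hi):
        # Build an argparse Action that requires lo <= value <= hi
        # (semantics inferred from the [0.0, 1.0] call site above).
        class ClosedIntervalAction(Action):
            def __call__(self, parser, namespace, values, option_string=None):
                if not (lo <= values <= hi):
                    parser.error(f"{option_string} must be in [{lo}, {hi}], got {values}")
                setattr(namespace, self.dest, values)

        return ClosedIntervalAction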
 
     # All deprecated options:
 
     # These are deprecated in favor of a simpler option
     # ex: noLinkImports and linkImports can be simplified into a single link_imports argument
-    link_imports.add_argument("--noLinkImports", dest="linkImports", action="store_false",
-                              help=SUPPRESS)
-    link_imports.add_argument("--linkImports", dest="linkImports", action="store_true",
-                              help=SUPPRESS)
+    link_imports.add_argument(
+        "--noLinkImports", dest="linkImports", action="store_false", help=SUPPRESS
+    )
+    link_imports.add_argument(
+        "--linkImports", dest="linkImports", action="store_true", help=SUPPRESS
+    )
     link_imports.set_defaults(linkImports=None)
 
-    move_exports.add_argument("--moveExports", dest="moveExports", action="store_true",
-                              help=SUPPRESS)
-    move_exports.add_argument("--noMoveExports", dest="moveExports", action="store_false",
-                              help=SUPPRESS)
+    move_exports.add_argument(
+        "--moveExports", dest="moveExports", action="store_true", help=SUPPRESS
+    )
+    move_exports.add_argument(
+        "--noMoveExports", dest="moveExports", action="store_false", help=SUPPRESS
+    )
     link_imports.set_defaults(moveExports=None)
 
     # dest is set to enableCaching to not conflict with the current --caching destination
-    caching.add_argument('--disableCaching', dest='enableCaching', action='store_false', help=SUPPRESS)
+    caching.add_argument(
+        "--disableCaching", dest="enableCaching", action="store_false", help=SUPPRESS
+    )
     caching.set_defaults(enableCaching=None)
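All three deprecated pairs follow one pattern: two hidden spellings write True or False to a single destination, and set_defaults(...=None) keeps "not given" distinguishable from either explicit choice. The pattern in isolation (plain argparse, standing in for Toil's pre-built groups):

    from argparse import SUPPRESS, ArgumentParser

    p = ArgumentParser()
    p.add_argument("--linkImports", dest="linkImports", action="store_true", help=SUPPRESS)
    p.add_argument("--noLinkImports", dest="linkImports", action="store_false", help=SUPPRESS)
    p.set_defaults(linkImports=None)
    print(p.parse_args([]).linkImports)                   # None: caller applies its own default
    print(p.parse_args(["--linkImports"]).linkImports)    # True
    print(p.parse_args(["--noLinkImports"]).linkImports)  # False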