toil 5.12.0__py3-none-any.whl → 6.1.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (157)
  1. toil/__init__.py +18 -13
  2. toil/batchSystems/abstractBatchSystem.py +21 -10
  3. toil/batchSystems/abstractGridEngineBatchSystem.py +2 -2
  4. toil/batchSystems/awsBatch.py +14 -14
  5. toil/batchSystems/contained_executor.py +3 -3
  6. toil/batchSystems/htcondor.py +0 -1
  7. toil/batchSystems/kubernetes.py +34 -31
  8. toil/batchSystems/local_support.py +3 -1
  9. toil/batchSystems/mesos/batchSystem.py +7 -7
  10. toil/batchSystems/options.py +32 -83
  11. toil/batchSystems/registry.py +104 -23
  12. toil/batchSystems/singleMachine.py +16 -13
  13. toil/batchSystems/slurm.py +3 -3
  14. toil/batchSystems/torque.py +0 -1
  15. toil/bus.py +6 -8
  16. toil/common.py +532 -743
  17. toil/cwl/__init__.py +28 -32
  18. toil/cwl/cwltoil.py +523 -520
  19. toil/cwl/utils.py +55 -10
  20. toil/fileStores/__init__.py +2 -2
  21. toil/fileStores/abstractFileStore.py +36 -11
  22. toil/fileStores/cachingFileStore.py +607 -530
  23. toil/fileStores/nonCachingFileStore.py +43 -10
  24. toil/job.py +140 -75
  25. toil/jobStores/abstractJobStore.py +147 -79
  26. toil/jobStores/aws/jobStore.py +23 -9
  27. toil/jobStores/aws/utils.py +1 -2
  28. toil/jobStores/fileJobStore.py +117 -19
  29. toil/jobStores/googleJobStore.py +16 -7
  30. toil/jobStores/utils.py +5 -6
  31. toil/leader.py +71 -43
  32. toil/lib/accelerators.py +10 -5
  33. toil/lib/aws/__init__.py +3 -14
  34. toil/lib/aws/ami.py +22 -9
  35. toil/lib/aws/iam.py +21 -13
  36. toil/lib/aws/session.py +2 -16
  37. toil/lib/aws/utils.py +4 -5
  38. toil/lib/compatibility.py +1 -1
  39. toil/lib/conversions.py +7 -3
  40. toil/lib/docker.py +22 -23
  41. toil/lib/ec2.py +10 -6
  42. toil/lib/ec2nodes.py +106 -100
  43. toil/lib/encryption/_nacl.py +2 -1
  44. toil/lib/generatedEC2Lists.py +325 -18
  45. toil/lib/io.py +21 -0
  46. toil/lib/misc.py +1 -1
  47. toil/lib/resources.py +1 -1
  48. toil/lib/threading.py +74 -26
  49. toil/options/common.py +738 -0
  50. toil/options/cwl.py +336 -0
  51. toil/options/wdl.py +32 -0
  52. toil/provisioners/abstractProvisioner.py +1 -4
  53. toil/provisioners/aws/__init__.py +3 -6
  54. toil/provisioners/aws/awsProvisioner.py +6 -0
  55. toil/provisioners/clusterScaler.py +3 -2
  56. toil/provisioners/gceProvisioner.py +2 -2
  57. toil/realtimeLogger.py +2 -1
  58. toil/resource.py +24 -18
  59. toil/server/app.py +2 -3
  60. toil/server/cli/wes_cwl_runner.py +4 -4
  61. toil/server/utils.py +1 -1
  62. toil/server/wes/abstract_backend.py +3 -2
  63. toil/server/wes/amazon_wes_utils.py +5 -4
  64. toil/server/wes/tasks.py +2 -3
  65. toil/server/wes/toil_backend.py +2 -10
  66. toil/server/wsgi_app.py +2 -0
  67. toil/serviceManager.py +12 -10
  68. toil/statsAndLogging.py +5 -1
  69. toil/test/__init__.py +29 -54
  70. toil/test/batchSystems/batchSystemTest.py +11 -111
  71. toil/test/batchSystems/test_slurm.py +3 -2
  72. toil/test/cwl/cwlTest.py +213 -90
  73. toil/test/cwl/glob_dir.cwl +15 -0
  74. toil/test/cwl/preemptible.cwl +21 -0
  75. toil/test/cwl/preemptible_expression.cwl +28 -0
  76. toil/test/cwl/revsort.cwl +1 -1
  77. toil/test/cwl/revsort2.cwl +1 -1
  78. toil/test/docs/scriptsTest.py +0 -1
  79. toil/test/jobStores/jobStoreTest.py +27 -16
  80. toil/test/lib/aws/test_iam.py +4 -14
  81. toil/test/lib/aws/test_utils.py +0 -3
  82. toil/test/lib/dockerTest.py +4 -4
  83. toil/test/lib/test_ec2.py +11 -16
  84. toil/test/mesos/helloWorld.py +4 -5
  85. toil/test/mesos/stress.py +1 -1
  86. toil/test/provisioners/aws/awsProvisionerTest.py +9 -5
  87. toil/test/provisioners/clusterScalerTest.py +6 -4
  88. toil/test/provisioners/clusterTest.py +14 -3
  89. toil/test/provisioners/gceProvisionerTest.py +0 -6
  90. toil/test/provisioners/restartScript.py +3 -2
  91. toil/test/server/serverTest.py +1 -1
  92. toil/test/sort/restart_sort.py +2 -1
  93. toil/test/sort/sort.py +2 -1
  94. toil/test/sort/sortTest.py +2 -13
  95. toil/test/src/autoDeploymentTest.py +45 -45
  96. toil/test/src/busTest.py +5 -5
  97. toil/test/src/checkpointTest.py +2 -2
  98. toil/test/src/deferredFunctionTest.py +1 -1
  99. toil/test/src/fileStoreTest.py +32 -16
  100. toil/test/src/helloWorldTest.py +1 -1
  101. toil/test/src/importExportFileTest.py +1 -1
  102. toil/test/src/jobDescriptionTest.py +2 -1
  103. toil/test/src/jobServiceTest.py +1 -1
  104. toil/test/src/jobTest.py +18 -18
  105. toil/test/src/miscTests.py +5 -3
  106. toil/test/src/promisedRequirementTest.py +3 -3
  107. toil/test/src/realtimeLoggerTest.py +1 -1
  108. toil/test/src/resourceTest.py +2 -2
  109. toil/test/src/restartDAGTest.py +1 -1
  110. toil/test/src/resumabilityTest.py +36 -2
  111. toil/test/src/retainTempDirTest.py +1 -1
  112. toil/test/src/systemTest.py +2 -2
  113. toil/test/src/toilContextManagerTest.py +2 -2
  114. toil/test/src/userDefinedJobArgTypeTest.py +1 -1
  115. toil/test/utils/toilDebugTest.py +98 -32
  116. toil/test/utils/toilKillTest.py +2 -2
  117. toil/test/utils/utilsTest.py +20 -0
  118. toil/test/wdl/wdltoil_test.py +148 -45
  119. toil/toilState.py +7 -6
  120. toil/utils/toilClean.py +1 -1
  121. toil/utils/toilConfig.py +36 -0
  122. toil/utils/toilDebugFile.py +60 -33
  123. toil/utils/toilDebugJob.py +39 -12
  124. toil/utils/toilDestroyCluster.py +1 -1
  125. toil/utils/toilKill.py +1 -1
  126. toil/utils/toilLaunchCluster.py +13 -2
  127. toil/utils/toilMain.py +3 -2
  128. toil/utils/toilRsyncCluster.py +1 -1
  129. toil/utils/toilSshCluster.py +1 -1
  130. toil/utils/toilStats.py +240 -143
  131. toil/utils/toilStatus.py +1 -4
  132. toil/version.py +11 -11
  133. toil/wdl/utils.py +2 -122
  134. toil/wdl/wdltoil.py +999 -386
  135. toil/worker.py +25 -31
  136. {toil-5.12.0.dist-info → toil-6.1.0a1.dist-info}/METADATA +60 -53
  137. toil-6.1.0a1.dist-info/RECORD +237 -0
  138. {toil-5.12.0.dist-info → toil-6.1.0a1.dist-info}/WHEEL +1 -1
  139. {toil-5.12.0.dist-info → toil-6.1.0a1.dist-info}/entry_points.txt +0 -1
  140. toil/batchSystems/parasol.py +0 -379
  141. toil/batchSystems/tes.py +0 -459
  142. toil/test/batchSystems/parasolTestSupport.py +0 -117
  143. toil/test/wdl/builtinTest.py +0 -506
  144. toil/test/wdl/conftest.py +0 -23
  145. toil/test/wdl/toilwdlTest.py +0 -522
  146. toil/wdl/toilwdl.py +0 -141
  147. toil/wdl/versions/dev.py +0 -107
  148. toil/wdl/versions/draft2.py +0 -980
  149. toil/wdl/versions/v1.py +0 -794
  150. toil/wdl/wdl_analysis.py +0 -116
  151. toil/wdl/wdl_functions.py +0 -997
  152. toil/wdl/wdl_synthesis.py +0 -1011
  153. toil/wdl/wdl_types.py +0 -243
  154. toil-5.12.0.dist-info/RECORD +0 -244
  155. /toil/{wdl/versions → options}/__init__.py +0 -0
  156. {toil-5.12.0.dist-info → toil-6.1.0a1.dist-info}/LICENSE +0 -0
  157. {toil-5.12.0.dist-info → toil-6.1.0a1.dist-info}/top_level.txt +0 -0
@@ -3,7 +3,6 @@ import functools
  import json
  import logging
  import os
- import tempfile
  from abc import abstractmethod
  from typing import Any, Callable, Dict, List, Optional, Tuple, Union
  from urllib.parse import urldefrag
@@ -11,6 +10,8 @@ from urllib.parse import urldefrag
  import connexion # type: ignore
  from werkzeug.utils import secure_filename

+ from toil.lib.io import mkdtemp
+
  logger = logging.getLogger(__name__)

  # Define a type for WES task log entries in responses
@@ -210,7 +211,7 @@ class WESBackend:
  If None, a temporary directory is created.
  """
  if not temp_dir:
- temp_dir = tempfile.mkdtemp()
+ temp_dir = mkdtemp()
  body: Dict[str, Any] = {}
  has_attachments = False
  for key, ls in connexion.request.files.lists():
@@ -20,11 +20,10 @@

  import json
  import logging
- import os
  import sys
  import zipfile
  from os import path
- from typing import IO, Any, Dict, List, Optional, Union, cast
+ from typing import IO, List, Optional, cast

  if sys.version_info >= (3, 8):
  from typing import TypedDict
@@ -164,15 +163,17 @@ def parse_workflow_manifest_file(manifest_file: str) -> WorkflowPlan:
  :rtype: dict of `data` and `files`

  MANIFEST.json is expected to be formatted like:
+
  .. code-block:: json
+
  {
  "mainWorkflowURL": "relpath/to/workflow",
  "inputFileURLs": [
  "relpath/to/input-file-1",
  "relpath/to/input-file-2",
- ...
+ "relpath/to/input-file-3"
  ],
- "optionsFileURL" "relpath/to/option-file
+ "optionsFileURL": "relpath/to/option-file"
  }

  The `mainWorkflowURL` property that provides a relative file path in the zip to a workflow file, which will be set as `workflowSource`
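The hunk above tightens the MANIFEST.json example in the `parse_workflow_manifest_file` docstring. As a rough illustration of the documented shape, a manifest like that could be loaded and sanity-checked as follows (a hypothetical helper written for this diff summary, not Toil's actual parser; the function name is made up):

```python
import json
from typing import Any, Dict

def load_manifest(manifest_path: str) -> Dict[str, Any]:
    """Load a MANIFEST.json of the shape shown above and apply light validation."""
    with open(manifest_path) as f:
        manifest: Dict[str, Any] = json.load(f)
    if "mainWorkflowURL" not in manifest:
        raise ValueError("MANIFEST.json must provide mainWorkflowURL")
    # inputFileURLs and optionsFileURL are optional; default the list so callers
    # can iterate over it without checking for its presence first.
    manifest.setdefault("inputFileURLs", [])
    return manifest
```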
toil/server/wes/tasks.py CHANGED
@@ -11,7 +11,6 @@
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
- import fcntl
  import json
  import logging
  import multiprocessing
@@ -134,8 +133,8 @@ class ToilWorkflowRunner:
  the workflow execution engine.

  :param workflow_engine_parameters: User-specified parameters for this
- particular workflow. Keys are command-line options, and values are
- option arguments, or None for options that are flags.
+ particular workflow. Keys are command-line options, and values are
+ option arguments, or None for options that are flags.
  """
  options = []

@@ -16,9 +16,8 @@ import logging
  import os
  import shutil
  import uuid
- from collections import Counter, defaultdict
+ from collections import Counter
  from contextlib import contextmanager
- from tempfile import NamedTemporaryFile
  from typing import (Any,
  Callable,
  Dict,
@@ -32,18 +31,11 @@ from typing import (Any,
  overload)

  from flask import send_from_directory
- from flask.globals import request as flask_request
  from werkzeug.utils import redirect
  from werkzeug.wrappers.response import Response

  import toil.server.wes.amazon_wes_utils as amazon_wes_utils
- from toil.bus import (JobAnnotationMessage,
- JobCompletedMessage,
- JobFailedMessage,
- JobIssuedMessage,
- JobUpdatedMessage,
- MessageBus,
- replay_message_bus, JobStatus)
+ from toil.bus import JobStatus, replay_message_bus
  from toil.lib.io import AtomicFileCreate
  from toil.lib.threading import global_mutex
  from toil.server.utils import (WorkflowStateMachine,
toil/server/wsgi_app.py CHANGED
@@ -21,6 +21,8 @@ class GunicornApplication(BaseApplication): # type: ignore
  An entry point to integrate a Gunicorn WSGI server in Python. To start a
  WSGI application with callable `app`, run the following code:

+ .. code-block:: python
+
  WSGIApplication(app, options={
  ...
  }).run()
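For context on the docstring above: Gunicorn's documented way to embed a server is to subclass `BaseApplication`, push options into `self.cfg` from `load_config()`, and return the WSGI callable from `load()`. A minimal, generic sketch of that pattern (not Toil's `GunicornApplication` itself) looks like:

```python
from gunicorn.app.base import BaseApplication

class WSGIApplication(BaseApplication):
    """Run a WSGI callable `app` under an embedded Gunicorn server."""

    def __init__(self, app, options=None):
        self.options = options or {}
        self.application = app
        super().__init__()

    def load_config(self):
        # Copy recognized options (e.g. "bind", "workers") into Gunicorn's config.
        for key, value in self.options.items():
            if key in self.cfg.settings and value is not None:
                self.cfg.set(key.lower(), value)

    def load(self):
        return self.application

# WSGIApplication(app, options={"bind": "127.0.0.1:8000", "workers": 2}).run()
```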
toil/serviceManager.py CHANGED
@@ -122,7 +122,8 @@ class ServiceManager:
  try:
  client_id = self.__clients_out.get(timeout=maxWait)
  self.__waiting_clients.remove(client_id)
- assert self.__service_manager_jobs >= 0
+ if self.__service_manager_jobs < 0:
+ raise RuntimeError("The number of jobs scheduled by the service manager cannot be negative.")
  self.__service_manager_jobs -= 1
  return client_id
  except Empty:
@@ -139,7 +140,8 @@ class ServiceManager:
  try:
  client_id = self.__failed_clients_out.get(timeout=maxWait)
  self.__waiting_clients.remove(client_id)
- assert self.__service_manager_jobs >= 0
+ if self.__service_manager_jobs < 0:
+ raise RuntimeError("The number of jobs scheduled by the service manager cannot be negative.")
  self.__service_manager_jobs -= 1
  return client_id
  except Empty:
@@ -154,7 +156,8 @@ class ServiceManager:
  """
  try:
  service_id = self.__services_out.get(timeout=maxWait)
- assert self.__service_manager_jobs >= 0
+ if self.__service_manager_jobs < 0:
+ raise RuntimeError("The number of jobs scheduled by the service manager cannot be negative.")
  self.__service_manager_jobs -= 1
  return service_id
  except Empty:
@@ -304,7 +307,8 @@ class ServiceManager:
  starting_services.remove(service_id)
  client_id = service_to_client[service_id]
  remaining_services_by_client[client_id] -= 1
- assert remaining_services_by_client[client_id] >= 0
+ if remaining_services_by_client[client_id] < 0:
+ raise RuntimeError("The number of remaining services cannot be negative.")
  del service_to_client[service_id]
  if not self.__job_store.file_exists(service_job_desc.errorJobStoreID):
  logger.error(
@@ -356,12 +360,10 @@ class ServiceManager:
  service_job_desc,
  service_job_desc.startJobStoreID,
  )
- assert self.__job_store.file_exists(
- service_job_desc.startJobStoreID
- ), f"Service manager attempted to start service {service_job_desc} that has already started"
- assert self.__toil_state.job_exists(
- str(service_job_desc.jobStoreID)
- ), f"Service manager attempted to start service {service_job_desc} that is not in the job store"
+ if not self.__job_store.file_exists(service_job_desc.startJobStoreID):
+ raise RuntimeError(f"Service manager attempted to start service {service_job_desc} that has already started")
+ if not self.__toil_state.job_exists(str(service_job_desc.jobStoreID)):
+ raise RuntimeError(f"Service manager attempted to start service {service_job_desc} that is not in the job store")
  # At this point the terminateJobStoreID and errorJobStoreID
  # could have been deleted, since the service can be killed at
  # any time! So we can't assert their presence here.
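The motivation for swapping these asserts for explicit raises is that `assert` statements are stripped when Python runs with `-O`, so an invariant written as an assert silently stops being checked in optimized runs. A minimal sketch of the difference (the function name is made up for illustration, not Toil code):

```python
def finish_one_job(jobs_scheduled: int) -> int:
    # assert jobs_scheduled >= 0      # vanishes entirely under "python -O"
    if jobs_scheduled < 0:            # enforced in every mode
        raise RuntimeError("The number of jobs scheduled cannot be negative.")
    return jobs_scheduled - 1
```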
toil/statsAndLogging.py CHANGED
@@ -148,10 +148,12 @@ class StatsAndLogging:
  if not isinstance(statsStr, str):
  statsStr = statsStr.decode()
  stats = json.loads(statsStr, object_hook=Expando)
+ if not stats:
+ return
  try:
  logs = stats.workers.logsToMaster
  except AttributeError:
- # To be expected if there were no calls to logToMaster()
+ # To be expected if there were no calls to log_to_leader()
  pass
  else:
  for message in logs:
@@ -225,6 +227,8 @@ def add_logging_options(parser: ArgumentParser) -> None:
  levels += [l.lower() for l in levels] + [l.upper() for l in levels]
  group.add_argument("--logOff", dest="logLevel", default=default_loglevel,
  action="store_const", const="CRITICAL", help="Same as --logCRITICAL.")
+ # Maybe deprecate the above in favor of --logLevel?
+
  group.add_argument("--logLevel", dest="logLevel", default=default_loglevel, choices=levels,
  help=f"Set the log level. Default: {default_loglevel}. Options: {levels}.")
  group.add_argument("--logFile", dest="logFile", help="File to log in.")
toil/test/__init__.py CHANGED
@@ -21,7 +21,6 @@ import shutil
  import signal
  import subprocess
  import sys
- import tempfile
  import threading
  import time
  import unittest
@@ -30,6 +29,7 @@ from abc import ABCMeta, abstractmethod
  from contextlib import contextmanager
  from inspect import getsource
  from shutil import which
+ from tempfile import mkstemp
  from textwrap import dedent
  from typing import (Any,
  Callable,
@@ -57,6 +57,7 @@ from toil import ApplianceImageNotFound, applianceSelf, toilPackageDirPath
  from toil.lib.accelerators import (have_working_nvidia_docker_runtime,
  have_working_nvidia_smi)
  from toil.lib.aws import running_on_ec2
+ from toil.lib.io import mkdtemp
  from toil.lib.iterables import concat
  from toil.lib.memoize import memoize
  from toil.lib.threading import ExceptionalThread, cpu_count
@@ -188,7 +189,7 @@ class ToilTest(unittest.TestCase):
  prefix.extend([_f for _f in names if _f])
  prefix.append('')
  temp_dir_path = os.path.realpath(
- tempfile.mkdtemp(dir=cls._tempBaseDir, prefix="-".join(prefix))
+ mkdtemp(dir=cls._tempBaseDir, prefix="-".join(prefix))
  )
  cls._tempDirs.append(temp_dir_path)
  return temp_dir_path
@@ -314,7 +315,7 @@ else:
  def get_temp_file(suffix: str = "", rootDir: Optional[str] = None) -> str:
  """Return a string representing a temporary file, that must be manually deleted."""
  if rootDir is None:
- handle, tmp_file = tempfile.mkstemp(suffix)
+ handle, tmp_file = mkstemp(suffix)
  os.close(handle)
  return tmp_file
  else:
@@ -359,10 +360,17 @@ def needs_rsync3(test_item: MT) -> MT:
  return test_item


+ def needs_online(test_item: MT) -> MT:
+ """Use as a decorator before test classes or methods to run only if we are meant to talk to the Internet."""
+ test_item = _mark_test('online', test_item)
+ if os.getenv('TOIL_SKIP_ONLINE', '').lower() == 'true':
+ return unittest.skip('Skipping online test.')(test_item)
+ return test_item
+
  def needs_aws_s3(test_item: MT) -> MT:
  """Use as a decorator before test classes or methods to run only if AWS S3 is usable."""
  # TODO: we just check for generic access to the AWS account
- test_item = _mark_test('aws-s3', test_item)
+ test_item = _mark_test('aws-s3', needs_online(test_item))
  try:
  from boto import config
  boto_credentials = config.get('Credentials', 'aws_access_key_id')
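A quick usage sketch for the new decorator (a hypothetical test, not one from the Toil suite): tests wrapped with `needs_online` get the `online` mark and are skipped whenever `TOIL_SKIP_ONLINE=true` is set.

```python
from toil.test import ToilTest, needs_online

@needs_online
class ExampleDownloadTest(ToilTest):
    def test_fetches_from_the_internet(self) -> None:
        # Runs normally, but is skipped when TOIL_SKIP_ONLINE=true.
        ...
```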
@@ -415,7 +423,7 @@ def needs_google_storage(test_item: MT) -> MT:
  Cloud is installed and we ought to be able to access public Google Storage
  URIs.
  """
- test_item = _mark_test('google-storage', test_item)
+ test_item = _mark_test('google-storage', needs_online(test_item))
  try:
  from google.cloud import storage # noqa
  except ImportError:
@@ -427,7 +435,7 @@ def needs_google_project(test_item: MT) -> MT:
  """
  Use as a decorator before test classes or methods to run only if we have a Google Cloud project set.
  """
- test_item = _mark_test('google-project', test_item)
+ test_item = _mark_test('google-project', needs_online(test_item))
  test_item = needs_env_var('TOIL_GOOGLE_PROJECTID', "a Google project ID")(test_item)
  return test_item

@@ -447,44 +455,19 @@ def needs_torque(test_item: MT) -> MT:
  return test_item
  return unittest.skip("Install PBS/Torque to include this test.")(test_item)

-
- def needs_tes(test_item: MT) -> MT:
- """Use as a decorator before test classes or methods to run only if TES is available."""
- test_item = _mark_test('tes', test_item)
-
- try:
- from toil.batchSystems.tes import TESBatchSystem
- except ImportError:
- return unittest.skip("Install py-tes to include this test")(test_item)
-
- tes_url = os.environ.get('TOIL_TES_ENDPOINT', TESBatchSystem.get_default_tes_endpoint())
- try:
- urlopen(tes_url)
- except HTTPError:
- # Funnel happens to 404 if TES is working. But any HTTPError means we
- # dialed somebody who picked up.
- pass
- except URLError:
- # Will give connection refused if we can't connect because the server's
- # not there. We can also get a "cannot assign requested address" if
- # we're on Kubernetes dialing localhost and !!creative things!! have
- # been done to the network stack.
- return unittest.skip(f"Run a TES server on {tes_url} to include this test")(test_item)
- return test_item
-
-
  def needs_kubernetes_installed(test_item: MT) -> MT:
  """Use as a decorator before test classes or methods to run only if Kubernetes is installed."""
  test_item = _mark_test('kubernetes', test_item)
  try:
  import kubernetes
+ str(kubernetes) # to prevent removal of this import
  except ImportError:
  return unittest.skip("Install Toil with the 'kubernetes' extra to include this test.")(test_item)
  return test_item

  def needs_kubernetes(test_item: MT) -> MT:
  """Use as a decorator before test classes or methods to run only if Kubernetes is installed and configured."""
- test_item = needs_kubernetes_installed(test_item)
+ test_item = needs_kubernetes_installed(needs_online(test_item))
  try:
  import kubernetes
  try:
@@ -514,14 +497,6 @@ def needs_mesos(test_item: MT) -> MT:
  return test_item


- def needs_parasol(test_item: MT) -> MT:
- """Use as decorator so tests are only run if Parasol is installed."""
- test_item = _mark_test('parasol', test_item)
- if which('parasol'):
- return test_item
- return unittest.skip("Install Parasol to include this test.")(test_item)
-
-
  def needs_slurm(test_item: MT) -> MT:
  """Use as a decorator before test classes or methods to run only if Slurm is installed."""
  test_item = _mark_test('slurm', test_item)
@@ -571,20 +546,20 @@ def needs_docker(test_item: MT) -> MT:
  Use as a decorator before test classes or methods to only run them if
  docker is installed and docker-based tests are enabled.
  """
- test_item = _mark_test('docker', test_item)
+ test_item = _mark_test('docker', needs_online(test_item))
  if os.getenv('TOIL_SKIP_DOCKER', '').lower() == 'true':
  return unittest.skip('Skipping docker test.')(test_item)
  if which('docker'):
  return test_item
  else:
  return unittest.skip("Install docker to include this test.")(test_item)
-
+
  def needs_singularity(test_item: MT) -> MT:
  """
  Use as a decorator before test classes or methods to only run them if
  singularity is installed.
  """
- test_item = _mark_test('singularity', test_item)
+ test_item = _mark_test('singularity', needs_online(test_item))
  if which('singularity'):
  return test_item
  else:
@@ -621,7 +596,7 @@ def needs_docker_cuda(test_item: MT) -> MT:
  Use as a decorator before test classes or methods to only run them if
  a CUDA setup is available through Docker.
  """
- test_item = _mark_test('docker_cuda', test_item)
+ test_item = _mark_test('docker_cuda', needs_online(test_item))
  if have_working_nvidia_docker_runtime():
  return test_item
  else:
@@ -677,7 +652,7 @@ def needs_celery_broker(test_item: MT) -> MT:
  """
  Use as a decorator before test classes or methods to run only if RabbitMQ is set up to take Celery jobs.
  """
- test_item = _mark_test('celery', test_item)
+ test_item = _mark_test('celery', needs_online(test_item))
  test_item = needs_env_var('TOIL_WES_BROKER_URL', "a URL to a RabbitMQ broker for Celery")(test_item)
  return test_item

@@ -686,7 +661,7 @@ def needs_wes_server(test_item: MT) -> MT:
  Use as a decorator before test classes or methods to run only if a WES
  server is available to run against.
  """
- test_item = _mark_test('wes_server', test_item)
+ test_item = _mark_test('wes_server', needs_online(test_item))

  wes_url = os.environ.get('TOIL_WES_ENDPOINT')
  if not wes_url:
@@ -744,7 +719,7 @@ def needs_fetchable_appliance(test_item: MT) -> MT:
  the Toil appliance Docker image is able to be downloaded from the Internet.
  """

- test_item = _mark_test('fetchable_appliance', test_item)
+ test_item = _mark_test('fetchable_appliance', needs_online(test_item))
  if os.getenv('TOIL_SKIP_DOCKER', '').lower() == 'true':
  return unittest.skip('Skipping docker test.')(test_item)
  try:
@@ -765,9 +740,7 @@ def integrative(test_item: MT) -> MT:
  Use this to decorate integration tests so as to skip them during regular builds.

  We define integration tests as A) involving other, non-Toil software components
- that we develop and/or B) having a higher cost (time or money). Note that brittleness
- does not qualify a test for being integrative. Neither does involvement of external
- services such as AWS, since that would cover most of Toil's test.
+ that we develop and/or B) having a higher cost (time or money).
  """
  test_item = _mark_test('integrative', test_item)
  if os.getenv('TOIL_TEST_INTEGRATIVE', '').lower() == 'true':
@@ -797,11 +770,13 @@ methodNamePartRegex = re.compile('^[a-zA-Z_0-9]+$')
  @contextmanager
  def timeLimit(seconds: int) -> Generator[None, None, None]:
  """
- http://stackoverflow.com/a/601168
- Use to limit the execution time of a function. Raises an exception if the execution of the
- function takes more than the specified amount of time.
+ Use to limit the execution time of a function.
+
+ Raises an exception if the execution of the function takes more than the
+ specified amount of time. See <http://stackoverflow.com/a/601168>.

  :param seconds: maximum allowable time, in seconds
+
  >>> import time
  >>> with timeLimit(2):
  ... time.sleep(1)
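The Stack Overflow answer referenced in that docstring enforces the limit with `signal.alarm`; a self-contained sketch of that technique (not necessarily Toil's exact implementation, and Unix-only) looks like:

```python
import signal
from contextlib import contextmanager
from typing import Generator

@contextmanager
def time_limit(seconds: int) -> Generator[None, None, None]:
    """Raise RuntimeError if the with-block runs longer than `seconds`."""
    def handler(signum, frame):
        raise RuntimeError(f"Timed out after {seconds} seconds")
    old_handler = signal.signal(signal.SIGALRM, handler)  # install our handler
    signal.alarm(seconds)                                  # schedule SIGALRM
    try:
        yield
    finally:
        signal.alarm(0)                                    # cancel any pending alarm
        signal.signal(signal.SIGALRM, old_handler)         # restore previous handler
```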
@@ -31,10 +31,9 @@ from toil.batchSystems.abstractBatchSystem import (AbstractBatchSystem,
  # in order to import properly. Import them later, in tests
  # protected by annotations.
  from toil.batchSystems.mesos.test import MesosTestSupport
- from toil.batchSystems.parasol import ParasolBatchSystem
- from toil.batchSystems.registry import (BATCH_SYSTEM_FACTORY_REGISTRY,
- BATCH_SYSTEMS,
- addBatchSystemFactory,
+ from toil.batchSystems.registry import (add_batch_system_factory,
+ get_batch_system,
+ get_batch_systems,
  restore_batch_system_plugin_state,
  save_batch_system_plugin_state)
  from toil.batchSystems.singleMachine import SingleMachineBatchSystem
@@ -52,12 +51,9 @@ from toil.test import (ToilTest,
  needs_kubernetes_installed,
  needs_lsf,
  needs_mesos,
- needs_parasol,
  needs_slurm,
- needs_tes,
  needs_torque,
  slow)
- from toil.test.batchSystems.parasolTestSupport import ParasolTestSupport

  logger = logging.getLogger(__name__)

@@ -88,16 +84,16 @@ class BatchSystemPluginTest(ToilTest):
  restore_batch_system_plugin_state(self.__state)
  super().tearDown()

- def testAddBatchSystemFactory(self):
+ def test_add_batch_system_factory(self):
  def test_batch_system_factory():
  # TODO: Adding the same batch system under multiple names means we
  # can't actually create Toil options, because each version tries to
  # add its arguments.
  return SingleMachineBatchSystem

- addBatchSystemFactory('testBatchSystem', test_batch_system_factory)
- assert ('testBatchSystem', test_batch_system_factory) in BATCH_SYSTEM_FACTORY_REGISTRY.items()
- assert 'testBatchSystem' in BATCH_SYSTEMS
+ add_batch_system_factory('testBatchSystem', test_batch_system_factory)
+ assert 'testBatchSystem' in get_batch_systems()
+ assert get_batch_system('testBatchSystem') == SingleMachineBatchSystem

  class hidden:
  """
@@ -248,10 +244,6 @@ class hidden:
  self.batchSystem.killBatchJobs([10])

  def test_set_env(self):
- # Parasol disobeys shell rules and splits the command at the space
- # character into arguments before exec'ing it, whether the space is
- # quoted, escaped or not.
-
  # Start with a relatively safe script
  script_shell = 'if [ "x${FOO}" == "xbar" ] ; then exit 23 ; else exit 42 ; fi'

@@ -575,23 +567,6 @@ class KubernetesBatchSystemBenchTest(ToilTest):
  self.assertEqual(str(spec.tolerations), "None")


- @needs_tes
- @needs_fetchable_appliance
- class TESBatchSystemTest(hidden.AbstractBatchSystemTest):
- """
- Tests against the TES batch system
- """
-
- def supportsWallTime(self):
- return True
-
- def createBatchSystem(self):
- # Import the batch system when we know we have it.
- # Doesn't really matter for TES right now, but someday it might.
- from toil.batchSystems.tes import TESBatchSystem
- return TESBatchSystem(config=self.config,
- maxCores=numCores, maxMemory=1e9, maxDisk=2001)
-
  @needs_aws_batch
  @needs_fetchable_appliance
  class AWSBatchBatchSystemTest(hidden.AbstractBatchSystemTest):
@@ -849,7 +824,7 @@ class MaxCoresSingleMachineBatchSystemTest(ToilTest):
  if len(sys.argv) < 3:
  count(1)
  try:
- time.sleep(1)
+ time.sleep(0.5)
  finally:
  count(-1)
  else:
@@ -910,9 +885,10 @@ class MaxCoresSingleMachineBatchSystemTest(ToilTest):
  logger.info(f'maxCores: {maxCores}, '
  f'coresPerJob: {coresPerJob}, '
  f'load: {load}')
- # This is the key assertion:
+ # This is the key assertion: we shouldn't run too many jobs.
+ # Because of nondeterminism we can't guarantee hitting the limit.
  expectedMaxConcurrentTasks = min(maxCores // coresPerJob, jobs)
- self.assertEqual(maxConcurrentTasks, expectedMaxConcurrentTasks)
+ self.assertLessEqual(maxConcurrentTasks, expectedMaxConcurrentTasks)
  resetCounters(self.counterPath)

  @skipIf(SingleMachineBatchSystem.numCores < 3, 'Need at least three cores to run this test')
@@ -965,82 +941,6 @@ class Service(Job.Service):
  subprocess.check_call(self.cmd + ' -1', shell=True)


- @slow
- @needs_parasol
- class ParasolBatchSystemTest(hidden.AbstractBatchSystemTest, ParasolTestSupport):
- """
- Tests the Parasol batch system
- """
-
- def supportsWallTime(self):
- return True
-
- def _createConfig(self):
- config = super()._createConfig()
- # can't use _getTestJobStorePath since that method removes the directory
- config.jobStore = self._createTempDir('jobStore')
- return config
-
- def createBatchSystem(self) -> AbstractBatchSystem:
- memory = int(3e9)
- self._startParasol(numCores=numCores, memory=memory)
-
- return ParasolBatchSystem(config=self.config,
- maxCores=numCores,
- maxMemory=memory,
- maxDisk=1001)
-
- def tearDown(self):
- super().tearDown()
- self._stopParasol()
-
- def testBatchResourceLimits(self):
- jobDesc1 = JobDescription(command="sleep 1000",
- requirements=dict(memory=1 << 30, cores=1,
- disk=1000, accelerators=[],
- preemptible=preemptible),
- jobName='testResourceLimits')
- job1 = self.batchSystem.issueBatchJob(jobDesc1)
- self.assertIsNotNone(job1)
- jobDesc2 = JobDescription(command="sleep 1000",
- requirements=dict(memory=2 << 30, cores=1,
- disk=1000, accelerators=[],
- preemptible=preemptible),
- jobName='testResourceLimits')
- job2 = self.batchSystem.issueBatchJob(jobDesc2)
- self.assertIsNotNone(job2)
- batches = self._getBatchList()
- self.assertEqual(len(batches), 2)
- # It would be better to directly check that the batches have the correct memory and cpu
- # values, but Parasol seems to slightly change the values sometimes.
- self.assertNotEqual(batches[0]['ram'], batches[1]['ram'])
- # Need to kill one of the jobs because there are only two cores available
- self.batchSystem.killBatchJobs([job2])
- job3 = self.batchSystem.issueBatchJob(jobDesc1)
- self.assertIsNotNone(job3)
- batches = self._getBatchList()
- self.assertEqual(len(batches), 1)
-
- def _parseBatchString(self, batchString):
- import re
- batchInfo = dict()
- memPattern = re.compile(r"(\d+\.\d+)([kgmbt])")
- items = batchString.split()
- batchInfo["cores"] = int(items[7])
- memMatch = memPattern.match(items[8])
- ramValue = float(memMatch.group(1))
- ramUnits = memMatch.group(2)
- ramConversion = {'b': 1e0, 'k': 1e3, 'm': 1e6, 'g': 1e9, 't': 1e12}
- batchInfo["ram"] = ramValue * ramConversion[ramUnits]
- return batchInfo
-
- def _getBatchList(self):
- # noinspection PyUnresolvedReferences
- exitStatus, batchLines = self.batchSystem._runParasol(['list', 'batches'])
- self.assertEqual(exitStatus, 0)
- return [self._parseBatchString(line) for line in batchLines[1:] if line]
-
-
  @slow
  @needs_gridengine
  class GridEngineBatchSystemTest(hidden.AbstractGridEngineBatchSystemTest):
@@ -16,7 +16,8 @@ def call_sacct(args, **_) -> str:
  The arguments passed to `call_command` when executing `sacct` are:
  ['sacct', '-n', '-j', '<comma-separated list of job-ids>', '--format',
  'JobIDRaw,State,ExitCode', '-P', '-S', '1970-01-01']
- The multi-line output is something like:
+ The multi-line output is something like::
+
  1234|COMPLETED|0:0
  1234.batch|COMPLETED|0:0
  1235|PENDING|0:0
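Since `-P` makes `sacct` emit pipe-delimited `JobIDRaw|State|ExitCode` records, output like the sample above can be reduced to a job-id-to-state mapping in a few lines (a sketch for illustration only, not Toil's Slurm parser):

```python
from typing import Dict

def parse_sacct_states(output: str) -> Dict[int, str]:
    """Map each top-level Slurm job id to its reported state."""
    states: Dict[int, str] = {}
    for line in output.splitlines():
        if not line.strip():
            continue
        job_id_raw, state, _exit_code = line.split("|", maxsplit=2)
        if "." in job_id_raw:
            continue  # "1234.batch" and similar are job steps, not the job itself
        states[int(job_id_raw)] = state
    return states

# parse_sacct_states("1234|COMPLETED|0:0\n1234.batch|COMPLETED|0:0")
# -> {1234: 'COMPLETED'}
```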
@@ -46,7 +47,7 @@ def call_sacct(args, **_) -> str:
  def call_scontrol(args, **_) -> str:
  """
  The arguments passed to `call_command` when executing `scontrol` are:
- ['scontrol', 'show', 'job'] or ['scontrol', 'show', 'job', '<job-id>']
+ ``['scontrol', 'show', 'job']`` or ``['scontrol', 'show', 'job', '<job-id>']``
  """
  job_id = int(args[3]) if len(args) > 3 else None
  # Fake output per fake job-id.