toil 8.2.0__py3-none-any.whl → 9.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. toil/batchSystems/abstractBatchSystem.py +13 -5
  2. toil/batchSystems/abstractGridEngineBatchSystem.py +17 -5
  3. toil/batchSystems/kubernetes.py +13 -2
  4. toil/batchSystems/mesos/batchSystem.py +33 -2
  5. toil/batchSystems/registry.py +15 -118
  6. toil/batchSystems/slurm.py +191 -16
  7. toil/common.py +20 -1
  8. toil/cwl/cwltoil.py +97 -119
  9. toil/cwl/utils.py +103 -3
  10. toil/fileStores/__init__.py +1 -1
  11. toil/fileStores/abstractFileStore.py +5 -2
  12. toil/fileStores/cachingFileStore.py +1 -1
  13. toil/job.py +30 -14
  14. toil/jobStores/abstractJobStore.py +35 -255
  15. toil/jobStores/aws/jobStore.py +864 -1964
  16. toil/jobStores/aws/utils.py +24 -270
  17. toil/jobStores/fileJobStore.py +2 -1
  18. toil/jobStores/googleJobStore.py +32 -13
  19. toil/jobStores/utils.py +0 -327
  20. toil/leader.py +27 -22
  21. toil/lib/accelerators.py +1 -1
  22. toil/lib/aws/config.py +22 -0
  23. toil/lib/aws/s3.py +477 -9
  24. toil/lib/aws/utils.py +22 -33
  25. toil/lib/checksum.py +88 -0
  26. toil/lib/conversions.py +33 -31
  27. toil/lib/directory.py +217 -0
  28. toil/lib/ec2.py +97 -29
  29. toil/lib/exceptions.py +2 -1
  30. toil/lib/expando.py +2 -2
  31. toil/lib/generatedEC2Lists.py +138 -19
  32. toil/lib/io.py +33 -2
  33. toil/lib/memoize.py +21 -7
  34. toil/lib/misc.py +1 -1
  35. toil/lib/pipes.py +385 -0
  36. toil/lib/plugins.py +106 -0
  37. toil/lib/retry.py +1 -1
  38. toil/lib/threading.py +1 -1
  39. toil/lib/url.py +320 -0
  40. toil/lib/web.py +4 -5
  41. toil/options/cwl.py +13 -1
  42. toil/options/runner.py +17 -10
  43. toil/options/wdl.py +12 -1
  44. toil/provisioners/__init__.py +5 -2
  45. toil/provisioners/aws/__init__.py +43 -36
  46. toil/provisioners/aws/awsProvisioner.py +47 -15
  47. toil/provisioners/node.py +60 -12
  48. toil/resource.py +3 -13
  49. toil/server/app.py +12 -6
  50. toil/server/cli/wes_cwl_runner.py +2 -2
  51. toil/server/wes/abstract_backend.py +21 -43
  52. toil/server/wes/toil_backend.py +2 -2
  53. toil/test/__init__.py +16 -18
  54. toil/test/batchSystems/batchSystemTest.py +2 -9
  55. toil/test/batchSystems/batch_system_plugin_test.py +7 -0
  56. toil/test/batchSystems/test_slurm.py +103 -14
  57. toil/test/cwl/cwlTest.py +181 -8
  58. toil/test/cwl/staging_cat.cwl +27 -0
  59. toil/test/cwl/staging_make_file.cwl +25 -0
  60. toil/test/cwl/staging_workflow.cwl +43 -0
  61. toil/test/cwl/zero_default.cwl +61 -0
  62. toil/test/docs/scripts/tutorial_staging.py +17 -8
  63. toil/test/docs/scriptsTest.py +2 -1
  64. toil/test/jobStores/jobStoreTest.py +23 -133
  65. toil/test/lib/aws/test_iam.py +7 -7
  66. toil/test/lib/aws/test_s3.py +30 -33
  67. toil/test/lib/aws/test_utils.py +9 -9
  68. toil/test/lib/test_url.py +69 -0
  69. toil/test/lib/url_plugin_test.py +105 -0
  70. toil/test/provisioners/aws/awsProvisionerTest.py +60 -7
  71. toil/test/provisioners/clusterTest.py +15 -2
  72. toil/test/provisioners/gceProvisionerTest.py +1 -1
  73. toil/test/server/serverTest.py +78 -36
  74. toil/test/src/autoDeploymentTest.py +2 -3
  75. toil/test/src/fileStoreTest.py +89 -87
  76. toil/test/utils/ABCWorkflowDebug/ABC.txt +1 -0
  77. toil/test/utils/ABCWorkflowDebug/debugWorkflow.py +4 -4
  78. toil/test/utils/toilKillTest.py +35 -28
  79. toil/test/wdl/md5sum/md5sum-gs.json +1 -1
  80. toil/test/wdl/md5sum/md5sum.json +1 -1
  81. toil/test/wdl/testfiles/read_file.wdl +18 -0
  82. toil/test/wdl/testfiles/url_to_optional_file.wdl +2 -1
  83. toil/test/wdl/wdltoil_test.py +171 -162
  84. toil/test/wdl/wdltoil_test_kubernetes.py +9 -0
  85. toil/utils/toilDebugFile.py +6 -3
  86. toil/utils/toilSshCluster.py +23 -0
  87. toil/utils/toilStats.py +17 -2
  88. toil/utils/toilUpdateEC2Instances.py +1 -0
  89. toil/version.py +10 -10
  90. toil/wdl/wdltoil.py +1179 -825
  91. toil/worker.py +16 -8
  92. {toil-8.2.0.dist-info → toil-9.1.0.dist-info}/METADATA +32 -32
  93. {toil-8.2.0.dist-info → toil-9.1.0.dist-info}/RECORD +97 -85
  94. {toil-8.2.0.dist-info → toil-9.1.0.dist-info}/WHEEL +1 -1
  95. toil/lib/iterables.py +0 -112
  96. toil/test/docs/scripts/stagingExampleFiles/in.txt +0 -1
  97. {toil-8.2.0.dist-info → toil-9.1.0.dist-info}/entry_points.txt +0 -0
  98. {toil-8.2.0.dist-info → toil-9.1.0.dist-info}/licenses/LICENSE +0 -0
  99. {toil-8.2.0.dist-info → toil-9.1.0.dist-info}/top_level.txt +0 -0
@@ -11,6 +11,7 @@
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
+ from dataclasses import dataclass
  import enum
  import logging
  import os
@@ -72,10 +73,13 @@ class BatchJobExitReason(enum.IntEnum):
  except ValueError:
  return str(value)
 
-
- class UpdatedBatchJobInfo(NamedTuple):
+ @dataclass
+ class UpdatedBatchJobInfo:
  jobID: int
- exitStatus: int
+ """
+ The Toil batch system ID of the job.
+ """
+ exitStatus: int = EXIT_STATUS_UNAVAILABLE_VALUE
  """
  The exit status (integer value) of the job. 0 implies successful.
 
@@ -83,8 +87,12 @@ class UpdatedBatchJobInfo(NamedTuple):
  (e.g. job is lost, or otherwise died but actual exit code was not reported).
  """
 
- exitReason: Optional[BatchJobExitReason]
- wallTime: Union[float, int, None]
+ exitReason: Optional[BatchJobExitReason] = None
+ wallTime: Union[float, int, None] = None
+ backing_id: Optional[str] = None
+ """
+ The identifier for the job in the backing scheduler, if available.
+ """
 
 
  # Information required for worker cleanup on shutdown of the batch system.
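Note: because UpdatedBatchJobInfo is now a dataclass with defaults rather than a NamedTuple, callers can omit the trailing fields. A minimal sketch of the new construction, using only the field names and defaults shown in the hunk above (the literal values are made up for illustration):

    from toil.batchSystems.abstractBatchSystem import UpdatedBatchJobInfo

    # Only jobID is required; exitStatus defaults to EXIT_STATUS_UNAVAILABLE_VALUE
    # and exitReason/wallTime/backing_id default to None.
    lost_job = UpdatedBatchJobInfo(jobID=7)

    # The new backing_id field can carry the backing scheduler's own identifier,
    # e.g. a Slurm job ID or a Kubernetes job name.
    finished_job = UpdatedBatchJobInfo(jobID=7, exitStatus=0, wallTime=12.5, backing_id="1234567")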
@@ -159,14 +159,21 @@ class AbstractGridEngineBatchSystem(BatchSystemCleanupSupport):
  logger.debug("Running %r", subLine)
  batchJobID = self.boss.with_retries(self.submitJob, subLine)
  if self.boss._outbox is not None:
- # JobID corresponds to the toil version of the jobID, dif from jobstore idea of the id, batchjobid is what we get from slurm
+ # JobID corresponds to the toil version of the jobID,
+ # different from the jobstore's idea of the id. batchjobid
+ # is what we get from e.g. slurm
  self.boss._outbox.publish(
  ExternalBatchIdMessage(
  jobID, batchJobID, self.boss.__class__.__name__
  )
  )
 
- logger.debug("Submitted job %s", str(batchJobID))
+ logger.info(
+ "Job %s with batch system ID %s queued as job %s",
+ jobName,
+ jobID,
+ str(batchJobID)
+ )
 
  # Store dict for mapping Toil job ID to batch job ID
  # TODO: Note that this currently stores a tuple of (batch system
@@ -251,8 +258,8 @@ class AbstractGridEngineBatchSystem(BatchSystemCleanupSupport):
  self.coalesce_job_exit_codes, batch_job_id_list
  )
  # We got the statuses as a batch
- for running_job_id, status in zip(running_job_list, statuses):
- activity = self._handle_job_status(running_job_id, status, activity)
+ for running_job_id, status, backing_id in zip(running_job_list, statuses, batch_job_id_list):
+ activity = self._handle_job_status(running_job_id, status, activity, backing_id)
 
  self._checkOnJobsCache = activity
  self._checkOnJobsTimestamp = datetime.now()
@@ -263,6 +270,7 @@ class AbstractGridEngineBatchSystem(BatchSystemCleanupSupport):
  job_id: int,
  status: Union[int, tuple[int, Optional[BatchJobExitReason]], None],
  activity: bool,
+ backing_id: str,
  ) -> bool:
  """
  Helper method for checkOnJobs to handle job statuses
@@ -275,7 +283,11 @@ class AbstractGridEngineBatchSystem(BatchSystemCleanupSupport):
  code, reason = status
  self.updatedJobsQueue.put(
  UpdatedBatchJobInfo(
- jobID=job_id, exitStatus=code, exitReason=reason, wallTime=None
+ jobID=job_id,
+ exitStatus=code,
+ exitReason=reason,
+ wallTime=None,
+ backing_id=backing_id,
  )
  )
  self.forgetJob(job_id)
@@ -37,6 +37,7 @@ from threading import Condition, Event, RLock, Thread
  from typing import Any, Callable, Literal, Optional, TypeVar, Union, cast, overload
 
  from toil.lib.conversions import opt_strtobool
+ from toil.lib.throttle import LocalThrottle
 
  if sys.version_info < (3, 10):
  from typing_extensions import ParamSpec
@@ -281,6 +282,10 @@ class KubernetesBatchSystem(BatchSystemCleanupSupport):
  # in the queue or any resource becomes available.
  self._work_available: Condition = Condition(lock=self._mutex)
 
+ # To make sure we don't spam the log when the metrics server is down,
+ # we use a throttle
+ self._metrics_throttle: LocalThrottle = LocalThrottle(600)
+
  self.schedulingThread: Thread = Thread(target=self._scheduler, daemon=True)
  self.schedulingThread.start()
 
@@ -1363,7 +1368,8 @@ class KubernetesBatchSystem(BatchSystemCleanupSupport):
  # This is the sort of error we would expect from an overloaded
  # Kubernetes or a dead metrics service.
  # We can't tell that the pod is stuck, so say that it isn't.
- logger.warning("Could not query metrics service: %s", e)
+ if self._metrics_throttle.throttle(False):
+ logger.warning("Kubernetes metrics service is not available: %s", e)
  return False
  else:
  raise
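The warning above is now rate-limited with toil.lib.throttle.LocalThrottle, imported earlier in this file's diff. A minimal sketch of the pattern, assuming LocalThrottle(min_interval) lets throttle(False) return True at most once per interval without blocking:

    from toil.lib.throttle import LocalThrottle

    metrics_throttle = LocalThrottle(600)  # at most one message per 600 seconds

    def note_metrics_problem(error: Exception) -> None:
        # throttle(False) does not wait; it just reports whether enough time
        # has passed since the last time it returned True.
        if metrics_throttle.throttle(False):
            print(f"Kubernetes metrics service is not available: {error}")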
@@ -1602,6 +1608,7 @@ class KubernetesBatchSystem(BatchSystemCleanupSupport):
  exitStatus=exitCode,
  wallTime=runtime,
  exitReason=exitReason,
+ backing_id=jobObject.metadata.name,
  )
 
  if (exitReason == BatchJobExitReason.FAILED) or (
@@ -1855,7 +1862,11 @@ class KubernetesBatchSystem(BatchSystemCleanupSupport):
 
  # Return the one finished job we found
  return UpdatedBatchJobInfo(
- jobID=jobID, exitStatus=exitCode, wallTime=runtime, exitReason=None
+ jobID=jobID,
+ exitStatus=exitCode,
+ wallTime=runtime,
+ exitReason=None,
+ backing_id=jobObject.metadata.name,
  )
 
  def _waitForJobDeath(self, jobName: str) -> None:
@@ -103,6 +103,9 @@ class MesosBatchSystem(BatchSystemLocalSupport, AbstractScalableBatchSystem, Sch
  if config.mesos_framework_id is not None:
  self.mesos_framework_id = config.mesos_framework_id
 
+ # How long in seconds to wait to register before declaring Mesos unreachable.
+ self.mesos_timeout = 60
+
  # Written to when Mesos kills tasks, as directed by Toil.
  # Jobs must not enter this set until they are removed from runningJobMap.
  self.killedJobIds = set()
@@ -345,17 +348,38 @@ class MesosBatchSystem(BatchSystemLocalSupport, AbstractScalableBatchSystem, Sch
  framework.roles = config.mesos_role
  framework.capabilities = [dict(type="MULTI_ROLE")]
 
+ endpoint = self._resolveAddress(self.mesos_endpoint)
+ log.info("Connecting to Mesos at %s...", self.mesos_endpoint)
+
  # Make the driver which implements most of the scheduler logic and calls back to us for the user-defined parts.
  # Make sure it will call us with nice namespace-y addicts
  self.driver = MesosSchedulerDriver(
  self,
  framework,
- self._resolveAddress(self.mesos_endpoint),
+ endpoint,
  use_addict=True,
  implicit_acknowledgements=True,
  )
  self.driver.start()
 
+ wait_count = 0
+ while self.frameworkId is None:
+ # Wait to register with Mesos, and eventually fail if it just isn't
+ # responding.
+
+ # TODO: Use a condition instead of a spin wait.
+
+ if wait_count >= self.mesos_timeout:
+ error_message = f"Could not connect to Mesos endpoint at {self.mesos_endpoint}"
+ log.error(error_message)
+ self.shutdown()
+ raise RuntimeError(error_message)
+ elif wait_count > 1 and wait_count % 10 == 0:
+ log.warning("Waiting for Mesos registration (try %s/%s)", wait_count, self.mesos_timeout)
+ time.sleep(1)
+ wait_count += 1
+
+
  @staticmethod
  def _resolveAddress(address):
  """
@@ -394,10 +418,17 @@ class MesosBatchSystem(BatchSystemLocalSupport, AbstractScalableBatchSystem, Sch
  """
  Invoked when the scheduler successfully registers with a Mesos master
  """
- log.debug("Registered with framework ID %s", frameworkId.value)
+ log.info("Registered with Mesos as framework ID %s", frameworkId.value)
  # Save the framework ID
  self.frameworkId = frameworkId.value
 
+ def error(self, driver, message):
+ """
+ Invoked when Mesos reports an unrecoverable error.
+ """
+ log.error("Mesos error: %s", message)
+ super().error(driver, message)
+
  def _declineAllOffers(self, driver, offers):
  for offer in offers:
  driver.declineOffer(offer.id)
@@ -12,7 +12,6 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
 
- import importlib
  import logging
  import pkgutil
  import warnings
@@ -21,6 +20,7 @@ from typing import TYPE_CHECKING, Callable
 
  from toil.lib.compatibility import deprecated
  from toil.lib.memoize import memoize
+ import toil.lib.plugins
 
  if TYPE_CHECKING:
  from toil.batchSystems.abstractBatchSystem import AbstractBatchSystem
@@ -40,17 +40,14 @@ def add_batch_system_factory(
 
  :param class_factory: A function that returns a batch system class (NOT an instance), which implements :class:`toil.batchSystems.abstractBatchSystem.AbstractBatchSystem`.
  """
- _registry_keys.append(key)
- _registry[key] = class_factory
+ toil.lib.plugins.register_plugin("batch_system", key, class_factory)
 
 
  def get_batch_systems() -> Sequence[str]:
  """
- Get the names of all the availsble batch systems.
+ Get the names of all the available batch systems.
  """
- _load_all_plugins()
-
- return _registry_keys
+ return toil.lib.plugins.get_plugin_names("batch_system")
 
 
  def get_batch_system(key: str) -> type["AbstractBatchSystem"]:
@@ -60,8 +57,7 @@ def get_batch_system(key: str) -> type["AbstractBatchSystem"]:
  :raises: KeyError if the key is not the name of a batch system, and
  ImportError if the batch system's class cannot be loaded.
  """
-
- return _registry[key]()
+ return toil.lib.plugins.get_plugin("batch_system", key)()
 
 
  DEFAULT_BATCH_SYSTEM = "single_machine"
@@ -126,114 +122,15 @@ def kubernetes_batch_system_factory():
 
 
  #####
- # Registry implementation
- #####
-
- _registry: dict[str, Callable[[], type["AbstractBatchSystem"]]] = {
- "aws_batch": aws_batch_batch_system_factory,
- "single_machine": single_machine_batch_system_factory,
- "grid_engine": gridengine_batch_system_factory,
- "lsf": lsf_batch_system_factory,
- "mesos": mesos_batch_system_factory,
- "slurm": slurm_batch_system_factory,
- "torque": torque_batch_system_factory,
- "htcondor": htcondor_batch_system_factory,
- "kubernetes": kubernetes_batch_system_factory,
- }
- _registry_keys = list(_registry.keys())
-
- # We will load any packages starting with this prefix and let them call
- # add_batch_system_factory()
- _PLUGIN_NAME_PREFIX = "toil_batch_system_"
-
-
- @memoize
- def _load_all_plugins() -> None:
- """
- Load all the batch system plugins that are installed.
- """
-
- for finder, name, is_pkg in pkgutil.iter_modules():
- # For all installed packages
- if name.startswith(_PLUGIN_NAME_PREFIX):
- # If it is a Toil batch system plugin, import it
- importlib.import_module(name)
-
-
- #####
- # Deprecated API
+ # Registers all built-in batch system
  #####
 
- # We used to directly access these constants, but now the Right Way to use this
- # module is add_batch_system_factory() to register and get_batch_systems() to
- # get the list/get_batch_system() to get a class by name.
-
-
- def __getattr__(name):
- """
- Implement a fallback attribute getter to handle deprecated constants.
-
- See <https://stackoverflow.com/a/48242860>.
- """
- if name == "BATCH_SYSTEM_FACTORY_REGISTRY":
- warnings.warn(
- "BATCH_SYSTEM_FACTORY_REGISTRY is deprecated; use get_batch_system() or add_batch_system_factory()",
- DeprecationWarning,
- )
- return _registry
- elif name == "BATCH_SYSTEMS":
- warnings.warn(
- "BATCH_SYSTEMS is deprecated; use get_batch_systems()", DeprecationWarning
- )
- return _registry_keys
- else:
- raise AttributeError(f"Module {__name__} ahs no attribute {name}")
-
-
- @deprecated(new_function_name="add_batch_system_factory")
- def addBatchSystemFactory(
- key: str, batchSystemFactory: Callable[[], type["AbstractBatchSystem"]]
- ):
- """
- Deprecated method to add a batch system.
- """
- return add_batch_system_factory(key, batchSystemFactory)
-
-
- #####
- # Testing utilities
- #####
-
- # We need a snapshot save/restore system for testing. We can't just tamper with
- # the globals because module-level globals are their own references, so we
- # can't touch this module's global name bindings from a client module.
-
-
- def save_batch_system_plugin_state() -> (
- tuple[list[str], dict[str, Callable[[], type["AbstractBatchSystem"]]]]
- ):
- """
- Return a snapshot of the plugin registry that can be restored to remove
- added plugins. Useful for testing the plugin system in-process with other
- tests.
- """
-
- snapshot = (list(_registry_keys), dict(_registry))
- return snapshot
-
-
- def restore_batch_system_plugin_state(
- snapshot: tuple[list[str], dict[str, Callable[[], type["AbstractBatchSystem"]]]]
- ):
- """
- Restore the batch system registry state to a snapshot from
- save_batch_system_plugin_state().
- """
-
- # We need to apply the snapshot without rebinding the names, because that
- # won't affect modules that imported the names.
- wanted_batch_systems, wanted_registry = snapshot
- _registry_keys.clear()
- _registry_keys.extend(wanted_batch_systems)
- _registry.clear()
- _registry.update(wanted_registry)
+ add_batch_system_factory("aws_batch", aws_batch_batch_system_factory)
+ add_batch_system_factory("single_machine", single_machine_batch_system_factory)
+ add_batch_system_factory("grid_engine", gridengine_batch_system_factory)
+ add_batch_system_factory("lsf", lsf_batch_system_factory)
+ add_batch_system_factory("mesos", mesos_batch_system_factory)
+ add_batch_system_factory("slurm", slurm_batch_system_factory)
+ add_batch_system_factory("torque", torque_batch_system_factory)
+ add_batch_system_factory("htcondor", htcondor_batch_system_factory)
+ add_batch_system_factory("kubernetes", kubernetes_batch_system_factory)
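With the registry now delegating to toil.lib.plugins, an external package can still register a batch system through add_batch_system_factory(). A minimal sketch under that assumption (the plugin module and class names below are hypothetical):

    from toil.batchSystems.registry import add_batch_system_factory

    def my_scheduler_factory():
        # Import lazily so the extra dependency is only needed when this
        # batch system is actually selected.
        from my_toil_plugin.batch_system import MySchedulerBatchSystem  # hypothetical
        return MySchedulerBatchSystem

    add_batch_system_factory("my_scheduler", my_scheduler_factory)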
@@ -18,9 +18,11 @@ import logging
  import math
  import os
  import sys
- from argparse import SUPPRESS, ArgumentParser, _ArgumentGroup
  import shlex
- from typing import Callable, NamedTuple, TypeVar
+
+ from argparse import SUPPRESS, ArgumentParser, _ArgumentGroup
+ from datetime import datetime, timedelta, timezone
+ from typing import Callable, NamedTuple, Optional, TypeVar
 
  from toil.batchSystems.abstractBatchSystem import (
  EXIT_STATUS_UNAVAILABLE_VALUE,
@@ -350,9 +352,18 @@ class SlurmBatchSystem(AbstractGridEngineBatchSystem):
  ) -> list[int | tuple[int, BatchJobExitReason | None] | None]:
  """
  Collect all job exit codes in a single call.
- :param batch_job_id_list: list of Job ID strings, where each string has the form
- "<job>[.<task>]".
- :return: list of job exit codes or exit code, exit reason pairs associated with the list of job IDs.
+
+ :param batch_job_id_list: list of Job ID strings, where each string
+ has the form ``<job>[.<task>]``.
+
+ :return: list of job exit codes or exit code, exit reason pairs
+ associated with the list of job IDs.
+
+ :raises CalledProcessErrorStderr: if communicating with Slurm went
+ wrong.
+
+ :raises OSError: if job details are not available becasue a Slurm
+ command could not start.
  """
  logger.log(
  TRACE, "Getting exit codes for slurm jobs: %s", batch_job_id_list
@@ -387,15 +398,54 @@ class SlurmBatchSystem(AbstractGridEngineBatchSystem):
  Helper function for `getJobExitCode` and `coalesce_job_exit_codes`.
  Fetch job details from Slurm's accounting system or job control system.
  :param job_id_list: list of integer Job IDs.
- :return: dict of job statuses, where key is the integer job ID, and value is a tuple
- containing the job's state and exit code.
+ :return: dict of job statuses, where key is the integer job ID, and
+ value is a tuple containing the job's state and exit code.
+ :raises CalledProcessErrorStderr: if communicating with Slurm went
+ wrong.
+ :raises OSError: if job details are not available becasue a Slurm
+ command could not start.
  """
+
+ status_dict = {}
+ scontrol_problem: Optional[Exception] = None
+
+ try:
+ # Get all the job details we can from scontrol, which we think
+ # might be faster/less dangerous than sacct searching, even
+ # though it can't be aimed at more than one job.
+ status_dict.update(self._getJobDetailsFromScontrol(job_id_list))
+ except (CalledProcessErrorStderr, OSError) as e:
+ if isinstance(e, OSError):
+ logger.warning("Could not run scontrol: %s", e)
+ else:
+ logger.warning("Error from scontrol: %s", e)
+ scontrol_problem = e
+
+ logger.debug("After scontrol, got statuses: %s", status_dict)
+
+ # See what's not handy in scontrol (or everything if we couldn't
+ # call it).
+ sacct_job_id_list = self._remaining_jobs(job_id_list, status_dict)
+
+ logger.debug("Remaining jobs to find out about: %s", sacct_job_id_list)
+
  try:
- status_dict = self._getJobDetailsFromSacct(job_id_list)
+ # Ask sacct about those jobs
+ status_dict.update(self._getJobDetailsFromSacct(sacct_job_id_list))
  except (CalledProcessErrorStderr, OSError) as e:
  if isinstance(e, OSError):
  logger.warning("Could not run sacct: %s", e)
- status_dict = self._getJobDetailsFromScontrol(job_id_list)
+ else:
+ logger.warning("Error from sacct: %s", e)
+ if scontrol_problem is not None:
+ # Neither approach worked at all
+ raise
+
+ # One of the methods worked, so we have at least (None, None)
+ # values filled in for all jobs.
+ assert len(status_dict) == len(job_id_list)
+
+
  return status_dict
 
  def _get_job_return_code(
@@ -466,15 +516,123 @@ class SlurmBatchSystem(AbstractGridEngineBatchSystem):
 
  return state_token
 
+ def _remaining_jobs(self, job_id_list: list[int], job_details: dict[int, tuple[str | None, int | None]]) -> list[int]:
+ """
+ Given a list of job IDs and a list of job details (state and exit
+ code), get the list of job IDs where the details are (None, None)
+ (or are missing).
+ """
+ return [
+ j
+ for j in job_id_list
+ if job_details.get(j, (None, None)) == (None, None)
+ ]
+
  def _getJobDetailsFromSacct(
- self, job_id_list: list[int]
+ self,
+ job_id_list: list[int],
+ ) -> dict[int, tuple[str | None, int | None]]:
+ """
+ Get SLURM job exit codes for the jobs in `job_id_list` by running `sacct`.
+
+ Handles querying manageable time periods until all jobs have information.
+
+ There is no guarantee of inter-job consistency: one job may really
+ finish after another, but we might see the earlier-finishing job
+ still running and the later-finishing job finished.
+
+ :param job_id_list: list of integer batch job IDs.
+ :return: dict of job statuses, where key is the job-id, and value
+ is a tuple containing the job's state and exit code. Jobs with
+ no information reported from Slurm will have (None, None).
+ """
+
+ # Pick a now
+ now = datetime.now().astimezone(None)
+ # Decide when to start the search (first copy of past midnight)
+ begin_time = now.replace(
+ hour=0,
+ minute=0,
+ second=0,
+ microsecond=0,
+ fold=0
+ )
+ # And when to end (a day after that)
+ end_time = begin_time + timedelta(days=1)
+ while end_time < now:
+ # If something goes really weird, advance up to our chosen now
+ end_time += timedelta(days=1)
+ # If we don't go around the loop at least once, we might end up
+ # with an empty dict being returned, which shouldn't happen. We
+ # need the (None, None) entries for jobs we can't find.
+ assert end_time >= self.boss.start_time
+
+ results: dict[int, tuple[str | None, int | None]] = {}
+
+ while len(job_id_list) > 0 and end_time >= self.boss.start_time:
+ # There are still jobs to look for and our search isn't
+ # exclusively for stuff that only existed before our workflow
+ # started.
+ results.update(
+ self._get_job_details_from_sacct_for_range(
+ job_id_list,
+ begin_time,
+ end_time
+ )
+ )
+ job_id_list = self._remaining_jobs(job_id_list, results)
+ # If we have to search again, search the previous day. But
+ # overlap a tiny bit so the endpoints don't exactly match, in
+ # case Slurm is not working with inclusive intervals.
+ # TODO: is Slurm working with inclusive intervals?
+ end_time = begin_time + timedelta(seconds=1)
+ begin_time = end_time - timedelta(days=1, seconds=1)
+
+
+ if end_time < self.boss.start_time and len(job_id_list) > 0:
+ # This is suspicious.
+ logger.warning(
+ "Could not find any information from sacct after "
+ "workflow start at %s about jobs: %s",
+ self.boss.start_time.isoformat(),
+ job_id_list
+ )
+
+ return results
+
+ def _get_job_details_from_sacct_for_range(
+ self,
+ job_id_list: list[int],
+ begin_time: datetime,
+ end_time: datetime,
  ) -> dict[int, tuple[str | None, int | None]]:
  """
  Get SLURM job exit codes for the jobs in `job_id_list` by running `sacct`.
+
+ Internally, Slurm's accounting thinks in wall clock time, so for
+ efficiency you need to only search relevant real-time periods.
+
  :param job_id_list: list of integer batch job IDs.
- :return: dict of job statuses, where key is the job-id, and value is a tuple
- containing the job's state and exit code.
+ :param begin_time: An aware datetime of the earliest time to search
+ :param end_time: An aware datetime of the latest time to search
+ :return: dict of job statuses, where key is the job-id, and value
+ is a tuple containing the job's state and exit code. Jobs with
+ no information reported from Slurm will have (None, None).
  """
+
+ assert begin_time.tzinfo is not None, "begin_time must be aware"
+ assert end_time.tzinfo is not None, "end_time must be aware"
+ def stringify(t: datetime) -> str:
+ """
+ Convert an aware time local time, and format it *without* a
+ trailing time zone indicator.
+ """
+ # TODO: What happens when we get an aware time that's ambiguous
+ # in local time? Or when the local timezone changes while we're
+ # sending things to Slurm or doing a progressive search back?
+ naive_t = t.astimezone(None).replace(tzinfo=None)
+ return naive_t.isoformat(timespec="seconds")
+
  job_ids = ",".join(str(id) for id in job_id_list)
  args = [
  "sacct",
@@ -485,8 +643,10 @@ class SlurmBatchSystem(AbstractGridEngineBatchSystem):
  "JobIDRaw,State,ExitCode", # specify output columns
  "-P", # separate columns with pipes
  "-S",
- "1970-01-01",
- ] # override start time limit
+ stringify(begin_time),
+ "-E",
+ stringify(end_time),
+ ]
 
  # Collect the job statuses in a dict; key is the job-id, value is a tuple containing
  # job state and exit status. Initialize dict before processing output of `sacct`.
@@ -500,8 +660,20 @@ class SlurmBatchSystem(AbstractGridEngineBatchSystem):
  if len(job_id_list) == 1:
  # 1 is too big, we can't recurse further, bail out
  raise
- job_statuses.update(self._getJobDetailsFromSacct(job_id_list[:len(job_id_list)//2]))
- job_statuses.update(self._getJobDetailsFromSacct(job_id_list[len(job_id_list)//2:]))
+ job_statuses.update(
+ self._get_job_details_from_sacct_for_range(
+ job_id_list[:len(job_id_list)//2],
+ begin_time,
+ end_time,
+ )
+ )
+ job_statuses.update(
+ self._get_job_details_from_sacct_for_range(
+ job_id_list[len(job_id_list)//2:],
+ begin_time,
+ end_time,
+ )
+ )
  return job_statuses
  else:
  raise
@@ -847,6 +1019,9 @@ class SlurmBatchSystem(AbstractGridEngineBatchSystem):
  ) -> None:
  super().__init__(config, maxCores, maxMemory, maxDisk)
  self.partitions = SlurmBatchSystem.PartitionSet()
+ # Record when the workflow started, so we know when to stop looking for
+ # jobs we ran.
+ self.start_time = datetime.now().astimezone(None)
 
  # Override issuing jobs so we can check if we need to use Slurm's magic
  # whole-node-memory feature.