toil 6.1.0a1__py3-none-any.whl → 8.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (193)
  1. toil/__init__.py +122 -315
  2. toil/batchSystems/__init__.py +1 -0
  3. toil/batchSystems/abstractBatchSystem.py +173 -89
  4. toil/batchSystems/abstractGridEngineBatchSystem.py +272 -148
  5. toil/batchSystems/awsBatch.py +244 -135
  6. toil/batchSystems/cleanup_support.py +26 -16
  7. toil/batchSystems/contained_executor.py +31 -28
  8. toil/batchSystems/gridengine.py +86 -50
  9. toil/batchSystems/htcondor.py +166 -89
  10. toil/batchSystems/kubernetes.py +632 -382
  11. toil/batchSystems/local_support.py +20 -15
  12. toil/batchSystems/lsf.py +134 -81
  13. toil/batchSystems/lsfHelper.py +13 -11
  14. toil/batchSystems/mesos/__init__.py +41 -29
  15. toil/batchSystems/mesos/batchSystem.py +290 -151
  16. toil/batchSystems/mesos/executor.py +79 -50
  17. toil/batchSystems/mesos/test/__init__.py +31 -23
  18. toil/batchSystems/options.py +46 -28
  19. toil/batchSystems/registry.py +53 -19
  20. toil/batchSystems/singleMachine.py +296 -125
  21. toil/batchSystems/slurm.py +603 -138
  22. toil/batchSystems/torque.py +47 -33
  23. toil/bus.py +186 -76
  24. toil/common.py +664 -368
  25. toil/cwl/__init__.py +1 -1
  26. toil/cwl/cwltoil.py +1136 -483
  27. toil/cwl/utils.py +17 -22
  28. toil/deferred.py +63 -42
  29. toil/exceptions.py +5 -3
  30. toil/fileStores/__init__.py +5 -5
  31. toil/fileStores/abstractFileStore.py +140 -60
  32. toil/fileStores/cachingFileStore.py +717 -269
  33. toil/fileStores/nonCachingFileStore.py +116 -87
  34. toil/job.py +1225 -368
  35. toil/jobStores/abstractJobStore.py +416 -266
  36. toil/jobStores/aws/jobStore.py +863 -477
  37. toil/jobStores/aws/utils.py +201 -120
  38. toil/jobStores/conftest.py +3 -2
  39. toil/jobStores/fileJobStore.py +292 -154
  40. toil/jobStores/googleJobStore.py +140 -74
  41. toil/jobStores/utils.py +36 -15
  42. toil/leader.py +668 -272
  43. toil/lib/accelerators.py +115 -18
  44. toil/lib/aws/__init__.py +74 -31
  45. toil/lib/aws/ami.py +122 -87
  46. toil/lib/aws/iam.py +284 -108
  47. toil/lib/aws/s3.py +31 -0
  48. toil/lib/aws/session.py +214 -39
  49. toil/lib/aws/utils.py +287 -231
  50. toil/lib/bioio.py +13 -5
  51. toil/lib/compatibility.py +11 -6
  52. toil/lib/conversions.py +104 -47
  53. toil/lib/docker.py +131 -103
  54. toil/lib/ec2.py +361 -199
  55. toil/lib/ec2nodes.py +174 -106
  56. toil/lib/encryption/_dummy.py +5 -3
  57. toil/lib/encryption/_nacl.py +10 -6
  58. toil/lib/encryption/conftest.py +1 -0
  59. toil/lib/exceptions.py +26 -7
  60. toil/lib/expando.py +5 -3
  61. toil/lib/ftp_utils.py +217 -0
  62. toil/lib/generatedEC2Lists.py +127 -19
  63. toil/lib/humanize.py +6 -2
  64. toil/lib/integration.py +341 -0
  65. toil/lib/io.py +141 -15
  66. toil/lib/iterables.py +4 -2
  67. toil/lib/memoize.py +12 -8
  68. toil/lib/misc.py +66 -21
  69. toil/lib/objects.py +2 -2
  70. toil/lib/resources.py +68 -15
  71. toil/lib/retry.py +126 -81
  72. toil/lib/threading.py +299 -82
  73. toil/lib/throttle.py +16 -15
  74. toil/options/common.py +843 -409
  75. toil/options/cwl.py +175 -90
  76. toil/options/runner.py +50 -0
  77. toil/options/wdl.py +73 -17
  78. toil/provisioners/__init__.py +117 -46
  79. toil/provisioners/abstractProvisioner.py +332 -157
  80. toil/provisioners/aws/__init__.py +70 -33
  81. toil/provisioners/aws/awsProvisioner.py +1145 -715
  82. toil/provisioners/clusterScaler.py +541 -279
  83. toil/provisioners/gceProvisioner.py +282 -179
  84. toil/provisioners/node.py +155 -79
  85. toil/realtimeLogger.py +34 -22
  86. toil/resource.py +137 -75
  87. toil/server/app.py +128 -62
  88. toil/server/celery_app.py +3 -1
  89. toil/server/cli/wes_cwl_runner.py +82 -53
  90. toil/server/utils.py +54 -28
  91. toil/server/wes/abstract_backend.py +64 -26
  92. toil/server/wes/amazon_wes_utils.py +21 -15
  93. toil/server/wes/tasks.py +121 -63
  94. toil/server/wes/toil_backend.py +142 -107
  95. toil/server/wsgi_app.py +4 -3
  96. toil/serviceManager.py +58 -22
  97. toil/statsAndLogging.py +224 -70
  98. toil/test/__init__.py +282 -183
  99. toil/test/batchSystems/batchSystemTest.py +460 -210
  100. toil/test/batchSystems/batch_system_plugin_test.py +90 -0
  101. toil/test/batchSystems/test_gridengine.py +173 -0
  102. toil/test/batchSystems/test_lsf_helper.py +67 -58
  103. toil/test/batchSystems/test_slurm.py +110 -49
  104. toil/test/cactus/__init__.py +0 -0
  105. toil/test/cactus/test_cactus_integration.py +56 -0
  106. toil/test/cwl/cwlTest.py +496 -287
  107. toil/test/cwl/measure_default_memory.cwl +12 -0
  108. toil/test/cwl/not_run_required_input.cwl +29 -0
  109. toil/test/cwl/scatter_duplicate_outputs.cwl +40 -0
  110. toil/test/cwl/seqtk_seq.cwl +1 -1
  111. toil/test/docs/scriptsTest.py +69 -46
  112. toil/test/jobStores/jobStoreTest.py +427 -264
  113. toil/test/lib/aws/test_iam.py +118 -50
  114. toil/test/lib/aws/test_s3.py +16 -9
  115. toil/test/lib/aws/test_utils.py +5 -6
  116. toil/test/lib/dockerTest.py +118 -141
  117. toil/test/lib/test_conversions.py +113 -115
  118. toil/test/lib/test_ec2.py +58 -50
  119. toil/test/lib/test_integration.py +104 -0
  120. toil/test/lib/test_misc.py +12 -5
  121. toil/test/mesos/MesosDataStructuresTest.py +23 -10
  122. toil/test/mesos/helloWorld.py +7 -6
  123. toil/test/mesos/stress.py +25 -20
  124. toil/test/options/__init__.py +13 -0
  125. toil/test/options/options.py +42 -0
  126. toil/test/provisioners/aws/awsProvisionerTest.py +320 -150
  127. toil/test/provisioners/clusterScalerTest.py +440 -250
  128. toil/test/provisioners/clusterTest.py +166 -44
  129. toil/test/provisioners/gceProvisionerTest.py +174 -100
  130. toil/test/provisioners/provisionerTest.py +25 -13
  131. toil/test/provisioners/restartScript.py +5 -4
  132. toil/test/server/serverTest.py +188 -141
  133. toil/test/sort/restart_sort.py +137 -68
  134. toil/test/sort/sort.py +134 -66
  135. toil/test/sort/sortTest.py +91 -49
  136. toil/test/src/autoDeploymentTest.py +141 -101
  137. toil/test/src/busTest.py +20 -18
  138. toil/test/src/checkpointTest.py +8 -2
  139. toil/test/src/deferredFunctionTest.py +49 -35
  140. toil/test/src/dockerCheckTest.py +32 -24
  141. toil/test/src/environmentTest.py +135 -0
  142. toil/test/src/fileStoreTest.py +539 -272
  143. toil/test/src/helloWorldTest.py +7 -4
  144. toil/test/src/importExportFileTest.py +61 -31
  145. toil/test/src/jobDescriptionTest.py +46 -21
  146. toil/test/src/jobEncapsulationTest.py +2 -0
  147. toil/test/src/jobFileStoreTest.py +74 -50
  148. toil/test/src/jobServiceTest.py +187 -73
  149. toil/test/src/jobTest.py +121 -71
  150. toil/test/src/miscTests.py +19 -18
  151. toil/test/src/promisedRequirementTest.py +82 -36
  152. toil/test/src/promisesTest.py +7 -6
  153. toil/test/src/realtimeLoggerTest.py +10 -6
  154. toil/test/src/regularLogTest.py +71 -37
  155. toil/test/src/resourceTest.py +80 -49
  156. toil/test/src/restartDAGTest.py +36 -22
  157. toil/test/src/resumabilityTest.py +9 -2
  158. toil/test/src/retainTempDirTest.py +45 -14
  159. toil/test/src/systemTest.py +12 -8
  160. toil/test/src/threadingTest.py +44 -25
  161. toil/test/src/toilContextManagerTest.py +10 -7
  162. toil/test/src/userDefinedJobArgTypeTest.py +8 -5
  163. toil/test/src/workerTest.py +73 -23
  164. toil/test/utils/toilDebugTest.py +103 -33
  165. toil/test/utils/toilKillTest.py +4 -5
  166. toil/test/utils/utilsTest.py +245 -106
  167. toil/test/wdl/wdltoil_test.py +818 -149
  168. toil/test/wdl/wdltoil_test_kubernetes.py +91 -0
  169. toil/toilState.py +120 -35
  170. toil/utils/toilConfig.py +13 -4
  171. toil/utils/toilDebugFile.py +44 -27
  172. toil/utils/toilDebugJob.py +214 -27
  173. toil/utils/toilDestroyCluster.py +11 -6
  174. toil/utils/toilKill.py +8 -3
  175. toil/utils/toilLaunchCluster.py +256 -140
  176. toil/utils/toilMain.py +37 -16
  177. toil/utils/toilRsyncCluster.py +32 -14
  178. toil/utils/toilSshCluster.py +49 -22
  179. toil/utils/toilStats.py +356 -273
  180. toil/utils/toilStatus.py +292 -139
  181. toil/utils/toilUpdateEC2Instances.py +3 -1
  182. toil/version.py +12 -12
  183. toil/wdl/utils.py +5 -5
  184. toil/wdl/wdltoil.py +3913 -1033
  185. toil/worker.py +367 -184
  186. {toil-6.1.0a1.dist-info → toil-8.0.0.dist-info}/LICENSE +25 -0
  187. toil-8.0.0.dist-info/METADATA +173 -0
  188. toil-8.0.0.dist-info/RECORD +253 -0
  189. {toil-6.1.0a1.dist-info → toil-8.0.0.dist-info}/WHEEL +1 -1
  190. toil-6.1.0a1.dist-info/METADATA +0 -125
  191. toil-6.1.0a1.dist-info/RECORD +0 -237
  192. {toil-6.1.0a1.dist-info → toil-8.0.0.dist-info}/entry_points.txt +0 -0
  193. {toil-6.1.0a1.dist-info → toil-8.0.0.dist-info}/top_level.txt +0 -0
toil/utils/toilDebugJob.py CHANGED
@@ -12,66 +12,253 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Debug tool for running a toil job locally."""
+import gc
 import logging
-
+import os
 import pprint
 import sys
+from pathlib import Path
+from typing import Optional
 
-from toil.common import Config, Toil, parser_with_common_options
+from toil.common import Toil, parser_with_common_options
+from toil.job import FilesDownloadedStoppingPointReached
 from toil.jobStores.fileJobStore import FileJobStore
 from toil.statsAndLogging import set_logging_from_options
 from toil.utils.toilDebugFile import printContentsOfJobStore
+from toil.utils.toilStatus import ToilStatus
 from toil.worker import workerScript
 
 logger = logging.getLogger(__name__)
 
 
 def main() -> None:
-    parser = parser_with_common_options(jobstore_option=True, prog="toil debug-job")
-    parser.add_argument("jobID", type=str, nargs='?', default=None,
-                        help="The job store id of a job within the provided jobstore to run by itself.")
-    parser.add_argument("--printJobInfo", type=str,
-                        help="Dump debugging info about this job ID")
+    parser = parser_with_common_options(
+        jobstore_option=True, prog="toil debug-job", default_log_level=logging.DEBUG
+    )
+    parser.add_argument(
+        "job",
+        type=str,
+        help="The job store id or job name of a job within the provided jobstore",
+    )
+    parser.add_argument(
+        "--printJobInfo",
+        action="store_true",
+        help="Dump debugging info about the job instead of running it",
+    )
+    parser.add_argument(
+        "--retrieveTaskDirectory",
+        dest="retrieve_task_directory",
+        type=str,
+        default=None,
+        help="Download CWL or WDL task inputs to the given directory and stop.",
+    )
 
     options = parser.parse_args()
     set_logging_from_options(options)
 
+    if options.retrieve_task_directory is not None and os.path.exists(
+        options.retrieve_task_directory
+    ):
+        # The logic to duplicate container mounts depends on stuff not already existing.
+        logger.error(
+            "The directory %s given for --retrieveTaskDirectory already exists. "
+            "Stopping to avoid clobbering existing files.",
+            options.retrieve_task_directory,
+        )
+        sys.exit(1)
+
     jobStore = Toil.resumeJobStore(options.jobStore)
     # Get the config with the workflow ID from the job store
     config = jobStore.config
     # But override its options
     config.setOptions(options)
+    config.cleanWorkDir = "never"
 
-    did_something = False
+    # Find the job
+
+    if jobStore.job_exists(options.job):
+        # The user asked for a particular job and it exists
+        job_id = options.job
+    else:
+        # Go search by name and fill in job_id
+
+        # TODO: break out job store scan logic so it doesn't need to re-connect
+        # to the job store.
+        status = ToilStatus(options.jobStore)
+        hits = []
+        suggestion = None
+        for job in status.jobsToReport:
+            if options.job in (job.jobName, job.unitName, job.displayName):
+                # Find all the jobs that sort of match
+                hits.append(job)
+                if suggestion is None and job.remainingTryCount == 0:
+                    # How about this nice failing job instead?
+                    suggestion = job
+        if len(hits) == 0:
+            # No hits
+            if suggestion is None:
+                logger.critical(
+                    'No job found with ID or name "%s". No jobs are completely failed.',
+                    options.job,
+                )
+            else:
+                logger.critical(
+                    'No job found with ID or name "%s". How about the failed job %s instead?',
+                    options.job,
+                    suggestion,
+                )
+            sys.exit(1)
+        elif len(hits) > 1:
+            # Several hits, maybe only one has failed
+            completely_failed_hits = [job for job in hits if job.remainingTryCount == 0]
+            if len(completely_failed_hits) == 0:
+                logger.critical(
+                    'Multiple jobs match "%s" but none are completely failed: %s',
+                    options.job,
+                    hits,
+                )
+                sys.exit(1)
+            elif len(completely_failed_hits) > 0:
+                logger.critical(
+                    'Multiple jobs matching "%s" are completely failed: %s',
+                    options.job,
+                    completely_failed_hits,
+                )
+                sys.exit(1)
+            else:
+                # We found one completely failed job, they probably mean that one.
+                logger.info(
+                    'There are %s jobs matching "%s"; assuming you mean the failed one: %s',
+                    options.job,
+                    completely_failed_hits[0],
+                )
+                job_id = completely_failed_hits[0].jobStoreID
+        else:
+            # We found one job with this name, so they must mean that one
+            logger.info('Looked up job named "%s": %s', options.job, hits[0])
+            job_id = hits[0].jobStoreID
 
     if options.printJobInfo:
+        # Report on the job
+
         if isinstance(jobStore, FileJobStore):
             # List all its files if we can
-            printContentsOfJobStore(job_store=jobStore, job_id=options.printJobInfo)
+            printContentsOfJobStore(job_store=jobStore, job_id=job_id)
         # Print the job description itself
-        job_desc = jobStore.load_job(options.printJobInfo)
+        job_desc = jobStore.load_job(job_id)
         print(f"Job: {job_desc}")
         pprint.pprint(job_desc.__dict__)
+    else:
+        # Run the job
 
-        did_something = True
+        debug_flags = set()
+        local_worker_temp_dir = None
+        if options.retrieve_task_directory is not None:
+            # Pick a directory in it (which may be removed by the worker) as the worker's temp dir.
+            local_worker_temp_dir = os.path.join(
+                options.retrieve_task_directory, "worker"
+            )
+            # Make sure it exists
+            os.makedirs(local_worker_temp_dir, exist_ok=True)
+            # And tell the job to just download files
+            debug_flags.add("download_only")
+        # We might need to reconstruct a container environment.
+        host_and_job_paths: Optional[list[tuple[str, str]]] = None
+        # Track if the run succeeded without error
+        run_succeeded = False
 
-    # TODO: Option to print list of successor jobs
-    # TODO: Option to run job within python debugger, allowing step through of arguments
-    # idea would be to have option to import pdb and set breakpoint at the start of the user's code
+        logger.info(f"Running the following job locally: {job_id}")
+        try:
+            workerScript(
+                jobStore,
+                config,
+                job_id,
+                job_id,
+                redirect_output_to_log_file=False,
+                local_worker_temp_dir=local_worker_temp_dir,
+                debug_flags=debug_flags,
+            )
+        except FilesDownloadedStoppingPointReached as e:
+            # We asked for the files to be downloaded and now they are.
+            assert options.retrieve_task_directory is not None
+            if e.host_and_job_paths is not None:
+                # Capture the container mapping so we can reconstruct the container environment after we unwind the worker stack.
+                host_and_job_paths = e.host_and_job_paths
+        else:
+            # No error!
+            run_succeeded = True
 
-    if options.jobID is not None:
-        # We actually want to run a job.
+        # Make sure the deferred function manager cleans up and logs its
+        # shutdown before we start writing any reports.
+        gc.collect()
 
-        jobID = options.jobID
-        logger.debug(f"Running the following job locally: {jobID}")
-        workerScript(jobStore, config, jobID, jobID, redirectOutputToLogFile=False)
-        logger.debug(f"Finished running: {jobID}")
-        # Even if the job fails, the worker script succeeds unless something goes wrong with it internally.
+        if run_succeeded:
+            logger.info(f"Successfully ran: {job_id}")
 
-        did_something = True
+        if host_and_job_paths is not None:
+            # We need to make a place that looks like the job paths half of these.
 
-    if not did_something:
-        # Somebody forgot to tell us to do anything.
-        # Show the usage instructions.
-        parser.print_help()
-        sys.exit(1)
+            # Sort by job-side path so we do children before parents, to
+            # stop us from accidentally making children inside moutned
+            # parents.
+            sorted_mounts = sorted(host_and_job_paths, key=lambda t: t[1], reverse=True)
+
+            fake_job_root = os.path.join(options.retrieve_task_directory, "inside")
+            os.makedirs(fake_job_root, exist_ok=True)
+
+            for host_path, job_path in sorted_mounts:
+                if not os.path.exists(host_path):
+                    logger.error(
+                        "Job intended to mount %s as %s but it does not exist!",
+                        host_path,
+                        job_path,
+                    )
+                    continue
+                if not job_path.startswith("/"):
+                    logger.error(
+                        "Job intended to mount %s as %s but destination is a relative path!",
+                        host_path,
+                        job_path,
+                    )
+                    continue
+                # Drop the slash because we are building a chroot-ish mini filesystem.
+                job_relative_path = job_path[1:]
+                if job_relative_path.startswith("/"):
+                    # We are having trouble understanding what the job
+                    # intended to do. Stop working on this mount.
+                    logger.error(
+                        "Job intended to mount %s as %s but destination starts with multiple slashes for some reason!",
+                        host_path,
+                        job_path,
+                    )
+                    continue
+                fake_job_path = os.path.join(fake_job_root, job_relative_path)
+                if os.path.exists(fake_job_path):
+                    logger.error(
+                        "Job intended to mount %s as %s but that location is already mounted!",
+                        host_path,
+                        job_path,
+                    )
+                    continue
+
+                logger.info("Job mounted %s as %s", host_path, job_path)
+
+                # Make sure the directory to contain the mount exists.
+                fake_job_containing_path = os.path.dirname(fake_job_path)
+                os.makedirs(fake_job_containing_path, exist_ok=True)
+
+                top_pathobj = Path(os.path.abspath(options.retrieve_task_directory))
+                source_pathobj = Path(host_path)
+                if top_pathobj in source_pathobj.parents:
+                    # We're linking to a file we already downloaded (probably).
+                    # Make a relative symlink so the whole assemblage can move.
+                    host_path = os.path.relpath(host_path, fake_job_containing_path)
+
+                # Make a symlink to simulate the mount
+                os.symlink(host_path, fake_job_path)
+
+            logger.info("Reconstructed job container filesystem at %s", fake_job_root)
+
+    # TODO: Option to print list of successor jobs
+    # TODO: Option to run job within python debugger, allowing step through of arguments
+    # idea would be to have option to import pdb and set breakpoint at the start of the user's code
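Beyond the new CLI surface (a positional "job" argument that accepts either a job store ID or a job name, --printJobInfo as a flag, and --retrieveTaskDirectory), the hunk above also shows the workerScript call switching from the camelCase redirectOutputToLogFile keyword to snake_case and gaining two optional debugging arguments. A minimal sketch of adapting a direct call site, based only on the 8.0.0 call shown in the diff; the job store location and job ID below are placeholders, not values from the diff:

# Hedged sketch based on the workerScript call shown in the diff above.
from toil.common import Toil
from toil.worker import workerScript

job_store = Toil.resumeJobStore("file:/tmp/example-jobstore")  # placeholder job store locator
config = job_store.config
job_id = "kind-ExampleJob/instance-xyz"  # placeholder job store ID

# toil 6.1.0a1 style, as removed above:
#   workerScript(job_store, config, job_id, job_id, redirectOutputToLogFile=False)

# toil 8.0.0 style, as added above; passing None and an empty set appears to
# match the behavior of toil debug-job when --retrieveTaskDirectory is unset.
workerScript(
    job_store,
    config,
    job_id,
    job_id,
    redirect_output_to_log_file=False,
    local_worker_temp_dir=None,
    debug_flags=set(),
)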
toil/utils/toilDestroyCluster.py CHANGED
@@ -20,16 +20,21 @@ from toil.statsAndLogging import set_logging_from_options
 
 logger = logging.getLogger(__name__)
 
+
 def main() -> None:
-    parser = parser_with_common_options(provisioner_options=True, jobstore_option=False, prog="toil destroy-cluster")
+    parser = parser_with_common_options(
+        provisioner_options=True, jobstore_option=False, prog="toil destroy-cluster"
+    )
     options = parser.parse_args()
     set_logging_from_options(options)
 
-    logger.info('Destroying cluster %s', options.clusterName)
+    logger.info("Destroying cluster %s", options.clusterName)
 
-    cluster = cluster_factory(provisioner=options.provisioner,
-                              clusterName=options.clusterName,
-                              zone=options.zone)
+    cluster = cluster_factory(
+        provisioner=options.provisioner,
+        clusterName=options.clusterName,
+        zone=options.zone,
+    )
     cluster.destroyCluster()
 
-    logger.info('Cluster %s is now gone.', options.clusterName)
+    logger.info("Cluster %s is now gone.", options.clusterName)
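The toil destroy-cluster changes above are formatting-only: the same arguments are passed, just reflowed one per line. For reference, a minimal sketch of driving the same teardown programmatically, assuming cluster_factory is importable from toil.provisioners (consistent with the changed-files list above); the provisioner, cluster name, and zone values are placeholders:

# Hedged sketch mirroring the reformatted call in the diff above; the import
# path is an assumption and the argument values are placeholders.
from toil.provisioners import cluster_factory

cluster = cluster_factory(
    provisioner="aws",              # placeholder provisioner name
    clusterName="example-cluster",  # placeholder cluster name
    zone="us-west-2a",              # placeholder zone
)
cluster.destroyCluster()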
toil/utils/toilKill.py CHANGED
@@ -26,8 +26,11 @@ logger = logging.getLogger(__name__)
 
 def main() -> None:
     parser = parser_with_common_options(prog="toil kill")
-    parser.add_argument('--force', action='store_true',
-                        help="Send SIGKILL to the leader process if local.")
+    parser.add_argument(
+        "--force",
+        action="store_true",
+        help="Send SIGKILL to the leader process if local.",
+    )
     options = parser.parse_args()
     set_logging_from_options(options)
     config = Config()
@@ -65,7 +68,9 @@ def main() -> None:
             os.kill(pid_to_kill, signal.SIGKILL if options.force else signal.SIGTERM)
             logger.info("Toil process %i successfully terminated.", pid_to_kill)
         except OSError:
-            logger.error("Could not signal process %i. Is it still running?", pid_to_kill)
+            logger.error(
+                "Could not signal process %i. Is it still running?", pid_to_kill
+            )
             sys.exit(1)
     else:
        # Flip the flag inside the job store to signal kill