toil 6.1.0a1__py3-none-any.whl → 8.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (193)
  1. toil/__init__.py +122 -315
  2. toil/batchSystems/__init__.py +1 -0
  3. toil/batchSystems/abstractBatchSystem.py +173 -89
  4. toil/batchSystems/abstractGridEngineBatchSystem.py +272 -148
  5. toil/batchSystems/awsBatch.py +244 -135
  6. toil/batchSystems/cleanup_support.py +26 -16
  7. toil/batchSystems/contained_executor.py +31 -28
  8. toil/batchSystems/gridengine.py +86 -50
  9. toil/batchSystems/htcondor.py +166 -89
  10. toil/batchSystems/kubernetes.py +632 -382
  11. toil/batchSystems/local_support.py +20 -15
  12. toil/batchSystems/lsf.py +134 -81
  13. toil/batchSystems/lsfHelper.py +13 -11
  14. toil/batchSystems/mesos/__init__.py +41 -29
  15. toil/batchSystems/mesos/batchSystem.py +290 -151
  16. toil/batchSystems/mesos/executor.py +79 -50
  17. toil/batchSystems/mesos/test/__init__.py +31 -23
  18. toil/batchSystems/options.py +46 -28
  19. toil/batchSystems/registry.py +53 -19
  20. toil/batchSystems/singleMachine.py +296 -125
  21. toil/batchSystems/slurm.py +603 -138
  22. toil/batchSystems/torque.py +47 -33
  23. toil/bus.py +186 -76
  24. toil/common.py +664 -368
  25. toil/cwl/__init__.py +1 -1
  26. toil/cwl/cwltoil.py +1136 -483
  27. toil/cwl/utils.py +17 -22
  28. toil/deferred.py +63 -42
  29. toil/exceptions.py +5 -3
  30. toil/fileStores/__init__.py +5 -5
  31. toil/fileStores/abstractFileStore.py +140 -60
  32. toil/fileStores/cachingFileStore.py +717 -269
  33. toil/fileStores/nonCachingFileStore.py +116 -87
  34. toil/job.py +1225 -368
  35. toil/jobStores/abstractJobStore.py +416 -266
  36. toil/jobStores/aws/jobStore.py +863 -477
  37. toil/jobStores/aws/utils.py +201 -120
  38. toil/jobStores/conftest.py +3 -2
  39. toil/jobStores/fileJobStore.py +292 -154
  40. toil/jobStores/googleJobStore.py +140 -74
  41. toil/jobStores/utils.py +36 -15
  42. toil/leader.py +668 -272
  43. toil/lib/accelerators.py +115 -18
  44. toil/lib/aws/__init__.py +74 -31
  45. toil/lib/aws/ami.py +122 -87
  46. toil/lib/aws/iam.py +284 -108
  47. toil/lib/aws/s3.py +31 -0
  48. toil/lib/aws/session.py +214 -39
  49. toil/lib/aws/utils.py +287 -231
  50. toil/lib/bioio.py +13 -5
  51. toil/lib/compatibility.py +11 -6
  52. toil/lib/conversions.py +104 -47
  53. toil/lib/docker.py +131 -103
  54. toil/lib/ec2.py +361 -199
  55. toil/lib/ec2nodes.py +174 -106
  56. toil/lib/encryption/_dummy.py +5 -3
  57. toil/lib/encryption/_nacl.py +10 -6
  58. toil/lib/encryption/conftest.py +1 -0
  59. toil/lib/exceptions.py +26 -7
  60. toil/lib/expando.py +5 -3
  61. toil/lib/ftp_utils.py +217 -0
  62. toil/lib/generatedEC2Lists.py +127 -19
  63. toil/lib/humanize.py +6 -2
  64. toil/lib/integration.py +341 -0
  65. toil/lib/io.py +141 -15
  66. toil/lib/iterables.py +4 -2
  67. toil/lib/memoize.py +12 -8
  68. toil/lib/misc.py +66 -21
  69. toil/lib/objects.py +2 -2
  70. toil/lib/resources.py +68 -15
  71. toil/lib/retry.py +126 -81
  72. toil/lib/threading.py +299 -82
  73. toil/lib/throttle.py +16 -15
  74. toil/options/common.py +843 -409
  75. toil/options/cwl.py +175 -90
  76. toil/options/runner.py +50 -0
  77. toil/options/wdl.py +73 -17
  78. toil/provisioners/__init__.py +117 -46
  79. toil/provisioners/abstractProvisioner.py +332 -157
  80. toil/provisioners/aws/__init__.py +70 -33
  81. toil/provisioners/aws/awsProvisioner.py +1145 -715
  82. toil/provisioners/clusterScaler.py +541 -279
  83. toil/provisioners/gceProvisioner.py +282 -179
  84. toil/provisioners/node.py +155 -79
  85. toil/realtimeLogger.py +34 -22
  86. toil/resource.py +137 -75
  87. toil/server/app.py +128 -62
  88. toil/server/celery_app.py +3 -1
  89. toil/server/cli/wes_cwl_runner.py +82 -53
  90. toil/server/utils.py +54 -28
  91. toil/server/wes/abstract_backend.py +64 -26
  92. toil/server/wes/amazon_wes_utils.py +21 -15
  93. toil/server/wes/tasks.py +121 -63
  94. toil/server/wes/toil_backend.py +142 -107
  95. toil/server/wsgi_app.py +4 -3
  96. toil/serviceManager.py +58 -22
  97. toil/statsAndLogging.py +224 -70
  98. toil/test/__init__.py +282 -183
  99. toil/test/batchSystems/batchSystemTest.py +460 -210
  100. toil/test/batchSystems/batch_system_plugin_test.py +90 -0
  101. toil/test/batchSystems/test_gridengine.py +173 -0
  102. toil/test/batchSystems/test_lsf_helper.py +67 -58
  103. toil/test/batchSystems/test_slurm.py +110 -49
  104. toil/test/cactus/__init__.py +0 -0
  105. toil/test/cactus/test_cactus_integration.py +56 -0
  106. toil/test/cwl/cwlTest.py +496 -287
  107. toil/test/cwl/measure_default_memory.cwl +12 -0
  108. toil/test/cwl/not_run_required_input.cwl +29 -0
  109. toil/test/cwl/scatter_duplicate_outputs.cwl +40 -0
  110. toil/test/cwl/seqtk_seq.cwl +1 -1
  111. toil/test/docs/scriptsTest.py +69 -46
  112. toil/test/jobStores/jobStoreTest.py +427 -264
  113. toil/test/lib/aws/test_iam.py +118 -50
  114. toil/test/lib/aws/test_s3.py +16 -9
  115. toil/test/lib/aws/test_utils.py +5 -6
  116. toil/test/lib/dockerTest.py +118 -141
  117. toil/test/lib/test_conversions.py +113 -115
  118. toil/test/lib/test_ec2.py +58 -50
  119. toil/test/lib/test_integration.py +104 -0
  120. toil/test/lib/test_misc.py +12 -5
  121. toil/test/mesos/MesosDataStructuresTest.py +23 -10
  122. toil/test/mesos/helloWorld.py +7 -6
  123. toil/test/mesos/stress.py +25 -20
  124. toil/test/options/__init__.py +13 -0
  125. toil/test/options/options.py +42 -0
  126. toil/test/provisioners/aws/awsProvisionerTest.py +320 -150
  127. toil/test/provisioners/clusterScalerTest.py +440 -250
  128. toil/test/provisioners/clusterTest.py +166 -44
  129. toil/test/provisioners/gceProvisionerTest.py +174 -100
  130. toil/test/provisioners/provisionerTest.py +25 -13
  131. toil/test/provisioners/restartScript.py +5 -4
  132. toil/test/server/serverTest.py +188 -141
  133. toil/test/sort/restart_sort.py +137 -68
  134. toil/test/sort/sort.py +134 -66
  135. toil/test/sort/sortTest.py +91 -49
  136. toil/test/src/autoDeploymentTest.py +141 -101
  137. toil/test/src/busTest.py +20 -18
  138. toil/test/src/checkpointTest.py +8 -2
  139. toil/test/src/deferredFunctionTest.py +49 -35
  140. toil/test/src/dockerCheckTest.py +32 -24
  141. toil/test/src/environmentTest.py +135 -0
  142. toil/test/src/fileStoreTest.py +539 -272
  143. toil/test/src/helloWorldTest.py +7 -4
  144. toil/test/src/importExportFileTest.py +61 -31
  145. toil/test/src/jobDescriptionTest.py +46 -21
  146. toil/test/src/jobEncapsulationTest.py +2 -0
  147. toil/test/src/jobFileStoreTest.py +74 -50
  148. toil/test/src/jobServiceTest.py +187 -73
  149. toil/test/src/jobTest.py +121 -71
  150. toil/test/src/miscTests.py +19 -18
  151. toil/test/src/promisedRequirementTest.py +82 -36
  152. toil/test/src/promisesTest.py +7 -6
  153. toil/test/src/realtimeLoggerTest.py +10 -6
  154. toil/test/src/regularLogTest.py +71 -37
  155. toil/test/src/resourceTest.py +80 -49
  156. toil/test/src/restartDAGTest.py +36 -22
  157. toil/test/src/resumabilityTest.py +9 -2
  158. toil/test/src/retainTempDirTest.py +45 -14
  159. toil/test/src/systemTest.py +12 -8
  160. toil/test/src/threadingTest.py +44 -25
  161. toil/test/src/toilContextManagerTest.py +10 -7
  162. toil/test/src/userDefinedJobArgTypeTest.py +8 -5
  163. toil/test/src/workerTest.py +73 -23
  164. toil/test/utils/toilDebugTest.py +103 -33
  165. toil/test/utils/toilKillTest.py +4 -5
  166. toil/test/utils/utilsTest.py +245 -106
  167. toil/test/wdl/wdltoil_test.py +818 -149
  168. toil/test/wdl/wdltoil_test_kubernetes.py +91 -0
  169. toil/toilState.py +120 -35
  170. toil/utils/toilConfig.py +13 -4
  171. toil/utils/toilDebugFile.py +44 -27
  172. toil/utils/toilDebugJob.py +214 -27
  173. toil/utils/toilDestroyCluster.py +11 -6
  174. toil/utils/toilKill.py +8 -3
  175. toil/utils/toilLaunchCluster.py +256 -140
  176. toil/utils/toilMain.py +37 -16
  177. toil/utils/toilRsyncCluster.py +32 -14
  178. toil/utils/toilSshCluster.py +49 -22
  179. toil/utils/toilStats.py +356 -273
  180. toil/utils/toilStatus.py +292 -139
  181. toil/utils/toilUpdateEC2Instances.py +3 -1
  182. toil/version.py +12 -12
  183. toil/wdl/utils.py +5 -5
  184. toil/wdl/wdltoil.py +3913 -1033
  185. toil/worker.py +367 -184
  186. {toil-6.1.0a1.dist-info → toil-8.0.0.dist-info}/LICENSE +25 -0
  187. toil-8.0.0.dist-info/METADATA +173 -0
  188. toil-8.0.0.dist-info/RECORD +253 -0
  189. {toil-6.1.0a1.dist-info → toil-8.0.0.dist-info}/WHEEL +1 -1
  190. toil-6.1.0a1.dist-info/METADATA +0 -125
  191. toil-6.1.0a1.dist-info/RECORD +0 -237
  192. {toil-6.1.0a1.dist-info → toil-8.0.0.dist-info}/entry_points.txt +0 -0
  193. {toil-6.1.0a1.dist-info → toil-8.0.0.dist-info}/top_level.txt +0 -0
toil/statsAndLogging.py CHANGED
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import gzip
+import io
 import json
 import logging
 import os
@@ -19,10 +20,11 @@ import time
 from argparse import ArgumentParser, Namespace
 from logging.handlers import RotatingFileHandler
 from threading import Event, Thread
-from typing import IO, TYPE_CHECKING, Any, Callable, List, Optional, Union
+from typing import IO, TYPE_CHECKING, Any, Callable, Optional, Union
 
+from toil.lib.conversions import strtobool
 from toil.lib.expando import Expando
-from toil.lib.resources import get_total_cpu_time
+from toil.lib.resources import ResourceMonitor
 
 if TYPE_CHECKING:
     from toil.common import Config
@@ -30,26 +32,36 @@ if TYPE_CHECKING:
 
 logger = logging.getLogger(__name__)
 root_logger = logging.getLogger()
-toil_logger = logging.getLogger('toil')
+toil_logger = logging.getLogger("toil")
 
 DEFAULT_LOGLEVEL = logging.INFO
 __loggingFiles = []
 
+# We have some logging that belongs at a TRACE level, below DEBUG
+TRACE = logging.DEBUG - 5
+
+logging.addLevelName(TRACE, "TRACE")
+
 
 class StatsAndLogging:
     """A thread to aggregate statistics and logging."""
-    def __init__(self, jobStore: 'AbstractJobStore', config: 'Config') -> None:
+
+    def __init__(self, jobStore: "AbstractJobStore", config: "Config") -> None:
         self._stop = Event()
-        self._worker = Thread(target=self.statsAndLoggingAggregator,
-                              args=(jobStore, self._stop, config),
-                              daemon=True)
+        self._worker = Thread(
+            target=self.statsAndLoggingAggregator,
+            args=(jobStore, self._stop, config),
+            daemon=True,
+        )
 
     def start(self) -> None:
         """Start the stats and logging thread."""
         self._worker.start()
 
     @classmethod
-    def formatLogStream(cls, stream: Union[IO[str], IO[bytes]], job_name: Optional[str] = None) -> str:
+    def formatLogStream(
+        cls, stream: Union[IO[str], IO[bytes]], stream_name: str
+    ) -> str:
         """
         Given a stream of text or bytes, and the job name, job itself, or some
         other optional stringifyable identity info for the job, return a big
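
Note: the hunk above adds a TRACE level (numerically DEBUG - 5) and registers its name with the logging module. A minimal sketch of how a caller could emit TRACE-level messages against the 8.0.0 module, using only names shown in this diff (the logger name below is illustrative):

    import logging

    from toil.statsAndLogging import TRACE, configure_root_logger, set_log_level

    configure_root_logger()
    set_log_level("Trace")  # accepted because logging.addLevelName registered "TRACE"

    log = logging.getLogger("toil.example")  # hypothetical logger name
    log.log(TRACE, "finer-grained detail than DEBUG")
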
@@ -62,50 +74,62 @@ class StatsAndLogging:
 
         :param stream: The stream of text or bytes to print for the user.
         """
-        lines = [f'Log from job "{job_name}" follows:', '=========>']
+        lines = [f"{stream_name} follows:", "=========>"]
 
         for line in stream:
             if isinstance(line, bytes):
-                line = line.decode('utf-8', errors='replace')
-            lines.append('\t' + line.rstrip('\n'))
+                line = line.decode("utf-8", errors="replace")
+            lines.append("\t" + line.rstrip("\n"))
 
-        lines.append('<=========')
-
-        return '\n'.join(lines)
+        lines.append("<=========")
 
+        return "\n".join(lines)
 
     @classmethod
-    def logWithFormatting(cls, jobStoreID: str, jobLogs: Union[IO[str], IO[bytes]], method: Callable[[str], None] = logger.debug,
-                          message: Optional[str] = None) -> None:
+    def logWithFormatting(
+        cls,
+        stream_name: str,
+        jobLogs: Union[IO[str], IO[bytes]],
+        method: Callable[[str], None] = logger.debug,
+        message: Optional[str] = None,
+    ) -> None:
         if message is not None:
             method(message)
 
-        # Format and log the logs, identifying the job with its job store ID.
-        method(cls.formatLogStream(jobLogs, jobStoreID))
+        # Format and log the logs, identifying the stream with the given name.
+        method(cls.formatLogStream(jobLogs, stream_name))
 
     @classmethod
-    def writeLogFiles(cls, jobNames: List[str], jobLogList: List[str], config: 'Config', failed: bool = False) -> None:
-        def createName(logPath: str, jobName: str, logExtension: str, failed: bool = False) -> str:
-            logName = jobName.replace('-', '--')
-            logName = logName.replace('/', '-')
-            logName = logName.replace(' ', '_')
-            logName = logName.replace("'", '')
-            logName = logName.replace('"', '')
+    def writeLogFiles(
+        cls,
+        jobNames: list[str],
+        jobLogList: list[str],
+        config: "Config",
+        failed: bool = False,
+    ) -> None:
+        def createName(
+            logPath: str, jobName: str, logExtension: str, failed: bool = False
+        ) -> str:
+            logName = jobName.replace("-", "--")
+            logName = logName.replace("/", "-")
+            logName = logName.replace(" ", "_")
+            logName = logName.replace("'", "")
+            logName = logName.replace('"', "")
             # Add a "failed_" prefix to logs from failed jobs.
-            logName = ('failed_' if failed else '') + logName
+            logName = ("failed_" if failed else "") + logName
             counter = 0
             while True:
-                suffix = str(counter).zfill(3) + logExtension
+                suffix = "_" + str(counter).zfill(3) + logExtension
                 fullName = os.path.join(logPath, logName + suffix)
                 # The maximum file name size in the default HFS+ file system is 255 UTF-16 encoding units, so basically 255 characters
                 if len(fullName) >= 255:
-                    return fullName[:(255-len(suffix))] + suffix
+                    return fullName[: (255 - len(suffix))] + suffix
                 if not os.path.exists(fullName):
                     return fullName
                 counter += 1
 
         mainFileName = jobNames[0]
-        extension = '.log'
+        extension = ".log"
         writeFn: Callable[..., Any]
         if config.writeLogs:
             path = config.writeLogs
@@ -113,19 +137,22 @@
         elif config.writeLogsGzip:
             path = config.writeLogsGzip
             writeFn = gzip.open
-            extension += '.gz'
+            extension += ".gz"
         else:
             # we don't have anywhere to write the logs, return now
             return
 
+        # Make sure the destination exists
+        os.makedirs(path, exist_ok=True)
+
         fullName = createName(path, mainFileName, extension, failed)
-        with writeFn(fullName, 'wb') as f:
+        with writeFn(fullName, "wb") as f:
             for l in jobLogList:
                 if isinstance(l, bytes):
-                    l = l.decode('utf-8')
-                if not l.endswith('\n'):
-                    l += '\n'
-                f.write(l.encode('utf-8'))
+                    l = l.decode("utf-8")
+                if not l.endswith("\n"):
+                    l += "\n"
+                f.write(l.encode("utf-8"))
         for alternateName in jobNames[1:]:
             # There are chained jobs in this output - indicate this with a symlink
             # of the job's name to this file
134
161
  os.symlink(os.path.relpath(fullName, path), name)
135
162
 
136
163
  @classmethod
137
- def statsAndLoggingAggregator(cls, jobStore: 'AbstractJobStore', stop: Event, config: 'Config') -> None:
164
+ def statsAndLoggingAggregator(
165
+ cls, jobStore: "AbstractJobStore", stop: Event, config: "Config"
166
+ ) -> None:
138
167
  """
139
168
  The following function is used for collating stats/reporting log messages from the workers.
140
169
  Works inside of a thread, collates as long as the stop flag is not True.
141
170
  """
142
171
  # Overall timing
143
172
  startTime = time.time()
144
- startClock = get_total_cpu_time()
173
+ startClock = ResourceMonitor.get_total_cpu_time()
145
174
 
146
175
  def callback(fileHandle: Union[IO[bytes], IO[str]]) -> None:
147
176
  statsStr = fileHandle.read()
@@ -150,16 +179,43 @@
             stats = json.loads(statsStr, object_hook=Expando)
             if not stats:
                 return
+
            try:
-                logs = stats.workers.logsToMaster
+                # Handle all the log_to_leader messages
+                logs = stats.workers.logs_to_leader
             except AttributeError:
                 # To be expected if there were no calls to log_to_leader()
                 pass
             else:
                 for message in logs:
-                    logger.log(int(message.level),
-                               'Got message from job at time %s: %s',
-                               time.strftime('%m-%d-%Y %H:%M:%S'), message.text)
+                    logger.log(
+                        int(message.level),
+                        "Got message from job at time %s: %s",
+                        time.strftime("%m-%d-%Y %H:%M:%S"),
+                        message.text,
+                    )
+
+            try:
+                # Handle all the user-level text streams reported back (command output, etc.)
+                user_logs = stats.workers.logging_user_streams
+            except AttributeError:
+                # To be expected if there were no calls to log_user_stream()
+                pass
+            else:
+                for stream_entry in user_logs:
+                    try:
+                        # Unpack the stream name and text.
+                        name, text = stream_entry.name, stream_entry.text
+                    except AttributeError:
+                        # Doesn't have a user-provided stream name and stream
+                        # text, so skip it.
+                        continue
+                    # Since this is sent as inline text we need to pretend to stream it.
+                    # TODO: Save these as individual files if they start to get too big?
+                    cls.logWithFormatting(name, io.StringIO(text), logger.info)
+                    # Save it as a log file, as if it were a Toil-level job.
+                    cls.writeLogFiles([name], [text], config=config)
+
             try:
                 logs = stats.logs
             except AttributeError:
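
Note: judging only from the attribute accesses above, a worker stats blob that exercises both the logs_to_leader and logging_user_streams branches might look like the following; the real schema is produced elsewhere in Toil, so this shape is an assumption for illustration:

    import json

    # Parsed by the callback with json.loads(..., object_hook=Expando),
    # so these keys become attribute accesses like stats.workers.logs_to_leader.
    stats_blob = json.dumps(
        {
            "workers": {
                "logs_to_leader": [
                    {"level": "20", "text": "hello from a worker"}  # 20 == logging.INFO
                ],
                "logging_user_streams": [
                    {"name": "stdout_from_my_tool", "text": "line 1\nline 2\n"}
                ],
            }
        }
    )
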
@@ -168,8 +224,11 @@
                 # we may have multiple jobs per worker
                 jobNames = logs.names
                 messages = logs.messages
-                cls.logWithFormatting(jobNames[0], messages,
-                                      message='Received Toil worker log. Disable debug level logging to hide this output')
+                cls.logWithFormatting(
+                    f'Log from job "{jobNames[0]}"',
+                    messages,
+                    message="Received Toil worker log. Disable debug level logging to hide this output",
+                )
                 cls.writeLogFiles(jobNames, messages, config=config)
 
         while True:
@@ -181,8 +240,13 @@
             time.sleep(0.5)  # Avoid cycling too fast
 
         # Finish the stats file
-        text = json.dumps(dict(total_time=str(time.time() - startTime),
-                               total_clock=str(get_total_cpu_time() - startClock)), ensure_ascii=True)
+        text = json.dumps(
+            dict(
+                total_time=str(time.time() - startTime),
+                total_clock=str(ResourceMonitor.get_total_cpu_time() - startClock),
+            ),
+            ensure_ascii=True,
+        )
         jobStore.write_logs(text)
 
     def check(self) -> None:
@@ -195,11 +259,14 @@
 
     def shutdown(self) -> None:
         """Finish up the stats/logging aggregation thread."""
-        logger.debug('Waiting for stats and logging collator thread to finish ...')
+        logger.debug("Waiting for stats and logging collator thread to finish ...")
         startTime = time.time()
         self._stop.set()
         self._worker.join()
-        logger.debug('... finished collating stats and logs. Took %s seconds', time.time() - startTime)
+        logger.debug(
+            "... finished collating stats and logs. Took %s seconds",
+            time.time() - startTime,
+        )
     # in addition to cleaning on exceptions, onError should clean if there are any failed jobs
 
 
@@ -208,32 +275,105 @@ def set_log_level(level: str, set_logger: Optional[logging.Logger] = None) -> No
     level = "CRITICAL" if level.upper() == "OFF" else level.upper()
     set_logger = set_logger if set_logger else root_logger
     set_logger.setLevel(level)
-
     # Suppress any random loggers introduced by libraries we use.
     # Especially boto/boto3. They print too much. -__-
     suppress_exotic_logging(__name__)
 
 
-def add_logging_options(parser: ArgumentParser) -> None:
-    """Add logging options to set the global log level."""
+def install_log_color(set_logger: Optional[logging.Logger] = None) -> None:
+    """Make logs colored."""
+    # Most of this code is taken from miniwdl
+    # delayed import
+    import coloredlogs  # type: ignore[import-untyped]
+
+    level_styles = dict(coloredlogs.DEFAULT_LEVEL_STYLES)
+    level_styles["trace"] = dict(level_styles["debug"])
+
+    # TODO: What if these fixed colors aren't right for the terminal background?
+    # It might be light or dark or even grey.
+    level_styles["trace"]["color"] = 242
+    level_styles["debug"]["color"] = 242
+    level_styles["notice"] = {"color": "green", "bold": True}
+    level_styles["error"]["bold"] = True
+    level_styles["warning"]["bold"] = True
+    level_styles["info"] = {}
+    field_styles = dict(coloredlogs.DEFAULT_FIELD_STYLES)
+    field_styles["asctime"] = {"color": "blue"}
+    field_styles["name"] = {"color": "magenta"}
+    field_styles["levelname"] = {"color": "blue"}
+    field_styles["threadName"] = {"color": "blue"}
+    fmt = "[%(asctime)s] [%(threadName)s] [%(levelname).1s] [%(name)s] %(message)s"  # mimic old toil logging format
+    set_logger = set_logger if set_logger else root_logger
+    coloredlogs.install(
+        level=set_logger.getEffectiveLevel(),
+        logger=set_logger,
+        level_styles=level_styles,
+        field_styles=field_styles,
+        datefmt="%Y-%m-%dT%H:%M:%S%z",  # mimic old toil date format
+        fmt=fmt,
+    )
+
+
+def add_logging_options(
+    parser: ArgumentParser, default_level: Optional[int] = None
+) -> None:
+    """
+    Add logging options to set the global log level.
+
+    :param default_level: A logging level, like logging.INFO, to use as the default.
+    """
+    if default_level is None:
+        # Make sure we have a log levle to make the default
+        default_level = DEFAULT_LOGLEVEL
+    default_level_name = logging.getLevelName(default_level)
+
     group = parser.add_argument_group("Logging Options")
-    default_loglevel = logging.getLevelName(DEFAULT_LOGLEVEL)
 
-    levels = ['Critical', 'Error', 'Warning', 'Debug', 'Info']
+    levels = ["Critical", "Error", "Warning", "Info", "Debug", "Trace"]
     for level in levels:
-        group.add_argument(f"--log{level}", dest="logLevel", default=default_loglevel, action="store_const",
-                           const=level, help=f"Turn on loglevel {level}. Default: {default_loglevel}.")
+        group.add_argument(
+            f"--log{level}",
+            dest="logLevel",
+            default=default_level_name,
+            action="store_const",
+            const=level,
+            help=f"Set logging level to {level}. Default: {default_level_name}.",
+        )
 
     levels += [l.lower() for l in levels] + [l.upper() for l in levels]
-    group.add_argument("--logOff", dest="logLevel", default=default_loglevel,
-                       action="store_const", const="CRITICAL", help="Same as --logCRITICAL.")
+    group.add_argument(
+        "--logOff",
+        dest="logLevel",
+        default=default_level_name,
+        action="store_const",
+        const="CRITICAL",
+        help="Same as --logCritical.",
+    )
     # Maybe deprecate the above in favor of --logLevel?
 
-    group.add_argument("--logLevel", dest="logLevel", default=default_loglevel, choices=levels,
-                       help=f"Set the log level. Default: {default_loglevel}. Options: {levels}.")
+    group.add_argument(
+        "--logLevel",
+        dest="logLevel",
+        default=default_level_name,
+        choices=levels,
+        help=f"Set the log level. Default: {default_level_name}. Options: {levels}.",
+    )
     group.add_argument("--logFile", dest="logFile", help="File to log in.")
-    group.add_argument("--rotatingLogging", dest="logRotating", action="store_true", default=False,
-                       help="Turn on rotating logging, which prevents log files from getting too big.")
+    group.add_argument(
+        "--rotatingLogging",
+        dest="logRotating",
+        action="store_true",
+        default=False,
+        help="Turn on rotating logging, which prevents log files from getting too big.",
+    )
+    group.add_argument(
+        "--logColors",
+        dest="colored_logs",
+        default=True,
+        type=strtobool,
+        metavar="BOOL",
+        help="Enable or disable colored logging. Default: %(default)s",
+    )
 
 
 def configure_root_logger() -> None:
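
Note: a short sketch of driving the new options from a script, using only the public names shown in this diff and assuming toil.lib.conversions.strtobool accepts the usual "true"/"false" strings:

    import logging
    from argparse import ArgumentParser

    from toil.statsAndLogging import add_logging_options, set_logging_from_options

    parser = ArgumentParser()
    add_logging_options(parser, default_level=logging.WARNING)
    # "Trace" is a valid --logLevel choice in 8.0.0; --logColors false skips install_log_color().
    options = parser.parse_args(["--logLevel", "Trace", "--logColors", "false"])
    set_logging_from_options(options)
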
@@ -243,8 +383,10 @@ def configure_root_logger() -> None:
     Should be called before any entry point tries to log anything,
     to ensure consistent formatting.
     """
-    logging.basicConfig(format='[%(asctime)s] [%(threadName)-10s] [%(levelname).1s] [%(name)s] %(message)s',
-                        datefmt='%Y-%m-%dT%H:%M:%S%z')
+    logging.basicConfig(
+        format="[%(asctime)s] [%(threadName)-10s] [%(levelname).1s] [%(name)s] %(message)s",
+        datefmt="%Y-%m-%dT%H:%M:%S%z",
+    )
     root_logger.setLevel(DEFAULT_LOGLEVEL)
 
 
@@ -262,10 +404,16 @@ def log_to_file(log_file: Optional[str], log_rotation: bool) -> None:
 
 def set_logging_from_options(options: Union["Config", Namespace]) -> None:
     configure_root_logger()
-    options.logLevel = options.logLevel or logging.getLevelName(root_logger.getEffectiveLevel())
+    options.logLevel = options.logLevel or logging.getLevelName(
+        root_logger.getEffectiveLevel()
+    )
     set_log_level(options.logLevel)
-    logger.debug(f"Root logger is at level '{logging.getLevelName(root_logger.getEffectiveLevel())}', "
-                 f"'toil' logger at level '{logging.getLevelName(toil_logger.getEffectiveLevel())}'.")
+    if options.colored_logs:
+        install_log_color()
+    logger.debug(
+        f"Root logger is at level '{logging.getLevelName(root_logger.getEffectiveLevel())}', "
+        f"'toil' logger at level '{logging.getLevelName(toil_logger.getEffectiveLevel())}'."
+    )
 
     # start logging to log file if specified
     log_to_file(options.logFile, options.logRotating)
@@ -283,18 +431,24 @@ def suppress_exotic_logging(local_logger: str) -> None:
     This is important because some packages, particularly boto3, are not always instantiated yet in the
     environment when this is run, and so we create the logger and set the level preemptively.
     """
-    never_suppress = ['toil', '__init__', '__main__', 'toil-rt', 'cwltool']
-    always_suppress = ['boto3', 'boto', 'botocore']  # ensure we suppress even before instantiated
+    never_suppress = ["toil", "__init__", "__main__", "toil-rt", "cwltool"]
+    always_suppress = [
+        "boto3",
+        "boto",
+        "botocore",
+    ]  # ensure we suppress even before instantiated
 
-    top_level_loggers: List[str] = []
+    top_level_loggers: list[str] = []
 
     # Due to https://stackoverflow.com/questions/61683713
     for pkg_logger in list(logging.Logger.manager.loggerDict.keys()) + always_suppress:
         if pkg_logger != local_logger:
             # many sub-loggers may exist, like "boto.a", "boto.b", "boto.c"; we only want the top_level: "boto"
-            top_level_logger = pkg_logger.split('.')[0] if '.' in pkg_logger else pkg_logger
+            top_level_logger = (
+                pkg_logger.split(".")[0] if "." in pkg_logger else pkg_logger
+            )
 
             if top_level_logger not in top_level_loggers + never_suppress:
                 top_level_loggers.append(top_level_logger)
                 logging.getLogger(top_level_logger).setLevel(logging.CRITICAL)
-    logger.debug(f'Suppressing the following loggers: {set(top_level_loggers)}')
+    logger.debug(f"Suppressing the following loggers: {set(top_level_loggers)}")