toil 8.0.0__py3-none-any.whl → 8.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (270)
  1. toil/__init__.py +4 -39
  2. toil/batchSystems/abstractBatchSystem.py +1 -1
  3. toil/batchSystems/abstractGridEngineBatchSystem.py +1 -1
  4. toil/batchSystems/awsBatch.py +1 -1
  5. toil/batchSystems/cleanup_support.py +1 -1
  6. toil/batchSystems/kubernetes.py +53 -7
  7. toil/batchSystems/local_support.py +1 -1
  8. toil/batchSystems/mesos/batchSystem.py +13 -8
  9. toil/batchSystems/mesos/test/__init__.py +3 -2
  10. toil/batchSystems/options.py +1 -0
  11. toil/batchSystems/singleMachine.py +1 -1
  12. toil/batchSystems/slurm.py +229 -84
  13. toil/bus.py +5 -3
  14. toil/common.py +198 -54
  15. toil/cwl/cwltoil.py +32 -11
  16. toil/job.py +110 -86
  17. toil/jobStores/abstractJobStore.py +24 -3
  18. toil/jobStores/aws/jobStore.py +46 -10
  19. toil/jobStores/fileJobStore.py +25 -1
  20. toil/jobStores/googleJobStore.py +104 -30
  21. toil/leader.py +9 -0
  22. toil/lib/accelerators.py +3 -1
  23. toil/lib/aws/session.py +14 -3
  24. toil/lib/aws/utils.py +92 -35
  25. toil/lib/aws/utils.py.orig +504 -0
  26. toil/lib/bioio.py +1 -1
  27. toil/lib/docker.py +252 -91
  28. toil/lib/dockstore.py +387 -0
  29. toil/lib/ec2nodes.py +3 -2
  30. toil/lib/exceptions.py +5 -3
  31. toil/lib/history.py +1345 -0
  32. toil/lib/history_submission.py +695 -0
  33. toil/lib/io.py +56 -23
  34. toil/lib/misc.py +25 -1
  35. toil/lib/resources.py +2 -1
  36. toil/lib/retry.py +10 -10
  37. toil/lib/threading.py +11 -10
  38. toil/lib/{integration.py → trs.py} +95 -46
  39. toil/lib/web.py +38 -0
  40. toil/options/common.py +25 -2
  41. toil/options/cwl.py +10 -0
  42. toil/options/wdl.py +11 -0
  43. toil/provisioners/gceProvisioner.py +4 -4
  44. toil/server/api_spec/LICENSE +201 -0
  45. toil/server/api_spec/README.rst +5 -0
  46. toil/server/cli/wes_cwl_runner.py +5 -4
  47. toil/server/utils.py +2 -3
  48. toil/statsAndLogging.py +35 -1
  49. toil/test/__init__.py +275 -115
  50. toil/test/batchSystems/batchSystemTest.py +227 -205
  51. toil/test/batchSystems/test_slurm.py +199 -2
  52. toil/test/cactus/pestis.tar.gz +0 -0
  53. toil/test/conftest.py +7 -0
  54. toil/test/cwl/2.fasta +11 -0
  55. toil/test/cwl/2.fastq +12 -0
  56. toil/test/cwl/conftest.py +39 -0
  57. toil/test/cwl/cwlTest.py +1015 -780
  58. toil/test/cwl/directory/directory/file.txt +15 -0
  59. toil/test/cwl/download_directory_file.json +4 -0
  60. toil/test/cwl/download_directory_s3.json +4 -0
  61. toil/test/cwl/download_file.json +6 -0
  62. toil/test/cwl/download_http.json +6 -0
  63. toil/test/cwl/download_https.json +6 -0
  64. toil/test/cwl/download_s3.json +6 -0
  65. toil/test/cwl/download_subdirectory_file.json +5 -0
  66. toil/test/cwl/download_subdirectory_s3.json +5 -0
  67. toil/test/cwl/empty.json +1 -0
  68. toil/test/cwl/mock_mpi/fake_mpi.yml +8 -0
  69. toil/test/cwl/mock_mpi/fake_mpi_run.py +42 -0
  70. toil/test/cwl/optional-file-exists.json +6 -0
  71. toil/test/cwl/optional-file-missing.json +6 -0
  72. toil/test/cwl/optional-file.cwl +18 -0
  73. toil/test/cwl/preemptible_expression.json +1 -0
  74. toil/test/cwl/revsort-job-missing.json +6 -0
  75. toil/test/cwl/revsort-job.json +6 -0
  76. toil/test/cwl/s3_secondary_file.json +16 -0
  77. toil/test/cwl/seqtk_seq_job.json +6 -0
  78. toil/test/cwl/stream.json +6 -0
  79. toil/test/cwl/test_filename_conflict_resolution.ms/table.dat +0 -0
  80. toil/test/cwl/test_filename_conflict_resolution.ms/table.f0 +0 -0
  81. toil/test/cwl/test_filename_conflict_resolution.ms/table.f1 +0 -0
  82. toil/test/cwl/test_filename_conflict_resolution.ms/table.f1i +0 -0
  83. toil/test/cwl/test_filename_conflict_resolution.ms/table.f2 +0 -0
  84. toil/test/cwl/test_filename_conflict_resolution.ms/table.f2_TSM0 +0 -0
  85. toil/test/cwl/test_filename_conflict_resolution.ms/table.f3 +0 -0
  86. toil/test/cwl/test_filename_conflict_resolution.ms/table.f3_TSM0 +0 -0
  87. toil/test/cwl/test_filename_conflict_resolution.ms/table.f4 +0 -0
  88. toil/test/cwl/test_filename_conflict_resolution.ms/table.f4_TSM0 +0 -0
  89. toil/test/cwl/test_filename_conflict_resolution.ms/table.f5 +0 -0
  90. toil/test/cwl/test_filename_conflict_resolution.ms/table.info +0 -0
  91. toil/test/cwl/test_filename_conflict_resolution.ms/table.lock +0 -0
  92. toil/test/cwl/whale.txt +16 -0
  93. toil/test/docs/scripts/example_alwaysfail.py +38 -0
  94. toil/test/docs/scripts/example_alwaysfail_with_files.wdl +33 -0
  95. toil/test/docs/scripts/example_cachingbenchmark.py +117 -0
  96. toil/test/docs/scripts/stagingExampleFiles/in.txt +1 -0
  97. toil/test/docs/scripts/stagingExampleFiles/out.txt +2 -0
  98. toil/test/docs/scripts/tutorial_arguments.py +23 -0
  99. toil/test/docs/scripts/tutorial_debugging.patch +12 -0
  100. toil/test/docs/scripts/tutorial_debugging_hangs.wdl +126 -0
  101. toil/test/docs/scripts/tutorial_debugging_works.wdl +129 -0
  102. toil/test/docs/scripts/tutorial_docker.py +20 -0
  103. toil/test/docs/scripts/tutorial_dynamic.py +24 -0
  104. toil/test/docs/scripts/tutorial_encapsulation.py +28 -0
  105. toil/test/docs/scripts/tutorial_encapsulation2.py +29 -0
  106. toil/test/docs/scripts/tutorial_helloworld.py +15 -0
  107. toil/test/docs/scripts/tutorial_invokeworkflow.py +27 -0
  108. toil/test/docs/scripts/tutorial_invokeworkflow2.py +30 -0
  109. toil/test/docs/scripts/tutorial_jobfunctions.py +22 -0
  110. toil/test/docs/scripts/tutorial_managing.py +29 -0
  111. toil/test/docs/scripts/tutorial_managing2.py +56 -0
  112. toil/test/docs/scripts/tutorial_multiplejobs.py +25 -0
  113. toil/test/docs/scripts/tutorial_multiplejobs2.py +21 -0
  114. toil/test/docs/scripts/tutorial_multiplejobs3.py +22 -0
  115. toil/test/docs/scripts/tutorial_promises.py +25 -0
  116. toil/test/docs/scripts/tutorial_promises2.py +30 -0
  117. toil/test/docs/scripts/tutorial_quickstart.py +22 -0
  118. toil/test/docs/scripts/tutorial_requirements.py +44 -0
  119. toil/test/docs/scripts/tutorial_services.py +45 -0
  120. toil/test/docs/scripts/tutorial_staging.py +45 -0
  121. toil/test/docs/scripts/tutorial_stats.py +64 -0
  122. toil/test/lib/aws/test_iam.py +3 -1
  123. toil/test/lib/dockerTest.py +205 -122
  124. toil/test/lib/test_history.py +236 -0
  125. toil/test/lib/test_trs.py +161 -0
  126. toil/test/provisioners/aws/awsProvisionerTest.py +12 -9
  127. toil/test/provisioners/clusterTest.py +4 -4
  128. toil/test/provisioners/gceProvisionerTest.py +16 -14
  129. toil/test/sort/sort.py +4 -1
  130. toil/test/src/busTest.py +17 -17
  131. toil/test/src/deferredFunctionTest.py +145 -132
  132. toil/test/src/importExportFileTest.py +71 -63
  133. toil/test/src/jobEncapsulationTest.py +27 -28
  134. toil/test/src/jobServiceTest.py +149 -133
  135. toil/test/src/jobTest.py +219 -211
  136. toil/test/src/miscTests.py +66 -60
  137. toil/test/src/promisedRequirementTest.py +163 -169
  138. toil/test/src/regularLogTest.py +24 -24
  139. toil/test/src/resourceTest.py +82 -76
  140. toil/test/src/restartDAGTest.py +51 -47
  141. toil/test/src/resumabilityTest.py +24 -19
  142. toil/test/src/retainTempDirTest.py +60 -57
  143. toil/test/src/systemTest.py +17 -13
  144. toil/test/src/threadingTest.py +29 -32
  145. toil/test/utils/ABCWorkflowDebug/B_file.txt +1 -0
  146. toil/test/utils/ABCWorkflowDebug/debugWorkflow.py +204 -0
  147. toil/test/utils/ABCWorkflowDebug/mkFile.py +16 -0
  148. toil/test/utils/ABCWorkflowDebug/sleep.cwl +12 -0
  149. toil/test/utils/ABCWorkflowDebug/sleep.yaml +1 -0
  150. toil/test/utils/toilDebugTest.py +117 -102
  151. toil/test/utils/toilKillTest.py +54 -53
  152. toil/test/utils/utilsTest.py +303 -229
  153. toil/test/wdl/lint_error.wdl +9 -0
  154. toil/test/wdl/md5sum/empty_file.json +1 -0
  155. toil/test/wdl/md5sum/md5sum-gs.json +1 -0
  156. toil/test/wdl/md5sum/md5sum.1.0.wdl +32 -0
  157. toil/test/wdl/md5sum/md5sum.input +1 -0
  158. toil/test/wdl/md5sum/md5sum.json +1 -0
  159. toil/test/wdl/md5sum/md5sum.wdl +25 -0
  160. toil/test/wdl/miniwdl_self_test/inputs-namespaced.json +1 -0
  161. toil/test/wdl/miniwdl_self_test/inputs.json +1 -0
  162. toil/test/wdl/miniwdl_self_test/self_test.wdl +40 -0
  163. toil/test/wdl/standard_library/as_map.json +16 -0
  164. toil/test/wdl/standard_library/as_map_as_input.wdl +23 -0
  165. toil/test/wdl/standard_library/as_pairs.json +7 -0
  166. toil/test/wdl/standard_library/as_pairs_as_input.wdl +23 -0
  167. toil/test/wdl/standard_library/ceil.json +3 -0
  168. toil/test/wdl/standard_library/ceil_as_command.wdl +16 -0
  169. toil/test/wdl/standard_library/ceil_as_input.wdl +16 -0
  170. toil/test/wdl/standard_library/collect_by_key.json +1 -0
  171. toil/test/wdl/standard_library/collect_by_key_as_input.wdl +23 -0
  172. toil/test/wdl/standard_library/cross.json +11 -0
  173. toil/test/wdl/standard_library/cross_as_input.wdl +19 -0
  174. toil/test/wdl/standard_library/flatten.json +7 -0
  175. toil/test/wdl/standard_library/flatten_as_input.wdl +18 -0
  176. toil/test/wdl/standard_library/floor.json +3 -0
  177. toil/test/wdl/standard_library/floor_as_command.wdl +16 -0
  178. toil/test/wdl/standard_library/floor_as_input.wdl +16 -0
  179. toil/test/wdl/standard_library/keys.json +8 -0
  180. toil/test/wdl/standard_library/keys_as_input.wdl +24 -0
  181. toil/test/wdl/standard_library/length.json +7 -0
  182. toil/test/wdl/standard_library/length_as_input.wdl +16 -0
  183. toil/test/wdl/standard_library/length_as_input_with_map.json +7 -0
  184. toil/test/wdl/standard_library/length_as_input_with_map.wdl +17 -0
  185. toil/test/wdl/standard_library/length_invalid.json +3 -0
  186. toil/test/wdl/standard_library/range.json +3 -0
  187. toil/test/wdl/standard_library/range_0.json +3 -0
  188. toil/test/wdl/standard_library/range_as_input.wdl +17 -0
  189. toil/test/wdl/standard_library/range_invalid.json +3 -0
  190. toil/test/wdl/standard_library/read_boolean.json +3 -0
  191. toil/test/wdl/standard_library/read_boolean_as_command.wdl +17 -0
  192. toil/test/wdl/standard_library/read_float.json +3 -0
  193. toil/test/wdl/standard_library/read_float_as_command.wdl +17 -0
  194. toil/test/wdl/standard_library/read_int.json +3 -0
  195. toil/test/wdl/standard_library/read_int_as_command.wdl +17 -0
  196. toil/test/wdl/standard_library/read_json.json +3 -0
  197. toil/test/wdl/standard_library/read_json_as_output.wdl +31 -0
  198. toil/test/wdl/standard_library/read_lines.json +3 -0
  199. toil/test/wdl/standard_library/read_lines_as_output.wdl +31 -0
  200. toil/test/wdl/standard_library/read_map.json +3 -0
  201. toil/test/wdl/standard_library/read_map_as_output.wdl +31 -0
  202. toil/test/wdl/standard_library/read_string.json +3 -0
  203. toil/test/wdl/standard_library/read_string_as_command.wdl +17 -0
  204. toil/test/wdl/standard_library/read_tsv.json +3 -0
  205. toil/test/wdl/standard_library/read_tsv_as_output.wdl +31 -0
  206. toil/test/wdl/standard_library/round.json +3 -0
  207. toil/test/wdl/standard_library/round_as_command.wdl +16 -0
  208. toil/test/wdl/standard_library/round_as_input.wdl +16 -0
  209. toil/test/wdl/standard_library/size.json +3 -0
  210. toil/test/wdl/standard_library/size_as_command.wdl +17 -0
  211. toil/test/wdl/standard_library/size_as_output.wdl +36 -0
  212. toil/test/wdl/standard_library/stderr.json +3 -0
  213. toil/test/wdl/standard_library/stderr_as_output.wdl +30 -0
  214. toil/test/wdl/standard_library/stdout.json +3 -0
  215. toil/test/wdl/standard_library/stdout_as_output.wdl +30 -0
  216. toil/test/wdl/standard_library/sub.json +3 -0
  217. toil/test/wdl/standard_library/sub_as_input.wdl +17 -0
  218. toil/test/wdl/standard_library/sub_as_input_with_file.wdl +17 -0
  219. toil/test/wdl/standard_library/transpose.json +6 -0
  220. toil/test/wdl/standard_library/transpose_as_input.wdl +18 -0
  221. toil/test/wdl/standard_library/write_json.json +6 -0
  222. toil/test/wdl/standard_library/write_json_as_command.wdl +17 -0
  223. toil/test/wdl/standard_library/write_lines.json +7 -0
  224. toil/test/wdl/standard_library/write_lines_as_command.wdl +17 -0
  225. toil/test/wdl/standard_library/write_map.json +6 -0
  226. toil/test/wdl/standard_library/write_map_as_command.wdl +17 -0
  227. toil/test/wdl/standard_library/write_tsv.json +6 -0
  228. toil/test/wdl/standard_library/write_tsv_as_command.wdl +17 -0
  229. toil/test/wdl/standard_library/zip.json +12 -0
  230. toil/test/wdl/standard_library/zip_as_input.wdl +19 -0
  231. toil/test/wdl/test.csv +3 -0
  232. toil/test/wdl/test.tsv +3 -0
  233. toil/test/wdl/testfiles/croo.wdl +38 -0
  234. toil/test/wdl/testfiles/drop_files.wdl +62 -0
  235. toil/test/wdl/testfiles/drop_files_subworkflow.wdl +13 -0
  236. toil/test/wdl/testfiles/empty.txt +0 -0
  237. toil/test/wdl/testfiles/not_enough_outputs.wdl +33 -0
  238. toil/test/wdl/testfiles/random.wdl +66 -0
  239. toil/test/wdl/testfiles/string_file_coercion.json +1 -0
  240. toil/test/wdl/testfiles/string_file_coercion.wdl +35 -0
  241. toil/test/wdl/testfiles/test.json +4 -0
  242. toil/test/wdl/testfiles/test_boolean.txt +1 -0
  243. toil/test/wdl/testfiles/test_float.txt +1 -0
  244. toil/test/wdl/testfiles/test_int.txt +1 -0
  245. toil/test/wdl/testfiles/test_lines.txt +5 -0
  246. toil/test/wdl/testfiles/test_map.txt +2 -0
  247. toil/test/wdl/testfiles/test_string.txt +1 -0
  248. toil/test/wdl/testfiles/url_to_file.wdl +13 -0
  249. toil/test/wdl/testfiles/url_to_optional_file.wdl +13 -0
  250. toil/test/wdl/testfiles/vocab.json +1 -0
  251. toil/test/wdl/testfiles/vocab.wdl +66 -0
  252. toil/test/wdl/testfiles/wait.wdl +34 -0
  253. toil/test/wdl/wdl_specification/type_pair.json +23 -0
  254. toil/test/wdl/wdl_specification/type_pair_basic.wdl +36 -0
  255. toil/test/wdl/wdl_specification/type_pair_with_files.wdl +36 -0
  256. toil/test/wdl/wdl_specification/v1_spec.json +1 -0
  257. toil/test/wdl/wdl_specification/v1_spec_declaration.wdl +39 -0
  258. toil/test/wdl/wdltoil_test.py +681 -408
  259. toil/test/wdl/wdltoil_test_kubernetes.py +2 -2
  260. toil/version.py +10 -10
  261. toil/wdl/wdltoil.py +350 -123
  262. toil/worker.py +113 -33
  263. {toil-8.0.0.dist-info → toil-8.2.0.dist-info}/METADATA +13 -7
  264. toil-8.2.0.dist-info/RECORD +439 -0
  265. {toil-8.0.0.dist-info → toil-8.2.0.dist-info}/WHEEL +1 -1
  266. toil/test/lib/test_integration.py +0 -104
  267. toil-8.0.0.dist-info/RECORD +0 -253
  268. {toil-8.0.0.dist-info → toil-8.2.0.dist-info}/entry_points.txt +0 -0
  269. {toil-8.0.0.dist-info → toil-8.2.0.dist-info/licenses}/LICENSE +0 -0
  270. {toil-8.0.0.dist-info → toil-8.2.0.dist-info}/top_level.txt +0 -0
toil/lib/history.py ADDED
@@ -0,0 +1,1345 @@
+ # Copyright (C) 2024 Regents of the University of California
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Contains tools for tracking history.
+ """
+
+ from contextlib import contextmanager
+ import logging
+ import os
+ import sqlite3
+ import sys
+ import threading
+ import time
+ import uuid
+ from dataclasses import dataclass
+ from typing import Any, Iterable, Iterator, Optional, TypeVar, Callable
+
+ from toil.lib.conversions import strtobool
+ from toil.lib.io import get_toil_home
+ from toil.lib.retry import ErrorCondition, retry
+
+ logger = logging.getLogger(__name__)
+
+ class HistoryDatabaseSchemaTooNewError(RuntimeError):
+     """
+     Raised when we would write to the history database, but its schema is too
+     new for us to understand.
+     """
+     pass
+
+ @dataclass
+ class WorkflowSummary:
+     """
+     Data class holding summary information for a workflow.
+
+     Represents all the attempts to execute one run of a workflow.
+     """
+     id: str
+     name: Optional[str]
+     job_store: str
+     total_attempts: int
+     total_job_attempts: int
+     succeeded: bool
+     start_time: Optional[float]
+     """
+     Time when the first workflow attempt started, in seconds since epoch.
+
+     None if there are no attempts recorded.
+     """
+     runtime: Optional[float]
+     """
+     Time from the first workflow attempt's start to the last one's end, in seconds.
+
+     None if there are no attempts recorded.
+     """
+     trs_spec: Optional[str]
+
+ @dataclass
+ class WorkflowAttemptSummary:
+     """
+     Data class holding summary information for a workflow attempt.
+
+     Helpfully includes the workflow metadata for Dockstore.
+     """
+     workflow_id: str
+     attempt_number: int
+     succeeded: bool
+     start_time: float
+     runtime: float
+     submitted_to_dockstore: bool
+     batch_system: Optional[str]
+     caching: Optional[bool]
+     toil_version: Optional[str]
+     python_version: Optional[str]
+     platform_system: Optional[str]
+     platform_machine: Optional[str]
+     workflow_job_store: str
+     workflow_trs_spec: Optional[str]
+
+ @dataclass
+ class JobAttemptSummary:
+     """
+     Data class holding summary information for a job attempt within a known
+     workflow attempt.
+     """
+     id: str
+     job_name: str
+     succeeded: bool
+     start_time: float
+     runtime: float
+     submitted_to_dockstore: bool
+     cores: Optional[float]
+     cpu_seconds: Optional[float]
+     memory_bytes: Optional[int]
+     disk_bytes: Optional[int]
+
+
+ RT = TypeVar("RT")
+
+ def db_retry(function: Callable[..., RT]) -> Callable[..., RT]:
+     """
+     Decorate a function with the appropriate retries for accessing the database.
+     """
+     return retry(
+         infinite_retries=True,
+         errors=[
+             ErrorCondition(
+                 error=sqlite3.OperationalError, error_message_must_include="is locked"
+             )
+         ],
+     )(function)
+
+ class HistoryManager:
+     """
+     Class responsible for managing the history of Toil runs.
+     """
+
+     @classmethod
+     def enabled(cls) -> bool:
+         """
+         Return True if history should be read from and written to the database.
+
+         If False, no access at all should be made to the database.
+         """
+         return strtobool(os.environ.get("TOIL_HISTORY", 'True'))
+
+     @classmethod
+     def enabled_job(cls) -> bool:
+         """
+         Return True if job history should be read from and written to the database.
+
+         Always returns False if enabled() returns False.
+         """
+         # TODO: When Dockstore can take job metrics alongside whole-workflow
+         # metrics, and we've tested to make sure history recording doesn't slow
+         # down our leader job processing rate, turn on actual job history logging.
+         return cls.enabled() and strtobool(os.environ.get("TOIL_JOB_HISTORY", 'False'))
+
+     # For testing, we can move the database path for the class.
+     database_path_override: Optional[str] = None
+
+     @classmethod
+     def database_path(cls) -> str:
+         """
+         Get the path at which the database we store history in lives.
+         """
+         if cls.database_path_override is not None:
+             # Under test, we can use a temporary path.
+             return cls.database_path_override
+
+         return os.path.join(get_toil_home(), "history.sqlite")
+
+     @classmethod
+     def connection(cls) -> sqlite3.Connection:
+         """
+         Connect to the history database.
+
+         Caller must not actually use the connection without using
+         ensure_tables() to protect reads and updates.
+
+         Must be called from inside a top-level method marked @db_retry.
+
+         The connection will be in DEFERRED isolation_level, with autocommit off
+         on Python versions that support it. In order to run any commands
+         outside of a transaction use the no_transaction context manager.
+         """
+
+         if not cls.enabled():
+             # Make sure we're not missing an enabled check along any codepath
+             # that wants to access the database.
+             raise RuntimeError("Attempting to connect to database when HistoryManager is disabled!")
+
+         if not os.path.exists(cls.database_path()):
+             # Make the database and protect it from snoopers and busybodies
+             con = sqlite3.connect(cls.database_path())
+             del con
+             os.chmod(cls.database_path(), 0o600)
+
+         con = sqlite3.connect(
+             cls.database_path(),
+             isolation_level="DEFERRED"
+         )
+
+         with cls.no_transaction(con):
+             # Turn on foreign keys.
+             # This has to be outside any transaction.
+             # See <https://stackoverflow.com/q/78898176>
+             con.execute("PRAGMA foreign_keys = ON")
+         # This has the side effect of definitely leaving autocommit off, which
+         # is what we want as the base state.
+
+         # Set up the connection to use the Row class so that we can look up row values by column name and not just order.
+         con.row_factory = sqlite3.Row
+
+         return con
+
+     @classmethod
+     @contextmanager
+     def no_transaction(cls, con: sqlite3.Connection) -> Iterator[None]:
+         """
+         Temporarily disable the constant active transaction on the database
+         connection, on Python versions where it exists.
+
+         Commits the current transaction.
+         """
+
+         con.commit()
+         if hasattr(con, 'autocommit'):
+             con.autocommit = True
+         yield
+         if hasattr(con, 'autocommit'):
+             con.autocommit = False
+
+     @classmethod
+     def ensure_tables(cls, con: sqlite3.Connection, cur: sqlite3.Cursor) -> None:
+         """
+         Ensure that tables exist in the database and the schema is migrated to the current version.
+
+         Leaves the cursor in a transaction where the schema version is known to be correct.
+
+         Must be called from inside a top-level method marked @db_retry.
+
+         :raises HistoryDatabaseSchemaTooNewError: If the schema is newer than the current version.
+         """
+
+         # Python already puts us in a transaction.
+
+         # TODO: Do a try-and-fall-back to avoid sending the table schema for
+         # this every time we do anything.
+         cur.execute("""
+             CREATE TABLE IF NOT EXISTS migrations (
+                 version INT NOT NULL PRIMARY KEY,
+                 description TEXT
+             )
+         """)
+         db_version = next(cur.execute("SELECT MAX(version) FROM migrations"))[0]
+         if db_version is None:
+             db_version = -1
+
+         # This holds pairs of description and command lists.
+         # To make a schema change, ADD A NEW PAIR AT THE END, and include
+         # statements to adjust existing data.
+         migrations = [
+             (
+                 "Make initial tables",
+                 [
+                     """
+                     CREATE TABLE workflows (
+                         id TEXT NOT NULL PRIMARY KEY,
+                         job_store TEXT NOT NULL,
+                         creation_time REAL NOT NULL,
+                         name TEXT,
+                         trs_spec TEXT
+                     )
+                     """,
+                     """
+                     CREATE INDEX idx_workflows_by_creation_time
+                     ON workflows (creation_time)
+                     """,
+                     # There's no reference constraint from the job attempts to
+                     # the workflow attempts because the jobs for a workflow
+                     # attempt need to go in before the attempt is known to be
+                     # finished or failed/before the attempt is submittable to
+                     # Dockstore.
+                     #
+                     # TODO: Should we force workflow attempts to be reported on
+                     # start so that we can have the jobs key-reference them?
+                     # And so that we always have a start time for the workflow
+                     # as a whole?
+                     """
+                     CREATE TABLE job_attempts (
+                         id TEXT NOT NULL PRIMARY KEY,
+                         workflow_id TEXT NOT NULL,
+                         workflow_attempt_number INT NOT NULL,
+                         job_name TEXT NOT NULL,
+                         succeeded INTEGER NOT NULL,
+                         start_time REAL NOT NULL,
+                         runtime REAL NOT NULL,
+                         cores REAL,
+                         cpu_seconds REAL,
+                         memory_bytes INTEGER,
+                         disk_bytes INTEGER,
+                         submitted_to_dockstore INTEGER NOT NULL DEFAULT FALSE,
+                         FOREIGN KEY(workflow_id) REFERENCES workflows(id)
+                     )
+                     """,
+                     """
+                     CREATE INDEX idx_job_attempts_by_workflow_attempt
+                     ON job_attempts (workflow_id, workflow_attempt_number)
+                     """,
+                     """
+                     CREATE TABLE workflow_attempts (
+                         workflow_id TEXT NOT NULL,
+                         attempt_number INTEGER NOT NULL,
+                         succeeded INTEGER NOT NULL,
+                         start_time REAL NOT NULL,
+                         runtime REAL NOT NULL,
+                         batch_system TEXT,
+                         caching INTEGER,
+                         toil_version TEXT,
+                         python_version TEXT,
+                         platform_system TEXT,
+                         platform_machine TEXT,
+                         submitted_to_dockstore INTEGER NOT NULL DEFAULT FALSE,
+                         PRIMARY KEY(workflow_id,attempt_number),
+                         FOREIGN KEY(workflow_id) REFERENCES workflows(id)
+                     )
+                     """
+                 ],
+             ),
+         ]
+
+         if db_version + 1 > len(migrations):
+             raise HistoryDatabaseSchemaTooNewError(f"History database version is {db_version}, but known migrations only go up to {len(migrations) - 1}")
+
+         for migration_number in range(db_version + 1, len(migrations)):
+             for statement_number, statement in enumerate(migrations[migration_number][1]):
+                 # Run all the migration commands.
+                 # We don't use executescript() because (on old Pythons?) it
+                 # commits the current transaction first.
+                 try:
+                     cur.execute(statement)
+                 except sqlite3.OperationalError:
+                     logger.exception("Could not execute migration %s statement %s: %s", migration_number, statement_number, statement)
+                     raise
+             cur.execute("INSERT INTO migrations VALUES (?, ?)", (migration_number, migrations[migration_number][0]))
+
+         # If we did have to migrate, leave everything else we do as part of the migration transaction.
+
+     ##
+     # Recording Methods
+     ##
+
+     @classmethod
+     @db_retry
+     def record_workflow_creation(cls, workflow_id: str, job_store_spec: str) -> None:
+         """
+         Record that a workflow is being run.
+
+         Takes the Toil config's workflow ID and the location of the job store.
+
+         Should only be called on the *first* attempt on a job store, not on a
+         restart.
+
+         A workflow may have multiple attempts to run it, some of which succeed
+         and others of which fail. Probably only the last one should succeed.
+
+         :param job_store_spec: The job store specifier for the workflow. Should
+             be canonical and always start with the type and a colon. If the
+             job store is later moved by the user, the location will not be
+             updated.
+         """
+
+         if not cls.enabled():
+             return
+
+         logger.info("Recording workflow creation of %s in %s", workflow_id, job_store_spec)
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+             cur.execute("INSERT INTO workflows VALUES (?, ?, ?, NULL, NULL)", (workflow_id, job_store_spec, time.time()))
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+         # If we raise out of here the connection goes away and the transaction rolls back.
+
+
+     @classmethod
+     @db_retry
+     def record_workflow_metadata(cls, workflow_id: str, workflow_name: str, trs_spec: Optional[str] = None) -> None:
+         """
+         Associate a name and optionally a TRS ID and version with a workflow run.
+         """
+
+         # TODO: Make name of this function less general?
+
+         if not cls.enabled():
+             return
+
+         logger.info("Workflow %s is a run of %s", workflow_id, workflow_name)
+         if trs_spec:
+             logger.info("Workflow %s has TRS ID and version %s", workflow_id, trs_spec)
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+             cur.execute("UPDATE workflows SET name = ? WHERE id = ?", (workflow_name, workflow_id))
+             if trs_spec is not None:
+                 cur.execute("UPDATE workflows SET trs_spec = ? WHERE id = ?", (trs_spec, workflow_id))
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+     @classmethod
+     @db_retry
+     def record_job_attempt(
+         cls,
+         workflow_id: str,
+         workflow_attempt_number: int,
+         job_name: str,
+         succeeded: bool,
+         start_time: float,
+         runtime: float,
+         cores: Optional[float] = None,
+         cpu_seconds: Optional[float] = None,
+         memory_bytes: Optional[int] = None,
+         disk_bytes: Optional[int] = None
+     ) -> None:
+         """
+         Record that a job ran in a workflow.
+
+         Doesn't expect the provided information to uniquely identify the job
+         attempt; assigns the job attempt its own unique ID.
+
+         Thread safe.
+
+         :param job_name: A human-readable name for the job. Not expected to be
+             a job store ID or to necessarily uniquely identify the job within
+             the workflow.
+         :param start_time: Job execution start time in seconds since epoch.
+         :param runtime: Job execution duration in seconds.
+         :param cores: Number of CPU cores the job was scheduled on.
+         :param cpu_seconds: CPU core-seconds actually consumed.
+         :param memory_bytes: Peak observed job memory usage.
+         :param disk_bytes: Observed job disk usage.
+         """
+
+         if not cls.enabled_job():
+             return
+
+         logger.debug("Workflow %s ran job %s", workflow_id, job_name)
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+             cur.execute(
+                 """
+                 INSERT INTO job_attempts(
+                     id,
+                     workflow_id,
+                     workflow_attempt_number,
+                     job_name,
+                     succeeded,
+                     start_time,
+                     runtime,
+                     cores,
+                     cpu_seconds,
+                     memory_bytes,
+                     disk_bytes
+                 )
+                 VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+                 """,
+                 (
+                     str(uuid.uuid4()),
+                     workflow_id,
+                     workflow_attempt_number,
+                     job_name,
+                     1 if succeeded else 0,
+                     start_time,
+                     runtime,
+                     cores,
+                     cpu_seconds,
+                     memory_bytes,
+                     disk_bytes,
+                 )
+             )
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+     @classmethod
+     @db_retry
+     def record_workflow_attempt(
+         cls,
+         workflow_id: str,
+         workflow_attempt_number: int,
+         succeeded: bool,
+         start_time: float,
+         runtime: float,
+         batch_system: Optional[str] = None,
+         caching: Optional[bool] = None,
+         toil_version: Optional[str] = None,
+         python_version: Optional[str] = None,
+         platform_system: Optional[str] = None,
+         platform_machine: Optional[str] = None
+     ) -> None:
+         """
+         Record a workflow attempt (start or restart) having finished or failed.
+
+         :param batch_system: The Python type name of the batch system implementation used.
+         :param caching: Whether Toil filestore-level caching was used.
+         :param toil_version: Version of Toil used to run the workflow.
+         :param python_version: Version of Python used to run the workflow.
+         :param platform_system: OS ("Darwin", "Linux", etc.) used to run the workflow.
+         :param platform_machine: CPU type ("AMD64", etc.) used to run the workflow leader.
+         """
+
+         if not cls.enabled():
+             return
+
+         logger.info("Workflow %s stopped. Success: %s", workflow_id, succeeded)
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+             cur.execute(
+                 """
+                 INSERT INTO workflow_attempts(
+                     workflow_id,
+                     attempt_number,
+                     succeeded,
+                     start_time,
+                     runtime,
+                     batch_system,
+                     caching,
+                     toil_version,
+                     python_version,
+                     platform_system,
+                     platform_machine
+                 )
+                 VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+                 """,
+                 (
+                     workflow_id,
+                     workflow_attempt_number,
+                     1 if succeeded else 0,
+                     start_time,
+                     runtime,
+                     batch_system,
+                     caching,
+                     toil_version,
+                     python_version,
+                     platform_system,
+                     platform_machine
+                 )
+             )
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+
+     ##
+     # Read methods
+     ##
+
+     # We would implement a bunch of iterators and allow follow-up queries, but
+     # then we'd have to figure out how to make sure we use one connection and
+     # cursor and not block ourselves with the database transaction locks.
+     #
+     # So instead we always fetch all the information asked for and close out
+     # the read transaction before returning.
+     #
+     # This means the caller has to worry about a workflow vanishing or changing
+     # between when it was shown to them and when they ask follow-up questions,
+     # but it also means we can't deadlock.
+
+     @classmethod
+     @db_retry
+     def summarize_workflows(cls) -> list[WorkflowSummary]:
+         """
+         List all known workflows and their summary statistics.
+         """
+
+         if not cls.enabled():
+             return []
+
+         workflows = []
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+             cur.execute(
+                 """
+                 SELECT
+                     workflows.id AS id,
+                     workflows.name AS name,
+                     workflows.job_store AS job_store,
+                     (SELECT count(*) FROM workflow_attempts WHERE workflow_id = workflows.id) AS total_attempts,
+                     (SELECT count(*) FROM job_attempts WHERE workflow_id = workflows.id) AS total_job_attempts,
+                     (SELECT min(count(*), 1) FROM workflow_attempts WHERE workflow_id = workflows.id AND succeeded = TRUE) AS succeeded,
+                     (SELECT min(start_time) FROM workflow_attempts WHERE workflow_id = workflows.id) AS start_time,
+                     (SELECT max(start_time + runtime) FROM workflow_attempts WHERE workflow_id = workflows.id) AS end_time,
+                     workflows.trs_spec AS trs_spec
+                 FROM workflows
+                 ORDER BY start_time DESC
+                 """
+             )
+             for row in cur:
+                 workflows.append(
+                     WorkflowSummary(
+                         id=row["id"],
+                         name=row["name"],
+                         job_store=row["job_store"],
+                         total_attempts=row["total_attempts"],
+                         total_job_attempts=row["total_job_attempts"],
+                         succeeded=(row["succeeded"] == 1),
+                         start_time=row["start_time"],
+                         runtime=(row["end_time"] - row["start_time"]) if row["start_time"] is not None and row["end_time"] is not None else None,
+                         trs_spec=row["trs_spec"]
+                     )
+                 )
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+         return workflows
+
+     @classmethod
+     @db_retry
+     def get_submittable_workflow_attempts(cls, limit: int = sys.maxsize) -> list[WorkflowAttemptSummary]:
+         """
+         List all workflow attempts not yet submitted to Dockstore.
+
+         :param limit: Get no more than this many.
+         """
+
+         if not cls.enabled():
+             return []
+
+         attempts = []
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+             cur.execute(
+                 """
+                 SELECT
+                     workflow_attempts.workflow_id AS workflow_id,
+                     workflow_attempts.attempt_number AS attempt_number,
+                     workflow_attempts.succeeded AS succeeded,
+                     workflow_attempts.start_time AS start_time,
+                     workflow_attempts.runtime AS runtime,
+                     workflow_attempts.batch_system AS batch_system,
+                     workflow_attempts.caching AS caching,
+                     workflow_attempts.toil_version AS toil_version,
+                     workflow_attempts.python_version AS python_version,
+                     workflow_attempts.platform_system AS platform_system,
+                     workflow_attempts.platform_machine AS platform_machine,
+                     workflow_attempts.submitted_to_dockstore AS submitted_to_dockstore,
+                     workflows.job_store AS workflow_job_store,
+                     workflows.trs_spec AS workflow_trs_spec
+                 FROM workflow_attempts
+                 JOIN workflows ON workflow_attempts.workflow_id = workflows.id
+                 WHERE workflow_attempts.submitted_to_dockstore = FALSE
+                 AND workflows.trs_spec IS NOT NULL
+                 ORDER BY start_time DESC
+                 LIMIT ?
+                 """,
+                 (limit,)
+             )
+             for row in cur:
+                 attempts.append(
+                     WorkflowAttemptSummary(
+                         workflow_id=row["workflow_id"],
+                         attempt_number=row["attempt_number"],
+                         succeeded=(row["succeeded"] == 1),
+                         start_time=row["start_time"],
+                         runtime=row["runtime"],
+                         batch_system=row["batch_system"],
+                         caching=(row["caching"] == 1),
+                         toil_version=row["toil_version"],
+                         python_version=row["python_version"],
+                         platform_system=row["platform_system"],
+                         platform_machine=row["platform_machine"],
+                         submitted_to_dockstore=(row["submitted_to_dockstore"] == 1),
+                         workflow_job_store=row["workflow_job_store"],
+                         workflow_trs_spec=row["workflow_trs_spec"]
+                     )
+                 )
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+         return attempts
+
+     @classmethod
+     @db_retry
+     def get_workflow_attempts_with_submittable_job_attempts(cls, limit: int = sys.maxsize) -> list[WorkflowAttemptSummary]:
+         """
+         Get all workflow attempts that have job attempts not yet submitted to
+         Dockstore.
+
+         The workflow attempts themselves will have finished and been recorded,
+         and have TRS IDs.
+
+         :param limit: Get no more than this many.
+         """
+
+         if not cls.enabled_job():
+             return []
+
+         attempts = []
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+             cur.execute(
+                 """
+                 SELECT
+                     workflow_attempts.workflow_id AS workflow_id,
+                     workflow_attempts.attempt_number AS attempt_number,
+                     workflow_attempts.succeeded AS succeeded,
+                     workflow_attempts.start_time AS start_time,
+                     workflow_attempts.runtime AS runtime,
+                     workflow_attempts.batch_system AS batch_system,
+                     workflow_attempts.caching AS caching,
+                     workflow_attempts.toil_version AS toil_version,
+                     workflow_attempts.python_version AS python_version,
+                     workflow_attempts.platform_system AS platform_system,
+                     workflow_attempts.platform_machine AS platform_machine,
+                     workflow_attempts.submitted_to_dockstore AS submitted_to_dockstore,
+                     workflows.job_store AS workflow_job_store,
+                     workflows.trs_spec AS workflow_trs_spec
+                 FROM (
+                     SELECT DISTINCT
+                         workflow_id, workflow_attempt_number
+                     FROM job_attempts
+                     WHERE job_attempts.submitted_to_dockstore = FALSE
+                 ) AS found_job_attempts
+                 JOIN workflows ON found_job_attempts.workflow_id = workflows.id
+                 JOIN workflow_attempts ON
+                     found_job_attempts.workflow_id = workflow_attempts.workflow_id
+                     AND found_job_attempts.workflow_attempt_number = workflow_attempts.attempt_number
+                 WHERE workflows.trs_spec IS NOT NULL
+                 LIMIT ?
+                 """,
+                 (limit,)
+             )
+             for row in cur:
+                 # TODO: Unify row to data class conversion
+                 attempts.append(
+                     WorkflowAttemptSummary(
+                         workflow_id=row["workflow_id"],
+                         attempt_number=row["attempt_number"],
+                         succeeded=(row["succeeded"] == 1),
+                         start_time=row["start_time"],
+                         runtime=row["runtime"],
+                         batch_system=row["batch_system"],
+                         caching=(row["caching"] == 1),
+                         toil_version=row["toil_version"],
+                         python_version=row["python_version"],
+                         platform_system=row["platform_system"],
+                         platform_machine=row["platform_machine"],
+                         submitted_to_dockstore=(row["submitted_to_dockstore"] == 1),
+                         workflow_job_store=row["workflow_job_store"],
+                         workflow_trs_spec=row["workflow_trs_spec"]
+                     )
+                 )
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+         return attempts
+
+     @classmethod
+     @db_retry
+     def get_workflow_attempt(cls, workflow_id: str, attempt_number: int) -> Optional[WorkflowAttemptSummary]:
+         """
+         Get a single (not necessarily unsubmitted, not necessarily TRS-ID-having) workflow attempt summary, if present.
+         """
+
+         # TODO: Consolidate with the other 2 ways to query workflow attempts!
+
+         if not cls.enabled():
+             return None
+
+         attempts = []
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+             cur.execute(
+                 """
+                 SELECT
+                     workflow_attempts.workflow_id AS workflow_id,
+                     workflow_attempts.attempt_number AS attempt_number,
+                     workflow_attempts.succeeded AS succeeded,
+                     workflow_attempts.start_time AS start_time,
+                     workflow_attempts.runtime AS runtime,
+                     workflow_attempts.batch_system AS batch_system,
+                     workflow_attempts.caching AS caching,
+                     workflow_attempts.toil_version AS toil_version,
+                     workflow_attempts.python_version AS python_version,
+                     workflow_attempts.platform_system AS platform_system,
+                     workflow_attempts.platform_machine AS platform_machine,
+                     workflow_attempts.submitted_to_dockstore AS submitted_to_dockstore,
+                     workflows.job_store AS workflow_job_store,
+                     workflows.trs_spec AS workflow_trs_spec
+                 FROM workflow_attempts
+                 JOIN workflows ON workflow_attempts.workflow_id = workflows.id
+                 WHERE workflow_id = ?
+                 AND attempt_number = ?
+                 ORDER BY start_time DESC
+                 LIMIT 1
+                 """,
+                 (workflow_id, attempt_number)
+             )
+             for row in cur:
+                 attempts.append(
+                     WorkflowAttemptSummary(
+                         workflow_id=row["workflow_id"],
+                         attempt_number=row["attempt_number"],
+                         succeeded=(row["succeeded"] == 1),
+                         start_time=row["start_time"],
+                         runtime=row["runtime"],
+                         batch_system=row["batch_system"],
+                         caching=(row["caching"] == 1),
+                         toil_version=row["toil_version"],
+                         python_version=row["python_version"],
+                         platform_system=row["platform_system"],
+                         platform_machine=row["platform_machine"],
+                         submitted_to_dockstore=(row["submitted_to_dockstore"] == 1),
+                         workflow_job_store=row["workflow_job_store"],
+                         workflow_trs_spec=row["workflow_trs_spec"]
+                     )
+                 )
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+         if len(attempts) == 0:
+             # Not found
+             return None
+         else:
+             return attempts[0]
+
+     @classmethod
+     @db_retry
+     def get_unsubmitted_job_attempts(cls, workflow_id: str, attempt_number: int) -> list[JobAttemptSummary]:
+         """
+         List all job attempts in the given workflow attempt not yet submitted to Dockstore.
+
+         Doesn't check to make sure the workflow has a TRS ID.
+         """
+
+         if not cls.enabled_job():
+             return []
+
+         attempts = []
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+             cur.execute(
+                 """
+                 SELECT
+                     id,
+                     job_name,
+                     succeeded,
+                     start_time,
+                     runtime,
+                     cores,
+                     cpu_seconds,
+                     memory_bytes,
+                     disk_bytes,
+                     submitted_to_dockstore
+                 FROM job_attempts
+                 WHERE workflow_id = ?
+                 AND workflow_attempt_number = ?
+                 AND submitted_to_dockstore = FALSE
+                 ORDER BY start_time DESC
+                 """,
+                 (workflow_id, attempt_number)
+             )
+             for row in cur:
+                 attempts.append(
+                     JobAttemptSummary(
+                         id=row["id"],
+                         job_name=row["job_name"],
+                         succeeded=(row["succeeded"] == 1),
+                         start_time=row["start_time"],
+                         runtime=row["runtime"],
+                         cores=row["cores"],
+                         cpu_seconds=row["cpu_seconds"],
+                         memory_bytes=row["memory_bytes"],
+                         disk_bytes=row["disk_bytes"],
+                         submitted_to_dockstore=(row["submitted_to_dockstore"] == 1)
+                     )
+                 )
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+         return attempts
+
+     ###
+     # Submission marking methods
+     ###
+
+     @classmethod
+     @db_retry
+     def mark_workflow_attempt_submitted(cls, workflow_id: str, attempt_number: int) -> None:
+         """
+         Mark a workflow attempt as having been successfully submitted to Dockstore.
+
+         Does not mark the workflow attempt's job attempts as submitted.
+         """
+
+         if not cls.enabled():
+             return
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+             cur.execute(
+                 "UPDATE workflow_attempts SET submitted_to_dockstore = TRUE WHERE workflow_id = ? AND attempt_number = ?",
+                 (workflow_id, attempt_number)
+             )
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+     @classmethod
+     @db_retry
+     def mark_job_attempts_submitted(cls, job_attempt_ids: list[str]) -> None:
+         """
+         Mark a collection of job attempts as submitted to Dockstore in a single transaction.
+         """
+
+         if not cls.enabled_job():
+             return
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+             for job_attempt_id in job_attempt_ids:
+                 # Do all the marking in one transaction
+                 cur.execute(
+                     "UPDATE job_attempts SET submitted_to_dockstore = TRUE WHERE id = ?",
+                     (job_attempt_id,)
+                 )
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+     @classmethod
+     @db_retry
+     def count_workflows(cls) -> int:
+         """
+         Count workflows in the database.
+         """
+
+         if not cls.enabled():
+             return 0
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+
+             cur.execute("SELECT count(*) FROM workflows")
+
+             count = cur.fetchone()[0]
+             assert isinstance(count, int)
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+         return count
+
+     @classmethod
+     @db_retry
+     def count_workflow_attempts(cls) -> int:
+         """
+         Count workflow attempts in the database.
+         """
+
+         if not cls.enabled():
+             return 0
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+
+             cur.execute("SELECT count(*) FROM workflow_attempts")
+
+             count = cur.fetchone()[0]
+             assert isinstance(count, int)
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+         return count
+
+     @classmethod
+     @db_retry
+     def count_job_attempts(cls) -> int:
+         """
+         Count job attempts in the database.
+         """
+
+         if not cls.enabled_job():
+             return 0
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+
+             cur.execute("SELECT count(*) FROM job_attempts")
+
+             count = cur.fetchone()[0]
+             assert isinstance(count, int)
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+         return count
+
+     @classmethod
+     @db_retry
+     def get_fully_submitted_workflow_ids(cls, limit: int = sys.maxsize) -> list[str]:
+         """
+         Get workflows that have a successful attempt and no unsubmitted attempts or job attempts.
+         """
+
+         if not cls.enabled():
+             return []
+
+         ids = []
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+
+             cur.execute(
+                 """
+                 SELECT
+                     workflows.id
+                 FROM workflows
+                 WHERE
+                     (
+                         SELECT
+                             count(*)
+                         FROM workflow_attempts
+                         WHERE workflow_id = workflows.id
+                         AND succeeded = TRUE
+                         AND submitted_to_dockstore = TRUE
+                         LIMIT 1
+                     ) = 1
+                     AND (
+                         SELECT
+                             count(*)
+                         FROM workflow_attempts
+                         WHERE workflow_id = workflows.id
+                         AND submitted_to_dockstore = FALSE
+                         LIMIT 1
+                     ) = 0
+                     AND (
+                         SELECT
+                             count(*)
+                         FROM job_attempts
+                         WHERE workflow_id = workflows.id
+                         AND submitted_to_dockstore = FALSE
+                         LIMIT 1
+                     ) = 0
+                 LIMIT ?
+                 """,
+                 (limit,)
+             )
+             for row in cur:
+                 ids.append(row["id"])
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+         return ids
+
+     @classmethod
+     @db_retry
+     def get_oldest_workflow_ids(cls, limit: int = sys.maxsize) -> list[str]:
+         """
+         Get workflows that are old.
+         """
+
+         if not cls.enabled():
+             return []
+
+         ids = []
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+
+             # We could use a complicated query to bump workflows down the list
+             # if they have been updated by having attempts or job attempts. But
+             # that would mean we'd need to do a lot of querying and live
+             # sorting, whereas using just the creation time lets us use an
+             # index and a limit efficiently.
+
+             cur.execute(
+                 """
+                 SELECT
+                     id,
+                     creation_time
+                 FROM workflows
+                 ORDER BY creation_time ASC
+                 LIMIT ?
+                 """,
+                 (limit,)
+             )
+             for row in cur:
+                 ids.append(row["id"])
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+         return ids
+
+     @classmethod
+     @db_retry
+     def delete_workflow(cls, workflow_id: str) -> None:
+         """
+         Delete a workflow and all its attempts and job attempts.
+
+         Succeeds if the workflow does not exist.
+         """
+
+         if not cls.enabled():
+             return
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+
+             cur.execute("DELETE FROM job_attempts WHERE workflow_id = ?", (workflow_id,))
+             cur.execute("DELETE FROM workflow_attempts WHERE workflow_id = ?", (workflow_id,))
+             cur.execute("DELETE FROM workflows WHERE id = ?", (workflow_id,))
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+     @classmethod
+     @db_retry
+     def get_database_byte_size(cls) -> int:
+         """
+         Get the total number of bytes used by the database.
+         """
+
+         if not cls.enabled():
+             return 0
+
+         con = cls.connection()
+         cur = con.cursor()
+         try:
+             cls.ensure_tables(con, cur)
+
+             cur.execute("PRAGMA page_size")
+             page_size = cur.fetchone()[0]
+             assert isinstance(page_size, int)
+
+             cur.execute("PRAGMA page_count")
+             page_count = cur.fetchone()[0]
+             assert isinstance(page_count, int)
+
+         except:
+             con.rollback()
+             con.close()
+             raise
+         else:
+             con.commit()
+             con.close()
+
+         return page_size * page_count
+
+     @classmethod
+     @db_retry
+     def compact_database(cls) -> None:
+         """
+         Shrink the database to remove unused space.
+         """
+
+         if not cls.enabled():
+             return
+
+         con = cls.connection()
+         cur = con.cursor()
+
+         # Don't bother making tables; we don't need them for this and they need
+         # a transaction.
+
+         with cls.no_transaction(con):
+             # Do the vacuum outside any transaction, and rely on it to
+             # synchronize appropriately internally.
+             cur.execute("VACUUM")
+
+         con.close()
+
+     @classmethod
+     def enforce_byte_size_limit(cls, limit: int = 100 * 1024 * 1024) -> None:
+         """
+         Shrink the database until it is smaller than the given limit, or until
+         it is empty, by throwing away workflows.
+
+         Throws data away in a sensible order, least important to most
+         important.
+         """
+
+         if not cls.enabled():
+             return
+
+         db_size = cls.get_database_byte_size()
+
+         if db_size < limit:
+             # Nothing to do!
+             return
+
+         while db_size > limit:
+             # Look for some things we submitted already
+             target_workflows = cls.get_fully_submitted_workflow_ids(limit=100)
+             if len(target_workflows) == 0:
+                 # If there aren't any, do oldest workflows a few at a time
+                 # We need to balance the O(n^2)
+                 # delete-and-copy-the-whole-db-to-vacuum loop with not wanting
+                 # to delete too many workflows we could keep.
+                 target_workflows = cls.get_oldest_workflow_ids(limit=10)
+                 if len(target_workflows) == 0:
+                     # There are no more workflows to delete.
+                     break
+
+             for workflow_id in target_workflows:
+                 # Delete all the workflows we don't want.
+                 cls.delete_workflow(workflow_id)
+
+             # Shrink the DB
+             cls.compact_database()
+             # Re-check the size
+             db_size = cls.get_database_byte_size()
+
+
+
+
+
+     @classmethod
+     def database_dump_lines(cls) -> Iterable[str]:
+         """
+         Yield lines from the database dump.
+
+         For debugging tests.
+         """
+
+         if not cls.enabled():
+             return []
+
+         return cls.connection().iterdump()
+
+
+
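
For orientation, here is a minimal usage sketch (not part of the package) of the HistoryManager API added above, assuming toil 8.2.0 is installed and TOIL_HISTORY is left at its default of enabled. The database path, workflow name, and job store location below are made-up illustration values; only methods shown in the diff are called.

    import time
    import uuid

    from toil.lib.history import HistoryManager

    # Point the class at a throwaway database instead of the default
    # history.sqlite under the Toil home directory. database_path_override
    # exists for testing, per the code above; this path is hypothetical.
    HistoryManager.database_path_override = "/tmp/toil-history-demo.sqlite"

    workflow_id = str(uuid.uuid4())  # illustration value
    start = time.time()

    # Record one workflow run and one finished attempt, roughly as the Toil
    # leader would.
    HistoryManager.record_workflow_creation(workflow_id, "file:/tmp/demo-jobstore")
    HistoryManager.record_workflow_metadata(workflow_id, "demo-workflow")
    HistoryManager.record_workflow_attempt(
        workflow_id, 1, succeeded=True, start_time=start, runtime=5.0
    )

    # Read back summaries, then prune the database to the default size cap.
    for summary in HistoryManager.summarize_workflows():
        print(summary.id, summary.name, summary.succeeded)
    HistoryManager.enforce_byte_size_limit()

Note that per the code above, job-attempt recording (record_job_attempt and the related queries) is additionally gated behind TOIL_JOB_HISTORY, which defaults to disabled.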