toil 8.0.0__py3-none-any.whl → 8.1.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. toil/__init__.py +4 -4
  2. toil/batchSystems/options.py +1 -0
  3. toil/batchSystems/slurm.py +227 -83
  4. toil/common.py +161 -45
  5. toil/cwl/cwltoil.py +31 -10
  6. toil/job.py +47 -38
  7. toil/jobStores/aws/jobStore.py +46 -10
  8. toil/lib/aws/session.py +14 -3
  9. toil/lib/aws/utils.py +92 -35
  10. toil/lib/dockstore.py +379 -0
  11. toil/lib/ec2nodes.py +3 -2
  12. toil/lib/history.py +1271 -0
  13. toil/lib/history_submission.py +681 -0
  14. toil/lib/io.py +22 -1
  15. toil/lib/misc.py +18 -0
  16. toil/lib/retry.py +10 -10
  17. toil/lib/{integration.py → trs.py} +95 -46
  18. toil/lib/web.py +38 -0
  19. toil/options/common.py +17 -2
  20. toil/options/cwl.py +10 -0
  21. toil/provisioners/gceProvisioner.py +4 -4
  22. toil/server/cli/wes_cwl_runner.py +3 -3
  23. toil/server/utils.py +2 -3
  24. toil/statsAndLogging.py +35 -1
  25. toil/test/batchSystems/test_slurm.py +172 -2
  26. toil/test/cwl/conftest.py +39 -0
  27. toil/test/cwl/cwlTest.py +105 -2
  28. toil/test/cwl/optional-file.cwl +18 -0
  29. toil/test/lib/test_history.py +212 -0
  30. toil/test/lib/test_trs.py +161 -0
  31. toil/test/wdl/wdltoil_test.py +1 -1
  32. toil/version.py +10 -10
  33. toil/wdl/wdltoil.py +23 -9
  34. toil/worker.py +113 -33
  35. {toil-8.0.0.dist-info → toil-8.1.0b1.dist-info}/METADATA +9 -4
  36. {toil-8.0.0.dist-info → toil-8.1.0b1.dist-info}/RECORD +40 -34
  37. {toil-8.0.0.dist-info → toil-8.1.0b1.dist-info}/WHEEL +1 -1
  38. toil/test/lib/test_integration.py +0 -104
  39. {toil-8.0.0.dist-info → toil-8.1.0b1.dist-info}/LICENSE +0 -0
  40. {toil-8.0.0.dist-info → toil-8.1.0b1.dist-info}/entry_points.txt +0 -0
  41. {toil-8.0.0.dist-info → toil-8.1.0b1.dist-info}/top_level.txt +0 -0
toil/statsAndLogging.py CHANGED
@@ -24,6 +24,7 @@ from typing import IO, TYPE_CHECKING, Any, Callable, Optional, Union
 
 from toil.lib.conversions import strtobool
 from toil.lib.expando import Expando
+from toil.lib.history import HistoryManager
 from toil.lib.resources import ResourceMonitor
 
 if TYPE_CHECKING:
@@ -168,6 +169,7 @@ class StatsAndLogging:
         The following function is used for collating stats/reporting log messages from the workers.
         Works inside of a thread, collates as long as the stop flag is not True.
         """
+
         # Overall timing
         startTime = time.time()
         startClock = ResourceMonitor.get_total_cpu_time()
@@ -231,8 +233,40 @@ class StatsAndLogging:
                 )
             cls.writeLogFiles(jobNames, messages, config=config)
 
+            try:
+                jobs = stats.jobs
+            except AttributeError:
+                pass
+            else:
+                for job in jobs:
+                    try:
+                        # Here we're talking to job._executor which fills in these stats.
+
+                        # Convince MyPy we won't be sent any job stats without
+                        # a workflow ID. You can't set up the job store without
+                        # one, but if we're somehow missing one, keep the stats
+                        # and logging thread up.
+                        assert config.workflowID is not None
+
+                        # TODO: Use better job names!
+                        HistoryManager.record_job_attempt(
+                            config.workflowID,
+                            config.workflowAttemptNumber,
+                            job.class_name,
+                            job.succeeded == "True",
+                            float(job.start),
+                            float(job.time),
+                            cores=float(job.requested_cores),
+                            cpu_seconds=float(job.clock),
+                            memory_bytes=int(job.memory) * 1024,
+                            disk_bytes=int(job.disk)
+                        )
+                    except:
+                        logger.exception("Could not record job attempt in history!")
+                        # Keep going. Don't fail the workflow for history-related issues.
+
         while True:
-            # This is a indirect way of getting a message to the thread to exit
+            # This is an indirect way of getting a message to the thread to exit
             if stop.is_set():
                 jobStore.read_logs(callback)
                 break
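Side note on the statsAndLogging.py change above: the log-collation callback now forwards each worker-reported job record to HistoryManager.record_job_attempt, and the numeric fields arrive as strings and are cast there. A rough, illustrative sketch of just those conversions follows; the to_history_args helper and the sample record are hypothetical, and the memory field being reported in KiB is inferred from the `* 1024` in the diff:

```python
# Illustrative only: the shape of the conversion the new history hook applies
# to one worker-reported job stats record. Field names mirror the diff above;
# the helper and the sample record are hypothetical.
def to_history_args(job: dict) -> dict:
    return {
        "job_name": job["class_name"],
        "succeeded": job["succeeded"] == "True",    # success flag arrives as a string
        "start_time": float(job["start"]),
        "runtime": float(job["time"]),              # wall-clock seconds
        "cores": float(job["requested_cores"]),
        "cpu_seconds": float(job["clock"]),
        "memory_bytes": int(job["memory"]) * 1024,  # assumed: worker reports memory in KiB
        "disk_bytes": int(job["disk"]),
    }

print(to_history_args({
    "class_name": "ExampleJob", "succeeded": "True", "start": "1700000000.0",
    "time": "12.5", "requested_cores": "1", "clock": "11.9",
    "memory": "204800", "disk": "1048576",
}))
```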
toil/test/batchSystems/test_slurm.py CHANGED
@@ -1,17 +1,24 @@
+import errno
 import textwrap
 from queue import Queue
 
+import logging
 import pytest
+import sys
 
 import toil.batchSystems.slurm
 from toil.batchSystems.abstractBatchSystem import (
     EXIT_STATUS_UNAVAILABLE_VALUE,
     BatchJobExitReason,
+    BatchSystemSupport,
 )
 from toil.common import Config
 from toil.lib.misc import CalledProcessErrorStderr
 from toil.test import ToilTest
 
+logger = logging.getLogger(__name__)
+
+
 # TODO: Come up with a better way to mock the commands then monkey-patching the
 # command-calling functions.
 
@@ -29,6 +36,9 @@ def call_sacct(args, **_) -> str:
     1236|FAILED|0:2
     1236.extern|COMPLETED|0:0
     """
+    if sum(len(a) for a in args) > 1000:
+        # Simulate if the argument list is too long
+        raise OSError(errno.E2BIG, "Argument list is too long")
     # Fake output per fake job-id.
     sacct_info = {
         609663: "609663|FAILED|0:2\n609663.extern|COMPLETED|0:0\n",
@@ -173,14 +183,34 @@ def call_sacct_raises(*_):
         1, "sacct: error: Problem talking to the database: " "Connection timed out"
     )
 
+def call_sinfo(*_) -> str:
+    """
+    Simulate asking for partition info from Slurm
+    """
+    stdout = textwrap.dedent(
+        """\
+        PARTITION GRES TIMELIMIT PRIO_TIER CPUS MEMORY
+        short* (null) 1:00:00 500 256+ 1996800+
+        medium (null) 12:00:00 500 256+ 1996800+
+        long (null) 14-00:00:00 500 256+ 1996800+
+        gpu gpu:A100:8 7-00:00:00 5000 256 996800
+        gpu gpu:A5500:8 7-00:00:00 5000 256 1996800
+        high_priority gpu:A5500:8 7-00:00:00 65000 256 1996800
+        high_priority (null) 7-00:00:00 65000 256+ 1996800+
+        simple_nodelist gpu:A100:8 1:00 65000 256 996800
+        simple_nodelist gpu:A5500:8 1:00 65000 256 1996800
+        simple_nodelist (null) 1:00 65000 256+ 1996800+
+        """
+    )
+    return stdout
 
-class FakeBatchSystem:
+class FakeBatchSystem(BatchSystemSupport):
     """
     Class that implements a minimal Batch System, needed to create a Worker (see below).
     """
 
     def __init__(self):
-        self.config = self.__fake_config()
+        super().__init__(self.__fake_config(), float("inf"), sys.maxsize, sys.maxsize)
 
     def getWaitDuration(self):
         return 10
@@ -198,8 +228,12 @@ class FakeBatchSystem:
 
         config.workflowID = str(uuid4())
         config.cleanWorkDir = "always"
+        toil.batchSystems.slurm.SlurmBatchSystem.setOptions(lambda o: setattr(config, o, None))
         return config
 
+# Make the mock class not have abstract methods anymore, even though we don't
+# implement them. See <https://stackoverflow.com/a/17345619>.
+FakeBatchSystem.__abstractmethods__ = set()
 
 class SlurmTest(ToilTest):
     """
@@ -262,6 +296,13 @@ class SlurmTest(ToilTest):
         result = self.worker._getJobDetailsFromSacct(list(expected_result))
         assert result == expected_result, f"{result} != {expected_result}"
 
+    def test_getJobDetailsFromSacct_argument_list_too_big(self):
+        self.monkeypatch.setattr(toil.batchSystems.slurm, "call_command", call_sacct)
+        expected_result = {i: (None, None) for i in range(2000)}
+        result = self.worker._getJobDetailsFromSacct(list(expected_result))
+        assert result == expected_result, f"{result} != {expected_result}"
+
+
     ####
     #### tests for _getJobDetailsFromScontrol()
     ####
@@ -449,3 +490,132 @@ class SlurmTest(ToilTest):
             pass
         else:
             assert False, "Exception CalledProcessErrorStderr not raised"
+
+    ###
+    ### Tests for partition selection
+    ##
+
+    def test_PartitionSet_get_partition(self):
+        self.monkeypatch.setattr(toil.batchSystems.slurm, "call_command", call_sinfo)
+        ps = toil.batchSystems.slurm.SlurmBatchSystem.PartitionSet()
+
+        # At zero. short will win because simple_nodelist has higher priority.
+        self.assertEqual(ps.get_partition(0), "short")
+        # Easily within the partition
+        self.assertEqual(ps.get_partition(10 * 60), "short")
+        # Exactly on the boundary
+        self.assertEqual(ps.get_partition(60 * 60), "short")
+        # Well within the next partition
+        self.assertEqual(ps.get_partition(2 * 60 * 60), "medium")
+        # Can only fit in long
+        self.assertEqual(ps.get_partition(8 * 24 * 60 * 60), "long")
+        # Could fit in gpu or long
+        self.assertEqual(ps.get_partition(6 * 24 * 60 * 60), "long")
+        # Can't fit in anything
+        with self.assertRaises(Exception):
+            ps.get_partition(365 * 24 * 60 * 60)
+
+    def test_PartitionSet_default_gpu_partition(self):
+        self.monkeypatch.setattr(toil.batchSystems.slurm, "call_command", call_sinfo)
+        ps = toil.batchSystems.slurm.SlurmBatchSystem.PartitionSet()
+
+        # Make sure we picked the useful-length GPU partition and not the super
+        # short one.
+        self.assertEqual(ps.default_gpu_partition.partition_name, "gpu")
+
+    def test_prepareSbatch_partition(self):
+        self.monkeypatch.setattr(toil.batchSystems.slurm, "call_command", call_sinfo)
+        ps = toil.batchSystems.slurm.SlurmBatchSystem.PartitionSet()
+        self.worker.boss.partitions = ps
+        # This is in seconds
+        self.worker.boss.config.slurm_time = 30
+
+        # Without a partition override in the environment, we should get the
+        # "short" partition for this job
+        command = self.worker.prepareSbatch(1, 100, 5, "job5", None, None)
+        assert "--partition=short" in command
+
+        # With a partition override, we should not. But the override will be rewritten.
+        self.worker.boss.config.slurm_args = "--something --partition foo --somethingElse"
+        command = self.worker.prepareSbatch(1, 100, 5, "job5", None, None)
+        assert "--partition=short" not in command
+        assert "--partition=foo" in command
+
+        # All ways of setting partition should work, including =
+        self.worker.boss.config.slurm_args = "--something --partition=foo --somethingElse"
+        command = self.worker.prepareSbatch(1, 100, 5, "job5", None, None)
+        assert "--partition=short" not in command
+        assert "--partition=foo" in command
+
+        # And short options
+        self.worker.boss.config.slurm_args = "--something -p foo --somethingElse"
+        command = self.worker.prepareSbatch(1, 100, 5, "job5", None, None)
+        assert "--partition=short" not in command
+        assert "--partition=foo" in command
+
+        # Partition settings from the config should override automatic selection
+        self.worker.boss.config.slurm_partition = "foobar"
+        self.worker.boss.config.slurm_args = "--something --somethingElse"
+        command = self.worker.prepareSbatch(1, 100, 5, "job5", None, None)
+        assert "--partition=foobar" in command
+
+        # But they should be overridden by the argument overrides
+        self.worker.boss.config.slurm_args = "--something --partition=baz --somethingElse"
+        command = self.worker.prepareSbatch(1, 100, 5, "job5", None, None)
+        assert "--partition=baz" in command
+
+    def test_prepareSbatch_time(self):
+        self.monkeypatch.setattr(toil.batchSystems.slurm, "call_command", call_sinfo)
+        ps = toil.batchSystems.slurm.SlurmBatchSystem.PartitionSet()
+        self.worker.boss.partitions = ps
+        # This is in seconds
+        self.worker.boss.config.slurm_time = 30
+
+        # Without a time override in the environment, we should use the normal
+        # time and the "short" partition
+        command = self.worker.prepareSbatch(1, 100, 5, "job5", None, None)
+        logger.debug("Command: %s", command)
+        assert "--time=0:30" in command
+        assert "--partition=short" in command
+
+        # With a time override, we should use it, slightly translated, and it
+        # should change the selected partition.
+        self.worker.boss.config.slurm_args = "--something --time 10:00:00 --somethingElse"
+        command = self.worker.prepareSbatch(1, 100, 5, "job5", None, None)
+        logger.debug("Command: %s", command)
+        assert "--partition=medium" in command
+        assert "--time=0:36000" in command
+
+        # All ways of setting time should work, including =
+        self.worker.boss.config.slurm_args = "--something --time=10:00:00 --somethingElse"
+        command = self.worker.prepareSbatch(1, 100, 5, "job5", None, None)
+        logger.debug("Command: %s", command)
+        assert "--partition=medium" in command
+        assert "--time=0:36000" in command
+
+        # And short options
+        self.worker.boss.config.slurm_args = "--something -t 10:00:00 --somethingElse"
+        command = self.worker.prepareSbatch(1, 100, 5, "job5", None, None)
+        logger.debug("Command: %s", command)
+        assert "--partition=medium" in command
+        assert "--time=0:36000" in command
+
+    def test_prepareSbatch_export(self):
+        self.monkeypatch.setattr(toil.batchSystems.slurm, "call_command", call_sinfo)
+        ps = toil.batchSystems.slurm.SlurmBatchSystem.PartitionSet()
+        self.worker.boss.partitions = ps
+
+        # Without any overrides, we need --export=ALL
+        command = self.worker.prepareSbatch(1, 100, 5, "job5", None, None)
+        assert "--export=ALL" in command
+
+        # With overrides, we don't get --export=ALL
+        self.worker.boss.config.slurm_args = "--export=foo"
+        command = self.worker.prepareSbatch(1, 100, 5, "job5", None, None)
+        assert "--export=ALL" not in command
+
+        # With --export-file, we don't get --export=ALL as documented.
+        self.worker.boss.config.slurm_args = "--export-file=./thefile.txt"
+        command = self.worker.prepareSbatch(1, 100, 5, "job5", None, None)
+        assert "--export=ALL" not in command
+
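A note on the expected strings in the new prepareSbatch time tests above: a `--time 10:00:00` override is read back as 36000 seconds (which is what drives partition selection), and the limit handed to sbatch is rendered as `--time=0:36000`, i.e. zero minutes plus the whole duration in seconds in Slurm's minutes:seconds form. The helpers below are an illustrative sketch that mirrors those assertions, not Toil's actual implementation:

```python
# Hypothetical helpers mirroring the behaviour the tests above assert; Toil's
# real code lives in toil/batchSystems/slurm.py and may differ in detail.
def parse_slurm_time(spec: str) -> int:
    """Convert a Slurm limit like "10:00:00", "14-00:00:00" or "1:00" to seconds."""
    days = 0
    if "-" in spec:
        day_part, spec = spec.split("-", 1)
        days = int(day_part)
    parts = [int(p) for p in spec.split(":")]
    while len(parts) < 3:
        parts.insert(0, 0)          # "M:S" is treated as 0 hours, M minutes, S seconds
    hours, minutes, seconds = parts
    return ((days * 24 + hours) * 60 + minutes) * 60 + seconds

def render_sbatch_time(seconds: int) -> str:
    # Slurm reads "minutes:seconds", so "0:<seconds>" expresses any duration.
    return f"--time=0:{seconds}"

assert parse_slurm_time("10:00:00") == 36000        # long enough to need "medium" above
assert render_sbatch_time(36000) == "--time=0:36000"
assert render_sbatch_time(30) == "--time=0:30"
```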
toil/test/cwl/conftest.py CHANGED
@@ -14,4 +14,43 @@
 
 # https://pytest.org/latest/example/pythoncollection.html
 
+import json
+import logging
+from io import StringIO
+from typing import Any, Dict, List, Optional, Tuple
+
+from cwltest import utils
+logger = logging.getLogger(__name__)
+
 collect_ignore = ["spec"]
+
+
+# Hook into Pytest for testing CWL conformance with Toil
+# https://pytest.org/en/6.2.x/writing_plugins.html?highlight=conftest#conftest-py-local-per-directory-plugins
+# See cwltool's reference implementation:
+# https://github.com/common-workflow-language/cwltool/blob/05af6c1357c327b3146e9f5da40e7c0aa3e6d976/tests/cwl-conformance/cwltool-conftest.py
+def pytest_cwl_execute_test(
+    config: utils.CWLTestConfig,
+    processfile: str,
+    jobfile: Optional[str]
+) -> Tuple[int, Optional[Dict[str, Any]]]:
+    """Use the CWL reference runner (cwltool) to execute tests."""
+    from toil.cwl.cwltoil import main
+
+    stdout = StringIO()
+    argsl: List[str] = [f"--outdir={config.outdir}"]
+    if config.runner_quiet:
+        argsl.append("--quiet")
+    elif config.verbose:
+        argsl.append("--debug")
+    argsl.extend(config.args)
+    argsl.append(processfile)
+    if jobfile:
+        argsl.append(jobfile)
+    try:
+        result = main(args=argsl, stdout=stdout)
+    except Exception as e:
+        logger.error(e)
+        return 1, {}
+    out = stdout.getvalue()
+    return result, json.loads(out) if out else {}
toil/test/cwl/cwlTest.py CHANGED
@@ -449,7 +449,7 @@ class CWLWorkflowTest(ToilTest):
         main_args = [
             "--outdir",
             self.outDir,
-            "#workflow/github.com/dockstore-testing/md5sum-checker",
+            "#workflow/github.com/dockstore-testing/md5sum-checker:master",
             "https://raw.githubusercontent.com/dockstore-testing/md5sum-checker/refs/heads/master/md5sum/md5sum-input-cwl.json"
         ]
         cwltoil.main(main_args, stdout=stdout)
@@ -703,6 +703,50 @@ class CWLWorkflowTest(ToilTest):
         except subprocess.CalledProcessError:
             pass
 
+    def test_caching(self) -> None:
+        log.info("Running CWL caching test.")
+        from toil.cwl import cwltoil
+
+        outDir = self._createTempDir()
+        cacheDir = self._createTempDir()
+
+        cwlDir = os.path.join(self._projectRootPath(), "src", "toil", "test", "cwl")
+        log_path = os.path.join(outDir, "log")
+        cmd = [
+            "--outdir",
+            outDir,
+            "--jobStore",
+            os.path.join(outDir, "jobStore"),
+            "--clean=always",
+            "--no-container",
+            "--cachedir",
+            cacheDir,
+            os.path.join(cwlDir, "revsort.cwl"),
+            os.path.join(cwlDir, "revsort-job.json"),
+        ]
+        st = StringIO()
+        ret = cwltoil.main(cmd, stdout=st)
+        assert ret == 0
+        # cwltool hashes certain steps into directories, ensure it exists
+        # since cwltool caches per task and revsort has 2 cwl tasks, there should be 2 directories and 2 status files
+        assert (len(os.listdir(cacheDir)) == 4)
+
+        # Rerun the workflow to ensure there is a cache hit and that we don't rerun the tools
+        st = StringIO()
+        cmd = [
+            "--writeLogsFromAllJobs=True",
+            "--writeLogs",
+            log_path
+        ] + cmd
+        ret = cwltoil.main(cmd, stdout=st)
+        assert ret == 0
+
+        # Ensure all of the worker logs are using their cached outputs
+        for file in os.listdir(log_path):
+            assert "Using cached output" in open(os.path.join(log_path, file), encoding="utf-8").read()
+
+
+
     @needs_aws_s3
     def test_streamable(self, extra_args: Optional[list[str]] = None) -> None:
         """
@@ -926,6 +970,58 @@ class CWLWorkflowTest(ToilTest):
             }
         }
 
+    def test_missing_import(self) -> None:
+        tmp_path = self._createTempDir()
+        out_dir = os.path.join(tmp_path, "cwl-out-dir")
+        toil = "toil-cwl-runner"
+        options = [
+            f"--outdir={out_dir}",
+            "--clean=always",
+        ]
+        cmd = [toil] + options + ["src/toil/test/cwl/revsort.cwl", "src/toil/test/cwl/revsort-job-missing.json"]
+        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        stdout, stderr = p.communicate()
+        # Make sure that the missing file is mentioned in the log so the user knows
+        assert b"missing.txt" in stderr
+        assert p.returncode == 1
+
+    @needs_aws_s3
+    def test_optional_secondary_files_exists(self) -> None:
+        tmp_path = self._createTempDir()
+        out_dir = os.path.join(tmp_path, "cwl-out-dir")
+
+        cwlfile = "src/toil/test/cwl/optional-file.cwl"
+        jobfile = "src/toil/test/cwl/optional-file-exists.json"
+
+        args = [
+            os.path.join(self.rootDir, cwlfile),
+            os.path.join(self.rootDir, jobfile),
+            f"--outdir={out_dir}"
+        ]
+        from toil.cwl import cwltoil
+
+        ret = cwltoil.main(args)
+        assert ret == 0
+        assert os.path.exists(os.path.join(out_dir, "wdl_templates_old.zip"))
+
+    @needs_aws_s3
+    def test_optional_secondary_files_missing(self) -> None:
+        tmp_path = self._createTempDir()
+        out_dir = os.path.join(tmp_path, "cwl-out-dir")
+
+        cwlfile = "src/toil/test/cwl/optional-file.cwl"
+        jobfile = "src/toil/test/cwl/optional-file-missing.json"
+
+        args = [
+            os.path.join(self.rootDir, cwlfile),
+            os.path.join(self.rootDir, jobfile),
+            f"--outdir={out_dir}"
+        ]
+        from toil.cwl import cwltoil
+
+        ret = cwltoil.main(args)
+        assert ret == 0
+        assert not os.path.exists(os.path.join(out_dir, "hello_old.zip"))
 
 @needs_cwl
 @needs_online
@@ -1194,7 +1290,6 @@ class CWLv12Test(ToilTest):
             must_support_all_features=must_support_all_features,
             junit_file=junit_file,
         )
-
     @slow
     @pytest.mark.timeout(CONFORMANCE_TEST_TIMEOUT)
     def test_run_conformance_with_caching(self) -> None:
@@ -1203,6 +1298,14 @@ class CWLv12Test(ToilTest):
             junit_file=os.path.join(self.rootDir, "caching-conformance-1.2.junit.xml"),
         )
 
+    @slow
+    @pytest.mark.timeout(CONFORMANCE_TEST_TIMEOUT)
+    def test_run_conformance_with_task_caching(self) -> None:
+        self.test_run_conformance(
+            junit_file=os.path.join(self.rootDir, "task-caching-conformance-1.2.junit.xml"),
+            extra_args=["--cachedir", self._createTempDir("task_cache")]
+        )
+
     @slow
     @pytest.mark.timeout(CONFORMANCE_TEST_TIMEOUT)
     def test_run_conformance_with_in_place_update(self) -> None:
toil/test/cwl/optional-file.cwl ADDED
@@ -0,0 +1,18 @@
+#!/usr/bin/env cwl-runner
+class: Workflow
+cwlVersion: v1.2
+
+inputs:
+  inputFile:
+    type: File
+    secondaryFiles:
+      - ^_old.zip?
+
+outputs:
+  out_file:
+    type: File
+    outputSource:
+      - inputFile
+
+steps:
+  []
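For readers unfamiliar with the `^_old.zip?` pattern in the new optional-file.cwl: in CWL secondaryFiles patterns, each leading `^` strips one extension from the primary file's basename before the rest of the pattern is appended, and the trailing `?` marks the secondary file as optional, which is why the tests above expect a `wdl_templates_old.zip` next to the input when it exists and tolerate its absence otherwise. Below is a small illustrative sketch of that expansion (not cwltool's implementation; the `wdl_templates.txt` primary name is hypothetical):

```python
import re

# Illustrative only (not cwltool's implementation): expanding a secondaryFiles
# pattern such as "^_old.zip?" against a primary file's basename. Each leading
# "^" drops one extension, the remainder is appended, and a trailing "?" just
# marks the secondary file as optional.
def expand_secondary(primary_basename: str, pattern: str) -> tuple[str, bool]:
    optional = pattern.endswith("?")
    pattern = pattern.rstrip("?")
    name = primary_basename
    while pattern.startswith("^"):
        pattern = pattern[1:]
        name = re.sub(r"\.[^.]*$", "", name)  # strip the last extension
    return name + pattern, optional

# Matches the file name test_optional_secondary_files_exists looks for.
assert expand_secondary("wdl_templates.txt", "^_old.zip?") == ("wdl_templates_old.zip", True)
```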