virtool-workflow 0.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. virtool_workflow/__init__.py +13 -0
  2. virtool_workflow/analysis/__init__.py +1 -0
  3. virtool_workflow/analysis/fastqc.py +467 -0
  4. virtool_workflow/analysis/skewer.py +265 -0
  5. virtool_workflow/analysis/trimming.py +56 -0
  6. virtool_workflow/analysis/utils.py +27 -0
  7. virtool_workflow/api/__init__.py +0 -0
  8. virtool_workflow/api/acquire.py +66 -0
  9. virtool_workflow/api/client.py +132 -0
  10. virtool_workflow/api/utils.py +109 -0
  11. virtool_workflow/cli.py +66 -0
  12. virtool_workflow/data/__init__.py +22 -0
  13. virtool_workflow/data/analyses.py +106 -0
  14. virtool_workflow/data/hmms.py +109 -0
  15. virtool_workflow/data/indexes.py +319 -0
  16. virtool_workflow/data/jobs.py +62 -0
  17. virtool_workflow/data/ml.py +82 -0
  18. virtool_workflow/data/samples.py +190 -0
  19. virtool_workflow/data/subtractions.py +244 -0
  20. virtool_workflow/data/uploads.py +35 -0
  21. virtool_workflow/decorators.py +47 -0
  22. virtool_workflow/errors.py +62 -0
  23. virtool_workflow/files.py +40 -0
  24. virtool_workflow/hooks.py +140 -0
  25. virtool_workflow/pytest_plugin/__init__.py +35 -0
  26. virtool_workflow/pytest_plugin/data.py +197 -0
  27. virtool_workflow/pytest_plugin/utils.py +9 -0
  28. virtool_workflow/runtime/__init__.py +0 -0
  29. virtool_workflow/runtime/config.py +21 -0
  30. virtool_workflow/runtime/discover.py +95 -0
  31. virtool_workflow/runtime/events.py +7 -0
  32. virtool_workflow/runtime/hook.py +129 -0
  33. virtool_workflow/runtime/path.py +19 -0
  34. virtool_workflow/runtime/ping.py +54 -0
  35. virtool_workflow/runtime/redis.py +65 -0
  36. virtool_workflow/runtime/run.py +276 -0
  37. virtool_workflow/runtime/run_subprocess.py +168 -0
  38. virtool_workflow/runtime/sentry.py +28 -0
  39. virtool_workflow/utils.py +90 -0
  40. virtool_workflow/workflow.py +90 -0
  41. virtool_workflow-0.0.0.dist-info/LICENSE +21 -0
  42. virtool_workflow-0.0.0.dist-info/METADATA +71 -0
  43. virtool_workflow-0.0.0.dist-info/RECORD +45 -0
  44. virtool_workflow-0.0.0.dist-info/WHEEL +4 -0
  45. virtool_workflow-0.0.0.dist-info/entry_points.txt +3 -0

virtool_workflow/runtime/ping.py
@@ -0,0 +1,54 @@
+"""Ping the API to keep the job alive."""
+import asyncio
+from contextlib import asynccontextmanager
+
+from aiohttp import ClientOSError, ServerDisconnectedError
+from structlog import get_logger
+
+from virtool_workflow.api.client import APIClient
+
+logger = get_logger("api")
+
+
+async def _ping_periodically(api: APIClient, job_id: str):
+    retries = 0
+
+    try:
+        while True:
+            if retries > 5:
+                logger.warning("failed to ping server")
+                break
+
+            await asyncio.sleep(0.1)
+
+            try:
+                await api.put_json(f"/jobs/{job_id}/ping", {})
+            except (ClientOSError, ServerDisconnectedError):
+                await asyncio.sleep(0.3)
+                retries += 1
+                continue
+
+            await asyncio.sleep(5)
+    except asyncio.CancelledError:
+        logger.info("stopped pinging server")
+
+
+@asynccontextmanager
+async def ping_periodically(api: APIClient, job_id: str):
+    """Ping the API to keep the job alive.
+
+    While the context manager is open, a task runs that pings the API every 5 seconds.
+    When the context manager is closed, the task is cleanly cancelled.
+
+    A failed ping request is retried up to five times before the task gives up and stops pinging.
+
+    :param api: The API client.
+    :param job_id: The ID of the job to ping.
+
+    """
+    task = asyncio.create_task(_ping_periodically(api, job_id))
+
+    yield
+
+    task.cancel()
+    await task
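
A minimal usage sketch for `ping_periodically` (the `do_work` coroutine is a stand-in, and in the package the `api` client is built by `run_workflow` in virtool_workflow/runtime/run.py via `api_client`):

import asyncio

from virtool_workflow.api.client import APIClient
from virtool_workflow.runtime.ping import ping_periodically


async def do_work():
    """Stand-in for a long-running workflow step."""
    await asyncio.sleep(30)


async def run_with_keepalive(api: APIClient, job_id: str):
    # While this block is open, a background task pings the jobs API every
    # five seconds so the server does not mark the job as stalled.
    async with ping_periodically(api, job_id):
        await do_work()
    # On exit, the ping task is cancelled and awaited cleanly.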

virtool_workflow/runtime/redis.py
@@ -0,0 +1,65 @@
+import asyncio
+from asyncio import CancelledError
+from collections.abc import Callable
+
+from structlog import get_logger
+from virtool.redis import Redis
+
+logger = get_logger("redis")
+
+CANCELLATION_CHANNEL = "channel:cancel"
+
+
+async def get_next_job_with_timeout(
+    list_name: str,
+    redis: Redis,
+    timeout: int | None = None,
+) -> str:
+    """Get the next job ID from a Redis list.
+
+    Raise :class:`TimeoutError` if an ID is not found within ``timeout``
+    seconds.
+
+    :param list_name: the name of the list to pop from
+    :param redis: the Redis client
+    :param timeout: seconds to wait before raising :class:`TimeoutError`
+    :return: the next job ID
+    """
+    logger.info(
+        "Waiting for a job",
+        timeout=f"{timeout if timeout else 'infinity'} seconds",
+    )
+
+    return await asyncio.wait_for(get_next_job(list_name, redis), timeout)
+
+
+async def get_next_job(list_name: str, redis: Redis) -> str:
+    """Get the next job ID from a Redis list.
+
+    :param list_name: the name of the list to pop from
+    :param redis: the Redis client
+    :return: the next job ID
+
+    """
+    if (job_id := await redis.blpop(list_name)) is not None:
+        logger.info("pulled job id from redis", id=job_id)
+        return job_id
+
+    raise ValueError("Unexpected None from job id list")
+
+
+async def wait_for_cancellation(redis: Redis, job_id: str, func: Callable):
+    """Call a function ``func`` when a job matching ``job_id`` is cancelled.
+
+    :param redis: the Redis client
+    :param job_id: the job ID to watch for
+    :param func: the function to call when the job is cancelled
+
+    """
+    try:
+        async for cancelled_job_id in redis.subscribe(CANCELLATION_CHANNEL):
+            if cancelled_job_id == job_id:
+                return func()
+
+    except CancelledError:
+        ...
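
A sketch of how the two helpers combine, assuming a reachable Redis server; the connection string and the list name "jobs_example" are illustrative:

import asyncio

from virtool.redis import Redis

from virtool_workflow.runtime.redis import (
    get_next_job_with_timeout,
    wait_for_cancellation,
)


async def main():
    async with Redis("redis://localhost:6379") as redis:
        # Block for up to 30 seconds waiting for a job ID, raising
        # TimeoutError if nothing arrives.
        job_id = await get_next_job_with_timeout("jobs_example", redis, 30)

        # Run a callback if the job is cancelled on the pub/sub channel.
        task = asyncio.create_task(
            wait_for_cancellation(redis, job_id, lambda: print("cancelled")),
        )

        # ... run the job here ...

        task.cancel()
        await task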

virtool_workflow/runtime/run.py
@@ -0,0 +1,276 @@
+import asyncio
+import signal
+import sys
+from asyncio import CancelledError
+from collections.abc import Callable
+from pathlib import Path
+
+from pyfixtures import FixtureScope, runs_in_new_fixture_context
+from structlog import get_logger
+from virtool.jobs.models import JobState
+from virtool.redis import Redis
+
+from virtool_workflow.api.acquire import acquire_job_by_id
+from virtool_workflow.api.client import api_client
+from virtool_workflow.hooks import (
+    cleanup_builtin_status_hooks,
+    on_cancelled,
+    on_error,
+    on_failure,
+    on_finish,
+    on_result,
+    on_step_finish,
+    on_step_start,
+    on_success,
+    on_terminated,
+    on_workflow_start,
+)
+from virtool_workflow.runtime.config import RunConfig
+from virtool_workflow.runtime.discover import (
+    load_builtin_fixtures,
+    load_custom_fixtures,
+    load_workflow_from_file,
+)
+from virtool_workflow.runtime.events import Events
+from virtool_workflow.runtime.path import create_work_path
+from virtool_workflow.runtime.ping import ping_periodically
+from virtool_workflow.runtime.redis import (
+    get_next_job_with_timeout,
+    wait_for_cancellation,
+)
+from virtool_workflow.runtime.sentry import configure_sentry
+from virtool_workflow.utils import configure_logs, get_virtool_workflow_version
+from virtool_workflow.workflow import Workflow
+
+logger = get_logger("runtime")
+
+
+def configure_status_hooks():
+    """Configure built-in job status hooks.
+
+    Push status updates to the API when various lifecycle hooks are triggered.
+
+    """
+
+    @on_step_start
+    async def handle_step_start(push_status):
+        await push_status()
+
+    @on_error(once=True)
+    async def handle_error(push_status):
+        await push_status()
+
+    @on_cancelled(once=True)
+    async def handle_cancelled(push_status):
+        await push_status()
+
+    @on_terminated(once=True)
+    async def handle_terminated(push_status):
+        await push_status()
+
+    @on_success(once=True)
+    async def handle_success(push_status):
+        await push_status()
+
+
+async def execute(workflow: Workflow, scope: FixtureScope, events: Events):
+    """Execute a workflow.
+
+    :param workflow: The workflow to execute
+    :param scope: The :class:`FixtureScope` to use for fixture injection
+
+    """
+    await on_workflow_start.trigger(scope)
+
+    scope["_state"] = JobState.RUNNING
+
+    try:
+        for step in workflow.steps:
+            scope["_step"] = step
+
+            bound_step = await scope.bind(step.function)
+
+            await on_step_start.trigger(scope)
+            logger.info("running workflow step", name=step.display_name)
+            await bound_step()
+            await on_step_finish.trigger(scope)
+
+    except CancelledError:
+        logger.info("cancellation or termination interrupted workflow execution")
+
+        if events.cancelled.is_set():
+            logger.info("workflow cancelled")
+
+            scope["_state"] = JobState.CANCELLED
+
+            await asyncio.gather(
+                on_cancelled.trigger(scope),
+                on_failure.trigger(scope),
+            )
+        else:
+            logger.info("workflow terminated")
+
+            scope["_state"] = JobState.TERMINATED
+
+            if not events.terminated.is_set():
+                logger.warning(
+                    "workflow terminated without sigterm. this should not happen.",
+                )
+
+            await asyncio.gather(
+                on_terminated.trigger(scope),
+                on_failure.trigger(scope),
+            )
+
+    except Exception as error:
+        scope["_error"] = error
+        scope["_state"] = JobState.ERROR
+
+        logger.exception(error)
+
+        await asyncio.gather(on_error.trigger(scope), on_failure.trigger(scope))
+
+        if isinstance(error, asyncio.CancelledError):
+            raise
+
+    else:
+        scope["_state"] = JobState.COMPLETE
+        scope["_step"] = None
+
+        if "results" in scope:
+            await on_result.trigger(scope)
+
+        await on_success.trigger(scope)
+
+    finally:
+        await on_finish.trigger(scope)
+
+
+async def run_workflow(
+    config: RunConfig,
+    job_id: str,
+    workflow: Workflow,
+    events: Events,
+):
+    # Configure hooks here so that they can be tested when using `run_workflow`.
+    configure_status_hooks()
+
+    load_builtin_fixtures()
+
+    job = await acquire_job_by_id(config.jobs_api_connection_string, job_id)
+
+    async with (
+        api_client(
+            config.jobs_api_connection_string,
+            job.id,
+            job.key,
+        ) as api,
+        FixtureScope() as scope,
+    ):
+        # These fixtures should not be used directly by the workflow. They are used
+        # by other built-in fixtures.
+        scope["_api"] = api
+        scope["_config"] = config
+        scope["_error"] = None
+        scope["_job"] = job
+        scope["_state"] = JobState.WAITING
+        scope["_step"] = None
+        scope["_workflow"] = workflow
+
+        scope["logger"] = get_logger("workflow")
+        scope["mem"] = config.mem
+        scope["proc"] = config.proc
+        scope["results"] = {}
+
+        async with create_work_path(config) as work_path:
+            scope["work_path"] = work_path
+
+            async with ping_periodically(api, job_id):
+                await execute(workflow, scope, events)
+                cleanup_builtin_status_hooks()
+
+
+@runs_in_new_fixture_context()
+async def start_runtime(
+    dev: bool,
+    jobs_api_connection_string: str,
+    mem: int,
+    proc: int,
+    redis_connection_string: str,
+    redis_list_name: str,
+    sentry_dsn: str,
+    timeout: int,
+    work_path: Path,
+    workflow_loader: Callable[[], Workflow] = load_workflow_from_file,
+):
+    """Start the workflow runtime.
+
+    The runtime loads the workflow and fixtures. It then waits for a job ID to be pushed
+    to the configured Redis list.
+
+    When a job ID is received, the runtime acquires the job from the jobs API and runs the workflow.
+    """
+    configure_logs(bool(sentry_dsn))
+
+    logger.info(
+        "found virtool-workflow",
+        version=get_virtool_workflow_version(),
+    )
+
+    workflow = workflow_loader()
+
+    load_builtin_fixtures()
+    load_custom_fixtures()
+
+    configure_sentry(sentry_dsn)
+
+    async with Redis(redis_connection_string) as redis:
+        try:
+            job_id = await get_next_job_with_timeout(redis_list_name, redis, timeout)
+        except TimeoutError:
+            # This happens due to Kubernetes scheduling issues or job cancellations. It
+            # is not an error.
+            logger.warning("timed out while waiting for job id")
+            return
+
+    events = Events()
+
+    run_workflow_task = asyncio.create_task(
+        run_workflow(
+            RunConfig(
+                dev,
+                jobs_api_connection_string,
+                mem,
+                proc,
+                work_path,
+            ),
+            job_id,
+            workflow,
+            events,
+        ),
+    )
+
+    def terminate_workflow(*_):
+        logger.info("received sigterm. terminating workflow.")
+        events.terminated.set()
+        run_workflow_task.cancel()
+
+    signal.signal(signal.SIGTERM, terminate_workflow)
+
+    def cancel_workflow(*_):
+        logger.info("received cancellation signal from redis")
+        events.cancelled.set()
+        run_workflow_task.cancel()
+
+    async with Redis(redis_connection_string) as redis:
+        cancellation_task = asyncio.create_task(
+            wait_for_cancellation(redis, job_id, cancel_workflow),
+        )
+
+        await run_workflow_task
+
+        cancellation_task.cancel()
+        await cancellation_task
+
+    if events.terminated.is_set():
+        sys.exit(124)
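
The hooks imported above are also the extension surface for workflow authors. A hook callback's parameters are injected by name from the same `FixtureScope` that `run_workflow` populates, just as `configure_status_hooks` receives `push_status`. A sketch using the `logger` and `results` fixtures set above:

from virtool_workflow.hooks import on_failure, on_success


@on_success(once=True)
async def record_success(logger, results):
    # `logger` and `results` are resolved from the fixture scope that
    # run_workflow() fills in before execute() runs.
    logger.info("workflow succeeded", result_count=len(results))


@on_failure(once=True)
async def log_failure(logger):
    # on_failure fires for error, cancellation, and termination alike.
    logger.warning("workflow did not complete")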

virtool_workflow/runtime/run_subprocess.py
@@ -0,0 +1,168 @@
+"""Code for running and managing subprocesses."""
+
+import asyncio
+from asyncio.subprocess import Process
+from collections.abc import Callable, Coroutine
+from pathlib import Path
+from typing import Protocol
+
+from pyfixtures import fixture
+from structlog import get_logger
+from virtool.utils import timestamp
+
+from virtool_workflow.errors import SubprocessFailedError
+
+logger = get_logger("subprocess")
+
+
+class LineOutputHandler(Protocol):
+    async def __call__(self, line: bytes):
+        """Handle output from stdout or stderr, line by line.
+
+        :param line: A line of output from the stream.
+        """
+        raise NotImplementedError
+
+
+class RunSubprocess(Protocol):
+    async def __call__(
+        self,
+        command: list[str],
+        cwd: str | Path | None = None,
+        env: dict | None = None,
+        stderr_handler: LineOutputHandler | None = None,
+        stdout_handler: LineOutputHandler | None = None,
+    ) -> Process:
+        """Run a shell command in a subprocess.
+
+        :param command: A shell command
+        :param stdout_handler: A function to handle stdout output line by line
+        :param stderr_handler: A function to handle stderr output line by line
+        :param env: environment variables which should be available to the subprocess
+        :param cwd: The current working directory
+        :raise SubprocessFailedError: The subprocess has exited with a non-zero exit code
+        :return: A :class:`.Process` instance
+        """
+        raise NotImplementedError
+
+
+async def watch_pipe(
+    stream: asyncio.StreamReader,
+    handler: LineOutputHandler,
+):
+    """Watch a stdout or stderr stream and pass lines to the ``handler`` callback function.
+
+    :param stream: a stdout or stderr file object
+    :param handler: a handler coroutine for output lines
+
+    """
+    while True:
+        line = await stream.readline()
+
+        if not line:
+            return
+
+        await handler(line)
+
+
+def stderr_logger(line: bytes):
+    """Log a line of stderr output, trying to decode it as UTF-8.
+
+    If the line cannot be decoded, log the raw bytes.
+
+    :param line: a line of stderr output
+    """
+    line = line.rstrip()
+
+    try:
+        logger.info("stderr", line=line.decode())
+    except UnicodeDecodeError:
+        logger.info("stderr", line=line)
+
+
+async def _run_subprocess(
+    command: list[str],
+    stdout_handler: LineOutputHandler | None = None,
+    stderr_handler: LineOutputHandler | None = None,
+    env: dict | None = None,
+    cwd: str | None = None,
+) -> asyncio.subprocess.Process:
+    """An implementation of :class:`RunSubprocess` using ``asyncio.subprocess``."""
+    log = logger.bind()
+    log.info("running subprocess", command=command)
+
+    stdout = asyncio.subprocess.PIPE if stdout_handler else asyncio.subprocess.DEVNULL
+
+    if stderr_handler:
+
+        async def _stderr_handler(line):
+            stderr_logger(line)
+            await stderr_handler(line)
+
+    else:

+        async def _stderr_handler(line):
+            stderr_logger(line)
+
+    process = await asyncio.create_subprocess_exec(
+        *(str(arg) for arg in command),
+        cwd=cwd,
+        env=env,
+        limit=1024 * 1024 * 128,
+        stderr=asyncio.subprocess.PIPE,
+        stdout=stdout,
+    )
+
+    log.info("started subprocess", pid=process.pid, timestamp=timestamp().isoformat())
+
+    aws = [watch_pipe(process.stderr, _stderr_handler)]
+
+    if stdout_handler:
+        aws.append(watch_pipe(process.stdout, stdout_handler))
+
+    watcher_future = asyncio.gather(*aws)
+
+    try:
+        await watcher_future
+    except asyncio.CancelledError:
+        logger.info("terminating subprocess")
+
+        process.terminate()
+
+        # Have to do this in Python 3.10 to avoid an "Event loop closed" error.
+        # https://github.com/python/cpython/issues/88050
+        try:
+            process._transport.close()
+        except AttributeError:
+            pass
+
+        await process.wait()
+        logger.info("subprocess exited", code=process.returncode)
+
+        await watcher_future
+
+        return process
+
+    await process.wait()
+
+    # Exit code 15 indicates that the process was terminated. This is expected
+    # when the workflow fails for some other reason, hence not an exception.
+    if process.returncode not in [0, 15, -15]:
+        raise SubprocessFailedError(
+            f"{command[0]} failed with exit code {process.returncode}\n"
+            f"arguments: {command}\n",
+        )
+
+    log.info(
+        "subprocess finished",
+        return_code=process.returncode,
+        timestamp=timestamp().isoformat(),
+    )
+
+    return process
+
+
+@fixture(protocol=RunSubprocess)
+def run_subprocess() -> RunSubprocess:
+    """Fixture to run subprocesses and handle stdout and stderr output line-by-line."""
+    return _run_subprocess
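
A usage sketch for the fixture; the command and file path are illustrative:

async def count_lines(run_subprocess, path: str) -> int:
    """Count lines in a file with `wc -l` via the run_subprocess fixture."""
    captured: list[bytes] = []

    async def handle_stdout(line: bytes):
        captured.append(line)

    # Raises SubprocessFailedError on a non-zero exit code other than the
    # termination codes 15 and -15 handled above.
    await run_subprocess(["wc", "-l", path], stdout_handler=handle_stdout)

    return int(captured[0].split()[0])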

virtool_workflow/runtime/sentry.py
@@ -0,0 +1,28 @@
+import logging
+
+import sentry_sdk
+from sentry_sdk.integrations.logging import LoggingIntegration
+from structlog import get_logger
+
+from virtool_workflow.utils import get_virtool_workflow_version
+
+logger = get_logger("runtime")
+
+
+def configure_sentry(dsn: str):
+    """Initialize Sentry for log aggregation."""
+    if dsn:
+        logger.info("initializing sentry", dsn=f"{dsn[:15]}...")
+
+        sentry_sdk.init(
+            dsn=dsn,
+            integrations=[
+                LoggingIntegration(
+                    event_level=logging.WARNING,
+                ),
+            ],
+            release=get_virtool_workflow_version(),
+            traces_sample_rate=0.2,
+        )
+    else:
+        logger.info("sentry disabled because no dsn was provided")
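
Usage is a single call at startup; an empty or missing DSN disables Sentry rather than raising:

import os

from virtool_workflow.runtime.sentry import configure_sentry

# Falls through to the "sentry disabled" branch when the variable is unset.
configure_sentry(os.environ.get("SENTRY_DSN", ""))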

virtool_workflow/utils.py
@@ -0,0 +1,90 @@
+import asyncio
+import logging
+import sys
+import tarfile
+from collections.abc import Callable
+from functools import wraps
+from importlib import metadata
+from inspect import iscoroutinefunction
+from pathlib import Path
+
+import structlog
+from structlog.processors import LogfmtRenderer
+from structlog_sentry import SentryProcessor
+
+
+def coerce_to_coroutine_function(func: Callable):
+    """Wrap a non-async function in an async function."""
+    if iscoroutinefunction(func):
+        return func
+
+    @wraps(func)
+    async def _func(*args, **kwargs):
+        return func(*args, **kwargs)
+
+    return _func
+
+
+def configure_logs(use_sentry: bool):
+    logging.basicConfig(
+        format="%(message)s",
+        stream=sys.stdout,
+        level=logging.INFO,
+    )
+
+    def normalize_log_level(logger, method_name, event_dict):
+        """Map EXCEPTION level to ERROR since the logging module doesn't have EXCEPTION."""
+        if event_dict.get("level") == "EXCEPTION":
+            event_dict["level"] = "ERROR"
+        return event_dict
+
+    processors = [
+        structlog.stdlib.filter_by_level,
+        structlog.stdlib.add_logger_name,
+        structlog.stdlib.add_log_level,
+        normalize_log_level,
+        structlog.stdlib.PositionalArgumentsFormatter(),
+        structlog.processors.TimeStamper(fmt="%Y-%m-%dT%H:%M:%SZ"),
+        structlog.processors.StackInfoRenderer(),
+        structlog.processors.UnicodeDecoder(),
+    ]
+
+    if use_sentry:
+        processors.append(
+            SentryProcessor(event_level=logging.WARNING, level=logging.INFO),
+        )
+
+    processors.append(
+        LogfmtRenderer(
+            key_order=["timestamp", "level", "logger", "event"],
+        ),
+    )
+
+    structlog.configure(
+        cache_logger_on_first_use=True,
+        logger_factory=structlog.stdlib.LoggerFactory(),
+        processors=processors,
+        wrapper_class=structlog.stdlib.BoundLogger,
+    )
+
+
+def get_virtool_workflow_version() -> str:
+    """Get the version of the installed virtool-workflow package."""
+    try:
+        return metadata.version("virtool-workflow")
+    except metadata.PackageNotFoundError:
+        return "0.0.0"
+
+
+async def make_directory(path: Path):
+    await asyncio.to_thread(path.mkdir, exist_ok=True, parents=True)
+
+
+def untar(path: Path, target_path: Path):
+    with tarfile.open(path, "r:gz") as tar:
+        tar.extractall(target_path)
+
+
+def move_all_model_files(source_path: Path, target_path: Path):
+    for file in source_path.iterdir():
+        file.rename(target_path / file.name)
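
A sketch showing the guarantee `coerce_to_coroutine_function` provides: after coercion, sync and async callables can be awaited uniformly:

import asyncio

from virtool_workflow.utils import coerce_to_coroutine_function


def add(a: int, b: int) -> int:
    return a + b


async def main():
    # A sync callable is wrapped; an async one is returned unchanged.
    add_async = coerce_to_coroutine_function(add)
    assert await add_async(1, 2) == 3


asyncio.run(main())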