genelastic 0.6.1__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- genelastic/api/extends/example.py +2 -3
- genelastic/api/routes.py +160 -23
- genelastic/api/server.py +30 -22
- genelastic/api/settings.py +3 -2
- genelastic/common/__init__.py +36 -9
- genelastic/common/cli.py +51 -23
- genelastic/common/elastic.py +80 -49
- genelastic/common/exceptions.py +0 -2
- genelastic/common/types.py +20 -15
- genelastic/import_data/__init__.py +23 -5
- genelastic/import_data/analyses.py +17 -20
- genelastic/import_data/analysis.py +69 -65
- genelastic/import_data/bi_process.py +7 -5
- genelastic/import_data/bi_processes.py +8 -8
- genelastic/import_data/cli_gen_data.py +116 -0
- genelastic/import_data/cli_import.py +379 -0
- genelastic/import_data/{info.py → cli_info.py} +104 -75
- genelastic/import_data/cli_integrity.py +384 -0
- genelastic/import_data/cli_validate.py +54 -0
- genelastic/import_data/constants.py +11 -32
- genelastic/import_data/data_file.py +23 -20
- genelastic/import_data/filename_pattern.py +26 -32
- genelastic/import_data/import_bundle.py +56 -47
- genelastic/import_data/import_bundle_factory.py +166 -158
- genelastic/import_data/logger.py +22 -18
- genelastic/import_data/random_bundle.py +402 -0
- genelastic/import_data/tags.py +46 -26
- genelastic/import_data/wet_process.py +8 -4
- genelastic/import_data/wet_processes.py +13 -8
- genelastic/ui/__init__.py +0 -0
- genelastic/ui/server.py +87 -0
- genelastic/ui/settings.py +11 -0
- genelastic-0.7.0.dist-info/METADATA +105 -0
- genelastic-0.7.0.dist-info/RECORD +40 -0
- {genelastic-0.6.1.dist-info → genelastic-0.7.0.dist-info}/WHEEL +1 -1
- genelastic-0.7.0.dist-info/entry_points.txt +6 -0
- genelastic/import_data/gen_data.py +0 -194
- genelastic/import_data/import_data.py +0 -292
- genelastic/import_data/integrity.py +0 -290
- genelastic/import_data/validate_data.py +0 -43
- genelastic-0.6.1.dist-info/METADATA +0 -41
- genelastic-0.6.1.dist-info/RECORD +0 -36
- genelastic-0.6.1.dist-info/entry_points.txt +0 -6
- {genelastic-0.6.1.dist-info → genelastic-0.7.0.dist-info}/top_level.txt +0 -0

genelastic/import_data/bi_process.py
@@ -1,15 +1,17 @@
-# pylint: disable=missing-module-docstring
 import copy
-import typing
 
 from genelastic.common import BioInfoProcessData
 
 
 class BioInfoProcess:
     """Class representing a bio process."""
-
-
-
+
+    def __init__(
+        self,
+        proc_id: str,
+        bundle_file: str | None = None,
+        **data: str | list[str],
+    ) -> None:
         self._proc_id = proc_id
         self._bundle_file = bundle_file
         self._data: BioInfoProcessData = data
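
The hunk above replaces the old constructor signature (its removed lines are not captured in this view) with an explicit multi-line one: BioInfoProcess now takes a required proc_id, an optional bundle_file, and free-form keyword metadata typed as str | list[str]. A minimal usage sketch based only on that signature; the keyword names below are illustrative, not fields defined by genelastic:

from genelastic.import_data.bi_process import BioInfoProcess

# proc_id is required, bundle_file is optional, and any extra keywords are
# stored as the process data (str or list[str] values). The metadata names
# here are made up for illustration.
proc = BioInfoProcess(
    "bi-proc-001",
    bundle_file="analyses/bundle.yml",
    aligner="bwa-mem2",
    steps=["align", "sort", "call-variants"],
)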

genelastic/import_data/bi_processes.py
@@ -1,4 +1,3 @@
-# pylint: disable=missing-module-docstring
 import logging
 import typing
 
@@ -6,14 +5,14 @@ from genelastic.common import BundleDict
 
 from .bi_process import BioInfoProcess
 
-logger = logging.getLogger(
+logger = logging.getLogger("genelastic")
 
 
 class BioInfoProcesses:
     """Class BioInfoProcesses is a container of BioInfoProcess objects."""
 
     def __init__(self) -> None:
-        self._dict:
+        self._dict: dict[str, BioInfoProcess] = {}
 
     def __len__(self) -> int:
         return len(self._dict)
@@ -27,20 +26,21 @@ class BioInfoProcesses:
         the program exits.
         """
         if process.id in self._dict:
-
+            msg = f"A bi process with the id '{process.id}' is already present."
+            raise ValueError(msg)
 
         # Add one WetProcess object.
         self._dict[process.id] = process
 
-    def get_process_ids(self) ->
+    def get_process_ids(self) -> set[str]:
         """Get a list of the bio processes IDs."""
         return set(self._dict.keys())
 
     @classmethod
-    def from_array_of_dicts(
-
+    def from_array_of_dicts(
+        cls, arr: typing.Sequence[BundleDict]
+    ) -> typing.Self:
         """Build a BioInfoProcesses instance."""
-
         bi_processes = cls()
 
         for d in arr:

genelastic/import_data/cli_gen_data.py (new file)
@@ -0,0 +1,116 @@
+import argparse
+import logging
+from pathlib import Path
+
+from genelastic.common import add_verbose_control_args
+
+from .logger import configure_logging
+from .random_bundle import (
+    RandomBundle,
+)
+
+logger = logging.getLogger("genelastic")
+
+
+def read_args() -> argparse.Namespace:
+    """Read arguments from command line."""
+    parser = argparse.ArgumentParser(
+        description="Genetics data random generator.",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        allow_abbrev=False,
+    )
+    add_verbose_control_args(parser)
+    parser.add_argument(
+        "-d",
+        "--data-folder",
+        dest="data_folder",
+        required=True,
+        help="Data destination folder.",
+        type=Path,
+    )
+    parser.add_argument(
+        "--log-file", dest="log_file", help="Path to a log file."
+    )
+    parser.add_argument(
+        "-n",
+        "--chrom-nb",
+        dest="chrom_nb",
+        type=int,
+        default=5,
+        help="Number of chromosomes to include in the generated VCF file.",
+    )
+    parser.add_argument(
+        "-o",
+        "--output-yaml-file",
+        dest="output_file",
+        default=None,
+        help="Output YAML file.",
+        type=Path,
+    )
+    parser.add_argument(
+        "-s",
+        "--sequence-size",
+        type=int,
+        default=2000,
+        help="Sequence size (number of nucleotides) generated for each chromosome.",
+    )
+    parser.add_argument(
+        "-c",
+        "--coverage",
+        action="store_true",
+        help="Generate a coverage file for each analysis.",
+    )
+    parser.add_argument(
+        "-a",
+        "--analyses",
+        help="Number of analyses to generate. "
+        "Each analysis is composed of a YAML bundle file declaring its wet lab and bioinformatics processes, "
+        "a VCF file and optionally a coverage file.",
+        default=1,
+        type=int,
+    )
+    parser.add_argument(
+        "-p",
+        "--processes",
+        help="Number of Wet Lab and Bioinformatics processes to generate.",
+        default=1,
+        type=int,
+    )
+    return parser.parse_args()
+
+
+def main() -> None:
+    """Entry point of the gen-data script."""
+    # Read command line arguments
+    args = read_args()
+    folder = args.data_folder.resolve()
+
+    if not folder.is_dir():
+        msg = f"ERROR: '{folder}' does not exist or is not a directory."
+        raise SystemExit(msg)
+
+    if args.analyses < 1:
+        msg = "Analyses count must be at least 1."
+        raise SystemExit(msg)
+
+    if args.processes < 1:
+        msg = "Processes count must be at least 1."
+        raise SystemExit(msg)
+
+    # Configure logging
+    configure_logging(args.verbose, log_file=args.log_file)
+    logger.debug("Arguments: %s", args)
+
+    # Write to stdout or file
+    RandomBundle(
+        folder,
+        args.analyses,
+        args.processes,
+        args.chrom_nb,
+        args.sequence_size,
+        do_gen_coverage=args.coverage,
+    ).to_yaml(args.output_file)
+
+
+if __name__ == "__main__":
+    main()
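
cli_gen_data.py replaces the former gen_data.py and exposes main() behind an if __name__ == "__main__" guard, so the generator can be driven as a module without knowing the console-script name declared in entry_points.txt (not shown in this diff). Below is a sketch of such an invocation using only the flags defined in read_args() above; the paths and counts are made up, and the verbosity options added by add_verbose_control_args() are omitted because their flag names are not part of this diff:

import sys

from genelastic.import_data import cli_gen_data

# Generate 3 analyses sharing 2 wet lab and bioinformatics processes, each
# with a 5-chromosome, 2000-nucleotide VCF plus a coverage file, and write
# the resulting YAML bundle to bundle.yml (all values are examples).
sys.argv = [
    "cli_gen_data",
    "--data-folder", "./generated-data",
    "--analyses", "3",
    "--processes", "2",
    "--chrom-nb", "5",
    "--sequence-size", "2000",
    "--coverage",
    "--output-yaml-file", "bundle.yml",
]
cli_gen_data.main()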

genelastic/import_data/cli_import.py (new file)
@@ -0,0 +1,379 @@
+# vi: se tw=80
+
+# Elasticsearch Python API:
+# https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/overview.html
+# https://elasticsearch-py.readthedocs.io/en/latest/api.html
+
+import argparse
+import csv
+import datetime
+import hashlib
+import logging
+import sys
+import time
+from pathlib import Path
+
+import vcf
+
+from genelastic.common import (
+    AnalysisDocument,
+    BulkItems,
+    ElasticImportConn,
+    MetadataDocument,
+    ProcessDocument,
+    add_es_connection_args,
+    add_verbose_control_args,
+)
+
+from .bi_processes import BioInfoProcesses
+from .data_file import DataFile
+from .import_bundle_factory import make_import_bundle_from_files
+from .logger import configure_logging
+from .wet_processes import WetProcesses
+
+logger = logging.getLogger("genelastic")
+logging.getLogger("elastic_transport").setLevel(
+    logging.WARNING
+)  # Disable excessive logging
+logging.getLogger("urllib3").setLevel(
+    logging.WARNING
+)  # Disable excessive logging
+
+
+def read_args() -> argparse.Namespace:
+    """Read arguments from command line."""
+    parser = argparse.ArgumentParser(
+        description="Genetics data importer.",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        allow_abbrev=False,
+    )
+    add_verbose_control_args(parser)
+    add_es_connection_args(parser)
+    parser.add_argument(
+        "-D",
+        "--dry-run",
+        dest="dryrun",
+        action="count",
+        default=0,
+        help=(
+            "Dry-run level. -D for data files loading (VCF, coverage, etc) "
+            "without connecting or importing to database. "
+            "-DD for metadata YAML files loading only (no loading of data files)."
+        ),
+    )
+    parser.add_argument(
+        "--log-file", dest="log_file", help="Path to a log file."
+    )
+    parser.add_argument(
+        "--no-list",
+        dest="no_list",
+        action="store_true",
+        help="Do not print list of files to be imported.",
+    )
+    parser.add_argument(
+        "--no-confirm",
+        dest="no_confirm",
+        action="store_true",
+        help="Do not ask confirmation before importing.",
+    )
+    parser.add_argument(
+        "files",
+        type=Path,
+        nargs="+",
+        default=None,
+        help="Data files that describe what to import.",
+    )
+    return parser.parse_args()
+
+
+def import_cov_file(
+    es_import_conn: ElasticImportConn | None,
+    file_index: str,
+    file: Path,
+    dryrun: int = 0,
+) -> None:
+    """Import a coverage file to the Elasticsearch database."""
+    # Set field types
+    if dryrun == 0 and es_import_conn:
+        es_import_conn.client.indices.put_mapping(
+            index=file_index,
+            body={
+                "properties": {
+                    "pos": {"type": "integer"},
+                    "depth": {"type": "byte"},
+                }
+            },
+        )
+
+    # Open file
+    if dryrun > 1:
+        logger.info(
+            "Would load and import Coverage file %s " "into index %s.",
+            file,
+            file_index,
+        )
+    else:
+        logger.info("Load Coverage file %s.", file)
+        if dryrun == 1:
+            logger.info(
+                "Would import Coverage file %s into index %s.", file, file_index
+            )
+        else:
+            logger.info(
+                "Import Coverage file %s into index %s.", file, file_index
+            )
+        with file.open(newline="", encoding="utf-8") as f:
+            # Read file as CSV
+            reader = csv.reader(f, delimiter="\t", quotechar='"')
+
+            # Loop on al lines
+            for row in reader:
+                # Build document
+                # Position starts at 0 inside coverage file
+                doc: MetadataDocument = {
+                    "type": "coverage",
+                    "chr": row[0],
+                    "pos": int(row[1]) + 1,
+                    "depth": int(row[2]),
+                }
+
+                # Insert document
+                if dryrun == 0 and es_import_conn:
+                    es_import_conn.client.index(index=file_index, document=doc)
+
+
+def import_analysis_metadata(  # noqa: PLR0913
+    es_import_conn: ElasticImportConn | None,
+    index_prefix: str,
+    file_index: str,
+    file: DataFile,
+    analysis_type: str,
+    dryrun: int = 0,
+) -> None:
+    """Import analysis metadata into a dedicated index."""
+    doc: AnalysisDocument = {
+        "path": str(file.path.resolve()),
+        "bundle_path": str(file.bundle_path.resolve())
+        if file.bundle_path
+        else None,
+        "metadata": file.metadata,
+        "file_index": file_index,
+        "type": analysis_type,
+    }
+
+    bulk_items: BulkItems = [
+        {"_index": f"{index_prefix}-analyses", "_source": doc}
+    ]
+
+    if dryrun == 0 and es_import_conn:
+        es_import_conn.import_items(
+            bulk_items,
+            start_time=time.perf_counter(),
+            total_items=len(bulk_items),
+        )
+
+
+def import_vcf_file(
+    es_import_conn: ElasticImportConn | None,
+    file_index: str,
+    file: DataFile,
+    dryrun: int = 0,
+) -> None:
+    """Import a VCF file to the Elasticsearch database."""
+    logger.info('Import VCF file "%s".', file)
+
+    if dryrun > 1:
+        logger.info(
+            "Would load and import VCF file %s " "into index %s.",
+            file.path,
+            file_index,
+        )
+    else:
+        logger.info("Load VCF file %s.", file.path)
+        if dryrun == 1:
+            logger.info(
+                "Would import VCF file %s into index %s.", file.path, file_index
+            )
+        else:
+            logger.info(
+                "Importing VCF file %s into index %s...", file.path, file_index
+            )
+
+        try:
+            vcf_reader = vcf.Reader(filename=str(file.path))
+            n = 0
+            start = time.perf_counter()
+            bulk_sz = 256  # Bulk size
+            bulk_items: BulkItems = []
+            for record in vcf_reader:
+                # Correct values
+                if not record.CHROM.startswith("chr"):
+                    if record.CHROM.lower().startswith("chr"):
+                        record.CHROM = "chr" + record.CHROM[3:]
+                    else:
+                        record.CHROM = "chr" + record.CHROM
+
+                # Build document
+                alt = [x if x is None else x.type for x in record.ALT]
+                doc: MetadataDocument = {
+                    "type": "vcf",
+                    "chr": record.CHROM,
+                    "pos": record.POS,
+                    "alt": alt,
+                    "info": record.INFO,
+                }
+
+                if dryrun == 0:
+                    # Append item to bulk
+                    bulk_items.append({"_index": file_index, "_source": doc})
+                    n += 1
+
+                    # Insert bulk of items
+                    if len(bulk_items) >= bulk_sz and es_import_conn:
+                        es_import_conn.import_items(
+                            bulk_items, start_time=start, total_items=n
+                        )
+                        bulk_items = []
+
+            # Insert remaining items
+            if dryrun == 0 and es_import_conn:
+                es_import_conn.import_items(
+                    bulk_items, start_time=start, total_items=n
+                )
+
+        except StopIteration:
+            logger.error("Skipping empty file : %s.", file.path)
+
+
+def import_processes(
+    es_import_conn: ElasticImportConn | None,
+    index: str,
+    processes: WetProcesses | BioInfoProcesses,
+    dryrun: int = 0,
+) -> None:
+    """Import processes into their own index."""
+    bulk_items: BulkItems = []
+
+    for proc_id in processes.get_process_ids():
+        process = processes[proc_id]
+        process_type = process.__class__.__name__
+        doc: ProcessDocument = process.data | {
+            "proc_id": proc_id,
+            "type": process_type,
+        }
+        bulk_items.append({"_index": index, "_source": doc})
+
+    if dryrun == 0 and es_import_conn:
+        es_import_conn.import_items(
+            bulk_items,
+            start_time=time.perf_counter(),
+            total_items=len(bulk_items),
+        )
+
+
+def generate_unique_index(index_prefix: str, filepath: Path) -> str:
+    """Generate a unique index with the following format:
+    <index_prefix>_<current_date>_<md5_hashed_filepath>
+    """
+    current_date = datetime.datetime.now(tz=datetime.UTC).strftime("%Y%m%d")
+    hashed_filepath = hashlib.md5(
+        str(filepath).encode("utf-8"), usedforsecurity=False
+    ).hexdigest()
+    return f"{index_prefix}-file-{current_date}-{hashed_filepath}"
+
+
+def main() -> None:  # noqa: C901
+    """Entry point of the import script."""
+    # Read command line arguments
+    args = read_args()
+
+    # Configure logging
+    configure_logging(args.verbose, log_file=args.log_file)
+    logger.debug("Arguments: %s", args)
+    logger.debug("LOGGERS: %s", logging.root.manager.loggerDict)
+
+    # Open connection to ES
+    if args.dryrun == 0:
+        addr = f"https://{args.es_host}:{args.es_port}"
+        logger.info("Trying to connect to Elasticsearch at %s...", addr)
+        es_import_conn = ElasticImportConn(
+            addr, args.es_cert_fp, basic_auth=(args.es_usr, args.es_pwd)
+        )
+    else:
+        es_import_conn = None
+
+    # Load YAML import bundle
+    import_bundle = make_import_bundle_from_files(args.files, check=True)
+    all_bundled_files = import_bundle.get_files()
+
+    # CHECK
+    for f in all_bundled_files:
+        if not f.exists():
+            msg = f"Path {f.path} does not point to a valid file."
+            raise RuntimeError(msg)
+
+    # LIST
+    if not args.no_list:
+        for f in all_bundled_files:
+            logger.info("Will import %s.", f.path)
+
+    # Ask confirmation for importing
+    if not args.no_confirm:
+        answer: str = "maybe"
+        while answer not in ["", "n", "y"]:
+            answer = input("Import (y/N)? ").lower()
+        if answer != "y":
+            logger.info("Import canceled.")
+            sys.exit(0)
+
+    # IMPORT
+    # Loop on file categories
+    for cat in import_bundle.analyses.get_all_categories():
+        # Import all files in this category.
+        for f in import_bundle.get_files(cat):
+            logger.info("Import %s files from %s.", cat, f.path)
+            # First, generate a unique index name for each file.
+            file_index = generate_unique_index(args.es_index_prefix, f.path)
+            # Then, import the analysis metadata into a dedicated index.
+            import_analysis_metadata(
+                es_import_conn,
+                args.es_index_prefix,
+                file_index,
+                f,
+                cat,
+                args.dryrun,
+            )
+            # Finally, import the file in its own index.
+            globals()[f"import_{cat}_file"](
+                es_import_conn=es_import_conn,
+                file_index=file_index,
+                file=f,
+                dryrun=args.dryrun,
+            )
+
+    # Import processes
+    logger.info("Importing wet processes.")
+    logger.info(
+        "Wet processes IDs = %s",
+        str(import_bundle.wet_processes.get_process_ids()),
+    )
+    import_processes(
+        es_import_conn,
+        f"{args.es_index_prefix}-wet_processes",
+        import_bundle.wet_processes,
+    )
+
+    logger.info("Importing bio info processes.")
+    logger.info(
+        "Bio info processes IDs = %s",
+        str(import_bundle.bi_processes.get_process_ids()),
+    )
+    import_processes(
+        es_import_conn,
+        f"{args.es_index_prefix}-bi_processes",
+        import_bundle.bi_processes,
+    )


+if __name__ == "__main__":
+    main()
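
In main() above, each data file is imported into its own index and the per-category importer is resolved dynamically: the loop looks up import_<category>_file in globals(), which is how import_vcf_file and import_cov_file get selected. The index name itself comes from generate_unique_index(). A standalone sketch of that naming scheme, with an assumed prefix and an example path (neither is a default shipped by genelastic):

import datetime
import hashlib
from pathlib import Path

index_prefix = "genelastic"                 # assumed --es-index-prefix value
filepath = Path("/data/run42/sample1.vcf")  # example data file path

# Same recipe as generate_unique_index() above: UTC date plus an MD5 hash of
# the file path, so the same file imported on the same day maps to the same
# index name.
current_date = datetime.datetime.now(tz=datetime.UTC).strftime("%Y%m%d")
hashed_filepath = hashlib.md5(
    str(filepath).encode("utf-8"), usedforsecurity=False
).hexdigest()
print(f"{index_prefix}-file-{current_date}-{hashed_filepath}")
# e.g. genelastic-file-20250101-<32 hex chars>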