genelastic 0.8.0__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- genelastic/api/.env +4 -0
- genelastic/api/cli_start_api.py +2 -2
- genelastic/api/errors.py +52 -0
- genelastic/api/extends/example.py +0 -6
- genelastic/api/extends/example.yml +0 -20
- genelastic/api/routes.py +313 -181
- genelastic/api/server.py +8 -3
- genelastic/api/specification.yml +343 -181
- genelastic/common/__init__.py +0 -44
- genelastic/common/cli.py +48 -0
- genelastic/common/elastic.py +374 -46
- genelastic/common/exceptions.py +34 -2
- genelastic/common/server.py +9 -1
- genelastic/common/types.py +1 -14
- genelastic/import_data/__init__.py +0 -27
- genelastic/import_data/checker.py +99 -0
- genelastic/import_data/checker_observer.py +13 -0
- genelastic/import_data/cli/__init__.py +0 -0
- genelastic/import_data/cli/cli_check.py +136 -0
- genelastic/import_data/{cli_gen_data.py → cli/gen_data.py} +4 -4
- genelastic/import_data/cli/import_data.py +346 -0
- genelastic/import_data/cli/info.py +247 -0
- genelastic/import_data/{cli_integrity.py → cli/integrity.py} +29 -7
- genelastic/import_data/cli/validate.py +146 -0
- genelastic/import_data/collect.py +185 -0
- genelastic/import_data/constants.py +136 -11
- genelastic/import_data/import_bundle.py +102 -59
- genelastic/import_data/import_bundle_factory.py +70 -149
- genelastic/import_data/importers/__init__.py +0 -0
- genelastic/import_data/importers/importer_base.py +131 -0
- genelastic/import_data/importers/importer_factory.py +85 -0
- genelastic/import_data/importers/importer_types.py +223 -0
- genelastic/import_data/logger.py +2 -1
- genelastic/import_data/models/__init__.py +0 -0
- genelastic/import_data/models/analyses.py +178 -0
- genelastic/import_data/models/analysis.py +144 -0
- genelastic/import_data/models/data_file.py +110 -0
- genelastic/import_data/models/process.py +45 -0
- genelastic/import_data/models/processes.py +84 -0
- genelastic/import_data/models/tags.py +170 -0
- genelastic/import_data/models/unique_list.py +109 -0
- genelastic/import_data/models/validate.py +26 -0
- genelastic/import_data/patterns.py +90 -0
- genelastic/import_data/random_bundle.py +10 -8
- genelastic/import_data/resolve.py +157 -0
- genelastic/ui/.env +1 -0
- genelastic/ui/cli_start_ui.py +4 -2
- genelastic/ui/routes.py +289 -42
- genelastic/ui/static/cea-cnrgh.ico +0 -0
- genelastic/ui/static/cea.ico +0 -0
- genelastic/ui/static/layout.ico +0 -0
- genelastic/ui/static/novaseq6000.png +0 -0
- genelastic/ui/static/style.css +430 -0
- genelastic/ui/static/ui.js +458 -0
- genelastic/ui/templates/analyses.html +96 -9
- genelastic/ui/templates/analysis_detail.html +44 -0
- genelastic/ui/templates/bi_process_detail.html +129 -0
- genelastic/ui/templates/bi_processes.html +114 -9
- genelastic/ui/templates/explorer.html +356 -0
- genelastic/ui/templates/home.html +205 -2
- genelastic/ui/templates/layout.html +148 -29
- genelastic/ui/templates/version.html +19 -7
- genelastic/ui/templates/wet_process_detail.html +131 -0
- genelastic/ui/templates/wet_processes.html +114 -9
- genelastic-0.9.0.dist-info/METADATA +686 -0
- genelastic-0.9.0.dist-info/RECORD +76 -0
- genelastic-0.9.0.dist-info/WHEEL +4 -0
- genelastic-0.9.0.dist-info/entry_points.txt +10 -0
- genelastic-0.9.0.dist-info/licenses/LICENSE +519 -0
- genelastic/import_data/analyses.py +0 -69
- genelastic/import_data/analysis.py +0 -205
- genelastic/import_data/bi_process.py +0 -27
- genelastic/import_data/bi_processes.py +0 -49
- genelastic/import_data/cli_import.py +0 -379
- genelastic/import_data/cli_info.py +0 -256
- genelastic/import_data/cli_validate.py +0 -54
- genelastic/import_data/data_file.py +0 -87
- genelastic/import_data/filename_pattern.py +0 -57
- genelastic/import_data/tags.py +0 -123
- genelastic/import_data/wet_process.py +0 -28
- genelastic/import_data/wet_processes.py +0 -53
- genelastic-0.8.0.dist-info/METADATA +0 -109
- genelastic-0.8.0.dist-info/RECORD +0 -52
- genelastic-0.8.0.dist-info/WHEEL +0 -5
- genelastic-0.8.0.dist-info/entry_points.txt +0 -8
- genelastic-0.8.0.dist-info/top_level.txt +0 -1
genelastic/import_data/patterns.py
ADDED
@@ -0,0 +1,90 @@
+import re
+from pathlib import Path
+
+from genelastic.common.types import Metadata
+from genelastic.import_data.constants import TOOLS_SUFFIX_RE
+
+
+class FilenamePattern:
+    """Utility class to extract metadata from filenames based on a regex
+    pattern.
+    """
+
+    def __init__(self, pattern: str) -> None:
+        """Initializes a FilenamePattern instance.
+
+        :param pattern: The regex pattern used to extract metadata from
+            filenames.
+        """
+        self._re = re.compile(pattern)
+
+    def extract_metadata(self, filename: str) -> Metadata:
+        """Extracts metadata from the given filename using the defined pattern.
+
+        :param filename: The filename from which metadata should be extracted.
+        :raises RuntimeError: If the filename does not match the pattern.
+        :returns: A dictionary containing the extracted metadata.
+        """
+        m = self._re.search(filename)
+        if not m:
+            msg = (
+                f"Failed parsing filename '{filename}' with pattern "
+                f"'{self._re.pattern}'."
+            )
+            raise RuntimeError(msg)
+
+        # Convert necessary values.
+        metadata = m.groupdict()
+        if "cov_depth" in metadata:
+            metadata["cov_depth"] = int(metadata["cov_depth"])
+
+        return metadata
+
+    def matches_pattern(self, filename: str) -> bool:
+        """Checks whether the given filename matches the defined pattern.
+
+        :param filename: The filename to check.
+        :returns: True if the filename matches the pattern, False otherwise.
+        """
+        return bool(self._re.fullmatch(filename))
+
+
+class MetricsPattern:
+    """Utility class to extract tool/version metadata from filenames with a
+    ``.metrics`` suffix.
+    """
+
+    @staticmethod
+    def extract_metadata(file: Path) -> list[dict[str, str]] | None:
+        """Extracts metadata from a filename based on the ``.metrics`` suffix.
+
+        :param file: The path to the file to be analyzed.
+        :raises RuntimeError: If the suffix is malformed or cannot be parsed.
+        :returns:
+            - None if the file does not have a ``.metrics`` prefix,
+            - An empty list if the prefix is present but no metadata is found,
+            - A list of dictionaries with ``tool`` and ``version`` keys if
+              metadata is extracted.
+        """
+        if not file.suffixes or not file.suffixes[0].startswith(".metrics"):
+            return None
+
+        tools_str = file.suffixes[0].replace(".metrics", "")
+        matches = list(re.finditer(TOOLS_SUFFIX_RE, tools_str))
+        matched_str = "".join(m.group(0) for m in matches)
+
+        if matched_str != tools_str:
+            msg = (
+                f"Failed extracting metrics from filename '{file}': "
+                f"'{tools_str}' does not fully match pattern "
+                f"'{TOOLS_SUFFIX_RE}'."
+            )
+            raise RuntimeError(msg)
+
+        return [
+            {
+                "tool": m.group("tool"),
+                "version": m.group("version").replace("-", "."),
+            }
+            for m in matches
+        ]
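A minimal usage sketch of the new FilenamePattern class, assuming genelastic 0.9.0 is installed; the pattern and filename below are made-up illustrations, not values shipped with the package:

# Illustrative only: the pattern and filename are hypothetical.
from genelastic.import_data.patterns import FilenamePattern

# Named groups become metadata keys; extract_metadata() coerces 'cov_depth' to int.
pattern = FilenamePattern(r"(?P<sample_name>[^_]+)_(?P<cov_depth>\d+)x\.vcf\.gz")

filename = "HG0003_30x.vcf.gz"
if pattern.matches_pattern(filename):
    print(pattern.extract_metadata(filename))
    # {'sample_name': 'HG0003', 'cov_depth': 30}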
genelastic/import_data/random_bundle.py
CHANGED
@@ -1,4 +1,3 @@
-import copy
 import random
 import sys
 import tempfile
@@ -16,7 +15,7 @@ from biophony import (
     MutSimParams,
 )
 
-from genelastic.common import (
+from genelastic.common.types import (
     RandomAnalysisData,
     RandomBiProcessData,
     RandomWetProcessData,
@@ -170,13 +169,16 @@ class RandomBiProcess(RandomBundleItem):
 
     def _generate_steps(self) -> None:
         steps_count = random.randint(1, 5)
-        random_steps =
-        for
-
-
+        random_steps = random.sample(self.STEPS, steps_count)
+        for rs in random_steps:
+            v = self._generate_version(random.choice(range(1, 5)))
+            self._steps.append(
+                {
+                    "version": v,
+                    "name": str(rs["name"]),
+                    "cmd": random.choice(rs["cmd"]),
+                }
             )
-        random_step["cmd"] = random.choice(random_step["cmd"])
-        self._steps.append(random_step)
 
     def to_dict(self) -> RandomBiProcessData:
        """Return the generated bi informatics process as a dictionary."""
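For reference, a standalone sketch of the reworked step generation shown above, using a hypothetical STEPS table and a stand-in for _generate_version(); the real values live on RandomBiProcess in random_bundle.py:

# Hypothetical STEPS table; RandomBiProcess.STEPS in the package differs.
import random

STEPS = [
    {"name": "align", "cmd": ["bwa mem", "minimap2"]},
    {"name": "call", "cmd": ["gatk HaplotypeCaller", "bcftools call"]},
]

steps = []
for rs in random.sample(STEPS, random.randint(1, len(STEPS))):
    # Stand-in for _generate_version(): 1 to 4 dotted numeric parts.
    version = ".".join(str(random.randint(0, 9)) for _ in range(random.choice(range(1, 5))))
    steps.append(
        {
            "version": version,
            "name": str(rs["name"]),
            "cmd": random.choice(rs["cmd"]),
        }
    )
print(steps)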
genelastic/import_data/resolve.py
ADDED
@@ -0,0 +1,157 @@
+import logging
+import re
+
+from genelastic.common.exceptions import (
+    FilenamePatternResolveError,
+    InvalidFilePrefixError,
+)
+from genelastic.common.types import Metadata
+from genelastic.import_data.constants import (
+    FILE_SUFFIXES_RE,
+)
+from genelastic.import_data.models.tags import Tags
+from genelastic.import_data.patterns import FilenamePattern
+
+logger = logging.getLogger("genelastic")
+
+
+def validate_file_prefix(file_prefix: str, tags: Tags) -> None:
+    """Validate a filename prefix for correctness.
+
+    The file prefix must be non-empty and contain only defined tags,
+    with no duplicates. If any of these rules are violated, an
+    `InvalidFilePrefixError`` is raised.
+
+    :param file_prefix: The filename prefix containing tags to validate
+        (e.g. ``%S_%F_%W_%B_%D_%R_rep-1``).
+    :param tags: The tag definitions used to verify whether tags are defined.
+    :raises InvalidFilePrefixError: If the file prefix is invalid.
+    """
+    seen_tags = set()
+
+    if not file_prefix:
+        msg = "File prefix is empty."
+        raise InvalidFilePrefixError(msg)
+
+    # Check all tags in the file prefix:
+    # they must be defined and appear only once.
+    for match in re.finditer(tags.search_regex, file_prefix):
+        tag_name = match.group()
+        start = match.start() + 1
+        end = match.end()
+
+        if tag_name not in tags:
+            msg = (
+                f"File prefix '{file_prefix}' has an unknown tag "
+                f"'{tag_name}' at position {start}-{end}."
+            )
+            raise InvalidFilePrefixError(msg)
+
+        if tag_name in seen_tags:
+            msg = (
+                f"File prefix '{file_prefix}' has a duplicated tag "
+                f"'{tag_name}' at position {start}-{end}."
+            )
+            raise InvalidFilePrefixError(msg)
+        seen_tags.add(tag_name)
+
+
+def resolve_analysis_id(
+    file_prefix: str, tags: Tags, metadata: Metadata
+) -> str:
+    """Resolve an analysis identifier from a filename prefix and metadata.
+
+    Each tag in the file prefix is replaced with its corresponding value from
+    ``metadata``.
+
+    :param file_prefix: A filename prefix containing tags
+        (e.g. ``%S_%F_%W_%B_%D_%R_rep-1``).
+    :param tags: The tag definitions used to map tags to metadata fields.
+    :param metadata: A dictionary mapping metadata fields to their values.
+    :return: The resolved analysis identifier string where all tags have been
+        replaced by their metadata values.
+    """
+    analysis_id = file_prefix
+    for match in re.finditer(tags.search_regex, file_prefix):
+        tag_name = match.group()
+        tag_field = tags[tag_name]["field"]
+        analysis_id = analysis_id.replace(tag_name, str(metadata[tag_field]))
+    return analysis_id
+
+
+def resolve_filename_pattern(
+    file_prefix: str,
+    tags: Tags,
+    metadata: Metadata,
+    suffix: str | None = None,
+    *,
+    strict: bool = False,
+) -> FilenamePattern:
+    """Build a regex pattern from a filename prefix containing tags.
+
+    Each tag in the file prefix is replaced with a named capturing group.
+    The group name corresponds to the metadata field associated with the tag,
+    and the group regex is chosen as follows:
+
+    - If the field has a value in ``metadata``, the tag becomes a group that
+      matches exactly this value (e.g. ``(?P<sample_name>HG0003)``).
+    - Otherwise, the tag becomes a group that matches the tag's default regex
+      (e.g. ``(?P<sample_name>[^_]+)``), unless ``strict=True``,
+      in which case a ``FilenamePatternResolveError`` is raised.
+
+    The resulting pattern is anchored at the start and end of the string,
+    includes the optional ``suffix`` if provided, and always appends
+    ``FILE_SUFFIXES_RE`` at the end.
+
+    :param file_prefix: A string containing tags that describe the expected
+        structure of filenames (e.g. ``%S_%F_%W_%B_%D_%R_rep-1``).
+    :param tags: The tag definitions that map tag names to metadata fields
+        and default regexes.
+    :param metadata: Known metadata values used to restrict tag matches when
+        available.
+    :param suffix: Optional suffix to append to the regex after replacing tags.
+    :param strict: If True, all tags must have a corresponding value in
+        ``metadata``; otherwise a ``FilenamePatternResolveError`` exception is
+        raised.
+    :raises FilenamePatternResolveError: If ``strict=True`` and some tag fields
+        are missing from ``metadata``.
+    :return: A ``FilenamePattern`` object encapsulating the compiled regex.
+    """
+    filename_re = file_prefix
+    undefined_fields = []
+
+    # Expand each tag in the file prefix into a named capturing group.
+    # If a metadata value is provided, the group matches it exactly.
+    # Otherwise, fall back to the tag's default regex (or record it as
+    # undefined if strict).
+    for match in re.finditer(tags.search_regex, file_prefix):
+        tag_name = match.group()
+        tag_field = tags[tag_name]["field"]
+        tag_regex = tags[tag_name]["regex"]
+
+        tag_field_value = metadata.get(tag_field)
+        if not tag_field_value and strict:
+            undefined_fields.append(tag_field)
+
+        tag_field_regex = f"(?P<{tag_field}>{tag_field_value or tag_regex})"
+        filename_re = filename_re.replace(tag_name, tag_field_regex)
+
+    if undefined_fields:
+        formatted_fields = ", ".join(sorted(undefined_fields))
+        msg = (
+            f"In file prefix '{file_prefix}': "
+            f"no value in metadata found for field(s): {formatted_fields}. "
+            f"In single-match mode, "
+            f"all fields must have a corresponding value defined."
+        )
+        raise FilenamePatternResolveError(msg)
+
+    # Finalize the regex: append the optional suffix, enforce start (^) and end
+    # ($) anchors, and include FILE_SUFFIXES_RE to capture allowed file
+    # extensions.
+    parts = [f"^{filename_re}"]
+    if suffix:
+        # Avoid double anchors if suffix already ends with '$'.
+        parts.append(suffix.rstrip("$"))
+    parts.append(f"{FILE_SUFFIXES_RE}$")
+    return FilenamePattern("".join(parts))
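The resolve_filename_pattern() docstring above describes how each tag expands into a named capturing group. A standalone illustration of that expansion with a hypothetical tag table (the real function takes a Tags object and always appends FILE_SUFFIXES_RE):

# Hypothetical tag table; the package uses its Tags model instead.
import re

TAGS = {
    "%S": {"field": "sample_name", "regex": r"[^_]+"},
    "%D": {"field": "cov_depth", "regex": r"\d+"},
}

def expand(prefix: str, metadata: dict[str, str]) -> str:
    """Replace each tag with a named group: the metadata value if known,
    otherwise the tag's default regex."""
    pattern = prefix
    for tag, spec in TAGS.items():
        value = metadata.get(spec["field"])
        group = f"(?P<{spec['field']}>{value or spec['regex']})"
        pattern = pattern.replace(tag, group)
    return f"^{pattern}$"

# Known metadata pins a tag to an exact value; unknown tags keep their default regex.
print(expand("%S_%Dx", {"sample_name": "HG0003"}))
# ^(?P<sample_name>HG0003)_(?P<cov_depth>\d+)x$
m = re.fullmatch(expand("%S_%Dx", {}), "HG0003_30x")
print(m.groupdict())  # {'sample_name': 'HG0003', 'cov_depth': '30'}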
genelastic/ui/.env
ADDED
@@ -0,0 +1 @@
+GENUI_API_URL="http://127.0.0.1:8000/api/"
genelastic/ui/cli_start_ui.py
CHANGED
@@ -1,4 +1,4 @@
-from genelastic.common import parse_server_launch_args
+from genelastic.common.cli import parse_server_launch_args
 from genelastic.common.server import start_dev_server, start_prod_server
 
 
@@ -6,7 +6,9 @@ def main() -> None:
     app_module = "genelastic.ui.server:app"
     args = parse_server_launch_args("Start UI server.", 8001)
     if args.env == "dev":
-        start_dev_server(
+        start_dev_server(
+            app_module, args, reload_includes=["*.html", "*.js", "*.css"]
+        )
     elif args.env == "prod":
         start_prod_server(app_module, args)
     else:
genelastic/ui/routes.py
CHANGED
@@ -1,36 +1,65 @@
+from typing import Any
+
 import requests
-from flask import Blueprint, current_app, render_template
+from flask import Blueprint, current_app, render_template, request
 
 routes_bp = Blueprint("routes", __name__)
 
 
+def clean_field_name(field: str) -> str:
+    return field.replace("metadata.", "").replace("_", " ").title()
+
+
 @routes_bp.route("/")
 def home() -> str:
     api_url = current_app.config["GENUI_API_URL"]
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+    response = requests.get(
+        f"{api_url}analyses/count_key_value_analyses_index", timeout=20
+    ).json()
+    analyse_count = response.get("result", {})
+
+    response = requests.get(
+        f"{api_url}wet_processes/count_key_value_wet_processes_index",
+        timeout=20,
+    ).json()
+    wet_count = response.get("result", {})
+
+    response = requests.get(
+        f"{api_url}bi_processes/count_key_value_bi_processes_index", timeout=20
+    ).json()
+    bi_count = response.get("result", {})
+
+    total_analyses = (
+        sum(sum(values.values()) for values in analyse_count.values())
+        if analyse_count
+        else 0
+    )
+    total_wet = (
+        sum(sum(values.values()) for values in wet_count.values())
+        if wet_count
+        else 0
+    )
+    total_bi = (
+        sum(sum(values.values()) for values in bi_count.values())
+        if bi_count
+        else 0
+    )
+
+    total_analyses_per_key = {}
+    if analyse_count:
+        for key, values in analyse_count.items():
+            total_analyses_per_key[key] = sum(values.values())
+
     return render_template(
         "home.html",
-
-
-
-
+        analyse_count=analyse_count,
+        wet_count=wet_count,
+        bi_count=bi_count,
+        total_analyses=total_analyses,
+        total_wet=total_wet,
+        total_bi=total_bi,
+        total_analyses_per_key=total_analyses_per_key,
    )
 
 
@@ -38,49 +67,267 @@ def home() -> str:
 def show_analyses() -> str:
     api_url = current_app.config["GENUI_API_URL"]
     try:
-
-        analyses =
+        response = requests.get(f"{api_url}analyses", timeout=20)
+        analyses = response.json()["result"]
     except requests.exceptions.RequestException:
         analyses = ["Error fetching data."]
 
     return render_template("analyses.html", analyses=analyses)
 
 
-@routes_bp.route("/
+@routes_bp.route("/analysis/<analysis_id>")
+def show_analysis_detail(analysis_id: str) -> str:
+    api_url = current_app.config["GENUI_API_URL"]
+    source = request.args.get(
+        "source"
+    )  # fetch ?source=wet or ?source=bi if present
+
+    try:
+        wet_response = requests.get(f"{api_url}wet_processes", timeout=20)
+        bi_response = requests.get(f"{api_url}bi_processes", timeout=20)
+        wet_processes = wet_response.json()["result"]
+        bi_processes = bi_response.json()["result"]
+    except requests.exceptions.RequestException:
+        wet_processes = []
+        bi_processes = []
+
+    matched_wet = [wp for wp in wet_processes if wp in analysis_id]
+    matched_bi = [bp for bp in bi_processes if bp in analysis_id]
+
+    return render_template(
+        "analysis_detail.html",
+        analysis_id=analysis_id,
+        wet_processes=matched_wet,
+        bi_processes=matched_bi,
+        source=source,
+    )
+
+
+@routes_bp.route("/bi_processes", methods=["GET"])
 def show_bi_processes() -> str:
     api_url = current_app.config["GENUI_API_URL"]
+    selected_bi_processes = request.args.getlist("bi_processes")
+
     try:
-
-
+        bi_processes, analyses = get_processes_and_analyses(
+            api_url, "bi_processes", selected_bi_processes
         )
-        bi_processes = bi_processes_reponse.json()
     except requests.exceptions.RequestException:
-        bi_processes = [
+        bi_processes = []
+        analyses = []
 
-    return render_template(
+    return render_template(
+        "bi_processes.html",
+        bi_processes=bi_processes,
+        selected_bi_processes=selected_bi_processes,
+        analyses=analyses,
+    )
 
 
-@routes_bp.route("/wet_processes")
+@routes_bp.route("/wet_processes", methods=["GET"])
 def show_wet_processes() -> str:
     api_url = current_app.config["GENUI_API_URL"]
+    selected_wet_processes = request.args.getlist("wet_processes")
+
+    try:
+        wet_processes, analyses = get_processes_and_analyses(
+            api_url, "wet_processes", selected_wet_processes
+        )
+    except requests.exceptions.RequestException:
+        wet_processes = []
+        analyses = []
+
+    return render_template(
+        "wet_processes.html",
+        wet_processes=wet_processes,
+        selected_wet_processes=selected_wet_processes,
+        analyses=analyses,
+    )
+
+
+def get_processes_and_analyses(
+    api_url: str, process_type: str, selected_processes: list[str]
+) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
+    analyses = []
+
+    try:
+        processes_response = requests.get(
+            f"{api_url}{process_type}", timeout=20
+        )
+        processes = processes_response.json()["result"]
+
+        if selected_processes:
+            analyses_response = requests.get(f"{api_url}analyses", timeout=20)
+            all_analyses = analyses_response.json()["result"]
+            analyses = [
+                analysis
+                for analysis in all_analyses
+                if any(process in analysis for process in selected_processes)
+            ]
+    except requests.exceptions.RequestException:
+        processes = []
+        analyses = []
+
+    return processes, analyses
+
+
+@routes_bp.route("/wet_process/details/<wet_process_id>")
+def wet_process_detail(wet_process_id: str) -> str:
+    api_url = current_app.config["GENUI_API_URL"]
+
     try:
-
-            f"{api_url}wet_processes", timeout=20
+        wet_response = requests.get(
+            f"{api_url}wet_processes/{wet_process_id}", timeout=20
+        )
+        wet_response.raise_for_status()
+        wet_process = wet_response.json()["result"]
+    except requests.exceptions.HTTPError as e:
+        return render_template(
+            "wet_process_detail.html",
+            proc_id=wet_process_id,
+            error=e.response.json(),
         )
-
+
+    # Fetch all bi processes for the selection
+    try:
+        bi_list_response = requests.get(f"{api_url}bi_processes", timeout=20)
+        bi_processes = bi_list_response.json()["result"]
     except requests.exceptions.RequestException:
-
+        bi_processes = []
 
-
+    # If a bi_process_id is passed as a GET parameter, load it for comparison
+    bi_process_id = request.args.get("compare_with")
+    bi_process_data = None
+    if bi_process_id:
+        try:
+            bi_response = requests.get(
+                f"{api_url}bi_processes/{bi_process_id}", timeout=20
+            )
+            bi_response.raise_for_status()
+            bi_process_data = bi_response.json()["result"]
+        except requests.exceptions.RequestException:
+            bi_process_data = {"error": "Could not fetch bi process"}
+
+    return render_template(
+        "wet_process_detail.html",
+        proc_id=wet_process_id,
+        wet_process=wet_process,
+        bi_processes=bi_processes,
+        selected_bi=bi_process_id,
+        bi_process_data=bi_process_data,
+    )
+
+
+@routes_bp.route("/bi_process/details/<bi_process_id>")
+def bi_process_detail(bi_process_id: str) -> str:
+    api_url = current_app.config["GENUI_API_URL"]
+
+    # Fetch the main bi process data
+    try:
+        response = requests.get(
+            f"{api_url}bi_processes/{bi_process_id}", timeout=20
+        )
+        response.raise_for_status()
+        bi_process = response.json()["result"]
+    except requests.exceptions.HTTPError as e:
+        return render_template(
+            "bi_process_detail.html",
+            proc_id=bi_process_id,
+            error=e.response.json(),
+        )
+
+    # Load all wet processes for the drop-down menu
+    try:
+        wet_list_response = requests.get(f"{api_url}wet_processes", timeout=20)
+        wet_processes = wet_list_response.json()["result"]
+    except requests.exceptions.RequestException:
+        wet_processes = []
+
+    # Fetch the selected wet process for comparison
+    wet_process_id = request.args.get("compare_with")
+    wet_process_data = None
+    if wet_process_id:
+        try:
+            wet_response = requests.get(
+                f"{api_url}wet_processes/{wet_process_id}", timeout=20
+            )
+            wet_response.raise_for_status()
+            wet_process_data = wet_response.json()["result"]
+        except requests.exceptions.RequestException:
+            wet_process_data = {"error": "Could not fetch wet process"}
+
+    return render_template(
+        "bi_process_detail.html",
+        proc_id=bi_process_id,
+        bi_process=bi_process,
+        wet_processes=wet_processes,
+        selected_wet=wet_process_id,
+        wet_process_data=wet_process_data,
+    )
 
 
 @routes_bp.route("/version")
-def
+def version() -> str:
+    api_url = current_app.config["GENUI_API_URL"]
+    try:
+        response = requests.get(f"{api_url}version", timeout=20)
+        vers = response.json()["result"].get("version", "Version not found")
+    except requests.exceptions.RequestException:
+        vers = "Error fetching version."
+
+    return render_template("version.html", version=vers)
+
+
+@routes_bp.route("/search_analyses")
+def search_analyses() -> dict[str, list[str]]:
     api_url = current_app.config["GENUI_API_URL"]
+    query = request.args.get("q", "").lower()
+
     try:
-
-
+        analyses_response = requests.get(f"{api_url}analyses", timeout=20)
+        analyses = analyses_response.json()["result"]
     except requests.exceptions.RequestException:
-
+        return {"results": []}
+
+    filtered = [a for a in analyses if query in a.lower()]
+
+    filtered = filtered[:10]
+
+    return {"results": filtered}
+
+
+@routes_bp.route("/explorer")
+def explorer() -> str:
+    api_url = current_app.config["GENUI_API_URL"]
+
+    try:
+        wet_count = requests.get(
+            f"{api_url}wet_processes/count_key_value_wet_processes_index",
+            timeout=20,
+        ).json()["result"]
+    except requests.exceptions.RequestException:
+        wet_count = {}
+
+    try:
+        bi_count = requests.get(
+            f"{api_url}bi_processes/count_key_value_bi_processes_index",
+            timeout=20,
+        ).json()["result"]
+    except requests.exceptions.RequestException:
+        bi_count = {}
+
+    # Merge the two dictionaries and exclude fields containing "index"
+    metadata_counts: dict[str, dict[str | None, int]] = {}
+
+    for dataset in [wet_count, bi_count]:
+        for key, values in dataset.items():
+            if "index" in key:
+                continue  # skip technical fields
+            if key not in metadata_counts:
+                metadata_counts[key] = {}
+            for val, count in values.items():
+                metadata_counts[key][val] = (
+                    metadata_counts[key].get(val, 0) + count
+                )
 
-    return render_template("
+    return render_template("explorer.html", metadata_counts=metadata_counts)
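The home() and explorer() views above aggregate the {field: {value: count}} dictionaries returned by the API's count_key_value_* endpoints. A standalone sketch of those two aggregations with made-up counts (the field names and values are illustrative only):

# Hypothetical count_key_value_* responses: {field: {value: count}}.
analyse_count = {
    "metadata.sequencer": {"novaseq6000": 3, "hiseq": 1},
    "metadata.sample_type": {"blood": 4},
}
wet_count = {"metadata.kit": {"kapa": 2}}
bi_count = {"metadata.kit": {"kapa": 1}, "bi_index": {"x": 5}}

# home(): one grand total plus a per-field total.
total_analyses = sum(sum(values.values()) for values in analyse_count.values())
total_analyses_per_key = {key: sum(values.values()) for key, values in analyse_count.items()}
print(total_analyses, total_analyses_per_key)
# 8 {'metadata.sequencer': 4, 'metadata.sample_type': 4}

# explorer(): merge wet and bi counts, skipping technical "*index*" fields.
metadata_counts: dict[str, dict[str, int]] = {}
for dataset in [wet_count, bi_count]:
    for key, values in dataset.items():
        if "index" in key:
            continue  # skip technical fields
        if key not in metadata_counts:
            metadata_counts[key] = {}
        for val, count in values.items():
            metadata_counts[key][val] = metadata_counts[key].get(val, 0) + count
print(metadata_counts)  # {'metadata.kit': {'kapa': 3}}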
Binary file
Binary file
Binary file
Binary file