ssi-analysis-result-parsers 0.0.9__py3-none-any.whl → 0.0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ssi_analysis_result_parsers/Ecoli_parser.py +669 -0
- ssi_analysis_result_parsers/Legionella_parser.py +3 -1
- ssi_analysis_result_parsers/Nmeningitidis_parser.py +178 -0
- ssi_analysis_result_parsers/Spyogenes_parser.py +0 -1
- ssi_analysis_result_parsers/__init__.py +1 -1
- ssi_analysis_result_parsers/_modidx.py +43 -1
- {ssi_analysis_result_parsers-0.0.9.dist-info → ssi_analysis_result_parsers-0.0.11.dist-info}/METADATA +1 -1
- {ssi_analysis_result_parsers-0.0.9.dist-info → ssi_analysis_result_parsers-0.0.11.dist-info}/RECORD +22 -10
- {ssi_analysis_result_parsers-0.0.9.dist-info → ssi_analysis_result_parsers-0.0.11.dist-info}/entry_points.txt +3 -0
- {ssi_analysis_result_parsers-0.0.9.dist-info → ssi_analysis_result_parsers-0.0.11.dist-info}/top_level.txt +1 -0
- test_input/Ecoli/ERR14229029.res +1 -0
- test_input/Ecoli/ERR3528110.res +4 -0
- test_input/Ecoli/samplesheet.tsv +3 -0
- test_input/Legionella/test.tsv +2 -0
- test_input/Nmeningitidis/batch_parser_file_paths.tsv +6 -0
- test_input/Nmeningitidis/meningotype/meningotype1.tsv +2 -0
- test_input/Nmeningitidis/meningotype/meningotype2.tsv +2 -0
- test_input/Nmeningitidis/meningotype/meningotype3.tsv +2 -0
- test_input/Nmeningitidis/neisseria_mlst_scheme.tsv +18415 -0
- test_output/Ecoli/KMA_cases_parser.tsv +3 -0
- {ssi_analysis_result_parsers-0.0.9.dist-info → ssi_analysis_result_parsers-0.0.11.dist-info}/WHEEL +0 -0
- {ssi_analysis_result_parsers-0.0.9.dist-info → ssi_analysis_result_parsers-0.0.11.dist-info}/licenses/LICENSE +0 -0
--- /dev/null
+++ b/ssi_analysis_result_parsers/Ecoli_parser.py
@@ -0,0 +1,669 @@
+# AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/49_Ecoli_parser.ipynb.
+
+# %% auto 0
+__all__ = ['thresholds', 'samplesheet_path', 'output_dir', 'output_path', 'sample_sheet_df', 'sample_output_df', 'original_cols',
+           'output_cols', 'output_initial_cols', 'output_specific_cols', 'ERR3528110_res_path', 'ERR3528110_input_df',
+           'ERR3528110_row', 'gene_hits', 'parsed_hits', 'O_gene_alleles', 'H_gene_alleles', 'O_type', 'H_type',
+           'O_gene_keys', 'H_gene_keys', 'O_genes_no', 'H_genes_no', 'ERR14229029_row', 'ERR14229029_expected_values',
+           'ERR14229029_values', 'test_cases', 'setup_logging', 'get_threshold', 'process_res_file', 'EcoliResults',
+           'ecoli_parser']
+
+# %% ../nbs/49_Ecoli_parser.ipynb 3
+import os
+import sys
+import pandas as pd
+from pathlib import Path
+import logging
+from datetime import datetime
+from typing import List, Dict
+from fastcore.script import call_parse
+
+# import functions from core module (optional, but most likely needed).
+from . import core
+
+# %% ../nbs/49_Ecoli_parser.ipynb 6
+thresholds = {
+    "stx": [98, 98],
+    "wzx": [98, 98],
+    "wzy": [98, 98],
+    "wzt": [98, 98],
+    "wzm": [98, 98],
+    "fliC": [90, 90],
+    "fli": [90, 90],
+    "eae": [95, 95],
+    "ehxA": [95, 95],
+    "other": [98, 98],
+}
+
+# %% ../nbs/49_Ecoli_parser.ipynb 9
+def setup_logging(log_dir: str, sample_name: str) -> None:
+    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+    os.makedirs(log_dir, exist_ok=True)
+    log_file = os.path.join(log_dir, f"{sample_name}_kma_fbi.log")
+
+    logger = logging.getLogger()
+    while logger.hasHandlers():
+        logger.removeHandler(logger.handlers[0])
+
+    logging.basicConfig(
+        filename=log_file,
+        filemode="a",
+        format="%(asctime)s - %(levelname)s - %(message)s",
+        level=logging.INFO,
+    )
+
+    console_handler = logging.StreamHandler(sys.stdout)
+    console_handler.setFormatter(logging.Formatter("%(message)s"))
+    logger.addHandler(console_handler)
+
+    logging.info(f"Logging started for {log_file}")
+
+# %% ../nbs/49_Ecoli_parser.ipynb 11
+def get_threshold(template_name: str, thresholds: Dict[str, List[int]]) -> List[int]:
+    """
+    Returns the coverage and identity threshold for a given gene.
+
+    Args:
+        template_name (str): Name of the template (gene) from the .res file.
+        thresholds (Dict[str, List[int]]): Dictionary of gene thresholds.
+
+    Returns:
+        List[int]: A list of two integers: [coverage_threshold, identity_threshold].
+    """
+    for key in thresholds:
+        if key in template_name:
+            return thresholds[key]
+    return thresholds["other"]
+
+
+def process_res_file(res_file_path: str) -> pd.DataFrame:
+    """
+    Reads and filters a KMA .res file based on predefined thresholds.
+
+    Args:
+        res_file_path (str): Path to the .res file.
+        thresholds: gene-specific cutoffs come from the module-level "thresholds" dict (not a function parameter).
+
+    Returns:
+        pd.DataFrame: Filtered results DataFrame.
+    """
+    try:
+        res_df = pd.read_csv(res_file_path, sep="\t")
+    except FileNotFoundError:
+        raise FileNotFoundError(f"File not found: {res_file_path}")
+    except pd.errors.EmptyDataError:
+        raise ValueError(f"File is empty or not properly formatted: {res_file_path}")
+
+    required_columns = {"#Template", "Template_Coverage", "Query_Identity", "Depth"}
+    if not required_columns.issubset(res_df.columns):
+        raise ValueError(f"Missing expected columns in {res_file_path}")
+
+    res_df["threshold"] = res_df["#Template"].apply(
+        lambda x: get_threshold(x, thresholds)
+    )
+    res_df_filtered = res_df[
+        (res_df["Template_Coverage"] >= res_df["threshold"].apply(lambda x: x[0]))
+        & (res_df["Query_Identity"] >= res_df["threshold"].apply(lambda x: x[1]))
+    ]
+    return res_df_filtered
+
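For orientation, a minimal sketch of how the gene-specific cutoffs are applied (run against an installed copy of this package; the template names follow the ref__gene__allele__suffix shape used by the test fixtures later in this file):

from ssi_analysis_result_parsers.Ecoli_parser import get_threshold, thresholds

# get_threshold() does a plain substring match against the keys of `thresholds`
# and falls back to the "other" entry when no key occurs in the template name.
get_threshold("4__stx2__stx2-a__X", thresholds)  # -> [98, 98]  ("stx" is a substring)
get_threshold("3__fliC__H2__X", thresholds)      # -> [90, 90]  ("fliC" matches)
get_threshold("1__adk__adk__X", thresholds)      # -> [98, 98]  (no key matches, falls back to "other")

# process_res_file() then keeps only rows whose Template_Coverage and Query_Identity
# are both at or above the [coverage, identity] pair returned for that template.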
+# %% ../nbs/49_Ecoli_parser.ipynb 13
+class EcoliResults:
+    """
+    Object for holding and processing E. coli typing results.
+
+    This class stores summary typing data for multiple samples, provides utilities for per-sample processing, and exports results in tab-separated format (.tsv).
+    """
+
+    # converts the per-sample results dict to a pandas DataFrame
+    def __init__(self, results_dict: dict):
+        """
+        Initializes the EcoliResults object with typing result data.
+
+        Args:
+            results_dict (dict): Dictionary where keys are sample names and values are summary result dictionaries.
+        """
+        self.results_dict = results_dict
+        self.results_df = pd.DataFrame.from_dict(
+            results_dict, orient="index"
+        ).reset_index(names="sample_name")
+
+    @staticmethod
+    def summarize_single_sample(
+        sample_name: str, res_path: str, verbose_flag: int = 1
+    ) -> dict:
+        """
+        Processes a single sample's KMA .res file and returns a summary dictionary.
+
+        Args:
+            sample_name (str): Sample identifier.
+            res_path (str): Path to the sample's .res file.
+            verbose_flag (int, optional): Include verbose info if set to 1. Default is 1.
+
+        Returns:
+            Dict[str, str]: Summary values extracted from the .res file.
+        """
+        log_dir = "examples/Log"
+        setup_logging(log_dir, sample_name)
+
+        NA_string = "-"
+        output_data = {
+            "stx": NA_string,
+            "OH": NA_string,
+            "wzx": NA_string,
+            "wzy": NA_string,
+            "wzt": NA_string,
+            "wzm": NA_string,
+            "eae": NA_string,
+            "ehxA": NA_string,
+            "Other": NA_string,
+        }
+
+        try:
+            logging.info(f"Processing .res file: {res_path}")
+            filtered_df = process_res_file(res_path)
+        except Exception as e:
+            logging.error(f"Failed to process {res_path}: {e}")
+            return output_data
+
+        gene_map = {
+            "wzx": "wzx",
+            "wzy": "wzy",
+            "wzt": "wzt",
+            "wzm": "wzm",
+            "eae": "eae",
+            "ehxA": "ehxA",
+        }
+        toxin = "stx"
+        stx_alleles = set()
+        fli = NA_string
+        fliC = NA_string
+
+        for template in filtered_df["#Template"]:
+            parts = template.split("__")
+            if len(parts) < 3:
+                continue
+            gene, allele = parts[1], parts[2]
+
+            if gene in ["eae", "ehxA"]:
+                output_data[gene] = "Positive"
+            elif gene in gene_map:
+                output_data[gene] = allele
+            elif gene == "fliC":
+                fliC = allele
+            elif gene == "fli":
+                fli = allele
+            elif gene.startswith(toxin):
+                stx_alleles.add(allele)
+            elif gene not in thresholds:
+                output_data["Other"] = allele
+
+        if stx_alleles:
+            output_data[toxin] = ";".join(sorted(stx_alleles))
+
+        # serotype specific requirements
+        wzx, wzy, wzt, wzm = (
+            output_data["wzx"],
+            output_data["wzy"],
+            output_data["wzt"],
+            output_data["wzm"],
+        )
+        Otype = "-"
+        if (
+            wzx != NA_string
+            and wzy != NA_string
+            and wzx == wzy
+            and wzt == NA_string
+            and wzm == NA_string
+        ):
+            Otype = wzx
+            output_data["wzx"] = output_data["wzy"] = NA_string
+        elif (
+            wzt != NA_string
+            and wzm != NA_string
+            and wzt == wzm
+            and wzx == NA_string
+            and wzy == NA_string
+        ):
+            Otype = wzt
+            output_data["wzt"] = output_data["wzm"] = NA_string
+
+        Htype = fli if fli != NA_string else fliC
+        output_data["OH"] = f"{Otype};{Htype}"
+
+        # adding the additional depth, template coverage and query identity information
+        if verbose_flag == 1:
+            verbose_parts = []
+            for _, row in filtered_df.iterrows():
+                parts = row["#Template"].split("__")
+                if len(parts) >= 3:
+                    gene, allele = parts[1], parts[2]
+                    depth = row["Depth"]
+                    coverage = row["Template_Coverage"]
+                    identity = row["Query_Identity"]
+                    verbose_parts.append(
+                        f"{gene}_{allele}_{depth:.2f}_{coverage:.2f}_{identity:.2f}"
+                    )
+            output_data["verbose"] = ";".join(verbose_parts)
+
+        logging.info(f"Successfully processed sample: {sample_name}")
+        return output_data
+
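For readers skimming the loop above, a short sketch of the naming convention it relies on (the template string is hypothetical but mirrors the fixtures used further down in this file):

# each KMA template name is split on "__"; field 1 is the gene, field 2 the allele
parts = "2__wzy__O103__X".split("__")
gene, allele = parts[1], parts[2]  # -> "wzy", "O103"

# O-type is only called when exactly one gene pair agrees on an allele:
#   wzx == wzy (and wzt/wzm absent) -> O-type = shared allele, wzx/wzy columns reset to "-"
#   wzt == wzm (and wzx/wzy absent) -> O-type = shared allele, wzt/wzm columns reset to "-"
# anything else leaves O-type as "-" and keeps the individual gene columns filled.
# H-type prefers a "fli" hit over "fliC"; the result is reported as "O;H", e.g. "O103;H2".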
+    @classmethod
+    def from_samplesheet(
+        cls,
+        samplesheet_path: Path,
+        verbose: int = 1,
+        results_base: str = "examples/Results/{sample_name}/kma/{sample_name}.res",
+    ) -> "EcoliResults":
+        """
+        Loads sample data from a samplesheet and summarizes each sample.
+
+        Args:
+            samplesheet_path (Path): Path to the samplesheet TSV file.
+            verbose (int, optional): Whether to include verbose output per sample. Default is 1.
+
+        Returns:
+            EcoliResults: An instance of the class populated with summaries for all samples.
+        """
+        df = pd.read_csv(samplesheet_path, sep="\t")
+        df.columns = df.columns.str.strip()
+        # print("I AM INSIDE FROM SAMPLESHEET")
+        # if "Illumina_read_files" in df.columns and ("read1" not in df.columns or "read2" not in df.columns):
+        #     df[["read1", "read2"]] = df["Illumina_read_files"].str.split(",", expand=True)
+
+        results_dict = {}
+        for idx, row in df.iterrows():
+            sample_name = row["sample_name"]
+            res_path = Path(
+                results_base.format(sample_name=sample_name)
+            )  # results_base / sample_name / "kma" / f"{sample_name}.res"
+            # print(f"The res path is : {res_path}")
+            summary = cls.summarize_single_sample(
+                sample_name, res_path, verbose_flag=verbose
+            )
+            results_dict[sample_name] = summary
+
+        # Convert to DataFrame
+        result_df = pd.DataFrame.from_dict(results_dict, orient="index").reset_index(
+            names="sample_name"
+        )
+
+        # Merge with original metadata
+        merged_df = df.merge(result_df, on="sample_name", how="left")
+
+        # Create and return object
+        obj = cls(results_dict)
+        obj.results_df = merged_df
+        return obj
+
+    def write_tsv(self, output_file: Path):
+        """
+        Writes the summarized typing results to a TSV file.
+
+        Args:
+            output_file (Path): Destination file path for the output table.
+        """
+        self.results_df.to_csv(output_file, sep="\t", index=False)
+
+    def __repr__(self):
+        """
+        Returns a concise summary of the results object.
+
+        Returns:
+            str: A string with sample and variable counts.
+        """
+        return f"<EcoliResults: {len(self.results_df)} samples, {len(self.results_df.columns)} variables>"
+
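A minimal usage sketch for the class (sample names, values and paths are made up):

from ssi_analysis_result_parsers.Ecoli_parser import EcoliResults

# direct construction from an already-summarized dict
results = EcoliResults({"sampleA": {"stx": "-", "OH": "O103;H2", "eae": "Positive"}})
print(results)  # <EcoliResults: 1 samples, 4 variables>
results.write_tsv("sampleA_summary.tsv")

# or from a samplesheet (which must contain a sample_name column), letting the class
# locate each sample's .res file via the results_base template:
# results = EcoliResults.from_samplesheet(
#     "samplesheet.tsv", results_base="Results/{sample_name}/kma/{sample_name}.res"
# )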
+# %% ../nbs/49_Ecoli_parser.ipynb 15
+@call_parse
+def ecoli_parser(
+    samplesheet_path: Path,  # Input samplesheet
+    output_file: Path = None,  # Path to output
+    verbose: int = 1,  # Verbosity
+    results_base: str = "examples/Results/{sample_name}/kma/{sample_name}.res",  # Path template for .res files
+):
+    results = EcoliResults.from_samplesheet(
+        samplesheet_path, verbose=verbose, results_base=results_base
+    )
+    if output_file:
+        results.write_tsv(output_file)
+    else:
+        print(results.results_df)
+
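Because ecoli_parser is wrapped with fastcore's @call_parse, it can be invoked from Python (exactly as the inline test below does) or from the command line, where each parameter becomes a flag. A sketch with hypothetical paths; the console-script name itself is registered in this wheel's entry_points.txt, which the diff lists but does not show:

from pathlib import Path
from ssi_analysis_result_parsers.Ecoli_parser import ecoli_parser

ecoli_parser(
    samplesheet_path=Path("samplesheet.tsv"),
    output_file=Path("ecoli_summary.tsv"),  # omit to print the table instead of writing it
    verbose=1,
    results_base="Results/{sample_name}/kma/{sample_name}.res",
)
# shell equivalent (script name assumed, not confirmed by this diff):
#   <console-script> samplesheet.tsv --output_file ecoli_summary.tsv \
#       --verbose 1 --results_base "Results/{sample_name}/kma/{sample_name}.res"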
+# %% ../nbs/49_Ecoli_parser.ipynb 17
+# | eval: true
+import pandas as pd
+from pathlib import Path
+import os
+
+# Define paths
+samplesheet_path = Path("test_input/Ecoli/samplesheet.tsv")
+output_dir = Path("test_output/Ecoli")
+
+# Create output directory
+if not output_dir.exists():
+    output_dir.mkdir(parents=True, exist_ok=True)
+
+output_path = output_dir / "KMA_cases_parser.tsv"
+
+# Assert input exists
+assert samplesheet_path.exists(), f"File does not exist: {samplesheet_path}"
+print(output_path)
+
+# try the ecoli parser to see if the wrangling functionality works
+try:
+    ecoli_parser(
+        samplesheet_path=samplesheet_path,
+        output_file=output_path,
+        verbose=1,
+        results_base="test_input/Ecoli/{sample_name}.res",
+    )
+except Exception as e:
+    raise AssertionError(f"Parser execution failed: {e}")
+
+# compare the output with the expected results based on input to ensure correct wrangling
+
+# read the created output files and check the information
+sample_sheet_df = pd.read_csv(samplesheet_path, sep="\t")
+sample_output_df = pd.read_csv(output_path, sep="\t")
+
+### Test case 1. Check that the data structure is correct
+original_cols = sample_sheet_df.columns.tolist()
+output_cols = sample_output_df.columns.tolist()
+output_initial_cols = sample_output_df.columns[: len(original_cols)].tolist()
+output_specific_cols = sample_output_df.columns[len(original_cols) :].tolist()
+
+assert (
+    original_cols == output_initial_cols
+), f"Mismatch in first columns:\nExpected: {original_cols}\nGot: {output_initial_cols}"
+
+assert output_specific_cols
+
+### Test case 2. Check sample ERR3528110, which is correctly classified as E. coli, and ensure the data wrangling behaves as expected
+ERR3528110_res_path = "test_input/Ecoli/ERR3528110.res"
+ERR3528110_input_df = pd.read_csv(ERR3528110_res_path, sep="\t")
+
+ERR3528110_row = (
+    sample_output_df[sample_output_df["sample_name"] == "ERR3528110"]
+    .iloc[:, len(original_cols) : len(output_cols)]
+    .iloc[0]
+)
+
+# extract the original gene hits from the .res file
+gene_hits = ERR3528110_input_df["#Template"].tolist()
+
+parsed_hits = []
+
+for hit in gene_hits:
+    parts = hit.split("__")
+    assert (
+        len(parts) >= 3
+    ), f"Unexpected KMA result format in: '{hit}'. Expected at least 3 '__' parts (e.g., ref__gene__allele) as of ecoli fbi 24-04-2025."
+    gene, allele = parts[1], parts[2]
+    parsed_hits.append((gene, allele))
+
+# Extract OH genes
+O_gene_alleles = {
+    gene: allele for gene, allele in parsed_hits if gene in {"wzx", "wzy", "wzt", "wzm"}
+}
+H_gene_alleles = {
+    gene: allele for gene, allele in parsed_hits if gene in {"fli", "fliC"}
+}
+
+O_type = ERR3528110_row["OH"].split(";")[0]
+H_type = ERR3528110_row["OH"].split(";")[1]
+
+O_gene_keys = set(O_gene_alleles.keys())
+H_gene_keys = set(H_gene_alleles.keys())
+
+O_genes_no = len(O_gene_keys)
+H_genes_no = len(H_gene_keys)
+
+# O typing scenarios
+# Case 1: wzx/wzy match
+if O_gene_keys == {"wzx", "wzy"} and O_gene_alleles["wzx"] == O_gene_alleles["wzy"]:
+    expected_otype = O_gene_alleles["wzx"]
+    assert O_type == expected_otype, f"Expected O-type '{expected_otype}', got '{O_type}'"
+    # wzx/wzy should be suppressed
+    assert ERR3528110_row["wzx"] == "-", "wzx column should be '-' when OH is used"
+    assert ERR3528110_row["wzy"] == "-", "wzy column should be '-' when OH is used"
+    # print(f"O-type correctly assigned from matching wzx/wzy: {O_type}")
+
+# Case 2: wzt/wzm match
+elif O_gene_keys == {"wzt", "wzm"} and O_gene_alleles["wzt"] == O_gene_alleles["wzm"]:
+    expected_otype = O_gene_alleles["wzt"]
+    assert O_type == expected_otype, f"Expected O-type '{expected_otype}', got '{O_type}'"
+    assert ERR3528110_row["wzt"] == "-", "wzt column should be '-' when OH is used"
+    assert ERR3528110_row["wzm"] == "-", "wzm column should be '-' when OH is used"
+    # print(f"O-type correctly assigned from matching wzt/wzm: {O_type}")
+
+# Case 3: Conflict (≥3 genes, or 2 mismatched genes)
+elif O_genes_no >= 3 or (
+    (O_gene_keys == {"wzx", "wzy"} and O_gene_alleles["wzx"] != O_gene_alleles["wzy"])
+    or (
+        O_gene_keys == {"wzt", "wzm"} and O_gene_alleles["wzt"] != O_gene_alleles["wzm"]
+    )
+):
+    assert O_type == "-", f"Expected O-type = '-' due to conflict, got: '{O_type}'"
+    for gene in O_gene_keys:
+        assert (
+            ERR3528110_row[gene] == O_gene_alleles[gene]
+        ), f"{gene} column should contain '{O_gene_alleles[gene]}'"
+    # print("Conflict in O-typing correctly led to OH = '-' and individual gene columns retained.")
+
+# H typing scenarios
+
+# Case 1: If fli is present, it always takes precedence over fliC
+if H_gene_keys == {"fli"}:
+    expected_htype = H_gene_alleles["fli"]
+    assert (
+        H_type == expected_htype
+    ), f"Expected H-type '{expected_htype}' from 'fli', got '{H_type}'"
+
+# Case 2: fliC is used only when it is the sole H gene
+elif H_gene_keys == {"fliC"}:
+    expected_htype = H_gene_alleles["fliC"]
+    assert (
+        H_type == expected_htype
+    ), f"Expected H-type '{expected_htype}' from 'fliC', got '{H_type}'"
+
+# Case 3: if neither gene is present, the H-type stays '-'
+else:
+    assert H_type == "-", f"Expected H-type '-', but got '{H_type}'"
+
+### Test case 3. Check that sample ERR14229029, listed as E. coli in the samplesheet, yields an empty summary because it was erroneously classified as E. coli
+
+ERR14229029_row = (
+    sample_output_df[sample_output_df["sample_name"] == "ERR14229029"]
+    .iloc[:, len(original_cols) : len(output_cols)]
+    .iloc[0]
+)
+
+ERR14229029_expected_values = [
+    "-",
+    "-;-",
+    "-",
+    "-",
+    "-",
+    "-",
+    "-",
+    "-",
+    "-",
+    float("nan"),
+]
+ERR14229029_values = [ERR14229029_row[col] for col in output_specific_cols]
+
+for col, actual, expected in zip(
+    output_specific_cols, ERR14229029_values, ERR14229029_expected_values
+):
+    if pd.isna(expected):
+        assert pd.isna(actual), f"{col}: Expected NaN, got {actual}"
+    else:
+        assert actual == expected, f"{col}: Expected '{expected}', got '{actual}'"
+
+# %% ../nbs/49_Ecoli_parser.ipynb 19
+import os
+from tempfile import TemporaryDirectory
+from pathlib import Path
+
+test_cases = [
+    # sample_name, res_content, expected_oh, expected_stx, expected_eae, expected_ehxA
+    (
+        "sample1",
+        "1__wzx__O103__X\t100\t100\t60\n2__wzy__O103__X\t100\t100\t65\n3__fliC__H2__X\t100\t100\t70",
+        "O103;H2",
+        "-",
+        "-",
+        "-",
+    ),
+    (
+        "sample2",
+        "1__wzt__O8__X\t100\t100\t60\n2__wzm__O8__X\t100\t100\t65\n3__fliC__H10__X\t100\t100\t70\n4__stx2__stx2-a__X\t100\t100\t90\n5__eae__eae-5__X\t100\t100\t80",
+        "O8;H10",
+        "stx2-a",
+        "Positive",
+        "-",
+    ),
+    ("sample3", "1__fliC__H7__X\t100\t100\t70", "-;H7", "-", "-", "-"),
+    (
+        "sample4",
+        "bad_line\n2__wzy__O111__X\t100\t100\t70\n3__fliC__H11__X\t100\t100\t70",
+        "-;H11",
+        "-",
+        "-",
+        "-",
+    ),
+    ("sample5", "", "-;-", "-", "-", "-"),
+    (
+        "sample6",
+        "1__wzx__O157__X\t100\t100\t60\n2__wzy__O157__X\t100\t100\t65\n3__wzt__O8__X\t100\t100\t60\n4__wzm__O8__X\t100\t100\t65\n5__fli__H2__X\t100\t100\t70",
+        "-;H2",
+        "-",
+        "-",
+        "-",
+    ),
+    (
+        "sample7",
+        "1__wzx__O157__X\t100\t100\t60\n2__wzy__O111__X\t100\t100\t65\n3__fliC__H9__X\t100\t100\t70",
+        "-;H9",
+        "-",
+        "-",
+        "-",
+    ),
+    (
+        "sample8",
+        "1__fli__H1__X\t100\t100\t70\n2__fliC__H12__X\t100\t100\t70",
+        "-;H1",
+        "-",
+        "-",
+        "-",
+    ),
+    (
+        "sample9",
+        "1__wzx__O157__X\t100\t100\t60\n2__wzy__O157__X\t100\t100\t65\n3__wzt__O8__X\t100\t100\t60\n4__wzm__O8__X\t100\t100\t65\n5__fliC__H10__X\t100\t100\t70\n6__fli__H2__X\t100\t100\t70\n7__stx1__stx1-a__X\t100\t100\t90\n8__stx2__stx2-d__X\t100\t100\t90\n9__stx2__stx2-a__X\t100\t100\t90\n10__eae__eae-42-5__X\t100\t100\t80\n11__ehxA__ehxA-7__X\t100\t100\t80",
+        "-;H2",
+        "stx1-a;stx2-a;stx2-d",
+        "Positive",
+        "Positive",
+    ),
+    (
+        "sample10",
+        "1__adk__adk__X\t100\t100\t70\n2__fliC__H4__X\t100\t100\t70",
+        "-;H4",
+        "-",
+        "-",
+        "-",
+    ),
+    (
+        "sample11",
+        "1__eae__eae-1__X\t100\t94\t70\n2__fliC__H6__X\t100\t100\t70",
+        "-;H6",
+        "-",
+        "-",
+        "-",
+    ),
+    (
+        "sample12",
+        "1__stx1__stx1a__X\t100\t100\t80\n2__stx2__stx2c__X\t100\t100\t85\n3__fli__H21__X\t100\t100\t70",
+        "-;H21",
+        "stx1a;stx2c",
+        "-",
+        "-",
+    ),
+]
+
+for (
+    sample_name,
+    res_content,
+    expected_oh,
+    expected_stx,
+    expected_eae,
+    expected_ehxA,
+) in test_cases:
+    with TemporaryDirectory() as tmpdir:
+        tmpdir = Path(tmpdir)
+        os.chdir(tmpdir)
+
+        res_dir = tmpdir / f"examples/Results/{sample_name}/kma"
+        res_dir.mkdir(parents=True)
+        res_file = res_dir / f"{sample_name}.res"
+        res_file.write_text(
+            "#Template\tTemplate_Coverage\tQuery_Identity\tDepth\n" + res_content
+        )
+
+        sheet = tmpdir / "samplesheet.tsv"
+        sheet.write_text(
+            "sample_name\tIllumina_read_files\tNanopore_read_file\tassembly_file\torganism\tvariant\tnotes\n"
+            f"{sample_name}\tread1.fastq,read2.fastq\t-\t-\tEcoli\t-\t-\n"
+        )
+
+        results = EcoliResults.from_samplesheet(sheet)
+        df = results.results_df
+        row = df.iloc[0]
+
+        # general output and functionality test
+        assert row["sample_name"] == sample_name
+
+        if row["OH"] != expected_oh:
+            raise AssertionError(
+                f"\nSample: {sample_name}\nExpected OH: {expected_oh}\nActual OH: {row['OH']}"
+            )
+        assert row["OH"] == expected_oh
+
+        if row["stx"] != expected_stx:
+            raise AssertionError(
+                f"\nSample: {sample_name}\nExpected stx: {expected_stx}\nActual stx: {row['stx']}"
+            )
+        assert row["stx"] == expected_stx
+
+        if row["eae"] != expected_eae:
+            raise AssertionError(
+                f"\nSample: {sample_name}\nExpected eae: {expected_eae}\nActual eae: {row['eae']}"
+            )
+        assert row["eae"] == expected_eae
+
+        if row["ehxA"] != expected_ehxA:
+            raise AssertionError(
+                f"\nSample: {sample_name}\nExpected ehxA: {expected_ehxA}\nActual ehxA: {row['ehxA']}"
+            )
+        assert row["ehxA"] == expected_ehxA
+
+        # sample specific information tests
+
+        # without conflicting O and H typing, the OH column should be filled and the four O-gene columns empty
+        if sample_name == "sample1":
+            assert row["wzx"] == "-"
+            assert row["wzy"] == "-"
+            assert row["wzt"] == "-"
+            assert row["wzm"] == "-"
+        # with conflicts, OH should remain empty and the four 'conflicting' gene columns stay filled
+        elif sample_name == "sample6":
+            assert row["wzx"] == "O157"
+            assert row["wzy"] == "O157"
+            assert row["wzt"] == "O8"
+            assert row["wzm"] == "O8"
+        elif sample_name == "sample10":
+            assert row["Other"] == "adk"
+
+print("All 12 synthetic E. coli sample inline tests passed.")
--- a/ssi_analysis_result_parsers/Legionella_parser.py
+++ b/ssi_analysis_result_parsers/Legionella_parser.py
@@ -32,6 +32,7 @@ from ssi_analysis_result_parsers import (
 # Project specific libraries
 from pathlib import Path
 import pandas
+import numpy
 import sys
 
 # %% ../nbs/39_Legionella_parser.ipynb 6
@@ -49,7 +50,7 @@ def extract_legionella_sbt(legionella_sbt_results_tsv: Path) -> dict:
         return d[fname]
     except pandas.errors.EmptyDataError:
         print(
-            f"
+            f"Legionella SBT output empty at {legionella_sbt_results_tsv}",
            file=sys.stderr,
        )
        return None
@@ -98,6 +99,7 @@ class LegionellaResults(core.PipelineResults):
         Alternative constructor for initializing results for multiple samples,
         Initializes LegionellaResults instance by providing a DataFrame of paths to outputs from tools (legionella sbt and lag1 presence blast)
         """
+        file_paths_df.replace(numpy.nan, None, inplace=True)
         file_paths = file_paths_df.to_dict(orient="index")
         results_dict = {}
         for sample_name, path_dict in file_paths.items():
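For context on the added replace() call: the samplesheet columns are read by pandas, so cells left empty come back as NaN; normalizing them to None before building the per-sample dicts lets downstream code use plain `is None` checks. A minimal sketch with hypothetical column names, illustrating the intent rather than the exact LegionellaResults flow:

import numpy
import pandas

# a samplesheet-like frame where one tool output is missing for the sample
file_paths_df = pandas.DataFrame(
    {"sbt_results_tsv": ["out/sbt.tsv"], "lag1_blast_tsv": [numpy.nan]},
    index=["sample_1"],
)
file_paths_df.replace(numpy.nan, None, inplace=True)  # same normalization as the diff adds
file_paths = file_paths_df.to_dict(orient="index")

for sample_name, path_dict in file_paths.items():
    for key, value in path_dict.items():
        if value is None:  # missing tool outputs can now be skipped with a plain None check
            print(f"{sample_name}: no {key} provided")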
|