mgnify-pipelines-toolkit 0.2.0-py3-none-any.whl → 0.2.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



mgnify_pipelines_toolkit/analysis/assembly/gff_file_utils.py (new file)
@@ -0,0 +1,82 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+ # Copyright 2025 EMBL - European Bioinformatics Institute
+ #
+ # Licensed under the Apache License, Version 2.0 (the 'License');
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an 'AS IS' BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import csv
+
+
+ def write_results_to_file(
+     outfile, header, main_gff_extended, fasta, ncrnas, trnas, crispr_annotations
+ ):
+     with open(outfile, "w") as file_out:
+         file_out.write("\n".join(header) + "\n")
+         contig_list = list(main_gff_extended.keys())
+         # check if there are any contigs that don't have CDS; if so add them in
+         contig_list = check_for_additional_keys(
+             ncrnas, trnas, crispr_annotations, contig_list
+         )
+         for contig in contig_list:
+             sorted_pos_list = sort_positions(
+                 contig, main_gff_extended, ncrnas, trnas, crispr_annotations
+             )
+             for pos in sorted_pos_list:
+                 for my_dict in (ncrnas, trnas, crispr_annotations, main_gff_extended):
+                     if contig in my_dict and pos in my_dict[contig]:
+                         for line in my_dict[contig][pos]:
+                             if isinstance(line, str):
+                                 file_out.write(f"{line}\n")
+                             else:
+                                 for element in line:
+                                     file_out.write(element)
+         for line in fasta:
+             file_out.write(f"{line}\n")
+
+
+ def sort_positions(contig, main_gff_extended, ncrnas, trnas, crispr_annotations):
+     sorted_pos_list = list()
+     for my_dict in (main_gff_extended, ncrnas, trnas, crispr_annotations):
+         if contig in my_dict:
+             sorted_pos_list += list(my_dict[contig].keys())
+     return sorted(set(sorted_pos_list))
+
+
+ def check_for_additional_keys(ncrnas, trnas, crispr_annotations, contig_list):
+     for my_dict in (ncrnas, trnas, crispr_annotations):
+         dict_keys = set(my_dict.keys())
+         absent_keys = dict_keys - set(contig_list)
+         if absent_keys:
+             contig_list = contig_list + list(absent_keys)
+     return contig_list
+
+
+ def print_pseudogene_report(pseudogene_report_dict, pseudogene_report_file):
+     with open(pseudogene_report_file, "w") as file_out:
+         writer = csv.writer(file_out, delimiter="\t", lineterminator="\n")
+         # Print header
+         writer.writerow(
+             [
+                 "ID",
+                 "Pseudogene according to Bakta/Prokka",
+                 "Pseudogene according to Pseudofinder",
+                 "AntiFam hit",
+             ]
+         )
+
+         all_keys = ["gene_caller", "pseudofinder", "antifams"]
+         for protein, attributes in pseudogene_report_dict.items():
+             # Fill in missing attributes with False
+             line = [protein] + [str(attributes.get(key, False)) for key in all_keys]
+             writer.writerow(line)
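
For reference, write_results_to_file expects main_gff_extended, ncrnas, trnas and crispr_annotations to be nested mappings of contig name to start position to annotation lines, which is implied by the lookups above but never stated. Below is a minimal sketch of a call using hypothetical toy data; in the real pipeline these dictionaries are produced by load_annotations in gff_annotation_utils.py, which is not shown in this diff.

# Minimal sketch; the toy data mirrors the shapes implied by
# write_results_to_file's lookups and is not real pipeline output.
from gff_file_utils import write_results_to_file

header = ["##gff-version 3"]
main_gff_extended = {
    "contig_1": {100: ["contig_1\tProdigal\tCDS\t100\t400\t.\t+\t0\tID=gene_1"]}
}
fasta = ["##FASTA", ">contig_1", "ATGCATGC"]

# Empty dicts are valid for the optional annotation sources.
write_results_to_file(
    "annotated.gff", header, main_gff_extended, fasta, {}, {}, {}
)
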
mgnify_pipelines_toolkit/analysis/assembly/gff_toolkit.py (new file)
@@ -0,0 +1,170 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+ # Copyright 2025 EMBL - European Bioinformatics Institute
+ #
+ # Licensed under the Apache License, Version 2.0 (the 'License');
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an 'AS IS' BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import argparse
+
+ from gff_annotation_utils import get_ncrnas, get_trnas, load_annotations, load_crispr
+ from gff_file_utils import write_results_to_file, print_pseudogene_report
+
+
+ def main(
+     gff,
+     ipr_file,
+     eggnog_file,
+     sanntis_file,
+     crispr_file,
+     amr_file,
+     antismash_file,
+     gecco_file,
+     dbcan_file,
+     defense_finder_file,
+     pseudofinder_file,
+     rfam_file,
+     trnascan_file,
+     outfile,
+     pseudogene_report_file,
+ ):
+     # Load annotations and add them to the existing CDS:
+     # header contains the leading GFF lines starting with "#",
+     # main_gff_extended is a dictionary of GFF lines with the additional annotations added in,
+     # fasta is the FASTA portion of the original GFF file, and
+     # pseudogene_report_dict holds information on detected pseudogenes, which
+     # can optionally be printed to a separate output file.
+     header, main_gff_extended, fasta, pseudogene_report_dict = load_annotations(
+         gff,
+         eggnog_file,
+         ipr_file,
+         sanntis_file,
+         amr_file,
+         antismash_file,
+         gecco_file,
+         dbcan_file,
+         defense_finder_file,
+         pseudofinder_file,
+     )
+     ncrnas = {}
+     if rfam_file:
+         ncrnas = get_ncrnas(rfam_file)
+     trnas = {}
+     if trnascan_file:
+         trnas = get_trnas(trnascan_file)
+     crispr_annotations = {}
+     if crispr_file:
+         crispr_annotations = load_crispr(crispr_file)
+
+     write_results_to_file(outfile, header, main_gff_extended, fasta, ncrnas, trnas, crispr_annotations)
+     if pseudogene_report_file:
+         print_pseudogene_report(pseudogene_report_dict, pseudogene_report_file)
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser(
+         description="The script extends a user-provided base GFF annotation file by incorporating "
+         "information extracted from the outputs of supplementary annotation tools.",
+     )
+     parser.add_argument(
+         "-g",
+         dest="gff_input",
+         required=True,
+         help="GFF input file containing the base annotation",
+     )
+     parser.add_argument(
+         "-i",
+         dest="ips",
+         help="InterProScan annotation results (TSV)",
+         required=False,
+     )
+     parser.add_argument(
+         "-e",
+         dest="eggnog",
+         help="EggNOG mapper annotation results (TSV)",
+         required=False,
+     )
+     parser.add_argument(
+         "-s",
+         dest="sanntis",
+         help="SanntiS results",
+         required=False,
+     )
+     parser.add_argument(
+         "-c",
+         dest="crispr",
+         help="CRISPRCasFinder results for the cluster rep (pre-filtered high quality GFF)",
+         required=False,
+     )
+     parser.add_argument(
+         "-a",
+         dest="amr",
+         help="The TSV file produced by AMRFinderPlus",
+         required=False,
+     )
+     parser.add_argument(
+         "--antismash",
+         help="The GFF file produced by AntiSMASH post-processing script",
+         required=False,
+     )
+     parser.add_argument(
+         "--gecco",
+         help="The GFF file produced by GECCO",
+         required=False,
+     )
+     parser.add_argument(
+         "--dbcan",
+         help="The GFF file produced by dbCAN post-processing script",
+         required=False,
+     )
+     parser.add_argument(
+         "--defense-finder",
+         help="The GFF file produced by Defense Finder post-processing script",
+         required=False,
+     )
+     parser.add_argument(
+         "--pseudofinder",
+         help="The GFF file produced by the Pseudofinder post-processing script",
+         required=False,
+     )
+     parser.add_argument("-r", dest="rfam", help="Rfam results", required=False)
+     parser.add_argument(
+         "-t", dest="trnascan", help="tRNAscan-SE results", required=False
+     )
+     parser.add_argument("-o", dest="outfile", help="Outfile name", required=True)
+     parser.add_argument(
+         "--pseudogene-report", help="Pseudogene report filename", required=False
+     )
+
+     return parser.parse_args()
+
+
+ if __name__ == "__main__":
+     args = parse_args()
+     main(
+         args.gff_input,
+         args.ips,
+         args.eggnog,
+         args.sanntis,
+         args.crispr,
+         args.amr,
+         args.antismash,
+         args.gecco,
+         args.dbcan,
+         args.defense_finder,
+         args.pseudofinder,
+         args.rfam,
+         args.trnascan,
+         args.outfile,
+         args.pseudogene_report,
+     )
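
Only the base GFF (-g) and the output name (-o) are required; every other input is optional, and omitted flags reach main() as None. Below is a minimal sketch of calling main() directly under that contract, with hypothetical file names; how load_annotations handles each None is defined in gff_annotation_utils.py, outside this diff.

# Hypothetical invocation: annotate a base GFF with InterProScan results only,
# assuming gff_toolkit.py and its sibling modules are importable.
# None stands in for every omitted optional input, exactly as argparse would
# supply it when the corresponding flag is not given.
from gff_toolkit import main

main(
    "assembly.gff",            # gff
    "interproscan.tsv",        # ipr_file
    None, None, None, None,    # eggnog, sanntis, crispr, amr
    None, None, None, None,    # antismash, gecco, dbcan, defense_finder
    None, None, None,          # pseudofinder, rfam, trnascan
    "assembly_annotated.gff",  # outfile
    None,                      # pseudogene_report_file
)
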
mgnify_pipelines_toolkit/analysis/shared/markergene_study_summary.py (new file)
@@ -0,0 +1,245 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+ # Copyright 2024-2025 EMBL - European Bioinformatics Institute
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import argparse
+ from collections import defaultdict
+ import json
+ import pathlib
+ import logging
+
+ import pandas as pd
+ import pyfastx
+
+ from mgnify_pipelines_toolkit.constants.thresholds import MAJORITY_MARKER_PROPORTION
+
+ logging.basicConfig(level=logging.DEBUG)
+
+
+ def parse_args():
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "-i",
+         "--input_path",
+         required=True,
+         type=str,
+         help="Input directory containing amplicon analysis pipeline results",
+     )
+     parser.add_argument(
+         "-r",
+         "--runs",
+         required=True,
+         type=str,
+         help="CSV file containing successful analyses generated by the pipeline",
+     )
+     parser.add_argument(
+         "-p", "--prefix", required=True, type=str, help="Prefix for the output file"
+     )
+
+     args = parser.parse_args()
+
+     input_path = args.input_path
+     runs = args.runs
+     prefix = args.prefix
+
+     return input_path, runs, prefix
+
+
+ def get_read_count(read_path):
+
+     fasta = pyfastx.Fasta(read_path, build_index=False)
+     read_count = sum(1 for _ in fasta)
+
+     return read_count
+
+
+ def add_markergene(root_path, run_acc, markergene_dict, markergene):
+
+     if markergene != "ITS":
+
+         bacterial_ssu = list(
+             pathlib.Path(f"{root_path}/{run_acc}/sequence-categorisation").glob(
+                 f"*{markergene}*bacteria*"
+             )
+         )
+         archaeal_ssu = list(
+             pathlib.Path(f"{root_path}/{run_acc}/sequence-categorisation").glob(
+                 f"*{markergene}*archaea*"
+             )
+         )
+         eukarya_ssu = list(
+             pathlib.Path(f"{root_path}/{run_acc}/sequence-categorisation").glob(
+                 f"*{markergene}*eukarya*"
+             )
+         )
+
+         markergene_dict[markergene] = defaultdict()
+         markergene_dict[markergene]["Bacteria"] = defaultdict()
+         markergene_dict[markergene]["Archaea"] = defaultdict()
+         markergene_dict[markergene]["Eukarya"] = defaultdict()
+
+         markergene_dict[markergene] = add_read_count_to_markergene(
+             markergene_dict[markergene], bacterial_ssu, "Bacteria"
+         )
+         markergene_dict[markergene] = add_read_count_to_markergene(
+             markergene_dict[markergene], archaeal_ssu, "Archaea"
+         )
+         markergene_dict[markergene] = add_read_count_to_markergene(
+             markergene_dict[markergene], eukarya_ssu, "Eukarya"
+         )
+     else:
+         its = list(
+             pathlib.Path(f"{root_path}/{run_acc}/sequence-categorisation").glob("*ITS*")
+         )
+         markergene_dict["ITS"] = defaultdict()
+         markergene_dict["ITS"]["Eukarya"] = defaultdict()
+         markergene_dict["ITS"] = add_read_count_to_markergene(
+             markergene_dict["ITS"], its, "Eukarya"
+         )
+
+     return markergene_dict
+
+
+ def add_read_count_to_markergene(marker_gene_dict, marker, label):
+
+     if marker:
+         read_count = get_read_count(str(marker[0]))
+         marker_gene_dict[label]["read_count"] = read_count
+     else:
+         marker_gene_dict[label]["read_count"] = 0
+
+     return marker_gene_dict
+
+
+ def main():
+
+     input_path, runs, prefix = parse_args()
+
+     root_path = pathlib.Path(input_path)
+
+     if not root_path.exists():
+         logging.error(f"Results path does not exist: {root_path}")
+         exit(1)
+
+     runs_df = pd.read_csv(runs, names=["run", "status"])
+
+     # Marker gene study summary
+     markergene_dict = defaultdict(dict)
+     for i in range(len(runs_df)):
+         run_acc = runs_df.loc[i, "run"]
+         markergene_dict[run_acc]["marker_genes"] = defaultdict(dict)
+         markergene_dict[run_acc]["marker_genes"] = add_markergene(
+             root_path, run_acc, markergene_dict[run_acc]["marker_genes"], "SSU"
+         )
+         markergene_dict[run_acc]["marker_genes"] = add_markergene(
+             root_path, run_acc, markergene_dict[run_acc]["marker_genes"], "LSU"
+         )
+         markergene_dict[run_acc]["marker_genes"] = add_markergene(
+             root_path, run_acc, markergene_dict[run_acc]["marker_genes"], "ITS"
+         )
+
+         total_read_counts = sum(
+             [
+                 markergene["read_count"]
+                 for markergene in markergene_dict[run_acc]["marker_genes"][
+                     "SSU"
+                 ].values()
+             ]
+         )
+         total_read_counts += sum(
+             [
+                 markergene["read_count"]
+                 for markergene in markergene_dict[run_acc]["marker_genes"][
+                     "LSU"
+                 ].values()
+             ]
+         )
+         total_read_counts += sum(
+             [
+                 markergene["read_count"]
+                 for markergene in markergene_dict[run_acc]["marker_genes"][
+                     "ITS"
+                 ].values()
+             ]
+         )
+
+         for markergene in markergene_dict[run_acc]["marker_genes"].keys():
+             read_count = 0
+             for domain in markergene_dict[run_acc]["marker_genes"][markergene].keys():
+                 read_count += markergene_dict[run_acc]["marker_genes"][markergene][
+                     domain
+                 ]["read_count"]
+             # guard against runs where no marker gene reads were found at all
+             proportion = read_count / float(total_read_counts) if total_read_counts else 0.0
+             for domain in markergene_dict[run_acc]["marker_genes"][markergene].keys():
+                 markergene_dict[run_acc]["marker_genes"][markergene][domain][
+                     "majority_marker"
+                 ] = proportion >= MAJORITY_MARKER_PROPORTION
+
+     if markergene_dict:
+         with open(f"{prefix}_markergene_study_summary.json", "w") as fw:
+             fw.write(json.dumps(markergene_dict, indent=4))
+     else:
+         logging.warning(
+             "Marker gene data is empty. No summary file created."
+         )
+
+     # Amplified region study summary (only available if ASV results are present)
+
+     ampregion_dict = defaultdict(dict)
+     for i in range(len(runs_df)):
+         run_status = runs_df.loc[i, "status"]
+         if run_status == "no_asvs":
+             continue
+
+         run_acc = runs_df.loc[i, "run"]
+         ampregion_dict[run_acc]["amplified_regions"] = []
+
+         amp_regions = sorted(
+             list(pathlib.Path(f"{root_path}/{run_acc}/asv").glob("*S-V*/*.tsv"))
+         )
+
+         for amp_region_path in amp_regions:
+             amp_dict = defaultdict()
+             amp_region = str(amp_region_path).split("/")[-2]
+             marker_gene = amp_region.split("-")[0]
+             amp_region = "-".join(amp_region.split("-")[1:])
+
+             amp_region_df = pd.read_csv(amp_region_path, sep="\t")
+             asv_count = len(amp_region_df)
+             read_count = amp_region_df.loc[:, "count"].sum()
+
+             amp_dict["marker_gene"] = marker_gene
+             amp_dict["amplified_region"] = amp_region
+             amp_dict["asv_count"] = int(
+                 asv_count
+             )  # casting needed for JSON serialising
+             amp_dict["read_count"] = int(
+                 read_count
+             )  # casting needed for JSON serialising
+
+             ampregion_dict[run_acc]["amplified_regions"].append(amp_dict)
+
+     if ampregion_dict:
+         with open(f"{prefix}_ampregion_study_summary.json", "w") as fw:
+             fw.write(json.dumps(ampregion_dict, indent=4))
+     else:
+         logging.warning("No amplified region data found. No summary file created.")
+
+
+ if __name__ == "__main__":
+     main()
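
The summary JSON nests run accession → marker gene → domain. Below is an illustrative fragment of <prefix>_markergene_study_summary.json, written as a Python literal with a made-up accession and counts; every domain under a marker gene shares that gene's majority_marker flag, since the flag records whether the gene as a whole reaches MAJORITY_MARKER_PROPORTION of the run's marker-gene reads.

# Made-up example of the summary structure: 915 of 960 reads are SSU,
# so SSU clears the 0.45 majority threshold and LSU/ITS do not.
example_summary = {
    "ERR0000001": {
        "marker_genes": {
            "SSU": {
                "Bacteria": {"read_count": 900, "majority_marker": True},
                "Archaea": {"read_count": 12, "majority_marker": True},
                "Eukarya": {"read_count": 3, "majority_marker": True},
            },
            "LSU": {
                "Bacteria": {"read_count": 40, "majority_marker": False},
                "Archaea": {"read_count": 0, "majority_marker": False},
                "Eukarya": {"read_count": 5, "majority_marker": False},
            },
            "ITS": {"Eukarya": {"read_count": 0, "majority_marker": False}},
        }
    }
}
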
mgnify_pipelines_toolkit/constants/thresholds.py
@@ -25,3 +25,10 @@ MAX_INTERNAL_PRIMER_PROPORTION = 0.2

  # used by library_strategy_checker in analysis.shared
  MIN_AMPLICON_STRATEGY_CHECK = 0.30
+
+ # used by markergene_study_summary in analysis.shared
+ MAJORITY_MARKER_PROPORTION = 0.45
+
+ # used by gff_toolkit in analysis.assembly
+ EVALUE_CUTOFF_IPS = 1e-10
+ EVALUE_CUTOFF_EGGNOG = 1e-10
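
MAJORITY_MARKER_PROPORTION feeds the summary script above, while the two e-value cutoffs are read by gff_toolkit's annotation-loading code in gff_annotation_utils.py, which this diff does not show. As a hedged illustration of how such a cutoff is conventionally applied, the helper below is hypothetical and not part of the package:

from mgnify_pipelines_toolkit.constants.thresholds import EVALUE_CUTOFF_IPS

def keep_ips_hit(evalue: float) -> bool:
    # Hypothetical filter: smaller e-values mean stronger matches, so a hit
    # is kept only when its e-value is at or below the cutoff.
    return evalue <= EVALUE_CUTOFF_IPS

assert keep_ips_hit(1e-30) and not keep_ips_hit(1e-3)
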
mgnify_pipelines_toolkit-0.2.2.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: mgnify_pipelines_toolkit
- Version: 0.2.0
+ Version: 0.2.2
  Summary: Collection of scripts and tools for MGnify pipelines
  Author-email: MGnify team <metagenomics-help@ebi.ac.uk>
  License: Apache Software License 2.0
@@ -18,6 +18,7 @@ Requires-Dist: regex==2023.12.25
  Requires-Dist: requests==2.32.3
  Requires-Dist: click==8.1.7
  Requires-Dist: pandera==0.22.1
+ Requires-Dist: pyfastx>=2.2.0
  Provides-Extra: tests
  Requires-Dist: pytest==7.4.0; extra == "tests"
  Requires-Dist: pytest-md==0.2.0; extra == "tests"
@@ -29,6 +30,7 @@ Requires-Dist: regex==2023.12.25; extra == "tests"
  Requires-Dist: requests==2.32.3; extra == "tests"
  Requires-Dist: click==8.1.7; extra == "tests"
  Requires-Dist: pandera==0.22.1; extra == "tests"
+ Requires-Dist: pyfastx>=2.2.0; extra == "tests"
  Provides-Extra: dev
  Requires-Dist: mgnify_pipelines_toolkit[tests]; extra == "dev"
  Requires-Dist: pre-commit==3.8.0; extra == "dev"
mgnify_pipelines_toolkit-0.2.2.dist-info/RECORD
@@ -1,9 +1,9 @@
  mgnify_pipelines_toolkit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mgnify_pipelines_toolkit/analysis/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- mgnify_pipelines_toolkit/analysis/amplicon/amplicon_utils.py,sha256=ySMZkgRSg-dnh6HMAE_1Vx8_EvJj7AiHJ2FcCaXKI-s,6448
- mgnify_pipelines_toolkit/analysis/amplicon/are_there_primers.py,sha256=P_BM3GTB1KKKDb5chDK7-6cP6KORJef7i8ub-XLDtM0,5289
+ mgnify_pipelines_toolkit/analysis/amplicon/amplicon_utils.py,sha256=9ScTh7uAIEBRDt61oG4inu9yezEzP1T2DgFWitaq4Po,6567
+ mgnify_pipelines_toolkit/analysis/amplicon/are_there_primers.py,sha256=Hfp5P89Sx6QE5oAxdNmuDwySG9FII3x4H5RFEXgnbF4,5311
  mgnify_pipelines_toolkit/analysis/amplicon/assess_inflection_point_mcp.py,sha256=hVkg8-tdLLf1Ewy9hor-H9zsyi-n8dnuj_shTQ5_rrM,7548
- mgnify_pipelines_toolkit/analysis/amplicon/assess_mcp_proportions.py,sha256=aNucaUnYejl2Not4YLMBSzyYWGYJvYwLPZcFE94TIDc,5355
+ mgnify_pipelines_toolkit/analysis/amplicon/assess_mcp_proportions.py,sha256=xdnois8ilj4wuyDSc8xfIclVpcqaygCliVjEgnFDdi0,5382
  mgnify_pipelines_toolkit/analysis/amplicon/classify_var_regions.py,sha256=kIuE2wo3FaFZw2-HRGxstKz29FyGuhqVDRhf_vPZgsA,19921
  mgnify_pipelines_toolkit/analysis/amplicon/find_mcp_inflection_points.py,sha256=EnsIrPGigsy8jVnjYgSECihhuquSJTgCi-k6fhusKYM,3547
  mgnify_pipelines_toolkit/analysis/amplicon/make_asv_count_table.py,sha256=ICFR8Ci_VofQFykasiSWwOwL_SH64PVcROoenw5jifE,8751
@@ -11,11 +11,14 @@ mgnify_pipelines_toolkit/analysis/amplicon/mapseq_to_asv_table.py,sha256=9QI6o85
  mgnify_pipelines_toolkit/analysis/amplicon/primer_val_classification.py,sha256=d_Mco92RRUXSq5-5oFlXC0ZO83kbxwOREoCCyA2glDc,3751
  mgnify_pipelines_toolkit/analysis/amplicon/remove_ambiguous_reads.py,sha256=8vwH6PY-XwMZhaUo08tOwdFsoREfNumvvDawTb9Y98U,3168
  mgnify_pipelines_toolkit/analysis/amplicon/rev_comp_se_primers.py,sha256=19NgCYE12bEvRBVibhZtZywwRiMdiBUBJjzL4by3_qo,1717
- mgnify_pipelines_toolkit/analysis/amplicon/standard_primer_matching.py,sha256=RDPsaWKf0wIDwvCHXyRCh2zSJf3y9E7uOhHjaAeX8bY,11099
- mgnify_pipelines_toolkit/analysis/assembly/add_rhea_chebi_annotation.py,sha256=69iK8vtG5xFgYQ-KJiSQlaxuhSoxzcO59eNLyDS3nm0,4323
+ mgnify_pipelines_toolkit/analysis/amplicon/standard_primer_matching.py,sha256=8xCjkCMtLuBWZ74AUu7tw0uXQRII3jD3n12PX-Xd9y4,11109
+ mgnify_pipelines_toolkit/analysis/assembly/add_rhea_chebi_annotation.py,sha256=8GRjqDVQLU6cutn-40wVuEz_PxlnjCz33YJ0PUpObIc,4253
  mgnify_pipelines_toolkit/analysis/assembly/antismash_gff_builder.py,sha256=OODl3XhLvksvG5RZn1iHZlg9L3DXiWIkyxJ6o-y6oeg,6949
  mgnify_pipelines_toolkit/analysis/assembly/cgc_merge.py,sha256=u6r_1GRGgBAJQvU_t5Rtl3ZYjTtGJGd5yHCobtL9ob0,15405
  mgnify_pipelines_toolkit/analysis/assembly/generate_gaf.py,sha256=U1Ls3O0CQmukmoyUwEAEN11jHUKuCdS-qVkr5ai243I,3582
+ mgnify_pipelines_toolkit/analysis/assembly/gff_annotation_utils.py,sha256=IlkeP4DuN7rXJIHa7o2sONHAXLhV9nGP-5Y1_0u8YQo,31393
+ mgnify_pipelines_toolkit/analysis/assembly/gff_file_utils.py,sha256=8kv_6KWznOVRkeAtghLf4pxKPhAqdn36LOK4MsTz9hU,3282
+ mgnify_pipelines_toolkit/analysis/assembly/gff_toolkit.py,sha256=uUIo97gmzO2zzN-pYF5paIzeHWBsmmjFp7zGAhf4PKY,5021
  mgnify_pipelines_toolkit/analysis/assembly/go_utils.py,sha256=vsYaFJ_cmbo6DXlWs_X8wpZJfMQOq1CrLX4-3owmYjI,5447
  mgnify_pipelines_toolkit/analysis/assembly/summarise_goslims.py,sha256=RthgLO3YTO_JGMC7Nx2JDrowXRimnOtVUDkM1l31rt4,5834
  mgnify_pipelines_toolkit/analysis/shared/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -24,20 +27,21 @@ mgnify_pipelines_toolkit/analysis/shared/get_subunits.py,sha256=xl5HduWtGPWiI9yq
  mgnify_pipelines_toolkit/analysis/shared/get_subunits_coords.py,sha256=DTX7S1P_BkGPEeDkbmUn1YoB247hpdNIe5rdFdRYDdA,1929
  mgnify_pipelines_toolkit/analysis/shared/library_strategy_check.py,sha256=XV1vjkjIHhzouM1k5hu_51XK_mgC_EOOGDN3mx4LOvc,1991
  mgnify_pipelines_toolkit/analysis/shared/mapseq2biom.py,sha256=exzWyuK0YxDiVSu4WX2H7g-uT5Y00w_EmrFqSHjRObU,5554
+ mgnify_pipelines_toolkit/analysis/shared/markergene_study_summary.py,sha256=sKAo_rKEyVAZXSaIFMkpSoYZxiWwXMA3XDA6Z-hbHgg,7904
  mgnify_pipelines_toolkit/analysis/shared/study_summary_generator.py,sha256=aWD-1B_fJg4rYZj2p8t8CUZdG1lDSo-oeFtLvjLgsak,13680
  mgnify_pipelines_toolkit/constants/db_labels.py,sha256=_2sGzTlfX7unGqkLylQFEUWNPQ8NZnQMtzlfVFuWtyU,853
  mgnify_pipelines_toolkit/constants/regex_ambiguous_bases.py,sha256=dCP3u_Qo-JMk3aqVapkqEbVUGE06jBQmUH6bB3bT8k0,1088
  mgnify_pipelines_toolkit/constants/regex_fasta_header.py,sha256=_2UTWfHKJyyFkIRQIPM2wDf-QkRTdLJ4xsA6gAkY9f4,1188
  mgnify_pipelines_toolkit/constants/tax_ranks.py,sha256=63dQlW7jAjLPOSCT670QCS5WhTp13vwaHqfmFYbKMyg,1076
- mgnify_pipelines_toolkit/constants/thresholds.py,sha256=zz8paGQfZAU8tT-RbSGpzZ1Aopf77yEs97BAblHH5fk,964
+ mgnify_pipelines_toolkit/constants/thresholds.py,sha256=7PuGhYPHBTJc-hwyOgnfmkfslWX-rQyLWeJVG_E6SGY,1152
  mgnify_pipelines_toolkit/constants/var_region_coordinates.py,sha256=jbOB_bTnW2TRjmdF7IS1A7nNOLt-lGnGyVXUHu0TmvQ,1307
  mgnify_pipelines_toolkit/schemas/schemas.py,sha256=fd2xCoA1Ty-XaMG9U_gxNcBokHiYENbA85n9YTsqbpU,7098
  mgnify_pipelines_toolkit/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mgnify_pipelines_toolkit/utils/fasta_to_delimited.py,sha256=GbNT7clHso21w_1PbPpWKVRd5bNs_MDbGXt8XVIGl2o,3991
  mgnify_pipelines_toolkit/utils/get_mpt_version.py,sha256=zsQ4TuR4vpqYa67MgIdopdscsS0DVJdy4enRe1nCjSs,793
- mgnify_pipelines_toolkit-0.2.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- mgnify_pipelines_toolkit-0.2.0.dist-info/METADATA,sha256=TR0FyKtC0Xyj0zvDCPiYsI6bGbZI9GkQ8fiC1WWomEk,6068
- mgnify_pipelines_toolkit-0.2.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- mgnify_pipelines_toolkit-0.2.0.dist-info/entry_points.txt,sha256=60Nov738JAon-uZXUqqjOGy4TXxgS4xtxqYhAi12HY0,2084
- mgnify_pipelines_toolkit-0.2.0.dist-info/top_level.txt,sha256=xA_wC7C01V3VwuDnqwRM2QYeJJ45WtvF6LVav4tYxuE,25
- mgnify_pipelines_toolkit-0.2.0.dist-info/RECORD,,
+ mgnify_pipelines_toolkit-0.2.2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ mgnify_pipelines_toolkit-0.2.2.dist-info/METADATA,sha256=7YtKMV_tE60N9c7H2ZXTmitIBGuULAjFn-IsyO-zg8M,6146
+ mgnify_pipelines_toolkit-0.2.2.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+ mgnify_pipelines_toolkit-0.2.2.dist-info/entry_points.txt,sha256=ThsGYkuzeFRfs2NB1Z1EJ_EtfPDKrZ4lHL8AvXDRq1k,2181
+ mgnify_pipelines_toolkit-0.2.2.dist-info/top_level.txt,sha256=xA_wC7C01V3VwuDnqwRM2QYeJJ45WtvF6LVav4tYxuE,25
+ mgnify_pipelines_toolkit-0.2.2.dist-info/RECORD,,
mgnify_pipelines_toolkit-0.2.2.dist-info/WHEEL
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.8.0)
+ Generator: setuptools (75.8.2)
  Root-Is-Purelib: true
  Tag: py3-none-any

mgnify_pipelines_toolkit-0.2.2.dist-info/entry_points.txt
@@ -16,9 +16,10 @@ library_strategy_check = mgnify_pipelines_toolkit.analysis.shared.library_strate
  make_asv_count_table = mgnify_pipelines_toolkit.analysis.amplicon.make_asv_count_table:main
  mapseq2biom = mgnify_pipelines_toolkit.analysis.shared.mapseq2biom:main
  mapseq_to_asv_table = mgnify_pipelines_toolkit.analysis.amplicon.mapseq_to_asv_table:main
+ markergene_study_summary = mgnify_pipelines_toolkit.analysis.shared.markergene_study_summary:main
  primer_val_classification = mgnify_pipelines_toolkit.analysis.amplicon.primer_val_classification:main
  remove_ambiguous_reads = mgnify_pipelines_toolkit.analysis.amplicon.remove_ambiguous_reads:main
  rev_comp_se_primers = mgnify_pipelines_toolkit.analysis.amplicon.rev_comp_se_primers:main
  standard_primer_matching = mgnify_pipelines_toolkit.analysis.amplicon.standard_primer_matching:main
- study_summary_generator = mgnify_pipelines_toolkit.analysis.shared.study_summary_generator:main
+ study_summary_generator = mgnify_pipelines_toolkit.analysis.shared.study_summary_generator:cli
  summarise_goslims = mgnify_pipelines_toolkit.analysis.assembly.summarise_goslims:main