mgnify-pipelines-toolkit 1.0.9__tar.gz → 1.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (63)
  1. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/PKG-INFO +2 -2
  2. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/amplicon/classify_var_regions.py +1 -1
  3. {mgnify_pipelines_toolkit-1.0.9/mgnify_pipelines_toolkit/analysis/shared → mgnify_pipelines_toolkit-1.1.1/mgnify_pipelines_toolkit/analysis/amplicon}/study_summary_generator.py +2 -2
  4. mgnify_pipelines_toolkit-1.1.1/mgnify_pipelines_toolkit/analysis/assembly/study_summary_generator.py +605 -0
  5. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/summarise_antismash_bgcs.py +5 -9
  6. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/summarise_sanntis_bgcs.py +18 -16
  7. mgnify_pipelines_toolkit-1.1.1/mgnify_pipelines_toolkit/schemas/schemas.py +588 -0
  8. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit.egg-info/PKG-INFO +2 -2
  9. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit.egg-info/SOURCES.txt +2 -1
  10. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit.egg-info/entry_points.txt +2 -1
  11. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/pyproject.toml +14 -13
  12. mgnify_pipelines_toolkit-1.0.9/mgnify_pipelines_toolkit/schemas/schemas.py +0 -235
  13. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/LICENSE +0 -0
  14. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/README.md +0 -0
  15. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/__init__.py +0 -0
  16. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/__init__.py +0 -0
  17. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/amplicon/amplicon_utils.py +0 -0
  18. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/amplicon/are_there_primers.py +0 -0
  19. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/amplicon/assess_inflection_point_mcp.py +0 -0
  20. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/amplicon/assess_mcp_proportions.py +0 -0
  21. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/amplicon/find_mcp_inflection_points.py +0 -0
  22. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/amplicon/make_asv_count_table.py +0 -0
  23. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/amplicon/mapseq_to_asv_table.py +0 -0
  24. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/amplicon/primer_val_classification.py +0 -0
  25. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/amplicon/remove_ambiguous_reads.py +0 -0
  26. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/amplicon/rev_comp_se_primers.py +0 -0
  27. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/amplicon/standard_primer_matching.py +0 -0
  28. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/add_rhea_chebi_annotation.py +0 -0
  29. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/antismash_gff_builder.py +0 -0
  30. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/combined_gene_caller_merge.py +0 -0
  31. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/generate_gaf.py +0 -0
  32. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/gff_annotation_utils.py +0 -0
  33. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/gff_file_utils.py +0 -0
  34. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/gff_toolkit.py +0 -0
  35. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/go_utils.py +0 -0
  36. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/krona_txt_from_cat_classification.py +0 -0
  37. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/process_dbcan_result_cazys.py +0 -0
  38. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/process_dbcan_result_clusters.py +0 -0
  39. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/summarise_goslims.py +0 -0
  40. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/genomes/__init__.py +0 -0
  41. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/shared/__init__.py +0 -0
  42. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/shared/convert_cmscan_to_cmsearch_tblout.py +0 -0
  43. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/shared/dwc_summary_generator.py +0 -0
  44. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/shared/fastq_suffix_header_check.py +0 -0
  45. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/shared/get_subunits.py +0 -0
  46. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/shared/get_subunits_coords.py +0 -0
  47. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/shared/library_strategy_check.py +0 -0
  48. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/shared/mapseq2biom.py +0 -0
  49. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/shared/markergene_study_summary.py +0 -0
  50. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/constants/db_labels.py +0 -0
  51. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/constants/ncrna.py +0 -0
  52. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/constants/regex_ambiguous_bases.py +0 -0
  53. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/constants/regex_fasta_header.py +0 -0
  54. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/constants/tax_ranks.py +0 -0
  55. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/constants/thresholds.py +0 -0
  56. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/constants/var_region_coordinates.py +0 -0
  57. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/utils/__init__.py +0 -0
  58. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/utils/fasta_to_delimited.py +0 -0
  59. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/utils/get_mpt_version.py +0 -0
  60. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit.egg-info/dependency_links.txt +0 -0
  61. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit.egg-info/requires.txt +0 -0
  62. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit.egg-info/top_level.txt +0 -0
  63. {mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/setup.cfg +0 -0
{mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mgnify_pipelines_toolkit
- Version: 1.0.9
+ Version: 1.1.1
  Summary: Collection of scripts and tools for MGnify pipelines
  Author-email: MGnify team <metagenomics-help@ebi.ac.uk>
  License: Apache Software License 2.0
@@ -8,7 +8,7 @@ Keywords: bioinformatics,pipelines,metagenomics
  Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Operating System :: OS Independent
- Requires-Python: >=3.9
+ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: biopython>=1.85
{mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/amplicon/classify_var_regions.py
@@ -427,7 +427,7 @@ def retrieve_regions(
              new_value.append(region)
          if not new_value:
              models_to_remove.append(model)
-         if len(new_value) < MIN_SEQ_COUNT:
+         elif len(new_value) < MIN_SEQ_COUNT:
              models_to_remove.append(model)
          multiregion_matches[model] = new_value

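The switch to elif above fixes a double-append: with two independent if statements, a model whose new_value list ended up empty satisfied both conditions and was added to models_to_remove twice. A minimal sketch of the old behaviour (the threshold value here is hypothetical, not the package's real constant):

MIN_SEQ_COUNT = 5  # hypothetical threshold, for illustration only

models_to_remove = []
new_value = []  # no regions survived for this model

if not new_value:
    models_to_remove.append("model_a")
if len(new_value) < MIN_SEQ_COUNT:  # pre-1.1.1: a plain `if`, so this also fires
    models_to_remove.append("model_a")

print(models_to_remove)  # ['model_a', 'model_a'] -> duplicate entry; `elif` avoids it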
{mgnify_pipelines_toolkit-1.0.9/mgnify_pipelines_toolkit/analysis/shared → mgnify_pipelines_toolkit-1.1.1/mgnify_pipelines_toolkit/analysis/amplicon}/study_summary_generator.py
@@ -257,7 +257,7 @@ def organise_study_summaries(all_study_summaries: List[str]) -> defaultdict[List
  @cli.command(
      "summarise",
      options_metavar="-r <runs> -a <analyses_dir> -p <output_prefix>",
-     short_help="Generate study-level analysis summaries.",
+     short_help="Generate study-level summaries of amplicon analysis results.",
  )
  @click.option(
      "-r",
@@ -327,7 +327,7 @@ def summarise_analyses(
  @cli.command(
      "merge",
      options_metavar="-a <analyses_dir> -p <output_prefix>",
-     short_help="Merge multiple study-level analysis summaries.",
+     short_help="Merge multiple study-level summaries of amplicon analysis.",
  )
  @click.option(
      "-a",
mgnify_pipelines_toolkit-1.1.1/mgnify_pipelines_toolkit/analysis/assembly/study_summary_generator.py
@@ -0,0 +1,605 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+ # Copyright 2025 EMBL - European Bioinformatics Institute
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import click
+ from functools import reduce
+ import glob
+ import logging
+ from pathlib import Path
+ from typing import Literal
+
+ import pandas as pd
+
+ from mgnify_pipelines_toolkit.schemas.schemas import (
+     CompletedAnalysisSchema,
+     TaxonSchema,
+     GOSummarySchema,
+     InterProSummarySchema,
+     KOSummarySchema,
+     SanntisSummarySchema,
+     AntismashSummarySchema,
+     PFAMSummarySchema,
+     KEGGModulesSummarySchema,
+     GOStudySummarySchema,
+     InterProStudySummarySchema,
+     TaxonomyStudySummarySchema,
+     KOStudySummarySchema,
+     SanntisStudySummarySchema,
+     AntismashStudySummarySchema,
+     PFAMStudySummarySchema,
+     KEGGModulesStudySummarySchema,
+     validate_dataframe,
+ )
+
+ logging.basicConfig(
+     level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s"
+ )
+
+ # Keys are the original column names in the input files,
+ # values are the standardised column names used in the generated study summary files.
+ # Note: the "Count"/"count" column should be excluded
+ GO_COLUMN_NAMES = {
+     "go": "GO",
+     "term": "description",
+     "category": "category",
+ }
+
+ INTERPRO_COLUMN_NAMES = {
+     "interpro_accession": "IPR",
+     "description": "description",
+ }
+
+ SANNTIS_COLUMN_NAMES = {
+     "nearest_mibig": "nearest_mibig",
+     "nearest_mibig_class": "nearest_mibig_class",
+     "description": "description",
+ }
+
+ ANTISMASH_COLUMN_NAMES = {
+     "label": "label",
+     "description": "description",
+ }
+
+ KEGG_COLUMN_NAMES = {
+     "ko": "KO",
+     "description": "description",
+ }
+
+ PFAM_COLUMN_NAMES = {
+     "pfam": "PFAM",
+     "description": "description",
+ }
+
+ KEGG_MODULES_COLUMN_NAMES = {
+     "module_accession": "module_accession",
+     "pathway_name": "pathway_name",
+     "pathway_class": "pathway_class",
+ }
+
+ # This mapping allows a single 'for' loop later to process all summary types in one way
+ SUMMARY_TYPES_MAP = {
+     "go": {
+         "folder": "functional-annotation/go",
+         "column_names": GO_COLUMN_NAMES,
+         "schema": GOSummarySchema,
+         "study_schema": GOStudySummarySchema,
+     },
+     "goslim": {
+         "folder": "functional-annotation/go",
+         "column_names": GO_COLUMN_NAMES,
+         "schema": GOSummarySchema,
+         "study_schema": GOStudySummarySchema,
+     },
+     "interpro": {
+         "folder": "functional-annotation/interpro",
+         "column_names": INTERPRO_COLUMN_NAMES,
+         "schema": InterProSummarySchema,
+         "study_schema": InterProStudySummarySchema,
+     },
+     "ko": {
+         "folder": "functional-annotation/kegg",
+         "column_names": KEGG_COLUMN_NAMES,
+         "schema": KOSummarySchema,
+         "study_schema": KOStudySummarySchema,
+     },
+     "sanntis": {
+         "folder": "pathways-and-systems/sanntis",
+         "column_names": SANNTIS_COLUMN_NAMES,
+         "schema": SanntisSummarySchema,
+         "study_schema": SanntisStudySummarySchema,
+     },
+     "antismash": {
+         "folder": "pathways-and-systems/antismash",
+         "column_names": ANTISMASH_COLUMN_NAMES,
+         "schema": AntismashSummarySchema,
+         "study_schema": AntismashStudySummarySchema,
+     },
+     "pfam": {
+         "folder": "functional-annotation/pfam",
+         "column_names": PFAM_COLUMN_NAMES,
+         "schema": PFAMSummarySchema,
+         "study_schema": PFAMStudySummarySchema,
+     },
+     "kegg_modules": {
+         "folder": "pathways-and-systems/kegg-modules",
+         "column_names": KEGG_MODULES_COLUMN_NAMES,
+         "schema": KEGGModulesSummarySchema,
+         "study_schema": KEGGModulesStudySummarySchema,
+     },
+ }
+
+ # The taxonomy file is a tab-separated file without any header,
+ # consisting of the following columns:
+ TAXONOMY_COLUMN_NAMES = [
+     "Count",
+     "Superkingdom",
+     "Kingdom",
+     "Phylum",
+     "Class",
+     "Order",
+     "Family",
+     "Genus",
+     "Species",
+ ]
+
+ OUTPUT_SUFFIX = "summary.tsv"
+
+
+ @click.group()
+ def cli():
+     pass
+
+
+ def check_files_exist(file_list: list[Path]) -> None:
+     """
+     Check that all files in the given list exist on disk.
+
+     :param file_list: List of file paths to check.
+     :raises FileNotFoundError: If any file does not exist.
+     """
+     missing_files = [str(path) for path in file_list if not path.is_file()]
+     if missing_files:
+         raise FileNotFoundError(
+             f"The following required files are missing: {', '.join(missing_files)}"
+         )
+
+
+ def generate_taxonomy_summary(
+     file_dict: dict[str, Path],
+     output_file_name: str,
+     outdir: Path = None,
+ ) -> None:
+     """
+     Generate a combined study-level taxonomic classification summary from multiple input
+     assembly-level summary files.
+
+     :param file_dict: Dictionary mapping assembly accession to its taxonomy file.
+     :param output_file_name: Name of the output summary file.
+     :param outdir: Optional output directory for the results.
+
+     Example of the taxonomy file:
+     23651 sk__Bacteria
+     4985 sk__Archaea k__Thermoproteati p__Nitrososphaerota
+     882 sk__Archaea k__Nanobdellati p__ c__ o__ f__ g__ s__Candidatus Pacearchaeota archaeon
+     """
+     check_files_exist(list(file_dict.values()))
+
+     tax_dfs = []
+     for assembly_acc, path in file_dict.items():
+         df = pd.read_csv(path, sep="\t", names=TAXONOMY_COLUMN_NAMES).fillna("")
+
+         # Note: schema validation will fail if the taxonomy file is empty
+         df = validate_dataframe(df, TaxonSchema, str(path))
+
+         # Combine all taxonomic ranks in the classification into a single string
+         df["full_taxon"] = (
+             df[TAXONOMY_COLUMN_NAMES[1:]].agg(";".join, axis=1).str.strip(";")
+         )
+
+         # Create a new DataFrame with taxonomy as index and count as the only column
+         result = df[["Count", "full_taxon"]].set_index("full_taxon")
+         result.columns = [assembly_acc]
+         tax_dfs.append(result)
+
+     summary_df = pd.concat(tax_dfs, axis=1)
+     summary_df = summary_df.fillna(0).astype(int).sort_index()
+
+     outfile = output_file_name
+     if outdir:
+         outfile = outdir / output_file_name
+
+     summary_df.to_csv(outfile, sep="\t", index_label="taxonomy")
+
+
+ def generate_functional_summary(
+     file_dict: dict[str, Path],
+     column_names: dict[str, str],
+     output_prefix: str,
+     label: Literal[
+         "go", "goslim", "interpro", "ko", "sanntis", "antismash", "pfam", "kegg_modules"
+     ],
+     outdir: Path = None,
+ ) -> None:
+     """
+     Generate a combined study-level functional annotation summary from multiple input
+     assembly-level summary files.
+
+     :param file_dict: Dictionary mapping assembly accession to its summary file path.
+     :param column_names: Dictionary mapping original column names to standard column names.
+     :param output_prefix: Prefix for the output summary file.
+     :param label: Label for the functional annotation type
+         (expected one of ["go", "goslim", "interpro", "ko", "sanntis", "antismash", "pfam", "kegg_modules"]).
+     :param outdir: Optional output directory for the results.
+
+     In the input files, column orders may vary, but the following columns are expected:
+     GO summary input file:
+     go term category count
+     GO:0016020 membrane cellular_component 30626
+     GO:0005524 ATP binding molecular_function 30524
+
+     InterPro summary input file:
+     interpro_accession description count
+     IPR036291 NAD(P)-binding domain superfamily 16503
+     IPR019734 Tetratricopeptide repeat 14694
+
+     KEGG summary input file:
+     ko description count
+     K01552 energy-coupling factor transport system ATP-binding protein [EC:7.-.-.-] 562
+     K18889 ATP-binding cassette, subfamily B, multidrug efflux pump 537
+     K15497 molybdate/tungstate transport system ATP-binding protein [EC:7.3.2.5 7.3.2.6] 517
+
+     Sanntis summary input file:
+     nearest_mibig nearest_mibig_class description count
+     BGC0000787 Saccharide Carbohydrate-based natural products (e.g., aminoglycoside antibiotics) 1
+     BGC0000248 Polyketide Built from iterative condensation of acetate units derived from acetyl-CoA 3
+     BGC0001327 NRP Polyketide Nonribosomal Peptide Polyketide 2
+
+     Antismash summary input file:
+     label description count
+     terpene Terpene 16
+     betalactone Beta-lactone containing protease inhibitor 8
+     T1PKS Type I PKS (Polyketide synthase) 3
+
+     PFAM summary input file:
+     pfam description count
+     PF00265 Thymidine kinase 457
+     PF01852 START domain 368
+     PF13756 Stimulus-sensing domain 397
+
+     KEGG modules summary input file:
+     module_accession completeness pathway_name pathway_class matching_ko missing_ko
+     M00986 100.0 Sulfur reduction, sulfur => sulfide Pathway modules; Energy metabolism; Sulfur metabolism K18367
+     M00163 83.33 Photosystem I Pathway modules; Energy metabolism; Photosynthesis K02689,K02690,K02691,K02692,K02694 K02693
+     M00615 50.0 Nitrate assimilation Signature modules; Module set; Metabolic capacity K02575 M00531
+     """
+     check_files_exist(list(file_dict.values()))
+
+     output_file_name = f"{output_prefix}_{label}_{OUTPUT_SUFFIX}"
+
+     original_col_names = list(column_names.keys())
+     renamed_col_names = list(column_names.values())
+     value_col_name = "completeness" if label == "kegg_modules" else "count"
+
+     dfs = []
+     for assembly_acc, filepath in file_dict.items():
+         try:
+             df = pd.read_csv(filepath, sep="\t")
+         except pd.errors.EmptyDataError:
+             logging.warning(f"File {filepath.resolve()} is empty. Skipping.")
+             continue
+
+         schema = SUMMARY_TYPES_MAP[label]["schema"]
+         df = validate_dataframe(df, schema, str(filepath))
+
+         # Extract only relevant columns
+         df = df[original_col_names + [value_col_name]].copy()
+
+         # Rename columns: metadata columns are renamed according to the column_names dict, "count"/"completeness" -> assembly acc
+         df.rename(columns={**column_names, value_col_name: assembly_acc}, inplace=True)
+         dfs.append(df)
+
+     if not dfs:
+         logging.warning(
+             f"No valid files with functional annotation summary were found. Skipping creation of {output_file_name}."
+         )
+         return
+
+     # Merge all dataframes on the renamed metadata columns
+     merged_df = reduce(
+         lambda left, right: pd.merge(left, right, on=renamed_col_names, how="outer"),
+         dfs,
+     )
+
+     # Fill missing values appropriately: completeness percentages become floats, counts integers
+     value_columns = [col for col in merged_df.columns if col not in renamed_col_names]
+     fill_value = 0.0 if label == "kegg_modules" else 0
+     dtype = float if label == "kegg_modules" else int
+     merged_df[value_columns] = merged_df[value_columns].fillna(fill_value).astype(dtype)
+
+     # Reorder columns: merge keys first, then sorted assembly accessions
+     merged_df = merged_df[renamed_col_names + sorted(value_columns)]
+
+     outfile = output_file_name
+     if outdir:
+         outfile = outdir / output_file_name
+
+     merged_df.to_csv(outfile, sep="\t", index=False)
+
+
+ @cli.command(
+     "summarise",
+     options_metavar="-a <assemblies> -s <study_dir> -p <output_prefix>",
+     short_help="Generate study-level summaries for assembly analysis results.",
+ )
+ @click.option(
+     "-a",
+     "--assemblies",
+     required=True,
+     help="CSV file containing successful analyses generated by the pipeline",
+     type=click.Path(exists=True, path_type=Path, dir_okay=False),
+ )
+ @click.option(
+     "-s",
+     "--study_dir",
+     required=True,
+     help="Input directory containing the individual analysis subdirectories to summarise",
+     type=click.Path(exists=True, path_type=Path, file_okay=False),
+ )
+ @click.option(
+     "-p",
+     "--output_prefix",
+     required=True,
+     help="Prefix for generated summary files",
+     type=str,
+ )
+ @click.option(
+     "-o",
+     "--outdir",
+     required=False,
+     help="Directory for the output files; by default the current working directory is used.",
+     type=click.Path(exists=True, path_type=Path, file_okay=False),
+ )
+ def summarise_analyses(
+     assemblies: Path, study_dir: Path, output_prefix: str, outdir: Path
+ ) -> None:
+     """
+     Generate study-level summaries for successfully processed assemblies.
+
+     :param assemblies: Path to a file listing completed assembly accessions and their status.
+     :param study_dir: Path to the directory containing analysis results for each assembly.
+     :param output_prefix: Prefix for the generated summary files.
+     """
+     logging.info(f"Reading assembly list from {assemblies.resolve()}")
+     assemblies_df = pd.read_csv(assemblies, names=["assembly", "status"])
+     CompletedAnalysisSchema(assemblies_df)
+     assembly_list = assemblies_df["assembly"].tolist()
+     logging.info("Assembly list was read successfully.")
+
+     def get_file_paths(subdir: str, filename_template: str) -> dict[str, Path]:
+         """
+         Construct file paths for each assembly given a subdirectory and filename template.
+         Template must contain {acc} as a placeholder.
+         """
+         return {
+             acc: study_dir / acc / subdir / filename_template.format(acc=acc)
+             for acc in assembly_list
+         }
+
+     logging.info("Starting to process assembly-level summaries.")
+
+     logging.info(
+         "Generating taxonomy summary from assembly-level summaries <accession>.krona.txt"
+     )
+     generate_taxonomy_summary(
+         get_file_paths("taxonomy", "{acc}.krona.txt.gz"),
+         f"{output_prefix}_taxonomy_{OUTPUT_SUFFIX}",
+         outdir=outdir,
+     )
+
+     for summary_type, config in SUMMARY_TYPES_MAP.items():
+         logging.info(
+             f"Generating study-level {summary_type.capitalize()} summary from file <accession>_{summary_type}_summary.tsv.gz"
+         )
+         generate_functional_summary(
+             get_file_paths(config["folder"], f"{{acc}}_{summary_type}_summary.tsv.gz"),
+             config["column_names"],
+             output_prefix,
+             summary_type,
+             outdir=outdir,
+         )
+     logging.info("Assembly-level summaries were generated successfully.")
+     logging.info("Done.")
+
+
+ @cli.command(
+     "merge",
+     options_metavar="-s <study_dir> -p <output_prefix>",
+     short_help="Merge multiple study-level summaries of assembly analysis.",
+ )
+ @click.option(
+     "-s",
+     "--study_dir",
+     required=True,
+     help="Input directory containing the study-level summary files to merge",
+     type=click.Path(exists=True, file_okay=False),
+ )
+ @click.option(
+     "-p",
+     "--output_prefix",
+     required=True,
+     help="Prefix for generated merged summary files",
+     type=str,
+ )
+ def merge_summaries(study_dir: str, output_prefix: str) -> None:
+     """
+     Merge multiple study-level summary files into combined summary files.
+
+     :param study_dir: Path to the directory containing study-level summary files.
+     :param output_prefix: Prefix for the output merged summary files.
+     """
+
+     def get_file_paths(summary_type: str) -> list[str]:
+         return glob.glob(f"{study_dir}/*_{summary_type}_{OUTPUT_SUFFIX}")
+
+     logging.info("Generating combined assembly-level summaries")
+     logging.info("Parsing summary files for taxonomic classification")
+     merge_taxonomy_summaries(
+         get_file_paths("taxonomy"), f"{output_prefix}_taxonomy_{OUTPUT_SUFFIX}"
+     )
+
+     for summary_type, config in SUMMARY_TYPES_MAP.items():
+         logging.info(f"Parsing summary files for {summary_type.capitalize()}.")
+         column_names = config["column_names"]
+         merge_functional_summaries(
+             get_file_paths(summary_type),
+             list(column_names.values()),
+             output_prefix,
+             summary_type,
+         )
+     logging.info("Merged assembly-level summaries were generated successfully.")
+     logging.info("Done.")
+
+
+ def merge_taxonomy_summaries(summary_files: list[str], output_file_name: str) -> None:
+     """
+     Merge multiple taxonomy study-level summary files into a single study-level summary.
+
+     :param summary_files: List of paths to taxonomy summary files, each containing
+         taxonomic classifications and counts for an individual analysis.
+     :param output_file_name: Output path for the merged taxonomy summary.
+
+     Example of input taxonomy summary file:
+     taxonomy ERZ1049444 ERZ1049446
+     sk__Eukaryota;k__Metazoa;p__Chordata 2 10
+     sk__Eukaryota;k__Metazoa;p__Chordata;c__Mammalia;o__Primates 118 94
+     """
+     if not summary_files:
+         raise FileNotFoundError(
+             "The required taxonomic classification summary files are missing. Exiting."
+         )
+
+     summary_dfs = []
+     for file in summary_files:
+         df = pd.read_csv(file, sep="\t", index_col=0)
+         df = validate_dataframe(df, TaxonomyStudySummarySchema, file)
+         summary_dfs.append(df)
+     merged_df = pd.concat(summary_dfs, axis=1)
+     merged_df = merged_df.fillna(0).astype(int)
+
+     # Reorder columns: taxonomy first, then sorted assembly accessions
+     merged_df = merged_df[sorted(merged_df.columns)]
+     merged_df = merged_df.sort_index()
+
+     merged_df.to_csv(
+         output_file_name,
+         sep="\t",
+         index_label="taxonomy",
+     )
+
+
+ def merge_functional_summaries(
+     summary_files: list[str],
+     merge_keys: list[str],
+     output_prefix: str,
+     label: Literal[
+         "go", "goslim", "interpro", "ko", "sanntis", "antismash", "pfam", "kegg_modules"
+     ],
+ ) -> None:
+     """
+     Merge multiple functional study-level summary files into a single study-level summary.
+
+     :param summary_files: List of paths to functional summary files, each containing
+         annotation terms and counts for an individual analysis.
+     :param merge_keys: List of column names to merge on (e.g. term ID, description).
+     :param output_prefix: Prefix for the generated output file.
+     :param label: Label describing the functional annotation type
+         (expected one of ["go", "goslim", "interpro", "ko", "sanntis", "antismash", "pfam", "kegg_modules"]).
+
+     In the input files, column orders may vary, but the following columns are expected:
+     GO summary input:
+     GO description category ERZ1049444 ERZ1049446
+     GO:0016020 membrane cellular_component 30626 673
+     GO:0005524 ATP binding molecular_function 30524 2873
+
+     Example of InterPro summary input:
+     IPR description ERZ1049444 ERZ1049446
+     IPR036291 NAD(P)-binding domain superfamily 16503 13450
+     IPR019734 Tetratricopeptide repeat 14694 11021
+
+     KEGG summary input:
+     GO description category ERZ1049440 ERZ1049443
+     GO:0003677 DNA binding molecular_function 6125 16417
+     GO:0055085 transmembrane transport biological_process 144 13926
+
+     Sanntis summary input:
+     nearest_mibig nearest_mibig_class description ERZ1049440 ERZ1049443
+     BGC0001356 RiPP Ribosomally synthesised and Post-translationally modified Peptide 230 185
+     BGC0001432 NRP Polyketide Nonribosomal Peptide Polyketide 0 8
+
+     Antismash summary input:
+     label description ERZ1049440 ERZ1049443
+     NRPS Non-ribosomal peptide synthetase 368 0
+     arylpolyene Aryl polyene 149 447
+
+     PFAM summary input:
+     PFAM description ERZ1049440 ERZ1049443
+     PF24718 HTH-like domain 468 1
+     PF06039 Malate:quinone oxidoreductase (Mqo) 490 21
+
+     KEGG modules summary input:
+     module_accession pathway_name pathway_class ERZ1049440 ERZ1049443
+     M00109 C21-Steroid hormone biosynthesis, progesterone => cortisol/cortisone Pathway modules; Lipid metabolism; Sterol biosynthesis 38.9 0.0
+     M00153 Cytochrome bd ubiquinol oxidase Pathway modules; Energy metabolism; ATP synthesis 44.7 84.4
+     """
+     output_file_name = f"{output_prefix}_{label}_{OUTPUT_SUFFIX}"
+
+     if not summary_files:
+         logging.warning(
+             f"Skipping creation of {output_file_name} because no summaries were found for this type of functional annotation."
+         )
+         return
+
+     validation_schema = SUMMARY_TYPES_MAP[label]["study_schema"]
+
+     dfs = []
+     for filepath in summary_files:
+         df = pd.read_csv(filepath, sep="\t")
+         df = validate_dataframe(df, validation_schema, filepath)
+         dfs.append(df)
+
+     if len(dfs) == 1:
+         merged_df = dfs[0]
+     else:
+         merged_df = reduce(
+             lambda left, right: pd.merge(left, right, on=merge_keys, how="outer"), dfs
+         )
+
+     # Identify non-key columns (i.e. counts)
+     value_columns = [col for col in merged_df.columns if col not in merge_keys]
+
+     # Fill NaNs and set dtype accordingly
+     fill_value = 0.0 if label == "kegg_modules" else 0
+     dtype = float if label == "kegg_modules" else int
+     merged_df[value_columns] = merged_df[value_columns].fillna(fill_value).astype(dtype)
+
+     # Reorder columns
+     merged_df = merged_df[merge_keys + sorted(value_columns)]
+
+     merged_df.to_csv(output_file_name, sep="\t", index=False)
+
+
+ if __name__ == "__main__":
+     cli()
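The heart of the new module is the outer-merge pattern shared by generate_functional_summary and merge_functional_summaries: per-assembly tables are joined on the metadata columns, and annotations missing from an assembly are filled with zeros. A minimal sketch of that pattern (accessions, terms, and counts are made up):

from functools import reduce

import pandas as pd

# Two per-assembly KO tables whose count column has already been renamed
# to the assembly accession, as generate_functional_summary does.
df_a = pd.DataFrame({"KO": ["K00001"], "description": ["example term A"], "ERZ0000001": [12]})
df_b = pd.DataFrame({"KO": ["K00002"], "description": ["example term B"], "ERZ0000002": [7]})

merge_keys = ["KO", "description"]
merged = reduce(
    lambda left, right: pd.merge(left, right, on=merge_keys, how="outer"),
    [df_a, df_b],
)

# Terms absent from an assembly come out of the merge as NaN; fill with 0
# and cast to int, mirroring the module's fillna/astype step.
value_cols = [c for c in merged.columns if c not in merge_keys]
merged[value_cols] = merged[value_cols].fillna(0).astype(int)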
{mgnify_pipelines_toolkit-1.0.9 → mgnify_pipelines_toolkit-1.1.1}/mgnify_pipelines_toolkit/analysis/assembly/summarise_antismash_bgcs.py
@@ -155,7 +155,7 @@ def parse_args():
      description = (
          "antiSMASH output summary generator. "
          "Script takes regions from GFF and counts its appearance in annotation. "
-         "Output columns contain classID, descriptions and count. "
+         "Output columns contain label, descriptions and count. "
          f"Descriptions were taken from pre-parsed glossary provided on antiSMASH website. "
          f"Current script supports antiSMASH results for version {ANTISMASH_VERSION} and older."
      )
@@ -202,15 +202,15 @@ def main():
      df = pd.DataFrame(dict_list)
      df = df[df["product"].notna()]
      df_grouped = (
-         df.groupby(["product"]).size().reset_index(name="Count")
-     ).sort_values(by="Count", ascending=False)
+         df.groupby(["product"]).size().reset_index(name="count")
+     ).sort_values(by="count", ascending=False)

      df_grouped = df_grouped.rename(
          columns={
              "product": "label",
          }
      )
-     df_grouped["Description"] = df_grouped["label"].apply(
+     df_grouped["description"] = df_grouped["label"].apply(
          lambda x: ",".join(
              [
                  DESCRIPTIONS.get(cls.strip().lower(), cls.strip())
@@ -218,11 +218,7 @@ def main():
              ]
          )
      )
-     df_grouped = df_grouped[["label", "Description", "Count"]]
-     df_grouped = df_grouped.rename(columns={
-         "Description": "description",
-         "Count": "count"
-     })
+     df_grouped = df_grouped[["label", "description", "count"]]
      df_grouped.to_csv(output_filename, sep="\t", index=False)

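A toy illustration (made-up products) of the grouped antiSMASH summary now emitting the lower-case count column directly, which is what removes the need for the later rename step:

import pandas as pd

df = pd.DataFrame({"product": ["terpene", "T1PKS", "terpene"]})
df_grouped = (
    df.groupby(["product"]).size().reset_index(name="count")
).sort_values(by="count", ascending=False)
print(df_grouped)  # terpene appears twice, T1PKS once; the column is already named "count"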