mgnify-pipelines-toolkit 1.0.2__tar.gz → 1.0.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mgnify-pipelines-toolkit might be problematic.

Files changed (58)
  1. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/PKG-INFO +19 -27
  2. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/README.md +2 -1
  3. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/assembly/add_rhea_chebi_annotation.py +5 -1
  4. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/assembly/go_utils.py +44 -45
  5. mgnify_pipelines_toolkit-1.0.4/mgnify_pipelines_toolkit/analysis/assembly/krona_txt_from_cat_classification.py +131 -0
  6. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/assembly/summarise_goslims.py +56 -49
  7. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/shared/get_subunits.py +1 -1
  8. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/constants/ncrna.py +22 -0
  9. mgnify_pipelines_toolkit-1.0.4/mgnify_pipelines_toolkit/utils/__init__.py +0 -0
  10. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit.egg-info/PKG-INFO +19 -27
  11. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit.egg-info/SOURCES.txt +2 -0
  12. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit.egg-info/entry_points.txt +6 -1
  13. mgnify_pipelines_toolkit-1.0.4/mgnify_pipelines_toolkit.egg-info/requires.txt +20 -0
  14. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/pyproject.toml +30 -31
  15. mgnify_pipelines_toolkit-1.0.2/mgnify_pipelines_toolkit.egg-info/requires.txt +0 -29
  16. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/LICENSE +0 -0
  17. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/__init__.py +0 -0
  18. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/__init__.py +0 -0
  19. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/amplicon/amplicon_utils.py +0 -0
  20. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/amplicon/are_there_primers.py +0 -0
  21. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/amplicon/assess_inflection_point_mcp.py +0 -0
  22. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/amplicon/assess_mcp_proportions.py +0 -0
  23. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/amplicon/classify_var_regions.py +0 -0
  24. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/amplicon/find_mcp_inflection_points.py +0 -0
  25. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/amplicon/make_asv_count_table.py +0 -0
  26. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/amplicon/mapseq_to_asv_table.py +0 -0
  27. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/amplicon/primer_val_classification.py +0 -0
  28. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/amplicon/remove_ambiguous_reads.py +0 -0
  29. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/amplicon/rev_comp_se_primers.py +0 -0
  30. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/amplicon/standard_primer_matching.py +0 -0
  31. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/assembly/antismash_gff_builder.py +0 -0
  32. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/assembly/combined_gene_caller_merge.py +0 -0
  33. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/assembly/generate_gaf.py +0 -0
  34. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/assembly/gff_annotation_utils.py +0 -0
  35. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/assembly/gff_file_utils.py +0 -0
  36. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/assembly/gff_toolkit.py +0 -0
  37. {mgnify_pipelines_toolkit-1.0.2/mgnify_pipelines_toolkit/analysis/shared → mgnify_pipelines_toolkit-1.0.4/mgnify_pipelines_toolkit/analysis/genomes}/__init__.py +0 -0
  38. {mgnify_pipelines_toolkit-1.0.2/mgnify_pipelines_toolkit/utils → mgnify_pipelines_toolkit-1.0.4/mgnify_pipelines_toolkit/analysis/shared}/__init__.py +0 -0
  39. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/shared/convert_cmscan_to_cmsearch_tblout.py +0 -0
  40. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/shared/dwc_summary_generator.py +0 -0
  41. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/shared/fastq_suffix_header_check.py +0 -0
  42. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/shared/get_subunits_coords.py +0 -0
  43. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/shared/library_strategy_check.py +0 -0
  44. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/shared/mapseq2biom.py +0 -0
  45. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/shared/markergene_study_summary.py +0 -0
  46. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/shared/study_summary_generator.py +0 -0
  47. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/constants/db_labels.py +0 -0
  48. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/constants/regex_ambiguous_bases.py +0 -0
  49. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/constants/regex_fasta_header.py +0 -0
  50. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/constants/tax_ranks.py +0 -0
  51. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/constants/thresholds.py +0 -0
  52. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/constants/var_region_coordinates.py +0 -0
  53. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/schemas/schemas.py +0 -0
  54. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/utils/fasta_to_delimited.py +0 -0
  55. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/utils/get_mpt_version.py +0 -0
  56. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit.egg-info/dependency_links.txt +0 -0
  57. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit.egg-info/top_level.txt +0 -0
  58. {mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/setup.cfg +0 -0
{mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mgnify_pipelines_toolkit
- Version: 1.0.2
+ Version: 1.0.4
  Summary: Collection of scripts and tools for MGnify pipelines
  Author-email: MGnify team <metagenomics-help@ebi.ac.uk>
  License: Apache Software License 2.0
@@ -11,33 +11,24 @@ Classifier: Operating System :: OS Independent
  Requires-Python: >=3.9
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: biopython==1.82
- Requires-Dist: numpy==1.26.0
- Requires-Dist: pandas==2.0.2
- Requires-Dist: regex==2023.12.25
- Requires-Dist: requests==2.32.3
- Requires-Dist: click==8.1.7
- Requires-Dist: pandera==0.22.1
- Requires-Dist: pyfastx>=2.2.0
- Requires-Dist: intervaltree==3.1.0
+ Requires-Dist: biopython>=1.85
+ Requires-Dist: numpy<3,>=2.2.4
+ Requires-Dist: pandas<3,>=2.2.3
+ Requires-Dist: regex>=2024.11.6
+ Requires-Dist: requests<3,>=2.32.3
+ Requires-Dist: click<9,>=8.1.8
+ Requires-Dist: pandera<0.24,>=0.23.1
+ Requires-Dist: pyfastx<3,>=2.2.0
+ Requires-Dist: intervaltree<4,>=3.1.0
  Provides-Extra: tests
- Requires-Dist: pytest==7.4.0; extra == "tests"
- Requires-Dist: pytest-md==0.2.0; extra == "tests"
- Requires-Dist: pytest-workflow==2.0.1; extra == "tests"
- Requires-Dist: biopython==1.82; extra == "tests"
- Requires-Dist: pandas==2.0.2; extra == "tests"
- Requires-Dist: numpy==1.26.0; extra == "tests"
- Requires-Dist: regex==2023.12.25; extra == "tests"
- Requires-Dist: requests==2.32.3; extra == "tests"
- Requires-Dist: click==8.1.7; extra == "tests"
- Requires-Dist: pandera==0.22.1; extra == "tests"
- Requires-Dist: pyfastx>=2.2.0; extra == "tests"
+ Requires-Dist: pytest<9,>=8.3.5; extra == "tests"
+ Requires-Dist: pytest-md>=0.2.0; extra == "tests"
+ Requires-Dist: pytest-workflow==2.1.0; extra == "tests"
  Provides-Extra: dev
- Requires-Dist: mgnify_pipelines_toolkit[tests]; extra == "dev"
- Requires-Dist: pre-commit==3.8.0; extra == "dev"
- Requires-Dist: black==24.8.0; extra == "dev"
- Requires-Dist: flake8==7.1.1; extra == "dev"
- Requires-Dist: pep8-naming==0.14.1; extra == "dev"
+ Requires-Dist: pre-commit>=4.2.0; extra == "dev"
+ Requires-Dist: black>=25.1.0; extra == "dev"
+ Requires-Dist: flake8>=7.1.2; extra == "dev"
+ Requires-Dist: pep8-naming>=0.14.1; extra == "dev"
  Dynamic: license-file

  # mgnify-pipelines-toolkit
@@ -74,8 +65,9 @@ Before starting any development, you should do these few steps:
  - Clone the repo if you haven't already and create a feature branch from the `dev` branch (NOT `main`).
  - Create a virtual environment with the tool of your choice (i.e. `conda create --name my_new_env`)
  - Activate you new environment (i.e. `conda activate my_new_env`)
- - Install dev dependencies `pip install -e '.[dev]'`
+ - Install dev dependencies `pip install -e '.[tests,dev]'`
  - Install pre-commit hooks `pre-commit install`
+ - Run unit tests `pytest`

  When doing these steps above, you ensure that the code you add will be linted and formatted properly.

{mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/README.md

@@ -32,8 +32,9 @@ Before starting any development, you should do these few steps:
  - Clone the repo if you haven't already and create a feature branch from the `dev` branch (NOT `main`).
  - Create a virtual environment with the tool of your choice (i.e. `conda create --name my_new_env`)
  - Activate you new environment (i.e. `conda activate my_new_env`)
- - Install dev dependencies `pip install -e '.[dev]'`
+ - Install dev dependencies `pip install -e '.[tests,dev]'`
  - Install pre-commit hooks `pre-commit install`
+ - Run unit tests `pytest`

  When doing these steps above, you ensure that the code you add will be linted and formatted properly.

{mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/assembly/add_rhea_chebi_annotation.py

@@ -78,7 +78,11 @@ def main():
          "--output",
          required=True,
          type=Path,
-         help="Output TSV file with columns: contig_id, protein_id, UniRef90 cluster, rhea_ids, CHEBI reaction participants",
+         help=(
+             "Output TSV file with columns: contig_id, protein_id, protein hash, "
+             "Rhea IDs, CHEBI reaction, reaction definition, 'top hit' if it is "
+             "the first hit for the protein"
+         ),
      )
      parser.add_argument(
          "-p",
{mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/assembly/go_utils.py

@@ -84,52 +84,51 @@ def parse_interproscan_tsv(ips_file: Path, mapped_go_terms: dict = None) -> dict
      previous_protein_acc = None
      go_annotations_single_protein = set()

-     fr = open(ips_file, "r")
      go_pattern = re.compile("GO:\\d+")

-     for line in fr:
-         # IPS files are parsed line by line - the same protein accession will appear multiple lines in a row with different annotation
-         line_counter += 1
-         line = line.strip()
-         chunks = line.split("\t")
-         # Get protein accession
-         current_protein_acc = chunks[0]
-
-         # TODO: not sure if this line is needed - do we ever have more than one protein in a single line of IPS?
-         # Will keep just in case
-         num_of_proteins = len(current_protein_acc.split("|"))
-
-         # If we're at a new protein accession in the IPS file then we finally increment
-         # the go2protein_count dictionary for each term that was found in that protein
-         if current_protein_acc != previous_protein_acc:
-             total_num_of_proteins += 1
-             if len(go_annotations_single_protein) > 0:
-                 num_of_proteins_with_go += 1
-                 go2protein_count = count_and_assign_go_annotations(
-                     go2protein_count,
-                     go_annotations_single_protein,
-                     num_of_proteins,
-                     mapped_go_terms,
-                 )
-             # reset GO id set because we hit a new protein accession
-             go_annotations_single_protein = set()
-             previous_protein_acc = current_protein_acc
-
-         # Parse out GO annotations
-         # GO annotations are associated to InterPro entries (InterPro entries start with 'IPR')
-         # Than use the regex to extract the GO Ids (e.g. GO:0009842)
-         if len(chunks) >= 13 and chunks[11].startswith("IPR"):
-             for go_annotation in go_pattern.findall(line):
-                 go_annotations_single_protein.add(go_annotation)
-
-     # Do final counting for the last protein
-     go2protein_count = count_and_assign_go_annotations(
-         go2protein_count,
-         go_annotations_single_protein,
-         num_of_proteins,
-         mapped_go_terms,
-     )
-
-     fr.close()
+     with open(ips_file, "r") as fr:
+
+         for line in fr:
+             # IPS files are parsed line by line - the same protein accession will appear multiple lines in a row with different annotation
+             line_counter += 1
+             line = line.strip()
+             chunks = line.split("\t")
+             # Get protein accession
+             current_protein_acc = chunks[0]
+
+             # TODO: not sure if this line is needed - do we ever have more than one protein in a single line of IPS?
+             # Will keep just in case
+             num_of_proteins = len(current_protein_acc.split("|"))
+
+             # If we're at a new protein accession in the IPS file then we finally increment
+             # the go2protein_count dictionary for each term that was found in that protein
+             if current_protein_acc != previous_protein_acc:
+                 total_num_of_proteins += 1
+                 if len(go_annotations_single_protein) > 0:
+                     num_of_proteins_with_go += 1
+                     go2protein_count = count_and_assign_go_annotations(
+                         go2protein_count,
+                         go_annotations_single_protein,
+                         num_of_proteins,
+                         mapped_go_terms,
+                     )
+                 # reset GO id set because we hit a new protein accession
+                 go_annotations_single_protein = set()
+                 previous_protein_acc = current_protein_acc
+
+             # Parse out GO annotations
+             # GO annotations are associated to InterPro entries (InterPro entries start with 'IPR')
+             # Than use the regex to extract the GO Ids (e.g. GO:0009842)
+             if len(chunks) >= 13 and chunks[11].startswith("IPR"):
+                 for go_annotation in go_pattern.findall(line):
+                     go_annotations_single_protein.add(go_annotation)
+
+         # Do final counting for the last protein
+         go2protein_count = count_and_assign_go_annotations(
+             go2protein_count,
+             go_annotations_single_protein,
+             num_of_proteins,
+             mapped_go_terms,
+         )

      return go2protein_count
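
The go_utils.py change is a pure refactor: the manual open()/fr.close() pair becomes a with block, so the InterProScan TSV is closed even if parsing raises mid-file; the grouping logic is untouched. For readers following that logic, here is a minimal sketch (not part of the package) of the same "flush GO terms when the accession changes" pattern, expressed with itertools.groupby over consecutive rows:

    import re
    from itertools import groupby

    GO_PATTERN = re.compile(r"GO:\d+")

    def go_terms_per_protein(ips_file):
        """Yield (protein_accession, go_ids) for each consecutive run of rows."""
        with open(ips_file) as handle:
            rows = (line.rstrip("\n").split("\t") for line in handle)
            # groupby only merges *consecutive* rows with the same key, which is
            # exactly the accession-change check in parse_interproscan_tsv
            for accession, group in groupby(rows, key=lambda chunks: chunks[0]):
                go_ids = set()
                for chunks in group:
                    # GO IDs are only collected from rows with an InterPro entry
                    if len(chunks) >= 13 and chunks[11].startswith("IPR"):
                        go_ids.update(GO_PATTERN.findall("\t".join(chunks)))
                yield accession, go_ids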
mgnify_pipelines_toolkit-1.0.4/mgnify_pipelines_toolkit/analysis/assembly/krona_txt_from_cat_classification.py

@@ -0,0 +1,131 @@
+ #!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+
+ # Copyright 2025 EMBL - European Bioinformatics Institute
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+ from collections import Counter
+ import csv
+ import logging
+
+ RANK_PREFIXES = {
+     "superkingdom": "sk__",
+     "kingdom": "k__",
+     "phylum": "p__",
+     "class": "c__",
+     "order": "o__",
+     "family": "f__",
+     "genus": "g__",
+     "species": "s__",
+ }
+
+ logging.basicConfig(
+     level=logging.INFO, format="[%(asctime)s] - %(levelname)s - %(message)s"
+ )
+
+
+ def import_nodes(nodes_dmp):
+     logging.info(f"Loading file {nodes_dmp}")
+     taxid2rank = {}
+
+     with open(nodes_dmp) as f1:
+         for line in f1:
+             fields = [part.strip() for part in line.split("|")]
+             if len(fields) != 14:
+                 raise ValueError(f"Unexpected number of columns in line: {line}")
+             taxid = fields[0]
+             rank = fields[2]
+             taxid2rank[taxid] = rank
+
+     return taxid2rank
+
+
+ def import_names(names_dmp):
+     logging.info(f"Loading file {names_dmp}")
+     taxid2name = {}
+
+     with open(names_dmp, newline="") as f1:
+         for line in f1:
+             fields = [part.strip() for part in line.split("|")]
+             if len(fields) != 5:
+                 raise ValueError(f"Unexpected number of columns in line: {line}")
+             if fields[3] == "scientific name":
+                 taxid = fields[0]
+                 name = fields[1]
+                 taxid2name[taxid] = name
+
+     return taxid2name
+
+
+ def convert_to_official_names(lineage, taxid2rank, taxid2name):
+     lineage_ranks = [taxid2rank[taxid.rstrip("*")] for taxid in lineage]
+     official_names = list(RANK_PREFIXES.values())
+     lowest_classification_index = -1
+
+     for i, rank in enumerate(RANK_PREFIXES):
+         if rank in lineage_ranks:
+             index = lineage_ranks.index(rank)
+             taxid = lineage[index].rstrip("*")
+             name = taxid2name[taxid]
+             official_names[i] = official_names[i] + name
+             lowest_classification_index = i
+
+     return official_names[: lowest_classification_index + 1]
+
+
+ def main():
+     parser = argparse.ArgumentParser(
+         description="Process TSV classification generated by CAT_pack contigs and write input file for Krona ktImportText"
+     )
+     parser.add_argument(
+         "-i", "--input", help="Path to the input TSV file from CAT_pack contigs"
+     )
+     parser.add_argument("-o", "--output", help="Name of the output Krona TXT file")
+     parser.add_argument(
+         "-n", "--names_dmp", help="Path to the nodes.dmp file from NCBI taxonomy"
+     )
+     parser.add_argument(
+         "-r", "--nodes_dmp", help="Path to the names.dmp file from NCBI taxonomy"
+     )
+     args = parser.parse_args()
+
+     taxid2rank = import_nodes(args.nodes_dmp)
+     taxid2name = import_names(args.names_dmp)
+
+     logging.info(f"Begin parsing of CAT_pack classiffication file {args.input}")
+     lineage_counter = Counter()
+     with open(args.input) as infile:
+         reader = csv.reader(infile, delimiter="\t")
+         next(reader)  # Skip the header row
+         for row in reader:
+             if row[1] == "no taxid assigned":
+                 lineage = "unclassified"
+             else:
+                 taxid_lineage = row[3].split(";")
+                 names_lineage = convert_to_official_names(
+                     taxid_lineage, taxid2rank, taxid2name
+                 )
+                 lineage = "\t".join(names_lineage) if names_lineage else "unclassified"
+             lineage_counter[lineage] += 1
+
+     logging.info(f"Writting output to {args.output}")
+     with open(args.output, "w") as outfile:
+         for lineage, count in lineage_counter.most_common():
+             outfile.write(f"{count}\t{lineage}\n")
+
+     logging.info("Done")
+
+
+ if __name__ == "__main__":
+     main()
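
To illustrate what the new script's convert_to_official_names returns, here is a small sketch; the two dictionaries are toy stand-ins for the structures built by import_nodes()/import_names(), using real NCBI taxids. Ranks absent from the lineage keep their bare prefix, and ranks below the deepest classified one are dropped:

    from mgnify_pipelines_toolkit.analysis.assembly.krona_txt_from_cat_classification import (
        convert_to_official_names,
    )

    # Toy stand-ins for the parsed nodes.dmp / names.dmp lookups
    taxid2rank = {"2": "superkingdom", "1224": "phylum", "28211": "class"}
    taxid2name = {
        "2": "Bacteria",
        "1224": "Proteobacteria",
        "28211": "Alphaproteobacteria",
    }

    # Trailing '*' markers in CAT lineages are stripped before lookup
    names = convert_to_official_names(["2", "1224*", "28211"], taxid2rank, taxid2name)
    print(names)
    # ['sk__Bacteria', 'k__', 'p__Proteobacteria', 'c__Alphaproteobacteria']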
{mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/assembly/summarise_goslims.py

@@ -15,9 +15,10 @@
  # limitations under the License.

  import argparse
- from collections import defaultdict
+ import csv
  import logging
  import os
+ from collections import defaultdict
  from pathlib import Path

  from mgnify_pipelines_toolkit.analysis.assembly.go_utils import parse_interproscan_tsv
@@ -28,7 +29,6 @@ logging.basicConfig(


  def parse_args():
-
      description = "Go slim pipeline."
      parser = argparse.ArgumentParser(description=description)
      parser.add_argument(
@@ -59,46 +59,56 @@ def parse_args():


  def parse_mapped_gaf_file(gaf_file: Path) -> defaultdict[set]:
-
      mapped_go_dict = defaultdict(set)
      if os.path.exists(gaf_file):
-         handle = open(gaf_file, "r")
-         for line in handle:
-             if not line.startswith("!"):
-                 line = line.strip()
-                 splitted_line = line.split("\t")
-                 go_id = splitted_line[1]
-                 mapped_go_id = splitted_line[4]
-                 mapped_go_dict[go_id].add(mapped_go_id)
-
+         with open(gaf_file, "r") as handle:
+             for line in handle:
+                 if not line.startswith("!"):
+                     line = line.strip()
+                     splitted_line = line.split("\t")
+                     go_id = splitted_line[1]
+                     mapped_go_id = splitted_line[4]
+                     mapped_go_dict[go_id].add(mapped_go_id)
      return mapped_go_dict


  def get_go_slim_summary(go_slim_banding_file, goslims2_protein_count):
      summary = []

-     fr = open(go_slim_banding_file, "r")
-
-     for line in fr:
-         if line.startswith("GO"):
-             line = line.strip()
-             line_chunks = line.split("\t")
-             go_id = line_chunks[0]
-             term = line_chunks[1]
-             category = line_chunks[2]
-             # Default value for the count
-             count = 0
-             if go_id in goslims2_protein_count:
-                 count = goslims2_protein_count[go_id]
-             summary.append((go_id, term, category, count))
+     with open(go_slim_banding_file, "r") as fr:
+         for line in fr:
+             if line.startswith("GO"):
+                 line = line.strip()
+                 line_chunks = line.split("\t")
+                 go_id = line_chunks[0]
+                 term = line_chunks[1]
+                 category = line_chunks[2]
+                 # Default value for the count
+                 count = 0
+                 if go_id in goslims2_protein_count:
+                     count = goslims2_protein_count[go_id]
+                 summary.append((go_id, term, category, count))
      return summary


  def write_go_summary_to_file(go_summary, output_file):
-     fw = open(output_file, "w")
-     for go, term, category, count in go_summary:
-         fw.write('","'.join(['"' + go, term, category, str(count) + '"']) + "\n")
-     fw.close()
+     """
+     Write a sorted GO summary to a TSV file.
+
+     :param go_summary: A list of tuples, where each tuple contains the following
+         elements:
+         - go (str): The GO identifier.
+         - term (str): The GO term description.
+         - category (str): The category of the GO term.
+         - count (int): The count associated with the GO term.
+     :param output_file: The path to the output TSV file where the sorted GO
+     """
+     sorted_go_summary = sorted(go_summary, key=lambda x: x[3], reverse=True)
+     with open(output_file, "w", newline="") as fw:
+         tsv_writer = csv.writer(fw, delimiter="\t")
+         tsv_writer.writerow(["go", "term", "category", "count"])
+         for go, term, category, count in sorted_go_summary:
+             tsv_writer.writerow([go, term, category, count])


  def parse_gene_ontology(obo_file):
@@ -108,23 +118,22 @@ def parse_gene_ontology(obo_file):
      :return:
      """
      go_term_tuples = []
-     fr = open(obo_file, "r")
-     id, term, category = "", "", ""
-     for line in fr:
-         line = line.strip()
-         split_line = line.split(": ")
-         if line.startswith("id:"):
-             id = split_line[1]
-         elif line.startswith("name:"):
-             term = split_line[1]
-         elif line.startswith("namespace"):
-             category = split_line[1]
-         else:
-             if id.startswith("GO:") and id and term and category:
-                 item = (id, term, category)
-                 go_term_tuples.append(item)
-                 id, term, category = "", "", ""
-     fr.close()
+     with open(obo_file, "r") as fr:
+         id, term, category = "", "", ""
+         for line in fr:
+             line = line.strip()
+             split_line = line.split(": ")
+             if line.startswith("id:"):
+                 id = split_line[1]
+             elif line.startswith("name:"):
+                 term = split_line[1]
+             elif line.startswith("namespace"):
+                 category = split_line[1]
+             else:
+                 if id.startswith("GO:") and id and term and category:
+                     item = (id, term, category)
+                     go_term_tuples.append(item)
+                     id, term, category = "", "", ""
      return go_term_tuples


@@ -132,7 +141,6 @@ def get_full_go_summary(core_gene_ontology, go2protein_count_dict, top_level_go_
      summary = []

      for go_id, term, category in core_gene_ontology:
-
          if (go_id in go2protein_count_dict) and (
              go_id not in top_level_go_ids
          ):  # make sure that top level terms are not included (they tell you nothing!)
@@ -143,7 +151,6 @@


  def main():
-
      go_obo, go_banding, gaf_input, ips_input, output = parse_args()

      logging.info("Parsing the InterProScan input: " + ips_input)
{mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/analysis/shared/get_subunits.py

@@ -108,7 +108,7 @@ def main():

      open_files = {}
      for record in SeqIO.parse(args.input, "fasta"):
-         model = "-".join(record.id.split("/")[0].split("-")[-1:])
+         model = "-".join("/".join(record.id.split("/")[:-1]).split("-")[-1:])
          if model in SSU_MODELS:
              if SSU not in open_files:
                  file_out = open(pattern_dict[SSU], "w")
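
The one-line get_subunits.py fix changes which part of the FASTA record ID is taken as the model name: 1.0.2 kept only the text before the first '/', which breaks when the read name itself contains a '/'. A sketch with a hypothetical ID (the <read>/<pair>-<model>/<start>-<end> shape is an assumption for illustration):

    record_id = "SRR1234.5/1-SSU_rRNA_bacteria/23-1512"  # hypothetical ID

    # 1.0.2: text before the FIRST '/', then the last '-'-separated token
    old_model = "-".join(record_id.split("/")[0].split("-")[-1:])
    # 1.0.4: drop only the text after the LAST '/' (the coordinates),
    # then take the last '-'-separated token
    new_model = "-".join("/".join(record_id.split("/")[:-1]).split("-")[-1:])

    print(old_model)  # 'SRR1234.5' -- the read name, not a model
    print(new_model)  # 'SSU_rRNA_bacteria'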
{mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit/constants/ncrna.py

@@ -60,3 +60,25 @@ RFAM_MODELS = {
      LSU_rRNA_bacteria: "RF02541",
      LSU_rRNA_eukarya: "RF02543",
  }
+
+ TRNA = [
+     "Ala",
+     "Gly",
+     "Pro",
+     "Thr",
+     "Val",
+     "Ser",
+     "Arg",
+     "Leu",
+     "Phe",
+     "Asn",
+     "Lys",
+     "Asp",
+     "Glu",
+     "His",
+     "Gln",
+     "Ile",
+     "Tyr",
+     "Cys",
+     "Trp",
+ ]
{mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mgnify_pipelines_toolkit
- Version: 1.0.2
+ Version: 1.0.4
  Summary: Collection of scripts and tools for MGnify pipelines
  Author-email: MGnify team <metagenomics-help@ebi.ac.uk>
  License: Apache Software License 2.0
@@ -11,33 +11,24 @@ Classifier: Operating System :: OS Independent
  Requires-Python: >=3.9
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: biopython==1.82
- Requires-Dist: numpy==1.26.0
- Requires-Dist: pandas==2.0.2
- Requires-Dist: regex==2023.12.25
- Requires-Dist: requests==2.32.3
- Requires-Dist: click==8.1.7
- Requires-Dist: pandera==0.22.1
- Requires-Dist: pyfastx>=2.2.0
- Requires-Dist: intervaltree==3.1.0
+ Requires-Dist: biopython>=1.85
+ Requires-Dist: numpy<3,>=2.2.4
+ Requires-Dist: pandas<3,>=2.2.3
+ Requires-Dist: regex>=2024.11.6
+ Requires-Dist: requests<3,>=2.32.3
+ Requires-Dist: click<9,>=8.1.8
+ Requires-Dist: pandera<0.24,>=0.23.1
+ Requires-Dist: pyfastx<3,>=2.2.0
+ Requires-Dist: intervaltree<4,>=3.1.0
  Provides-Extra: tests
- Requires-Dist: pytest==7.4.0; extra == "tests"
- Requires-Dist: pytest-md==0.2.0; extra == "tests"
- Requires-Dist: pytest-workflow==2.0.1; extra == "tests"
- Requires-Dist: biopython==1.82; extra == "tests"
- Requires-Dist: pandas==2.0.2; extra == "tests"
- Requires-Dist: numpy==1.26.0; extra == "tests"
- Requires-Dist: regex==2023.12.25; extra == "tests"
- Requires-Dist: requests==2.32.3; extra == "tests"
- Requires-Dist: click==8.1.7; extra == "tests"
- Requires-Dist: pandera==0.22.1; extra == "tests"
- Requires-Dist: pyfastx>=2.2.0; extra == "tests"
+ Requires-Dist: pytest<9,>=8.3.5; extra == "tests"
+ Requires-Dist: pytest-md>=0.2.0; extra == "tests"
+ Requires-Dist: pytest-workflow==2.1.0; extra == "tests"
  Provides-Extra: dev
- Requires-Dist: mgnify_pipelines_toolkit[tests]; extra == "dev"
- Requires-Dist: pre-commit==3.8.0; extra == "dev"
- Requires-Dist: black==24.8.0; extra == "dev"
- Requires-Dist: flake8==7.1.1; extra == "dev"
- Requires-Dist: pep8-naming==0.14.1; extra == "dev"
+ Requires-Dist: pre-commit>=4.2.0; extra == "dev"
+ Requires-Dist: black>=25.1.0; extra == "dev"
+ Requires-Dist: flake8>=7.1.2; extra == "dev"
+ Requires-Dist: pep8-naming>=0.14.1; extra == "dev"
  Dynamic: license-file

  # mgnify-pipelines-toolkit
@@ -74,8 +65,9 @@ Before starting any development, you should do these few steps:
  - Clone the repo if you haven't already and create a feature branch from the `dev` branch (NOT `main`).
  - Create a virtual environment with the tool of your choice (i.e. `conda create --name my_new_env`)
  - Activate you new environment (i.e. `conda activate my_new_env`)
- - Install dev dependencies `pip install -e '.[dev]'`
+ - Install dev dependencies `pip install -e '.[tests,dev]'`
  - Install pre-commit hooks `pre-commit install`
+ - Run unit tests `pytest`

  When doing these steps above, you ensure that the code you add will be linted and formatted properly.

{mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit.egg-info/SOURCES.txt

@@ -29,7 +29,9 @@ mgnify_pipelines_toolkit/analysis/assembly/gff_annotation_utils.py
  mgnify_pipelines_toolkit/analysis/assembly/gff_file_utils.py
  mgnify_pipelines_toolkit/analysis/assembly/gff_toolkit.py
  mgnify_pipelines_toolkit/analysis/assembly/go_utils.py
+ mgnify_pipelines_toolkit/analysis/assembly/krona_txt_from_cat_classification.py
  mgnify_pipelines_toolkit/analysis/assembly/summarise_goslims.py
+ mgnify_pipelines_toolkit/analysis/genomes/__init__.py
  mgnify_pipelines_toolkit/analysis/shared/__init__.py
  mgnify_pipelines_toolkit/analysis/shared/convert_cmscan_to_cmsearch_tblout.py
  mgnify_pipelines_toolkit/analysis/shared/dwc_summary_generator.py
{mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/mgnify_pipelines_toolkit.egg-info/entry_points.txt

@@ -1,19 +1,24 @@
  [console_scripts]
  add_rhea_chebi_annotation = mgnify_pipelines_toolkit.analysis.assembly.add_rhea_chebi_annotation:main
+ antismash_gff_builder = mgnify_pipelines_toolkit.analysis.assembly.antismash_gff_builder:main
  are_there_primers = mgnify_pipelines_toolkit.analysis.amplicon.are_there_primers:main
  assess_inflection_point_mcp = mgnify_pipelines_toolkit.analysis.amplicon.assess_inflection_point_mcp:main
  assess_mcp_proportions = mgnify_pipelines_toolkit.analysis.amplicon.assess_mcp_proportions:main
  classify_var_regions = mgnify_pipelines_toolkit.analysis.amplicon.classify_var_regions:main
  combined_gene_caller_merge = mgnify_pipelines_toolkit.analysis.assembly.combined_gene_caller_merge:main
  convert_cmscan_to_cmsearch_tblout = mgnify_pipelines_toolkit.analysis.shared.convert_cmscan_to_cmsearch_tblout:main
- dwc_summary_generator = mgnify_pipelines_toolkit.analysis.assembly.dwc_summary_generator:main
+ dwc_summary_generator = mgnify_pipelines_toolkit.analysis.shared.dwc_summary_generator:main
  fasta_to_delimited = mgnify_pipelines_toolkit.utils.fasta_to_delimited:main
  fastq_suffix_header_check = mgnify_pipelines_toolkit.analysis.shared.fastq_suffix_header_check:main
  find_mcp_inflection_points = mgnify_pipelines_toolkit.analysis.amplicon.find_mcp_inflection_points:main
  generate_gaf = mgnify_pipelines_toolkit.analysis.assembly.generate_gaf:main
+ genomes_extract_bacterial_rrnas_as_tsv = mgnify_pipelines_toolkit.analysis.genomes.rna.extract_bacterial_rrnas_as_tsv:main
+ genomes_extract_rrnas_as_fasta = mgnify_pipelines_toolkit.analysis.genomes.rna.extract_rrnas_as_fasta:main
+ genomes_extract_trnas = mgnify_pipelines_toolkit.analysis.genomes.rna.extract_trnas:main
  get_mpt_version = mgnify_pipelines_toolkit.utils.get_mpt_version:main
  get_subunits = mgnify_pipelines_toolkit.analysis.shared.get_subunits:main
  get_subunits_coords = mgnify_pipelines_toolkit.analysis.shared.get_subunits_coords:main
+ krona_txt_from_cat_classification = mgnify_pipelines_toolkit.analysis.assembly.krona_txt_from_cat_classification:main
  library_strategy_check = mgnify_pipelines_toolkit.analysis.shared.library_strategy_check:main
  make_asv_count_table = mgnify_pipelines_toolkit.analysis.amplicon.make_asv_count_table:main
  mapseq2biom = mgnify_pipelines_toolkit.analysis.shared.mapseq2biom:main
mgnify_pipelines_toolkit-1.0.4/mgnify_pipelines_toolkit.egg-info/requires.txt

@@ -0,0 +1,20 @@
+ biopython>=1.85
+ numpy<3,>=2.2.4
+ pandas<3,>=2.2.3
+ regex>=2024.11.6
+ requests<3,>=2.32.3
+ click<9,>=8.1.8
+ pandera<0.24,>=0.23.1
+ pyfastx<3,>=2.2.0
+ intervaltree<4,>=3.1.0
+
+ [dev]
+ pre-commit>=4.2.0
+ black>=25.1.0
+ flake8>=7.1.2
+ pep8-naming>=0.14.1
+
+ [tests]
+ pytest<9,>=8.3.5
+ pytest-md>=0.2.0
+ pytest-workflow==2.1.0
{mgnify_pipelines_toolkit-1.0.2 → mgnify_pipelines_toolkit-1.0.4}/pyproject.toml

@@ -1,6 +1,6 @@
  [project]
  name = "mgnify_pipelines_toolkit"
- version = "1.0.2"
+ version = "1.0.4"
  readme = "README.md"
  license = {text = "Apache Software License 2.0"}
  authors = [
@@ -16,15 +16,15 @@ classifiers = [
  ]

  dependencies = [
-     "biopython==1.82",
-     "numpy==1.26.0",
-     "pandas==2.0.2",
-     "regex==2023.12.25",
-     "requests==2.32.3",
-     "click==8.1.7",
-     "pandera==0.22.1",
-     "pyfastx>=2.2.0",
-     "intervaltree==3.1.0",
+     "biopython>=1.85",
+     "numpy>=2.2.4,<3",
+     "pandas>=2.2.3,<3",
+     "regex>=2024.11.6",
+     "requests>=2.32.3,<3",
+     "click>=8.1.8,<9",
+     "pandera>=0.23.1,<0.24",
+     "pyfastx>=2.2.0,<3",
+     "intervaltree>=3.1.0,<4",
  ]

  [build-system]
@@ -40,10 +40,11 @@ packages = ["mgnify_pipelines_toolkit",
      "mgnify_pipelines_toolkit.analysis.shared",
      "mgnify_pipelines_toolkit.analysis.amplicon",
      "mgnify_pipelines_toolkit.analysis.assembly",
-     ]
+     "mgnify_pipelines_toolkit.analysis.genomes"
+     ]

  [project.scripts]
- # analysis.shared
+ # analysis.shared #
  get_subunits = "mgnify_pipelines_toolkit.analysis.shared.get_subunits:main"
  get_subunits_coords = "mgnify_pipelines_toolkit.analysis.shared.get_subunits_coords:main"
  mapseq2biom = "mgnify_pipelines_toolkit.analysis.shared.mapseq2biom:main"
@@ -52,7 +53,8 @@ library_strategy_check = "mgnify_pipelines_toolkit.analysis.shared.library_strat
  study_summary_generator = "mgnify_pipelines_toolkit.analysis.shared.study_summary_generator:cli"
  markergene_study_summary = "mgnify_pipelines_toolkit.analysis.shared.markergene_study_summary:main"
  convert_cmscan_to_cmsearch_tblout = "mgnify_pipelines_toolkit.analysis.shared.convert_cmscan_to_cmsearch_tblout:main"
- # analysis.amplicon
+ dwc_summary_generator = "mgnify_pipelines_toolkit.analysis.shared.dwc_summary_generator:main"
+ # analysis.amplicon #
  are_there_primers = "mgnify_pipelines_toolkit.analysis.amplicon.are_there_primers:main"
  assess_inflection_point_mcp = "mgnify_pipelines_toolkit.analysis.amplicon.assess_inflection_point_mcp:main"
  assess_mcp_proportions = "mgnify_pipelines_toolkit.analysis.amplicon.assess_mcp_proportions:main"
@@ -64,35 +66,32 @@ rev_comp_se_primers = "mgnify_pipelines_toolkit.analysis.amplicon.rev_comp_se_pr
  standard_primer_matching = "mgnify_pipelines_toolkit.analysis.amplicon.standard_primer_matching:main"
  mapseq_to_asv_table = "mgnify_pipelines_toolkit.analysis.amplicon.mapseq_to_asv_table:main"
  primer_val_classification = "mgnify_pipelines_toolkit.analysis.amplicon.primer_val_classification:main"
- # analysis.assembly
+ # analysis.assembly #
+ krona_txt_from_cat_classification = "mgnify_pipelines_toolkit.analysis.assembly.krona_txt_from_cat_classification:main"
  add_rhea_chebi_annotation = "mgnify_pipelines_toolkit.analysis.assembly.add_rhea_chebi_annotation:main"
  combined_gene_caller_merge = "mgnify_pipelines_toolkit.analysis.assembly.combined_gene_caller_merge:main"
  generate_gaf = "mgnify_pipelines_toolkit.analysis.assembly.generate_gaf:main"
  summarise_goslims = "mgnify_pipelines_toolkit.analysis.assembly.summarise_goslims:main"
- dwc_summary_generator = "mgnify_pipelines_toolkit.analysis.assembly.dwc_summary_generator:main"
+ antismash_gff_builder = "mgnify_pipelines_toolkit.analysis.assembly.antismash_gff_builder:main"
+ # genomes #
+ genomes_extract_bacterial_rrnas_as_tsv = "mgnify_pipelines_toolkit.analysis.genomes.rna.extract_bacterial_rrnas_as_tsv:main"
+ genomes_extract_rrnas_as_fasta = "mgnify_pipelines_toolkit.analysis.genomes.rna.extract_rrnas_as_fasta:main"
+ genomes_extract_trnas = "mgnify_pipelines_toolkit.analysis.genomes.rna.extract_trnas:main"
+
  # utils
  fasta_to_delimited = "mgnify_pipelines_toolkit.utils.fasta_to_delimited:main"
  get_mpt_version = "mgnify_pipelines_toolkit.utils.get_mpt_version:main"

  [project.optional-dependencies]
  tests = [
-     "pytest==7.4.0",
-     "pytest-md==0.2.0",
-     "pytest-workflow==2.0.1",
-     "biopython==1.82",
-     "pandas==2.0.2",
-     "numpy==1.26.0",
-     "regex==2023.12.25",
-     "requests==2.32.3",
-     "click==8.1.7",
-     "pandera==0.22.1",
-     "pyfastx>=2.2.0"
+     "pytest>=8.3.5,<9",
+     "pytest-md>=0.2.0",
+     "pytest-workflow==2.1.0",
  ]

  dev = [
-     "mgnify_pipelines_toolkit[tests]",
-     "pre-commit==3.8.0",
-     "black==24.8.0",
-     "flake8==7.1.1",
-     "pep8-naming==0.14.1"
+     "pre-commit>=4.2.0",
+     "black>=25.1.0",
+     "flake8>=7.1.2",
+     "pep8-naming>=0.14.1"
  ]
mgnify_pipelines_toolkit-1.0.2/mgnify_pipelines_toolkit.egg-info/requires.txt

@@ -1,29 +0,0 @@
- biopython==1.82
- numpy==1.26.0
- pandas==2.0.2
- regex==2023.12.25
- requests==2.32.3
- click==8.1.7
- pandera==0.22.1
- pyfastx>=2.2.0
- intervaltree==3.1.0
-
- [dev]
- mgnify_pipelines_toolkit[tests]
- pre-commit==3.8.0
- black==24.8.0
- flake8==7.1.1
- pep8-naming==0.14.1
-
- [tests]
- pytest==7.4.0
- pytest-md==0.2.0
- pytest-workflow==2.0.1
- biopython==1.82
- pandas==2.0.2
- numpy==1.26.0
- regex==2023.12.25
- requests==2.32.3
- click==8.1.7
- pandera==0.22.1
- pyfastx>=2.2.0
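
Taken together, the packaging changes move 1.0.4 from exact pins to bounded ranges, and the tests extra no longer re-declares the runtime dependencies. A small standard-library sketch for inspecting what an installed copy actually declares:

    from importlib.metadata import requires, version

    # e.g. '1.0.4'
    print(version("mgnify-pipelines-toolkit"))

    # The declared Requires-Dist entries, e.g. 'numpy<3,>=2.2.4'
    for requirement in requires("mgnify-pipelines-toolkit") or []:
        print(requirement)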