mgnify-pipelines-toolkit 0.2.1__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

This version of mgnify-pipelines-toolkit might be problematic.

Files changed (46)
  1. mgnify_pipelines_toolkit/analysis/amplicon/amplicon_utils.py +1 -1
  2. mgnify_pipelines_toolkit/analysis/amplicon/are_there_primers.py +1 -1
  3. mgnify_pipelines_toolkit/analysis/amplicon/assess_inflection_point_mcp.py +1 -1
  4. mgnify_pipelines_toolkit/analysis/amplicon/assess_mcp_proportions.py +1 -1
  5. mgnify_pipelines_toolkit/analysis/amplicon/classify_var_regions.py +1 -1
  6. mgnify_pipelines_toolkit/analysis/amplicon/find_mcp_inflection_points.py +1 -1
  7. mgnify_pipelines_toolkit/analysis/amplicon/make_asv_count_table.py +1 -1
  8. mgnify_pipelines_toolkit/analysis/amplicon/mapseq_to_asv_table.py +1 -1
  9. mgnify_pipelines_toolkit/analysis/amplicon/primer_val_classification.py +1 -1
  10. mgnify_pipelines_toolkit/analysis/amplicon/remove_ambiguous_reads.py +1 -1
  11. mgnify_pipelines_toolkit/analysis/amplicon/rev_comp_se_primers.py +1 -1
  12. mgnify_pipelines_toolkit/analysis/amplicon/standard_primer_matching.py +1 -1
  13. mgnify_pipelines_toolkit/analysis/assembly/add_rhea_chebi_annotation.py +1 -1
  14. mgnify_pipelines_toolkit/analysis/assembly/antismash_gff_builder.py +1 -1
  15. mgnify_pipelines_toolkit/analysis/assembly/combined_gene_caller_merge.py +511 -0
  16. mgnify_pipelines_toolkit/analysis/assembly/generate_gaf.py +1 -1
  17. mgnify_pipelines_toolkit/analysis/assembly/gff_annotation_utils.py +829 -0
  18. mgnify_pipelines_toolkit/analysis/assembly/gff_file_utils.py +82 -0
  19. mgnify_pipelines_toolkit/analysis/assembly/gff_toolkit.py +170 -0
  20. mgnify_pipelines_toolkit/analysis/assembly/go_utils.py +1 -1
  21. mgnify_pipelines_toolkit/analysis/assembly/summarise_goslims.py +1 -1
  22. mgnify_pipelines_toolkit/analysis/shared/dwc_summary_generator.py +240 -0
  23. mgnify_pipelines_toolkit/analysis/shared/fastq_suffix_header_check.py +1 -1
  24. mgnify_pipelines_toolkit/analysis/shared/get_subunits.py +1 -1
  25. mgnify_pipelines_toolkit/analysis/shared/get_subunits_coords.py +1 -1
  26. mgnify_pipelines_toolkit/analysis/shared/library_strategy_check.py +1 -1
  27. mgnify_pipelines_toolkit/analysis/shared/mapseq2biom.py +1 -1
  28. mgnify_pipelines_toolkit/analysis/shared/markergene_study_summary.py +243 -0
  29. mgnify_pipelines_toolkit/analysis/shared/study_summary_generator.py +1 -1
  30. mgnify_pipelines_toolkit/constants/db_labels.py +1 -1
  31. mgnify_pipelines_toolkit/constants/regex_ambiguous_bases.py +1 -1
  32. mgnify_pipelines_toolkit/constants/regex_fasta_header.py +1 -1
  33. mgnify_pipelines_toolkit/constants/tax_ranks.py +1 -1
  34. mgnify_pipelines_toolkit/constants/thresholds.py +8 -1
  35. mgnify_pipelines_toolkit/constants/var_region_coordinates.py +1 -1
  36. mgnify_pipelines_toolkit/schemas/schemas.py +1 -1
  37. mgnify_pipelines_toolkit/utils/fasta_to_delimited.py +1 -1
  38. mgnify_pipelines_toolkit/utils/get_mpt_version.py +1 -1
  39. {mgnify_pipelines_toolkit-0.2.1.dist-info → mgnify_pipelines_toolkit-1.0.0.dist-info}/METADATA +3 -1
  40. mgnify_pipelines_toolkit-1.0.0.dist-info/RECORD +48 -0
  41. {mgnify_pipelines_toolkit-0.2.1.dist-info → mgnify_pipelines_toolkit-1.0.0.dist-info}/WHEEL +1 -1
  42. {mgnify_pipelines_toolkit-0.2.1.dist-info → mgnify_pipelines_toolkit-1.0.0.dist-info}/entry_points.txt +4 -2
  43. mgnify_pipelines_toolkit/analysis/assembly/cgc_merge.py +0 -424
  44. mgnify_pipelines_toolkit-0.2.1.dist-info/RECORD +0 -43
  45. {mgnify_pipelines_toolkit-0.2.1.dist-info → mgnify_pipelines_toolkit-1.0.0.dist-info}/LICENSE +0 -0
  46. {mgnify_pipelines_toolkit-0.2.1.dist-info → mgnify_pipelines_toolkit-1.0.0.dist-info}/top_level.txt +0 -0
mgnify_pipelines_toolkit/analysis/assembly/gff_file_utils.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2025 EMBL - European Bioinformatics Institute
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import csv
+
+
+def write_results_to_file(
+    outfile, header, main_gff_extended, fasta, ncrnas, trnas, crispr_annotations
+):
+    with open(outfile, "w") as file_out:
+        file_out.write("\n".join(header) + "\n")
+        contig_list = list(main_gff_extended.keys())
+        # check if there are any contigs that don't have CDS; if so add them in
+        contig_list = check_for_additional_keys(
+            ncrnas, trnas, crispr_annotations, contig_list
+        )
+        for contig in contig_list:
+            sorted_pos_list = sort_positions(
+                contig, main_gff_extended, ncrnas, trnas, crispr_annotations
+            )
+            for pos in sorted_pos_list:
+                for my_dict in (ncrnas, trnas, crispr_annotations, main_gff_extended):
+                    if contig in my_dict and pos in my_dict[contig]:
+                        for line in my_dict[contig][pos]:
+                            if type(line) is str:
+                                file_out.write(f"{line}\n")
+                            else:
+                                for element in line:
+                                    file_out.write(element)
+        for line in fasta:
+            file_out.write(f"{line}\n")
+
+
+def sort_positions(contig, main_gff_extended, ncrnas, trnas, crispr_annotations):
+    sorted_pos_list = list()
+    for my_dict in (main_gff_extended, ncrnas, trnas, crispr_annotations):
+        if contig in my_dict:
+            sorted_pos_list += list(my_dict[contig].keys())
+    return sorted(list(set(sorted_pos_list)))
+
+
+def check_for_additional_keys(ncrnas, trnas, crispr_annotations, contig_list):
+    for my_dict in (ncrnas, trnas, crispr_annotations):
+        dict_keys = set(my_dict.keys())
+        absent_keys = dict_keys - set(contig_list)
+        if absent_keys:
+            contig_list = contig_list + list(absent_keys)
+    return contig_list
+
+
+def print_pseudogene_report(pseudogene_report_dict, pseudogene_report_file):
+    with open(pseudogene_report_file, "w") as file_out:
+        writer = csv.writer(file_out, delimiter="\t", lineterminator="\n")
+        # Print header
+        writer.writerow(
+            [
+                "ID",
+                "Pseudogene according to Bakta/Prokka",
+                "Pseudogene according to Pseudofinder",
+                "AntiFam hit",
+            ]
+        )
+
+        all_keys = ["gene_caller", "pseudofinder", "antifams"]
+        for protein, attributes in pseudogene_report_dict.items():
+            # Fill in missing attributes with False
+            line = [protein] + [str(attributes.get(key, False)) for key in all_keys]
+            writer.writerow(line)
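
For orientation, here is a minimal sketch of how the new write_results_to_file helper expects its inputs to be shaped, based on the code above: each annotation source is a dict keyed by contig and then by feature start position, with each position mapping to a list of ready-to-write GFF lines. All file names and feature values below are hypothetical placeholders, not data from the package.

# Hypothetical usage of write_results_to_file from gff_file_utils.
from gff_file_utils import write_results_to_file

header = ["##gff-version 3", "##sequence-region contig_1 1 5000"]

# {contig: {start_position: [GFF line, ...]}} -- one dict per annotation source
main_gff_extended = {
    "contig_1": {100: ["contig_1\tProdigal\tCDS\t100\t400\t.\t+\t0\tID=gene_1"]}
}
ncrnas = {"contig_1": {450: ["contig_1\tRfam\tncRNA\t450\t560\t.\t+\t.\tID=ncrna_1"]}}
trnas = {}
crispr_annotations = {}

# FASTA section appended verbatim after the features
fasta = ["##FASTA", ">contig_1", "ACGTACGT"]

write_results_to_file(
    "annotated.gff",  # hypothetical output path
    header,
    main_gff_extended,
    fasta,
    ncrnas,
    trnas,
    crispr_annotations,
)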
mgnify_pipelines_toolkit/analysis/assembly/gff_toolkit.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2025 EMBL - European Bioinformatics Institute
+#
+# Licensed under the Apache License, Version 2.0 (the 'License');
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an 'AS IS' BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+
+from gff_annotation_utils import get_ncrnas, get_trnas, load_annotations, load_crispr
+from gff_file_utils import write_results_to_file, print_pseudogene_report
+
+
+def main(
+    gff,
+    ipr_file,
+    eggnog_file,
+    sanntis_file,
+    crispr_file,
+    amr_file,
+    antismash_file,
+    gecco_file,
+    dbcan_file,
+    defense_finder_file,
+    pseudofinder_file,
+    rfam_file,
+    trnascan_file,
+    outfile,
+    pseudogene_report_file,
+):
+    # load annotations and add them to existing CDS
+    # here header contains leading GFF lines starting with "#",
+    # main_gff_extended is a dictionary that contains GFF lines with additional annotations added in,
+    # fasta is the fasta portion of the original GFF file, and
+    # pseudogene_report_dict is the information on detected pseudogenes, which can optionally be
+    # printed to a separate output file
+    header, main_gff_extended, fasta, pseudogene_report_dict = load_annotations(
+        gff,
+        eggnog_file,
+        ipr_file,
+        sanntis_file,
+        amr_file,
+        antismash_file,
+        gecco_file,
+        dbcan_file,
+        defense_finder_file,
+        pseudofinder_file,
+    )
+    ncrnas = {}
+    if rfam_file:
+        ncrnas = get_ncrnas(rfam_file)
+    trnas = {}
+    if trnascan_file:
+        trnas = get_trnas(trnascan_file)
+    crispr_annotations = {}
+    if crispr_file:
+        crispr_annotations = load_crispr(crispr_file)
+
+    write_results_to_file(outfile, header, main_gff_extended, fasta, ncrnas, trnas, crispr_annotations)
+    if pseudogene_report_file:
+        print_pseudogene_report(pseudogene_report_dict, pseudogene_report_file)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description="The script extends a user-provided base GFF annotation file by incorporating "
+        "information extracted from the user-provided outputs of supplementary annotation tools.",
+    )
+    parser.add_argument(
+        "-g",
+        dest="gff_input",
+        required=True,
+        help="GFF input file containing the base annotation",
+    )
+    parser.add_argument(
+        "-i",
+        dest="ips",
+        help="InterProScan annotation results (TSV)",
+        required=False,
+    )
+    parser.add_argument(
+        "-e",
+        dest="eggnog",
+        help="EggNOG mapper annotation results (TSV)",
+        required=False,
+    )
+    parser.add_argument(
+        "-s",
+        dest="sanntis",
+        help="SanntiS results",
+        required=False,
+    )
+    parser.add_argument(
+        "-c",
+        dest="crispr",
+        help="CRISPRCasFinder results for the cluster rep (pre-filtered high quality GFF)",
+        required=False,
+    )
+    parser.add_argument(
+        "-a",
+        dest="amr",
+        help="The TSV file produced by AMRFinderPlus",
+        required=False,
+    )
+    parser.add_argument(
+        "--antismash",
+        help="The GFF file produced by AntiSMASH post-processing script",
+        required=False,
+    )
+    parser.add_argument(
+        "--gecco",
+        help="The GFF file produced by GECCO",
+        required=False,
+    )
+    parser.add_argument(
+        "--dbcan",
+        help="The GFF file produced by dbCAN post-processing script",
+        required=False,
+    )
+    parser.add_argument(
+        "--defense-finder",
+        help="The GFF file produced by Defense Finder post-processing script",
+        required=False,
+    )
+    parser.add_argument(
+        "--pseudofinder",
+        help="The GFF file produced by the Pseudofinder post-processing script",
+        required=False,
+    )
+    parser.add_argument("-r", dest="rfam", help="Rfam results", required=False)
+    parser.add_argument(
+        "-t", dest="trnascan", help="tRNAScan-SE results", required=False
+    )
+    parser.add_argument("-o", dest="outfile", help="Outfile name", required=True)
+    parser.add_argument(
+        "--pseudogene-report", help="Pseudogene report filename", required=False
+    )
+
+    return parser.parse_args()
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    main(
+        args.gff_input,
+        args.ips,
+        args.eggnog,
+        args.sanntis,
+        args.crispr,
+        args.amr,
+        args.antismash,
+        args.gecco,
+        args.dbcan,
+        args.defense_finder,
+        args.pseudofinder,
+        args.rfam,
+        args.trnascan,
+        args.outfile,
+        args.pseudogene_report,
+    )
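
A hedged sketch of driving the new gff_toolkit module programmatically, using only a subset of the annotation inputs: every path below is an illustrative placeholder, and unused optional inputs are passed as None, which is also argparse's default when the corresponding flag is omitted.

# Hypothetical programmatic use of gff_toolkit.main(); all paths are placeholders.
from gff_toolkit import main

main(
    "assembly_base.gff",       # -g: base GFF annotation (required)
    "interproscan.tsv",        # -i: InterProScan results
    "eggnog.annotations.tsv",  # -e: eggNOG-mapper results
    None,                      # -s: SanntiS
    None,                      # -c: CRISPRCasFinder
    None,                      # -a: AMRFinderPlus
    None,                      # --antismash
    None,                      # --gecco
    None,                      # --dbcan
    None,                      # --defense-finder
    None,                      # --pseudofinder
    "rfam_hits.txt",           # -r: Rfam results
    "trnascan.out",            # -t: tRNAScan-SE results
    "merged_annotation.gff",   # -o: output file (required)
    None,                      # --pseudogene-report: report skipped when None
)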
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-# Copyright 2024 EMBL - European Bioinformatics Institute
+# Copyright 2024-2025 EMBL - European Bioinformatics Institute
 #
 # Licensed under the Apache License, Version 2.0 (the 'License');
 # you may not use this file except in compliance with the License.
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-# Copyright 2024 EMBL - European Bioinformatics Institute
+# Copyright 2024-2025 EMBL - European Bioinformatics Institute
 #
 # Licensed under the Apache License, Version 2.0 (the 'License');
 # you may not use this file except in compliance with the License.
mgnify_pipelines_toolkit/analysis/shared/dwc_summary_generator.py
@@ -0,0 +1,240 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2024-2025 EMBL - European Bioinformatics Institute
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+from collections import defaultdict
+import pathlib
+import logging
+import requests
+
+import pandas as pd
+import pyfastx
+
+logging.basicConfig(level=logging.DEBUG)
+
+URL = "https://www.ebi.ac.uk/ena/portal/api/search?result"
+RUNS_URL = f"{URL}=read_run&fields=secondary_study_accession,sample_accession&limit=10&format=json&download=false"
+SAMPLES_URL = f"{URL}=sample&fields=lat,lon,collection_date,depth&limit=10&format=json&download=false"
+HEADERS = {"Accept": "application/json"}
+
+
+def parse_args():
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "-i",
+        "--input_path",
+        required=True,
+        type=str,
+        help="Directory where ASV files are.",
+    )
+    parser.add_argument(
+        "-r",
+        "--runs",
+        required=True,
+        type=str,
+        help="Path to CSV file containing successful analyses generated by the pipeline (columns: `run, status`)",
+    )
+    parser.add_argument(
+        "-o", "--output", required=True, type=str, help="Path to output directory."
+    )
+
+    args = parser.parse_args()
+
+    input_path = args.input_path
+    runs = args.runs
+    output = args.output
+
+    return input_path, runs, output
+
+
+def get_metadata_from_run_acc(run_acc):
+
+    query = f"{RUNS_URL}&includeAccessions={run_acc}"
+    res_run = requests.get(query, headers=HEADERS)
+
+    if res_run.status_code != 200:
+        logging.error(f"Data not found for run {run_acc}")
+        return False
+
+    sample_acc = res_run.json()[0]["sample_accession"]
+
+    query = f"{SAMPLES_URL}&includeAccessions={sample_acc}"
+    res_sample = requests.get(query, headers=HEADERS)
+
+    full_res_dict = res_run.json()[0] | res_sample.json()[0]
+
+    fields_to_clean = ["lat", "lon", "depth"]
+
+    for field in fields_to_clean:
+        val = full_res_dict[field]
+        if val == "":
+            full_res_dict[field] = "NA"
+
+    if full_res_dict["collection_date"] == "":
+        full_res_dict["collectionDate"] = "NA"
+    else:
+        full_res_dict["collectionDate"] = full_res_dict["collection_date"]
+
+    del full_res_dict["collection_date"]
+
+    res_df = pd.DataFrame(full_res_dict, index=[0])
+    res_df.columns = [
+        "RunID",
+        "SampleID",
+        "StudyID",
+        "decimalLongitude",
+        "depth",
+        "decimalLatitude",
+        "collectionDate",
+    ]
+
+    return res_df
+
+
+def get_all_metadata_from_runs(runs):
+
+    run_metadata_dict = defaultdict(dict)
+
+    for run in runs:
+        res_df = get_metadata_from_run_acc(run)
+        if res_df is not False:
+            run_metadata_dict[run] = res_df
+
+    return run_metadata_dict
+
+
+def cleanup_taxa(df):
+
+    df.pop("Kingdom")
+    cleaned_df = df.rename(columns={"Superkingdom": "Kingdom", "asv": "ASVID"})
+
+    ranks = ["Kingdom", "Phylum", "Class", "Order", "Family", "Genus", "Species"]
+
+    for rank in ranks:
+        cleaned_df[rank] = cleaned_df[rank].apply(
+            lambda x: x.split("__")[1] if pd.notnull(x) else "NA"
+        )
+
+    for rank in ranks:
+        cleaned_df[rank] = cleaned_df[rank].apply(lambda x: x if x != "" else "NA")
+
+    cleaned_df = cleaned_df[
+        [
+            "ASVID",
+            "StudyID",
+            "SampleID",
+            "RunID",
+            "decimalLongitude",
+            "decimalLatitude",
+            "depth",
+            "collectionDate",
+            "Kingdom",
+            "Phylum",
+            "Class",
+            "Order",
+            "Family",
+            "Genus",
+            "Species",
+            "ASVSeq",
+        ]
+    ]
+
+    return cleaned_df
+
+
+def get_asv_dict(runs_df, root_path):
+
+    asv_dict = {}
+    for i in range(0, len(runs_df)):
+        run_acc = runs_df.loc[i, "run"]
+        status = runs_df.loc[i, "status"]
+
+        if status != "all_results":
+            continue
+
+        tax_file = sorted(
+            list(
+                (pathlib.Path(root_path) / run_acc / "asv").glob(
+                    "*_DADA2-SILVA_asv_tax.tsv"
+                )
+            )
+        )[0]
+        count_files = sorted(
+            list(pathlib.Path(f"{root_path}/{run_acc}/asv").glob("*S-V*/*.tsv"))
+        )
+
+        asv_fasta_file = sorted(
+            list(pathlib.Path(f"{root_path}/{run_acc}/asv").glob("*_asv_seqs.fasta"))
+        )[0]
+        fasta = pyfastx.Fasta(str(asv_fasta_file), build_index=False)
+        asv_fasta_dict = {name: seq for name, seq in fasta}
+        asv_fasta_df = pd.DataFrame(asv_fasta_dict, index=["ASVSeq"]).transpose()
+        asv_fasta_df["asv"] = asv_fasta_df.index
+        run_tax_df = pd.read_csv(tax_file, sep="\t")
+
+        count_dfs = []
+
+        for count_file in count_files:
+            count_df = pd.read_csv(count_file, sep="\t")
+            count_dfs.append(count_df)
+
+        all_ampregions_count_df = pd.concat(count_dfs)
+        merged_df = all_ampregions_count_df.merge(
+            run_tax_df, left_on="asv", right_on="ASV"
+        )
+        merged_df.pop("ASV")
+        run_col = [run_acc] * len(merged_df)
+        merged_df["RunID"] = run_col
+        merged_df = merged_df.merge(asv_fasta_df, on="asv")
+        asv_dict[run_acc] = merged_df
+
+    return asv_dict
+
+
+def main():
+
+    input_path, runs, output = parse_args()
+
+    root_path = pathlib.Path(input_path)
+
+    if not root_path.exists():
+        logging.error(f"Results path does not exist: {root_path}")
+        exit(1)
+
+    runs_df = pd.read_csv(runs, names=["run", "status"])
+
+    all_runs = runs_df.run.to_list()
+    run_metadata_dict = get_all_metadata_from_runs(all_runs)
+    asv_dict = get_asv_dict(runs_df, root_path)
+
+    all_merged_df = []
+
+    for run in all_runs:
+        if run in asv_dict.keys() and run in run_metadata_dict.keys():
+            run_asv_data = asv_dict[run]
+            run_metadata = run_metadata_dict[run]
+            run_merged_result = run_metadata.merge(run_asv_data, on="RunID")
+            all_merged_df.append(run_merged_result)
+
+    final_df = pd.concat(all_merged_df, ignore_index=True)
+    final_df = cleanup_taxa(final_df)
+
+    final_df.to_csv(f"{output}_dwcready.csv", index=False, na_rep="NA")
+
+
+if __name__ == "__main__":
+    main()
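
The ENA portal lookup that dwc_summary_generator performs for each run can be exercised in isolation. Below is a small sketch mirroring get_metadata_from_run_acc's two-step query (run accession to sample accession, then sample accession to coordinates and collection date), assuming network access; ERR0000001 is a hypothetical placeholder accession.

# Two-step ENA lookup: run accession -> sample accession -> sample metadata.
# ERR0000001 is a placeholder accession, not taken from the diff.
import requests

URL = "https://www.ebi.ac.uk/ena/portal/api/search?result"
RUNS_URL = f"{URL}=read_run&fields=secondary_study_accession,sample_accession&limit=10&format=json&download=false"
SAMPLES_URL = f"{URL}=sample&fields=lat,lon,collection_date,depth&limit=10&format=json&download=false"
HEADERS = {"Accept": "application/json"}

res_run = requests.get(f"{RUNS_URL}&includeAccessions=ERR0000001", headers=HEADERS)
if res_run.status_code == 200:
    sample_acc = res_run.json()[0]["sample_accession"]
    res_sample = requests.get(
        f"{SAMPLES_URL}&includeAccessions={sample_acc}", headers=HEADERS
    )
    # Merge run-level and sample-level fields, as the script does with `|`
    print(res_run.json()[0] | res_sample.json()[0])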
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-# Copyright 2024 EMBL - European Bioinformatics Institute
+# Copyright 2024-2025 EMBL - European Bioinformatics Institute
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-# Copyright 2024 EMBL - European Bioinformatics Institute
+# Copyright 2024-2025 EMBL - European Bioinformatics Institute
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-# Copyright 2024 EMBL - European Bioinformatics Institute
+# Copyright 2024-2025 EMBL - European Bioinformatics Institute
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-# Copyright 2024 EMBL - European Bioinformatics Institute
+# Copyright 2024-2025 EMBL - European Bioinformatics Institute
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,7 +1,7 @@
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 
-# Copyright 2024 EMBL - European Bioinformatics Institute
+# Copyright 2024-2025 EMBL - European Bioinformatics Institute
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.