mgnify-pipelines-toolkit 0.2.1__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mgnify-pipelines-toolkit might be problematic. Click here for more details.

Files changed (46)
  1. mgnify_pipelines_toolkit/analysis/amplicon/amplicon_utils.py +1 -1
  2. mgnify_pipelines_toolkit/analysis/amplicon/are_there_primers.py +1 -1
  3. mgnify_pipelines_toolkit/analysis/amplicon/assess_inflection_point_mcp.py +1 -1
  4. mgnify_pipelines_toolkit/analysis/amplicon/assess_mcp_proportions.py +1 -1
  5. mgnify_pipelines_toolkit/analysis/amplicon/classify_var_regions.py +1 -1
  6. mgnify_pipelines_toolkit/analysis/amplicon/find_mcp_inflection_points.py +1 -1
  7. mgnify_pipelines_toolkit/analysis/amplicon/make_asv_count_table.py +1 -1
  8. mgnify_pipelines_toolkit/analysis/amplicon/mapseq_to_asv_table.py +1 -1
  9. mgnify_pipelines_toolkit/analysis/amplicon/primer_val_classification.py +1 -1
  10. mgnify_pipelines_toolkit/analysis/amplicon/remove_ambiguous_reads.py +1 -1
  11. mgnify_pipelines_toolkit/analysis/amplicon/rev_comp_se_primers.py +1 -1
  12. mgnify_pipelines_toolkit/analysis/amplicon/standard_primer_matching.py +1 -1
  13. mgnify_pipelines_toolkit/analysis/assembly/add_rhea_chebi_annotation.py +1 -1
  14. mgnify_pipelines_toolkit/analysis/assembly/antismash_gff_builder.py +1 -1
  15. mgnify_pipelines_toolkit/analysis/assembly/combined_gene_caller_merge.py +511 -0
  16. mgnify_pipelines_toolkit/analysis/assembly/generate_gaf.py +1 -1
  17. mgnify_pipelines_toolkit/analysis/assembly/gff_annotation_utils.py +829 -0
  18. mgnify_pipelines_toolkit/analysis/assembly/gff_file_utils.py +82 -0
  19. mgnify_pipelines_toolkit/analysis/assembly/gff_toolkit.py +170 -0
  20. mgnify_pipelines_toolkit/analysis/assembly/go_utils.py +1 -1
  21. mgnify_pipelines_toolkit/analysis/assembly/summarise_goslims.py +1 -1
  22. mgnify_pipelines_toolkit/analysis/shared/dwc_summary_generator.py +240 -0
  23. mgnify_pipelines_toolkit/analysis/shared/fastq_suffix_header_check.py +1 -1
  24. mgnify_pipelines_toolkit/analysis/shared/get_subunits.py +1 -1
  25. mgnify_pipelines_toolkit/analysis/shared/get_subunits_coords.py +1 -1
  26. mgnify_pipelines_toolkit/analysis/shared/library_strategy_check.py +1 -1
  27. mgnify_pipelines_toolkit/analysis/shared/mapseq2biom.py +1 -1
  28. mgnify_pipelines_toolkit/analysis/shared/markergene_study_summary.py +243 -0
  29. mgnify_pipelines_toolkit/analysis/shared/study_summary_generator.py +1 -1
  30. mgnify_pipelines_toolkit/constants/db_labels.py +1 -1
  31. mgnify_pipelines_toolkit/constants/regex_ambiguous_bases.py +1 -1
  32. mgnify_pipelines_toolkit/constants/regex_fasta_header.py +1 -1
  33. mgnify_pipelines_toolkit/constants/tax_ranks.py +1 -1
  34. mgnify_pipelines_toolkit/constants/thresholds.py +8 -1
  35. mgnify_pipelines_toolkit/constants/var_region_coordinates.py +1 -1
  36. mgnify_pipelines_toolkit/schemas/schemas.py +1 -1
  37. mgnify_pipelines_toolkit/utils/fasta_to_delimited.py +1 -1
  38. mgnify_pipelines_toolkit/utils/get_mpt_version.py +1 -1
  39. {mgnify_pipelines_toolkit-0.2.1.dist-info → mgnify_pipelines_toolkit-1.0.0.dist-info}/METADATA +3 -1
  40. mgnify_pipelines_toolkit-1.0.0.dist-info/RECORD +48 -0
  41. {mgnify_pipelines_toolkit-0.2.1.dist-info → mgnify_pipelines_toolkit-1.0.0.dist-info}/WHEEL +1 -1
  42. {mgnify_pipelines_toolkit-0.2.1.dist-info → mgnify_pipelines_toolkit-1.0.0.dist-info}/entry_points.txt +4 -2
  43. mgnify_pipelines_toolkit/analysis/assembly/cgc_merge.py +0 -424
  44. mgnify_pipelines_toolkit-0.2.1.dist-info/RECORD +0 -43
  45. {mgnify_pipelines_toolkit-0.2.1.dist-info → mgnify_pipelines_toolkit-1.0.0.dist-info}/LICENSE +0 -0
  46. {mgnify_pipelines_toolkit-0.2.1.dist-info → mgnify_pipelines_toolkit-1.0.0.dist-info}/top_level.txt +0 -0
@@ -1,424 +0,0 @@
1
- #!/usr/bin/env python3
2
-
3
- import argparse
4
- import json
5
- import logging
6
- import os
7
- import re
8
-
9
- from Bio import SeqIO
10
-
11
- __version__ = "1.0.4"
12
-
13
-
14
class Region:
    """A closed (inclusive) coordinate interval [start, end].

    Note: the rich comparisons are *disjointness* tests, not a total
    ordering -- ``a > b`` means "a lies entirely to the right of b".
    """

    def __init__(self, start, end):
        # Assumes start <= end for both strands.
        self.start = int(start)
        self.end = int(end)

    def __str__(self):
        return f"[{self.start},{self.end}]"

    def __ge__(self, other):
        # self starts at or after the end of other.
        return self.start >= other.end

    def __gt__(self, other):
        # self lies strictly to the right of other.
        return self.start > other.end

    def __le__(self, other):
        # self ends at or before the start of other.
        return self.end <= other.start

    def __lt__(self, other):
        # self lies strictly to the left of other.
        return self.end < other.start

    def length(self):
        # Inclusive coordinates: [3, 5] has length 3.
        return self.end - self.start + 1

    def extends_right(self, other):
        """Return True if self overlaps other and ends further right."""
        return bool(self.overlaps(other)) and self.end > other.end

    def extend(self, other):
        """Grow self in place so it also covers other, if the two overlap."""
        if self.overlaps(other):
            self.end = max(self.end, other.end)
            self.start = min(self.start, other.start)

    def within(self, other):
        """Return True if self is fully contained in other."""
        return other.start <= self.start and self.end <= other.end

    def overlaps(self, other):
        """Return the overlap length in bases, or False when disjoint."""
        if self > other or other > self:
            return False
        # Overlap = sum of the two lengths minus the length of the
        # combined span (min start to max end), inclusive coordinates.
        combined = max(self.end, other.end) - min(self.start, other.start) + 1
        return self.length() + other.length() - combined
67
-
68
-
69
# FGS fasta headers carry seq_id/start/end, which is enough to pick out the
# sequences to keep; prodigal headers instead use seq_id/index_number, so we
# also keep that index on the region.
class NumberedRegion(Region):
    """A Region that additionally remembers prodigal's fragment index."""

    def __init__(self, start, end, nid):
        super().__init__(start, end)
        self.nid = nid  # prodigal's per-contig sequence index
75
-
76
-
77
def flatten_regions(regions):
    """Collapse possibly-overlapping regions into a non-overlapping set.

    Regions are processed in start order; a region that overlaps its
    predecessor and reaches further right widens the predecessor *in
    place*, and a fully contained region is dropped.
    """
    if len(regions) < 2:
        return regions

    ordered = sorted(regions, key=lambda r: r.start)
    merged = [ordered[0]]
    for current in ordered[1:]:
        last = merged[-1]
        if not current.overlaps(last):
            # Disjoint from the previous region: start a new one.
            merged.append(current)
        elif current.extends_right(last):
            # Overlaps and reaches further right: widen the previous region.
            last.extend(current)
        # Otherwise current is contained in last: nothing to do.
    return merged
93
-
94
-
95
def check_against_gaps(regions, candidates):
    """Return the candidates that fall entirely outside every region.

    ``regions`` is assumed non-overlapping once sorted; both lists are
    walked in start order with a forward-only cursor ``r`` into
    ``regions``, so the scan is linear after sorting.
    """
    regions = sorted(regions, key=lambda reg: reg.start)
    candidates = sorted(candidates, key=lambda cand: cand.start)

    if not regions:
        # No existing predictions: every candidate is accepted.
        return candidates

    accepted = []
    r = 0
    for cand in candidates:
        if cand < regions[0] or cand > regions[-1]:
            # Entirely before the first, or after the last, region.
            accepted.append(cand)
            continue
        # Skip regions lying completely to the left of this candidate.
        while r < len(regions) - 1 and cand >= regions[r]:
            r += 1
        if cand < regions[r]:
            # Candidate sits in the gap before regions[r]: accept it.
            accepted.append(cand)
    return accepted
114
-
115
-
116
def output_prodigal(predictions, files, outputs):
    """Append the retained prodigal records to the merged fasta files.

    ``files``/``outputs`` hold [stats, ffn, faa]; indices 1 and 2 are the
    nucleotide and protein fasta files respectively. Prodigal records are
    identified as "<seq>_<index>" via NumberedRegion.nid.
    """
    keep = {
        f"{seq}_{region.nid}"
        for seq in predictions
        for strand in ("-", "+")
        for region in predictions[seq][strand]
    }

    for index in (1, 2):  # 1 = .ffn, 2 = .faa
        selected = []
        for record in SeqIO.parse(files[index], "fasta"):
            # Drop anything after the first space in the header.
            name = record.id.split(" ")[0]
            # Strip a trailing stop "*" from the sequence.
            record.seq = record.seq.rstrip("*")
            if name in keep:
                selected.append(record)
        with open(outputs[index], "a") as handle:
            SeqIO.write(selected, handle, "fasta")
138
-
139
-
140
def output_fgs(predictions, files, outputs):
    """Append the retained FragGeneScan records to the merged fasta files.

    FGS records are identified as "<seq>_<start>_<end>_<strand>", matching
    the headers FGS writes into its own fasta output.
    """
    keep = set()
    for seq, strands in predictions.items():
        for strand in ("-", "+"):
            for region in strands[strand]:
                keep.add(f"{seq}_{region.start}_{region.end}_{strand}")

    for index in (1, 2):  # 1 = .ffn, 2 = .faa
        selected = []
        for record in SeqIO.parse(files[index], "fasta"):
            # Drop anything after the first space in the header.
            name = record.id.split(" ")[0]
            # Mask stop codons: replace "*" with "X".
            record.seq = record.seq.replace("*", "X")
            if name in keep:
                selected.append(record)
        with open(outputs[index], "a") as handle:
            SeqIO.write(selected, handle, "fasta")
161
-
162
-
163
def output_files(predictions, summary, files):
    """Write the merged fasta outputs plus the JSON summary.

    The per-caller writers open the merged files in append mode, so any
    leftovers from a previous (restarted) run are removed first.
    """
    logging.info("Removing output files if they exist.")
    for path in files["merged"]:
        if os.path.exists(path):
            logging.info(f"Removing {path}")
            os.remove(path)

    # Keep dict-iteration order so the merged fasta content order is
    # identical to previous runs.
    for caller in predictions:
        if caller == "fgs":
            output_fgs(predictions["fgs"], files["fgs"], files["merged"])
        if caller == "prodigal":
            output_prodigal(predictions["prodigal"], files["prodigal"], files["merged"])

    # files["merged"][0] is the .out file; it receives the summary JSON.
    with open(files["merged"][0], "w") as sf:
        sf.write(json.dumps(summary, sort_keys=True, indent=4) + "\n")
181
-
182
-
183
def get_regions_fgs(fn):
    """Parse FragGeneScan ``.out`` predictions into per-contig regions.

    File format::

        >Bifidobacterium-longum-subsp-infantis-MC2-contig1
        256  2133  -  1  1.263995  I:  D:

    Returns:
        dict mapping contig id -> {"+": [Region, ...], "-": [Region, ...]}

    Raises:
        ValueError: if a data line appears before any ">" header
            (previously this crashed with a NameError).
    """
    regions = {}
    id_ = None
    with open(fn) as f:
        for line in f:
            if not line.strip():
                # Tolerate blank lines (previously an IndexError on line[0]).
                continue
            if line.startswith(">"):
                id_ = line.split()[0][1:]
                regions[id_] = {"+": [], "-": []}
            else:
                if id_ is None:
                    raise ValueError(f"FGS data line before any '>' header in {fn}: {line!r}")
                fields = line.split()  # start, end, strand, ...
                start = int(fields[0])
                end = int(fields[1])
                regions[id_][fields[2]].append(Region(start, end))
    return regions
203
-
204
-
205
- """
206
- # noqa: E501
207
- This is from cmsearch
208
- ERR855786.1000054-HWI-M02024:111:000000000-A8H14:1:1115:23473:14586-1 - LSU_rRNA_bacteria RF02541 hmm 1224 1446 5 227 + - 6 0.61 0.8 135.2 2.8e-38 ! -
209
- """
210
-
211
-
212
def get_regions_mask(mask_file):
    """Parse a cmsearch-style masked-region file (e.g. ncRNA hits).

    Whitespace columns 8 and 9 (0-based 7/8) hold the hit coordinates;
    they are swapped when given in reverse (minus-strand) order.

    Returns:
        dict mapping sequence id -> [Region, ...]
    """
    regions = {}
    with open(mask_file) as f:
        for line in f:
            if line.startswith("#"):
                continue  # comment line
            fields = line.rstrip().split()
            seq_id = fields[0]
            start = int(fields[7])
            end = int(fields[8])
            if start > end:
                # Minus-strand hit: normalise to start <= end.
                start, end = end, start
            regions.setdefault(seq_id, []).append(Region(start, end))
    return regions
229
-
230
-
231
# Prodigal .out example:
# # Sequence Data: seqnum=1;seqlen=25479;seqhdr="Bifidobacterium-longum-subsp-infantis-MC2-contig1"
# # Model Data: version=Prodigal.v2.6.3;run_type=Single;model="Ab initio";gc_cont=59.94;transl_table=11;uses_sd=1
# >1_1_279_+
def get_regions_prodigal(fn):
    """Parse prodigal output into per-contig NumberedRegion lists.

    Prodigal identifies sequences in its fasta output by fragment index
    rather than coordinates, so the index is kept on each region.

    Returns:
        dict mapping contig id -> {"+": [NumberedRegion, ...], "-": [...]}
    """
    regions = {}
    with open(fn) as f:
        for line in f:
            if line.startswith("# Model Data"):
                continue
            if line.startswith("# Sequence Data"):
                m = re.search(r'seqhdr="(\S+)"', line)
                if m:
                    id_ = m.group(1)
                    regions[id_] = {"+": [], "-": []}
            else:
                # ">index_start_end_strand", e.g. ">1_1_279_+"
                parts = line[1:].rstrip().split("_")
                nid = int(parts[0])  # fragment index used in the fasta ids
                regions[id_][parts[3]].append(
                    NumberedRegion(int(parts[1]), int(parts[2]), nid)
                )
    return regions
257
-
258
-
259
def mask_regions(regions, mask):
    """Drop regions that overlap a mask region by more than 5 bp.

    Nested scan over mask entries (O(N*M)), but the mask is expected to
    be small in practice. The surviving Region objects are the originals,
    collected into fresh lists.
    """
    kept = {}
    for seq in regions:
        kept[seq] = {}
        for strand in ("-", "+"):
            if seq not in mask:
                # Nothing masked on this contig: keep everything.
                kept[seq][strand] = list(regions[seq][strand])
                continue
            # overlaps() returns False or the overlap length; False > 5
            # evaluates to False, so the comparison is safe either way.
            kept[seq][strand] = [
                r
                for r in regions[seq][strand]
                if not any(r.overlaps(m) > 5 for m in mask[seq])
            ]
    return kept
280
-
281
-
282
# FIXME - pointless with only a single set of predictions, but then there
# is nothing to merge anyway.
def merge_predictions(predictions, callers):
    """Merge predictions from two callers, giving priority to callers[0].

    Returns None when predictions exist for a caller that is missing from
    ``callers`` (i.e. no priority was configured for it).
    """
    known = set(callers)
    for caller in predictions:
        if caller not in known:
            # Possibly worth raising here instead of returning None.
            return None

    first, second = callers[0], callers[1]
    # Top-priority caller's predictions pass through untouched.
    merged = {first: predictions[first]}

    # Assumes exactly two callers for now; could be extended.
    merged[second] = {}
    for seq in predictions[second]:
        merged[second][seq] = {}
        for strand in ("-", "+"):
            if seq in predictions[first]:
                # Flatten the priority caller's regions into a
                # non-overlapping set, then keep only the second caller's
                # regions that fit in the remaining gaps.
                occupied = flatten_regions(predictions[first][seq][strand])
                merged[second][seq][strand] = check_against_gaps(
                    occupied, predictions[second][seq][strand]
                )
            else:
                # Contig unseen by the priority caller: take everything.
                merged[second][seq][strand] = predictions[second][seq][strand]

    return merged
313
-
314
-
315
def get_counts(predictions):
    """Count predicted regions per caller, summed over contigs and strands."""
    return {
        caller: sum(
            len(samples[strand])
            for samples in predictions[caller].values()
            for strand in ("-", "+")
        )
        for caller in predictions
    }
323
-
324
-
325
def combine_main():
    """Command-line entry point: merge prodigal and FragGeneScan gene calls.

    Reads the per-caller prediction files, optionally masks ncRNA regions,
    merges the callers according to the configured priority, and writes the
    merged .out/.ffn/.faa files plus a JSON summary.
    """
    parser = argparse.ArgumentParser(
        "MGnify gene caller combiner. This script will merge the gene called by prodigal and fraggenescan (in any order)"
    )
    parser.add_argument("-n", "--name", action="store", dest="name", required=True, help="basename")
    parser.add_argument("-k", "--mask", action="store", dest="mask", required=False, help="Sequence mask file")

    parser.add_argument("-a", "--prodigal-out", action="store", dest="prodigal_out", required=False, help="Stats out prodigal")
    parser.add_argument("-b", "--prodigal-ffn", action="store", dest="prodigal_ffn", required=False, help="Stats ffn prodigal")
    parser.add_argument("-c", "--prodigal-faa", action="store", dest="prodigal_faa", required=False, help="Stats faa prodigal")

    parser.add_argument("-d", "--fgs-out", action="store", dest="fgs_out", required=False, help="Stats out FGS")
    parser.add_argument("-e", "--fgs-ffn", action="store", dest="fgs_ffn", required=False, help="Stats ffn FGS")
    parser.add_argument("-f", "--fgs-faa", action="store", dest="fgs_faa", required=False, help="Stats faa FGS")

    parser.add_argument(
        "-p",
        "--caller-priority",
        action="store",
        dest="caller_priority",
        required=False,
        choices=["prodigal_fgs", "fgs_prodigal"],
        default="prodigal_fgs",
        help="Caller priority.",
    )

    parser.add_argument("-v", "--verbose", help="verbose output", dest="verbose", action="count", required=False)
    parser.add_argument("--version", action="version", version=f"{__version__}")

    args = parser.parse_args()

    # Logging: no -v -> WARNING, -v -> INFO, -vv (or more) -> DEBUG.
    verbosity = args.verbose or 0
    log_level = logging.WARNING
    if verbosity:
        log_level = logging.DEBUG if verbosity > 1 else logging.INFO
    logging.basicConfig(level=log_level, format="%(levelname)s %(asctime)s - %(message)s", datefmt="%Y/%m/%d %I:%M:%S %p")

    summary = {}
    all_predictions = {}
    files = {}

    if args.caller_priority:
        caller_priority = args.caller_priority.split("_")
    else:
        caller_priority = ["prodigal", "fgs"]
    logging.info(f"Caller priority: 1. {caller_priority[0]}, 2. {caller_priority[1]}")

    # Load whichever callers were supplied; insertion order (prodigal
    # before fgs) also drives the write order in output_files().
    if args.prodigal_out:
        logging.info("Prodigal presented")
        logging.info("Getting Prodigal regions...")
        all_predictions["prodigal"] = get_regions_prodigal(args.prodigal_out)
        files["prodigal"] = [args.prodigal_out, args.prodigal_ffn, args.prodigal_faa]

    if args.fgs_out:
        logging.info("FGS presented")
        logging.info("Getting FragGeneScan regions ...")
        all_predictions["fgs"] = get_regions_fgs(args.fgs_out)
        files["fgs"] = [args.fgs_out, args.fgs_ffn, args.fgs_faa]

    summary["all"] = get_counts(all_predictions)

    # Apply the ncRNA mask, if one was supplied.
    logging.info("Masking non coding RNA regions...")
    if args.mask:
        logging.info("Reading regions for masking...")
        mask = get_regions_mask(args.mask)
        if "prodigal" in all_predictions:
            logging.info("Masking Prodigal outputs...")
            all_predictions["prodigal"] = mask_regions(all_predictions["prodigal"], mask)
        if "fgs" in all_predictions:
            logging.info("Masking FragGeneScan outputs...")
            all_predictions["fgs"] = mask_regions(all_predictions["fgs"], mask)
        # NOTE(review): assumed to be recorded only when a mask was given;
        # the diff rendering hides the original indentation - confirm.
        summary["masked"] = get_counts(all_predictions)

    # Merge the callers (only meaningful when both produced predictions).
    if len(all_predictions) > 1:
        logging.info("Merging combined gene caller results...")
        merged_predictions = merge_predictions(all_predictions, caller_priority)
    else:
        logging.info("Skipping merging step...")
        merged_predictions = all_predictions
    summary["merged"] = get_counts(merged_predictions)

    # Output fasta files and summary (json)
    logging.info("Writing output files...")
    files["merged"] = [args.name + ext for ext in [".out", ".ffn", ".faa"]]
    output_files(merged_predictions, summary, files)
422
-
423
if __name__ == "__main__":
    # Script entry point for direct execution.
    combine_main()
@@ -1,43 +0,0 @@
1
- mgnify_pipelines_toolkit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- mgnify_pipelines_toolkit/analysis/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
- mgnify_pipelines_toolkit/analysis/amplicon/amplicon_utils.py,sha256=9ScTh7uAIEBRDt61oG4inu9yezEzP1T2DgFWitaq4Po,6567
4
- mgnify_pipelines_toolkit/analysis/amplicon/are_there_primers.py,sha256=Hfp5P89Sx6QE5oAxdNmuDwySG9FII3x4H5RFEXgnbF4,5311
5
- mgnify_pipelines_toolkit/analysis/amplicon/assess_inflection_point_mcp.py,sha256=hVkg8-tdLLf1Ewy9hor-H9zsyi-n8dnuj_shTQ5_rrM,7548
6
- mgnify_pipelines_toolkit/analysis/amplicon/assess_mcp_proportions.py,sha256=xdnois8ilj4wuyDSc8xfIclVpcqaygCliVjEgnFDdi0,5382
7
- mgnify_pipelines_toolkit/analysis/amplicon/classify_var_regions.py,sha256=kIuE2wo3FaFZw2-HRGxstKz29FyGuhqVDRhf_vPZgsA,19921
8
- mgnify_pipelines_toolkit/analysis/amplicon/find_mcp_inflection_points.py,sha256=EnsIrPGigsy8jVnjYgSECihhuquSJTgCi-k6fhusKYM,3547
9
- mgnify_pipelines_toolkit/analysis/amplicon/make_asv_count_table.py,sha256=ICFR8Ci_VofQFykasiSWwOwL_SH64PVcROoenw5jifE,8751
10
- mgnify_pipelines_toolkit/analysis/amplicon/mapseq_to_asv_table.py,sha256=9QI6o85T4JPFq4EdKmnYzI6sxPLJG6t9W0xKiu24aqw,5035
11
- mgnify_pipelines_toolkit/analysis/amplicon/primer_val_classification.py,sha256=d_Mco92RRUXSq5-5oFlXC0ZO83kbxwOREoCCyA2glDc,3751
12
- mgnify_pipelines_toolkit/analysis/amplicon/remove_ambiguous_reads.py,sha256=8vwH6PY-XwMZhaUo08tOwdFsoREfNumvvDawTb9Y98U,3168
13
- mgnify_pipelines_toolkit/analysis/amplicon/rev_comp_se_primers.py,sha256=19NgCYE12bEvRBVibhZtZywwRiMdiBUBJjzL4by3_qo,1717
14
- mgnify_pipelines_toolkit/analysis/amplicon/standard_primer_matching.py,sha256=8xCjkCMtLuBWZ74AUu7tw0uXQRII3jD3n12PX-Xd9y4,11109
15
- mgnify_pipelines_toolkit/analysis/assembly/add_rhea_chebi_annotation.py,sha256=8GRjqDVQLU6cutn-40wVuEz_PxlnjCz33YJ0PUpObIc,4253
16
- mgnify_pipelines_toolkit/analysis/assembly/antismash_gff_builder.py,sha256=OODl3XhLvksvG5RZn1iHZlg9L3DXiWIkyxJ6o-y6oeg,6949
17
- mgnify_pipelines_toolkit/analysis/assembly/cgc_merge.py,sha256=u6r_1GRGgBAJQvU_t5Rtl3ZYjTtGJGd5yHCobtL9ob0,15405
18
- mgnify_pipelines_toolkit/analysis/assembly/generate_gaf.py,sha256=U1Ls3O0CQmukmoyUwEAEN11jHUKuCdS-qVkr5ai243I,3582
19
- mgnify_pipelines_toolkit/analysis/assembly/go_utils.py,sha256=vsYaFJ_cmbo6DXlWs_X8wpZJfMQOq1CrLX4-3owmYjI,5447
20
- mgnify_pipelines_toolkit/analysis/assembly/summarise_goslims.py,sha256=RthgLO3YTO_JGMC7Nx2JDrowXRimnOtVUDkM1l31rt4,5834
21
- mgnify_pipelines_toolkit/analysis/shared/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
22
- mgnify_pipelines_toolkit/analysis/shared/fastq_suffix_header_check.py,sha256=H5ccd1e_e5dk8vhVOvHLK1lknYbRPbnqPjULCYnU0FQ,4021
23
- mgnify_pipelines_toolkit/analysis/shared/get_subunits.py,sha256=xl5HduWtGPWiI9yqsjQ3itIzwHSxF2ig5KgjLXmj9EE,4772
24
- mgnify_pipelines_toolkit/analysis/shared/get_subunits_coords.py,sha256=DTX7S1P_BkGPEeDkbmUn1YoB247hpdNIe5rdFdRYDdA,1929
25
- mgnify_pipelines_toolkit/analysis/shared/library_strategy_check.py,sha256=XV1vjkjIHhzouM1k5hu_51XK_mgC_EOOGDN3mx4LOvc,1991
26
- mgnify_pipelines_toolkit/analysis/shared/mapseq2biom.py,sha256=exzWyuK0YxDiVSu4WX2H7g-uT5Y00w_EmrFqSHjRObU,5554
27
- mgnify_pipelines_toolkit/analysis/shared/study_summary_generator.py,sha256=aWD-1B_fJg4rYZj2p8t8CUZdG1lDSo-oeFtLvjLgsak,13680
28
- mgnify_pipelines_toolkit/constants/db_labels.py,sha256=_2sGzTlfX7unGqkLylQFEUWNPQ8NZnQMtzlfVFuWtyU,853
29
- mgnify_pipelines_toolkit/constants/regex_ambiguous_bases.py,sha256=dCP3u_Qo-JMk3aqVapkqEbVUGE06jBQmUH6bB3bT8k0,1088
30
- mgnify_pipelines_toolkit/constants/regex_fasta_header.py,sha256=_2UTWfHKJyyFkIRQIPM2wDf-QkRTdLJ4xsA6gAkY9f4,1188
31
- mgnify_pipelines_toolkit/constants/tax_ranks.py,sha256=63dQlW7jAjLPOSCT670QCS5WhTp13vwaHqfmFYbKMyg,1076
32
- mgnify_pipelines_toolkit/constants/thresholds.py,sha256=zz8paGQfZAU8tT-RbSGpzZ1Aopf77yEs97BAblHH5fk,964
33
- mgnify_pipelines_toolkit/constants/var_region_coordinates.py,sha256=jbOB_bTnW2TRjmdF7IS1A7nNOLt-lGnGyVXUHu0TmvQ,1307
34
- mgnify_pipelines_toolkit/schemas/schemas.py,sha256=fd2xCoA1Ty-XaMG9U_gxNcBokHiYENbA85n9YTsqbpU,7098
35
- mgnify_pipelines_toolkit/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
36
- mgnify_pipelines_toolkit/utils/fasta_to_delimited.py,sha256=GbNT7clHso21w_1PbPpWKVRd5bNs_MDbGXt8XVIGl2o,3991
37
- mgnify_pipelines_toolkit/utils/get_mpt_version.py,sha256=zsQ4TuR4vpqYa67MgIdopdscsS0DVJdy4enRe1nCjSs,793
38
- mgnify_pipelines_toolkit-0.2.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
39
- mgnify_pipelines_toolkit-0.2.1.dist-info/METADATA,sha256=fc5D5ynYHz-mer-o6RMNbOIHEucH_hphS35bjrKtLb8,6098
40
- mgnify_pipelines_toolkit-0.2.1.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
41
- mgnify_pipelines_toolkit-0.2.1.dist-info/entry_points.txt,sha256=60Nov738JAon-uZXUqqjOGy4TXxgS4xtxqYhAi12HY0,2084
42
- mgnify_pipelines_toolkit-0.2.1.dist-info/top_level.txt,sha256=xA_wC7C01V3VwuDnqwRM2QYeJJ45WtvF6LVav4tYxuE,25
43
- mgnify_pipelines_toolkit-0.2.1.dist-info/RECORD,,