smftools 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- smftools/_settings.py +3 -2
- smftools/_version.py +1 -1
- smftools/datasets/F1_sample_sheet.csv +5 -0
- smftools/datasets/datasets.py +8 -7
- smftools/informatics/__init__.py +7 -5
- smftools/informatics/{bam_conversion.py → archived/bam_conversion.py} +16 -4
- smftools/informatics/{bam_direct.py → archived/bam_direct.py} +22 -8
- smftools/informatics/archived/basecalls_to_adata.py +71 -0
- smftools/informatics/conversion_smf.py +79 -0
- smftools/informatics/direct_smf.py +89 -0
- smftools/informatics/fast5_to_pod5.py +8 -6
- smftools/informatics/helpers/__init__.py +18 -0
- smftools/informatics/helpers/align_and_sort_BAM.py +9 -13
- smftools/informatics/helpers/aligned_BAM_to_bed.py +73 -0
- smftools/informatics/helpers/bed_to_bigwig.py +39 -0
- smftools/informatics/helpers/binarize_converted_base_identities.py +2 -2
- smftools/informatics/helpers/canoncall.py +2 -0
- smftools/informatics/helpers/complement_base_list.py +21 -0
- smftools/informatics/helpers/concatenate_fastqs_to_bam.py +54 -0
- smftools/informatics/helpers/converted_BAM_to_adata.py +161 -92
- smftools/informatics/helpers/count_aligned_reads.py +13 -9
- smftools/informatics/helpers/extract_base_identities.py +34 -20
- smftools/informatics/helpers/extract_readnames_from_BAM.py +22 -0
- smftools/informatics/helpers/find_conversion_sites.py +11 -9
- smftools/informatics/helpers/generate_converted_FASTA.py +33 -14
- smftools/informatics/helpers/get_chromosome_lengths.py +32 -0
- smftools/informatics/helpers/index_fasta.py +12 -0
- smftools/informatics/helpers/modcall.py +3 -1
- smftools/informatics/helpers/modkit_extract_to_adata.py +467 -316
- smftools/informatics/helpers/ohe_batching.py +52 -0
- smftools/informatics/helpers/one_hot_encode.py +10 -8
- smftools/informatics/helpers/plot_read_length_and_coverage_histograms.py +52 -0
- smftools/informatics/helpers/separate_bam_by_bc.py +4 -2
- smftools/informatics/helpers/split_and_index_BAM.py +16 -4
- smftools/informatics/load_adata.py +127 -0
- smftools/informatics/subsample_fasta_from_bed.py +47 -0
- smftools/informatics/subsample_pod5.py +69 -13
- smftools/preprocessing/__init__.py +6 -1
- smftools/preprocessing/append_C_context.py +37 -14
- smftools/preprocessing/calculate_complexity.py +2 -2
- smftools/preprocessing/calculate_consensus.py +47 -0
- smftools/preprocessing/calculate_converted_read_methylation_stats.py +60 -9
- smftools/preprocessing/calculate_coverage.py +2 -2
- smftools/preprocessing/calculate_pairwise_hamming_distances.py +1 -1
- smftools/preprocessing/calculate_read_length_stats.py +56 -2
- smftools/preprocessing/clean_NaN.py +2 -2
- smftools/preprocessing/filter_converted_reads_on_methylation.py +4 -2
- smftools/preprocessing/filter_reads_on_length.py +4 -2
- smftools/preprocessing/invert_adata.py +1 -0
- smftools/preprocessing/load_sample_sheet.py +24 -0
- smftools/preprocessing/make_dirs.py +21 -0
- smftools/preprocessing/mark_duplicates.py +34 -19
- smftools/preprocessing/recipes.py +125 -0
- smftools/preprocessing/remove_duplicates.py +7 -4
- smftools/tools/apply_HMM.py +1 -0
- smftools/tools/cluster.py +0 -0
- smftools/tools/read_HMM.py +1 -0
- smftools/tools/subset_adata.py +32 -0
- smftools/tools/train_HMM.py +43 -0
- {smftools-0.1.1.dist-info → smftools-0.1.3.dist-info}/METADATA +13 -7
- smftools-0.1.3.dist-info/RECORD +84 -0
- smftools/informatics/basecalls_to_adata.py +0 -42
- smftools/informatics/pod5_conversion.py +0 -53
- smftools/informatics/pod5_direct.py +0 -55
- smftools/informatics/pod5_to_adata.py +0 -40
- smftools-0.1.1.dist-info/RECORD +0 -64
- {smftools-0.1.1.dist-info → smftools-0.1.3.dist-info}/WHEEL +0 -0
- {smftools-0.1.1.dist-info → smftools-0.1.3.dist-info}/licenses/LICENSE +0 -0
smftools/_settings.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
from pathlib import Path
|
|
2
|
+
from typing import Union
|
|
2
3
|
|
|
3
4
|
class SMFConfig:
|
|
4
5
|
"""\
|
|
@@ -8,9 +9,9 @@ class SMFConfig:
|
|
|
8
9
|
def __init__(
|
|
9
10
|
self,
|
|
10
11
|
*,
|
|
11
|
-
datasetdir: Path
|
|
12
|
+
datasetdir: Union[Path, str] = "./datasets/"
|
|
12
13
|
):
|
|
13
|
-
|
|
14
|
+
self._datasetdir = Path(datasetdir) if isinstance(datasetdir, str) else datasetdir
|
|
14
15
|
|
|
15
16
|
@property
|
|
16
17
|
def datasetdir(self) -> Path:
|
smftools/_version.py
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
__version__ = "0.1.
|
|
1
|
+
__version__ = "0.1.3"
|
smftools/datasets/datasets.py
CHANGED
|
@@ -1,10 +1,9 @@
|
|
|
1
1
|
## datasets
|
|
2
2
|
|
|
3
|
-
def
|
|
3
|
+
def import_HERE():
|
|
4
4
|
"""
|
|
5
|
-
|
|
5
|
+
Imports HERE for loading datasets
|
|
6
6
|
"""
|
|
7
|
-
import anndata as ad
|
|
8
7
|
from pathlib import Path
|
|
9
8
|
from .._settings import settings
|
|
10
9
|
HERE = Path(__file__).parent
|
|
@@ -12,16 +11,18 @@ def import_deps():
|
|
|
12
11
|
|
|
13
12
|
def dCas9_kinetics():
|
|
14
13
|
"""
|
|
15
|
-
|
|
14
|
+
in vitro Hia5 dCas9 kinetics SMF dataset. Nanopore HAC m6A modcalls.
|
|
16
15
|
"""
|
|
17
|
-
|
|
16
|
+
import anndata as ad
|
|
17
|
+
HERE = import_HERE()
|
|
18
18
|
filepath = HERE / "dCas9_m6A_invitro_kinetics.h5ad.gz"
|
|
19
19
|
return ad.read_h5ad(filepath)
|
|
20
20
|
|
|
21
21
|
def Kissiov_and_McKenna_2025():
|
|
22
22
|
"""
|
|
23
|
-
|
|
23
|
+
F1 Hybrid M.CviPI natural killer cell SMF. Nanopore canonical calls of NEB EMseq converted SMF gDNA.
|
|
24
24
|
"""
|
|
25
|
-
|
|
25
|
+
import anndata as ad
|
|
26
|
+
HERE = import_HERE()
|
|
26
27
|
filepath = HERE / "F1_hybrid_NKG2A_enhander_promoter_GpC_conversion_SMF.h5ad.gz"
|
|
27
28
|
return ad.read_h5ad(filepath)
|
smftools/informatics/__init__.py
CHANGED
|
@@ -1,12 +1,14 @@
|
|
|
1
|
-
from .
|
|
2
|
-
from .
|
|
1
|
+
from . import helpers
|
|
2
|
+
from .load_adata import load_adata
|
|
3
|
+
from .subsample_fasta_from_bed import subsample_fasta_from_bed
|
|
3
4
|
from .subsample_pod5 import subsample_pod5
|
|
4
5
|
from .fast5_to_pod5 import fast5_to_pod5
|
|
5
6
|
|
|
6
7
|
|
|
7
8
|
__all__ = [
|
|
8
|
-
"
|
|
9
|
-
"
|
|
9
|
+
"load_adata",
|
|
10
|
+
"subsample_fasta_from_bed",
|
|
10
11
|
"subsample_pod5",
|
|
11
|
-
"fast5_to_pod5"
|
|
12
|
+
"fast5_to_pod5",
|
|
13
|
+
"helpers"
|
|
12
14
|
]
|
|
@@ -18,7 +18,7 @@ def bam_conversion(fasta, output_directory, conversion_types, strands, basecalle
|
|
|
18
18
|
Returns:
|
|
19
19
|
None
|
|
20
20
|
"""
|
|
21
|
-
from .helpers import align_and_sort_BAM, converted_BAM_to_adata, generate_converted_FASTA, split_and_index_BAM
|
|
21
|
+
from .helpers import align_and_sort_BAM, converted_BAM_to_adata, generate_converted_FASTA, split_and_index_BAM, make_dirs
|
|
22
22
|
import os
|
|
23
23
|
input_basecalled_basename = os.path.basename(basecalled_path)
|
|
24
24
|
bam_basename = input_basecalled_basename.split(".")[0]
|
|
@@ -32,16 +32,28 @@ def bam_conversion(fasta, output_directory, conversion_types, strands, basecalle
|
|
|
32
32
|
fasta_basename = os.path.basename(fasta)
|
|
33
33
|
converted_FASTA_basename = fasta_basename.split('.fa')[0]+'_converted.fasta'
|
|
34
34
|
converted_FASTA = os.path.join(output_directory, converted_FASTA_basename)
|
|
35
|
-
if
|
|
35
|
+
if 'converted.fa' in fasta:
|
|
36
|
+
print(fasta + ' is already converted. Using existing converted FASTA.')
|
|
37
|
+
converted_FASTA = fasta
|
|
38
|
+
elif os.path.exists(converted_FASTA):
|
|
36
39
|
print(converted_FASTA + ' already exists. Using existing converted FASTA.')
|
|
37
40
|
else:
|
|
38
41
|
generate_converted_FASTA(fasta, conversion_types, strands, converted_FASTA)
|
|
39
42
|
|
|
40
43
|
# 2) Align the basecalled file to the converted reference FASTA and sort the bam on positional coordinates. Also make an index and a bed file of mapped reads
|
|
41
|
-
|
|
44
|
+
aligned_output = aligned_BAM + bam_suffix
|
|
45
|
+
sorted_output = aligned_sorted_BAM + bam_suffix
|
|
46
|
+
if os.path.exists(aligned_output) and os.path.exists(sorted_output):
|
|
47
|
+
print(sorted_output + ' already exists. Using existing aligned/sorted BAM.')
|
|
48
|
+
else:
|
|
49
|
+
align_and_sort_BAM(converted_FASTA, basecalled_path, bam_suffix, output_directory)
|
|
42
50
|
|
|
43
51
|
### 3) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory###
|
|
44
|
-
|
|
52
|
+
if os.path.isdir(split_dir):
|
|
53
|
+
print(split_dir + ' already exists. Using existing aligned/sorted/split BAMs.')
|
|
54
|
+
else:
|
|
55
|
+
make_dirs([split_dir])
|
|
56
|
+
split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory)
|
|
45
57
|
|
|
46
58
|
# 4) Take the converted BAM and load it into an adata object.
|
|
47
59
|
converted_BAM_to_adata(converted_FASTA, split_dir, mapping_threshold, experiment_name, conversion_types, bam_suffix)
|
|
@@ -29,8 +29,7 @@ def bam_direct(fasta, output_directory, mod_list, thresholds, bam_path, split_di
|
|
|
29
29
|
mod_bed_dir=f"{output_directory}/split_mod_beds"
|
|
30
30
|
mod_tsv_dir=f"{output_directory}/split_mod_tsvs"
|
|
31
31
|
|
|
32
|
-
|
|
33
|
-
|
|
32
|
+
aligned_output = aligned_BAM + bam_suffix
|
|
34
33
|
aligned_sorted_output = aligned_sorted_BAM + bam_suffix
|
|
35
34
|
mod_map = {'6mA': '6mA', '5mC_5hmC': '5mC'}
|
|
36
35
|
mods = [mod_map[mod] for mod in mod_list]
|
|
@@ -38,12 +37,27 @@ def bam_direct(fasta, output_directory, mod_list, thresholds, bam_path, split_di
|
|
|
38
37
|
os.chdir(output_directory)
|
|
39
38
|
|
|
40
39
|
# 1) Align the BAM to the reference FASTA. Also make an index and a bed file of mapped reads
|
|
41
|
-
|
|
40
|
+
if os.path.exists(aligned_output) and os.path.exists(aligned_sorted_output):
|
|
41
|
+
print(aligned_sorted_output + ' already exists. Using existing aligned/sorted BAM.')
|
|
42
|
+
else:
|
|
43
|
+
align_and_sort_BAM(fasta, bam_path, bam_suffix, output_directory)
|
|
42
44
|
# 2) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory
|
|
43
|
-
|
|
45
|
+
if os.path.isdir(split_dir):
|
|
46
|
+
print(split_dir + ' already exists. Using existing aligned/sorted/split BAMs.')
|
|
47
|
+
else:
|
|
48
|
+
make_dirs([split_dir])
|
|
49
|
+
split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory)
|
|
44
50
|
# 3) Using nanopore modkit to work with modified BAM files ###
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
51
|
+
if os.path.isdir(mod_bed_dir):
|
|
52
|
+
print(mod_bed_dir + ' already exists')
|
|
53
|
+
else:
|
|
54
|
+
make_dirs([mod_bed_dir])
|
|
55
|
+
modQC(aligned_sorted_output, thresholds) # get QC metrics for mod calls
|
|
56
|
+
make_modbed(aligned_sorted_output, thresholds, mod_bed_dir) # Generate bed files of position methylation summaries for every sample
|
|
57
|
+
if os.path.isdir(mod_tsv_dir):
|
|
58
|
+
print(mod_tsv_dir + ' already exists')
|
|
59
|
+
else:
|
|
60
|
+
make_dirs([mod_tsv_dir])
|
|
61
|
+
extract_mods(thresholds, mod_tsv_dir, split_dir, bam_suffix) # Extract methylations calls for split BAM files into split TSV files
|
|
48
62
|
#4 Load the modification data from TSVs into an adata object
|
|
49
|
-
modkit_extract_to_adata(fasta,
|
|
63
|
+
modkit_extract_to_adata(fasta, split_dir, mapping_threshold, experiment_name, mods, batch_size, mod_tsv_dir)
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
## basecalls_to_adata
|
|
2
|
+
|
|
3
|
+
def basecalls_to_adata(config_path):
|
|
4
|
+
"""
|
|
5
|
+
High-level function to call for loading basecalled SMF data from a BAM file into an adata object. Also works with FASTQ for conversion SMF.
|
|
6
|
+
|
|
7
|
+
Parameters:
|
|
8
|
+
config_path (str): A string representing the file path to the experiment configuration csv file.
|
|
9
|
+
|
|
10
|
+
Returns:
|
|
11
|
+
None
|
|
12
|
+
"""
|
|
13
|
+
from .helpers import LoadExperimentConfig, make_dirs
|
|
14
|
+
from .subsample_fasta_from_bed import subsample_fasta_from_bed
|
|
15
|
+
import os
|
|
16
|
+
import numpy as np
|
|
17
|
+
bam_suffix = '.bam' # If different, change from here.
|
|
18
|
+
split_dir = 'split_BAMs' # If different, change from here.
|
|
19
|
+
strands = ['bottom', 'top'] # If different, change from here. Having both listed generally doesn't slow things down too much.
|
|
20
|
+
conversions = ['unconverted'] # The name to use for the unconverted files. If different, change from here.
|
|
21
|
+
|
|
22
|
+
# Load experiment config parameters into global variables
|
|
23
|
+
experiment_config = LoadExperimentConfig(config_path)
|
|
24
|
+
var_dict = experiment_config.var_dict
|
|
25
|
+
|
|
26
|
+
# These below variables will point to the value np.nan if they are either empty in the experiment_config.csv or if the variable is fully omitted from the csv.
|
|
27
|
+
default_value = None
|
|
28
|
+
|
|
29
|
+
conversion_types = var_dict.get('conversion_types', default_value)
|
|
30
|
+
output_directory = var_dict.get('output_directory', default_value)
|
|
31
|
+
smf_modality = var_dict.get('smf_modality', default_value)
|
|
32
|
+
fasta = var_dict.get('fasta', default_value)
|
|
33
|
+
fasta_regions_of_interest = var_dict.get("fasta_regions_of_interest", default_value)
|
|
34
|
+
basecalled_path = var_dict.get('basecalled_path', default_value)
|
|
35
|
+
mapping_threshold = var_dict.get('mapping_threshold', default_value)
|
|
36
|
+
experiment_name = var_dict.get('experiment_name', default_value)
|
|
37
|
+
filter_threshold = var_dict.get('filter_threshold', default_value)
|
|
38
|
+
m6A_threshold = var_dict.get('m6A_threshold', default_value)
|
|
39
|
+
m5C_threshold = var_dict.get('m5C_threshold', default_value)
|
|
40
|
+
hm5C_threshold = var_dict.get('hm5C_threshold', default_value)
|
|
41
|
+
mod_list = var_dict.get('mod_list', default_value)
|
|
42
|
+
batch_size = var_dict.get('batch_size', default_value)
|
|
43
|
+
thresholds = [filter_threshold, m6A_threshold, m5C_threshold, hm5C_threshold]
|
|
44
|
+
|
|
45
|
+
split_path = os.path.join(output_directory, split_dir)
|
|
46
|
+
|
|
47
|
+
make_dirs([output_directory])
|
|
48
|
+
os.chdir(output_directory)
|
|
49
|
+
|
|
50
|
+
conversions += conversion_types
|
|
51
|
+
|
|
52
|
+
# If a bed file is passed, subsample the input FASTA on regions of interest and use the subsampled FASTA.
|
|
53
|
+
if fasta_regions_of_interest != None:
|
|
54
|
+
if '.bed' in fasta_regions_of_interest:
|
|
55
|
+
fasta_basename = os.path.basename(fasta)
|
|
56
|
+
bed_basename_minus_suffix = os.path.basename(fasta_regions_of_interest).split('.bed')[0]
|
|
57
|
+
output_FASTA = bed_basename_minus_suffix + '_' + fasta_basename
|
|
58
|
+
subsample_fasta_from_bed(fasta, fasta_regions_of_interest, output_directory, output_FASTA)
|
|
59
|
+
fasta = output_FASTA
|
|
60
|
+
|
|
61
|
+
if smf_modality == 'conversion':
|
|
62
|
+
from .bam_conversion import bam_conversion
|
|
63
|
+
bam_conversion(fasta, output_directory, conversions, strands, basecalled_path, split_path, mapping_threshold, experiment_name, bam_suffix)
|
|
64
|
+
elif smf_modality == 'direct':
|
|
65
|
+
if bam_suffix in basecalled_path:
|
|
66
|
+
from .bam_direct import bam_direct
|
|
67
|
+
bam_direct(fasta, output_directory, mod_list, thresholds, basecalled_path, split_path, mapping_threshold, experiment_name, bam_suffix, batch_size)
|
|
68
|
+
else:
|
|
69
|
+
print('basecalls_to_adata function only work with the direct modality when the input filetype is BAM and not FASTQ.')
|
|
70
|
+
else:
|
|
71
|
+
print("Error")
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
## conversion_smf
|
|
2
|
+
|
|
3
|
+
def conversion_smf(fasta, output_directory, conversion_types, strands, model, input_data_path, split_dir, barcode_kit, mapping_threshold, experiment_name, bam_suffix, basecall):
|
|
4
|
+
"""
|
|
5
|
+
Processes sequencing data from a conversion SMF experiment to an adata object.
|
|
6
|
+
|
|
7
|
+
Parameters:
|
|
8
|
+
fasta (str): File path to the reference genome to align to.
|
|
9
|
+
output_directory (str): A file path to the directory to output all the analyses.
|
|
10
|
+
conversion_type (list): A list of strings of the conversion types to use in the analysis.
|
|
11
|
+
strands (list): A list of converstion strands to use in the experiment.
|
|
12
|
+
model (str): a string representing the file path to the dorado basecalling model.
|
|
13
|
+
input_data_path (str): a string representing the file path to the experiment directory/file containing sequencing data
|
|
14
|
+
split_dir (str): A string representing the file path to the directory to split the BAMs into.
|
|
15
|
+
barcode_kit (str): A string representing the barcoding kit used in the experiment.
|
|
16
|
+
mapping_threshold (float): A value in between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
|
|
17
|
+
experiment_name (str): A string to provide an experiment name to the output adata file.
|
|
18
|
+
bam_suffix (str): A suffix to add to the bam file.
|
|
19
|
+
basecall (bool): Whether to go through basecalling or not.
|
|
20
|
+
|
|
21
|
+
Returns:
|
|
22
|
+
None
|
|
23
|
+
"""
|
|
24
|
+
from .helpers import align_and_sort_BAM, canoncall, converted_BAM_to_adata, generate_converted_FASTA, get_chromosome_lengths, split_and_index_BAM, make_dirs
|
|
25
|
+
import os
|
|
26
|
+
if basecall:
|
|
27
|
+
model_basename = os.path.basename(model)
|
|
28
|
+
model_basename = model_basename.replace('.', '_')
|
|
29
|
+
bam=f"{output_directory}/{model_basename}_canonical_basecalls"
|
|
30
|
+
else:
|
|
31
|
+
bam_base=os.path.basename(input_data_path).split('.bam')[0]
|
|
32
|
+
bam=os.path.join(output_directory, bam_base)
|
|
33
|
+
aligned_BAM=f"{bam}_aligned"
|
|
34
|
+
aligned_sorted_BAM=f"{aligned_BAM}_sorted"
|
|
35
|
+
|
|
36
|
+
os.chdir(output_directory)
|
|
37
|
+
|
|
38
|
+
# 1) Convert FASTA file
|
|
39
|
+
fasta_basename = os.path.basename(fasta)
|
|
40
|
+
converted_FASTA_basename = fasta_basename.split('.fa')[0]+'_converted.fasta'
|
|
41
|
+
converted_FASTA = os.path.join(output_directory, converted_FASTA_basename)
|
|
42
|
+
if 'converted.fa' in fasta:
|
|
43
|
+
print(fasta + ' is already converted. Using existing converted FASTA.')
|
|
44
|
+
converted_FASTA = fasta
|
|
45
|
+
elif os.path.exists(converted_FASTA):
|
|
46
|
+
print(converted_FASTA + ' already exists. Using existing converted FASTA.')
|
|
47
|
+
else:
|
|
48
|
+
generate_converted_FASTA(fasta, conversion_types, strands, converted_FASTA)
|
|
49
|
+
|
|
50
|
+
# Make a FAI and .chrom.names file for the converted fasta
|
|
51
|
+
get_chromosome_lengths(converted_FASTA)
|
|
52
|
+
|
|
53
|
+
# 2) Basecall from the input POD5 to generate a singular output BAM
|
|
54
|
+
if basecall:
|
|
55
|
+
canoncall_output = bam + bam_suffix
|
|
56
|
+
if os.path.exists(canoncall_output):
|
|
57
|
+
print(canoncall_output + ' already exists. Using existing basecalled BAM.')
|
|
58
|
+
else:
|
|
59
|
+
canoncall(model, input_data_path, barcode_kit, bam, bam_suffix)
|
|
60
|
+
else:
|
|
61
|
+
canoncall_output = input_data_path
|
|
62
|
+
|
|
63
|
+
# 3) Align the BAM to the converted reference FASTA and sort the bam on positional coordinates. Also make an index and a bed file of mapped reads
|
|
64
|
+
aligned_output = aligned_BAM + bam_suffix
|
|
65
|
+
sorted_output = aligned_sorted_BAM + bam_suffix
|
|
66
|
+
if os.path.exists(aligned_output) and os.path.exists(sorted_output):
|
|
67
|
+
print(sorted_output + ' already exists. Using existing aligned/sorted BAM.')
|
|
68
|
+
else:
|
|
69
|
+
align_and_sort_BAM(converted_FASTA, canoncall_output, bam_suffix, output_directory)
|
|
70
|
+
|
|
71
|
+
### 4) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory###
|
|
72
|
+
if os.path.isdir(split_dir):
|
|
73
|
+
print(split_dir + ' already exists. Using existing aligned/sorted/split BAMs.')
|
|
74
|
+
else:
|
|
75
|
+
make_dirs([split_dir])
|
|
76
|
+
split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory, converted_FASTA)
|
|
77
|
+
|
|
78
|
+
# 5) Take the converted BAM and load it into an adata object.
|
|
79
|
+
converted_BAM_to_adata(converted_FASTA, split_dir, mapping_threshold, experiment_name, conversion_types, bam_suffix)
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
## direct_smf
|
|
2
|
+
|
|
3
|
+
def direct_smf(fasta, output_directory, mod_list, model, thresholds, input_data_path, split_dir, barcode_kit, mapping_threshold, experiment_name, bam_suffix, batch_size, basecall):
|
|
4
|
+
"""
|
|
5
|
+
Processes sequencing data from a direct methylation detection Nanopore SMF experiment to an AnnData object.
|
|
6
|
+
|
|
7
|
+
Parameters:
|
|
8
|
+
fasta (str): File path to the reference genome to align to.
|
|
9
|
+
output_directory (str): A file path to the directory to output all the analyses.
|
|
10
|
+
mod_list (list): A list of strings of the modification types to use in the analysis.
|
|
11
|
+
model (str): a string representing the file path to the dorado basecalling model.
|
|
12
|
+
thresholds (list): A list of floats to pass for call thresholds.
|
|
13
|
+
input_data_path (str): a string representing the file path to the experiment directory containing the input sequencing files.
|
|
14
|
+
split_dir (str): A string representing the file path to the directory to split the BAMs into.
|
|
15
|
+
barcode_kit (str): A string representing the barcoding kit used in the experiment.
|
|
16
|
+
mapping_threshold (float): A value in between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
|
|
17
|
+
experiment_name (str): A string to provide an experiment name to the output adata file.
|
|
18
|
+
bam_suffix (str): A suffix to add to the bam file.
|
|
19
|
+
batch_size (int): An integer number of TSV files to analyze in memory at once while loading the final adata object.
|
|
20
|
+
basecall (bool): Whether to basecall
|
|
21
|
+
|
|
22
|
+
Returns:
|
|
23
|
+
None
|
|
24
|
+
"""
|
|
25
|
+
from .helpers import align_and_sort_BAM, extract_mods, get_chromosome_lengths, make_modbed, modcall, modkit_extract_to_adata, modQC, split_and_index_BAM, make_dirs
|
|
26
|
+
import os
|
|
27
|
+
|
|
28
|
+
if basecall:
|
|
29
|
+
model_basename = os.path.basename(model)
|
|
30
|
+
model_basename = model_basename.replace('.', '_')
|
|
31
|
+
mod_string = "_".join(mod_list)
|
|
32
|
+
bam=f"{output_directory}/{model_basename}_{mod_string}_calls"
|
|
33
|
+
else:
|
|
34
|
+
bam_base=os.path.basename(input_data_path).split('.bam')[0]
|
|
35
|
+
bam=os.path.join(output_directory, bam_base)
|
|
36
|
+
aligned_BAM=f"{bam}_aligned"
|
|
37
|
+
aligned_sorted_BAM=f"{aligned_BAM}_sorted"
|
|
38
|
+
mod_bed_dir=f"{output_directory}/split_mod_beds"
|
|
39
|
+
mod_tsv_dir=f"{output_directory}/split_mod_tsvs"
|
|
40
|
+
|
|
41
|
+
aligned_sorted_output = aligned_sorted_BAM + bam_suffix
|
|
42
|
+
mod_map = {'6mA': '6mA', '5mC_5hmC': '5mC'}
|
|
43
|
+
mods = [mod_map[mod] for mod in mod_list]
|
|
44
|
+
|
|
45
|
+
# Make a FAI and .chrom.names file for the fasta
|
|
46
|
+
get_chromosome_lengths(fasta)
|
|
47
|
+
|
|
48
|
+
os.chdir(output_directory)
|
|
49
|
+
|
|
50
|
+
# 1) Basecall using dorado
|
|
51
|
+
if basecall:
|
|
52
|
+
modcall_output = bam + bam_suffix
|
|
53
|
+
if os.path.exists(modcall_output):
|
|
54
|
+
print(modcall_output + ' already exists. Using existing basecalled BAM.')
|
|
55
|
+
else:
|
|
56
|
+
modcall(model, input_data_path, barcode_kit, mod_list, bam, bam_suffix)
|
|
57
|
+
else:
|
|
58
|
+
modcall_output = input_data_path
|
|
59
|
+
|
|
60
|
+
# 2) Align the BAM to the reference FASTA. Also make an index and a bed file of mapped reads
|
|
61
|
+
aligned_output = aligned_BAM + bam_suffix
|
|
62
|
+
sorted_output = aligned_sorted_BAM + bam_suffix
|
|
63
|
+
if os.path.exists(aligned_output) and os.path.exists(sorted_output):
|
|
64
|
+
print(sorted_output + ' already exists. Using existing aligned/sorted BAM.')
|
|
65
|
+
else:
|
|
66
|
+
align_and_sort_BAM(fasta, modcall_output, bam_suffix, output_directory)
|
|
67
|
+
|
|
68
|
+
# 3) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory
|
|
69
|
+
if os.path.isdir(split_dir):
|
|
70
|
+
print(split_dir + ' already exists. Using existing aligned/sorted/split BAMs.')
|
|
71
|
+
else:
|
|
72
|
+
make_dirs([split_dir])
|
|
73
|
+
split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory, fasta)
|
|
74
|
+
|
|
75
|
+
# 4) Using nanopore modkit to work with modified BAM files ###
|
|
76
|
+
if os.path.isdir(mod_bed_dir):
|
|
77
|
+
print(mod_bed_dir + ' already exists')
|
|
78
|
+
else:
|
|
79
|
+
make_dirs([mod_bed_dir])
|
|
80
|
+
modQC(aligned_sorted_output, thresholds) # get QC metrics for mod calls
|
|
81
|
+
make_modbed(aligned_sorted_output, thresholds, mod_bed_dir) # Generate bed files of position methylation summaries for every sample
|
|
82
|
+
if os.path.isdir(mod_tsv_dir):
|
|
83
|
+
print(mod_tsv_dir + ' already exists')
|
|
84
|
+
else:
|
|
85
|
+
make_dirs([mod_tsv_dir])
|
|
86
|
+
extract_mods(thresholds, mod_tsv_dir, split_dir, bam_suffix) # Extract methylations calls for split BAM files into split TSV files
|
|
87
|
+
|
|
88
|
+
#5 Load the modification data from TSVs into an adata object
|
|
89
|
+
modkit_extract_to_adata(fasta, split_dir, mapping_threshold, experiment_name, mods, batch_size, mod_tsv_dir)
|
|
@@ -1,19 +1,21 @@
|
|
|
1
1
|
# fast5_to_pod5
|
|
2
2
|
|
|
3
|
-
def fast5_to_pod5(fast5_dir,
|
|
3
|
+
def fast5_to_pod5(fast5_dir, output_pod5='FAST5s_to_POD5.pod5'):
|
|
4
4
|
"""
|
|
5
5
|
Convert Nanopore FAST5 files to POD5 file
|
|
6
6
|
|
|
7
7
|
Parameters:
|
|
8
8
|
fast5_dir (str): String representing the file path to a directory containing all FAST5 files to convert into a single POD5 output.
|
|
9
|
-
|
|
10
|
-
output_pod5 (str): The name of the output POD5 to write out within the output directory.
|
|
9
|
+
output_pod5 (str): The name of the output POD5.
|
|
11
10
|
|
|
12
11
|
Returns:
|
|
13
12
|
None
|
|
14
13
|
|
|
15
14
|
"""
|
|
16
15
|
import subprocess
|
|
17
|
-
import
|
|
18
|
-
|
|
19
|
-
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
|
|
18
|
+
if Path(fast5_dir).is_file():
|
|
19
|
+
subprocess.run(["pod5", "convert", "fast5", fast5_dir, "--output", output_pod5])
|
|
20
|
+
elif Path(fast5_dir).is_dir():
|
|
21
|
+
subprocess.run(["pod5", "convert", "fast5", f".{fast5_dir}*.fast5", "--output", output_pod5])
|
|
@@ -1,13 +1,20 @@
|
|
|
1
1
|
from .align_and_sort_BAM import align_and_sort_BAM
|
|
2
|
+
from .aligned_BAM_to_bed import aligned_BAM_to_bed
|
|
3
|
+
from .bed_to_bigwig import bed_to_bigwig
|
|
2
4
|
from .binarize_converted_base_identities import binarize_converted_base_identities
|
|
3
5
|
from .canoncall import canoncall
|
|
6
|
+
from .complement_base_list import complement_base_list
|
|
4
7
|
from .converted_BAM_to_adata import converted_BAM_to_adata
|
|
8
|
+
from .concatenate_fastqs_to_bam import concatenate_fastqs_to_bam
|
|
5
9
|
from .count_aligned_reads import count_aligned_reads
|
|
6
10
|
from .extract_base_identities import extract_base_identities
|
|
7
11
|
from .extract_mods import extract_mods
|
|
12
|
+
from .extract_readnames_from_BAM import extract_readnames_from_BAM
|
|
8
13
|
from .find_conversion_sites import find_conversion_sites
|
|
9
14
|
from .generate_converted_FASTA import convert_FASTA_record, generate_converted_FASTA
|
|
15
|
+
from .get_chromosome_lengths import get_chromosome_lengths
|
|
10
16
|
from .get_native_references import get_native_references
|
|
17
|
+
from .index_fasta import index_fasta
|
|
11
18
|
from .LoadExperimentConfig import LoadExperimentConfig
|
|
12
19
|
from .make_dirs import make_dirs
|
|
13
20
|
from .make_modbed import make_modbed
|
|
@@ -15,21 +22,30 @@ from .modcall import modcall
|
|
|
15
22
|
from .modkit_extract_to_adata import modkit_extract_to_adata
|
|
16
23
|
from .modQC import modQC
|
|
17
24
|
from .one_hot_encode import one_hot_encode
|
|
25
|
+
from .ohe_batching import ohe_batching
|
|
26
|
+
from .plot_read_length_and_coverage_histograms import plot_read_length_and_coverage_histograms
|
|
18
27
|
from .separate_bam_by_bc import separate_bam_by_bc
|
|
19
28
|
from .split_and_index_BAM import split_and_index_BAM
|
|
20
29
|
|
|
21
30
|
__all__ = [
|
|
22
31
|
"align_and_sort_BAM",
|
|
32
|
+
"aligned_BAM_to_bed",
|
|
33
|
+
"bed_to_bigwig",
|
|
23
34
|
"binarize_converted_base_identities",
|
|
24
35
|
"canoncall",
|
|
36
|
+
"complement_base_list",
|
|
25
37
|
"converted_BAM_to_adata",
|
|
38
|
+
"concatenate_fastqs_to_bam",
|
|
26
39
|
"count_aligned_reads",
|
|
27
40
|
"extract_base_identities",
|
|
28
41
|
"extract_mods",
|
|
42
|
+
"extract_readnames_from_BAM",
|
|
29
43
|
"find_conversion_sites",
|
|
30
44
|
"convert_FASTA_record",
|
|
31
45
|
"generate_converted_FASTA",
|
|
46
|
+
"get_chromosome_lengths",
|
|
32
47
|
"get_native_references",
|
|
48
|
+
"index_fasta",
|
|
33
49
|
"LoadExperimentConfig",
|
|
34
50
|
"make_dirs",
|
|
35
51
|
"make_modbed",
|
|
@@ -37,6 +53,8 @@ __all__ = [
|
|
|
37
53
|
"modkit_extract_to_adata",
|
|
38
54
|
"modQC",
|
|
39
55
|
"one_hot_encode",
|
|
56
|
+
"ohe_batching",
|
|
57
|
+
"plot_read_length_and_coverage_histograms",
|
|
40
58
|
"separate_bam_by_bc",
|
|
41
59
|
"split_and_index_BAM"
|
|
42
60
|
]
|
|
@@ -16,6 +16,9 @@ def align_and_sort_BAM(fasta, input, bam_suffix, output_directory):
|
|
|
16
16
|
"""
|
|
17
17
|
import subprocess
|
|
18
18
|
import os
|
|
19
|
+
from .aligned_BAM_to_bed import aligned_BAM_to_bed
|
|
20
|
+
from .extract_readnames_from_BAM import extract_readnames_from_BAM
|
|
21
|
+
from .make_dirs import make_dirs
|
|
19
22
|
input_basename = os.path.basename(input)
|
|
20
23
|
input_suffix = '.' + input_basename.split('.')[1]
|
|
21
24
|
|
|
@@ -27,7 +30,7 @@ def align_and_sort_BAM(fasta, input, bam_suffix, output_directory):
|
|
|
27
30
|
aligned_sorted_output = aligned_sorted_BAM + bam_suffix
|
|
28
31
|
|
|
29
32
|
# Run dorado aligner
|
|
30
|
-
subprocess.run(["dorado", "aligner", "--secondary
|
|
33
|
+
subprocess.run(["dorado", "aligner", "--secondary", "no", fasta, input], stdout=open(aligned_output, "w"))
|
|
31
34
|
|
|
32
35
|
# Sort the BAM on positional coordinates
|
|
33
36
|
subprocess.run(["samtools", "sort", "-o", aligned_sorted_output, aligned_output])
|
|
@@ -36,17 +39,10 @@ def align_and_sort_BAM(fasta, input, bam_suffix, output_directory):
|
|
|
36
39
|
subprocess.run(["samtools", "index", aligned_sorted_output])
|
|
37
40
|
|
|
38
41
|
# Make a bed file of coordinates for the BAM
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
awk_process.wait()
|
|
44
|
-
samtools_view.wait()
|
|
42
|
+
plotting_dir = os.path.join(output_directory, 'coverage_and_readlength_histograms')
|
|
43
|
+
bed_dir = os.path.join(output_directory, 'read_alignment_coordinates')
|
|
44
|
+
make_dirs([plotting_dir, bed_dir])
|
|
45
|
+
aligned_BAM_to_bed(aligned_sorted_output, plotting_dir, bed_dir, fasta)
|
|
45
46
|
|
|
46
47
|
# Make a text file of reads for the BAM
|
|
47
|
-
|
|
48
|
-
with open(f"{aligned_sorted_BAM}_read_names.txt", "w") as output_file:
|
|
49
|
-
cut_process = subprocess.Popen(["cut", "-f1"], stdin=samtools_view.stdout, stdout=output_file)
|
|
50
|
-
samtools_view.stdout.close()
|
|
51
|
-
cut_process.wait()
|
|
52
|
-
samtools_view.wait()
|
|
48
|
+
extract_readnames_from_BAM(aligned_sorted_output)
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
# aligned_BAM_to_bed

def aligned_BAM_to_bed(aligned_BAM, plotting_dir, bed_dir, fasta):
    """
    Takes an aligned BAM as input and writes a BED file of reads as output.
    BED columns are: record name, start position, end position, read length, read name.

    Parameters:
        aligned_BAM (str): Path to an input aligned BAM to extract to a BED file.
        plotting_dir (str): Path to write out read alignment length and coverage histograms.
        bed_dir (str): Path to write out read alignment coordinates.
        fasta (str): File path to the reference genome to align to.

    Returns:
        None
    """
    import subprocess
    import os
    from .bed_to_bigwig import bed_to_bigwig
    from .plot_read_length_and_coverage_histograms import plot_read_length_and_coverage_histograms

    bed_output_basename = os.path.basename(aligned_BAM).split('.bam')[0] + '_bed.bed'
    bed_output = os.path.join(bed_dir, bed_output_basename)

    # Stream BAM records through awk to build the BED file.
    # SAM POS ($4) is 1-based while BED is 0-based half-open, so
    # start = $4-1 and end = start + read length; the fourth column is the
    # read length itself. NOTE(review): length($10) is the SEQ length, which
    # includes soft-clipped bases, so this approximates the reference
    # alignment span; parsing the CIGAR string would be exact.
    samtools_view = subprocess.Popen(["samtools", "view", aligned_BAM], stdout=subprocess.PIPE)
    with open(bed_output, "w") as output_file:
        awk_process = subprocess.Popen(
            ["awk", '{print $3 "\t" $4-1 "\t" $4-1+length($10) "\t" length($10) "\t" $1}'],
            stdin=samtools_view.stdout,
            stdout=output_file,
        )
        samtools_view.stdout.close()
        awk_process.wait()
        samtools_view.wait()

    def split_bed(bed, delete_input=True):
        """
        Reads in a BED file and splits it into two separate BED files based on alignment status.

        Parameters:
            bed (str): Path to the input BED file.
            delete_input (bool): Whether to delete the input BED file.

        Returns:
            aligned (str): Path to the aligned BED file.
        """
        unaligned = bed.split('.bed')[0] + '_unaligned.bed'
        aligned = bed.split('.bed')[0] + '_aligned.bed'

        with open(bed, 'r') as infile, \
            open(unaligned, 'w') as unaligned_outfile, \
            open(aligned, 'w') as aligned_outfile:

            for line in infile:
                fields = line.strip().split('\t')
                # Unmapped reads carry '*' as the reference name (SAM RNAME).
                if fields[0] == '*':
                    unaligned_outfile.write(line)
                else:
                    aligned_outfile.write(line)

        if delete_input:
            os.remove(bed)

        return aligned

    aligned_bed = split_bed(bed_output)

    # Write out basic plots of reference coverage and read lengths
    plot_read_length_and_coverage_histograms(aligned_bed, plotting_dir)

    # Make a bedgraph and bigwig for the aligned reads
    bed_to_bigwig(fasta, aligned_bed)
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
# bed_to_bigwig

def bed_to_bigwig(fasta, bed):
    """
    Takes a bed file of reads and makes a bedgraph plus a bigwig.

    Parameters:
        fasta (str): File path to the reference genome to align to.
        bed (str): File path to the input bed.

    Returns:
        None
    """
    import os
    import subprocess

    # Derive output locations from the bed and fasta paths.
    parent_dir, bed_basename = os.path.split(bed)
    bed_stem = bed_basename.split('.bed')[0]
    fasta_dir, fasta_basename = os.path.split(fasta)
    fasta_stem = fasta_basename.split('.fa')[0]

    # NOTE(review): the chrom.sizes file is assumed to already exist next to
    # the reference FASTA (presumably produced elsewhere in the pipeline).
    chrom_path = os.path.join(fasta_dir, fasta_stem + '.chrom.sizes')
    bedgraph_basename = bed_stem + '_bedgraph.bedgraph'
    bedgraph_output = os.path.join(parent_dir, bedgraph_basename)
    bigwig_output = os.path.join(parent_dir, bed_stem + '_bigwig.bw')

    # Make the bedgraph
    with open(bedgraph_output, 'w') as outfile:
        print(f'Making bedgraph from {bed_basename}')
        subprocess.run(
            ["bedtools", "genomecov", "-i", bed, "-g", chrom_path, "-bg"],
            stdout=outfile,
        )

    # Make the bigwig
    print(f'Making bigwig from {bedgraph_basename}')
    subprocess.run(["bedGraphToBigWig", bedgraph_output, chrom_path, bigwig_output])