smftools 0.1.0__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- smftools/__init__.py +0 -2
- smftools/_settings.py +3 -2
- smftools/_version.py +1 -0
- smftools/datasets/F1_sample_sheet.csv +5 -0
- smftools/datasets/datasets.py +14 -11
- smftools/informatics/__init__.py +10 -7
- smftools/informatics/archived/bam_conversion.py +59 -0
- smftools/informatics/archived/bam_direct.py +63 -0
- smftools/informatics/archived/basecalls_to_adata.py +71 -0
- smftools/informatics/conversion_smf.py +79 -0
- smftools/informatics/direct_smf.py +89 -0
- smftools/informatics/fast5_to_pod5.py +21 -0
- smftools/informatics/helpers/LoadExperimentConfig.py +74 -0
- smftools/informatics/helpers/__init__.py +22 -4
- smftools/informatics/helpers/align_and_sort_BAM.py +48 -0
- smftools/informatics/helpers/aligned_BAM_to_bed.py +73 -0
- smftools/informatics/helpers/bed_to_bigwig.py +39 -0
- smftools/informatics/helpers/binarize_converted_base_identities.py +11 -4
- smftools/informatics/helpers/canoncall.py +14 -1
- smftools/informatics/helpers/complement_base_list.py +21 -0
- smftools/informatics/helpers/concatenate_fastqs_to_bam.py +54 -0
- smftools/informatics/helpers/converted_BAM_to_adata.py +183 -97
- smftools/informatics/helpers/count_aligned_reads.py +25 -14
- smftools/informatics/helpers/extract_base_identities.py +44 -23
- smftools/informatics/helpers/extract_mods.py +17 -5
- smftools/informatics/helpers/extract_readnames_from_BAM.py +22 -0
- smftools/informatics/helpers/find_conversion_sites.py +24 -16
- smftools/informatics/helpers/generate_converted_FASTA.py +60 -21
- smftools/informatics/helpers/get_chromosome_lengths.py +32 -0
- smftools/informatics/helpers/get_native_references.py +10 -7
- smftools/informatics/helpers/index_fasta.py +12 -0
- smftools/informatics/helpers/make_dirs.py +9 -3
- smftools/informatics/helpers/make_modbed.py +10 -4
- smftools/informatics/helpers/modQC.py +10 -2
- smftools/informatics/helpers/modcall.py +16 -2
- smftools/informatics/helpers/modkit_extract_to_adata.py +486 -323
- smftools/informatics/helpers/ohe_batching.py +52 -0
- smftools/informatics/helpers/one_hot_encode.py +15 -8
- smftools/informatics/helpers/plot_read_length_and_coverage_histograms.py +52 -0
- smftools/informatics/helpers/separate_bam_by_bc.py +20 -5
- smftools/informatics/helpers/split_and_index_BAM.py +31 -11
- smftools/informatics/load_adata.py +127 -0
- smftools/informatics/readwrite.py +13 -16
- smftools/informatics/subsample_fasta_from_bed.py +47 -0
- smftools/informatics/subsample_pod5.py +104 -0
- smftools/preprocessing/__init__.py +6 -7
- smftools/preprocessing/append_C_context.py +52 -22
- smftools/preprocessing/binarize_on_Youden.py +8 -4
- smftools/preprocessing/binary_layers_to_ohe.py +9 -4
- smftools/preprocessing/calculate_complexity.py +26 -14
- smftools/preprocessing/calculate_consensus.py +47 -0
- smftools/preprocessing/calculate_converted_read_methylation_stats.py +69 -11
- smftools/preprocessing/calculate_coverage.py +14 -8
- smftools/preprocessing/calculate_pairwise_hamming_distances.py +11 -6
- smftools/preprocessing/calculate_position_Youden.py +21 -12
- smftools/preprocessing/calculate_read_length_stats.py +67 -8
- smftools/preprocessing/clean_NaN.py +13 -6
- smftools/preprocessing/filter_converted_reads_on_methylation.py +15 -6
- smftools/preprocessing/filter_reads_on_length.py +16 -6
- smftools/preprocessing/invert_adata.py +10 -5
- smftools/preprocessing/load_sample_sheet.py +24 -0
- smftools/preprocessing/make_dirs.py +21 -0
- smftools/preprocessing/mark_duplicates.py +54 -30
- smftools/preprocessing/min_non_diagonal.py +9 -4
- smftools/preprocessing/recipes.py +125 -0
- smftools/preprocessing/remove_duplicates.py +15 -6
- smftools/readwrite.py +13 -16
- smftools/tools/apply_HMM.py +1 -0
- smftools/tools/cluster.py +0 -0
- smftools/tools/read_HMM.py +1 -0
- smftools/tools/subset_adata.py +32 -0
- smftools/tools/train_HMM.py +43 -0
- smftools-0.1.3.dist-info/METADATA +94 -0
- smftools-0.1.3.dist-info/RECORD +84 -0
- smftools/informatics/helpers/align_BAM.py +0 -49
- smftools/informatics/helpers/load_experiment_config.py +0 -17
- smftools/informatics/pod5_conversion.py +0 -26
- smftools/informatics/pod5_direct.py +0 -29
- smftools/informatics/pod5_to_adata.py +0 -17
- smftools-0.1.0.dist-info/METADATA +0 -75
- smftools-0.1.0.dist-info/RECORD +0 -58
- /smftools/informatics/helpers/{informatics.py → archived/informatics.py} +0 -0
- /smftools/informatics/helpers/{load_adata.py → archived/load_adata.py} +0 -0
- /smftools/preprocessing/{preprocessing.py → archives/preprocessing.py} +0 -0
- {smftools-0.1.0.dist-info → smftools-0.1.3.dist-info}/WHEEL +0 -0
- {smftools-0.1.0.dist-info → smftools-0.1.3.dist-info}/licenses/LICENSE +0 -0
smftools/__init__.py
CHANGED

@@ -3,7 +3,6 @@
 import logging
 import warnings

-from anndata import AnnData
 from . import informatics as inform
 from . import preprocessing as pp
 from . import tools as tl
@@ -17,7 +16,6 @@ package_name = "smftools"
 __version__ = version(package_name)

 __all__ = [
-    "AnnData",
     "inform",
     "pp",
     "tl",

smftools/_settings.py
CHANGED

@@ -1,4 +1,5 @@
 from pathlib import Path
+from typing import Union

 class SMFConfig:
     """\
@@ -8,9 +9,9 @@ class SMFConfig:
     def __init__(
         self,
         *,
-        datasetdir: Path
+        datasetdir: Union[Path, str] = "./datasets/"
     ):
-        self.
+        self._datasetdir = Path(datasetdir) if isinstance(datasetdir, str) else datasetdir

     @property
     def datasetdir(self) -> Path:

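Based on the new signature above, SMFConfig now accepts either a str or a Path for datasetdir and defaults to "./datasets/". A minimal usage sketch (the import path is assumed for illustration):

    from pathlib import Path
    from smftools._settings import SMFConfig  # assumed import path for this sketch

    cfg = SMFConfig(datasetdir="my_datasets")  # a str is coerced to Path internally
    assert isinstance(cfg.datasetdir, Path)

    cfg_default = SMFConfig()                  # falls back to the "./datasets/" default
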
smftools/_version.py
ADDED

@@ -0,0 +1 @@
+__version__ = "0.1.3"

smftools/datasets/datasets.py
CHANGED

@@ -1,25 +1,28 @@
 ## datasets

-
-
-
-
-
-from .._settings import settings
-
-HERE
-
+def import_HERE():
+    """
+    Imports HERE for loading datasets
+    """
+    from pathlib import Path
+    from .._settings import settings
+    HERE = Path(__file__).parent
+    return HERE

 def dCas9_kinetics():
     """
-
+    in vitro Hia5 dCas9 kinetics SMF dataset. Nanopore HAC m6A modcalls.
     """
+    import anndata as ad
+    HERE = import_HERE()
     filepath = HERE / "dCas9_m6A_invitro_kinetics.h5ad.gz"
     return ad.read_h5ad(filepath)

 def Kissiov_and_McKenna_2025():
     """
-
+    F1 Hybrid M.CviPI natural killer cell SMF. Nanopore canonical calls of NEB EMseq converted SMF gDNA.
     """
+    import anndata as ad
+    HERE = import_HERE()
     filepath = HERE / "F1_hybrid_NKG2A_enhander_promoter_GpC_conversion_SMF.h5ad.gz"
     return ad.read_h5ad(filepath)

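With the lazy imports above, each loader resolves the bundled file path through import_HERE() and reads it with anndata. A hedged usage sketch (whether these functions are re-exported by the datasets subpackage __init__ is not shown in this diff, so the full module path is used):

    from smftools.datasets.datasets import dCas9_kinetics, Kissiov_and_McKenna_2025

    adata_kinetics = dCas9_kinetics()      # AnnData with the Hia5/dCas9 m6A kinetics data
    adata_f1 = Kissiov_and_McKenna_2025()  # AnnData with the F1 hybrid GpC conversion SMF data
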
smftools/informatics/__init__.py
CHANGED

@@ -1,11 +1,14 @@
 from . import helpers
-from .
-from .
-from .
+from .load_adata import load_adata
+from .subsample_fasta_from_bed import subsample_fasta_from_bed
+from .subsample_pod5 import subsample_pod5
+from .fast5_to_pod5 import fast5_to_pod5
+

 __all__ = [
-    "
-    "
-    "
-    "
+    "load_adata",
+    "subsample_fasta_from_bed",
+    "subsample_pod5",
+    "fast5_to_pod5",
+    "helpers"
 ]

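Per the updated __all__ above, the 0.1.3 informatics subpackage re-exports four top-level entry points alongside helpers:

    from smftools.informatics import (
        load_adata,
        subsample_fasta_from_bed,
        subsample_pod5,
        fast5_to_pod5,
    )
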
smftools/informatics/archived/bam_conversion.py
ADDED

@@ -0,0 +1,59 @@
+## bam_conversion
+
+def bam_conversion(fasta, output_directory, conversion_types, strands, basecalled_path, split_dir, mapping_threshold, experiment_name, bam_suffix):
+    """
+    Converts a BAM file from a nanopore conversion SMF experiment to an adata object.
+
+    Parameters:
+        fasta (str): File path to the reference genome to align to.
+        output_directory (str): A file path to the directory to output all the analyses.
+        conversion_type (list): A list of strings of the conversion types to use in the analysis.
+        strands (list): A list of converstion strands to use in the experiment.
+        basecalled_path (str): a string representing the file path to the experiment BAM or FASTQ file.
+        split_dir (str): A string representing the file path to the directory to split the BAMs into.
+        mapping_threshold (float): A value in between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
+        experiment_name (str): A string to provide an experiment name to the output adata file.
+        bam_suffix (str): A suffix to add to the bam file.
+
+    Returns:
+        None
+    """
+    from .helpers import align_and_sort_BAM, converted_BAM_to_adata, generate_converted_FASTA, split_and_index_BAM, make_dirs
+    import os
+    input_basecalled_basename = os.path.basename(basecalled_path)
+    bam_basename = input_basecalled_basename.split(".")[0]
+    output_bam=f"{output_directory}/{bam_basename}"
+    aligned_BAM=f"{output_bam}_aligned"
+    aligned_sorted_BAM=f"{aligned_BAM}_sorted"
+
+    os.chdir(output_directory)
+
+    # 1) Convert FASTA file
+    fasta_basename = os.path.basename(fasta)
+    converted_FASTA_basename = fasta_basename.split('.fa')[0]+'_converted.fasta'
+    converted_FASTA = os.path.join(output_directory, converted_FASTA_basename)
+    if 'converted.fa' in fasta:
+        print(fasta + ' is already converted. Using existing converted FASTA.')
+        converted_FASTA = fasta
+    elif os.path.exists(converted_FASTA):
+        print(converted_FASTA + ' already exists. Using existing converted FASTA.')
+    else:
+        generate_converted_FASTA(fasta, conversion_types, strands, converted_FASTA)
+
+    # 2) Align the basecalled file to the converted reference FASTA and sort the bam on positional coordinates. Also make an index and a bed file of mapped reads
+    aligned_output = aligned_BAM + bam_suffix
+    sorted_output = aligned_sorted_BAM + bam_suffix
+    if os.path.exists(aligned_output) and os.path.exists(sorted_output):
+        print(sorted_output + ' already exists. Using existing aligned/sorted BAM.')
+    else:
+        align_and_sort_BAM(converted_FASTA, basecalled_path, bam_suffix, output_directory)
+
+    ### 3) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory###
+    if os.path.isdir(split_dir):
+        print(split_dir + ' already exists. Using existing aligned/sorted/split BAMs.')
+    else:
+        make_dirs([split_dir])
+        split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory)
+
+    # 4) Take the converted BAM and load it into an adata object.
+    converted_BAM_to_adata(converted_FASTA, split_dir, mapping_threshold, experiment_name, conversion_types, bam_suffix)

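As a standalone illustration of the converted-FASTA naming convention in step 1 of the archived bam_conversion above (the input path is hypothetical):

    import os

    fasta = "refs/amplicon.fasta"                     # hypothetical reference path
    fasta_basename = os.path.basename(fasta)          # 'amplicon.fasta'
    converted_FASTA_basename = fasta_basename.split('.fa')[0] + '_converted.fasta'
    print(converted_FASTA_basename)                   # prints 'amplicon_converted.fasta'
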
smftools/informatics/archived/bam_direct.py
ADDED

@@ -0,0 +1,63 @@
+## bam_direct
+
+def bam_direct(fasta, output_directory, mod_list, thresholds, bam_path, split_dir, mapping_threshold, experiment_name, bam_suffix, batch_size):
+    """
+    Converts a POD5 file from a nanopore native SMF experiment to an adata object.
+
+    Parameters:
+        fasta (str): File path to the reference genome to align to.
+        output_directory (str): A file path to the directory to output all the analyses.
+        mod_list (list): A list of strings of the modification types to use in the analysis.
+        thresholds (list): A list of floats to pass for call thresholds.
+        bam_path (str): a string representing the file path to the the BAM file.
+        split_dir (str): A string representing the file path to the directory to split the BAMs into.
+        mapping_threshold (float): A value in between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
+        experiment_name (str): A string to provide an experiment name to the output adata file.
+        bam_suffix (str): A suffix to add to the bam file.
+        batch_size (int): An integer number of TSV files to analyze in memory at once while loading the final adata object.
+
+    Returns:
+        None
+    """
+    from .helpers import align_and_sort_BAM, extract_mods, make_modbed, modkit_extract_to_adata, modQC, split_and_index_BAM, make_dirs
+    import os
+    input_bam_base = os.path.basename(bam_path)
+    bam_basename = input_bam_base.split(bam_suffix)[0]
+    output_bam=f"{output_directory}/{bam_basename}"
+    aligned_BAM=f"{output_bam}_aligned"
+    aligned_sorted_BAM=f"{aligned_BAM}_sorted"
+    mod_bed_dir=f"{output_directory}/split_mod_beds"
+    mod_tsv_dir=f"{output_directory}/split_mod_tsvs"
+
+    aligned_output = aligned_BAM + bam_suffix
+    aligned_sorted_output = aligned_sorted_BAM + bam_suffix
+    mod_map = {'6mA': '6mA', '5mC_5hmC': '5mC'}
+    mods = [mod_map[mod] for mod in mod_list]
+
+    os.chdir(output_directory)
+
+    # 1) Align the BAM to the reference FASTA. Also make an index and a bed file of mapped reads
+    if os.path.exists(aligned_output) and os.path.exists(aligned_sorted_output):
+        print(aligned_sorted_output + ' already exists. Using existing aligned/sorted BAM.')
+    else:
+        align_and_sort_BAM(fasta, bam_path, bam_suffix, output_directory)
+    # 2) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory
+    if os.path.isdir(split_dir):
+        print(split_dir + ' already exists. Using existing aligned/sorted/split BAMs.')
+    else:
+        make_dirs([split_dir])
+        split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory)
+    # 3) Using nanopore modkit to work with modified BAM files ###
+    if os.path.isdir(mod_bed_dir):
+        print(mod_bed_dir + ' already exists')
+    else:
+        make_dirs([mod_bed_dir])
+        modQC(aligned_sorted_output, thresholds) # get QC metrics for mod calls
+        make_modbed(aligned_sorted_output, thresholds, mod_bed_dir) # Generate bed files of position methylation summaries for every sample
+    if os.path.isdir(mod_tsv_dir):
+        print(mod_tsv_dir + ' already exists')
+    else:
+        make_dirs([mod_tsv_dir])
+        extract_mods(thresholds, mod_tsv_dir, split_dir, bam_suffix) # Extract methylations calls for split BAM files into split TSV files
+    #4 Load the modification data from TSVs into an adata object
+    modkit_extract_to_adata(fasta, split_dir, mapping_threshold, experiment_name, mods, batch_size, mod_tsv_dir)

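The mod_map lookup above maps the experiment's modification labels to the short names used downstream; a runnable illustration (mod_map and its keys are taken from the source above, mod_list is a hypothetical input):

    mod_map = {'6mA': '6mA', '5mC_5hmC': '5mC'}
    mod_list = ['6mA', '5mC_5hmC']
    mods = [mod_map[mod] for mod in mod_list]
    print(mods)  # prints ['6mA', '5mC']
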
smftools/informatics/archived/basecalls_to_adata.py
ADDED

@@ -0,0 +1,71 @@
+## basecalls_to_adata
+
+def basecalls_to_adata(config_path):
+    """
+    High-level function to call for loading basecalled SMF data from a BAM file into an adata object. Also works with FASTQ for conversion SMF.
+
+    Parameters:
+        config_path (str): A string representing the file path to the experiment configuration csv file.
+
+    Returns:
+        None
+    """
+    from .helpers import LoadExperimentConfig, make_dirs
+    from .subsample_fasta_from_bed import subsample_fasta_from_bed
+    import os
+    import numpy as np
+    bam_suffix = '.bam' # If different, change from here.
+    split_dir = 'split_BAMs' # If different, change from here.
+    strands = ['bottom', 'top'] # If different, change from here. Having both listed generally doesn't slow things down too much.
+    conversions = ['unconverted'] # The name to use for the unconverted files. If different, change from here.
+
+    # Load experiment config parameters into global variables
+    experiment_config = LoadExperimentConfig(config_path)
+    var_dict = experiment_config.var_dict
+
+    # These below variables will point to the value np.nan if they are either empty in the experiment_config.csv or if the variable is fully omitted from the csv.
+    default_value = None
+
+    conversion_types = var_dict.get('conversion_types', default_value)
+    output_directory = var_dict.get('output_directory', default_value)
+    smf_modality = var_dict.get('smf_modality', default_value)
+    fasta = var_dict.get('fasta', default_value)
+    fasta_regions_of_interest = var_dict.get("fasta_regions_of_interest", default_value)
+    basecalled_path = var_dict.get('basecalled_path', default_value)
+    mapping_threshold = var_dict.get('mapping_threshold', default_value)
+    experiment_name = var_dict.get('experiment_name', default_value)
+    filter_threshold = var_dict.get('filter_threshold', default_value)
+    m6A_threshold = var_dict.get('m6A_threshold', default_value)
+    m5C_threshold = var_dict.get('m5C_threshold', default_value)
+    hm5C_threshold = var_dict.get('hm5C_threshold', default_value)
+    mod_list = var_dict.get('mod_list', default_value)
+    batch_size = var_dict.get('batch_size', default_value)
+    thresholds = [filter_threshold, m6A_threshold, m5C_threshold, hm5C_threshold]
+
+    split_path = os.path.join(output_directory, split_dir)
+
+    make_dirs([output_directory])
+    os.chdir(output_directory)
+
+    conversions += conversion_types
+
+    # If a bed file is passed, subsample the input FASTA on regions of interest and use the subsampled FASTA.
+    if fasta_regions_of_interest != None:
+        if '.bed' in fasta_regions_of_interest:
+            fasta_basename = os.path.basename(fasta)
+            bed_basename_minus_suffix = os.path.basename(fasta_regions_of_interest).split('.bed')[0]
+            output_FASTA = bed_basename_minus_suffix + '_' + fasta_basename
+            subsample_fasta_from_bed(fasta, fasta_regions_of_interest, output_directory, output_FASTA)
+            fasta = output_FASTA
+
+    if smf_modality == 'conversion':
+        from .bam_conversion import bam_conversion
+        bam_conversion(fasta, output_directory, conversions, strands, basecalled_path, split_path, mapping_threshold, experiment_name, bam_suffix)
+    elif smf_modality == 'direct':
+        if bam_suffix in basecalled_path:
+            from .bam_direct import bam_direct
+            bam_direct(fasta, output_directory, mod_list, thresholds, basecalled_path, split_path, mapping_threshold, experiment_name, bam_suffix, batch_size)
+        else:
+            print('basecalls_to_adata function only work with the direct modality when the input filetype is BAM and not FASTQ.')
+    else:
+        print("Error")

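The keys read through var_dict.get(...) above imply an experiment_config.csv in the same variable,value,type form shown in the LoadExperimentConfig example later in this diff. A hedged sketch of writing such a file (all values below are illustrative assumptions, not values from the package):

    # Write an illustrative experiment_config.csv for a conversion-modality run
    rows = [
        "variable,value,type",
        "smf_modality,conversion,string",
        "fasta,/path/to/reference.fasta,string",
        "output_directory,/path/to/output,string",
        "basecalled_path,/path/to/basecalls.bam,string",
        'conversion_types,"[5mC]",list',
        "mapping_threshold,0.05,float",
        "experiment_name,example_conversion_SMF,string",
        "batch_size,4,int",
    ]
    with open("experiment_config.csv", "w") as handle:
        handle.write("\n".join(rows) + "\n")
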
smftools/informatics/conversion_smf.py
ADDED

@@ -0,0 +1,79 @@
+## conversion_smf
+
+def conversion_smf(fasta, output_directory, conversion_types, strands, model, input_data_path, split_dir, barcode_kit, mapping_threshold, experiment_name, bam_suffix, basecall):
+    """
+    Processes sequencing data from a conversion SMF experiment to an adata object.
+
+    Parameters:
+        fasta (str): File path to the reference genome to align to.
+        output_directory (str): A file path to the directory to output all the analyses.
+        conversion_type (list): A list of strings of the conversion types to use in the analysis.
+        strands (list): A list of converstion strands to use in the experiment.
+        model (str): a string representing the file path to the dorado basecalling model.
+        input_data_path (str): a string representing the file path to the experiment directory/file containing sequencing data
+        split_dir (str): A string representing the file path to the directory to split the BAMs into.
+        barcode_kit (str): A string representing the barcoding kit used in the experiment.
+        mapping_threshold (float): A value in between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
+        experiment_name (str): A string to provide an experiment name to the output adata file.
+        bam_suffix (str): A suffix to add to the bam file.
+        basecall (bool): Whether to go through basecalling or not.
+
+    Returns:
+        None
+    """
+    from .helpers import align_and_sort_BAM, canoncall, converted_BAM_to_adata, generate_converted_FASTA, get_chromosome_lengths, split_and_index_BAM, make_dirs
+    import os
+    if basecall:
+        model_basename = os.path.basename(model)
+        model_basename = model_basename.replace('.', '_')
+        bam=f"{output_directory}/{model_basename}_canonical_basecalls"
+    else:
+        bam_base=os.path.basename(input_data_path).split('.bam')[0]
+        bam=os.path.join(output_directory, bam_base)
+    aligned_BAM=f"{bam}_aligned"
+    aligned_sorted_BAM=f"{aligned_BAM}_sorted"
+
+    os.chdir(output_directory)
+
+    # 1) Convert FASTA file
+    fasta_basename = os.path.basename(fasta)
+    converted_FASTA_basename = fasta_basename.split('.fa')[0]+'_converted.fasta'
+    converted_FASTA = os.path.join(output_directory, converted_FASTA_basename)
+    if 'converted.fa' in fasta:
+        print(fasta + ' is already converted. Using existing converted FASTA.')
+        converted_FASTA = fasta
+    elif os.path.exists(converted_FASTA):
+        print(converted_FASTA + ' already exists. Using existing converted FASTA.')
+    else:
+        generate_converted_FASTA(fasta, conversion_types, strands, converted_FASTA)
+
+    # Make a FAI and .chrom.names file for the converted fasta
+    get_chromosome_lengths(converted_FASTA)
+
+    # 2) Basecall from the input POD5 to generate a singular output BAM
+    if basecall:
+        canoncall_output = bam + bam_suffix
+        if os.path.exists(canoncall_output):
+            print(canoncall_output + ' already exists. Using existing basecalled BAM.')
+        else:
+            canoncall(model, input_data_path, barcode_kit, bam, bam_suffix)
+    else:
+        canoncall_output = input_data_path
+
+    # 3) Align the BAM to the converted reference FASTA and sort the bam on positional coordinates. Also make an index and a bed file of mapped reads
+    aligned_output = aligned_BAM + bam_suffix
+    sorted_output = aligned_sorted_BAM + bam_suffix
+    if os.path.exists(aligned_output) and os.path.exists(sorted_output):
+        print(sorted_output + ' already exists. Using existing aligned/sorted BAM.')
+    else:
+        align_and_sort_BAM(converted_FASTA, canoncall_output, bam_suffix, output_directory)
+
+    ### 4) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory###
+    if os.path.isdir(split_dir):
+        print(split_dir + ' already exists. Using existing aligned/sorted/split BAMs.')
+    else:
+        make_dirs([split_dir])
+        split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory, converted_FASTA)
+
+    # 5) Take the converted BAM and load it into an adata object.
+    converted_BAM_to_adata(converted_FASTA, split_dir, mapping_threshold, experiment_name, conversion_types, bam_suffix)

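A hedged sketch of calling conversion_smf directly; every path, model name, and kit name below is an illustrative assumption, and in practice this function is driven by the package's config-based entry points:

    from smftools.informatics.conversion_smf import conversion_smf

    conversion_smf(
        fasta="refs/reference.fasta",                       # hypothetical reference
        output_directory="analysis_out",
        conversion_types=["unconverted", "5mC"],            # illustrative conversion labels
        strands=["bottom", "top"],
        model="models/dna_r10.4.1_e8.2_400bps_hac@v4.2.0",  # assumed dorado model path
        input_data_path="runs/pod5/",                       # POD5 input to basecall
        split_dir="analysis_out/split_BAMs",
        barcode_kit="SQK-NBD114-24",                        # assumed barcoding kit name
        mapping_threshold=0.05,
        experiment_name="example_conversion_SMF",
        bam_suffix=".bam",
        basecall=True,
    )
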
smftools/informatics/direct_smf.py
ADDED

@@ -0,0 +1,89 @@
+## direct_smf
+
+def direct_smf(fasta, output_directory, mod_list, model, thresholds, input_data_path, split_dir, barcode_kit, mapping_threshold, experiment_name, bam_suffix, batch_size, basecall):
+    """
+    Processes sequencing data from a direct methylation detection Nanopore SMF experiment to an AnnData object.
+
+    Parameters:
+        fasta (str): File path to the reference genome to align to.
+        output_directory (str): A file path to the directory to output all the analyses.
+        mod_list (list): A list of strings of the modification types to use in the analysis.
+        model (str): a string representing the file path to the dorado basecalling model.
+        thresholds (list): A list of floats to pass for call thresholds.
+        input_data_path (str): a string representing the file path to the experiment directory containing the input sequencing files.
+        split_dir (str): A string representing the file path to the directory to split the BAMs into.
+        barcode_kit (str): A string representing the barcoding kit used in the experiment.
+        mapping_threshold (float): A value in between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
+        experiment_name (str): A string to provide an experiment name to the output adata file.
+        bam_suffix (str): A suffix to add to the bam file.
+        batch_size (int): An integer number of TSV files to analyze in memory at once while loading the final adata object.
+        basecall (bool): Whether to basecall
+
+    Returns:
+        None
+    """
+    from .helpers import align_and_sort_BAM, extract_mods, get_chromosome_lengths, make_modbed, modcall, modkit_extract_to_adata, modQC, split_and_index_BAM, make_dirs
+    import os
+
+    if basecall:
+        model_basename = os.path.basename(model)
+        model_basename = model_basename.replace('.', '_')
+        mod_string = "_".join(mod_list)
+        bam=f"{output_directory}/{model_basename}_{mod_string}_calls"
+    else:
+        bam_base=os.path.basename(input_data_path).split('.bam')[0]
+        bam=os.path.join(output_directory, bam_base)
+    aligned_BAM=f"{bam}_aligned"
+    aligned_sorted_BAM=f"{aligned_BAM}_sorted"
+    mod_bed_dir=f"{output_directory}/split_mod_beds"
+    mod_tsv_dir=f"{output_directory}/split_mod_tsvs"
+
+    aligned_sorted_output = aligned_sorted_BAM + bam_suffix
+    mod_map = {'6mA': '6mA', '5mC_5hmC': '5mC'}
+    mods = [mod_map[mod] for mod in mod_list]
+
+    # Make a FAI and .chrom.names file for the fasta
+    get_chromosome_lengths(fasta)
+
+    os.chdir(output_directory)
+
+    # 1) Basecall using dorado
+    if basecall:
+        modcall_output = bam + bam_suffix
+        if os.path.exists(modcall_output):
+            print(modcall_output + ' already exists. Using existing basecalled BAM.')
+        else:
+            modcall(model, input_data_path, barcode_kit, mod_list, bam, bam_suffix)
+    else:
+        modcall_output = input_data_path
+
+    # 2) Align the BAM to the reference FASTA. Also make an index and a bed file of mapped reads
+    aligned_output = aligned_BAM + bam_suffix
+    sorted_output = aligned_sorted_BAM + bam_suffix
+    if os.path.exists(aligned_output) and os.path.exists(sorted_output):
+        print(sorted_output + ' already exists. Using existing aligned/sorted BAM.')
+    else:
+        align_and_sort_BAM(fasta, modcall_output, bam_suffix, output_directory)
+
+    # 3) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory
+    if os.path.isdir(split_dir):
+        print(split_dir + ' already exists. Using existing aligned/sorted/split BAMs.')
+    else:
+        make_dirs([split_dir])
+        split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory, fasta)
+
+    # 4) Using nanopore modkit to work with modified BAM files ###
+    if os.path.isdir(mod_bed_dir):
+        print(mod_bed_dir + ' already exists')
+    else:
+        make_dirs([mod_bed_dir])
+        modQC(aligned_sorted_output, thresholds) # get QC metrics for mod calls
+        make_modbed(aligned_sorted_output, thresholds, mod_bed_dir) # Generate bed files of position methylation summaries for every sample
+    if os.path.isdir(mod_tsv_dir):
+        print(mod_tsv_dir + ' already exists')
+    else:
+        make_dirs([mod_tsv_dir])
+        extract_mods(thresholds, mod_tsv_dir, split_dir, bam_suffix) # Extract methylations calls for split BAM files into split TSV files
+
+    #5 Load the modification data from TSVs into an adata object
+    modkit_extract_to_adata(fasta, split_dir, mapping_threshold, experiment_name, mods, batch_size, mod_tsv_dir)

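Similarly, a hedged sketch of calling direct_smf; thresholds follow the [filter, m6A, m5C, hm5C] order assembled in basecalls_to_adata above, and all paths and values are illustrative assumptions:

    from smftools.informatics.direct_smf import direct_smf

    direct_smf(
        fasta="refs/reference.fasta",
        output_directory="analysis_out",
        mod_list=["6mA", "5mC_5hmC"],                       # keys accepted by mod_map above
        model="models/dna_r10.4.1_e8.2_400bps_sup@v4.2.0",  # assumed dorado model path
        thresholds=[0.8, 0.8, 0.8, 0.8],                    # illustrative call thresholds
        input_data_path="runs/pod5/",
        split_dir="analysis_out/split_BAMs",
        barcode_kit="SQK-NBD114-24",                        # assumed barcoding kit name
        mapping_threshold=0.05,
        experiment_name="example_direct_SMF",
        bam_suffix=".bam",
        batch_size=4,
        basecall=True,
    )
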
smftools/informatics/fast5_to_pod5.py
ADDED

@@ -0,0 +1,21 @@
+# fast5_to_pod5
+
+def fast5_to_pod5(fast5_dir, output_pod5='FAST5s_to_POD5.pod5'):
+    """
+    Convert Nanopore FAST5 files to POD5 file
+
+    Parameters:
+        fast5_dir (str): String representing the file path to a directory containing all FAST5 files to convert into a single POD5 output.
+        output_pod5 (str): The name of the output POD5.
+
+    Returns:
+        None
+
+    """
+    import subprocess
+    from pathlib import Path
+
+    if Path(fast5_dir).is_file():
+        subprocess.run(["pod5", "convert", "fast5", fast5_dir, "--output", output_pod5])
+    elif Path(fast5_dir).is_dir():
+        subprocess.run(["pod5", "convert", "fast5", f".{fast5_dir}*.fast5", "--output", output_pod5])

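Since fast5_to_pod5 is re-exported from smftools.informatics (see the __init__.py change above), converting a run folder is a one-liner; the paths here are illustrative:

    from smftools.informatics import fast5_to_pod5

    # Wraps the 'pod5 convert fast5' command via subprocess
    fast5_to_pod5("nanopore_run/fast5/", output_pod5="nanopore_run/converted.pod5")
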
smftools/informatics/helpers/LoadExperimentConfig.py
ADDED

@@ -0,0 +1,74 @@
+## LoadExperimentConfig
+
+class LoadExperimentConfig:
+    """
+    Loads in the experiment configuration csv and saves global variables with experiment configuration parameters.
+    Parameters:
+        experiment_config (str): A string representing the file path to the experiment configuration csv file.
+
+    Attributes:
+        var_dict (dict): A dictionary containing experiment configuration parameters.
+
+    Example:
+    >>> import pandas as pd
+    >>> from io import StringIO
+    >>> csv_data = '''variable,value,type
+    ... mapping_threshold,0.05,float
+    ... batch_size,4,int
+    ... testing_bool,True,bool
+    ... strands,"[bottom, top]",list
+    ... split_dir,split_bams,string
+    ... pod5_dir,None,string
+    ... pod5_dir,,string
+    ... '''
+    >>> csv_file = StringIO(csv_data)
+    >>> df = pd.read_csv(csv_file)
+    >>> df.to_csv('test_config.csv', index=False)
+    >>> config_loader = LoadExperimentConfig('test_config.csv')
+    >>> config_loader.var_dict['mapping_threshold']
+    0.05
+    >>> config_loader.var_dict['batch_size']
+    4
+    >>> config_loader.var_dict['testing_bool']
+    True
+    >>> config_loader.var_dict['strands']
+    ['bottom', 'top']
+    >>> config_loader.var_dict['split_dir']
+    'split_bams'
+    >>> config_loader.var_dict['pod5_dir'] is None
+    True
+    >>> config_loader.var_dict['pod5_dir'] is None
+    True
+    """
+    def __init__(self, experiment_config):
+        import pandas as pd
+        # Read the CSV into a pandas DataFrame
+        df = pd.read_csv(experiment_config)
+        # Initialize an empty dictionary to store variables
+        var_dict = {}
+        # Iterate through each row in the DataFrame
+        for _, row in df.iterrows():
+            var_name = str(row['variable'])
+            value = row['value']
+            dtype = row['type']
+            # Handle empty and None values
+            if pd.isna(value) or value in ['None', '']:
+                value = None
+            else:
+                # Handle different data types
+                if dtype == 'list':
+                    # Convert the string representation of a list to an actual list
+                    value = value.strip('()[]').replace(', ', ',').split(',')
+                elif dtype == 'int':
+                    value = int(value)
+                elif dtype == 'float':
+                    value = float(value)
+                elif dtype == 'bool':
+                    value = value.lower() == 'true'
+                elif dtype == 'string':
+                    value = str(value)
+            # Store the variable in the dictionary
+            var_dict[var_name] = value
+        # Save the dictionary as an attribute of the class
+        self.var_dict = var_dict
+

smftools/informatics/helpers/__init__.py
CHANGED

@@ -1,42 +1,60 @@
-from .
+from .align_and_sort_BAM import align_and_sort_BAM
+from .aligned_BAM_to_bed import aligned_BAM_to_bed
+from .bed_to_bigwig import bed_to_bigwig
 from .binarize_converted_base_identities import binarize_converted_base_identities
 from .canoncall import canoncall
+from .complement_base_list import complement_base_list
 from .converted_BAM_to_adata import converted_BAM_to_adata
+from .concatenate_fastqs_to_bam import concatenate_fastqs_to_bam
 from .count_aligned_reads import count_aligned_reads
 from .extract_base_identities import extract_base_identities
 from .extract_mods import extract_mods
+from .extract_readnames_from_BAM import extract_readnames_from_BAM
 from .find_conversion_sites import find_conversion_sites
 from .generate_converted_FASTA import convert_FASTA_record, generate_converted_FASTA
+from .get_chromosome_lengths import get_chromosome_lengths
 from .get_native_references import get_native_references
-from .
+from .index_fasta import index_fasta
+from .LoadExperimentConfig import LoadExperimentConfig
 from .make_dirs import make_dirs
 from .make_modbed import make_modbed
 from .modcall import modcall
 from .modkit_extract_to_adata import modkit_extract_to_adata
 from .modQC import modQC
 from .one_hot_encode import one_hot_encode
+from .ohe_batching import ohe_batching
+from .plot_read_length_and_coverage_histograms import plot_read_length_and_coverage_histograms
 from .separate_bam_by_bc import separate_bam_by_bc
 from .split_and_index_BAM import split_and_index_BAM

 __all__ = [
-    "
+    "align_and_sort_BAM",
+    "aligned_BAM_to_bed",
+    "bed_to_bigwig",
     "binarize_converted_base_identities",
     "canoncall",
+    "complement_base_list",
     "converted_BAM_to_adata",
+    "concatenate_fastqs_to_bam",
     "count_aligned_reads",
     "extract_base_identities",
     "extract_mods",
+    "extract_readnames_from_BAM",
     "find_conversion_sites",
     "convert_FASTA_record",
     "generate_converted_FASTA",
+    "get_chromosome_lengths",
     "get_native_references",
-    "
+    "index_fasta",
+    "LoadExperimentConfig",
     "make_dirs",
     "make_modbed",
     "modcall",
     "modkit_extract_to_adata",
     "modQC",
     "one_hot_encode",
+    "ohe_batching",
+    "plot_read_length_and_coverage_histograms",
     "separate_bam_by_bc",
     "split_and_index_BAM"
 ]

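The helpers added to __all__ in 0.1.3 can be imported directly from the subpackage, for example:

    from smftools.informatics.helpers import (
        align_and_sort_BAM,
        bed_to_bigwig,
        get_chromosome_lengths,
        LoadExperimentConfig,
        ohe_batching,
        plot_read_length_and_coverage_histograms,
    )
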