smftools 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- smftools/__init__.py +0 -2
- smftools/_settings.py +1 -1
- smftools/_version.py +1 -0
- smftools/datasets/datasets.py +11 -9
- smftools/informatics/__init__.py +8 -7
- smftools/informatics/bam_conversion.py +47 -0
- smftools/informatics/bam_direct.py +49 -0
- smftools/informatics/basecalls_to_adata.py +42 -0
- smftools/informatics/fast5_to_pod5.py +19 -0
- smftools/informatics/helpers/LoadExperimentConfig.py +74 -0
- smftools/informatics/helpers/__init__.py +4 -4
- smftools/informatics/helpers/align_and_sort_BAM.py +52 -0
- smftools/informatics/helpers/binarize_converted_base_identities.py +10 -3
- smftools/informatics/helpers/canoncall.py +12 -1
- smftools/informatics/helpers/converted_BAM_to_adata.py +30 -13
- smftools/informatics/helpers/count_aligned_reads.py +12 -5
- smftools/informatics/helpers/extract_base_identities.py +13 -6
- smftools/informatics/helpers/extract_mods.py +17 -5
- smftools/informatics/helpers/find_conversion_sites.py +15 -9
- smftools/informatics/helpers/generate_converted_FASTA.py +49 -29
- smftools/informatics/helpers/get_native_references.py +10 -7
- smftools/informatics/helpers/make_dirs.py +9 -3
- smftools/informatics/helpers/make_modbed.py +10 -4
- smftools/informatics/helpers/modQC.py +10 -2
- smftools/informatics/helpers/modcall.py +13 -1
- smftools/informatics/helpers/modkit_extract_to_adata.py +25 -13
- smftools/informatics/helpers/one_hot_encode.py +8 -3
- smftools/informatics/helpers/separate_bam_by_bc.py +18 -5
- smftools/informatics/helpers/split_and_index_BAM.py +18 -10
- smftools/informatics/pod5_conversion.py +34 -7
- smftools/informatics/pod5_direct.py +31 -5
- smftools/informatics/pod5_to_adata.py +31 -8
- smftools/informatics/readwrite.py +13 -16
- smftools/informatics/subsample_pod5.py +48 -0
- smftools/preprocessing/__init__.py +0 -6
- smftools/preprocessing/append_C_context.py +15 -8
- smftools/preprocessing/binarize_on_Youden.py +8 -4
- smftools/preprocessing/binary_layers_to_ohe.py +9 -4
- smftools/preprocessing/calculate_complexity.py +26 -14
- smftools/preprocessing/calculate_converted_read_methylation_stats.py +12 -5
- smftools/preprocessing/calculate_coverage.py +13 -7
- smftools/preprocessing/calculate_pairwise_hamming_distances.py +11 -6
- smftools/preprocessing/calculate_position_Youden.py +21 -12
- smftools/preprocessing/calculate_read_length_stats.py +11 -6
- smftools/preprocessing/clean_NaN.py +12 -5
- smftools/preprocessing/filter_converted_reads_on_methylation.py +12 -5
- smftools/preprocessing/filter_reads_on_length.py +13 -5
- smftools/preprocessing/invert_adata.py +9 -5
- smftools/preprocessing/mark_duplicates.py +20 -11
- smftools/preprocessing/min_non_diagonal.py +9 -4
- smftools/preprocessing/remove_duplicates.py +9 -3
- smftools/readwrite.py +13 -16
- smftools-0.1.1.dist-info/METADATA +88 -0
- smftools-0.1.1.dist-info/RECORD +64 -0
- smftools/informatics/helpers/align_BAM.py +0 -49
- smftools/informatics/helpers/load_experiment_config.py +0 -17
- smftools-0.1.0.dist-info/METADATA +0 -75
- smftools-0.1.0.dist-info/RECORD +0 -58
- /smftools/informatics/helpers/{informatics.py → archived/informatics.py} +0 -0
- /smftools/informatics/helpers/{load_adata.py → archived/load_adata.py} +0 -0
- /smftools/preprocessing/{preprocessing.py → archives/preprocessing.py} +0 -0
- {smftools-0.1.0.dist-info → smftools-0.1.1.dist-info}/WHEEL +0 -0
- {smftools-0.1.0.dist-info → smftools-0.1.1.dist-info}/licenses/LICENSE +0 -0
smftools/__init__.py
CHANGED
|
@@ -3,7 +3,6 @@
|
|
|
3
3
|
import logging
|
|
4
4
|
import warnings
|
|
5
5
|
|
|
6
|
-
from anndata import AnnData
|
|
7
6
|
from . import informatics as inform
|
|
8
7
|
from . import preprocessing as pp
|
|
9
8
|
from . import tools as tl
|
|
@@ -17,7 +16,6 @@ package_name = "smftools"
|
|
|
17
16
|
__version__ = version(package_name)
|
|
18
17
|
|
|
19
18
|
__all__ = [
|
|
20
|
-
"AnnData",
|
|
21
19
|
"inform",
|
|
22
20
|
"pp",
|
|
23
21
|
"tl",
|
smftools/_settings.py
CHANGED
smftools/_version.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "0.1.1"
|
smftools/datasets/datasets.py
CHANGED
|
@@ -1,19 +1,20 @@
|
|
|
1
1
|
## datasets
|
|
2
2
|
|
|
3
|
-
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
from
|
|
9
|
-
|
|
10
|
-
HERE = Path(__file__).parent
|
|
11
|
-
|
|
3
|
+
def import_deps():
    """
    Resolve the on-disk location of the bundled dataset files.

    Returns:
        HERE (Path): Path to the directory containing this module.
    """
    from pathlib import Path
    from .._settings import settings  # kept from original; presumably initializes package settings — TODO confirm
    HERE = Path(__file__).parent
    return HERE

def dCas9_kinetics():
    """
    Load the dCas9 m6A in-vitro kinetics demo dataset.

    Returns:
        AnnData: the dataset read from the bundled gzipped h5ad file.
    """
    # BUG FIX: the original imported anndata inside import_deps(), so the
    # name 'ad' was never in scope here and this function raised NameError.
    import anndata as ad
    HERE = import_deps()
    filepath = HERE / "dCas9_m6A_invitro_kinetics.h5ad.gz"
    return ad.read_h5ad(filepath)
|
|
19
20
|
|
|
@@ -21,5 +22,6 @@ def Kissiov_and_McKenna_2025():
|
|
|
21
22
|
"""
|
|
22
23
|
|
|
23
24
|
"""
|
|
25
|
+
HERE = import_deps()
|
|
24
26
|
filepath = HERE / "F1_hybrid_NKG2A_enhander_promoter_GpC_conversion_SMF.h5ad.gz"
|
|
25
27
|
return ad.read_h5ad(filepath)
|
smftools/informatics/__init__.py
CHANGED
|
@@ -1,11 +1,12 @@
|
|
|
1
|
-
from . import helpers
|
|
2
|
-
from .pod5_conversion import pod5_conversion
|
|
3
|
-
from .pod5_direct import pod5_direct
|
|
4
1
|
from .pod5_to_adata import pod5_to_adata
|
|
2
|
+
from .basecalls_to_adata import basecalls_to_adata
|
|
3
|
+
from .subsample_pod5 import subsample_pod5
|
|
4
|
+
from .fast5_to_pod5 import fast5_to_pod5
|
|
5
|
+
|
|
5
6
|
|
|
6
7
|
__all__ = [
|
|
7
|
-
"
|
|
8
|
-
"
|
|
9
|
-
"
|
|
10
|
-
"
|
|
8
|
+
"pod5_to_adata",
|
|
9
|
+
"basecalls_to_adata",
|
|
10
|
+
"subsample_pod5",
|
|
11
|
+
"fast5_to_pod5"
|
|
11
12
|
]
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
## bam_conversion

def bam_conversion(fasta, output_directory, conversion_types, strands, basecalled_path, split_dir, mapping_threshold, experiment_name, bam_suffix):
    """
    Converts a BAM file from a nanopore conversion SMF experiment to an adata object.

    Parameters:
        fasta (str): File path to the reference genome to align to.
        output_directory (str): A file path to the directory to output all the analyses.
        conversion_types (list): A list of strings of the conversion types to use in the analysis.
        strands (list): A list of conversion strands to use in the experiment.
        basecalled_path (str): A string representing the file path to the experiment BAM or FASTQ file.
        split_dir (str): A string representing the file path to the directory to split the BAMs into.
        mapping_threshold (float): A value between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
        experiment_name (str): A string to provide an experiment name to the output adata file.
        bam_suffix (str): The file suffix used for BAM outputs (e.g. '.bam').

    Returns:
        None
    """
    from .helpers import align_and_sort_BAM, converted_BAM_to_adata, generate_converted_FASTA, split_and_index_BAM
    import os

    # Derive output BAM naming from the basecalled input file name (stem before the first dot).
    input_basecalled_basename = os.path.basename(basecalled_path)
    bam_basename = input_basecalled_basename.split(".")[0]
    output_bam = f"{output_directory}/{bam_basename}"
    aligned_BAM = f"{output_bam}_aligned"
    aligned_sorted_BAM = f"{aligned_BAM}_sorted"

    os.chdir(output_directory)

    # 1) Convert the reference FASTA (reuse an existing converted FASTA if present).
    fasta_basename = os.path.basename(fasta)
    converted_FASTA_basename = fasta_basename.split('.fa')[0] + '_converted.fasta'
    converted_FASTA = os.path.join(output_directory, converted_FASTA_basename)
    if os.path.exists(converted_FASTA):
        print(converted_FASTA + ' already exists. Using existing converted FASTA.')
    else:
        generate_converted_FASTA(fasta, conversion_types, strands, converted_FASTA)

    # 2) Align the basecalled file to the converted reference and coordinate-sort the BAM.
    #    Also writes an index and a BED file of mapped reads.
    align_and_sort_BAM(converted_FASTA, basecalled_path, bam_suffix, output_directory)

    # 3) Split the aligned_sorted BAM by barcode (BC tag) into split_dir.
    split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix)

    # 4) Load the converted split BAMs into an adata object.
    converted_BAM_to_adata(converted_FASTA, split_dir, mapping_threshold, experiment_name, conversion_types, bam_suffix)
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
## bam_direct

def bam_direct(fasta, output_directory, mod_list, thresholds, bam_path, split_dir, mapping_threshold, experiment_name, bam_suffix, batch_size):
    """
    Converts a modified-basecalled BAM file from a nanopore native SMF experiment to an adata object.

    Parameters:
        fasta (str): File path to the reference genome to align to.
        output_directory (str): A file path to the directory to output all the analyses.
        mod_list (list): A list of strings of the modification types to use in the analysis.
        thresholds (list): A list of floats to pass for call thresholds.
        bam_path (str): A string representing the file path to the BAM file.
        split_dir (str): A string representing the file path to the directory to split the BAMs into.
        mapping_threshold (float): A value between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
        experiment_name (str): A string to provide an experiment name to the output adata file.
        bam_suffix (str): The file suffix used for BAM outputs (e.g. '.bam').
        batch_size (int): An integer number of TSV files to analyze in memory at once while loading the final adata object.

    Returns:
        None
    """
    from .helpers import align_and_sort_BAM, extract_mods, make_modbed, modkit_extract_to_adata, modQC, split_and_index_BAM, make_dirs
    import os

    input_bam_base = os.path.basename(bam_path)
    bam_basename = input_bam_base.split(bam_suffix)[0]
    output_bam = f"{output_directory}/{bam_basename}"
    aligned_BAM = f"{output_bam}_aligned"
    aligned_sorted_BAM = f"{aligned_BAM}_sorted"
    mod_bed_dir = f"{output_directory}/split_mod_beds"
    mod_tsv_dir = f"{output_directory}/split_mod_tsvs"

    make_dirs([mod_bed_dir, mod_tsv_dir])

    aligned_sorted_output = aligned_sorted_BAM + bam_suffix
    # Map user-facing modification names to the modkit-facing codes.
    mod_map = {'6mA': '6mA', '5mC_5hmC': '5mC'}
    mods = [mod_map[mod] for mod in mod_list]

    os.chdir(output_directory)

    # 1) Align the BAM to the reference FASTA; also writes an index and a BED of mapped reads.
    align_and_sort_BAM(fasta, bam_path, bam_suffix, output_directory)
    # 2) Split the aligned_sorted BAM by barcode (BC tag) into split_dir.
    split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix)
    # 3) Use nanopore modkit on the modified BAM files.
    modQC(aligned_sorted_output, thresholds)  # QC metrics for mod calls
    make_modbed(aligned_sorted_output, thresholds, mod_bed_dir)  # per-sample position methylation summaries
    extract_mods(thresholds, mod_tsv_dir, split_dir, bam_suffix)  # methylation calls -> split TSV files
    # 4) Load the modification data from TSVs into an adata object.
    modkit_extract_to_adata(fasta, aligned_sorted_output, mapping_threshold, experiment_name, mods, batch_size)
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
## basecalls_to_adata

def basecalls_to_adata(config_path):
    """
    High-level entry point for loading basecalled SMF data from a BAM file into an adata object.
    Also works with FASTQ input for conversion SMF.

    Parameters:
        config_path (str): A string representing the file path to the experiment configuration csv file.

    Returns:
        None
    """
    from .helpers import LoadExperimentConfig, make_dirs
    import os

    bam_suffix = '.bam'  # If different, change from here.
    split_dir = 'split_BAMs'  # If different, change from here.
    strands = ['bottom', 'top']  # Having both listed generally doesn't slow things down too much.
    conversions = ['unconverted']  # Name used for the unconverted files.

    # Load experiment config parameters. They are still injected into module globals
    # for backward compatibility, but this function reads them explicitly below.
    var_dict = LoadExperimentConfig(config_path).var_dict
    for key, value in var_dict.items():
        globals()[key] = value

    output_directory = var_dict['output_directory']
    smf_modality = var_dict['smf_modality']
    fasta = var_dict['fasta']
    basecalled_path = var_dict['basecalled_path']
    mapping_threshold = var_dict['mapping_threshold']
    experiment_name = var_dict['experiment_name']
    conversion_types = var_dict.get('conversion_types')
    mod_list = var_dict.get('mod_list')
    thresholds = var_dict.get('thresholds')
    batch_size = var_dict.get('batch_size')

    split_path = os.path.join(output_directory, split_dir)
    make_dirs([output_directory, split_path])
    os.chdir(output_directory)

    # Guard: conversion_types is only required for the conversion modality.
    if conversion_types:
        conversions += conversion_types

    if smf_modality == 'conversion':
        from .bam_conversion import bam_conversion
        bam_conversion(fasta, output_directory, conversions, strands, basecalled_path, split_path, mapping_threshold, experiment_name, bam_suffix)
    elif smf_modality == 'direct':
        if bam_suffix in basecalled_path:
            from .bam_direct import bam_direct
            bam_direct(fasta, output_directory, mod_list, thresholds, basecalled_path, split_path, mapping_threshold, experiment_name, bam_suffix, batch_size)
        else:
            print('basecalls_to_adata only works with the direct modality when the input filetype is BAM and not FASTQ.')
    else:
        print(f"Error: unrecognized smf_modality '{smf_modality}'; expected 'conversion' or 'direct'.")
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
# fast5_to_pod5

def fast5_to_pod5(fast5_dir, output_dir='outputs/', output_pod5='FAST5s_to_POD5.pod5'):
    """
    Convert Nanopore FAST5 files to a single POD5 file.

    Parameters:
        fast5_dir (str): String representing the file path to a directory containing all FAST5 files to convert into a single POD5 output.
        output_dir (str): String representing the file path to the output directory.
        output_pod5 (str): The name of the output POD5 to write out within the output directory.

    Returns:
        None
    """
    import glob
    import os
    import subprocess

    pod5 = os.path.join(output_dir, output_pod5)
    # BUG FIX: subprocess.run with an argument list does not go through a shell,
    # so wildcard patterns are never expanded. The original passed the literal
    # string f".{fast5_dir}*.fast5" (stray leading '.', no path separator) to
    # pod5, which could not match the input files. Expand the glob here instead.
    fast5_files = sorted(glob.glob(os.path.join(fast5_dir, "*.fast5")))
    subprocess.run(["pod5", "convert", "fast5", *fast5_files, "--output", pod5])
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
## LoadExperimentConfig

class LoadExperimentConfig:
    """
    Parse an experiment configuration CSV into a dictionary of typed parameters.

    The CSV must have the columns ``variable``, ``value`` and ``type``. Each row
    is coerced according to its declared type ('list', 'int', 'float', 'bool',
    'string'); empty cells and the literal string 'None' become ``None``.

    Parameters:
        experiment_config (str): A string representing the file path to the experiment configuration csv file.

    Attributes:
        var_dict (dict): A dictionary containing experiment configuration parameters, keyed by variable name.
    """
    def __init__(self, experiment_config):
        import pandas as pd

        frame = pd.read_csv(experiment_config)
        parsed = {}
        for _, entry in frame.iterrows():
            name = str(entry['variable'])
            raw = entry['value']
            kind = entry['type']

            # Missing cells (NaN) and the literal 'None'/'' mean "not set".
            if pd.isna(raw) or raw in ('None', ''):
                parsed[name] = None
                continue

            # Coerce the raw string according to the declared type.
            if kind == 'list':
                # '[a, b]' or '(a, b)' -> ['a', 'b']
                raw = raw.strip('()[]').replace(', ', ',').split(',')
            elif kind == 'int':
                raw = int(raw)
            elif kind == 'float':
                raw = float(raw)
            elif kind == 'bool':
                raw = raw.lower() == 'true'
            elif kind == 'string':
                raw = str(raw)
            parsed[name] = raw

        # Expose the parsed parameters on the instance.
        self.var_dict = parsed
|
|
74
|
+
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
from .
|
|
1
|
+
from .align_and_sort_BAM import align_and_sort_BAM
|
|
2
2
|
from .binarize_converted_base_identities import binarize_converted_base_identities
|
|
3
3
|
from .canoncall import canoncall
|
|
4
4
|
from .converted_BAM_to_adata import converted_BAM_to_adata
|
|
@@ -8,7 +8,7 @@ from .extract_mods import extract_mods
|
|
|
8
8
|
from .find_conversion_sites import find_conversion_sites
|
|
9
9
|
from .generate_converted_FASTA import convert_FASTA_record, generate_converted_FASTA
|
|
10
10
|
from .get_native_references import get_native_references
|
|
11
|
-
from .
|
|
11
|
+
from .LoadExperimentConfig import LoadExperimentConfig
|
|
12
12
|
from .make_dirs import make_dirs
|
|
13
13
|
from .make_modbed import make_modbed
|
|
14
14
|
from .modcall import modcall
|
|
@@ -19,7 +19,7 @@ from .separate_bam_by_bc import separate_bam_by_bc
|
|
|
19
19
|
from .split_and_index_BAM import split_and_index_BAM
|
|
20
20
|
|
|
21
21
|
__all__ = [
|
|
22
|
-
"
|
|
22
|
+
"align_and_sort_BAM",
|
|
23
23
|
"binarize_converted_base_identities",
|
|
24
24
|
"canoncall",
|
|
25
25
|
"converted_BAM_to_adata",
|
|
@@ -30,7 +30,7 @@ __all__ = [
|
|
|
30
30
|
"convert_FASTA_record",
|
|
31
31
|
"generate_converted_FASTA",
|
|
32
32
|
"get_native_references",
|
|
33
|
-
"
|
|
33
|
+
"LoadExperimentConfig",
|
|
34
34
|
"make_dirs",
|
|
35
35
|
"make_modbed",
|
|
36
36
|
"modcall",
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
## align_and_sort_BAM

def align_and_sort_BAM(fasta, input, bam_suffix, output_directory):
    """
    A wrapper for running dorado aligner and samtools functions.

    Parameters:
        fasta (str): File path to the reference genome to align to.
        input (str): File path to the basecalled file to align. Works for .bam and .fastq files.
        bam_suffix (str): The suffix to use for the BAM file.
        output_directory (str): A file path to the directory to output all the analyses.

    Returns:
        None
        The function writes out files for: 1) an aligned BAM, 2) an aligned_sorted BAM, 3) an index for the aligned_sorted BAM, 4) a BED file for the aligned_sorted BAM, 5) a text file of read names in the aligned_sorted BAM.
    """
    # NOTE: the 'input' parameter name shadows the builtin; kept for interface compatibility.
    import subprocess
    import os

    input_basename = os.path.basename(input)
    # Suffix is taken from the first dot, e.g. 'sample.fastq.gz' -> '.fastq'.
    input_suffix = '.' + input_basename.split('.')[1]
    output_path_minus_suffix = os.path.join(output_directory, input_basename.split(input_suffix)[0])

    aligned_BAM = f"{output_path_minus_suffix}_aligned"
    aligned_sorted_BAM = f"{aligned_BAM}_sorted"
    aligned_output = aligned_BAM + bam_suffix
    aligned_sorted_output = aligned_sorted_BAM + bam_suffix

    # Run dorado aligner. BUG FIX: use a context manager so the stdout file
    # handle is closed (the original leaked the object passed to stdout=).
    with open(aligned_output, "w") as aligned_fh:
        subprocess.run(["dorado", "aligner", "--secondary=no", fasta, input], stdout=aligned_fh)

    # Sort the BAM on positional coordinates.
    subprocess.run(["samtools", "sort", "-o", aligned_sorted_output, aligned_output])

    # Create a BAM index file.
    subprocess.run(["samtools", "index", aligned_sorted_output])

    # Make a BED file of coordinates for the BAM: chrom, start, start + readlen - 1.
    samtools_view = subprocess.Popen(["samtools", "view", aligned_sorted_output], stdout=subprocess.PIPE)
    with open(f"{aligned_sorted_BAM}_bed.bed", "w") as output_file:
        awk_process = subprocess.Popen(["awk", '{print $3, $4, $4+length($10)-1}'], stdin=samtools_view.stdout, stdout=output_file)
        samtools_view.stdout.close()
        awk_process.wait()
    samtools_view.wait()

    # Make a text file of read names for the BAM.
    samtools_view = subprocess.Popen(["samtools", "view", aligned_sorted_output], stdout=subprocess.PIPE)
    with open(f"{aligned_sorted_BAM}_read_names.txt", "w") as output_file:
        cut_process = subprocess.Popen(["cut", "-f1"], stdin=samtools_view.stdout, stdout=output_file)
        samtools_view.stdout.close()
        cut_process.wait()
    samtools_view.wait()
|
|
@@ -1,11 +1,18 @@
|
|
|
1
1
|
## binarize_converted_base_identities
|
|
2
|
-
import numpy as np
|
|
3
2
|
# Conversion SMF specific
|
|
4
3
|
def binarize_converted_base_identities(base_identities, strand, modification_type):
|
|
5
4
|
"""
|
|
6
|
-
|
|
7
|
-
|
|
5
|
+
Binarizes conversion SMF data within a sequence string
|
|
6
|
+
|
|
7
|
+
Parameters:
|
|
8
|
+
base_identities (dict): A dictionary returned by extract_base_identity_at_coordinates.
|
|
9
|
+
strand (str): A string indicating which strand was converted in the experiment (options are 'top' and 'bottom').
|
|
10
|
+
modification_type (str): A string indicating the modification type of interest (options are '5mC' and '6mA').
|
|
11
|
+
|
|
12
|
+
Returns:
|
|
13
|
+
binarized_base_identities (dict): A binarized dictionary, where 1 represents a methylated site. 0 represents an unmethylated site. NaN represents a site that does not carry methylation information.
|
|
8
14
|
"""
|
|
15
|
+
import numpy as np
|
|
9
16
|
binarized_base_identities = {}
|
|
10
17
|
# Iterate over base identity keys to binarize the base identities
|
|
11
18
|
for key in base_identities.keys():
|
|
@@ -1,11 +1,22 @@
|
|
|
1
1
|
## canoncall
|
|
2
|
-
import subprocess
|
|
3
2
|
|
|
4
3
|
# Conversion SMF specific
|
|
5
4
|
def canoncall(model, pod5_dir, barcode_kit, bam, bam_suffix):
|
|
6
5
|
"""
|
|
7
6
|
Wrapper function for dorado canonical base calling.
|
|
7
|
+
|
|
8
|
+
Parameters:
|
|
9
|
+
model (str): a string representing the file path to the dorado basecalling model.
|
|
10
|
+
pod5_dir (str): a string representing the file path to the experiment directory containing the POD5 files.
|
|
11
|
+
barcode_kit (str): A string representing the barcoding kit used in the experiment.
|
|
12
|
+
bam (str): File path to the BAM file to output.
|
|
13
|
+
bam_suffix (str): The suffix to use for the BAM file.
|
|
14
|
+
|
|
15
|
+
Returns:
|
|
16
|
+
None
|
|
17
|
+
Outputs a BAM file holding the canonical base calls output by the dorado basecaller.
|
|
8
18
|
"""
|
|
19
|
+
import subprocess
|
|
9
20
|
output = bam + bam_suffix
|
|
10
21
|
command = ["dorado", "basecaller", model, pod5_dir, "--kit-name", barcode_kit, "-Y"]
|
|
11
22
|
with open(output, "w") as outfile:
|
|
@@ -1,19 +1,32 @@
|
|
|
1
1
|
## converted_BAM_to_adata
|
|
2
|
-
from .. import readwrite
|
|
3
|
-
from .binarize_converted_base_identities import binarize_converted_base_identities
|
|
4
|
-
from .find_conversion_sites import find_conversion_sites
|
|
5
|
-
from .count_aligned_reads import count_aligned_reads
|
|
6
|
-
from .extract_base_identities import extract_base_identities
|
|
7
|
-
from .one_hot_encode import one_hot_encode
|
|
8
|
-
import pandas as pd
|
|
9
|
-
import numpy as np
|
|
10
|
-
import anndata as ad
|
|
11
|
-
import os
|
|
12
2
|
|
|
13
3
|
def converted_BAM_to_adata(converted_FASTA, split_dir, mapping_threshold, experiment_name, conversion_types, bam_suffix):
|
|
14
4
|
"""
|
|
5
|
+
A wrapper function to take converted aligned_sorted_split BAM files and format the data into an anndata object.
|
|
6
|
+
|
|
7
|
+
Parameters:
|
|
8
|
+
converted_FASTA (str): A string representing the file path to the converted FASTA reference.
|
|
9
|
+
split_dir (str): A string representing the file path to the directory containing the converted aligned_sorted_split BAM files.
|
|
10
|
+
mapping_threshold (float): A value in between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
|
|
11
|
+
experiment_name (str): A string to provide an experiment name to the output adata file.
|
|
12
|
+
conversion_types (list): A list of strings of the conversion types to use in the analysis.
|
|
13
|
+
bam_suffix (str): The suffix to use for the BAM file.
|
|
15
14
|
|
|
15
|
+
Returns:
|
|
16
|
+
None
|
|
17
|
+
Outputs a single gzipped adata object for the experiment.
|
|
16
18
|
"""
|
|
19
|
+
from .. import readwrite
|
|
20
|
+
from .binarize_converted_base_identities import binarize_converted_base_identities
|
|
21
|
+
from .find_conversion_sites import find_conversion_sites
|
|
22
|
+
from .count_aligned_reads import count_aligned_reads
|
|
23
|
+
from .extract_base_identities import extract_base_identities
|
|
24
|
+
from .one_hot_encode import one_hot_encode
|
|
25
|
+
import pandas as pd
|
|
26
|
+
import numpy as np
|
|
27
|
+
import anndata as ad
|
|
28
|
+
import os
|
|
29
|
+
|
|
17
30
|
# Get all of the input BAM files
|
|
18
31
|
files = os.listdir(split_dir)
|
|
19
32
|
# Change directory to the BAM directory
|
|
@@ -30,11 +43,14 @@ def converted_BAM_to_adata(converted_FASTA, split_dir, mapping_threshold, experi
|
|
|
30
43
|
# While populating the dictionary, also extract the longest sequence record in the input references
|
|
31
44
|
max_reference_length = 0
|
|
32
45
|
for conversion_type in conversion_types:
|
|
33
|
-
modification_dict[conversion_type] = find_conversion_sites(converted_FASTA, conversion_type)
|
|
46
|
+
modification_dict[conversion_type] = find_conversion_sites(converted_FASTA, conversion_type, conversion_types)
|
|
34
47
|
for record in modification_dict[conversion_type].keys():
|
|
35
48
|
if modification_dict[conversion_type][record][0] > max_reference_length:
|
|
36
49
|
max_reference_length = modification_dict[conversion_type][record][0]
|
|
37
50
|
|
|
51
|
+
# Init a dict to be keyed by FASTA record that points to the sequence string of the unconverted record
|
|
52
|
+
record_FASTA_dict = {}
|
|
53
|
+
|
|
38
54
|
# Iterate over the experiment BAM files
|
|
39
55
|
for bam_index, bam in enumerate(bams):
|
|
40
56
|
# Give each bam a sample name
|
|
@@ -51,7 +67,6 @@ def converted_BAM_to_adata(converted_FASTA, split_dir, mapping_threshold, experi
|
|
|
51
67
|
records_to_analyze.append(record)
|
|
52
68
|
print(f'Records to analyze: {records_to_analyze}')
|
|
53
69
|
# Iterate over records to analyze (ie all conversions detected)
|
|
54
|
-
record_FASTA_dict = {}
|
|
55
70
|
for record in records_to_analyze:
|
|
56
71
|
mod_type, strand = record.split('_')[-2:]
|
|
57
72
|
if strand == 'top':
|
|
@@ -60,7 +75,7 @@ def converted_BAM_to_adata(converted_FASTA, split_dir, mapping_threshold, experi
|
|
|
60
75
|
strand_index = 2
|
|
61
76
|
|
|
62
77
|
chromosome = record.split('_{0}_{1}'.format(mod_type, strand))[0]
|
|
63
|
-
unconverted_chromosome_name = chromosome
|
|
78
|
+
unconverted_chromosome_name = f'{chromosome}_{conversion_types[0]}_top'
|
|
64
79
|
positions = modification_dict[mod_type][unconverted_chromosome_name][strand_index]
|
|
65
80
|
current_reference_length = modification_dict[mod_type][unconverted_chromosome_name][0]
|
|
66
81
|
delta_max_length = max_reference_length - current_reference_length
|
|
@@ -130,6 +145,8 @@ def converted_BAM_to_adata(converted_FASTA, split_dir, mapping_threshold, experi
|
|
|
130
145
|
sequence = record_FASTA_dict[record]
|
|
131
146
|
final_adata.uns[f'{record}_FASTA_sequence'] = sequence
|
|
132
147
|
final_adata.var[f'{record}_FASTA_sequence'] = list(sequence)
|
|
148
|
+
|
|
149
|
+
# May need to remove the bottom for conversion SMF
|
|
133
150
|
record_subset = final_adata[final_adata.obs['Reference'] == record].copy()
|
|
134
151
|
layer_map, layer_counts = {}, []
|
|
135
152
|
for i, layer in enumerate(record_subset.layers):
|
|
@@ -1,14 +1,21 @@
|
|
|
1
1
|
## count_aligned_reads
|
|
2
|
-
from .. import readwrite
|
|
3
|
-
# bioinformatic operations
|
|
4
|
-
import pysam
|
|
5
2
|
|
|
6
3
|
# General
|
|
7
4
|
def count_aligned_reads(bam_file):
|
|
8
5
|
"""
|
|
9
|
-
|
|
10
|
-
|
|
6
|
+
Counts the number of aligned reads in a bam file that map to each reference record.
|
|
7
|
+
|
|
8
|
+
Parameters:
|
|
9
|
+
bam_file (str): A string representing the path to an aligned BAM file.
|
|
10
|
+
|
|
11
|
+
Returns:
|
|
12
|
+
aligned_reads_count (int): The total number of reads aligned in the BAM.
|
|
13
|
+
unaligned_reads_count (int): The total number of reads not aligned in the BAM.
|
|
14
|
+
record_counts (dict): A dictionary keyed by reference record instance that points to a tuple containing the total reads mapped to the record and the fraction of mapped reads which map to the record.
|
|
15
|
+
|
|
11
16
|
"""
|
|
17
|
+
from .. import readwrite
|
|
18
|
+
import pysam
|
|
12
19
|
print('{0}: Counting aligned reads in BAM > {1}'.format(readwrite.time_string(), bam_file))
|
|
13
20
|
aligned_reads_count = 0
|
|
14
21
|
unaligned_reads_count = 0
|
|
@@ -1,15 +1,22 @@
|
|
|
1
1
|
## extract_base_identities
|
|
2
|
-
from .. import readwrite
|
|
3
|
-
# bioinformatic operations
|
|
4
|
-
import pysam
|
|
5
2
|
|
|
6
3
|
# General
|
|
7
4
|
def extract_base_identities(bam_file, chromosome, positions, max_reference_length):
|
|
8
5
|
"""
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
6
|
+
Extracts the base identities from every position within the read that has a reference coordinate
|
|
7
|
+
|
|
8
|
+
Parameters:
|
|
9
|
+
bam (str): File path to the BAM file to align (excluding the file suffix).
|
|
10
|
+
chromosome (str): A string representing the name of the record within the reference FASTA.
|
|
11
|
+
positions (list): A list of position coordinates within the record to extract.
|
|
12
|
+
max_reference_length (int): The maximum length of a record in the reference set.
|
|
13
|
+
|
|
14
|
+
Returns:
|
|
15
|
+
base_identities (dict): A dictionary, keyed by read name, that points to a list of base identities. If the read does not contain that position, fill the list at that index with a N value.
|
|
16
|
+
|
|
12
17
|
"""
|
|
18
|
+
from .. import readwrite
|
|
19
|
+
import pysam
|
|
13
20
|
positions = set(positions)
|
|
14
21
|
# Initialize a base identity dictionary that will hold key-value pairs that are: key (read-name) and value (list of base identities at positions of interest)
|
|
15
22
|
base_identities = {}
|
|
@@ -1,13 +1,25 @@
|
|
|
1
1
|
## extract_mods
|
|
2
|
-
import os
|
|
3
|
-
import subprocess
|
|
4
|
-
import glob
|
|
5
|
-
import zipfile
|
|
6
2
|
|
|
7
3
|
def extract_mods(thresholds, mod_tsv_dir, split_dir, bam_suffix):
|
|
8
4
|
"""
|
|
9
5
|
Takes all of the aligned, sorted, split modified BAM files and runs Nanopore Modkit Extract to load the modification data into zipped TSV files
|
|
6
|
+
|
|
7
|
+
Parameters:
|
|
8
|
+
thresholds (list): A list of thresholds to use for marking each basecalled base as passing or failing on canonical and modification call status.
|
|
9
|
+
mod_tsv_dir (str): A string representing the file path to the directory to hold the modkit extract outputs.
|
|
10
|
+
split_dir (str): A string representing the file path to the directory containing the converted aligned_sorted_split BAM files.
|
|
11
|
+
bam_suffix (str): The suffix to use for the BAM file.
|
|
12
|
+
|
|
13
|
+
Returns:
|
|
14
|
+
None
|
|
15
|
+
Runs modkit extract on input aligned_sorted_split modified BAM files to output zipped TSVs containing modification calls.
|
|
16
|
+
|
|
10
17
|
"""
|
|
18
|
+
import os
|
|
19
|
+
import subprocess
|
|
20
|
+
import glob
|
|
21
|
+
import zipfile
|
|
22
|
+
|
|
11
23
|
os.chdir(mod_tsv_dir)
|
|
12
24
|
filter_threshold, m6A_threshold, m5C_threshold, hm5C_threshold = thresholds
|
|
13
25
|
bam_files = glob.glob(os.path.join(split_dir, f"*{bam_suffix}"))
|
|
@@ -23,7 +35,7 @@ def extract_mods(thresholds, mod_tsv_dir, split_dir, bam_suffix):
|
|
|
23
35
|
# Run modkit extract
|
|
24
36
|
subprocess.run([
|
|
25
37
|
"modkit", "extract",
|
|
26
|
-
"--filter-threshold", filter_threshold,
|
|
38
|
+
"--filter-threshold", f'{filter_threshold}',
|
|
27
39
|
"--mod-thresholds", f"m:{m5C_threshold}",
|
|
28
40
|
"--mod-thresholds", f"a:{m6A_threshold}",
|
|
29
41
|
"--mod-thresholds", f"h:{hm5C_threshold}",
|