smftools 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- smftools/__init__.py +29 -0
- smftools/_settings.py +20 -0
- smftools/_version.py +1 -0
- smftools/datasets/F1_hybrid_NKG2A_enhander_promoter_GpC_conversion_SMF.h5ad.gz +0 -0
- smftools/datasets/F1_sample_sheet.csv +5 -0
- smftools/datasets/__init__.py +9 -0
- smftools/datasets/dCas9_m6A_invitro_kinetics.h5ad.gz +0 -0
- smftools/datasets/datasets.py +28 -0
- smftools/informatics/__init__.py +16 -0
- smftools/informatics/archived/bam_conversion.py +59 -0
- smftools/informatics/archived/bam_direct.py +63 -0
- smftools/informatics/archived/basecalls_to_adata.py +71 -0
- smftools/informatics/archived/print_bam_query_seq.py +29 -0
- smftools/informatics/basecall_pod5s.py +80 -0
- smftools/informatics/conversion_smf.py +132 -0
- smftools/informatics/direct_smf.py +137 -0
- smftools/informatics/fast5_to_pod5.py +21 -0
- smftools/informatics/helpers/LoadExperimentConfig.py +75 -0
- smftools/informatics/helpers/__init__.py +74 -0
- smftools/informatics/helpers/align_and_sort_BAM.py +59 -0
- smftools/informatics/helpers/aligned_BAM_to_bed.py +74 -0
- smftools/informatics/helpers/archived/informatics.py +260 -0
- smftools/informatics/helpers/archived/load_adata.py +516 -0
- smftools/informatics/helpers/bam_qc.py +66 -0
- smftools/informatics/helpers/bed_to_bigwig.py +39 -0
- smftools/informatics/helpers/binarize_converted_base_identities.py +79 -0
- smftools/informatics/helpers/canoncall.py +34 -0
- smftools/informatics/helpers/complement_base_list.py +21 -0
- smftools/informatics/helpers/concatenate_fastqs_to_bam.py +55 -0
- smftools/informatics/helpers/converted_BAM_to_adata.py +245 -0
- smftools/informatics/helpers/converted_BAM_to_adata_II.py +369 -0
- smftools/informatics/helpers/count_aligned_reads.py +43 -0
- smftools/informatics/helpers/demux_and_index_BAM.py +52 -0
- smftools/informatics/helpers/extract_base_identities.py +44 -0
- smftools/informatics/helpers/extract_mods.py +83 -0
- smftools/informatics/helpers/extract_read_features_from_bam.py +31 -0
- smftools/informatics/helpers/extract_read_lengths_from_bed.py +25 -0
- smftools/informatics/helpers/extract_readnames_from_BAM.py +22 -0
- smftools/informatics/helpers/find_conversion_sites.py +50 -0
- smftools/informatics/helpers/generate_converted_FASTA.py +99 -0
- smftools/informatics/helpers/get_chromosome_lengths.py +32 -0
- smftools/informatics/helpers/get_native_references.py +28 -0
- smftools/informatics/helpers/index_fasta.py +12 -0
- smftools/informatics/helpers/make_dirs.py +21 -0
- smftools/informatics/helpers/make_modbed.py +27 -0
- smftools/informatics/helpers/modQC.py +27 -0
- smftools/informatics/helpers/modcall.py +36 -0
- smftools/informatics/helpers/modkit_extract_to_adata.py +884 -0
- smftools/informatics/helpers/ohe_batching.py +76 -0
- smftools/informatics/helpers/ohe_layers_decode.py +32 -0
- smftools/informatics/helpers/one_hot_decode.py +27 -0
- smftools/informatics/helpers/one_hot_encode.py +57 -0
- smftools/informatics/helpers/plot_read_length_and_coverage_histograms.py +53 -0
- smftools/informatics/helpers/run_multiqc.py +28 -0
- smftools/informatics/helpers/separate_bam_by_bc.py +43 -0
- smftools/informatics/helpers/split_and_index_BAM.py +36 -0
- smftools/informatics/load_adata.py +182 -0
- smftools/informatics/readwrite.py +106 -0
- smftools/informatics/subsample_fasta_from_bed.py +47 -0
- smftools/informatics/subsample_pod5.py +104 -0
- smftools/plotting/__init__.py +15 -0
- smftools/plotting/classifiers.py +355 -0
- smftools/plotting/general_plotting.py +205 -0
- smftools/plotting/position_stats.py +462 -0
- smftools/preprocessing/__init__.py +33 -0
- smftools/preprocessing/append_C_context.py +82 -0
- smftools/preprocessing/archives/mark_duplicates.py +146 -0
- smftools/preprocessing/archives/preprocessing.py +614 -0
- smftools/preprocessing/archives/remove_duplicates.py +21 -0
- smftools/preprocessing/binarize_on_Youden.py +45 -0
- smftools/preprocessing/binary_layers_to_ohe.py +40 -0
- smftools/preprocessing/calculate_complexity.py +72 -0
- smftools/preprocessing/calculate_consensus.py +47 -0
- smftools/preprocessing/calculate_converted_read_methylation_stats.py +94 -0
- smftools/preprocessing/calculate_coverage.py +42 -0
- smftools/preprocessing/calculate_pairwise_differences.py +49 -0
- smftools/preprocessing/calculate_pairwise_hamming_distances.py +27 -0
- smftools/preprocessing/calculate_position_Youden.py +115 -0
- smftools/preprocessing/calculate_read_length_stats.py +79 -0
- smftools/preprocessing/clean_NaN.py +46 -0
- smftools/preprocessing/filter_adata_by_nan_proportion.py +31 -0
- smftools/preprocessing/filter_converted_reads_on_methylation.py +44 -0
- smftools/preprocessing/filter_reads_on_length.py +51 -0
- smftools/preprocessing/flag_duplicate_reads.py +149 -0
- smftools/preprocessing/invert_adata.py +30 -0
- smftools/preprocessing/load_sample_sheet.py +38 -0
- smftools/preprocessing/make_dirs.py +21 -0
- smftools/preprocessing/min_non_diagonal.py +25 -0
- smftools/preprocessing/recipes.py +127 -0
- smftools/preprocessing/subsample_adata.py +58 -0
- smftools/readwrite.py +198 -0
- smftools/tools/__init__.py +49 -0
- smftools/tools/apply_hmm.py +202 -0
- smftools/tools/apply_hmm_batched.py +241 -0
- smftools/tools/archived/classify_methylated_features.py +66 -0
- smftools/tools/archived/classify_non_methylated_features.py +75 -0
- smftools/tools/archived/subset_adata_v1.py +32 -0
- smftools/tools/archived/subset_adata_v2.py +46 -0
- smftools/tools/calculate_distances.py +18 -0
- smftools/tools/calculate_umap.py +62 -0
- smftools/tools/call_hmm_peaks.py +105 -0
- smftools/tools/classifiers.py +787 -0
- smftools/tools/cluster_adata_on_methylation.py +105 -0
- smftools/tools/data/__init__.py +2 -0
- smftools/tools/data/anndata_data_module.py +90 -0
- smftools/tools/data/preprocessing.py +6 -0
- smftools/tools/display_hmm.py +18 -0
- smftools/tools/evaluation/__init__.py +0 -0
- smftools/tools/general_tools.py +69 -0
- smftools/tools/hmm_readwrite.py +16 -0
- smftools/tools/inference/__init__.py +1 -0
- smftools/tools/inference/lightning_inference.py +41 -0
- smftools/tools/models/__init__.py +9 -0
- smftools/tools/models/base.py +14 -0
- smftools/tools/models/cnn.py +34 -0
- smftools/tools/models/lightning_base.py +41 -0
- smftools/tools/models/mlp.py +17 -0
- smftools/tools/models/positional.py +17 -0
- smftools/tools/models/rnn.py +16 -0
- smftools/tools/models/sklearn_models.py +40 -0
- smftools/tools/models/transformer.py +133 -0
- smftools/tools/models/wrappers.py +20 -0
- smftools/tools/nucleosome_hmm_refinement.py +104 -0
- smftools/tools/position_stats.py +239 -0
- smftools/tools/read_stats.py +70 -0
- smftools/tools/subset_adata.py +28 -0
- smftools/tools/train_hmm.py +78 -0
- smftools/tools/training/__init__.py +1 -0
- smftools/tools/training/train_lightning_model.py +47 -0
- smftools/tools/utils/__init__.py +2 -0
- smftools/tools/utils/device.py +10 -0
- smftools/tools/utils/grl.py +14 -0
- {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/METADATA +5 -2
- smftools-0.1.7.dist-info/RECORD +136 -0
- smftools-0.1.6.dist-info/RECORD +0 -4
- {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/WHEEL +0 -0
- {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/licenses/LICENSE +0 -0
smftools/informatics/helpers/ohe_batching.py
@@ -0,0 +1,76 @@
+import os
+import anndata as ad
+import numpy as np
+import concurrent.futures
+from .one_hot_encode import one_hot_encode
+
+def encode_sequence(args):
+    """Parallel helper for one-hot encoding a single read."""
+    read_name, seq, device = args
+    try:
+        one_hot_matrix = one_hot_encode(seq, device)
+        return read_name, one_hot_matrix
+    except Exception:
+        return None  # Skip invalid sequences
+
+def encode_and_save_batch(batch_data, tmp_dir, prefix, record, batch_number):
+    """Encodes a batch and writes it to disk immediately."""
+    batch = {read_name: matrix for read_name, matrix in batch_data if matrix is not None}
+
+    if batch:
+        save_name = os.path.join(tmp_dir, f'tmp_{prefix}_{record}_{batch_number}.h5ad')
+        tmp_ad = ad.AnnData(X=np.zeros((1, 1)), uns=batch)  # Placeholder X; encodings live in .uns
+        tmp_ad.write_h5ad(save_name)
+        return save_name
+    return None
+
+def ohe_batching(base_identities, tmp_dir, record, prefix='', batch_size=100000, progress_bar=None, device='auto', threads=None):
+    """
+    One-hot encodes sequences in parallel and writes each batch to disk immediately.
+
+    Parameters:
+        base_identities (dict): Dictionary mapping read names to sequences.
+        tmp_dir (str): Directory for storing temporary files.
+        record (str): Record name.
+        prefix (str): Prefix for file naming.
+        batch_size (int): Number of reads per batch.
+        progress_bar (tqdm instance, optional): Shared progress bar.
+        device (str): Device for encoding.
+        threads (int, optional): Number of parallel workers.
+
+    Returns:
+        list: List of valid H5AD file paths.
+    """
+    threads = threads or os.cpu_count()  # Default to all available CPU cores
+    batch_data = []
+    batch_number = 0
+    file_names = []
+
+    # Step 1: Prepare data for parallel encoding
+    encoding_args = [(read_name, seq, device) for read_name, seq in base_identities.items() if seq is not None]
+
+    # Step 2: Parallel one-hot encoding using threads (avoids nested processes)
+    with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
+        for result in executor.map(encode_sequence, encoding_args):
+            if result:
+                batch_data.append(result)
+
+            if len(batch_data) >= batch_size:
+                # Step 3: Process and write the full batch immediately
+                file_name = encode_and_save_batch(batch_data.copy(), tmp_dir, prefix, record, batch_number)
+                if file_name:
+                    file_names.append(file_name)
+
+                batch_data.clear()
+                batch_number += 1
+
+            if progress_bar:
+                progress_bar.update(1)
+
+    # Step 4: Process the remaining partial batch
+    if batch_data:
+        file_name = encode_and_save_batch(batch_data, tmp_dir, prefix, record, batch_number)
+        if file_name:
+            file_names.append(file_name)
+
+    return file_names
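
For orientation, a minimal usage sketch with hypothetical inputs (in the pipeline, base_identities would presumably come from a helper such as extract_base_identities):

    import os
    from tqdm import tqdm
    from smftools.informatics.helpers.ohe_batching import ohe_batching

    base_identities = {'read_0': 'ACGT', 'read_1': 'GATTACA'}  # hypothetical reads
    os.makedirs('tmp', exist_ok=True)
    with tqdm(total=len(base_identities)) as pbar:
        files = ohe_batching(base_identities, 'tmp', 'chr1', prefix='demo',
                             batch_size=2, progress_bar=pbar)
    print(files)  # e.g. ['tmp/tmp_demo_chr1_0.h5ad']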
smftools/informatics/helpers/ohe_layers_decode.py
@@ -0,0 +1,32 @@
+# ohe_layers_decode
+
+def ohe_layers_decode(adata, obs_names):
+    """
+    Takes an AnnData object and a list of observation names. Returns the decoded sequence string for each read of interest.
+    Parameters:
+        adata (AnnData): An AnnData object.
+        obs_names (list): A list of observation name strings to retrieve sequences for.
+
+    Returns:
+        sequences (list of str): Decoded sequence strings, one per observation.
+    """
+    import anndata as ad
+    import numpy as np
+    from .one_hot_decode import one_hot_decode
+
+    # Define the mapping of one-hot encoded indices to DNA bases
+    mapping = ['A', 'C', 'G', 'T', 'N']
+
+    ohe_layers = [f"{base}_binary_encoding" for base in mapping]
+    sequences = []
+
+    for obs_name in obs_names:
+        obs_subset = adata[obs_name]
+        ohe_list = []
+        for layer in ohe_layers:
+            ohe_list += list(obs_subset.layers[layer])
+        ohe_array = np.array(ohe_list).T.flatten()  # base-major layer rows -> position-major (A,C,G,T,N) tuples
+        sequence = one_hot_decode(ohe_array)
+        sequences.append(sequence)
+
+    return sequences
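
A round-trip sketch, under the assumption that the five "{base}_binary_encoding" layers hold one per-base hit vector each (the layout binary_layers_to_ohe in this package suggests):

    import anndata as ad
    import numpy as np
    from smftools.informatics.helpers.ohe_layers_decode import ohe_layers_decode

    seq = 'ACGTN'
    mapping = ['A', 'C', 'G', 'T', 'N']
    onehot = (np.array(list(seq))[:, None] == np.array(mapping)).astype(int)  # (len(seq), 5)

    adata = ad.AnnData(X=np.zeros((1, len(seq))))
    adata.obs_names = ['read_0']
    for i, base in enumerate(mapping):
        adata.layers[f'{base}_binary_encoding'] = onehot[:, i][None, :]  # one (1, n) row per base

    print(ohe_layers_decode(adata, ['read_0']))  # ['ACGTN']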
smftools/informatics/helpers/one_hot_decode.py
@@ -0,0 +1,27 @@
+# one_hot_decode
+
+# String encodings
+def one_hot_decode(ohe_array):
+    """
+    Takes a flattened one-hot encoded array and returns the sequence string it represents.
+    Parameters:
+        ohe_array (np.array): A one-hot encoded array.
+
+    Returns:
+        sequence (str): Sequence string of the one-hot encoded array.
+    """
+    import numpy as np
+    # Define the mapping of one-hot encoded indices to DNA bases
+    mapping = ['A', 'C', 'G', 'T', 'N']
+
+    # Reshape the flattened array into a 2D matrix with 5 columns (one for each base)
+    one_hot_matrix = ohe_array.reshape(-1, 5)
+
+    # Get the index of the maximum value (which will be 1) in each row
+    decoded_indices = np.argmax(one_hot_matrix, axis=1)
+
+    # Map the indices back to the corresponding bases
+    sequence_list = [mapping[i] for i in decoded_indices]
+    sequence = ''.join(sequence_list)
+
+    return sequence
smftools/informatics/helpers/one_hot_encode.py
@@ -0,0 +1,57 @@
+# one_hot_encode
+
+def one_hot_encode(sequence, device='auto'):
+    """
+    One-hot encodes a DNA sequence.
+
+    Parameters:
+        sequence (str or list): DNA sequence (e.g., "ACGTN" or ['A', 'C', 'G', 'T', 'N']).
+        device (str): Unused in this NumPy implementation; reserved for the torch variant sketched below.
+
+    Returns:
+        ndarray: Flattened one-hot encoded representation of the input sequence.
+    """
+    import numpy as np
+
+    mapping = np.array(['A', 'C', 'G', 'T', 'N'])
+
+    # Ensure input is a list of characters
+    if not isinstance(sequence, list):
+        sequence = list(sequence)  # Convert string to list of characters
+
+    # Handle empty sequences
+    if len(sequence) == 0:
+        print("Warning: Empty sequence encountered in one_hot_encode()")
+        return np.zeros(len(mapping))  # Return empty encoding instead of failing
+
+    # Convert sequence to NumPy array
+    seq_array = np.array(sequence, dtype='<U1')
+
+    # Replace invalid bases with 'N'
+    seq_array = np.where(np.isin(seq_array, mapping), seq_array, 'N')
+
+    # Create one-hot encoding matrix
+    one_hot_matrix = (seq_array[:, None] == mapping).astype(int)
+
+    # Flatten and return
+    return one_hot_matrix.flatten()
+
+# import torch
+# bases = torch.tensor([ord('A'), ord('C'), ord('G'), ord('T'), ord('N')], dtype=torch.int8, device=device)
+
+# # Convert input to tensor of character ASCII codes
+# seq_tensor = torch.tensor([ord(c) for c in sequence], dtype=torch.int8, device=device)
+
+# # Handle empty sequence
+# if seq_tensor.numel() == 0:
+#     print("Warning: Empty sequence encountered in one_hot_encode_torch()")
+#     return torch.zeros(len(bases), device=device)
+
+# # Replace invalid bases with 'N'
+# is_valid = (seq_tensor[:, None] == bases)  # Compare each base with mapping
+# seq_tensor = torch.where(is_valid.any(dim=1), seq_tensor, ord('N'))
+
+# # Create one-hot encoding matrix
+# one_hot_matrix = (seq_tensor[:, None] == bases).int()
+
+# # Flatten and return
+# return one_hot_matrix.flatten()
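
A quick round-trip sketch exercising both one_hot_encode and one_hot_decode: any non-ACGT character is coerced to 'N', and the flattened output carries 5 entries per base:

    from smftools.informatics.helpers.one_hot_encode import one_hot_encode
    from smftools.informatics.helpers.one_hot_decode import one_hot_decode

    flat = one_hot_encode('ACGTX')  # 'X' becomes 'N'; length 5 * 5 = 25
    assert flat.reshape(-1, 5).sum(axis=1).tolist() == [1, 1, 1, 1, 1]  # one hot bit per position
    print(one_hot_decode(flat))     # 'ACGTN'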
smftools/informatics/helpers/plot_read_length_and_coverage_histograms.py
@@ -0,0 +1,53 @@
+# plot_read_length_and_coverage_histograms
+
+def plot_read_length_and_coverage_histograms(bed_file, plotting_directory):
+    """
+    Plots read length and coverage statistics for each record.
+
+    Parameters:
+        bed_file (str): Path to the BED file to derive read lengths and coverage from.
+        plotting_directory (str): Path to the directory to write out histograms.
+
+    Returns:
+        None
+    """
+    import pandas as pd
+    import matplotlib.pyplot as plt
+    import numpy as np
+    import os
+
+    bed_basename = os.path.basename(bed_file).split('.bed')[0]
+    # Load the BED file into a DataFrame
+    print(f"Loading BED to plot read length and coverage histograms: {bed_file}")
+    df = pd.read_csv(bed_file, sep='\t', header=None, names=['chromosome', 'start', 'end', 'length', 'read_name'])
+
+    # Group by chromosome
+    grouped = df.groupby('chromosome')
+
+    for chrom, group in grouped:
+        # Plot read length histogram
+        plt.figure(figsize=(12, 6))
+        plt.hist(group['length'], bins=50, edgecolor='k', alpha=0.7)
+        plt.title(f'Read Length Histogram of reads aligned to {chrom}')
+        plt.xlabel('Read Length')
+        plt.ylabel('Count')
+        plt.grid(True)
+        save_name = os.path.join(plotting_directory, f'{bed_basename}_{chrom}_read_length_histogram.png')
+        plt.savefig(save_name)
+        plt.close()
+
+        # Compute per-position coverage
+        coverage = np.zeros(group['end'].max())
+        for _, row in group.iterrows():
+            coverage[row['start']:row['end']] += 1
+
+        # Plot per-position coverage (a profile, not a histogram)
+        plt.figure(figsize=(12, 6))
+        plt.plot(coverage, color='b')
+        plt.title(f'Coverage across {chrom}')
+        plt.xlabel('Position')
+        plt.ylabel('Coverage')
+        plt.grid(True)
+        save_name = os.path.join(plotting_directory, f'{bed_basename}_{chrom}_coverage_histogram.png')
+        plt.savefig(save_name)
+        plt.close()
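
A minimal sketch with a synthetic five-column BED in the layout the function expects (chromosome, start, end, length, read_name):

    import os
    from smftools.informatics.helpers.plot_read_length_and_coverage_histograms import plot_read_length_and_coverage_histograms

    os.makedirs('plots', exist_ok=True)
    with open('toy.bed', 'w') as fh:
        fh.write('chr1\t0\t100\t100\tread_0\nchr1\t50\t200\t150\tread_1\n')
    plot_read_length_and_coverage_histograms('toy.bed', 'plots')
    # -> plots/toy_chr1_read_length_histogram.png and plots/toy_chr1_coverage_histogram.png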
smftools/informatics/helpers/run_multiqc.py
@@ -0,0 +1,28 @@
+def run_multiqc(input_dir, output_dir):
+    """
+    Runs MultiQC on a given directory and saves the report to the specified output directory.
+
+    Parameters:
+    - input_dir (str): Path to the directory containing QC reports (e.g., FastQC, Samtools, bcftools outputs).
+    - output_dir (str): Path to the directory where MultiQC reports should be saved.
+
+    Returns:
+    - None: The function executes MultiQC and prints the status.
+    """
+    import os
+    import subprocess
+    # Ensure the output directory exists
+    os.makedirs(output_dir, exist_ok=True)
+
+    # Construct MultiQC command
+    command = ["multiqc", input_dir, "-o", output_dir]
+
+    print(f"Running MultiQC on '{input_dir}' and saving results to '{output_dir}'...")
+
+    # Run MultiQC
+    try:
+        subprocess.run(command, check=True)
+        print(f"MultiQC report generated successfully in: {output_dir}")
+    except subprocess.CalledProcessError as e:
+        print(f"Error running MultiQC: {e}")
+
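
The wrapper is equivalent to running `multiqc <input_dir> -o <output_dir>` on the command line, so MultiQC must be on PATH. A usage sketch with hypothetical directories:

    from smftools.informatics.helpers.run_multiqc import run_multiqc

    # Scans qc_reports/ and writes the report (by default multiqc_report.html) into multiqc_output/
    run_multiqc('qc_reports', 'multiqc_output')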
smftools/informatics/helpers/separate_bam_by_bc.py
@@ -0,0 +1,43 @@
+## separate_bam_by_bc
+
+# General
+def separate_bam_by_bc(input_bam, output_prefix, bam_suffix, split_dir):
+    """
+    Separates an input BAM file on the BC SAM tag values.
+
+    Parameters:
+        input_bam (str): File path to the BAM file to split.
+        output_prefix (str): A prefix to append to the output BAM.
+        bam_suffix (str): A suffix to add to the BAM file.
+        split_dir (str): Path to the directory to split BAMs into.
+
+    Returns:
+        None
+            Writes out split BAM files.
+    """
+    import pysam
+    import os
+
+    bam_base = os.path.basename(input_bam)
+    bam_base_minus_suffix = bam_base.split(bam_suffix)[0]
+
+    # Open the input BAM file for reading
+    with pysam.AlignmentFile(input_bam, "rb") as bam:
+        # Create a dictionary to store output BAM files
+        output_files = {}
+        # Iterate over each read in the BAM file
+        for read in bam:
+            try:
+                # Get the barcode tag value
+                bc_tag = read.get_tag("BC", with_value_type=True)[0].split('barcode')[1]
+                # Open the output BAM file corresponding to the barcode
+                if bc_tag not in output_files:
+                    output_path = os.path.join(split_dir, f"{output_prefix}_{bam_base_minus_suffix}_{bc_tag}{bam_suffix}")
+                    output_files[bc_tag] = pysam.AlignmentFile(output_path, "wb", header=bam.header)
+                # Write the read to the corresponding output BAM file
+                output_files[bc_tag].write(read)
+            except KeyError:
+                print(f"BC tag not present for read: {read.query_name}")
+        # Close all output BAM files
+        for output_file in output_files.values():
+            output_file.close()
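
A usage sketch with hypothetical paths. Note the BC tag value is assumed to contain the substring 'barcode' (e.g. 'barcode01'), so a read tagged 'barcode01' is routed to split_bams/demo_aligned_sorted_01.bam:

    import os
    from smftools.informatics.helpers.separate_bam_by_bc import separate_bam_by_bc

    os.makedirs('split_bams', exist_ok=True)
    separate_bam_by_bc('aligned_sorted.bam', 'demo', '.bam', 'split_bams')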
smftools/informatics/helpers/split_and_index_BAM.py
@@ -0,0 +1,36 @@
+## split_and_index_BAM
+
+def split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory):
+    """
+    A wrapper function for splitting BAMs and indexing them.
+    Parameters:
+        aligned_sorted_BAM (str): The file path of the aligned, sorted BAM file, without the BAM suffix (which is appended internally).
+        split_dir (str): The file path to the directory to split the BAMs into.
+        bam_suffix (str): A suffix to add to the BAM file.
+        output_directory (str): A file path to the directory to output all the analyses.
+
+    Returns:
+        list: Paths of the split BAM files.
+            Splits an input BAM file on barcode value and makes a BAM index file for each.
+    """
+    from .. import readwrite
+    import os
+    import subprocess
+    import glob
+    from .separate_bam_by_bc import separate_bam_by_bc
+    from .make_dirs import make_dirs
+
+    plotting_dir = os.path.join(output_directory, 'demultiplexed_bed_histograms')
+    bed_dir = os.path.join(output_directory, 'demultiplexed_read_alignment_coordinates')
+    make_dirs([plotting_dir, bed_dir])
+    aligned_sorted_output = aligned_sorted_BAM + bam_suffix
+    file_prefix = readwrite.date_string()
+    separate_bam_by_bc(aligned_sorted_output, file_prefix, bam_suffix, split_dir)
+    # Make a BAM index file for the BAMs in that directory
+    bam_pattern = '*' + bam_suffix
+    bam_files = glob.glob(os.path.join(split_dir, bam_pattern))
+    bam_files = [bam for bam in bam_files if '.bai' not in bam]
+    for input_file in bam_files:
+        subprocess.run(["samtools", "index", input_file])
+
+    return bam_files
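
Usage sketch (hypothetical paths). Note the suffix gotcha: the function takes the BAM path without its suffix and appends bam_suffix internally; samtools must be on PATH:

    from smftools.informatics.helpers.split_and_index_BAM import split_and_index_BAM

    # Splits out/aligned_sorted.bam by barcode into out/demultiplexed_BAMs/ and indexes each piece.
    bam_files = split_and_index_BAM('out/aligned_sorted', 'out/demultiplexed_BAMs', '.bam', 'out')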
smftools/informatics/load_adata.py
@@ -0,0 +1,182 @@
+## load_adata
+
+def load_adata(config_path):
+    """
+    High-level function to call for converting raw sequencing data to an adata object.
+    Works for nanopore pod5, fast5, and unaligned modBAM data types for direct SMF workflows.
+    Works for nanopore pod5, fast5, and unaligned BAM for conversion SMF workflows.
+    Also works for Illumina FASTQ and unaligned BAM for conversion SMF workflows.
+
+    Parameters:
+        config_path (str): A string representing the file path to the experiment configuration csv file.
+
+    Returns:
+        None
+    """
+    # Lazy importing of packages
+    from .helpers import LoadExperimentConfig, make_dirs, concatenate_fastqs_to_bam, extract_read_features_from_bam
+    from .fast5_to_pod5 import fast5_to_pod5
+    from .subsample_fasta_from_bed import subsample_fasta_from_bed
+    import os
+    import numpy as np
+    import anndata as ad
+    from pathlib import Path
+
+    # Default params
+    bam_suffix = '.bam'  # If different, change from here.
+    split_dir = 'demultiplexed_BAMs'  # If different, change from here.
+    strands = ['bottom', 'top']  # If different, change from here. Having both listed generally doesn't slow things down too much.
+    conversions = ['unconverted']  # The name to use for the unconverted files. If different, change from here.
+
+    # Load experiment config parameters into global variables
+    experiment_config = LoadExperimentConfig(config_path)
+    var_dict = experiment_config.var_dict
+
+    # The variables below fall back to default_value when they are empty in the experiment_config.csv or fully omitted from the csv.
+    default_value = None
+
+    # General config variable init
+    smf_modality = var_dict.get('smf_modality', default_value)  # Specifies whether the data is conversion SMF or direct methylation detection SMF. Required.
+    input_data_path = var_dict.get('input_data_path', default_value)  # Path to a directory of POD5s/FAST5s or to a BAM/FASTQ file. Required.
+    output_directory = var_dict.get('output_directory', default_value)  # Path to the output directory to make for the analysis. Required.
+    fasta = var_dict.get('fasta', default_value)  # Path to reference FASTA.
+    fasta_regions_of_interest = var_dict.get("fasta_regions_of_interest", default_value)  # Path to a BED file listing coordinate regions of interest within the FASTA to include. Optional.
+    mapping_threshold = var_dict.get('mapping_threshold', default_value)  # Minimum proportion of mapped reads that need to fall within a region to include it in the final AnnData.
+    experiment_name = var_dict.get('experiment_name', default_value)  # A key term to add to the AnnData file name.
+    model_dir = var_dict.get('model_dir', default_value)  # Needed for the dorado basecaller.
+    model = var_dict.get('model', default_value)  # Needed for the dorado basecaller.
+    barcode_kit = var_dict.get('barcode_kit', default_value)  # Needed for the dorado basecaller.
+    barcode_both_ends = var_dict.get('barcode_both_ends', default_value)  # dorado demultiplexing.
+    trim = var_dict.get('trim', default_value)  # dorado adapter and barcode removal.
+    input_already_demuxed = var_dict.get('input_already_demuxed', default_value)  # Whether the input files are already demultiplexed.
+    threads = var_dict.get('threads', default_value)  # Number of CPU threads available for multiprocessing.
+    # Conversion-specific variable init
+    conversion_types = var_dict.get('conversion_types', default_value)
+    # Direct methylation-specific variable init
+    filter_threshold = var_dict.get('filter_threshold', default_value)
+    m6A_threshold = var_dict.get('m6A_threshold', default_value)
+    m5C_threshold = var_dict.get('m5C_threshold', default_value)
+    hm5C_threshold = var_dict.get('hm5C_threshold', default_value)
+    thresholds = [filter_threshold, m6A_threshold, m5C_threshold, hm5C_threshold]
+    mod_list = var_dict.get('mod_list', default_value)
+    batch_size = var_dict.get('batch_size', default_value)
+    device = var_dict.get('device', 'auto')
+    make_bigwigs = var_dict.get('make_bigwigs', default_value)
+    skip_unclassified = var_dict.get('skip_unclassified', True)
+    delete_batch_hdfs = var_dict.get('delete_batch_hdfs', True)
+
+    # Make initial output directory
+    make_dirs([output_directory])
+    os.chdir(output_directory)
+    # Define the pathname to split BAMs into later during demultiplexing.
+    split_path = os.path.join(output_directory, split_dir)
+
+    # If fasta_regions_of_interest is passed, subsample the input FASTA on regions of interest and use the subsampled FASTA.
+    if fasta_regions_of_interest and '.bed' in fasta_regions_of_interest:
+        fasta_basename = os.path.basename(fasta).split('.fa')[0]
+        bed_basename_minus_suffix = os.path.basename(fasta_regions_of_interest).split('.bed')[0]
+        output_FASTA = fasta_basename + '_subsampled_by_' + bed_basename_minus_suffix + '.fasta'
+        subsample_fasta_from_bed(fasta, fasta_regions_of_interest, output_directory, output_FASTA)
+        fasta = os.path.join(output_directory, output_FASTA)
+
+    # If conversion_types is passed:
+    if conversion_types:
+        conversions += conversion_types
+
+    # Get the input filetype
+    if Path(input_data_path).is_file():
+        input_data_filetype = '.' + os.path.basename(input_data_path).split('.')[1].lower()
+        input_is_pod5 = input_data_filetype in ['.pod5', '.p5']
+        input_is_fast5 = input_data_filetype in ['.fast5', '.f5']
+        input_is_fastq = input_data_filetype in ['.fastq', '.fq']
+        input_is_bam = input_data_filetype == bam_suffix
+        if input_is_fastq:
+            fastq_paths = [input_data_path]
+    elif Path(input_data_path).is_dir():
+        # Get the file names in the input data dir
+        input_files = os.listdir(input_data_path)
+        input_is_pod5 = sum([True for file in input_files if '.pod5' in file or '.p5' in file])
+        input_is_fast5 = sum([True for file in input_files if '.fast5' in file or '.f5' in file])
+        input_is_fastq = sum([True for file in input_files if '.fastq' in file or '.fq' in file])
+        input_is_bam = sum([True for file in input_files if bam_suffix in file])
+        if input_is_fastq:
+            fastq_paths = [os.path.join(input_data_path, file) for file in input_files if '.fastq' in file or '.fq' in file]
+
+    # If the input files are fast5 rather than pod5, convert them to a single pod5 file before proceeding.
+    if input_is_fast5 and not input_is_pod5:
+        # Take the input directory of fast5 files and write out a single pod5 file into the output directory.
+        output_pod5 = os.path.join(output_directory, 'FAST5s_to_POD5.pod5')
+        print(f'Input directory contains fast5 files; converting and concatenating them into a single pod5 file: {output_pod5}')
+        fast5_to_pod5(input_data_path, output_pod5)
+        # Reassign the input path variable to point to the new pod5 file.
+        input_data_path = output_pod5
+        input_is_pod5 = True
+        input_is_fast5 = False
+
+    elif input_is_fastq:
+        output_bam = os.path.join(output_directory, 'FASTQs_concatenated_into_BAM.bam')
+        concatenate_fastqs_to_bam(fastq_paths, output_bam, barcode_tag='BC', gzip_suffix='.gz')
+        input_data_path = output_bam
+        input_is_bam = True
+        input_is_fastq = False
+
+    if input_is_pod5:
+        basecall = True
+    elif input_is_bam:
+        basecall = False
+    else:
+        print('Error: could not find an input BAM or POD5 at the given input_data_path.')
+
+    if smf_modality == 'conversion':
+        from .conversion_smf import conversion_smf
+        final_adata, final_adata_path, sorted_output, bam_files = conversion_smf(
+            fasta, output_directory, conversions, strands, model_dir, model, input_data_path, split_path,
+            barcode_kit, mapping_threshold, experiment_name, bam_suffix, basecall, barcode_both_ends, trim, device, make_bigwigs, threads, input_already_demuxed)
+    elif smf_modality == 'direct':
+        from .direct_smf import direct_smf
+        # Need to add the input_already_demuxed workflow here.
+        final_adata, final_adata_path, sorted_output, bam_files = direct_smf(
+            fasta, output_directory, mod_list, model_dir, model, thresholds, input_data_path, split_path,
+            barcode_kit, mapping_threshold, experiment_name, bam_suffix, batch_size, basecall, barcode_both_ends, trim, device, make_bigwigs, skip_unclassified, delete_batch_hdfs, threads)
+    else:
+        print(f"Error: unrecognized smf_modality '{smf_modality}'; expected 'conversion' or 'direct'.")
+
+    # Read in the final adata object and append final metadata
+    # print(f'Reading in adata from {final_adata_path} to add final metadata')
+    # final_adata = ad.read_h5ad(final_adata_path)
+
+    # Adding read query length metadata to adata object.
+    read_metrics = {}
+    for bam_file in bam_files:
+        bam_read_metrics = extract_read_features_from_bam(bam_file)
+        read_metrics.update(bam_read_metrics)
+    # read_metrics = extract_read_features_from_bam(sorted_output)
+
+    query_read_length_values = []
+    query_read_quality_values = []
+    reference_lengths = []
+    # Iterate over each row of the AnnData object
+    for obs_name in final_adata.obs_names:
+        # Fetch the value from the dictionary using the obs_name as the key
+        value = read_metrics.get(obs_name, np.nan)  # Use np.nan if the key is not found
+        if isinstance(value, list):
+            query_read_length_values.append(value[0])
+            query_read_quality_values.append(value[1])
+            reference_lengths.append(value[2])
+        else:
+            query_read_length_values.append(value)
+            query_read_quality_values.append(value)
+            reference_lengths.append(value)
+
+    # Add the new columns to adata.obs
+    final_adata.obs['query_read_length'] = query_read_length_values
+    final_adata.obs['query_read_quality'] = query_read_quality_values
+    final_adata.obs['query_length_to_reference_length_ratio'] = np.array(query_read_length_values) / np.array(reference_lengths)
+
+    final_adata.obs['Raw_methylation_signal'] = np.nansum(final_adata.X, axis=1)
+    final_adata.obs['Raw_per_base_methylation_average'] = final_adata.obs['Raw_methylation_signal'] / final_adata.obs['query_read_length']
+
+    print('Saving final adata')
+    if ".gz" in final_adata_path:
+        final_adata.write_h5ad(f"{final_adata_path}", compression='gzip')
+    else:
+        final_adata.write_h5ad(f"{final_adata_path}.gz", compression='gzip')
+    print('Final adata saved')
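
For orientation, a hypothetical experiment_config.csv for a conversion workflow. The exact schema is defined by LoadExperimentConfig; a simple name,value layout is assumed here, and every key and value below is illustrative, matching the var_dict lookups above:

    # experiment_config.csv (hypothetical contents)
    #   smf_modality,conversion
    #   input_data_path,/data/run1/pod5s
    #   output_directory,/analysis/run1
    #   fasta,/refs/genome.fa
    #   mapping_threshold,0.01
    #   experiment_name,run1
    #   barcode_kit,SQK-NBD114-24
    #   threads,8

    from smftools.informatics.load_adata import load_adata
    load_adata('/analysis/run1/experiment_config.csv')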
smftools/informatics/readwrite.py
@@ -0,0 +1,106 @@
+## readwrite ##
+
+######################################################################################################
+## Datetime functionality
+def date_string():
+    """
+    Each time this is called, it returns the current date string (YYMMDD).
+    """
+    from datetime import datetime
+    current_date = datetime.now()
+    date_string = current_date.strftime("%Y%m%d")
+    date_string = date_string[2:]  # Drop the century digits
+    return date_string
+
+def time_string():
+    """
+    Each time this is called, it returns the current time string (HH:MM:SS).
+    """
+    from datetime import datetime
+    current_time = datetime.now()
+    return current_time.strftime("%H:%M:%S")
+######################################################################################################
+
+######################################################################################################
+## Numpy, Pandas, Anndata functionality
+def adata_to_df(adata, layer=None):
+    """
+    Input: An adata object with an optionally specified layer.
+    Output: A dataframe for the specified layer (or .X when no layer is given).
+    """
+    import pandas as pd
+    import anndata as ad
+
+    # Extract the data matrix from the given layer
+    if layer:
+        data_matrix = adata.layers[layer]
+    else:
+        data_matrix = adata.X
+    # Extract observation (read) annotations
+    obs_df = adata.obs
+    # Extract variable (position) annotations
+    var_df = adata.var
+    # Convert the data matrix and annotations to a pandas DataFrame
+    df = pd.DataFrame(data_matrix, index=obs_df.index, columns=var_df.index)
+    return df
+
+def save_matrix(matrix, save_name):
+    """
+    Input: A numpy matrix and a save_name.
+    Output: A txt file representation of the data matrix.
+    """
+    import numpy as np
+    np.savetxt(f'{save_name}.txt', matrix)
+
+def concatenate_h5ads(output_file, file_suffix='h5ad.gz', delete_inputs=True):
+    """
+    Concatenate all h5ad files in the current working directory and optionally delete them after the final adata is written out.
+    Input: an output file path relative to the directory in which the function is called.
+    """
+    import os
+    import anndata as ad
+    # Silence runtime warnings
+    import warnings
+    warnings.filterwarnings('ignore', category=UserWarning, module='anndata')
+    warnings.filterwarnings('ignore', category=FutureWarning, module='anndata')
+
+    # List all files in the directory
+    files = os.listdir(os.getcwd())
+    # Get the current working directory
+    cwd = os.getcwd()
+    suffix = file_suffix
+    # Keep the file names that contain the suffix
+    hdfs = [hdf for hdf in files if suffix in hdf]
+    # Sort the file list by name and print it
+    hdfs.sort()
+    print('{0} sample files found: {1}'.format(len(hdfs), hdfs))
+    # Iterate over all of the hdf5 files and concatenate them.
+    final_adata = None
+    for hdf in hdfs:
+        print('{0}: Reading in {1} hdf5 file'.format(time_string(), hdf))
+        temp_adata = ad.read_h5ad(hdf)
+        if final_adata:
+            print('{0}: Concatenating final adata object with {1} hdf5 file'.format(time_string(), hdf))
+            final_adata = ad.concat([final_adata, temp_adata], join='outer', index_unique=None)
+        else:
+            print('{0}: Initializing final adata object with {1} hdf5 file'.format(time_string(), hdf))
+            final_adata = temp_adata
+    print('{0}: Writing final concatenated hdf5 file'.format(time_string()))
+    final_adata.write_h5ad(output_file, compression='gzip')
+
+    # Delete the individual h5ad files and keep only the final concatenated file
+    if delete_inputs:
+        files = os.listdir(os.getcwd())
+        hdfs = [hdf for hdf in files if suffix in hdf]
+        if output_file in hdfs:
+            hdfs.remove(output_file)
+        # Iterate over the files and delete them
+        for hdf in hdfs:
+            try:
+                os.remove(hdf)
+                print(f"Deleted file: {hdf}")
+            except OSError as e:
+                print(f"Error deleting file {hdf}: {e}")
+    else:
+        print('Keeping input files')
+######################################################################################################
|