smftools 0.1.0__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- smftools/__init__.py +0 -2
- smftools/_settings.py +3 -2
- smftools/_version.py +1 -0
- smftools/datasets/F1_sample_sheet.csv +5 -0
- smftools/datasets/datasets.py +14 -11
- smftools/informatics/__init__.py +10 -7
- smftools/informatics/archived/bam_conversion.py +59 -0
- smftools/informatics/archived/bam_direct.py +63 -0
- smftools/informatics/archived/basecalls_to_adata.py +71 -0
- smftools/informatics/conversion_smf.py +79 -0
- smftools/informatics/direct_smf.py +89 -0
- smftools/informatics/fast5_to_pod5.py +21 -0
- smftools/informatics/helpers/LoadExperimentConfig.py +74 -0
- smftools/informatics/helpers/__init__.py +22 -4
- smftools/informatics/helpers/align_and_sort_BAM.py +48 -0
- smftools/informatics/helpers/aligned_BAM_to_bed.py +73 -0
- smftools/informatics/helpers/bed_to_bigwig.py +39 -0
- smftools/informatics/helpers/binarize_converted_base_identities.py +11 -4
- smftools/informatics/helpers/canoncall.py +14 -1
- smftools/informatics/helpers/complement_base_list.py +21 -0
- smftools/informatics/helpers/concatenate_fastqs_to_bam.py +54 -0
- smftools/informatics/helpers/converted_BAM_to_adata.py +183 -97
- smftools/informatics/helpers/count_aligned_reads.py +25 -14
- smftools/informatics/helpers/extract_base_identities.py +44 -23
- smftools/informatics/helpers/extract_mods.py +17 -5
- smftools/informatics/helpers/extract_readnames_from_BAM.py +22 -0
- smftools/informatics/helpers/find_conversion_sites.py +24 -16
- smftools/informatics/helpers/generate_converted_FASTA.py +60 -21
- smftools/informatics/helpers/get_chromosome_lengths.py +32 -0
- smftools/informatics/helpers/get_native_references.py +10 -7
- smftools/informatics/helpers/index_fasta.py +12 -0
- smftools/informatics/helpers/make_dirs.py +9 -3
- smftools/informatics/helpers/make_modbed.py +10 -4
- smftools/informatics/helpers/modQC.py +10 -2
- smftools/informatics/helpers/modcall.py +16 -2
- smftools/informatics/helpers/modkit_extract_to_adata.py +486 -323
- smftools/informatics/helpers/ohe_batching.py +52 -0
- smftools/informatics/helpers/one_hot_encode.py +15 -8
- smftools/informatics/helpers/plot_read_length_and_coverage_histograms.py +52 -0
- smftools/informatics/helpers/separate_bam_by_bc.py +20 -5
- smftools/informatics/helpers/split_and_index_BAM.py +31 -11
- smftools/informatics/load_adata.py +127 -0
- smftools/informatics/readwrite.py +13 -16
- smftools/informatics/subsample_fasta_from_bed.py +47 -0
- smftools/informatics/subsample_pod5.py +104 -0
- smftools/preprocessing/__init__.py +6 -7
- smftools/preprocessing/append_C_context.py +52 -22
- smftools/preprocessing/binarize_on_Youden.py +8 -4
- smftools/preprocessing/binary_layers_to_ohe.py +9 -4
- smftools/preprocessing/calculate_complexity.py +26 -14
- smftools/preprocessing/calculate_consensus.py +47 -0
- smftools/preprocessing/calculate_converted_read_methylation_stats.py +69 -11
- smftools/preprocessing/calculate_coverage.py +14 -8
- smftools/preprocessing/calculate_pairwise_hamming_distances.py +11 -6
- smftools/preprocessing/calculate_position_Youden.py +21 -12
- smftools/preprocessing/calculate_read_length_stats.py +67 -8
- smftools/preprocessing/clean_NaN.py +13 -6
- smftools/preprocessing/filter_converted_reads_on_methylation.py +15 -6
- smftools/preprocessing/filter_reads_on_length.py +16 -6
- smftools/preprocessing/invert_adata.py +10 -5
- smftools/preprocessing/load_sample_sheet.py +24 -0
- smftools/preprocessing/make_dirs.py +21 -0
- smftools/preprocessing/mark_duplicates.py +54 -30
- smftools/preprocessing/min_non_diagonal.py +9 -4
- smftools/preprocessing/recipes.py +125 -0
- smftools/preprocessing/remove_duplicates.py +15 -6
- smftools/readwrite.py +13 -16
- smftools/tools/apply_HMM.py +1 -0
- smftools/tools/cluster.py +0 -0
- smftools/tools/read_HMM.py +1 -0
- smftools/tools/subset_adata.py +32 -0
- smftools/tools/train_HMM.py +43 -0
- smftools-0.1.3.dist-info/METADATA +94 -0
- smftools-0.1.3.dist-info/RECORD +84 -0
- smftools/informatics/helpers/align_BAM.py +0 -49
- smftools/informatics/helpers/load_experiment_config.py +0 -17
- smftools/informatics/pod5_conversion.py +0 -26
- smftools/informatics/pod5_direct.py +0 -29
- smftools/informatics/pod5_to_adata.py +0 -17
- smftools-0.1.0.dist-info/METADATA +0 -75
- smftools-0.1.0.dist-info/RECORD +0 -58
- /smftools/informatics/helpers/{informatics.py → archived/informatics.py} +0 -0
- /smftools/informatics/helpers/{load_adata.py → archived/load_adata.py} +0 -0
- /smftools/preprocessing/{preprocessing.py → archives/preprocessing.py} +0 -0
- {smftools-0.1.0.dist-info → smftools-0.1.3.dist-info}/WHEEL +0 -0
- {smftools-0.1.0.dist-info → smftools-0.1.3.dist-info}/licenses/LICENSE +0 -0
smftools/preprocessing/filter_reads_on_length.py CHANGED
@@ -1,14 +1,22 @@
 ## filter_reads_on_length
-import numpy as np
-import anndata as ad
-import pandas as pd
 
 def filter_reads_on_length(adata, filter_on_coordinates=False, min_read_length=2700):
     """
+    Filters the adata object to keep a defined coordinate window, as well as reads that are over a minimum threshold in length.
+
+    Parameters:
+        adata (AnnData): An adata object.
+        filter_on_coordinates (bool | list): If False, skips filtering. Otherwise, provide a list containing integers representing the lower and upper bound coordinates to filter on. Default is False.
+        min_read_length (int): The minimum read length to keep in the filtered dataset. Default is 2700.
+
+    Returns:
+        adata (AnnData): The filtered adata object
     Input: Adata object. a list of lower and upper bound (set to False or None if not wanted), and a minimum read length integer.
-
+
     """
-
+    import numpy as np
+    import anndata as ad
+    import pandas as pd
     if filter_on_coordinates:
         lower_bound, upper_bound = filter_on_coordinates
         # Extract the position information from the adata object as an np array
@@ -28,4 +36,6 @@ def filter_reads_on_length(adata, filter_on_coordinates=False, min_read_length=2
 
     if min_read_length:
         print(f'Subsetting adata to keep reads longer than {min_read_length}')
-        adata = adata[adata.obs['read_length'] > min_read_length].copy()
+        adata = adata[adata.obs['read_length'] > min_read_length].copy()
+
+    return adata
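For orientation, a minimal usage sketch of the updated function, not part of the diff; the synthetic AnnData below is hypothetical, and the only assumption beyond the hunks above is that the unseen middle of the function stays inside the `if filter_on_coordinates:` block.

```python
# Hedged sketch of calling the 0.1.3 filter_reads_on_length; synthetic data only.
import anndata as ad
import numpy as np
import pandas as pd
from smftools.preprocessing.filter_reads_on_length import filter_reads_on_length

adata = ad.AnnData(
    X=np.zeros((3, 5)),
    obs=pd.DataFrame({"read_length": [3000, 2500, 2900]}, index=["r1", "r2", "r3"]),
    var=pd.DataFrame(index=[str(p) for p in range(1000, 1005)]),
)

# Skip coordinate filtering and keep reads longer than 2700 bp; note that the
# function now returns the filtered object (the added `return adata`).
adata = filter_reads_on_length(adata, filter_on_coordinates=False, min_read_length=2700)
print(adata.shape)  # expected (2, 5): the 2500 bp read is dropped
```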
smftools/preprocessing/invert_adata.py CHANGED
@@ -1,14 +1,19 @@
 ## invert_adata
-import numpy as np
-import anndata as ad
-import pandas as pd
 
 # Optional inversion of the adata
 def invert_adata(adata):
     """
-
-
+    Inverts the adata object along the variable axis
+
+    Parameters:
+        adata (AnnData): An adata object.
+
+    Returns:
+        None
     """
+    import numpy as np
+    import anndata as ad
+    print('Inverting adata')
     # Reassign var_names with new names
     old_var_names = adata.var_names.astype(int).to_numpy()
     new_var_names = np.sort(old_var_names)[::-1].astype(str)
smftools/preprocessing/load_sample_sheet.py
@@ -0,0 +1,24 @@
+# load_sample_sheet
+
+def load_sample_sheet(adata, sample_sheet_path, mapping_key_column):
+    """
+    Loads a sample sheet csv and uses one of the columns to map sample information into the AnnData object.
+
+    Parameters:
+        adata (AnnData): The Anndata object to append sample information to.
+        sample_sheet_path (str):
+        mapping_key_column (str):
+
+    Returns:
+        None
+    """
+    import pandas as pd
+    import anndata as ad
+    df = pd.read_csv(sample_sheet_path)
+    key_column = mapping_key_column
+    df[key_column] = df[key_column].astype(str)
+    value_columns = [column for column in df.columns if column != key_column]
+    mapping_dict = df.set_index(key_column)[value_columns].to_dict(orient='index')
+    for column in value_columns:
+        column_map = {key: value[column] for key, value in mapping_dict.items()}
+    adata.obs[column] = adata.obs[key_column].map(column_map)
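A hedged usage sketch of the new helper, not part of the diff; the CSV contents and column names are hypothetical. Every non-key column of the sheet is mapped onto `adata.obs`, keyed by the values already present in `adata.obs[mapping_key_column]`.

```python
# Illustrative only: sample_sheet.csv and the column names are hypothetical.
import anndata as ad
import numpy as np
import pandas as pd
from smftools.preprocessing.load_sample_sheet import load_sample_sheet

# Write a tiny hypothetical sample sheet.
pd.DataFrame(
    {"Sample": ["barcode01", "barcode02"], "Condition": ["untreated", "treated"]}
).to_csv("sample_sheet.csv", index=False)

# adata.obs must already carry the mapping key column ('Sample' here).
adata = ad.AnnData(
    X=np.zeros((3, 4)),
    obs=pd.DataFrame({"Sample": ["barcode01", "barcode02", "barcode01"]},
                     index=["r1", "r2", "r3"]),
)

load_sample_sheet(adata, "sample_sheet.csv", mapping_key_column="Sample")
print(adata.obs["Condition"].tolist())  # ['untreated', 'treated', 'untreated']
```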
smftools/preprocessing/make_dirs.py
@@ -0,0 +1,21 @@
+## make_dirs
+
+# General
+def make_dirs(directories):
+    """
+    Takes a list of file paths and makes new directories if the directory does not already exist.
+
+    Parameters:
+        directories (list): A list of directories to make
+
+    Returns:
+        None
+    """
+    import os
+
+    for directory in directories:
+        if not os.path.isdir(directory):
+            os.mkdir(directory)
+            print(f"Directory '{directory}' created successfully.")
+        else:
+            print(f"Directory '{directory}' already exists.")
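A quick illustrative call (the directory names are hypothetical); because the helper uses `os.mkdir` rather than `os.makedirs`, a parent directory must appear in the list before its children.

```python
# Illustrative only: directory names are hypothetical.
from smftools.preprocessing.make_dirs import make_dirs

# Parents must precede children, since os.mkdir does not create intermediates.
make_dirs(["smf_outputs", "smf_outputs/plots"])
```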
smftools/preprocessing/mark_duplicates.py CHANGED
@@ -1,19 +1,29 @@
 ## mark_duplicates
-import numpy as np
-import pandas as pd
-import matplotlib.pyplot as plt
-from scipy.signal import find_peaks
-import networkx as nx
-from .binary_layers_to_ohe import binary_layers_to_ohe
-from .calculate_pairwise_hamming_distances import calculate_pairwise_hamming_distances
-from .min_non_diagonal import min_non_diagonal
 
-
-def mark_duplicates(adata, layers, obs_column='Reference', sample_col='Sample_names'):
+def mark_duplicates(adata, layers, obs_column='Reference', sample_col='Sample_names', hamming_distance_thresholds={}):
     """
-
-
+    Marks duplicates in the adata object.
+
+    Parameters:
+        adata (AnnData): An adata object.
+        layers (list): A list of strings representing the layers to use.
+        obs_column (str): A string representing the obs column name to first subset on. Default is 'Reference'.
+        sample_col (str):L A string representing the obs column name to second subset on. Default is 'Sample_names'.
+        hamming_distance_thresholds (dict): A dictionary keyed by obs_column categories that points to a float corresponding to the distance threshold to apply. Default is an empty dict.
+
+    Returns:
+        None
     """
+
+    import numpy as np
+    import pandas as pd
+    import matplotlib.pyplot as plt
+    from scipy.signal import find_peaks
+    import networkx as nx
+    from .binary_layers_to_ohe import binary_layers_to_ohe
+    from .calculate_pairwise_hamming_distances import calculate_pairwise_hamming_distances
+    from .min_non_diagonal import min_non_diagonal
+
     categories = adata.obs[obs_column].cat.categories
     sample_names = adata.obs[sample_col].cat.categories
 
@@ -39,22 +49,32 @@ def mark_duplicates(adata, layers, obs_column='Reference', sample_col='Sample_na
             distance_df = pd.DataFrame(distance_matrix, index=read_names, columns=read_names)
             # Save the distance dataframe into an unstructured component of the adata object
             adata.uns[f'Pairwise_Hamming_distance_within_{cat}_{sample}'] = distance_df
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if n_reads > 1:
+                # Calculate the minimum non-self distance for every read in the reference and sample
+                min_distance_values = min_non_diagonal(distance_matrix)
+                min_distance_df = pd.DataFrame({'Nearest_neighbor_Hamming_distance': min_distance_values}, index=read_names)
+                adata.obs.update(min_distance_df)
+                # Generate a histogram of minimum non-self distances for each read
+                if n_reads > 3:
+                    n_bins = n_reads // 4
+                else:
+                    n_bins = 1
+                min_distance_bins = plt.hist(min_distance_values, bins=n_bins)
+                if cat in hamming_distance_thresholds:
+                    adata.uns[f'Hamming_distance_threshold_for_{cat}_{sample}'] = hamming_distance_thresholds[cat]
+                else: # eventually this should be written to use known PCR duplicate controls for thresholding.
+                    # Normalize the max value in any histogram bin to 1
+                    normalized_min_distance_counts = min_distance_bins[0] / np.max(min_distance_bins[0])
+                    # Extract the bin index of peak centers in the histogram
+                    peak_centers, _ = find_peaks(normalized_min_distance_counts, prominence=0.2, distance=5)
+                    first_peak_index = peak_centers[0]
+                    offset_index = first_peak_index-1
+                    # Use the distance corresponding to the first peak as the threshold distance in graph construction
+                    first_peak_distance = min_distance_bins[1][first_peak_index]
+                    offset_distance = min_distance_bins[1][offset_index]
+                    adata.uns[f'Hamming_distance_threshold_for_{cat}_{sample}'] = offset_distance
+            else:
+                adata.uns[f'Hamming_distance_threshold_for_{cat}_{sample}'] = 0
 
     ## Detect likely duplicate reads and mark them in the adata object.
     adata.obs['Marked_duplicate'] = pd.Series(False, index=adata.obs_names, dtype=bool)
@@ -82,7 +102,11 @@ def mark_duplicates(adata, layers, obs_column='Reference', sample_col='Sample_na
             clusters = [list(cluster) for cluster in clusters]
             # Get the number of clusters
             cluster_count = len(clusters)
-
+            if n_reads > 0:
+                fraction_unique = cluster_count / n_reads
+            else:
+                fraction_unique = 0
+            adata.uns[f'Hamming_distance_clusters_within_{cat}_{sample}'] = [cluster_count, n_reads, fraction_unique, clusters]
             # Update the adata object
             read_cluster_map = {}
             read_duplicate_map = {}
@@ -107,4 +131,4 @@ def mark_duplicates(adata, layers, obs_column='Reference', sample_col='Sample_na
             adata.obs.update(df_combined)
             adata.obs['Marked_duplicate'] = adata.obs['Marked_duplicate'].astype(bool)
             adata.obs['Unique_in_final_read_set'] = adata.obs['Unique_in_final_read_set'].astype(bool)
-            print(f'Hamming clusters for {sample} on {cat}\nThreshold: {
+            print(f'Hamming clusters for {sample} on {cat}\nThreshold: {distance_threshold}\nNumber clusters: {cluster_count}\nNumber reads: {n_reads}\nFraction unique: {fraction_unique}')
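A hedged sketch of the new `hamming_distance_thresholds` hook, not part of the diff; the h5ad path, layer names, and reference name are hypothetical. When a category is listed in the dict, its threshold is applied directly; otherwise the threshold is derived from the first peak of the nearest-neighbor Hamming distance histogram, as in the hunk above.

```python
# Illustrative sketch only; file, layer, and reference names are hypothetical.
import anndata as ad
from smftools.preprocessing.mark_duplicates import mark_duplicates

adata = ad.read_h5ad("experiment.h5ad")  # hypothetical preprocessed object

mark_duplicates(
    adata,
    layers=["GpC_site_binary", "CpG_site_binary"],       # hypothetical binary layers
    obs_column="Reference",
    sample_col="Sample_names",
    hamming_distance_thresholds={"my_amplicon": 0.05},   # explicit threshold for one reference
)

# The function annotates reads in place rather than returning a value.
print(adata.obs["Marked_duplicate"].value_counts())
```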
smftools/preprocessing/min_non_diagonal.py CHANGED
@@ -1,12 +1,17 @@
 ## min_non_diagonal
-import numpy as np
 
 def min_non_diagonal(matrix):
     """
-    Takes a matrix and returns the smallest value from each row with the diagonal masked
-
-
+    Takes a matrix and returns the smallest value from each row with the diagonal masked.
+
+    Parameters:
+        matrix (ndarray): A 2D ndarray.
+
+    Returns:
+        min_values (list): A list of minimum values from each row of the matrix
     """
+    import numpy as np
+
     n = matrix.shape[0]
     min_values = []
     for i in range(n):
smftools/preprocessing/recipes.py
@@ -0,0 +1,125 @@
+# recipes
+
+def recipe_1_Kissiov_and_McKenna_2025(adata, sample_sheet_path, output_directory, mapping_key_column='Sample', reference_column = 'Reference', sample_names_col='Sample_names', invert=False):
+    """
+    The first part of the preprocessing workflow applied to the smf.inform.pod_to_adata() output derived from Kissiov_and_McKenna_2025.
+
+    Performs the following tasks:
+    1) Loads a sample CSV to append metadata mappings to the adata object.
+    2) Appends a boolean indicating whether each position in var_names is within a given reference.
+    3) Appends the cytosine context to each position from each reference.
+    4) Calculate read level methylation statistics.
+    5) Optionally inverts the adata to flip the position coordinate orientation.
+    6) Calculates read length statistics (start position, end position, read length)
+    7) Returns a dictionary to pass the variable namespace to the parent scope.
+
+    Parameters:
+        adata (AnnData): The AnnData object to use as input.
+        sample_sheet_path (str): String representing the path to the sample sheet csv containing the sample metadata.
+        output_directory (str): String representing the path to the output directory for plots.
+        mapping_key_column (str): The column name to use as the mapping keys for applying the sample sheet metadata.
+        reference_column (str): The name of the reference column to use.
+        sample_names_col (str): The name of the sample name column to use.
+        invert (bool): Whether to invert the positional coordinates of the adata object.
+
+    Returns:
+        variables (dict): A dictionary of variables to append to the parent scope.
+    """
+    import anndata as ad
+    import pandas as pd
+    import numpy as np
+    from .load_sample_sheet import load_sample_sheet
+    from .calculate_coverage import calculate_coverage
+    from .append_C_context import append_C_context
+    from .calculate_converted_read_methylation_stats import calculate_converted_read_methylation_stats
+    from .invert_adata import invert_adata
+    from .calculate_read_length_stats import calculate_read_length_stats
+
+    # Clean up some of the Reference metadata and save variable names that point to sets of values in the column.
+    adata.obs[reference_column] = adata.obs[reference_column].astype('category')
+    references = adata.obs[reference_column].cat.categories
+    split_references = [(reference, reference.split('_')[0][1:]) for reference in references]
+    reference_mapping = {k: v for k, v in split_references}
+    adata.obs[f'{reference_column}_short'] = adata.obs[reference_column].map(reference_mapping)
+    short_references = set(adata.obs[f'{reference_column}_short'])
+    binary_layers = adata.layers.keys()
+
+    # load sample sheet metadata
+    load_sample_sheet(adata, sample_sheet_path, mapping_key_column)
+
+    # hold sample names set
+    adata.obs[sample_names_col] = adata.obs[sample_names_col].astype('category')
+    sample_names = adata.obs[sample_names_col].cat.categories
+
+    # Add position level metadata
+    calculate_coverage(adata, obs_column=reference_column)
+    adata.var['SNP_position'] = (adata.var[f'N_{reference_column}_with_position'] > 0) & (adata.var[f'N_{reference_column}_with_position'] < len(references)).astype(bool)
+
+    # Append cytosine context to the reference positions based on the conversion strand.
+    append_C_context(adata, obs_column=reference_column, use_consensus=False)
+
+    # Calculate read level methylation statistics. Assess if GpC methylation level is above other_C methylation level as a QC.
+    calculate_converted_read_methylation_stats(adata, reference_column, sample_names_col, output_directory, show_methylation_histogram=False, save_methylation_histogram=False)
+
+    # Invert the adata object (ie flip the strand orientation for visualization)
+    if invert:
+        invert_adata(adata)
+    else:
+        pass
+
+    # Calculate read length statistics, with options to display or save the read length histograms
+    upper_bound, lower_bound = calculate_read_length_stats(adata, reference_column, sample_names_col, output_directory, show_read_length_histogram=False, save_read_length_histogram=False)
+
+    variables = {
+        "short_references": short_references,
+        "binary_layers": binary_layers,
+        "sample_names": sample_names,
+        "upper_bound": upper_bound,
+        "lower_bound": lower_bound,
+        "references": references
+    }
+    return variables
+
+def recipe_2_Kissiov_and_McKenna_2025(adata, output_directory, binary_layers, hamming_distance_thresholds={}, reference_column = 'Reference', sample_names_col='Sample_names'):
+    """
+    The second part of the preprocessing workflow applied to the adata that has already been preprocessed by recipe_1_Kissiov_and_McKenna_2025.
+
+    Performs the following tasks:
+    1) Adds new layers containing NaN replaced variants of adata.X (fill_closest, nan0_0minus1, nan1_12).
+    2) Marks putative PCR duplicates using pairwise hamming distance metrics.
+    3) Performs a complexity analysis of the library based on the PCR duplicate detection rate.
+    4) Removes PCR duplicates from the adata.
+    5) Returns two adata object: one for the filtered adata and one for the duplicate adata.
+
+    Parameters:
+        adata (AnnData): The AnnData object to use as input.
+        output_directory (str): String representing the path to the output directory for plots.
+        binary_layers (list): A list of layers to used for the binary encoding of read sequences. Used for duplicate detection.
+        hamming_distance_thresholds (dict): A dictionary keyed by obs_column categories that points to a float corresponding to the distance threshold to apply. Default is an empty dict.
+        reference_column (str): The name of the reference column to use.
+        sample_names_col (str): The name of the sample name column to use.
+
+    Returns:
+        filtered_adata (AnnData): An AnnData object containing the filtered reads
+        duplicates (AnnData): An AnnData object containing the duplicate reads
+    """
+    import anndata as ad
+    import pandas as pd
+    import numpy as np
+    from .clean_NaN import clean_NaN
+    from .mark_duplicates import mark_duplicates
+    from .calculate_complexity import calculate_complexity
+    from .remove_duplicates import remove_duplicates
+
+    # NaN replacement strategies stored in additional layers. Having layer=None uses adata.X
+    clean_NaN(adata, layer=None)
+
+    # Duplicate detection using pairwise hamming distance across reads
+    mark_duplicates(adata, binary_layers, obs_column=reference_column, sample_col=sample_names_col, hamming_distance_thresholds=hamming_distance_thresholds)
+
+    # Complexity analysis using the marked duplicates and the lander-watermann algorithm
+    calculate_complexity(adata, output_directory, obs_column=reference_column, sample_col=sample_names_col, plot=True, save_plot=False)
+
+    # Remove duplicate reads and store the duplicate reads in a new AnnData object named duplicates.
+    filtered_adata, duplicates = remove_duplicates(adata)
+    return filtered_adata, duplicates
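A hedged sketch of chaining the two new recipes, not part of the diff; the input h5ad, sample sheet path, and output directory are hypothetical. recipe_1 appends sample metadata and QC statistics, recipe_2 marks and removes PCR duplicates and runs the complexity analysis.

```python
# Illustrative only: all paths are hypothetical.
import anndata as ad
from smftools.preprocessing.recipes import (
    recipe_1_Kissiov_and_McKenna_2025,
    recipe_2_Kissiov_and_McKenna_2025,
)

adata = ad.read_h5ad("converted_smf.h5ad")  # hypothetical informatics output

variables = recipe_1_Kissiov_and_McKenna_2025(
    adata,
    sample_sheet_path="sample_sheet.csv",   # hypothetical
    output_directory="qc_plots",            # hypothetical
    invert=False,
)

filtered_adata, duplicates = recipe_2_Kissiov_and_McKenna_2025(
    adata,
    output_directory="qc_plots",
    binary_layers=variables["binary_layers"],
    hamming_distance_thresholds={},         # fall back to histogram-derived thresholds
)
```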
smftools/preprocessing/remove_duplicates.py CHANGED
@@ -1,12 +1,21 @@
 # remove_duplicates
-import anndata as ad
 
 def remove_duplicates(adata):
     """
-
-
+    Remove duplicates from the adata object
+
+    Parameters:
+        adata (Anndata): An adata object.
+
+    Returns:
+        filtered_adata (AnnData): An AnnData object of the filtered reads
+        duplicates (AnnData): An AnnData object of the duplicate reads
     """
+    import anndata as ad
+
     initial_size = adata.shape[0]
-
-    final_size =
-    print(f'Removed {initial_size-final_size} reads from the dataset')
+    filtered_adata = adata[adata.obs['Unique_in_final_read_set'] == True].copy()
+    final_size = filtered_adata.shape[0]
+    print(f'Removed {initial_size-final_size} reads from the dataset')
+    duplicates = adata[adata.obs['Unique_in_final_read_set'] == False].copy()
+    return filtered_adata, duplicates
smftools/readwrite.py CHANGED
@@ -1,27 +1,12 @@
 ## readwrite ##
 
-# Basic I/O
-import os
-# Datetime
-from datetime import datetime
-# Data structures and basic operations
-import math
-import numpy as np
-import pandas as pd
-import anndata as ad
-import scipy.sparse as sp
-
-# Runtime warnings
-import warnings
-warnings.filterwarnings('ignore', category=UserWarning, module='anndata')
-warnings.filterwarnings('ignore', category=FutureWarning, module='anndata')
-
 ######################################################################################################
 ## Datetime functionality
 def date_string():
     """
     Each time this is called, it returns the current date string
     """
+    from datetime import datetime
     current_date = datetime.now()
     date_string = current_date.strftime("%Y%m%d")
     date_string = date_string[2:]
@@ -31,6 +16,7 @@ def time_string():
     """
     Each time this is called, it returns the current time string
     """
+    from datetime import datetime
     current_time = datetime.now()
     return current_time.strftime("%H:%M:%S")
 ######################################################################################################
@@ -42,6 +28,9 @@ def adata_to_df(adata, layer=None):
     Input: An adata object with a specified layer.
     Output: A dataframe for the specific layer.
     """
+    import pandas as pd
+    import anndata as ad
+
     # Extract the data matrix from the given layer
     if layer:
         data_matrix = adata.layers[layer]
@@ -60,6 +49,7 @@ def save_matrix(matrix, save_name):
     Input: A numpy matrix and a save_name
     Output: A txt file representation of the data matrix
     """
+    import numpy as np
     np.savetxt(f'{save_name}.txt', matrix)
 
 def concatenate_h5ads(output_file, file_suffix='h5ad.gz', delete_inputs=True):
@@ -67,6 +57,13 @@ def concatenate_h5ads(output_file, file_suffix='h5ad.gz', delete_inputs=True):
     Concatenate all h5ad files in a directory and delete them after the final adata is written out.
     Input: an output file path relative to the directory in which the function is called
     """
+    import os
+    import anndata as ad
+    # Runtime warnings
+    import warnings
+    warnings.filterwarnings('ignore', category=UserWarning, module='anndata')
+    warnings.filterwarnings('ignore', category=FutureWarning, module='anndata')
+
     # List all files in the directory
     files = os.listdir(os.getcwd())
     # get current working directory
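Since the module-level imports now live inside each helper, importing `smftools.readwrite` stays lightweight; a small hedged sketch, not part of the diff (the AnnData object and layer name are hypothetical, and `adata_to_df` is assumed per its docstring to return a DataFrame for the requested layer).

```python
# Illustrative sketch of the readwrite helpers touched above; synthetic data only.
import anndata as ad
import numpy as np
from smftools import readwrite

print(readwrite.date_string())   # 'YYMMDD'-style string
print(readwrite.time_string())   # 'HH:MM:SS'-style string

adata = ad.AnnData(X=np.zeros((3, 4)))
adata.layers["demo"] = np.ones((3, 4))   # hypothetical layer name
df = readwrite.adata_to_df(adata, layer="demo")
print(df.shape)
```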
smftools/tools/apply_HMM.py
@@ -0,0 +1 @@
+# apply_HMM

smftools/tools/cluster.py
File without changes

smftools/tools/read_HMM.py
@@ -0,0 +1 @@
+# read_HMM
smftools/tools/subset_adata.py
@@ -0,0 +1,32 @@
+# subset_adata
+
+def subset_adata(adata, obs_columns):
+    """
+    Subsets an AnnData object based on categorical values in specified `.obs` columns.
+
+    Parameters:
+        adata (AnnData): The AnnData object to subset.
+        obs_columns (list of str): List of `.obs` column names to subset by. The order matters.
+
+    Returns:
+        dict: A dictionary where keys are tuples of category values and values are corresponding AnnData subsets.
+    """
+
+    def subset_recursive(adata_subset, columns):
+        if not columns:
+            return {(): adata_subset}
+
+        current_column = columns[0]
+        categories = adata_subset.obs[current_column].cat.categories
+
+        subsets = {}
+        for cat in categories:
+            subset = adata_subset[adata_subset.obs[current_column] == cat]
+            subsets.update(subset_recursive(subset, columns[1:]))
+
+        return subsets
+
+    # Start the recursive subset process
+    subsets_dict = subset_recursive(adata, obs_columns)
+
+    return subsets_dict
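A hedged usage sketch, not part of the diff; the obs columns and categories are hypothetical. Each listed `.obs` column must be categorical, since the helper walks `.cat.categories` at every level.

```python
# Illustrative only: column names and categories are hypothetical.
import anndata as ad
import numpy as np
import pandas as pd
from smftools.tools.subset_adata import subset_adata

adata = ad.AnnData(
    X=np.zeros((4, 2)),
    obs=pd.DataFrame(
        {
            "Reference": pd.Categorical(["ref1", "ref1", "ref2", "ref2"]),
            "Sample_names": pd.Categorical(["s1", "s2", "s1", "s2"]),
        },
        index=[f"read{i}" for i in range(4)],
    ),
)

subsets = subset_adata(adata, ["Reference", "Sample_names"])
# Print whatever key/subset pairs the helper returns.
for key, subset in subsets.items():
    print(key, subset.shape)
```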
smftools/tools/train_HMM.py
@@ -0,0 +1,43 @@
+# train_HMM
+
+def train_HMM(adata, model_name='trained_HMM', save_hmm=False):
+    """
+
+    Parameters:
+        adata (AnnData): Input AnnData object
+        model_name (str): Name of the model
+        save_hmm (bool): Whether to save the model
+
+    """
+    import numpy as np
+    import anndata as ad
+    from pomegranate.distributions import Categorical
+    from pomegranate.hmm import DenseHMM
+
+    bound = Categorical([[0.95, 0.05]])
+    unbound = Categorical([[0.05, 0.95]])
+
+    edges = [[0.9, 0.1], [0.1, 0.9]]
+    starts = [0.5, 0.5]
+    ends = [0.5, 0.5]
+
+    model = DenseHMM([bound, unbound], edges=edges, starts=starts, ends=ends, max_iter=5, verbose=True)
+
+    # define training sets and labels
+    # Determine the number of reads to sample
+    n_sample = round(0.7 * adata.X.shape[0])
+    # Generate random indices
+    np.random.seed(0)
+    random_indices = np.random.choice(adata.shape[0], size=n_sample, replace=False)
+    # Subset the AnnData object using the random indices
+    training_adata_subsampled = adata[random_indices, :]
+    training_sequences = training_adata_subsampled.X
+
+    # Train the HMM without labeled data
+    model.fit(training_sequences, algorithm='baum-welch')
+
+    if save_hmm:
+        # Save the model to a file
+        model_json = model.to_json()
+        with open(f'{model_name}.json', 'w') as f:
+            f.write(model_json)
smftools-0.1.3.dist-info/METADATA
@@ -0,0 +1,94 @@
+Metadata-Version: 2.3
+Name: smftools
+Version: 0.1.3
+Summary: Single Molecule Footprinting Analysis in Python.
+Project-URL: Source, https://github.com/jkmckenna/smftools
+Project-URL: Documentation, https://smftools.readthedocs.io/
+Author: Joseph McKenna
+Maintainer-email: Joseph McKenna <jkmckenna@berkeley.edu>
+License-Expression: MIT
+License-File: LICENSE
+Keywords: anndata,chromatin-accessibility,machine-learning,nanopore,protein-dna-binding,single-locus,single-molecule-footprinting
+Classifier: Development Status :: 2 - Pre-Alpha
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Natural Language :: English
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Scientific/Engineering :: Bio-Informatics
+Classifier: Topic :: Scientific/Engineering :: Visualization
+Requires-Python: >=3.9
+Requires-Dist: anndata>=0.10.0
+Requires-Dist: biopython>=1.79
+Requires-Dist: cython>=0.29.28
+Requires-Dist: networkx>=3.2
+Requires-Dist: numpy<2,>=1.22.0
+Requires-Dist: pandas>=1.4.2
+Requires-Dist: pod5>=0.1.21
+Requires-Dist: pomegranate>1.0.0
+Requires-Dist: pyfaidx>=0.8.0
+Requires-Dist: pysam>=0.19.1
+Requires-Dist: scanpy>=1.9
+Requires-Dist: scikit-learn>=1.0.2
+Requires-Dist: scipy>=1.7.3
+Requires-Dist: seaborn>=0.11
+Requires-Dist: torch>=1.9.0
+Requires-Dist: tqdm
+Provides-Extra: docs
+Requires-Dist: ipython>=7.20; extra == 'docs'
+Requires-Dist: matplotlib!=3.6.1; extra == 'docs'
+Requires-Dist: myst-nb>=1; extra == 'docs'
+Requires-Dist: myst-parser>=2; extra == 'docs'
+Requires-Dist: nbsphinx>=0.9; extra == 'docs'
+Requires-Dist: readthedocs-sphinx-search; extra == 'docs'
+Requires-Dist: setuptools; extra == 'docs'
+Requires-Dist: sphinx-autodoc-typehints>=1.25.2; extra == 'docs'
+Requires-Dist: sphinx-book-theme>=1.1.0; extra == 'docs'
+Requires-Dist: sphinx-copybutton; extra == 'docs'
+Requires-Dist: sphinx-design; extra == 'docs'
+Requires-Dist: sphinx>=7; extra == 'docs'
+Requires-Dist: sphinxcontrib-bibtex; extra == 'docs'
+Requires-Dist: sphinxext-opengraph; extra == 'docs'
+Provides-Extra: tests
+Requires-Dist: pytest; extra == 'tests'
+Requires-Dist: pytest-cov; extra == 'tests'
+Description-Content-Type: text/markdown
+
+[](https://pypi.org/project/smftools)
+[](https://smftools.readthedocs.io/en/latest/?badge=latest)
+
+# smftools
+A Python tool for processing raw sequencing data derived from single molecule footprinting experiments into [anndata](https://anndata.readthedocs.io/en/latest/) objects. Additional functionality for preprocessing, analysis, and visualization.
+
+## Philosophy
+While most genomic data structures handle low-coverage data (<100X) along large references, smftools prioritizes high-coverage data (scalable to at least 1 million X coverage) of a few genomic loci at a time. This enables efficient data storage, rapid data operations, hierarchical metadata handling, seamless integration with various machine-learning packages, and ease of visualization. Furthermore, functionality is modularized, enabling analysis sessions to be saved, reloaded, and easily shared with collaborators. Analyses are centered around the [anndata](https://anndata.readthedocs.io/en/latest/) object, and are heavily inspired by the work conducted within the single-cell genomics community.
+
+## Dependencies
+The following CLI tools need to be installed and configured before using the informatics (smftools.inform) module of smftools:
+1) [Dorado](https://github.com/nanoporetech/dorado) -> For standard/modified basecalling and alignment. Can be attained by downloading and configuring nanopore MinKnow software.
+2) [Samtools](https://github.com/samtools/samtools) -> For working with SAM/BAM files
+3) [Minimap2](https://github.com/lh3/minimap2) -> The aligner used by Dorado
+4) [Modkit](https://github.com/nanoporetech/modkit) -> Extracting summary statistics and read level methylation calls from modified BAM files
+5) [Bedtools](https://github.com/arq5x/bedtools2) -> For generating Bedgraphs from BAM alignment files.
+6) [BedGraphToBigWig](https://genome.ucsc.edu/goldenPath/help/bigWig.html) -> For converting BedGraphs to BigWig files for IGV sessions.
+
+## Modules
+### Informatics: Processes raw Nanopore/Illumina data from SMF experiments into an AnnData object.
+
+### Preprocessing: Appends QC metrics to the AnnData object and perfroms filtering.
+
+- Tools: Appends various analyses to the AnnData object.
+- Plotting: Visualization of analyses stored within the AnnData object.
+
+## Announcements
+### 09/09/24 - The pre-alpha phase package ([smftools-0.1.1](https://pypi.org/project/smftools/))
+The informatics module has been bumped to alpha-phase status. This module can deal with POD5s and unaligned BAMS from nanopore conversion and direct SMF experiments, as well as FASTQs from Illumina conversion SMF experiments. Primary output from this module is an AnnData object containing all relevant SMF data, which is compatible with all downstream smftools modules. The other modules are still in pre-alpha phase. Preprocessing, Tools, and Plotting modules should be promoted to alpha-phase within the next month or so.
+
+### 08/30/24 - The pre-alpha phase package ([smftools-0.1.0](https://pypi.org/project/smftools/)) is installable through pypi!
+Currently, this package (smftools-0.1.0) is going through rapid improvement (dependency handling accross Linux and Mac OS, testing, documentation, debugging) and is still too early in development for standard use. The underlying functionality was originally developed as a collection of scripts for single molecule footprinting (SMF) experiments in our lab, but is being packaged/developed to facilitate the expansion of SMF to any lab that is interested in performing these styles of experiments/analyses. The alpha-phase package is expected to be available within a couple months, so stay tuned!