smftools 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- smftools/__init__.py +29 -0
- smftools/_settings.py +20 -0
- smftools/_version.py +1 -0
- smftools/datasets/F1_hybrid_NKG2A_enhander_promoter_GpC_conversion_SMF.h5ad.gz +0 -0
- smftools/datasets/F1_sample_sheet.csv +5 -0
- smftools/datasets/__init__.py +9 -0
- smftools/datasets/dCas9_m6A_invitro_kinetics.h5ad.gz +0 -0
- smftools/datasets/datasets.py +28 -0
- smftools/informatics/__init__.py +16 -0
- smftools/informatics/archived/bam_conversion.py +59 -0
- smftools/informatics/archived/bam_direct.py +63 -0
- smftools/informatics/archived/basecalls_to_adata.py +71 -0
- smftools/informatics/archived/print_bam_query_seq.py +29 -0
- smftools/informatics/basecall_pod5s.py +80 -0
- smftools/informatics/conversion_smf.py +132 -0
- smftools/informatics/direct_smf.py +137 -0
- smftools/informatics/fast5_to_pod5.py +21 -0
- smftools/informatics/helpers/LoadExperimentConfig.py +75 -0
- smftools/informatics/helpers/__init__.py +74 -0
- smftools/informatics/helpers/align_and_sort_BAM.py +59 -0
- smftools/informatics/helpers/aligned_BAM_to_bed.py +74 -0
- smftools/informatics/helpers/archived/informatics.py +260 -0
- smftools/informatics/helpers/archived/load_adata.py +516 -0
- smftools/informatics/helpers/bam_qc.py +66 -0
- smftools/informatics/helpers/bed_to_bigwig.py +39 -0
- smftools/informatics/helpers/binarize_converted_base_identities.py +79 -0
- smftools/informatics/helpers/canoncall.py +34 -0
- smftools/informatics/helpers/complement_base_list.py +21 -0
- smftools/informatics/helpers/concatenate_fastqs_to_bam.py +55 -0
- smftools/informatics/helpers/converted_BAM_to_adata.py +245 -0
- smftools/informatics/helpers/converted_BAM_to_adata_II.py +369 -0
- smftools/informatics/helpers/count_aligned_reads.py +43 -0
- smftools/informatics/helpers/demux_and_index_BAM.py +52 -0
- smftools/informatics/helpers/extract_base_identities.py +44 -0
- smftools/informatics/helpers/extract_mods.py +83 -0
- smftools/informatics/helpers/extract_read_features_from_bam.py +31 -0
- smftools/informatics/helpers/extract_read_lengths_from_bed.py +25 -0
- smftools/informatics/helpers/extract_readnames_from_BAM.py +22 -0
- smftools/informatics/helpers/find_conversion_sites.py +50 -0
- smftools/informatics/helpers/generate_converted_FASTA.py +99 -0
- smftools/informatics/helpers/get_chromosome_lengths.py +32 -0
- smftools/informatics/helpers/get_native_references.py +28 -0
- smftools/informatics/helpers/index_fasta.py +12 -0
- smftools/informatics/helpers/make_dirs.py +21 -0
- smftools/informatics/helpers/make_modbed.py +27 -0
- smftools/informatics/helpers/modQC.py +27 -0
- smftools/informatics/helpers/modcall.py +36 -0
- smftools/informatics/helpers/modkit_extract_to_adata.py +884 -0
- smftools/informatics/helpers/ohe_batching.py +76 -0
- smftools/informatics/helpers/ohe_layers_decode.py +32 -0
- smftools/informatics/helpers/one_hot_decode.py +27 -0
- smftools/informatics/helpers/one_hot_encode.py +57 -0
- smftools/informatics/helpers/plot_read_length_and_coverage_histograms.py +53 -0
- smftools/informatics/helpers/run_multiqc.py +28 -0
- smftools/informatics/helpers/separate_bam_by_bc.py +43 -0
- smftools/informatics/helpers/split_and_index_BAM.py +36 -0
- smftools/informatics/load_adata.py +182 -0
- smftools/informatics/readwrite.py +106 -0
- smftools/informatics/subsample_fasta_from_bed.py +47 -0
- smftools/informatics/subsample_pod5.py +104 -0
- smftools/plotting/__init__.py +15 -0
- smftools/plotting/classifiers.py +355 -0
- smftools/plotting/general_plotting.py +205 -0
- smftools/plotting/position_stats.py +462 -0
- smftools/preprocessing/__init__.py +33 -0
- smftools/preprocessing/append_C_context.py +82 -0
- smftools/preprocessing/archives/mark_duplicates.py +146 -0
- smftools/preprocessing/archives/preprocessing.py +614 -0
- smftools/preprocessing/archives/remove_duplicates.py +21 -0
- smftools/preprocessing/binarize_on_Youden.py +45 -0
- smftools/preprocessing/binary_layers_to_ohe.py +40 -0
- smftools/preprocessing/calculate_complexity.py +72 -0
- smftools/preprocessing/calculate_consensus.py +47 -0
- smftools/preprocessing/calculate_converted_read_methylation_stats.py +94 -0
- smftools/preprocessing/calculate_coverage.py +42 -0
- smftools/preprocessing/calculate_pairwise_differences.py +49 -0
- smftools/preprocessing/calculate_pairwise_hamming_distances.py +27 -0
- smftools/preprocessing/calculate_position_Youden.py +115 -0
- smftools/preprocessing/calculate_read_length_stats.py +79 -0
- smftools/preprocessing/clean_NaN.py +46 -0
- smftools/preprocessing/filter_adata_by_nan_proportion.py +31 -0
- smftools/preprocessing/filter_converted_reads_on_methylation.py +44 -0
- smftools/preprocessing/filter_reads_on_length.py +51 -0
- smftools/preprocessing/flag_duplicate_reads.py +149 -0
- smftools/preprocessing/invert_adata.py +30 -0
- smftools/preprocessing/load_sample_sheet.py +38 -0
- smftools/preprocessing/make_dirs.py +21 -0
- smftools/preprocessing/min_non_diagonal.py +25 -0
- smftools/preprocessing/recipes.py +127 -0
- smftools/preprocessing/subsample_adata.py +58 -0
- smftools/readwrite.py +198 -0
- smftools/tools/__init__.py +49 -0
- smftools/tools/apply_hmm.py +202 -0
- smftools/tools/apply_hmm_batched.py +241 -0
- smftools/tools/archived/classify_methylated_features.py +66 -0
- smftools/tools/archived/classify_non_methylated_features.py +75 -0
- smftools/tools/archived/subset_adata_v1.py +32 -0
- smftools/tools/archived/subset_adata_v2.py +46 -0
- smftools/tools/calculate_distances.py +18 -0
- smftools/tools/calculate_umap.py +62 -0
- smftools/tools/call_hmm_peaks.py +105 -0
- smftools/tools/classifiers.py +787 -0
- smftools/tools/cluster_adata_on_methylation.py +105 -0
- smftools/tools/data/__init__.py +2 -0
- smftools/tools/data/anndata_data_module.py +90 -0
- smftools/tools/data/preprocessing.py +6 -0
- smftools/tools/display_hmm.py +18 -0
- smftools/tools/evaluation/__init__.py +0 -0
- smftools/tools/general_tools.py +69 -0
- smftools/tools/hmm_readwrite.py +16 -0
- smftools/tools/inference/__init__.py +1 -0
- smftools/tools/inference/lightning_inference.py +41 -0
- smftools/tools/models/__init__.py +9 -0
- smftools/tools/models/base.py +14 -0
- smftools/tools/models/cnn.py +34 -0
- smftools/tools/models/lightning_base.py +41 -0
- smftools/tools/models/mlp.py +17 -0
- smftools/tools/models/positional.py +17 -0
- smftools/tools/models/rnn.py +16 -0
- smftools/tools/models/sklearn_models.py +40 -0
- smftools/tools/models/transformer.py +133 -0
- smftools/tools/models/wrappers.py +20 -0
- smftools/tools/nucleosome_hmm_refinement.py +104 -0
- smftools/tools/position_stats.py +239 -0
- smftools/tools/read_stats.py +70 -0
- smftools/tools/subset_adata.py +28 -0
- smftools/tools/train_hmm.py +78 -0
- smftools/tools/training/__init__.py +1 -0
- smftools/tools/training/train_lightning_model.py +47 -0
- smftools/tools/utils/__init__.py +2 -0
- smftools/tools/utils/device.py +10 -0
- smftools/tools/utils/grl.py +14 -0
- {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/METADATA +5 -2
- smftools-0.1.7.dist-info/RECORD +136 -0
- smftools-0.1.6.dist-info/RECORD +0 -4
- {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/WHEEL +0 -0
- {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/licenses/LICENSE +0 -0
smftools/informatics/subsample_fasta_from_bed.py
@@ -0,0 +1,47 @@
+# subsample_fasta_from_bed
+
+def subsample_fasta_from_bed(input_FASTA, input_bed, output_directory, output_FASTA):
+    """
+    Takes a genome-wide FASTA file and a BED file containing coordinate windows of interest. Outputs a subsampled FASTA.
+
+    Parameters:
+        input_FASTA (str): String representing the path to the input FASTA file.
+        input_bed (str): String representing the path to the input BED file.
+        output_directory (str): String representing the path to the output directory for the new FASTA file.
+        output_FASTA (str): Name of the output FASTA.
+
+    Returns:
+        None
+    """
+    from pyfaidx import Fasta
+    import os
+
+    # Load the FASTA file using pyfaidx
+    fasta = Fasta(input_FASTA)
+
+    output_FASTA_path = os.path.join(output_directory, output_FASTA)
+
+    # Open the BED file
+    with open(input_bed, 'r') as bed, open(output_FASTA_path, 'w') as out_fasta:
+        for line in bed:
+            # Each BED line contains: chrom, start, end (and possibly more columns)
+            fields = line.strip().split()
+            n_fields = len(fields)
+            chrom = fields[0]
+            start = int(fields[1])  # BED is 0-based
+            end = int(fields[2])  # BED is 0-based and end is exclusive
+            if n_fields > 3:
+                description = " ".join(fields[3:])
+
+            # Check if the chromosome exists in the FASTA file
+            if chrom in fasta:
+                # pyfaidx slicing is 0-based and end-exclusive, so BED coordinates can be used directly
+                sequence = fasta[chrom][start:end].seq
+                # Write the sequence to the output FASTA file
+                if n_fields > 3:
+                    out_fasta.write(f">{chrom}:{start}-{end} {description}\n")
+                else:
+                    out_fasta.write(f">{chrom}:{start}-{end}\n")
+                out_fasta.write(f"{sequence}\n")
+            else:
+                print(f"Warning: {chrom} not found in the FASTA file")
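
A minimal usage sketch (module-path import; the file names are hypothetical, and the output directory must already exist since the function does not create it):

import os
from smftools.informatics.subsample_fasta_from_bed import subsample_fasta_from_bed

os.makedirs('subsampled', exist_ok=True)  # create the output directory up front
subsample_fasta_from_bed(
    input_FASTA='genome.fa',
    input_bed='regions_of_interest.bed',
    output_directory='subsampled',
    output_FASTA='regions_of_interest.fa',
)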
smftools/informatics/subsample_pod5.py
@@ -0,0 +1,104 @@
+# subsample_pod5
+
+def subsample_pod5(pod5_path, read_name_path, output_directory):
+    """
+    Takes a POD5 file and a text file containing read names of interest and writes out a subsampled POD5 for just those reads.
+    This is useful when you have a list of read names that mapped to a region of interest and want to reanalyze those reads from the POD5 level.
+
+    Parameters:
+        pod5_path (str): File path to the POD5 file (or directory of multiple POD5 files) to subsample.
+        read_name_path (str | int): File path to a text file of read names, one read name per line. If an int is passed, that many reads are randomly subsampled instead.
+        output_directory (str): File path to the directory to write the output file to.
+
+    Returns:
+        None
+    """
+    import pod5 as p5
+    import os
+
+    if os.path.isdir(pod5_path):
+        pod5_path_is_dir = True
+        input_pod5_base = 'input_pod5s.pod5'
+        files = os.listdir(pod5_path)
+        pod5_files = [os.path.join(pod5_path, file) for file in files if '.pod5' in file]
+        pod5_files.sort()
+        print(f'Found input pod5s: {pod5_files}')
+
+    elif os.path.exists(pod5_path):
+        pod5_path_is_dir = False
+        input_pod5_base = os.path.basename(pod5_path)
+
+    else:
+        print('Error: pod5_path passed does not exist')
+        return None
+
+    if isinstance(read_name_path, str):
+        input_read_name_base = os.path.basename(read_name_path)
+        output_base = input_pod5_base.split('.pod5')[0] + '_' + input_read_name_base.split('.txt')[0] + '_subsampled.pod5'
+
+        # Extract read names into a list of strings
+        with open(read_name_path, 'r') as file:
+            read_names = [line.strip() for line in file]
+
+        print(f'Looking for read_ids: {read_names}')
+        read_records = []
+
+        if pod5_path_is_dir:
+            for input_pod5 in pod5_files:
+                with p5.Reader(input_pod5) as reader:
+                    try:
+                        for read_record in reader.reads(selection=read_names, missing_ok=True):
+                            read_records.append(read_record.to_read())
+                            print(f'Found read in {input_pod5}: {read_record.read_id}')
+                    except Exception:
+                        print('Skipping pod5, could not find reads')
+        else:
+            with p5.Reader(pod5_path) as reader:
+                try:
+                    for read_record in reader.reads(selection=read_names, missing_ok=True):
+                        read_records.append(read_record.to_read())
+                        print(f'Found read in {pod5_path}: {read_record.read_id}')
+                except Exception:
+                    print('Could not find reads')
+
+    elif isinstance(read_name_path, int):
+        import random
+        output_base = input_pod5_base.split('.pod5')[0] + f'_{read_name_path}_randomly_subsampled.pod5'
+        all_read_records = []
+
+        if pod5_path_is_dir:
+            # Shuffle the list of input pod5 paths
+            random.shuffle(pod5_files)
+            for input_pod5 in pod5_files:
+                # Iterate over the input pod5s
+                print(f'Opening pod5 file {input_pod5}')
+                with p5.Reader(input_pod5) as reader:
+                    for read_record in reader.reads():
+                        all_read_records.append(read_record.to_read())
+                        # When enough reads are in all_read_records, stop accumulating reads
+                        if len(all_read_records) >= read_name_path:
+                            break
+                # Stop opening further pod5 files once enough reads are collected
+                if len(all_read_records) >= read_name_path:
+                    break
+
+            if read_name_path <= len(all_read_records):
+                read_records = random.sample(all_read_records, read_name_path)
+            else:
+                print('Trying to sample more reads than are contained in the input pod5s, taking all reads')
+                read_records = all_read_records
+
+        else:
+            with p5.Reader(pod5_path) as reader:
+                # Get all read records from the input pod5
+                for read_record in reader.reads():
+                    all_read_records.append(read_record.to_read())
+            if read_name_path <= len(all_read_records):
+                # If the subsampling amount is at most the record count in the file, randomly subsample the reads
+                read_records = random.sample(all_read_records, read_name_path)
+            else:
+                print('Trying to sample more reads than are contained in the input pod5s, taking all reads')
+                read_records = all_read_records
+
+    output_pod5 = os.path.join(output_directory, output_base)
+
+    # Write the subsampled POD5
+    with p5.Writer(output_pod5) as writer:
+        writer.add_reads(read_records)
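
A minimal usage sketch covering both modes (paths and the read count are hypothetical; the output directory must already exist):

from smftools.informatics.subsample_pod5 import subsample_pod5

# Mode 1: keep only the reads named in a text file (one read ID per line)
subsample_pod5('run_pod5s/', 'region_read_names.txt', 'subsampled_pod5s/')

# Mode 2: randomly subsample 1000 reads from the same input
subsample_pod5('run_pod5s/', 1000, 'subsampled_pod5s/')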
smftools/plotting/__init__.py
@@ -0,0 +1,15 @@
+from .position_stats import plot_bar_relative_risk, plot_volcano_relative_risk, plot_positionwise_matrix, plot_positionwise_matrix_grid
+from .general_plotting import combined_hmm_raw_clustermap
+from .classifiers import plot_model_performance, plot_feature_importances_or_saliency, plot_model_curves_from_adata, plot_model_curves_from_adata_with_frequency_grid
+
+__all__ = [
+    "combined_hmm_raw_clustermap",
+    "plot_bar_relative_risk",
+    "plot_positionwise_matrix",
+    "plot_positionwise_matrix_grid",
+    "plot_volcano_relative_risk",
+    "plot_feature_importances_or_saliency",
+    "plot_model_performance",
+    "plot_model_curves_from_adata",
+    "plot_model_curves_from_adata_with_frequency_grid"
+]
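
These re-exports put the plotting API on the subpackage itself, so (assuming smftools is installed) either import style works:

from smftools import plotting as pl          # pl.plot_model_performance(...)
from smftools.plotting import plot_model_performance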
smftools/plotting/classifiers.py
@@ -0,0 +1,355 @@
+
+import numpy as np
+import matplotlib.pyplot as plt
+import torch
+import os
+
+def plot_model_performance(metrics, save_path=None):
+    import matplotlib.pyplot as plt
+    import os
+    for ref in metrics.keys():
+        plt.figure(figsize=(12, 5))
+
+        # ROC Curve
+        plt.subplot(1, 2, 1)
+        for model_name, vals in metrics[ref].items():
+            model_type = model_name.split('_')[0]
+            data_type = model_name.split(f"{model_type}_")[1]
+            plt.plot(vals['fpr'], vals['tpr'], label=f"{model_type.upper()} - AUC: {vals['auc']:.4f}")
+        plt.xlabel('False Positive Rate')
+        plt.ylabel('True Positive Rate')
+        plt.title(f'{data_type} ROC Curve ({ref})')
+        plt.legend()
+
+        # PR Curve
+        plt.subplot(1, 2, 2)
+        for model_name, vals in metrics[ref].items():
+            model_type = model_name.split('_')[0]
+            data_type = model_name.split(f"{model_type}_")[1]
+            plt.plot(vals['recall'], vals['precision'], label=f"{model_type.upper()} - F1: {vals['f1']:.4f}")
+        plt.xlabel('Recall')
+        plt.ylabel('Precision')
+        plt.title(f'{data_type} Precision-Recall Curve ({ref})')
+        plt.legend()
+
+        plt.tight_layout()
+
+        if save_path:
+            save_name = f"{ref}"
+            os.makedirs(save_path, exist_ok=True)
+            safe_name = save_name.replace("=", "").replace("__", "_").replace(",", "_")
+            out_file = os.path.join(save_path, f"{safe_name}.png")
+            plt.savefig(out_file, dpi=300)
+            print(f"📁 Saved: {out_file}")
+        plt.show()
+
+        # Confusion Matrices
+        for model_name, vals in metrics[ref].items():
+            print(f"Confusion Matrix for {ref} - {model_name.upper()}:")
+            print(vals['confusion_matrix'])
+            print()
+
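
plot_model_performance reads everything it plots from a nested dict: metrics[ref]['<model_type>_<data_type>'] must carry fpr/tpr/auc, precision/recall/f1, and a confusion matrix. A sketch of assembling one entry with scikit-learn (the labels, probabilities, and key names below are synthetic and purely illustrative):

import numpy as np
from sklearn.metrics import roc_curve, precision_recall_curve, auc, f1_score, confusion_matrix
from smftools.plotting import plot_model_performance

y_true = np.array([0, 1, 1, 0, 1, 0, 1, 0])                   # illustrative labels
y_prob = np.array([0.2, 0.8, 0.6, 0.4, 0.9, 0.1, 0.7, 0.3])   # illustrative model scores
y_pred = (y_prob >= 0.5).astype(int)

fpr, tpr, _ = roc_curve(y_true, y_prob)
precision, recall, _ = precision_recall_curve(y_true, y_prob)

metrics = {
    'my_reference': {                    # one figure per reference
        'rf_GpC_site': {                 # parsed as '<model_type>_<data_type>'
            'fpr': fpr, 'tpr': tpr, 'auc': auc(fpr, tpr),
            'precision': precision, 'recall': recall,
            'f1': f1_score(y_true, y_pred),
            'confusion_matrix': confusion_matrix(y_true, y_pred),
        }
    }
}
plot_model_performance(metrics)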
+def plot_feature_importances_or_saliency(
+    models,
+    positions,
+    tensors,
+    site_config,
+    adata=None,
+    layer_name=None,
+    save_path=None,
+    shaded_regions=None
+):
+    import torch
+    import numpy as np
+    import matplotlib.pyplot as plt
+    import os
+
+    # Select device for NN models
+    device = (
+        torch.device('cuda') if torch.cuda.is_available() else
+        torch.device('mps') if torch.backends.mps.is_available() else
+        torch.device('cpu')
+    )
+
+    for ref, model_dict in models.items():
+        if layer_name:
+            suffix = layer_name
+        else:
+            suffix = "_".join(site_config[ref]) if ref in site_config else "full"
+
+        if ref not in positions or suffix not in positions[ref]:
+            print(f"Positions not found for {ref} with suffix {suffix}. Skipping {ref}.")
+            continue
+
+        coords_index = positions[ref][suffix]
+        coords = coords_index.astype(int)
+
+        # Classify positions using adata.var columns
+        cpg_sites = set()
+        gpc_sites = set()
+        other_sites = set()
+
+        if adata is None:
+            print("⚠️ AnnData object is required to classify site types. Skipping site type markers.")
+        else:
+            gpc_col = f"{ref}_GpC_site"
+            cpg_col = f"{ref}_CpG_site"
+            for idx_str in coords_index:
+                try:
+                    gpc = adata.var.at[idx_str, gpc_col] if gpc_col in adata.var.columns else False
+                    cpg = adata.var.at[idx_str, cpg_col] if cpg_col in adata.var.columns else False
+                    coord_int = int(idx_str)
+                    if gpc and not cpg:
+                        gpc_sites.add(coord_int)
+                    elif cpg and not gpc:
+                        cpg_sites.add(coord_int)
+                    else:
+                        other_sites.add(coord_int)
+                except KeyError:
+                    print(f"⚠️ Index '{idx_str}' not found in adata.var. Skipping.")
+                    continue
+
+        for model_key, model in model_dict.items():
+            if not model_key.endswith(suffix):
+                continue
+
+            if model_key.startswith("rf"):
+                if hasattr(model, "feature_importances_"):
+                    importances = model.feature_importances_
+                else:
+                    print(f"Random Forest model {model_key} has no feature_importances_. Skipping.")
+                    continue
+                plot_title = f"RF Feature Importances for {ref} ({model_key})"
+                y_label = "Feature Importance"
+            else:
+                if tensors is None or ref not in tensors or suffix not in tensors[ref]:
+                    print(f"No input data provided for NN saliency for {model_key}. Skipping.")
+                    continue
+                input_tensor = tensors[ref][suffix]
+                model.eval()
+                input_tensor = input_tensor.to(device)
+                input_tensor.requires_grad_()
+
+                with torch.enable_grad():
+                    logits = model(input_tensor)
+                    score = logits[:, 1].sum()
+                    score.backward()
+                saliency = input_tensor.grad.abs().mean(dim=0).cpu().numpy()
+                importances = saliency
+                plot_title = f"Feature Saliency for {ref} ({model_key})"
+                y_label = "Feature Saliency"
+
+            sorted_idx = np.argsort(coords)
+            positions_sorted = coords[sorted_idx]
+            importances_sorted = np.array(importances)[sorted_idx]
+
+            plt.figure(figsize=(12, 4))
+            for pos, imp in zip(positions_sorted, importances_sorted):
+                if pos in cpg_sites:
+                    plt.plot(pos, imp, marker='*', color='black', markersize=10, linestyle='None',
+                             label='CpG site' if 'CpG site' not in plt.gca().get_legend_handles_labels()[1] else "")
+                elif pos in gpc_sites:
+                    plt.plot(pos, imp, marker='o', color='blue', markersize=6, linestyle='None',
+                             label='GpC site' if 'GpC site' not in plt.gca().get_legend_handles_labels()[1] else "")
+                else:
+                    plt.plot(pos, imp, marker='.', color='gray', linestyle='None',
+                             label='Other' if 'Other' not in plt.gca().get_legend_handles_labels()[1] else "")
+
+            plt.plot(positions_sorted, importances_sorted, linestyle='-', alpha=0.5, color='black')
+
+            if shaded_regions:
+                for (start, end) in shaded_regions:
+                    plt.axvspan(start, end, color='gray', alpha=0.3)
+
+            plt.xlabel("Genomic Position")
+            plt.ylabel(y_label)
+            plt.title(plot_title)
+            plt.grid(True)
+            plt.legend()
+            plt.tight_layout()
+
+            if save_path:
+                os.makedirs(save_path, exist_ok=True)
+                safe_name = plot_title.replace("=", "").replace("__", "_").replace(",", "_").replace(" ", "_")
+                out_file = os.path.join(save_path, f"{safe_name}.png")
+                plt.savefig(out_file, dpi=300)
+                print(f"📁 Saved: {out_file}")
+
+            plt.show()
+
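
The inputs mirror the nesting used above: models[ref][model_key], positions[ref][suffix] (an index of var positions castable to int), tensors[ref][suffix] (the NN input batch), and an AnnData whose var carries the {ref}_GpC_site / {ref}_CpG_site columns. A self-contained toy call (every object below is synthetic; note the function moves the input tensor to the best available device, so the model must already live there):

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import anndata as ad
from smftools.plotting import plot_feature_importances_or_saliency

ref, suffix = 'my_reference', 'GpC_site_CpG_site'
pos_labels = ['100', '153', '207', '260']

# Minimal AnnData holding the site-type annotations the function looks up
var = pd.DataFrame({f'{ref}_GpC_site': [True, False, True, False],
                    f'{ref}_CpG_site': [False, True, False, False]}, index=pos_labels)
adata = ad.AnnData(X=np.zeros((8, 4)), var=var)

device = ('cuda' if torch.cuda.is_available() else
          'mps' if torch.backends.mps.is_available() else 'cpu')
toy_model = nn.Sequential(nn.Linear(4, 2)).to(device)  # stands in for a trained 2-class network

models    = {ref: {f'mlp_{suffix}': toy_model}}
positions = {ref: {suffix: pd.Index(pos_labels)}}
tensors   = {ref: {suffix: torch.randn(16, 4)}}        # batch x positions

plot_feature_importances_or_saliency(models, positions, tensors,
                                     site_config={ref: ['GpC_site', 'CpG_site']},
                                     adata=adata)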
+def plot_model_curves_from_adata(
+    adata,
+    label_col='activity_status',
+    model_names=["cnn", "mlp", "rf"],
+    suffix='GpC_site_CpG_site',
+    omit_training=True,
+    save_path=None,
+    ylim_roc=(0.0, 1.05),
+    ylim_pr=(0.0, 1.05)):
+
+    from sklearn.metrics import precision_recall_curve, roc_curve, auc
+    import matplotlib.pyplot as plt
+    import seaborn as sns
+
+    if omit_training:
+        subset = adata[adata.obs['used_for_training'].astype(bool) == False]
+    else:
+        subset = adata
+
+    label = subset.obs[label_col].map({'Active': 1, 'Silent': 0}).values
+
+    positive_ratio = np.sum(label.astype(int)) / len(label)
+
+    plt.figure(figsize=(12, 5))
+
+    # ROC curve
+    plt.subplot(1, 2, 1)
+    for model in model_names:
+        prob_col = f"{model}_active_prob_{suffix}"
+        if prob_col in subset.obs.columns:
+            probs = subset.obs[prob_col].astype(float).values
+            fpr, tpr, _ = roc_curve(label, probs)
+            roc_auc = auc(fpr, tpr)
+            plt.plot(fpr, tpr, label=f"{model.upper()} (AUC={roc_auc:.4f})")
+
+    plt.plot([0, 1], [0, 1], 'k--', alpha=0.5)
+    plt.xlabel("False Positive Rate")
+    plt.ylabel("True Positive Rate")
+    plt.title("ROC Curve")
+    plt.ylim(*ylim_roc)
+    plt.legend()
+
+    # PR curve
+    plt.subplot(1, 2, 2)
+    for model in model_names:
+        prob_col = f"{model}_active_prob_{suffix}"
+        if prob_col in subset.obs.columns:
+            probs = subset.obs[prob_col].astype(float).values
+            precision, recall, _ = precision_recall_curve(label, probs)
+            pr_auc = auc(recall, precision)
+            plt.plot(recall, precision, label=f"{model.upper()} (AUC={pr_auc:.4f})")
+
+    plt.xlabel("Recall")
+    plt.ylabel("Precision")
+    plt.ylim(*ylim_pr)
+    plt.axhline(y=positive_ratio, linestyle='--', color='gray', label='Random Baseline')
+    plt.title("Precision-Recall Curve")
+    plt.legend()
+
+    plt.tight_layout()
+    if save_path:
+        save_name = f"ROC_PR_curves"
+        os.makedirs(save_path, exist_ok=True)
+        safe_name = save_name.replace("=", "").replace("__", "_").replace(",", "_")
+        out_file = os.path.join(save_path, f"{safe_name}.png")
+        plt.savefig(out_file, dpi=300)
+        print(f"📁 Saved: {out_file}")
+    plt.show()
+
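
plot_model_curves_from_adata pulls everything from adata.obs: the label column ('Active'/'Silent'), a boolean used_for_training flag, and one '<model>_active_prob_<suffix>' probability column per model. A self-contained call on synthetic data (all values are random placeholders):

import numpy as np
import pandas as pd
import anndata as ad
from smftools.plotting import plot_model_curves_from_adata

rng = np.random.default_rng(0)
n = 200
obs = pd.DataFrame({
    'activity_status': rng.choice(['Active', 'Silent'], size=n),
    'used_for_training': rng.random(n) < 0.5,
    'rf_active_prob_GpC_site_CpG_site': rng.random(n),  # matches the default suffix
})
adata = ad.AnnData(X=np.zeros((n, 1)), obs=obs)

plot_model_curves_from_adata(adata, model_names=['rf'])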
+def plot_model_curves_from_adata_with_frequency_grid(
+    adata,
+    label_col='activity_status',
+    model_names=["cnn", "mlp", "rf"],
+    suffix='GpC_site_CpG_site',
+    omit_training=True,
+    save_path=None,
+    ylim_roc=(0.0, 1.05),
+    ylim_pr=(0.0, 1.05),
+    pos_sample_count=500,
+    pos_freq_list=[0.01, 0.05, 0.1],
+    show_f1_iso_curves=False,
+    f1_levels=None):
+    import numpy as np
+    import matplotlib.pyplot as plt
+    import seaborn as sns
+    import os
+    from sklearn.metrics import precision_recall_curve, roc_curve, auc
+
+    if f1_levels is None:
+        f1_levels = np.linspace(0.2, 0.9, 8)
+
+    if omit_training:
+        subset = adata[adata.obs['used_for_training'].astype(bool) == False]
+    else:
+        subset = adata
+
+    label = subset.obs[label_col].map({'Active': 1, 'Silent': 0}).values
+    subset = subset.copy()
+    subset.obs["__label__"] = label
+
+    pos_indices = np.where(label == 1)[0]
+    neg_indices = np.where(label == 0)[0]
+
+    n_rows = len(pos_freq_list)
+    fig, axes = plt.subplots(n_rows, 2, figsize=(12, 5 * n_rows))
+    fig.suptitle(f'{suffix} Performance metrics')
+
+    for row_idx, pos_freq in enumerate(pos_freq_list):
+        desired_total = int(pos_sample_count / pos_freq)
+        neg_sample_count = desired_total - pos_sample_count
+
+        if pos_sample_count > len(pos_indices) or neg_sample_count > len(neg_indices):
+            print(f"⚠️ Skipping frequency {pos_freq:.3f}: not enough samples.")
+            continue
+
+        sampled_pos = np.random.choice(pos_indices, size=pos_sample_count, replace=False)
+        sampled_neg = np.random.choice(neg_indices, size=neg_sample_count, replace=False)
+        sampled_indices = np.concatenate([sampled_pos, sampled_neg])
+
+        data_sampled = subset[sampled_indices]
+        y_true = data_sampled.obs["__label__"].values
+
+        ax_roc = axes[row_idx, 0] if n_rows > 1 else axes[0]
+        ax_pr = axes[row_idx, 1] if n_rows > 1 else axes[1]
+
+        # ROC Curve
+        for model in model_names:
+            prob_col = f"{model}_active_prob_{suffix}"
+            if prob_col in data_sampled.obs.columns:
+                probs = data_sampled.obs[prob_col].astype(float).values
+                fpr, tpr, _ = roc_curve(y_true, probs)
+                roc_auc = auc(fpr, tpr)
+                ax_roc.plot(fpr, tpr, label=f"{model.upper()} (AUC={roc_auc:.4f})")
+        ax_roc.plot([0, 1], [0, 1], 'k--', alpha=0.5)
+        ax_roc.set_xlabel("False Positive Rate")
+        ax_roc.set_ylabel("True Positive Rate")
+        ax_roc.set_ylim(*ylim_roc)
+        ax_roc.set_title(f"ROC Curve (Pos Freq: {pos_freq:.2%})")
+        ax_roc.legend()
+        ax_roc.spines['top'].set_visible(False)
+        ax_roc.spines['right'].set_visible(False)
+
+        # PR Curve
+        for model in model_names:
+            prob_col = f"{model}_active_prob_{suffix}"
+            if prob_col in data_sampled.obs.columns:
+                probs = data_sampled.obs[prob_col].astype(float).values
+                precision, recall, _ = precision_recall_curve(y_true, probs)
+                pr_auc = auc(recall, precision)
+                ax_pr.plot(recall, precision, label=f"{model.upper()} (AUC={pr_auc:.4f})")
+        ax_pr.axhline(y=pos_freq, linestyle='--', color='gray', label='Random Baseline')
+
+        if show_f1_iso_curves:
+            recall_vals = np.linspace(0.01, 1, 500)
+            for f1 in f1_levels:
+                precision_vals = (f1 * recall_vals) / (2 * recall_vals - f1)
+                precision_vals[precision_vals < 0] = np.nan  # Avoid plotting invalid values
+                ax_pr.plot(recall_vals, precision_vals, color='gray', linestyle=':', linewidth=1, alpha=0.6)
+                x_val = 0.9
+                y_val = (f1 * x_val) / (2 * x_val - f1)
+                if 0 < y_val < 1:
+                    ax_pr.text(x_val, y_val, f"F1={f1:.1f}", fontsize=8, color='gray')
+
+        ax_pr.set_xlabel("Recall")
+        ax_pr.set_ylabel("Precision")
+        ax_pr.set_ylim(*ylim_pr)
+        ax_pr.set_title(f"PR Curve (Pos Freq: {pos_freq:.2%})")
+        ax_pr.legend()
+        ax_pr.spines['top'].set_visible(False)
+        ax_pr.spines['right'].set_visible(False)
+
+    plt.tight_layout(rect=[0, 0, 1, 0.97])
+    if save_path:
+        os.makedirs(save_path, exist_ok=True)
+        out_file = os.path.join(save_path, "ROC_PR_grid.png")
+        plt.savefig(out_file, dpi=300)
+        print(f"📁 Saved: {out_file}")
+    plt.show()
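
The frequency-grid variant resamples the held-out reads to fixed positive-class frequencies and draws one ROC/PR row per frequency, with optional iso-F1 guide curves on the PR panels. Continuing from the synthetic adata in the previous sketch (counts kept small so the resampling succeeds):

from smftools.plotting import plot_model_curves_from_adata_with_frequency_grid

plot_model_curves_from_adata_with_frequency_grid(
    adata,
    model_names=['rf'],
    pos_sample_count=10,
    pos_freq_list=[0.25, 0.5],
    show_f1_iso_curves=True,
)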