smftools-0.1.3-py3-none-any.whl → smftools-0.1.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. {smftools-0.1.3.dist-info → smftools-0.1.6.dist-info}/METADATA +44 -11
  2. smftools-0.1.6.dist-info/RECORD +4 -0
  3. smftools/__init__.py +0 -25
  4. smftools/_settings.py +0 -20
  5. smftools/_version.py +0 -1
  6. smftools/datasets/F1_hybrid_NKG2A_enhander_promoter_GpC_conversion_SMF.h5ad.gz +0 -0
  7. smftools/datasets/F1_sample_sheet.csv +0 -5
  8. smftools/datasets/__init__.py +0 -9
  9. smftools/datasets/dCas9_m6A_invitro_kinetics.h5ad.gz +0 -0
  10. smftools/datasets/datasets.py +0 -28
  11. smftools/informatics/__init__.py +0 -14
  12. smftools/informatics/archived/bam_conversion.py +0 -59
  13. smftools/informatics/archived/bam_direct.py +0 -63
  14. smftools/informatics/archived/basecalls_to_adata.py +0 -71
  15. smftools/informatics/conversion_smf.py +0 -79
  16. smftools/informatics/direct_smf.py +0 -89
  17. smftools/informatics/fast5_to_pod5.py +0 -21
  18. smftools/informatics/helpers/LoadExperimentConfig.py +0 -74
  19. smftools/informatics/helpers/__init__.py +0 -60
  20. smftools/informatics/helpers/align_and_sort_BAM.py +0 -48
  21. smftools/informatics/helpers/aligned_BAM_to_bed.py +0 -73
  22. smftools/informatics/helpers/archived/informatics.py +0 -260
  23. smftools/informatics/helpers/archived/load_adata.py +0 -516
  24. smftools/informatics/helpers/bed_to_bigwig.py +0 -39
  25. smftools/informatics/helpers/binarize_converted_base_identities.py +0 -31
  26. smftools/informatics/helpers/canoncall.py +0 -25
  27. smftools/informatics/helpers/complement_base_list.py +0 -21
  28. smftools/informatics/helpers/concatenate_fastqs_to_bam.py +0 -54
  29. smftools/informatics/helpers/converted_BAM_to_adata.py +0 -233
  30. smftools/informatics/helpers/count_aligned_reads.py +0 -43
  31. smftools/informatics/helpers/extract_base_identities.py +0 -57
  32. smftools/informatics/helpers/extract_mods.py +0 -51
  33. smftools/informatics/helpers/extract_readnames_from_BAM.py +0 -22
  34. smftools/informatics/helpers/find_conversion_sites.py +0 -61
  35. smftools/informatics/helpers/generate_converted_FASTA.py +0 -98
  36. smftools/informatics/helpers/get_chromosome_lengths.py +0 -32
  37. smftools/informatics/helpers/get_native_references.py +0 -28
  38. smftools/informatics/helpers/index_fasta.py +0 -12
  39. smftools/informatics/helpers/make_dirs.py +0 -21
  40. smftools/informatics/helpers/make_modbed.py +0 -27
  41. smftools/informatics/helpers/modQC.py +0 -27
  42. smftools/informatics/helpers/modcall.py +0 -28
  43. smftools/informatics/helpers/modkit_extract_to_adata.py +0 -518
  44. smftools/informatics/helpers/ohe_batching.py +0 -52
  45. smftools/informatics/helpers/one_hot_encode.py +0 -21
  46. smftools/informatics/helpers/plot_read_length_and_coverage_histograms.py +0 -52
  47. smftools/informatics/helpers/separate_bam_by_bc.py +0 -43
  48. smftools/informatics/helpers/split_and_index_BAM.py +0 -41
  49. smftools/informatics/load_adata.py +0 -127
  50. smftools/informatics/readwrite.py +0 -106
  51. smftools/informatics/subsample_fasta_from_bed.py +0 -47
  52. smftools/informatics/subsample_pod5.py +0 -104
  53. smftools/plotting/__init__.py +0 -0
  54. smftools/preprocessing/__init__.py +0 -34
  55. smftools/preprocessing/append_C_context.py +0 -69
  56. smftools/preprocessing/archives/preprocessing.py +0 -614
  57. smftools/preprocessing/binarize_on_Youden.py +0 -42
  58. smftools/preprocessing/binary_layers_to_ohe.py +0 -30
  59. smftools/preprocessing/calculate_complexity.py +0 -71
  60. smftools/preprocessing/calculate_consensus.py +0 -47
  61. smftools/preprocessing/calculate_converted_read_methylation_stats.py +0 -96
  62. smftools/preprocessing/calculate_coverage.py +0 -41
  63. smftools/preprocessing/calculate_pairwise_hamming_distances.py +0 -27
  64. smftools/preprocessing/calculate_position_Youden.py +0 -104
  65. smftools/preprocessing/calculate_read_length_stats.py +0 -86
  66. smftools/preprocessing/clean_NaN.py +0 -38
  67. smftools/preprocessing/filter_converted_reads_on_methylation.py +0 -29
  68. smftools/preprocessing/filter_reads_on_length.py +0 -41
  69. smftools/preprocessing/invert_adata.py +0 -23
  70. smftools/preprocessing/load_sample_sheet.py +0 -24
  71. smftools/preprocessing/make_dirs.py +0 -21
  72. smftools/preprocessing/mark_duplicates.py +0 -134
  73. smftools/preprocessing/min_non_diagonal.py +0 -25
  74. smftools/preprocessing/recipes.py +0 -125
  75. smftools/preprocessing/remove_duplicates.py +0 -21
  76. smftools/readwrite.py +0 -106
  77. smftools/tools/__init__.py +0 -0
  78. smftools/tools/apply_HMM.py +0 -1
  79. smftools/tools/cluster.py +0 -0
  80. smftools/tools/read_HMM.py +0 -1
  81. smftools/tools/subset_adata.py +0 -32
  82. smftools/tools/train_HMM.py +0 -43
  83. smftools-0.1.3.dist-info/RECORD +0 -84
  84. {smftools-0.1.3.dist-info → smftools-0.1.6.dist-info}/WHEEL +0 -0
  85. {smftools-0.1.3.dist-info → smftools-0.1.6.dist-info}/licenses/LICENSE +0 -0
smftools/preprocessing/binary_layers_to_ohe.py
@@ -1,30 +0,0 @@
- ## binary_layers_to_ohe
-
- ## Conversion SMF Specific
- def binary_layers_to_ohe(adata, layers, stack='hstack'):
-     """
-     Converts binary layers of an AnnData object into per-read stacked one-hot encodings.
-
-     Parameters:
-         adata (AnnData): An AnnData object.
-         layers (list): A list of strings. Each string names a layer in the adata object that encodes a binary matrix.
-         stack (str): Dimension along which to stack the one-hot encoding. Options are 'hstack' and 'vstack'. Default is 'hstack', since this is more efficient.
-
-     Returns:
-         ohe_dict (dict): A dictionary keyed by obs_name that points to a stacked (hstack or vstack) one-hot encoding of the binary layers.
-     """
-     import numpy as np
-     # Extract the layer matrices (avoid shadowing the `layers` argument)
-     layer_matrices = [adata.layers[layer_name] for layer_name in layers]
-     n_reads = layer_matrices[0].shape[0]
-     ohe_dict = {}
-     for i in range(n_reads):
-         # Collect the i-th read's row from every binary layer
-         read_ohe = [layer[i] for layer in layer_matrices]
-         read_name = adata.obs_names[i]
-         if stack == 'hstack':
-             ohe_dict[read_name] = np.hstack(read_ohe)
-         elif stack == 'vstack':
-             ohe_dict[read_name] = np.vstack(read_ohe)
-     return ohe_dict
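
For orientation, a minimal usage sketch of this helper (it assumes binary_layers_to_ohe is in scope, e.g. imported from smftools.preprocessing as laid out in 0.1.3; the layer names are invented):

import numpy as np
import anndata as ad

adata = ad.AnnData(X=np.zeros((3, 4), dtype=np.float32))
adata.layers['A_binary_encoding'] = (np.random.rand(3, 4) > 0.5).astype(int)
adata.layers['C_binary_encoding'] = (np.random.rand(3, 4) > 0.5).astype(int)

ohe = binary_layers_to_ohe(adata, ['A_binary_encoding', 'C_binary_encoding'], stack='hstack')
print(next(iter(ohe.values())).shape)  # (8,): two 4-position layers stacked side by side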
smftools/preprocessing/calculate_complexity.py
@@ -1,71 +0,0 @@
- ## calculate_complexity
-
- def calculate_complexity(adata, output_directory='', obs_column='Reference', sample_col='Sample_names', plot=True, save_plot=False):
-     """
-     A complexity analysis of the library.
-
-     Parameters:
-         adata (AnnData): An adata object with mark_duplicates already run.
-         output_directory (str): String representing the path to the output directory.
-         obs_column (str): String of the obs column to iterate over.
-         sample_col (str): String of the sample column to iterate over.
-         plot (bool): Whether to plot the complexity model.
-         save_plot (bool): Whether to save the complexity model.
-
-     Returns:
-         None
-     """
-     import numpy as np
-     import pandas as pd
-     from scipy.optimize import curve_fit
-     from .. import readwrite
-
-     def lander_waterman(x, C0):
-         return C0 * (1 - np.exp(-x / C0))
-
-     def count_unique_reads(reads, depth):
-         subsample = np.random.choice(reads, depth, replace=False)
-         return len(np.unique(subsample))
-
-     categories = adata.obs[obs_column].cat.categories
-     sample_names = adata.obs[sample_col].cat.categories
-
-     for cat in categories:
-         for sample in sample_names:
-             unique_reads, total_reads = adata.uns[f'Hamming_distance_clusters_within_{cat}_{sample}'][0:2]
-             reads = np.concatenate((np.arange(unique_reads), np.random.choice(unique_reads, total_reads - unique_reads, replace=True)))
-             # Subsampling depths
-             subsampling_depths = [total_reads // (i+1) for i in range(10)]
-             # Arrays to store results
-             subsampled_total_reads = []
-             subsampled_unique_reads = []
-             # Perform subsampling
-             for depth in subsampling_depths:
-                 unique_count = count_unique_reads(reads, depth)
-                 subsampled_total_reads.append(depth)
-                 subsampled_unique_reads.append(unique_count)
-             # Fit the Lander-Waterman model to the data
-             popt, _ = curve_fit(lander_waterman, subsampled_total_reads, subsampled_unique_reads)
-             # Generate data for the complexity curve
-             x_data = np.linspace(0, 5000, 100)
-             y_data = lander_waterman(x_data, *popt)
-             adata.uns[f'Library_complexity_{sample}_on_{cat}'] = popt[0]
-             if plot:
-                 import matplotlib.pyplot as plt
-                 # Plot the complexity curve
-                 plt.figure(figsize=(6, 4))
-                 plt.plot(subsampled_total_reads, subsampled_unique_reads, 'o', label='Observed unique reads')
-                 plt.plot(x_data, y_data, '-', label=f'Lander-Waterman fit\nEstimated C0 = {popt[0]:.2f}')
-                 plt.xlabel('Total number of reads')
-                 plt.ylabel('Number of unique reads')
-                 title = f'Library Complexity Analysis for {sample} on {cat}'
-                 plt.title(title)
-                 plt.legend()
-                 plt.grid(True)
-                 if save_plot:
-                     save_name = output_directory + f'/{readwrite.date_string()}_{title}'
-                     plt.savefig(save_name, bbox_inches='tight', pad_inches=0.1)
-                     plt.close()
-                 else:
-                     plt.show()
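
This helper fits the Lander-Waterman saturation model, unique(x) = C0 * (1 - exp(-x / C0)), where C0 estimates the number of unique molecules in the library. A self-contained sketch of the same fit on synthetic data (all names are local to the example):

import numpy as np
from scipy.optimize import curve_fit

def lander_waterman(x, C0):
    return C0 * (1 - np.exp(-x / C0))

rng = np.random.default_rng(0)
true_C0, total_reads = 800, 3000
# 3000 reads drawn (with duplication) from 800 unique molecules
reads = rng.integers(0, true_C0, size=total_reads)
depths = [total_reads // (i + 1) for i in range(10)]
uniques = [len(np.unique(rng.choice(reads, d, replace=False))) for d in depths]
popt, _ = curve_fit(lander_waterman, depths, uniques, p0=[max(uniques)])
print(f'Estimated complexity C0 = {popt[0]:.0f}')  # close to the true 800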
smftools/preprocessing/calculate_consensus.py
@@ -1,47 +0,0 @@
- # calculate_consensus
-
- def calculate_consensus(adata, reference, sample=False, reference_column='Reference', sample_column='Sample'):
-     """
-     Takes an input AnnData object, the reference to subset on, and optionally a sample name to subset on, then calculates the consensus sequence of the read set.
-
-     Parameters:
-         adata (AnnData): The input adata to append consensus metadata to.
-         reference (str): The name of the reference to subset the adata on.
-         sample (bool | str): If False, uses all samples. If a string is passed, the adata is further subsetted to only analyze that sample.
-         reference_column (str): The name of the reference column (Default is 'Reference').
-         sample_column (str): The name of the sample column (Default is 'Sample').
-
-     Returns:
-         None
-     """
-     import numpy as np
-
-     # Subset the adata on the reference of interest. Optionally, subset additionally on a sample of interest.
-     record_subset = adata[adata.obs[reference_column] == reference].copy()
-     if sample:
-         record_subset = record_subset[record_subset.obs[sample_column] == sample].copy()
-
-     # Grab layer names from the adata object that correspond to the binary encodings of the read sequences.
-     layers = [layer for layer in record_subset.layers if '_binary_' in layer]
-     layer_map, layer_counts = {}, []
-     for i, layer in enumerate(layers):
-         # Gives an integer mapping to access which sequence base the binary layer is encoding
-         layer_map[i] = layer.split('_')[0]
-         # Get the positional counts from all reads for the given base identity.
-         layer_counts.append(np.sum(record_subset.layers[layer], axis=0))
-     # Combine the positional counts array derived from each binary base layer into an ndarray
-     count_array = np.array(layer_counts)
-     # Determine the row index that contains the largest count for each position and store this in an array.
-     nucleotide_indexes = np.argmax(count_array, axis=0)
-     # Map the base sequence derived from the row index array to attain the consensus sequence in a list.
-     consensus_sequence_list = [layer_map[i] for i in nucleotide_indexes]
-
-     if sample:
-         adata.var[f'{reference}_consensus_from_{sample}'] = consensus_sequence_list
-     else:
-         adata.var[f'{reference}_consensus_across_samples'] = consensus_sequence_list
-
-     adata.uns[f'{reference}_consensus_sequence'] = consensus_sequence_list
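
The consensus call reduces to a per-position argmax over stacked base-count rows; a standalone illustration with invented counts:

import numpy as np

layer_map = {0: 'A', 1: 'C', 2: 'G', 3: 'T'}
# Rows: positional counts per base (A, C, G, T) summed over reads; columns: positions
count_array = np.array([[9, 0, 1, 2, 0],
                        [1, 8, 0, 0, 0],
                        [0, 1, 7, 1, 9],
                        [0, 1, 2, 7, 1]])
consensus = [layer_map[i] for i in np.argmax(count_array, axis=0)]
print(''.join(consensus))  # ACGTG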
smftools/preprocessing/calculate_converted_read_methylation_stats.py
@@ -1,96 +0,0 @@
- ## calculate_converted_read_methylation_stats
-
- ## Conversion SMF Specific
- # Read methylation QC
-
- def calculate_converted_read_methylation_stats(adata, reference_column, sample_names_col, output_directory, show_methylation_histogram=False, save_methylation_histogram=False):
-     """
-     Adds methylation statistics for each read. Indicates whether the read GpC methylation exceeded other_C methylation (background false positives).
-
-     Parameters:
-         adata (AnnData): An adata object.
-         reference_column (str): String representing the name of the Reference column to use.
-         sample_names_col (str): String representing the name of the sample name column to use.
-         output_directory (str): String representing the output directory to make and write out the histograms.
-         show_methylation_histogram (bool): Whether to display the histograms.
-         save_methylation_histogram (bool): Whether to save the histograms.
-
-     Returns:
-         None
-     """
-     import numpy as np
-     import pandas as pd
-     import matplotlib.pyplot as plt
-     from .. import readwrite
-
-     references = set(adata.obs[reference_column])
-     sample_names = set(adata.obs[sample_names_col])
-
-     site_types = ['GpC_site', 'CpG_site', 'ambiguous_GpC_CpG_site', 'other_C']
-
-     # Initialize the per-read columns for each site type
-     for site_type in site_types:
-         adata.obs[f'{site_type}_row_methylation_sums'] = pd.Series(0, index=adata.obs_names, dtype=int)
-         adata.obs[f'{site_type}_row_methylation_means'] = pd.Series(np.nan, index=adata.obs_names, dtype=float)
-         adata.obs[f'number_valid_{site_type}_in_read'] = pd.Series(0, index=adata.obs_names, dtype=int)
-         adata.obs[f'fraction_valid_{site_type}_in_range'] = pd.Series(np.nan, index=adata.obs_names, dtype=float)
-     for cat in references:
-         cat_subset = adata[adata.obs[reference_column] == cat].copy()
-         for site_type in site_types:
-             print(f'Iterating over {cat}_{site_type}')
-             observation_matrix = cat_subset.obsm[f'{cat}_{site_type}']
-             number_valid_positions_in_read = np.nansum(~np.isnan(observation_matrix), axis=1)
-             row_methylation_sums = np.nansum(observation_matrix, axis=1)
-             # Avoid division by zero for reads with no valid positions
-             number_valid_positions_in_read[number_valid_positions_in_read == 0] = 1
-             fraction_valid_positions_in_range = number_valid_positions_in_read / np.max(number_valid_positions_in_read)
-             row_methylation_means = np.divide(row_methylation_sums, number_valid_positions_in_read)
-             temp_obs_data = pd.DataFrame({f'number_valid_{site_type}_in_read': number_valid_positions_in_read,
-                                           f'fraction_valid_{site_type}_in_range': fraction_valid_positions_in_range,
-                                           f'{site_type}_row_methylation_sums': row_methylation_sums,
-                                           f'{site_type}_row_methylation_means': row_methylation_means}, index=cat_subset.obs.index)
-             adata.obs.update(temp_obs_data)
-     # Indicate whether the read-level GpC methylation rate exceeds the false methylation rate of the read
-     pass_array = np.array(adata.obs['GpC_site_row_methylation_means'] > adata.obs['other_C_row_methylation_means'])
-     adata.obs['GpC_above_other_C'] = pd.Series(pass_array, index=adata.obs.index, dtype=bool)
-
-     adata.uns['methylation_dict'] = {}
-
-     for reference in references:
-         reference_adata = adata[adata.obs[reference_column] == reference].copy()
-         split_reference = reference.split('_')[0][1:]
-         for sample in sample_names:
-             sample_adata = reference_adata[reference_adata.obs[sample_names_col] == sample].copy()
-             for site_type in site_types:
-                 methylation_data = sample_adata.obs[f'{site_type}_row_methylation_means']
-                 max_meth = np.max(sample_adata.obs[f'{site_type}_row_methylation_sums'])
-                 if not np.isnan(max_meth):
-                     n_bins = max(1, int(max_meth // 2))
-                 else:
-                     n_bins = 1
-                 mean = np.mean(methylation_data)
-                 median = np.median(methylation_data)
-                 stdev = np.std(methylation_data)
-                 adata.uns['methylation_dict'][f'{reference}_{sample}_{site_type}'] = [mean, median, stdev]
-                 if show_methylation_histogram or save_methylation_histogram:
-                     fig, ax = plt.subplots(figsize=(6, 4))
-                     count, bins, patches = plt.hist(methylation_data, bins=n_bins, weights=np.ones(len(methylation_data)) / len(methylation_data), alpha=0.7, color='blue', edgecolor='black')
-                     plt.axvline(median, color='red', linestyle='dashed', linewidth=1)
-                     plt.text(median + stdev, max(count)*0.8, f'Median: {median:.2f}', color='red')
-                     plt.axvline(median - stdev, color='green', linestyle='dashed', linewidth=1, label=f'Stdev: {stdev:.2f}')
-                     plt.axvline(median + stdev, color='green', linestyle='dashed', linewidth=1)
-                     plt.text(median + stdev + 0.05, max(count) / 3, f'+1 Stdev: {stdev:.2f}', color='green')
-                     plt.xlabel('Fraction methylated')
-                     plt.ylabel('Proportion')
-                     title = f'Distribution of {methylation_data.shape[0]} read {site_type} methylation means \nfor {sample} sample on {split_reference} after filtering'
-                     plt.title(title, pad=20)
-                     plt.xlim(-0.05, 1.05)  # Methylation fractions run from 0 to 1
-                     ax.spines['right'].set_visible(False)
-                     ax.spines['top'].set_visible(False)
-                     if save_methylation_histogram:
-                         save_name = output_directory + f'/{readwrite.date_string()} {title}'
-                         plt.savefig(save_name, bbox_inches='tight', pad_inches=0.1)
-                         plt.close()
-                     else:
-                         plt.show()
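
The per-read statistics are NaN-aware reductions over an obsm matrix (rows are reads, columns are site positions, NaN marks sites absent from a read). In miniature, with synthetic values:

import numpy as np

obsm = np.array([[1.0, np.nan, 0.0, 1.0],
                 [np.nan, np.nan, 1.0, np.nan]])
valid_sites = np.sum(~np.isnan(obsm), axis=1)    # sites observed per read: [3 1]
methylated = np.nansum(obsm, axis=1)             # methylated sites per read: [2. 1.]
means = methylated / np.maximum(valid_sites, 1)  # guard against zero-coverage reads
print(means)                                     # approx [0.667 1.0]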
smftools/preprocessing/calculate_coverage.py
@@ -1,41 +0,0 @@
- ## calculate_coverage
-
- def calculate_coverage(adata, obs_column='Reference', position_nan_threshold=0.05):
-     """
-     Append position-level metadata indicating whether the position is informative within the given observation category.
-
-     Parameters:
-         adata (AnnData): An AnnData object.
-         obs_column (str): Observation column to subset on prior to calculating position statistics for each category.
-         position_nan_threshold (float): A minimal fractional coverage threshold within the obs_column category to call the position as valid.
-
-     Returns:
-         None
-     """
-     import numpy as np
-     import pandas as pd
-
-     categories = adata.obs[obs_column].cat.categories
-     n_categories_with_position = np.zeros(adata.shape[1])
-     # Loop over categories
-     for cat in categories:
-         # Look at positional information for each reference
-         temp_cat_adata = adata[adata.obs[obs_column] == cat].copy()
-         # Look at read coverage on the given category strand
-         cat_valid_coverage = np.sum(~np.isnan(temp_cat_adata.X), axis=0)
-         cat_invalid_coverage = np.sum(np.isnan(temp_cat_adata.X), axis=0)
-         cat_valid_fraction = cat_valid_coverage / (cat_valid_coverage + cat_invalid_coverage)
-         # Append metadata for category to the anndata object
-         adata.var[f'{cat}_valid_fraction'] = pd.Series(cat_valid_fraction, index=adata.var.index)
-         # Flag whether the position is sufficiently covered in the given category
-         adata.var[f'position_in_{cat}'] = adata.var[f'{cat}_valid_fraction'] >= position_nan_threshold
-         n_categories_with_position += np.array(adata.var[f'position_in_{cat}'])
-
-     # Final array with the sum at each position of the number of categories covering that position
-     adata.var[f'N_{obs_column}_with_position'] = n_categories_with_position.astype(int)
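
Column-wise, the same NaN masking yields the per-position coverage fractions that the thresholding acts on; for example:

import numpy as np

X = np.array([[1.0, np.nan],
              [0.0, np.nan],
              [1.0, 1.0]])
valid_fraction = np.sum(~np.isnan(X), axis=0) / X.shape[0]
print(valid_fraction)          # [1.    0.333]
print(valid_fraction >= 0.05)  # [ True  True]: both positions are kept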
smftools/preprocessing/calculate_pairwise_hamming_distances.py
@@ -1,27 +0,0 @@
- ## calculate_pairwise_hamming_distances
-
- ## Conversion SMF Specific
- def calculate_pairwise_hamming_distances(arrays):
-     """
-     Calculate the pairwise Hamming distances for a list of h-stacked ndarrays.
-
-     Parameters:
-         arrays (list): A list of ndarrays.
-
-     Returns:
-         distance_matrix (ndarray): A 2D array containing the pairwise Hamming distances between all arrays.
-     """
-     import numpy as np
-     from tqdm import tqdm
-     from scipy.spatial.distance import hamming
-     num_arrays = len(arrays)
-     # Initialize an empty distance matrix
-     distance_matrix = np.zeros((num_arrays, num_arrays))
-     # Calculate pairwise distances (symmetric, so fill both triangles) with a progress bar
-     for i in tqdm(range(num_arrays), desc="Calculating Hamming Distances"):
-         for j in range(i + 1, num_arrays):
-             distance = hamming(arrays[i], arrays[j])
-             distance_matrix[i, j] = distance
-             distance_matrix[j, i] = distance
-     return distance_matrix
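
Note that scipy.spatial.distance.hamming returns the fraction of mismatching positions rather than the raw count, so the matrix holds normalized distances in [0, 1]:

import numpy as np
from scipy.spatial.distance import hamming

a = np.array([0, 1, 1, 0])
b = np.array([0, 1, 0, 1])
print(hamming(a, b))  # 0.5: 2 mismatches out of 4 positions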
smftools/preprocessing/calculate_position_Youden.py
@@ -1,104 +0,0 @@
- ## calculate_position_Youden
-
- ## Calculating and applying position-level thresholds for methylation calls to binarize the SMF data
- def calculate_position_Youden(adata, positive_control_sample, negative_control_sample, J_threshold=0.4, obs_column='Reference', save=False, output_directory=''):
-     """
-     Adds new variable metadata to each position indicating whether the position provides reliable SMF methylation calls. Also outputs plots of the positional ROC curves.
-
-     Parameters:
-         adata (AnnData): An AnnData object.
-         positive_control_sample (str): The sample name corresponding to the plus-MTase control sample.
-         negative_control_sample (str): The sample name corresponding to the minus-MTase control sample.
-         J_threshold (float): The J-statistic cutoff used to decide whether a position passes QC for methylation calls.
-         obs_column (str): The category to iterate over.
-         save (bool): Whether to save the ROC plots.
-         output_directory (str): Path to the output directory for the ROC curves.
-
-     Returns:
-         None
-     """
-     import numpy as np
-     import matplotlib.pyplot as plt
-     from sklearn.metrics import roc_curve
-     from .. import readwrite
-
-     control_samples = [positive_control_sample, negative_control_sample]
-     categories = adata.obs[obs_column].cat.categories
-     # Iterate over each category in the specified obs_column
-     for cat in categories:
-         # Subset to keep only reads associated with the category
-         cat_subset = adata[adata.obs[obs_column] == cat].copy()
-         # Iterate over positive and negative control samples
-         for control in control_samples:
-             # Initialize a dictionary for the given control sample, keyed by position, pointing to a tuple of (position, methylation probabilities, fraction coverage)
-             adata.uns[f'{cat}_position_methylation_dict_{control}'] = {}
-             # Get the current control subset on the given category
-             filtered_obs = cat_subset.obs[cat_subset.obs['Sample_names'].str.contains(control, na=False, regex=True)]
-             control_subset = cat_subset[filtered_obs.index].copy()
-             # Iterate through every position in the control subset
-             for position in range(control_subset.shape[1]):
-                 # Get the coordinate name associated with that position
-                 coordinate = control_subset.var_names[position]
-                 # Get the array of methylation probabilities for each read in the subset at that position
-                 position_data = control_subset.X[:, position]
-                 # Keep only the methylation data that has real (non-NaN) values
-                 nan_mask = ~np.isnan(position_data)
-                 position_data = position_data[nan_mask]
-                 # Get the position coverage and its fraction of all reads in the subset
-                 position_coverage = len(position_data)
-                 fraction_coverage = position_coverage / control_subset.shape[0]
-                 # Save the position and the position methylation data for the control subset
-                 adata.uns[f'{cat}_position_methylation_dict_{control}'][f'{position}'] = (position, position_data, fraction_coverage)
-
-     for cat in categories:
-         fig, ax = plt.subplots(figsize=(6, 4))
-         plt.plot([0, 1], [0, 1], linestyle='--', color='gray')
-         plt.xlabel('False Positive Rate')
-         plt.ylabel('True Positive Rate')
-         ax.spines['right'].set_visible(False)
-         ax.spines['top'].set_visible(False)
-         n_passed_positions = 0
-         n_total_positions = 0
-         # Initialize a list that will hold the positional thresholds for the category
-         probability_thresholding_list = [(np.nan, np.nan)] * adata.shape[1]
-         for i, key in enumerate(adata.uns[f'{cat}_position_methylation_dict_{positive_control_sample}'].keys()):
-             position = int(adata.uns[f'{cat}_position_methylation_dict_{positive_control_sample}'][key][0])
-             positive_position_array = adata.uns[f'{cat}_position_methylation_dict_{positive_control_sample}'][key][1]
-             fraction_coverage = adata.uns[f'{cat}_position_methylation_dict_{positive_control_sample}'][key][2]
-             if fraction_coverage > 0.2:
-                 try:
-                     negative_position_array = adata.uns[f'{cat}_position_methylation_dict_{negative_control_sample}'][key][1]
-                     # Combine the negative and positive control data
-                     data = np.concatenate([negative_position_array, positive_position_array])
-                     labels = np.array([0] * len(negative_position_array) + [1] * len(positive_position_array))
-                     # Calculate the ROC curve
-                     fpr, tpr, thresholds = roc_curve(labels, data)
-                     # Calculate Youden's J statistic at every threshold and keep the maximizer
-                     J = tpr - fpr
-                     optimal_idx = np.argmax(J)
-                     optimal_threshold = thresholds[optimal_idx]
-                     max_J = np.max(J)
-                     probability_thresholding_list[position] = (optimal_threshold, max_J)
-                     n_total_positions += 1
-                     if max_J > J_threshold:
-                         n_passed_positions += 1
-                         plt.plot(fpr, tpr, label='ROC curve')
-                 except Exception:
-                     # Fall back to a fixed threshold when the ROC cannot be computed
-                     probability_thresholding_list[position] = (0.8, np.nan)
-         title = f'ROC Curve for {n_passed_positions} positions with J-stat greater than {J_threshold}\n out of {n_total_positions} total positions on {cat}'
-         plt.title(title)
-         if save:
-             save_name = output_directory + f'/{readwrite.date_string()} {title}'
-             plt.savefig(save_name)
-             plt.close()
-         else:
-             plt.show()
-
-         adata.var[f'{cat}_position_methylation_thresholding_Youden_stats'] = probability_thresholding_list
-         J_max_list = [probability_thresholding_list[i][1] for i in range(adata.shape[1])]
-         adata.var[f'{cat}_position_passed_QC'] = [J > J_threshold for J in J_max_list]
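
Youden's J statistic at each ROC threshold is J = TPR - FPR, and the chosen binarization threshold is its maximizer. A compact standalone check on synthetic methylation probabilities (the beta parameters are arbitrary):

import numpy as np
from sklearn.metrics import roc_curve

rng = np.random.default_rng(1)
neg = rng.beta(2, 8, 200)   # minus-MTase control: mostly low probabilities
pos = rng.beta(8, 2, 200)   # plus-MTase control: mostly high probabilities
labels = np.r_[np.zeros(200), np.ones(200)]
fpr, tpr, thresholds = roc_curve(labels, np.r_[neg, pos])
J = tpr - fpr
print(f'optimal threshold = {thresholds[np.argmax(J)]:.2f}, max J = {J.max():.2f}')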
smftools/preprocessing/calculate_read_length_stats.py
@@ -1,86 +0,0 @@
- ## calculate_read_length_stats
-
- # Read length QC
- def calculate_read_length_stats(adata, reference_column, sample_names_col, output_directory, show_read_length_histogram=False, save_read_length_histogram=False):
-     """
-     Append the first and last valid positions of each read, and from these determine and append the read length.
-
-     Parameters:
-         adata (AnnData): An adata object.
-         reference_column (str): String representing the name of the Reference column to use.
-         sample_names_col (str): String representing the name of the sample name column to use.
-         output_directory (str): String representing the output directory to make and write out the histograms.
-         show_read_length_histogram (bool): Whether to display the histograms.
-         save_read_length_histogram (bool): Whether to save the histograms.
-
-     Returns:
-         upper_bound (int): Last valid position in the dataset.
-         lower_bound (int): First valid position in the dataset.
-     """
-     import numpy as np
-     import pandas as pd
-     import matplotlib.pyplot as plt
-     from .. import readwrite
-     from .make_dirs import make_dirs
-
-     make_dirs([output_directory])
-
-     references = set(adata.obs[reference_column])
-     sample_names = set(adata.obs[sample_names_col])
-
-     ## Add basic observation-level (read-level) metadata: the first and last valid positions in each read, and the read length derived from them. Also save two variables that hold the first and last valid positions in the entire dataset.
-     print('calculating read length stats')
-     read_first_valid_position = np.array([int(adata.var_names[i]) for i in np.argmax(~np.isnan(adata.X), axis=1)])
-     read_last_valid_position = np.array([int(adata.var_names[i]) for i in (adata.X.shape[1] - 1 - np.argmax(~np.isnan(adata.X[:, ::-1]), axis=1))])
-     read_length = read_last_valid_position - read_first_valid_position + np.ones(len(read_first_valid_position))
-
-     adata.obs['first_valid_position'] = pd.Series(read_first_valid_position, index=adata.obs.index, dtype=int)
-     adata.obs['last_valid_position'] = pd.Series(read_last_valid_position, index=adata.obs.index, dtype=int)
-     adata.obs['read_length'] = pd.Series(read_length, index=adata.obs.index, dtype=int)
-
-     # Define variables to hold the first and last valid positions in the dataset
-     upper_bound = int(np.nanmax(adata.obs['last_valid_position']))
-     lower_bound = int(np.nanmin(adata.obs['first_valid_position']))
-
-     ## Plot a histogram of the read length data and store the (mean, median, stdev) of the read lengths for each sample, keyed by reference and sample name.
-     adata.uns['read_length_dict'] = {}
-
-     for reference in references:
-         temp_reference_adata = adata[adata.obs[reference_column] == reference].copy()
-         split_reference = reference.split('_')[0][1:]
-         for sample in sample_names:
-             temp_sample_adata = temp_reference_adata[temp_reference_adata.obs[sample_names_col] == sample].copy()
-             temp_data = temp_sample_adata.obs['read_length']
-             max_length = np.max(temp_data)
-             mean = np.mean(temp_data)
-             median = np.median(temp_data)
-             stdev = np.std(temp_data)
-             adata.uns['read_length_dict'][f'{reference}_{sample}'] = [mean, median, stdev]
-             if not np.isnan(max_length):
-                 n_bins = max(1, int(max_length // 100))
-             else:
-                 n_bins = 1
-             if show_read_length_histogram or save_read_length_histogram:
-                 plt.figure(figsize=(10, 6))
-                 # Draw the histogram once, keeping the bin counts for text placement
-                 counts, _, _ = plt.hist(temp_data, bins=n_bins, alpha=0.7, color='blue', edgecolor='black')
-                 plt.xlabel('Read Length')
-                 plt.ylabel('Count')
-                 title = f'Read length distribution of {temp_sample_adata.shape[0]} total reads from {sample} sample on {split_reference} allele'
-                 plt.title(title)
-                 # Add a vertical line at the median and annotate it
-                 plt.axvline(median, color='red', linestyle='dashed', linewidth=1)
-                 plt.text(median + 0.5, max(counts) / 2, f'Median: {median:.2f}', color='red')
-                 plt.xlim(lower_bound - 100, upper_bound + 100)
-                 if save_read_length_histogram:
-                     save_name = output_directory + f'/{readwrite.date_string()} {title}'
-                     plt.savefig(save_name, bbox_inches='tight', pad_inches=0.1)
-                     plt.close()
-                 else:
-                     plt.show()
-
-     return upper_bound, lower_bound
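
The bound calculations rely on np.argmax over a boolean mask returning the index of the first True; reversing the columns gives the last valid position. A minimal check:

import numpy as np

X = np.array([[np.nan, 0.9, 0.1, np.nan],
              [0.2, np.nan, np.nan, 0.7]])
first = np.argmax(~np.isnan(X), axis=1)                           # [1 0]
last = X.shape[1] - 1 - np.argmax(~np.isnan(X[:, ::-1]), axis=1)  # [2 3]
print(first, last, last - first + 1)                              # read lengths [2 4]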
smftools/preprocessing/clean_NaN.py
@@ -1,38 +0,0 @@
- ## clean_NaN
-
- def clean_NaN(adata, layer=None):
-     """
-     Append layers to adata that contain NaN-cleaning strategies.
-
-     Parameters:
-         adata (AnnData): An adata object.
-         layer (str): String representing the layer to fill NaN values in.
-
-     Returns:
-         None
-     """
-     import numpy as np
-     from ..readwrite import adata_to_df
-
-     # Fill NaN with the closest SMF value in the read
-     df = adata_to_df(adata, layer=layer)
-     df = df.ffill(axis=1).bfill(axis=1)
-     adata.layers['fill_nans_closest'] = df.values
-
-     # Recode 0 -> -1, then NaN -> 0
-     df = adata_to_df(adata, layer=layer)
-     df = df.replace(0, -1)
-     df = df.replace(np.nan, 0)
-     adata.layers['nan0_0minus1'] = df.values
-
-     # Recode 1 -> 2, then NaN -> 1
-     df = adata_to_df(adata, layer=layer)
-     df = df.replace(1, 2)
-     df = df.replace(np.nan, 1)
-     adata.layers['nan1_12'] = df.values
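
In miniature, the three appended layers encode a single row as follows (synthetic values):

import numpy as np
import pandas as pd

df = pd.DataFrame([[np.nan, 1.0, np.nan, 0.0]])
print(df.ffill(axis=1).bfill(axis=1).values)  # fill_nans_closest: [[ 1.  1.  1.  0.]]
print(df.replace(0, -1).fillna(0).values)     # nan0_0minus1:      [[ 0.  1.  0. -1.]]
print(df.replace(1, 2).fillna(1).values)      # nan1_12:           [[ 1.  2.  1.  0.]]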
smftools/preprocessing/filter_converted_reads_on_methylation.py
@@ -1,29 +0,0 @@
- ## filter_converted_reads_on_methylation
-
- ## Conversion SMF Specific
- # Read methylation QC
- def filter_converted_reads_on_methylation(adata, valid_SMF_site_threshold=0.8, min_SMF_threshold=0.025):
-     """
-     Filter the adata object using a minimum threshold for the valid SMF site fraction in each read, as well as a minimum methylation content per read.
-
-     Parameters:
-         adata (AnnData): An adata object.
-         valid_SMF_site_threshold (float): The minimum proportion of valid SMF sites that must be present in the read. Default is 0.8.
-         min_SMF_threshold (float): The minimum read methylation level. Default is 0.025.
-
-     Returns:
-         adata (AnnData): The filtered adata object.
-     """
-     if valid_SMF_site_threshold:
-         # Keep reads that exceed the given valid GpC site content
-         adata = adata[adata.obs['fraction_valid_GpC_site_in_range'] > valid_SMF_site_threshold].copy()
-     if min_SMF_threshold:
-         # Keep reads with SMF methylation above background methylation
-         adata = adata[adata.obs['GpC_above_other_C']].copy()
-         # Keep reads above the defined methylation threshold
-         adata = adata[adata.obs['GpC_site_row_methylation_means'] > min_SMF_threshold].copy()
-
-     return adata
smftools/preprocessing/filter_reads_on_length.py
@@ -1,41 +0,0 @@
- ## filter_reads_on_length
-
- def filter_reads_on_length(adata, filter_on_coordinates=False, min_read_length=2700):
-     """
-     Filters the adata object to keep a defined coordinate window, as well as reads that exceed a minimum length.
-
-     Parameters:
-         adata (AnnData): An adata object.
-         filter_on_coordinates (bool | list): If False, skips coordinate filtering. Otherwise, provide a list of two integers representing the lower and upper bound coordinates to filter on. Default is False.
-         min_read_length (int): The minimum read length to keep in the filtered dataset. Default is 2700.
-
-     Returns:
-         adata (AnnData): The filtered adata object.
-     """
-     import numpy as np
-     if filter_on_coordinates:
-         lower_bound, upper_bound = filter_on_coordinates
-         # Extract the position information from the adata object as an np array
-         var_names_arr = adata.var_names.astype(int).to_numpy()
-         # Find the existing coordinate closest to the specified upper bound
-         closest_end_index = np.argmin(np.abs(var_names_arr - upper_bound))
-         upper_bound = int(adata.var_names[closest_end_index])
-         # Find the existing coordinate closest to the specified lower bound
-         closest_start_index = np.argmin(np.abs(var_names_arr - lower_bound))
-         lower_bound = int(adata.var_names[closest_start_index])
-         # Build the set of positions that fall within the lower and upper bounds
-         position_set = {str(pos) for pos in range(lower_bound, upper_bound + 1)}
-         print(f'Subsetting adata to keep data between coordinates {lower_bound} and {upper_bound}')
-         adata = adata[:, adata.var_names.isin(position_set)].copy()
-
-     if min_read_length:
-         print(f'Subsetting adata to keep reads longer than {min_read_length}')
-         adata = adata[adata.obs['read_length'] > min_read_length].copy()
-
-     return adata
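
The coordinate filter snaps each requested bound to the nearest existing var_name before subsetting; the core of that snap is:

import numpy as np

var_names_arr = np.array([100, 150, 200, 250, 300])
requested_upper = 230
closest = var_names_arr[np.argmin(np.abs(var_names_arr - requested_upper))]
print(closest)  # 250: the retained upper bound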
smftools/preprocessing/invert_adata.py
@@ -1,23 +0,0 @@
- ## invert_adata
-
- # Optional inversion of the adata
- def invert_adata(adata):
-     """
-     Inverts the adata object along the variable axis.
-
-     Parameters:
-         adata (AnnData): An adata object.
-
-     Returns:
-         adata (AnnData): The inverted adata object.
-     """
-     import numpy as np
-     print('Inverting adata')
-     # Reassign var_names with the coordinates in reverse order, keeping the originals as metadata
-     old_var_names = adata.var_names.astype(int).to_numpy()
-     new_var_names = np.sort(old_var_names)[::-1].astype(str)
-     adata.var['Original_positional_coordinate'] = old_var_names.astype(str)
-     adata.var_names = new_var_names
-     # Reorder the columns by the old var_names and return the result, since this reindexing does not mutate in place
-     adata = adata[:, old_var_names.astype(str)]
-     return adata
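
Because the final column reindexing creates a new AnnData view rather than mutating in place, the caller must rebind the result:

adata = invert_adata(adata)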