smftools 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. smftools/__init__.py +29 -0
  2. smftools/_settings.py +20 -0
  3. smftools/_version.py +1 -0
  4. smftools/datasets/F1_hybrid_NKG2A_enhander_promoter_GpC_conversion_SMF.h5ad.gz +0 -0
  5. smftools/datasets/F1_sample_sheet.csv +5 -0
  6. smftools/datasets/__init__.py +9 -0
  7. smftools/datasets/dCas9_m6A_invitro_kinetics.h5ad.gz +0 -0
  8. smftools/datasets/datasets.py +28 -0
  9. smftools/informatics/__init__.py +16 -0
  10. smftools/informatics/archived/bam_conversion.py +59 -0
  11. smftools/informatics/archived/bam_direct.py +63 -0
  12. smftools/informatics/archived/basecalls_to_adata.py +71 -0
  13. smftools/informatics/archived/print_bam_query_seq.py +29 -0
  14. smftools/informatics/basecall_pod5s.py +80 -0
  15. smftools/informatics/conversion_smf.py +132 -0
  16. smftools/informatics/direct_smf.py +137 -0
  17. smftools/informatics/fast5_to_pod5.py +21 -0
  18. smftools/informatics/helpers/LoadExperimentConfig.py +75 -0
  19. smftools/informatics/helpers/__init__.py +74 -0
  20. smftools/informatics/helpers/align_and_sort_BAM.py +59 -0
  21. smftools/informatics/helpers/aligned_BAM_to_bed.py +74 -0
  22. smftools/informatics/helpers/archived/informatics.py +260 -0
  23. smftools/informatics/helpers/archived/load_adata.py +516 -0
  24. smftools/informatics/helpers/bam_qc.py +66 -0
  25. smftools/informatics/helpers/bed_to_bigwig.py +39 -0
  26. smftools/informatics/helpers/binarize_converted_base_identities.py +79 -0
  27. smftools/informatics/helpers/canoncall.py +34 -0
  28. smftools/informatics/helpers/complement_base_list.py +21 -0
  29. smftools/informatics/helpers/concatenate_fastqs_to_bam.py +55 -0
  30. smftools/informatics/helpers/converted_BAM_to_adata.py +245 -0
  31. smftools/informatics/helpers/converted_BAM_to_adata_II.py +369 -0
  32. smftools/informatics/helpers/count_aligned_reads.py +43 -0
  33. smftools/informatics/helpers/demux_and_index_BAM.py +52 -0
  34. smftools/informatics/helpers/extract_base_identities.py +44 -0
  35. smftools/informatics/helpers/extract_mods.py +83 -0
  36. smftools/informatics/helpers/extract_read_features_from_bam.py +31 -0
  37. smftools/informatics/helpers/extract_read_lengths_from_bed.py +25 -0
  38. smftools/informatics/helpers/extract_readnames_from_BAM.py +22 -0
  39. smftools/informatics/helpers/find_conversion_sites.py +50 -0
  40. smftools/informatics/helpers/generate_converted_FASTA.py +99 -0
  41. smftools/informatics/helpers/get_chromosome_lengths.py +32 -0
  42. smftools/informatics/helpers/get_native_references.py +28 -0
  43. smftools/informatics/helpers/index_fasta.py +12 -0
  44. smftools/informatics/helpers/make_dirs.py +21 -0
  45. smftools/informatics/helpers/make_modbed.py +27 -0
  46. smftools/informatics/helpers/modQC.py +27 -0
  47. smftools/informatics/helpers/modcall.py +36 -0
  48. smftools/informatics/helpers/modkit_extract_to_adata.py +884 -0
  49. smftools/informatics/helpers/ohe_batching.py +76 -0
  50. smftools/informatics/helpers/ohe_layers_decode.py +32 -0
  51. smftools/informatics/helpers/one_hot_decode.py +27 -0
  52. smftools/informatics/helpers/one_hot_encode.py +57 -0
  53. smftools/informatics/helpers/plot_read_length_and_coverage_histograms.py +53 -0
  54. smftools/informatics/helpers/run_multiqc.py +28 -0
  55. smftools/informatics/helpers/separate_bam_by_bc.py +43 -0
  56. smftools/informatics/helpers/split_and_index_BAM.py +36 -0
  57. smftools/informatics/load_adata.py +182 -0
  58. smftools/informatics/readwrite.py +106 -0
  59. smftools/informatics/subsample_fasta_from_bed.py +47 -0
  60. smftools/informatics/subsample_pod5.py +104 -0
  61. smftools/plotting/__init__.py +15 -0
  62. smftools/plotting/classifiers.py +355 -0
  63. smftools/plotting/general_plotting.py +205 -0
  64. smftools/plotting/position_stats.py +462 -0
  65. smftools/preprocessing/__init__.py +33 -0
  66. smftools/preprocessing/append_C_context.py +82 -0
  67. smftools/preprocessing/archives/mark_duplicates.py +146 -0
  68. smftools/preprocessing/archives/preprocessing.py +614 -0
  69. smftools/preprocessing/archives/remove_duplicates.py +21 -0
  70. smftools/preprocessing/binarize_on_Youden.py +45 -0
  71. smftools/preprocessing/binary_layers_to_ohe.py +40 -0
  72. smftools/preprocessing/calculate_complexity.py +72 -0
  73. smftools/preprocessing/calculate_consensus.py +47 -0
  74. smftools/preprocessing/calculate_converted_read_methylation_stats.py +94 -0
  75. smftools/preprocessing/calculate_coverage.py +42 -0
  76. smftools/preprocessing/calculate_pairwise_differences.py +49 -0
  77. smftools/preprocessing/calculate_pairwise_hamming_distances.py +27 -0
  78. smftools/preprocessing/calculate_position_Youden.py +115 -0
  79. smftools/preprocessing/calculate_read_length_stats.py +79 -0
  80. smftools/preprocessing/clean_NaN.py +46 -0
  81. smftools/preprocessing/filter_adata_by_nan_proportion.py +31 -0
  82. smftools/preprocessing/filter_converted_reads_on_methylation.py +44 -0
  83. smftools/preprocessing/filter_reads_on_length.py +51 -0
  84. smftools/preprocessing/flag_duplicate_reads.py +149 -0
  85. smftools/preprocessing/invert_adata.py +30 -0
  86. smftools/preprocessing/load_sample_sheet.py +38 -0
  87. smftools/preprocessing/make_dirs.py +21 -0
  88. smftools/preprocessing/min_non_diagonal.py +25 -0
  89. smftools/preprocessing/recipes.py +127 -0
  90. smftools/preprocessing/subsample_adata.py +58 -0
  91. smftools/readwrite.py +198 -0
  92. smftools/tools/__init__.py +49 -0
  93. smftools/tools/apply_hmm.py +202 -0
  94. smftools/tools/apply_hmm_batched.py +241 -0
  95. smftools/tools/archived/classify_methylated_features.py +66 -0
  96. smftools/tools/archived/classify_non_methylated_features.py +75 -0
  97. smftools/tools/archived/subset_adata_v1.py +32 -0
  98. smftools/tools/archived/subset_adata_v2.py +46 -0
  99. smftools/tools/calculate_distances.py +18 -0
  100. smftools/tools/calculate_umap.py +62 -0
  101. smftools/tools/call_hmm_peaks.py +105 -0
  102. smftools/tools/classifiers.py +787 -0
  103. smftools/tools/cluster_adata_on_methylation.py +105 -0
  104. smftools/tools/data/__init__.py +2 -0
  105. smftools/tools/data/anndata_data_module.py +90 -0
  106. smftools/tools/data/preprocessing.py +6 -0
  107. smftools/tools/display_hmm.py +18 -0
  108. smftools/tools/evaluation/__init__.py +0 -0
  109. smftools/tools/general_tools.py +69 -0
  110. smftools/tools/hmm_readwrite.py +16 -0
  111. smftools/tools/inference/__init__.py +1 -0
  112. smftools/tools/inference/lightning_inference.py +41 -0
  113. smftools/tools/models/__init__.py +9 -0
  114. smftools/tools/models/base.py +14 -0
  115. smftools/tools/models/cnn.py +34 -0
  116. smftools/tools/models/lightning_base.py +41 -0
  117. smftools/tools/models/mlp.py +17 -0
  118. smftools/tools/models/positional.py +17 -0
  119. smftools/tools/models/rnn.py +16 -0
  120. smftools/tools/models/sklearn_models.py +40 -0
  121. smftools/tools/models/transformer.py +133 -0
  122. smftools/tools/models/wrappers.py +20 -0
  123. smftools/tools/nucleosome_hmm_refinement.py +104 -0
  124. smftools/tools/position_stats.py +239 -0
  125. smftools/tools/read_stats.py +70 -0
  126. smftools/tools/subset_adata.py +28 -0
  127. smftools/tools/train_hmm.py +78 -0
  128. smftools/tools/training/__init__.py +1 -0
  129. smftools/tools/training/train_lightning_model.py +47 -0
  130. smftools/tools/utils/__init__.py +2 -0
  131. smftools/tools/utils/device.py +10 -0
  132. smftools/tools/utils/grl.py +14 -0
  133. {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/METADATA +5 -2
  134. smftools-0.1.7.dist-info/RECORD +136 -0
  135. smftools-0.1.6.dist-info/RECORD +0 -4
  136. {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/WHEEL +0 -0
  137. {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,260 @@
+ ## fasta_module
+ from .. import readwrite
+ # bioinformatic operations
+ from Bio import SeqIO
+ from Bio.SeqRecord import SeqRecord
+ from Bio.Seq import Seq
+ import pysam
+ import numpy as np  # required for np.nan and np.zeros below
+ # NOTE: time_string() used throughout is assumed to be provided by the package's readwrite helpers
+
+ ######################################################################################################
+ ## FASTA functionality
+ # General
+
+ # Conversion specific
+ def modify_sequence_and_id(record, modification_type, strand):
+     """
+     Input: Takes a FASTA record, a modification type, and a strand.
+     Output: Returns a new SeqRecord object carrying the conversions of interest.
+     """
+     if modification_type == '5mC':
+         if strand == 'top':
+             # Replace every 'C' with 'T' in the sequence
+             new_seq = record.seq.upper().replace('C', 'T')
+         elif strand == 'bottom':
+             # Replace every 'G' with 'A' in the sequence
+             new_seq = record.seq.upper().replace('G', 'A')
+         else:
+             print('need to provide a valid strand string: top or bottom')
+     elif modification_type == '6mA':
+         if strand == 'top':
+             # Replace every 'A' with 'G' in the sequence
+             new_seq = record.seq.upper().replace('A', 'G')
+         elif strand == 'bottom':
+             # Replace every 'T' with 'C' in the sequence
+             new_seq = record.seq.upper().replace('T', 'C')
+         else:
+             print('need to provide a valid strand string: top or bottom')
+     elif modification_type == 'unconverted':
+         new_seq = record.seq.upper()
+     else:
+         print('need to provide a valid modification_type string: 5mC, 6mA, or unconverted')
+     new_id = '{0}_{1}_{2}'.format(record.id, modification_type, strand)
+     # Return a new SeqRecord with the modified sequence and ID
+     return record.__class__(new_seq, id=new_id, description=record.description)
+
+ def generate_converted_FASTA(input_fasta, modification_types, strands, output_fasta):
+     """
+     Input: Takes an input FASTA, the modification types of interest, the strands of interest, and an output FASTA name.
+     Output: Writes out a new FASTA containing all stranded conversions.
+     Notes: Applies modify_sequence_and_id to every record within the FASTA.
+     """
+     with open(output_fasta, 'w') as output_handle:
+         modified_records = []
+         # Iterate over each record in the input FASTA
+         for record in SeqIO.parse(input_fasta, 'fasta'):
+             # Iterate over each modification type of interest
+             for modification_type in modification_types:
+                 # Iterate over the strands of interest
+                 for i, strand in enumerate(strands):
+                     if i > 0 and modification_type == 'unconverted':
+                         # Ensures the unconverted record is added only once, using the strand at index 0 of strands
+                         pass
+                     else:
+                         # Add the modified record to the list of modified records
+                         print(f'converting {modification_type} on the {strand} strand of record {record.id}')
+                         modified_records.append(modify_sequence_and_id(record, modification_type, strand))
+         # Write out the concatenated FASTA file of modified sequences
+         SeqIO.write(modified_records, output_handle, 'fasta')
+
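For orientation, a minimal sketch of calling the conversion helpers above; the file names here are hypothetical:

    # Write top/bottom 5mC-converted records plus one unconverted record per input record
    generate_converted_FASTA(
        input_fasta='reference.fa',                 # hypothetical input path
        modification_types=['5mC', 'unconverted'],
        strands=['top', 'bottom'],
        output_fasta='reference_converted.fa',      # hypothetical output path
    )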
+ def find_coordinates(fasta_file, modification_type):
+     """
+     Finds the genomic coordinates of every cytosine in each unconverted record contained within a FASTA file.
+     If searching for adenine conversions, it instead finds the coordinates of all adenines.
+     Input: A FASTA file and the modification type of interest
+     Returns:
+         A dictionary called record_dict, keyed by the unconverted record ids contained within the FASTA. Each key points to a list containing: 1) sequence length of the record, 2) top strand coordinate list, 3) bottom strand coordinate list, 4) sequence string
+     """
+     print('{0}: Finding positions of interest in reference FASTA > {1}'.format(time_string(), fasta_file))
+     record_dict = {}
+     print('{0}: Opening FASTA file {1}'.format(time_string(), fasta_file))
+     # Open the FASTA record as read only
+     with open(fasta_file, "r") as f:
+         # Iterate over records in the FASTA
+         for record in SeqIO.parse(f, "fasta"):
+             # Only iterate over the unconverted records for the reference
+             if 'unconverted' in record.id:
+                 print('{0}: Iterating over record {1} in FASTA file {2}'.format(time_string(), record.id, fasta_file))
+                 # Initialize per-record lists to hold top and bottom strand positional coordinates of interest
+                 top_strand_coordinates = []
+                 bottom_strand_coordinates = []
+                 # Extract the sequence string of the record
+                 sequence = str(record.seq).upper()
+                 sequence_length = len(sequence)
+                 if modification_type == '5mC':
+                     # Iterate over the sequence string from the record
+                     for i in range(0, len(sequence)):
+                         if sequence[i] == 'C':
+                             top_strand_coordinates.append(i)  # 0-indexed coordinate
+                         if sequence[i] == 'G':
+                             bottom_strand_coordinates.append(i)  # 0-indexed coordinate
+                     print('{0}: Returning zero-indexed top and bottom strand FASTA coordinates for all cytosines'.format(time_string()))
+                 elif modification_type == '6mA':
+                     # Iterate over the sequence string from the record
+                     for i in range(0, len(sequence)):
+                         if sequence[i] == 'A':
+                             top_strand_coordinates.append(i)  # 0-indexed coordinate
+                         if sequence[i] == 'T':
+                             bottom_strand_coordinates.append(i)  # 0-indexed coordinate
+                     print('{0}: Returning zero-indexed top and bottom strand FASTA coordinates for adenines of interest'.format(time_string()))
+                 else:
+                     print('modification_type not found. Please try 5mC or 6mA')
+                 record_dict[record.id] = [sequence_length, top_strand_coordinates, bottom_strand_coordinates, sequence]
+             else:
+                 pass
+     return record_dict
+
+ # Direct methylation specific
+ def get_references(fasta_file):
+     """
+     Input: A FASTA file
+     Returns:
+         A dictionary called record_dict, keyed by the record ids contained within the FASTA. Each key points to a list containing: 1) sequence length of the record, 2) sequence of the record
+     """
+     record_dict = {}
+     print('{0}: Opening FASTA file {1}'.format(time_string(), fasta_file))
+     # Open the FASTA record as read only
+     with open(fasta_file, "r") as f:
+         # Iterate over records in the FASTA
+         for record in SeqIO.parse(f, "fasta"):
+             # Extract the sequence string of the record
+             sequence = str(record.seq).upper()
+             sequence_length = len(sequence)
+             record_dict[record.id] = [sequence_length, sequence]
+     return record_dict
+ ######################################################################################################
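A short sketch of consuming the record dictionaries returned by the two functions above (paths hypothetical):

    # Conversion workflow: per-record cytosine coordinates from the unconverted references
    record_dict = find_coordinates('reference_converted.fa', '5mC')
    for record_id, (seq_len, top_coords, bottom_coords, seq) in record_dict.items():
        print(record_id, seq_len, len(top_coords), len(bottom_coords))

    # Direct-methylation workflow: just lengths and sequences for every record
    references = get_references('reference.fa')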
+
+ ######################################################################################################
+ ## BAM functionality
+ # General
+ def separate_bam_by_bc(input_bam, output_prefix):
+     """
+     Input: Takes a single BAM file, plus an output prefix used to name the output files.
+     Output: Splits the BAM into one file per barcode, based on the BC SAM tag value.
+     """
+     # Open the input BAM file for reading
+     with pysam.AlignmentFile(input_bam, "rb") as bam:
+         # Create a dictionary to store output BAM files
+         output_files = {}
+         # Iterate over each read in the BAM file
+         for read in bam:
+             try:
+                 # Get the barcode tag value
+                 bc_tag = read.get_tag("BC", with_value_type=True)[0].split('barcode')[1]
+                 # Open the output BAM file corresponding to the barcode
+                 if bc_tag not in output_files:
+                     output_files[bc_tag] = pysam.AlignmentFile(f"{output_prefix}_{bc_tag}.bam", "wb", header=bam.header)
+                 # Write the read to the corresponding output BAM file
+                 output_files[bc_tag].write(read)
+             except KeyError:
+                 print(f"BC tag not present for read: {read.query_name}")
+         # Close all output BAM files
+         for output_file in output_files.values():
+             output_file.close()
+
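A usage sketch for the barcode splitter, assuming BC tag values of the form 'barcode01' (path and prefix hypothetical):

    # Writes one BAM per barcode, named <output_prefix>_<barcode>.bam
    separate_bam_by_bc('aligned_sorted.bam', 'sample')
    # e.g. reads tagged BC:Z:barcode01 end up in sample_01.bam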
+ def count_aligned_reads(bam_file):
+     """
+     Input: A BAM alignment file.
+     Output: The number of aligned and unaligned reads in the BAM file. Also returns a dictionary, keyed by reference id, that points to a tuple containing the number of reads mapped to that reference and the proportion of all mapped reads that map to it.
+     """
+     print('{0}: Counting aligned reads in BAM > {1}'.format(time_string(), bam_file))
+     aligned_reads_count = 0
+     unaligned_reads_count = 0
+     # Dictionary keyed by reference_name, pointing to the number of reads mapped to that reference (later converted to a (count, proportion) tuple)
+     record_counts = {}
+     with pysam.AlignmentFile(bam_file, "rb") as bam:
+         # Iterate over reads to get the total mapped read counts and the reads that map to each reference
+         for read in bam:
+             if read.is_unmapped:
+                 unaligned_reads_count += 1
+             else:
+                 aligned_reads_count += 1
+                 if read.reference_name in record_counts:
+                     record_counts[read.reference_name] += 1
+                 else:
+                     record_counts[read.reference_name] = 1
+     # Reformat the dictionary to hold the read count mapped to each reference and the proportion of mapped reads on that reference
+     for reference in record_counts:
+         proportion_mapped_reads_in_record = record_counts[reference] / aligned_reads_count
+         record_counts[reference] = (record_counts[reference], proportion_mapped_reads_in_record)
+     return aligned_reads_count, unaligned_reads_count, record_counts
+
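A sketch of inspecting the counts returned above (path hypothetical):

    aligned, unaligned, per_reference = count_aligned_reads('aligned_sorted.bam')
    print(f'{aligned} aligned reads, {unaligned} unaligned reads')
    for reference, (n_reads, fraction) in per_reference.items():
        print(f'{reference}: {n_reads} reads ({fraction:.1%} of aligned reads)')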
+ def extract_base_identity_at_coordinates(bam_file, chromosome, positions, max_reference_length):
+     """
+     Input: A position-sorted BAM file, a chromosome/record name, a set of position coordinates, and the reference length over which to extract base identities from each read.
+     Output: A dictionary, keyed by read name, that points to a list of base identities from that read.
+     If the read does not cover a position, the list at that index is filled with an N value.
+     """
+     positions = set(positions)
+     # Initialize a base identity dictionary holding key-value pairs of read name -> list of base identities at positions of interest
+     base_identities = {}
+     # Open the position-sorted BAM file
+     print('{0}: Reading BAM file: {1}'.format(time_string(), bam_file))
+     with pysam.AlignmentFile(bam_file, "rb") as bam:
+         # Iterate over every read in the BAM that comes from the chromosome of interest
+         print('{0}: Iterating over reads in bam'.format(time_string()))
+         for read in bam.fetch(chromosome):
+             if read.query_name in base_identities:
+                 pass
+                 #print('Duplicate read found in BAM for read {}. Skipping duplicate'.format(read.query_name))
+             else:
+                 # Initialize the read key in the base_identities dictionary by pointing to an N-filled list of length max_reference_length
+                 base_identities[read.query_name] = ['N'] * max_reference_length
+                 # Iterate over a list of tuples for the given read. The tuples contain the 0-indexed position relative to the read start, as well as the 0-based index relative to the reference.
+                 for read_position, reference_position in read.get_aligned_pairs():
+                     # If the aligned read's reference coordinate is in the positions set and the read position was successfully mapped (0 is a valid position, so compare against None)
+                     if reference_position in positions and read_position is not None:
+                         # Get the base identity in the read corresponding to that position
+                         base_identity = read.query_sequence[read_position]
+                         # Add the base identity to the array
+                         base_identities[read.query_name][reference_position] = base_identity
+     return base_identities
+
+ # Conversion SMF specific
+ def binarize_converted_base_identities(base_identities, strand, modification_type):
+     """
+     Input: The base identities dictionary returned by extract_base_identity_at_coordinates.
+     Output: A binarized version of the dictionary, where 1 represents a methylated site, 0 represents an unmethylated site, and NaN represents a site that does not carry SMF information.
+     """
+     binarized_base_identities = {}
+     # Iterate over base identity keys to binarize the base identities
+     for key in base_identities.keys():
+         if strand == 'top':
+             if modification_type == '5mC':
+                 binarized_base_identities[key] = [1 if x == 'C' else 0 if x == 'T' else np.nan for x in base_identities[key]]
+             elif modification_type == '6mA':
+                 binarized_base_identities[key] = [1 if x == 'A' else 0 if x == 'G' else np.nan for x in base_identities[key]]
+         elif strand == 'bottom':
+             if modification_type == '5mC':
+                 binarized_base_identities[key] = [1 if x == 'G' else 0 if x == 'A' else np.nan for x in base_identities[key]]
+             elif modification_type == '6mA':
+                 binarized_base_identities[key] = [1 if x == 'T' else 0 if x == 'C' else np.nan for x in base_identities[key]]
+         else:
+             pass
+     return binarized_base_identities
+
+ # Direct methylation specific
+
+ ######################################################################################################
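A sketch tying the conversion-SMF helpers together; the BAM path, record name, and coordinates are hypothetical:

    # Coordinates of interest for one unconverted reference record
    record_dict = find_coordinates('reference_converted.fa', '5mC')
    seq_len, top_coords, bottom_coords, seq = record_dict['locus1_unconverted_top']
    # Per-read base identities at those coordinates, then binarized methylation calls
    base_identities = extract_base_identity_at_coordinates(
        'aligned_sorted.bam', 'locus1_unconverted_top', top_coords, seq_len)
    methylation_calls = binarize_converted_base_identities(base_identities, 'top', '5mC')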
+
+ ######################################################################################################
+ # String encodings
+ def one_hot_encode(sequence):
+     """
+     Input: A sequence string of a read.
+     Output: One-hot encoding of the sequence string.
+     """
+     mapping = {'A': 0, 'C': 1, 'G': 2, 'T': 3, 'N': 4}
+     one_hot_matrix = np.zeros((len(sequence), 5), dtype=int)
+     for i, nucleotide in enumerate(sequence):
+         one_hot_matrix[i, mapping[nucleotide]] = 1
+     return one_hot_matrix
+ ######################################################################################################
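And a small sketch of the string encoder:

    one_hot = one_hot_encode('ACGTN')
    # one_hot has shape (5, 5); row i is the one-hot vector for the i-th base
    assert one_hot[0].tolist() == [1, 0, 0, 0, 0]  # 'A' maps to column 0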