smftools 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. smftools/__init__.py +29 -0
  2. smftools/_settings.py +20 -0
  3. smftools/_version.py +1 -0
  4. smftools/datasets/F1_hybrid_NKG2A_enhander_promoter_GpC_conversion_SMF.h5ad.gz +0 -0
  5. smftools/datasets/F1_sample_sheet.csv +5 -0
  6. smftools/datasets/__init__.py +9 -0
  7. smftools/datasets/dCas9_m6A_invitro_kinetics.h5ad.gz +0 -0
  8. smftools/datasets/datasets.py +28 -0
  9. smftools/informatics/__init__.py +16 -0
  10. smftools/informatics/archived/bam_conversion.py +59 -0
  11. smftools/informatics/archived/bam_direct.py +63 -0
  12. smftools/informatics/archived/basecalls_to_adata.py +71 -0
  13. smftools/informatics/archived/print_bam_query_seq.py +29 -0
  14. smftools/informatics/basecall_pod5s.py +80 -0
  15. smftools/informatics/conversion_smf.py +132 -0
  16. smftools/informatics/direct_smf.py +137 -0
  17. smftools/informatics/fast5_to_pod5.py +21 -0
  18. smftools/informatics/helpers/LoadExperimentConfig.py +75 -0
  19. smftools/informatics/helpers/__init__.py +74 -0
  20. smftools/informatics/helpers/align_and_sort_BAM.py +59 -0
  21. smftools/informatics/helpers/aligned_BAM_to_bed.py +74 -0
  22. smftools/informatics/helpers/archived/informatics.py +260 -0
  23. smftools/informatics/helpers/archived/load_adata.py +516 -0
  24. smftools/informatics/helpers/bam_qc.py +66 -0
  25. smftools/informatics/helpers/bed_to_bigwig.py +39 -0
  26. smftools/informatics/helpers/binarize_converted_base_identities.py +79 -0
  27. smftools/informatics/helpers/canoncall.py +34 -0
  28. smftools/informatics/helpers/complement_base_list.py +21 -0
  29. smftools/informatics/helpers/concatenate_fastqs_to_bam.py +55 -0
  30. smftools/informatics/helpers/converted_BAM_to_adata.py +245 -0
  31. smftools/informatics/helpers/converted_BAM_to_adata_II.py +369 -0
  32. smftools/informatics/helpers/count_aligned_reads.py +43 -0
  33. smftools/informatics/helpers/demux_and_index_BAM.py +52 -0
  34. smftools/informatics/helpers/extract_base_identities.py +44 -0
  35. smftools/informatics/helpers/extract_mods.py +83 -0
  36. smftools/informatics/helpers/extract_read_features_from_bam.py +31 -0
  37. smftools/informatics/helpers/extract_read_lengths_from_bed.py +25 -0
  38. smftools/informatics/helpers/extract_readnames_from_BAM.py +22 -0
  39. smftools/informatics/helpers/find_conversion_sites.py +50 -0
  40. smftools/informatics/helpers/generate_converted_FASTA.py +99 -0
  41. smftools/informatics/helpers/get_chromosome_lengths.py +32 -0
  42. smftools/informatics/helpers/get_native_references.py +28 -0
  43. smftools/informatics/helpers/index_fasta.py +12 -0
  44. smftools/informatics/helpers/make_dirs.py +21 -0
  45. smftools/informatics/helpers/make_modbed.py +27 -0
  46. smftools/informatics/helpers/modQC.py +27 -0
  47. smftools/informatics/helpers/modcall.py +36 -0
  48. smftools/informatics/helpers/modkit_extract_to_adata.py +884 -0
  49. smftools/informatics/helpers/ohe_batching.py +76 -0
  50. smftools/informatics/helpers/ohe_layers_decode.py +32 -0
  51. smftools/informatics/helpers/one_hot_decode.py +27 -0
  52. smftools/informatics/helpers/one_hot_encode.py +57 -0
  53. smftools/informatics/helpers/plot_read_length_and_coverage_histograms.py +53 -0
  54. smftools/informatics/helpers/run_multiqc.py +28 -0
  55. smftools/informatics/helpers/separate_bam_by_bc.py +43 -0
  56. smftools/informatics/helpers/split_and_index_BAM.py +36 -0
  57. smftools/informatics/load_adata.py +182 -0
  58. smftools/informatics/readwrite.py +106 -0
  59. smftools/informatics/subsample_fasta_from_bed.py +47 -0
  60. smftools/informatics/subsample_pod5.py +104 -0
  61. smftools/plotting/__init__.py +15 -0
  62. smftools/plotting/classifiers.py +355 -0
  63. smftools/plotting/general_plotting.py +205 -0
  64. smftools/plotting/position_stats.py +462 -0
  65. smftools/preprocessing/__init__.py +33 -0
  66. smftools/preprocessing/append_C_context.py +82 -0
  67. smftools/preprocessing/archives/mark_duplicates.py +146 -0
  68. smftools/preprocessing/archives/preprocessing.py +614 -0
  69. smftools/preprocessing/archives/remove_duplicates.py +21 -0
  70. smftools/preprocessing/binarize_on_Youden.py +45 -0
  71. smftools/preprocessing/binary_layers_to_ohe.py +40 -0
  72. smftools/preprocessing/calculate_complexity.py +72 -0
  73. smftools/preprocessing/calculate_consensus.py +47 -0
  74. smftools/preprocessing/calculate_converted_read_methylation_stats.py +94 -0
  75. smftools/preprocessing/calculate_coverage.py +42 -0
  76. smftools/preprocessing/calculate_pairwise_differences.py +49 -0
  77. smftools/preprocessing/calculate_pairwise_hamming_distances.py +27 -0
  78. smftools/preprocessing/calculate_position_Youden.py +115 -0
  79. smftools/preprocessing/calculate_read_length_stats.py +79 -0
  80. smftools/preprocessing/clean_NaN.py +46 -0
  81. smftools/preprocessing/filter_adata_by_nan_proportion.py +31 -0
  82. smftools/preprocessing/filter_converted_reads_on_methylation.py +44 -0
  83. smftools/preprocessing/filter_reads_on_length.py +51 -0
  84. smftools/preprocessing/flag_duplicate_reads.py +149 -0
  85. smftools/preprocessing/invert_adata.py +30 -0
  86. smftools/preprocessing/load_sample_sheet.py +38 -0
  87. smftools/preprocessing/make_dirs.py +21 -0
  88. smftools/preprocessing/min_non_diagonal.py +25 -0
  89. smftools/preprocessing/recipes.py +127 -0
  90. smftools/preprocessing/subsample_adata.py +58 -0
  91. smftools/readwrite.py +198 -0
  92. smftools/tools/__init__.py +49 -0
  93. smftools/tools/apply_hmm.py +202 -0
  94. smftools/tools/apply_hmm_batched.py +241 -0
  95. smftools/tools/archived/classify_methylated_features.py +66 -0
  96. smftools/tools/archived/classify_non_methylated_features.py +75 -0
  97. smftools/tools/archived/subset_adata_v1.py +32 -0
  98. smftools/tools/archived/subset_adata_v2.py +46 -0
  99. smftools/tools/calculate_distances.py +18 -0
  100. smftools/tools/calculate_umap.py +62 -0
  101. smftools/tools/call_hmm_peaks.py +105 -0
  102. smftools/tools/classifiers.py +787 -0
  103. smftools/tools/cluster_adata_on_methylation.py +105 -0
  104. smftools/tools/data/__init__.py +2 -0
  105. smftools/tools/data/anndata_data_module.py +90 -0
  106. smftools/tools/data/preprocessing.py +6 -0
  107. smftools/tools/display_hmm.py +18 -0
  108. smftools/tools/evaluation/__init__.py +0 -0
  109. smftools/tools/general_tools.py +69 -0
  110. smftools/tools/hmm_readwrite.py +16 -0
  111. smftools/tools/inference/__init__.py +1 -0
  112. smftools/tools/inference/lightning_inference.py +41 -0
  113. smftools/tools/models/__init__.py +9 -0
  114. smftools/tools/models/base.py +14 -0
  115. smftools/tools/models/cnn.py +34 -0
  116. smftools/tools/models/lightning_base.py +41 -0
  117. smftools/tools/models/mlp.py +17 -0
  118. smftools/tools/models/positional.py +17 -0
  119. smftools/tools/models/rnn.py +16 -0
  120. smftools/tools/models/sklearn_models.py +40 -0
  121. smftools/tools/models/transformer.py +133 -0
  122. smftools/tools/models/wrappers.py +20 -0
  123. smftools/tools/nucleosome_hmm_refinement.py +104 -0
  124. smftools/tools/position_stats.py +239 -0
  125. smftools/tools/read_stats.py +70 -0
  126. smftools/tools/subset_adata.py +28 -0
  127. smftools/tools/train_hmm.py +78 -0
  128. smftools/tools/training/__init__.py +1 -0
  129. smftools/tools/training/train_lightning_model.py +47 -0
  130. smftools/tools/utils/__init__.py +2 -0
  131. smftools/tools/utils/device.py +10 -0
  132. smftools/tools/utils/grl.py +14 -0
  133. {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/METADATA +5 -2
  134. smftools-0.1.7.dist-info/RECORD +136 -0
  135. smftools-0.1.6.dist-info/RECORD +0 -4
  136. {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/WHEEL +0 -0
  137. {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,50 @@
1
def find_conversion_sites(fasta_file, modification_type, conversion_types):
    """
    Finds genomic coordinates of modified bases (5mC or 6mA) in a reference FASTA file.

    Parameters:
        fasta_file (str): Path to the converted reference FASTA.
        modification_type (str): Modification type ('5mC' or '6mA') or 'unconverted'.
        conversion_types (list): List of conversion types. The first element is the unconverted record type.

    Returns:
        dict: Dictionary keyed by record name (both unconverted & converted records).
            Values contain:
            [sequence length, top strand coordinates, bottom strand coordinates, sequence, complement sequence].
    """
    from Bio import SeqIO

    # (top strand base, bottom strand base) searched for each modification type
    base_mappings = {
        '5mC': ('C', 'G'),  # Cytosine and Guanine
        '6mA': ('A', 'T'),  # Adenine and Thymine
    }

    unconverted = conversion_types[0]
    record_dict = {}

    with open(fasta_file, "r") as handle:
        for record in SeqIO.parse(handle, "fasta"):
            # Only records whose id carries the unconverted tag are considered
            if unconverted not in record.id:
                continue

            sequence = str(record.seq).upper()
            complement = str(record.seq.complement()).upper()
            seq_len = len(sequence)

            if modification_type == unconverted:
                # Unconverted case: store the full sequence without coordinate filtering
                record_dict[record.id] = [seq_len, [], [], sequence, complement]
            elif modification_type in base_mappings:
                # Converted case: collect positions of the modifiable base on each strand
                top_base, bottom_base = base_mappings[modification_type]
                top_coords = [i for i, base in enumerate(sequence) if base == top_base]
                bottom_coords = [i for i, base in enumerate(sequence) if base == bottom_base]
                record_dict[record.id] = [seq_len, top_coords, bottom_coords, sequence, complement]
            else:
                raise ValueError(f"Invalid modification_type: {modification_type}. Choose '5mC', '6mA', or 'unconverted'.")

    return record_dict
@@ -0,0 +1,99 @@
1
+ import numpy as np
2
+ import gzip
3
+ import os
4
+ from Bio import SeqIO
5
+ from Bio.SeqRecord import SeqRecord
6
+ from Bio.Seq import Seq
7
+ from concurrent.futures import ProcessPoolExecutor
8
+ from itertools import chain
9
+
10
def convert_FASTA_record(record, modification_type, strand, unconverted):
    """
    Converts a FASTA record based on modification type and strand.

    Raises:
        ValueError: If (modification_type, strand) is not a supported combination.
    """
    # (original base, substituted base) for each supported conversion
    conversion_maps = {
        ('5mC', 'top'): ('C', 'T'),
        ('5mC', 'bottom'): ('G', 'A'),
        ('6mA', 'top'): ('A', 'G'),
        ('6mA', 'bottom'): ('T', 'C'),
    }

    sequence = str(record.seq).upper()

    # Unconverted records pass through unchanged; labelled 'top' by convention
    if modification_type == unconverted:
        return SeqRecord(Seq(sequence), id=f"{record.id}_{modification_type}_top", description=record.description)

    mapping = conversion_maps.get((modification_type, strand))
    if mapping is None:
        raise ValueError(f"Invalid combination: {modification_type}, {strand}")

    original_base, converted_base = mapping
    converted_sequence = sequence.replace(original_base, converted_base)

    return SeqRecord(Seq(converted_sequence), id=f"{record.id}_{modification_type}_{strand}", description=record.description)
31
+
32
+
33
def process_fasta_record(args):
    """
    Processes a single FASTA record for parallel execution.

    Args:
        args (tuple): (record, modification_types, strands, unconverted)

    Returns:
        list of modified SeqRecord objects.
    """
    record, modification_types, strands, unconverted = args

    converted = []
    for mod_type in modification_types:
        for strand_index, strand in enumerate(strands):
            # The unconverted record is strand-agnostic: emit it only once
            if mod_type == unconverted and strand_index > 0:
                continue
            converted.append(convert_FASTA_record(record, mod_type, strand, unconverted))

    return converted
52
+
53
+
54
def generate_converted_FASTA(input_fasta, modification_types, strands, output_fasta, num_threads=4, chunk_size=500):
    """
    Converts an input FASTA file and writes a new converted FASTA file efficiently.

    Parameters:
        input_fasta (str): Path to the unconverted FASTA file.
        modification_types (list): List of modification types ('5mC', '6mA', or unconverted).
        strands (list): List of strands ('top', 'bottom').
        output_fasta (str): Path to the converted FASTA output file.
        num_threads (int): Number of parallel workers to use.
        chunk_size (int): Number of records to accumulate before each write.

    Returns:
        None (Writes the converted FASTA file).
    """
    unconverted = modification_types[0]

    # Transparently handle gzipped input
    is_gzipped = input_fasta.endswith('.gz')

    def _iter_records():
        """Lazily stream records from the (possibly gzipped) input FASTA."""
        opener = gzip.open if is_gzipped else open
        mode = 'rt' if is_gzipped else 'r'
        with opener(input_fasta, mode) as handle:
            yield from SeqIO.parse(handle, 'fasta')

    # Argument tuples are generated lazily so the whole file is never in memory
    task_args = ((record, modification_types, strands, unconverted) for record in _iter_records())

    with open(output_fasta, 'w') as output_handle, ProcessPoolExecutor(max_workers=num_threads) as pool:
        # Fan the per-record conversion out across worker processes
        pending = []
        for converted_records in pool.map(process_fasta_record, task_args):
            pending.extend(converted_records)

            # Flush in chunks to bound memory use
            if len(pending) >= chunk_size:
                SeqIO.write(pending, output_handle, 'fasta')
                pending = []

        # Flush whatever is left over
        if pending:
            SeqIO.write(pending, output_handle, 'fasta')
@@ -0,0 +1,32 @@
1
# get_chromosome_lengths

def get_chromosome_lengths(fasta):
    """
    Generates a '<name>.chrom.sizes' file containing chromosome lengths for an input FASTA.

    Parameters:
        fasta (str): Path to the input fasta

    Returns:
        None (writes the chrom.sizes file next to the FASTA).
    """
    import os

    # Make a fasta index file if one isn't already available
    index_path = f'{fasta}.fai'
    if os.path.exists(index_path):
        print(f'Using existing fasta index file: {index_path}')
    else:
        # Imported lazily so the samtools-dependent indexing step is only
        # needed when no index already exists.
        from .index_fasta import index_fasta
        index_fasta(fasta)

    parent_dir = os.path.dirname(fasta)
    fasta_basename = os.path.basename(fasta)
    chrom_basename = fasta_basename.split('.fa')[0] + '.chrom.sizes'
    chrom_path = os.path.join(parent_dir, chrom_basename)

    # Make a chromosome length file from the first two .fai columns (name, length)
    if os.path.exists(chrom_path):
        print(f'Using existing chrom length index file: {chrom_path}')
    else:
        # Parse the .fai directly instead of shelling out to `cut`: removes the
        # external-tool dependency and surfaces read/write errors as exceptions.
        with open(index_path) as infile, open(chrom_path, 'w') as outfile:
            for line in infile:
                fields = line.rstrip('\n').split('\t')
                if len(fields) >= 2:
                    outfile.write(f'{fields[0]}\t{fields[1]}\n')
@@ -0,0 +1,28 @@
1
## get_native_references

# Direct methylation specific
def get_native_references(fasta_file):
    """
    Makes a dictionary keyed by record id which points to the record length and record sequence.

    Parameters:
        fasta_file (str): A string representing the path to the FASTA file for the experiment.

    Returns:
        dict: Maps record id -> [sequence length, uppercase sequence string].
    """
    from .. import readwrite
    from Bio import SeqIO

    record_dict = {}
    print('{0}: Opening FASTA file {1}'.format(readwrite.time_string(), fasta_file))
    # Open the FASTA record as read only
    with open(fasta_file, "r") as f:
        # Iterate over records in the FASTA
        for record in SeqIO.parse(f, "fasta"):
            # Uppercase so downstream base comparisons are case-insensitive
            sequence = str(record.seq).upper()
            sequence_length = len(sequence)
            record_dict[record.id] = [sequence_length, sequence]
    return record_dict
@@ -0,0 +1,12 @@
1
# index_fasta

def index_fasta(fasta):
    """
    Generate a FASTA index (.fai) file for an input fasta via `samtools faidx`.

    Parameters:
        fasta (str): Path to the input fasta to make an index file for.
    """
    import subprocess

    command = ["samtools", "faidx", fasta]
    subprocess.run(command)
@@ -0,0 +1,21 @@
1
## make_dirs

# General
def make_dirs(directories):
    """
    Takes a list of file paths and makes new directories if the directory does not already exist.

    Parameters:
        directories (list): A list of directories to make

    Returns:
        None
    """
    import os

    for directory in directories:
        if not os.path.isdir(directory):
            # makedirs (vs mkdir) also creates missing parent directories,
            # so nested output paths no longer raise FileNotFoundError.
            os.makedirs(directory)
            print(f"Directory '{directory}' created successfully.")
        else:
            print(f"Directory '{directory}' already exists.")
@@ -0,0 +1,27 @@
1
## make_modbed

# Direct SMF
def make_modbed(aligned_sorted_output, thresholds, mod_bed_dir):
    """
    Generating position methylation summaries for each barcoded sample starting from the overall BAM file that was direct output of dorado aligner.

    Parameters:
        aligned_sorted_output (str): A string representing the file path to the aligned_sorted non-split BAM file.
        thresholds (list): [filter, 6mA, 5mC, 5hmC] call thresholds forwarded to modkit.
        mod_bed_dir (str): Output directory for the per-barcode pileup files.

    Returns:
        None
    """
    import os
    import subprocess

    # modkit resolves outputs relative to the working directory
    os.chdir(mod_bed_dir)

    filter_threshold, m6A_threshold, m5C_threshold, hm5C_threshold = thresholds

    # Assemble the pileup invocation, partitioned per barcode (BC tag)
    command = ["modkit", "pileup", aligned_sorted_output, mod_bed_dir]
    command += ["--partition-tag", "BC"]
    command += ["--only-tabs"]
    command += ["--filter-threshold", f'{filter_threshold}']
    command += ["--mod-thresholds", f"m:{m5C_threshold}"]
    command += ["--mod-thresholds", f"a:{m6A_threshold}"]
    command += ["--mod-thresholds", f"h:{hm5C_threshold}"]
    subprocess.run(command)
@@ -0,0 +1,27 @@
1
## modQC

# Direct SMF
def modQC(aligned_sorted_output, thresholds):
    """
    Output the percentile of bases falling at a call threshold (threshold is a probability between 0-1) for the overall BAM file.
    It is generally good to look at these parameters on positive and negative controls.

    Parameters:
        aligned_sorted_output (str): A string representing the file path of the aligned_sorted non-split BAM file output by the dorado aligned.
        thresholds (list): A list of floats to pass for call thresholds.

    Returns:
        None
    """
    import subprocess

    filter_threshold, m6A_threshold, m5C_threshold, hm5C_threshold = thresholds

    # Report sampled call probabilities, then a thresholded modification summary
    subprocess.run(["modkit", "sample-probs", aligned_sorted_output])

    summary_command = ["modkit", "summary", aligned_sorted_output]
    summary_command += ["--filter-threshold", f"{filter_threshold}"]
    summary_command += ["--mod-thresholds", f"m:{m5C_threshold}"]
    summary_command += ["--mod-thresholds", f"a:{m6A_threshold}"]
    summary_command += ["--mod-thresholds", f"h:{hm5C_threshold}"]
    subprocess.run(summary_command)
@@ -0,0 +1,36 @@
1
## modcall

# Direct methylation specific
def modcall(model_dir, model, pod5_dir, barcode_kit, mod_list, bam, bam_suffix, barcode_both_ends=True, trim=False, device='auto'):
    """
    Wrapper function for dorado modified base calling.

    Parameters:
        model_dir (str): a string representing the file path to the dorado basecalling model directory.
        model (str): a string representing the the dorado basecalling model.
        pod5_dir (str): a string representing the file path to the experiment directory containing the POD5 files.
        barcode_kit (str): A string representing the barcoding kit used in the experiment.
        mod_list (list): A list of modification types to use in the analysis.
        bam (str): File path to the BAM file to output.
        bam_suffix (str): The suffix to use for the BAM file.
        barcode_both_ends (bool): Whether to require a barcode detection on both ends for demultiplexing.
        trim (bool): Whether to trim barcodes, adapters, and primers from read ends
        device (str): Device to use for basecalling. auto, metal, cpu, cuda.

    Returns:
        None
        Outputs a BAM file holding the modified base calls output by the dorado basecaller.
    """
    import subprocess

    output = bam + bam_suffix

    # Assemble the dorado basecaller invocation
    command = [
        "dorado", "basecaller",
        "--models-directory", model_dir,
        "--kit-name", barcode_kit,
        "--modified-bases", *mod_list,
        "--device", device,
        "--batchsize", "0",
    ]
    if barcode_both_ends:
        command.append("--barcode-both-ends")
    if not trim:
        command.append("--no-trim")
    command.extend([model, pod5_dir])

    print(f'Running: {" ".join(command)}')
    # dorado streams the BAM to stdout; redirect it into the output file
    with open(output, "w") as outfile:
        subprocess.run(command, stdout=outfile)