smftools 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (137)
  1. smftools/__init__.py +29 -0
  2. smftools/_settings.py +20 -0
  3. smftools/_version.py +1 -0
  4. smftools/datasets/F1_hybrid_NKG2A_enhander_promoter_GpC_conversion_SMF.h5ad.gz +0 -0
  5. smftools/datasets/F1_sample_sheet.csv +5 -0
  6. smftools/datasets/__init__.py +9 -0
  7. smftools/datasets/dCas9_m6A_invitro_kinetics.h5ad.gz +0 -0
  8. smftools/datasets/datasets.py +28 -0
  9. smftools/informatics/__init__.py +16 -0
  10. smftools/informatics/archived/bam_conversion.py +59 -0
  11. smftools/informatics/archived/bam_direct.py +63 -0
  12. smftools/informatics/archived/basecalls_to_adata.py +71 -0
  13. smftools/informatics/archived/print_bam_query_seq.py +29 -0
  14. smftools/informatics/basecall_pod5s.py +80 -0
  15. smftools/informatics/conversion_smf.py +132 -0
  16. smftools/informatics/direct_smf.py +137 -0
  17. smftools/informatics/fast5_to_pod5.py +21 -0
  18. smftools/informatics/helpers/LoadExperimentConfig.py +75 -0
  19. smftools/informatics/helpers/__init__.py +74 -0
  20. smftools/informatics/helpers/align_and_sort_BAM.py +59 -0
  21. smftools/informatics/helpers/aligned_BAM_to_bed.py +74 -0
  22. smftools/informatics/helpers/archived/informatics.py +260 -0
  23. smftools/informatics/helpers/archived/load_adata.py +516 -0
  24. smftools/informatics/helpers/bam_qc.py +66 -0
  25. smftools/informatics/helpers/bed_to_bigwig.py +39 -0
  26. smftools/informatics/helpers/binarize_converted_base_identities.py +79 -0
  27. smftools/informatics/helpers/canoncall.py +34 -0
  28. smftools/informatics/helpers/complement_base_list.py +21 -0
  29. smftools/informatics/helpers/concatenate_fastqs_to_bam.py +55 -0
  30. smftools/informatics/helpers/converted_BAM_to_adata.py +245 -0
  31. smftools/informatics/helpers/converted_BAM_to_adata_II.py +369 -0
  32. smftools/informatics/helpers/count_aligned_reads.py +43 -0
  33. smftools/informatics/helpers/demux_and_index_BAM.py +52 -0
  34. smftools/informatics/helpers/extract_base_identities.py +44 -0
  35. smftools/informatics/helpers/extract_mods.py +83 -0
  36. smftools/informatics/helpers/extract_read_features_from_bam.py +31 -0
  37. smftools/informatics/helpers/extract_read_lengths_from_bed.py +25 -0
  38. smftools/informatics/helpers/extract_readnames_from_BAM.py +22 -0
  39. smftools/informatics/helpers/find_conversion_sites.py +50 -0
  40. smftools/informatics/helpers/generate_converted_FASTA.py +99 -0
  41. smftools/informatics/helpers/get_chromosome_lengths.py +32 -0
  42. smftools/informatics/helpers/get_native_references.py +28 -0
  43. smftools/informatics/helpers/index_fasta.py +12 -0
  44. smftools/informatics/helpers/make_dirs.py +21 -0
  45. smftools/informatics/helpers/make_modbed.py +27 -0
  46. smftools/informatics/helpers/modQC.py +27 -0
  47. smftools/informatics/helpers/modcall.py +36 -0
  48. smftools/informatics/helpers/modkit_extract_to_adata.py +884 -0
  49. smftools/informatics/helpers/ohe_batching.py +76 -0
  50. smftools/informatics/helpers/ohe_layers_decode.py +32 -0
  51. smftools/informatics/helpers/one_hot_decode.py +27 -0
  52. smftools/informatics/helpers/one_hot_encode.py +57 -0
  53. smftools/informatics/helpers/plot_read_length_and_coverage_histograms.py +53 -0
  54. smftools/informatics/helpers/run_multiqc.py +28 -0
  55. smftools/informatics/helpers/separate_bam_by_bc.py +43 -0
  56. smftools/informatics/helpers/split_and_index_BAM.py +36 -0
  57. smftools/informatics/load_adata.py +182 -0
  58. smftools/informatics/readwrite.py +106 -0
  59. smftools/informatics/subsample_fasta_from_bed.py +47 -0
  60. smftools/informatics/subsample_pod5.py +104 -0
  61. smftools/plotting/__init__.py +15 -0
  62. smftools/plotting/classifiers.py +355 -0
  63. smftools/plotting/general_plotting.py +205 -0
  64. smftools/plotting/position_stats.py +462 -0
  65. smftools/preprocessing/__init__.py +33 -0
  66. smftools/preprocessing/append_C_context.py +82 -0
  67. smftools/preprocessing/archives/mark_duplicates.py +146 -0
  68. smftools/preprocessing/archives/preprocessing.py +614 -0
  69. smftools/preprocessing/archives/remove_duplicates.py +21 -0
  70. smftools/preprocessing/binarize_on_Youden.py +45 -0
  71. smftools/preprocessing/binary_layers_to_ohe.py +40 -0
  72. smftools/preprocessing/calculate_complexity.py +72 -0
  73. smftools/preprocessing/calculate_consensus.py +47 -0
  74. smftools/preprocessing/calculate_converted_read_methylation_stats.py +94 -0
  75. smftools/preprocessing/calculate_coverage.py +42 -0
  76. smftools/preprocessing/calculate_pairwise_differences.py +49 -0
  77. smftools/preprocessing/calculate_pairwise_hamming_distances.py +27 -0
  78. smftools/preprocessing/calculate_position_Youden.py +115 -0
  79. smftools/preprocessing/calculate_read_length_stats.py +79 -0
  80. smftools/preprocessing/clean_NaN.py +46 -0
  81. smftools/preprocessing/filter_adata_by_nan_proportion.py +31 -0
  82. smftools/preprocessing/filter_converted_reads_on_methylation.py +44 -0
  83. smftools/preprocessing/filter_reads_on_length.py +51 -0
  84. smftools/preprocessing/flag_duplicate_reads.py +149 -0
  85. smftools/preprocessing/invert_adata.py +30 -0
  86. smftools/preprocessing/load_sample_sheet.py +38 -0
  87. smftools/preprocessing/make_dirs.py +21 -0
  88. smftools/preprocessing/min_non_diagonal.py +25 -0
  89. smftools/preprocessing/recipes.py +127 -0
  90. smftools/preprocessing/subsample_adata.py +58 -0
  91. smftools/readwrite.py +198 -0
  92. smftools/tools/__init__.py +49 -0
  93. smftools/tools/apply_hmm.py +202 -0
  94. smftools/tools/apply_hmm_batched.py +241 -0
  95. smftools/tools/archived/classify_methylated_features.py +66 -0
  96. smftools/tools/archived/classify_non_methylated_features.py +75 -0
  97. smftools/tools/archived/subset_adata_v1.py +32 -0
  98. smftools/tools/archived/subset_adata_v2.py +46 -0
  99. smftools/tools/calculate_distances.py +18 -0
  100. smftools/tools/calculate_umap.py +62 -0
  101. smftools/tools/call_hmm_peaks.py +105 -0
  102. smftools/tools/classifiers.py +787 -0
  103. smftools/tools/cluster_adata_on_methylation.py +105 -0
  104. smftools/tools/data/__init__.py +2 -0
  105. smftools/tools/data/anndata_data_module.py +90 -0
  106. smftools/tools/data/preprocessing.py +6 -0
  107. smftools/tools/display_hmm.py +18 -0
  108. smftools/tools/evaluation/__init__.py +0 -0
  109. smftools/tools/general_tools.py +69 -0
  110. smftools/tools/hmm_readwrite.py +16 -0
  111. smftools/tools/inference/__init__.py +1 -0
  112. smftools/tools/inference/lightning_inference.py +41 -0
  113. smftools/tools/models/__init__.py +9 -0
  114. smftools/tools/models/base.py +14 -0
  115. smftools/tools/models/cnn.py +34 -0
  116. smftools/tools/models/lightning_base.py +41 -0
  117. smftools/tools/models/mlp.py +17 -0
  118. smftools/tools/models/positional.py +17 -0
  119. smftools/tools/models/rnn.py +16 -0
  120. smftools/tools/models/sklearn_models.py +40 -0
  121. smftools/tools/models/transformer.py +133 -0
  122. smftools/tools/models/wrappers.py +20 -0
  123. smftools/tools/nucleosome_hmm_refinement.py +104 -0
  124. smftools/tools/position_stats.py +239 -0
  125. smftools/tools/read_stats.py +70 -0
  126. smftools/tools/subset_adata.py +28 -0
  127. smftools/tools/train_hmm.py +78 -0
  128. smftools/tools/training/__init__.py +1 -0
  129. smftools/tools/training/train_lightning_model.py +47 -0
  130. smftools/tools/utils/__init__.py +2 -0
  131. smftools/tools/utils/device.py +10 -0
  132. smftools/tools/utils/grl.py +14 -0
  133. {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/METADATA +5 -2
  134. smftools-0.1.7.dist-info/RECORD +136 -0
  135. smftools-0.1.6.dist-info/RECORD +0 -4
  136. {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/WHEEL +0 -0
  137. {smftools-0.1.6.dist-info → smftools-0.1.7.dist-info}/licenses/LICENSE +0 -0
smftools/__init__.py ADDED
@@ -0,0 +1,29 @@
+ """smftools"""
+
+ import logging
+ import warnings
+
+ from . import informatics as inform
+ from . import preprocessing as pp
+ from . import tools as tl
+ from . import plotting as pl
+ from . import readwrite, datasets
+ from .readwrite import adata_to_df, safe_write_h5ad, merge_barcoded_anndatas
+
+
+ from importlib.metadata import version
+
+ package_name = "smftools"
+ __version__ = version(package_name)
+
+ __all__ = [
+     "adata_to_df",
+     "inform",
+     "pp",
+     "tl",
+     "pl",
+     "readwrite",
+     "datasets",
+     "safe_write_h5ad",
+     "merge_barcoded_anndatas"
+ ]
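
A minimal usage sketch of the namespace defined above; the printed version string is resolved at import time.

import smftools

print(smftools.__version__)   # resolved via importlib.metadata
# Submodule aliases exported above: smftools.pp (preprocessing), smftools.tl (tools),
# smftools.pl (plotting), smftools.inform (informatics), plus readwrite and datasets.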
smftools/_settings.py ADDED
@@ -0,0 +1,20 @@
+ from pathlib import Path
+ from typing import Union
+
+ class SMFConfig:
+     """\
+     Config for smftools.
+     """
+
+     def __init__(
+         self,
+         *,
+         datasetdir: Union[Path, str] = "./datasets/"
+     ):
+         self._datasetdir = Path(datasetdir) if isinstance(datasetdir, str) else datasetdir
+
+     @property
+     def datasetdir(self) -> Path:
+         return self._datasetdir
+
+ settings = SMFConfig()
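
A minimal sketch of the settings object created above; datasetdir is a read-only property and the custom path below is a placeholder.

from smftools._settings import SMFConfig, settings

print(settings.datasetdir)                            # the default "./datasets/" configured above
custom = SMFConfig(datasetdir="/data/smf_datasets")   # keyword-only constructor defined above
print(custom.datasetdir)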
smftools/_version.py ADDED
@@ -0,0 +1 @@
+ __version__ = "0.1.7"
smftools/datasets/F1_sample_sheet.csv ADDED
@@ -0,0 +1,5 @@
+ Sample,Sample_names,MTase,Time (min),Notes
+ barcode0001_sorted,Neither,M.CviPI,7.5,Cultured in IL2
+ barcode0002_sorted,BALBC,M.CviPI,7.5,Cultured in IL2
+ barcode0003_sorted,B6,M.CviPI,7.5,Cultured in IL2
+ barcode0004_sorted,Both,M.CviPI,7.5,Cultured in IL2
smftools/datasets/__init__.py ADDED
@@ -0,0 +1,9 @@
+ from .datasets import (
+     dCas9_kinetics,
+     Kissiov_and_McKenna_2025
+ )
+
+ __all__ = [
+     "dCas9_kinetics",
+     "Kissiov_and_McKenna_2025"
+ ]
smftools/datasets/datasets.py ADDED
@@ -0,0 +1,28 @@
+ ## datasets
+
+ def import_HERE():
+     """
+     Returns the directory containing the bundled datasets.
+     """
+     from pathlib import Path
+     from .._settings import settings
+     HERE = Path(__file__).parent
+     return HERE
+
+ def dCas9_kinetics():
+     """
+     In vitro Hia5 dCas9 kinetics SMF dataset. Nanopore HAC m6A modcalls.
+     """
+     import anndata as ad
+     HERE = import_HERE()
+     filepath = HERE / "dCas9_m6A_invitro_kinetics.h5ad.gz"
+     return ad.read_h5ad(filepath)
+
+ def Kissiov_and_McKenna_2025():
+     """
+     F1 hybrid M.CviPI natural killer cell SMF. Nanopore canonical calls of NEB EM-seq converted SMF gDNA.
+     """
+     import anndata as ad
+     HERE = import_HERE()
+     filepath = HERE / "F1_hybrid_NKG2A_enhander_promoter_GpC_conversion_SMF.h5ad.gz"
+     return ad.read_h5ad(filepath)
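
A minimal sketch of loading the bundled datasets defined above; the returned objects are the AnnData containers read by ad.read_h5ad.

from smftools import datasets

kinetics = datasets.dCas9_kinetics()            # in vitro Hia5-dCas9 m6A kinetics
f1_smf = datasets.Kissiov_and_McKenna_2025()    # F1 hybrid NK-cell GpC conversion SMF
print(kinetics)                                 # AnnData summary: n_obs x n_vars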
smftools/informatics/__init__.py ADDED
@@ -0,0 +1,16 @@
+ from . import helpers
+ from .basecall_pod5s import basecall_pod5s
+ from .load_adata import load_adata
+ from .subsample_fasta_from_bed import subsample_fasta_from_bed
+ from .subsample_pod5 import subsample_pod5
+ from .fast5_to_pod5 import fast5_to_pod5
+
+
+ __all__ = [
+     "basecall_pod5s",
+     "load_adata",
+     "subsample_fasta_from_bed",
+     "subsample_pod5",
+     "fast5_to_pod5",
+     "helpers"
+ ]
smftools/informatics/archived/bam_conversion.py ADDED
@@ -0,0 +1,59 @@
+ ## bam_conversion
+
+ def bam_conversion(fasta, output_directory, conversion_types, strands, basecalled_path, split_dir, mapping_threshold, experiment_name, bam_suffix):
+     """
+     Converts a BAM file from a nanopore conversion SMF experiment to an adata object.
+
+     Parameters:
+         fasta (str): File path to the reference genome to align to.
+         output_directory (str): A file path to the directory to output all the analyses.
+         conversion_types (list): A list of strings of the conversion types to use in the analysis.
+         strands (list): A list of conversion strands to use in the experiment.
+         basecalled_path (str): A file path to the experiment BAM or FASTQ file.
+         split_dir (str): A file path to the directory to split the BAMs into.
+         mapping_threshold (float): A value between 0 and 1 giving the minimal fraction of aligned reads that must map to a reference region. References with values above the threshold are included in the output adata.
+         experiment_name (str): An experiment name to use for the output adata file.
+         bam_suffix (str): A suffix to add to the bam file.
+
+     Returns:
+         None
+     """
+     from .helpers import align_and_sort_BAM, converted_BAM_to_adata, generate_converted_FASTA, split_and_index_BAM, make_dirs
+     import os
+     input_basecalled_basename = os.path.basename(basecalled_path)
+     bam_basename = input_basecalled_basename.split(".")[0]
+     output_bam = f"{output_directory}/{bam_basename}"
+     aligned_BAM = f"{output_bam}_aligned"
+     aligned_sorted_BAM = f"{aligned_BAM}_sorted"
+
+     os.chdir(output_directory)
+
+     # 1) Convert FASTA file
+     fasta_basename = os.path.basename(fasta)
+     converted_FASTA_basename = fasta_basename.split('.fa')[0] + '_converted.fasta'
+     converted_FASTA = os.path.join(output_directory, converted_FASTA_basename)
+     if 'converted.fa' in fasta:
+         print(fasta + ' is already converted. Using existing converted FASTA.')
+         converted_FASTA = fasta
+     elif os.path.exists(converted_FASTA):
+         print(converted_FASTA + ' already exists. Using existing converted FASTA.')
+     else:
+         generate_converted_FASTA(fasta, conversion_types, strands, converted_FASTA)
+
+     # 2) Align the basecalled file to the converted reference FASTA and sort the BAM on positional coordinates. Also make an index and a bed file of mapped reads.
+     aligned_output = aligned_BAM + bam_suffix
+     sorted_output = aligned_sorted_BAM + bam_suffix
+     if os.path.exists(aligned_output) and os.path.exists(sorted_output):
+         print(sorted_output + ' already exists. Using existing aligned/sorted BAM.')
+     else:
+         align_and_sort_BAM(converted_FASTA, basecalled_path, bam_suffix, output_directory)
+
+     # 3) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory
+     if os.path.isdir(split_dir):
+         print(split_dir + ' already exists. Using existing aligned/sorted/split BAMs.')
+     else:
+         make_dirs([split_dir])
+         split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory)
+
+     # 4) Take the converted BAM and load it into an adata object.
+     converted_BAM_to_adata(converted_FASTA, split_dir, mapping_threshold, experiment_name, conversion_types, bam_suffix)
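
A hypothetical call to the archived helper above; every path and value is a placeholder, and the conversion-type labels are illustrative rather than values shipped with the package.

bam_conversion(
    fasta="refs/target_locus.fa",
    output_directory="/abs/path/to/smf_out",
    conversion_types=["unconverted", "5mC"],      # placeholder labels
    strands=["top", "bottom"],
    basecalled_path="/abs/path/to/basecalls.bam",
    split_dir="/abs/path/to/smf_out/split_BAMs",
    mapping_threshold=0.05,
    experiment_name="conversion_pilot",
    bam_suffix=".bam",
)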
smftools/informatics/archived/bam_direct.py ADDED
@@ -0,0 +1,63 @@
+ ## bam_direct
+
+ def bam_direct(fasta, output_directory, mod_list, thresholds, bam_path, split_dir, mapping_threshold, experiment_name, bam_suffix, batch_size):
+     """
+     Converts a BAM file from a nanopore native (direct) SMF experiment to an adata object.
+
+     Parameters:
+         fasta (str): File path to the reference genome to align to.
+         output_directory (str): A file path to the directory to output all the analyses.
+         mod_list (list): A list of strings of the modification types to use in the analysis.
+         thresholds (list): A list of floats to pass for call thresholds.
+         bam_path (str): A file path to the BAM file.
+         split_dir (str): A file path to the directory to split the BAMs into.
+         mapping_threshold (float): A value between 0 and 1 giving the minimal fraction of aligned reads that must map to a reference region. References with values above the threshold are included in the output adata.
+         experiment_name (str): An experiment name to use for the output adata file.
+         bam_suffix (str): A suffix to add to the bam file.
+         batch_size (int): The number of TSV files to analyze in memory at once while loading the final adata object.
+
+     Returns:
+         None
+     """
+     from .helpers import align_and_sort_BAM, extract_mods, make_modbed, modkit_extract_to_adata, modQC, split_and_index_BAM, make_dirs
+     import os
+     input_bam_base = os.path.basename(bam_path)
+     bam_basename = input_bam_base.split(bam_suffix)[0]
+     output_bam = f"{output_directory}/{bam_basename}"
+     aligned_BAM = f"{output_bam}_aligned"
+     aligned_sorted_BAM = f"{aligned_BAM}_sorted"
+     mod_bed_dir = f"{output_directory}/split_mod_beds"
+     mod_tsv_dir = f"{output_directory}/split_mod_tsvs"
+
+     aligned_output = aligned_BAM + bam_suffix
+     aligned_sorted_output = aligned_sorted_BAM + bam_suffix
+     mod_map = {'6mA': '6mA', '5mC_5hmC': '5mC'}
+     mods = [mod_map[mod] for mod in mod_list]
+
+     os.chdir(output_directory)
+
+     # 1) Align the BAM to the reference FASTA. Also make an index and a bed file of mapped reads.
+     if os.path.exists(aligned_output) and os.path.exists(aligned_sorted_output):
+         print(aligned_sorted_output + ' already exists. Using existing aligned/sorted BAM.')
+     else:
+         align_and_sort_BAM(fasta, bam_path, bam_suffix, output_directory)
+     # 2) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory
+     if os.path.isdir(split_dir):
+         print(split_dir + ' already exists. Using existing aligned/sorted/split BAMs.')
+     else:
+         make_dirs([split_dir])
+         split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory)
+     # 3) Use nanopore modkit to work with modified BAM files
+     if os.path.isdir(mod_bed_dir):
+         print(mod_bed_dir + ' already exists')
+     else:
+         make_dirs([mod_bed_dir])
+         modQC(aligned_sorted_output, thresholds) # get QC metrics for mod calls
+         make_modbed(aligned_sorted_output, thresholds, mod_bed_dir) # Generate bed files of position methylation summaries for every sample
+     if os.path.isdir(mod_tsv_dir):
+         print(mod_tsv_dir + ' already exists')
+     else:
+         make_dirs([mod_tsv_dir])
+         extract_mods(thresholds, mod_tsv_dir, split_dir, bam_suffix) # Extract methylation calls for split BAM files into split TSV files
+     # 4) Load the modification data from TSVs into an adata object
+     modkit_extract_to_adata(fasta, split_dir, mapping_threshold, experiment_name, mods, batch_size, mod_tsv_dir)
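
Likewise, a hypothetical call to the archived bam_direct helper above; the thresholds follow the [filter, m6A, 5mC, 5hmC] ordering used elsewhere in this release, and all paths and values are placeholders.

bam_direct(
    fasta="refs/genome.fa",
    output_directory="/abs/path/to/smf_out",
    mod_list=["6mA", "5mC_5hmC"],               # keys of the mod_map above
    thresholds=[0.8, 0.8, 0.8, 0.8],            # placeholder call thresholds
    bam_path="/abs/path/to/modcalls.bam",
    split_dir="/abs/path/to/smf_out/split_BAMs",
    mapping_threshold=0.05,
    experiment_name="direct_pilot",
    bam_suffix=".bam",
    batch_size=10,
)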
smftools/informatics/archived/basecalls_to_adata.py ADDED
@@ -0,0 +1,71 @@
+ ## basecalls_to_adata
+
+ def basecalls_to_adata(config_path):
+     """
+     High-level function for loading basecalled SMF data from a BAM file into an adata object. Also works with FASTQ for conversion SMF.
+
+     Parameters:
+         config_path (str): A file path to the experiment configuration csv file.
+
+     Returns:
+         None
+     """
+     from .helpers import LoadExperimentConfig, make_dirs
+     from .subsample_fasta_from_bed import subsample_fasta_from_bed
+     import os
+     import numpy as np
+     bam_suffix = '.bam' # If different, change from here.
+     split_dir = 'split_BAMs' # If different, change from here.
+     strands = ['bottom', 'top'] # If different, change from here. Having both listed generally doesn't slow things down too much.
+     conversions = ['unconverted'] # The name to use for the unconverted files. If different, change from here.
+
+     # Load experiment config parameters into global variables
+     experiment_config = LoadExperimentConfig(config_path)
+     var_dict = experiment_config.var_dict
+
+     # The variables below will be None if they are either empty in the experiment_config.csv or fully omitted from the csv.
+     default_value = None
+
+     conversion_types = var_dict.get('conversion_types', default_value)
+     output_directory = var_dict.get('output_directory', default_value)
+     smf_modality = var_dict.get('smf_modality', default_value)
+     fasta = var_dict.get('fasta', default_value)
+     fasta_regions_of_interest = var_dict.get("fasta_regions_of_interest", default_value)
+     basecalled_path = var_dict.get('basecalled_path', default_value)
+     mapping_threshold = var_dict.get('mapping_threshold', default_value)
+     experiment_name = var_dict.get('experiment_name', default_value)
+     filter_threshold = var_dict.get('filter_threshold', default_value)
+     m6A_threshold = var_dict.get('m6A_threshold', default_value)
+     m5C_threshold = var_dict.get('m5C_threshold', default_value)
+     hm5C_threshold = var_dict.get('hm5C_threshold', default_value)
+     mod_list = var_dict.get('mod_list', default_value)
+     batch_size = var_dict.get('batch_size', default_value)
+     thresholds = [filter_threshold, m6A_threshold, m5C_threshold, hm5C_threshold]
+
+     split_path = os.path.join(output_directory, split_dir)
+
+     make_dirs([output_directory])
+     os.chdir(output_directory)
+
+     conversions += conversion_types
+
+     # If a bed file is passed, subsample the input FASTA on regions of interest and use the subsampled FASTA.
+     if fasta_regions_of_interest is not None:
+         if '.bed' in fasta_regions_of_interest:
+             fasta_basename = os.path.basename(fasta)
+             bed_basename_minus_suffix = os.path.basename(fasta_regions_of_interest).split('.bed')[0]
+             output_FASTA = bed_basename_minus_suffix + '_' + fasta_basename
+             subsample_fasta_from_bed(fasta, fasta_regions_of_interest, output_directory, output_FASTA)
+             fasta = output_FASTA
+
+     if smf_modality == 'conversion':
+         from .bam_conversion import bam_conversion
+         bam_conversion(fasta, output_directory, conversions, strands, basecalled_path, split_path, mapping_threshold, experiment_name, bam_suffix)
+     elif smf_modality == 'direct':
+         if bam_suffix in basecalled_path:
+             from .bam_direct import bam_direct
+             bam_direct(fasta, output_directory, mod_list, thresholds, basecalled_path, split_path, mapping_threshold, experiment_name, bam_suffix, batch_size)
+         else:
+             print('The basecalls_to_adata function only works with the direct modality when the input file type is BAM, not FASTQ.')
+     else:
+         print(f"Error: unrecognized smf_modality '{smf_modality}'. Expected 'conversion' or 'direct'.")
smftools/informatics/archived/print_bam_query_seq.py ADDED
@@ -0,0 +1,29 @@
+ import pysam
+ import sys
+
+ def extract_reads(bam_file_path, num_reads=10):
+     # Open the BAM file
+     bam_file = pysam.AlignmentFile(bam_file_path, "rb")
+
+     # Iterate through the first 'num_reads' reads and print the sequences
+     count = 0
+     for read in bam_file:
+         print(f"Read {count + 1}: {read.query_sequence}")
+         count += 1
+         if count >= num_reads:
+             break
+
+     # Close the BAM file
+     bam_file.close()
+
+ if __name__ == "__main__":
+     # Ensure a BAM file path is provided as a command line argument
+     if len(sys.argv) < 2:
+         print("Usage: python print_bam_query_seq.py <path_to_bam_file>")
+         sys.exit(1)
+
+     # Get the BAM file path from command line arguments
+     bam_file_path = sys.argv[1]
+
+     # Call the function to extract the first 10 reads
+     extract_reads(bam_file_path)
smftools/informatics/basecall_pod5s.py ADDED
@@ -0,0 +1,80 @@
+ # basecall_pod5s
+
+ def basecall_pod5s(config_path):
+     """
+     Basecall from pod5s given a config file.
+
+     Parameters:
+         config_path (str): File path to the basecall configuration file.
+
+     Returns:
+         None
+     """
+     # Lazy importing of packages
+     from .helpers import LoadExperimentConfig, make_dirs, canoncall, modcall
+     from .fast5_to_pod5 import fast5_to_pod5
+     import os
+     from pathlib import Path
+
+     # Default params
+     bam_suffix = '.bam' # If different, change from here.
+
+     # Load experiment config parameters into global variables
+     experiment_config = LoadExperimentConfig(config_path)
+     var_dict = experiment_config.var_dict
+
+     # The variables below will point to default_value if they are empty in the experiment_config.csv or if the variable is fully omitted from the csv.
+     default_value = None
+
+     # General config variable init
+     input_data_path = var_dict.get('input_data_path', default_value) # Path to a directory of POD5s/FAST5s or to a BAM/FASTQ file. Necessary.
+     output_directory = var_dict.get('output_directory', default_value) # Path to the output directory to make for the analysis. Necessary.
+     model = var_dict.get('model', default_value) # needed for dorado basecaller
+     barcode_kit = var_dict.get('barcode_kit', default_value) # needed for dorado basecaller
+     barcode_both_ends = var_dict.get('barcode_both_ends', default_value) # dorado demultiplexing
+     trim = var_dict.get('trim', default_value) # dorado adapter and barcode removal
+     device = var_dict.get('device', 'auto')
+
+     # Modified basecalling specific variable init
+     filter_threshold = var_dict.get('filter_threshold', default_value)
+     m6A_threshold = var_dict.get('m6A_threshold', default_value)
+     m5C_threshold = var_dict.get('m5C_threshold', default_value)
+     hm5C_threshold = var_dict.get('hm5C_threshold', default_value)
+     thresholds = [filter_threshold, m6A_threshold, m5C_threshold, hm5C_threshold]
+     mod_list = var_dict.get('mod_list', default_value)
+
+     # Make initial output directory
+     make_dirs([output_directory])
+     os.chdir(output_directory)
+
+     # Get the input filetype
+     if Path(input_data_path).is_file():
+         input_data_filetype = os.path.splitext(input_data_path)[1].lower()
+         input_is_pod5 = input_data_filetype in ['.pod5', '.p5']
+         input_is_fast5 = input_data_filetype in ['.fast5', '.f5']
+
+     elif Path(input_data_path).is_dir():
+         # Get the file names in the input data dir
+         input_files = os.listdir(input_data_path)
+         input_is_pod5 = any('.pod5' in file or '.p5' in file for file in input_files)
+         input_is_fast5 = any('.fast5' in file or '.f5' in file for file in input_files)
+
+     # If the input files are not pod5 files, and they are fast5 files, convert the files to a pod5 file before proceeding.
+     if input_is_fast5 and not input_is_pod5:
+         # Take the input directory of fast5 files and write out a single pod5 file into the output directory.
+         output_pod5 = os.path.join(output_directory, 'FAST5s_to_POD5.pod5')
+         print(f'Input contains fast5 files; converting and concatenating them into a single pod5 file at {output_pod5}')
+         fast5_to_pod5(input_data_path, output_pod5)
+         # Reassign the input path variable to point to the new pod5 file.
+         input_data_path = output_pod5
+
+     model_basename = os.path.basename(model)
+     model_basename = model_basename.replace('.', '_')
+
+     if mod_list:
+         mod_string = "_".join(mod_list)
+         bam = f"{output_directory}/{model_basename}_{mod_string}_calls"
+         modcall(model, input_data_path, barcode_kit, mod_list, bam, bam_suffix, barcode_both_ends, trim, device)
+     else:
+         bam = f"{output_directory}/{model_basename}_canonical_basecalls"
+         canoncall(model, input_data_path, barcode_kit, bam, bam_suffix, barcode_both_ends, trim, device)
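
A minimal sketch of driving the basecaller above from a config CSV; the file name is a placeholder, and the keys noted in the comment are the ones read via var_dict.get() in the function.

from smftools.informatics import basecall_pod5s

# The config CSV is expected to define at least input_data_path, output_directory,
# model, and barcode_kit; add mod_list for modified basecalling.
basecall_pod5s("basecall_config.csv")   # placeholder config path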
smftools/informatics/conversion_smf.py ADDED
@@ -0,0 +1,132 @@
+ ## conversion_smf
+
+ def conversion_smf(fasta, output_directory, conversion_types, strands, model_dir, model, input_data_path, split_dir, barcode_kit, mapping_threshold, experiment_name, bam_suffix, basecall, barcode_both_ends, trim, device, make_bigwigs, threads, input_already_demuxed):
+     """
+     Processes sequencing data from a conversion SMF experiment into an adata object.
+
+     Parameters:
+         fasta (str): File path to the reference genome to align to.
+         output_directory (str): A file path to the directory to output all the analyses.
+         conversion_types (list): A list of strings of the conversion types to use in the analysis.
+         strands (list): A list of conversion strands to use in the experiment.
+         model_dir (str): A file path to the dorado basecalling model directory.
+         model (str): The dorado basecalling model.
+         input_data_path (str): A file path to the experiment directory/file containing sequencing data.
+         split_dir (str): A file path to the directory to split the BAMs into.
+         barcode_kit (str): The barcoding kit used in the experiment.
+         mapping_threshold (float): A value between 0 and 1 giving the minimal fraction of aligned reads that must map to a reference region. References with values above the threshold are included in the output adata.
+         experiment_name (str): An experiment name to use for the output adata file.
+         bam_suffix (str): A suffix to add to the bam file.
+         basecall (bool): Whether to go through basecalling or not.
+         barcode_both_ends (bool): Whether to require barcode detection on both ends for demultiplexing.
+         trim (bool): Whether to trim barcodes, adapters, and primers from read ends.
+         device (str): Device to use for basecalling. One of 'auto', 'metal', 'cpu', 'cuda'.
+         make_bigwigs (bool): Whether to make bigwigs.
+         threads (int): CPU threads available for processing.
+         input_already_demuxed (bool): Whether the input files were already demultiplexed.
+
+     Returns:
+         final_adata, final_adata_path: The final adata object and the path it was written to.
+         sorted_output, bam_files: The aligned, sorted BAM path and the list of demultiplexed BAM paths.
+     """
+     from .helpers import align_and_sort_BAM, aligned_BAM_to_bed, canoncall, converted_BAM_to_adata_II, generate_converted_FASTA, get_chromosome_lengths, demux_and_index_BAM, make_dirs, bam_qc, run_multiqc, split_and_index_BAM
+     import os
+     import glob
+
+     if basecall:
+         model_basename = os.path.basename(model)
+         model_basename = model_basename.replace('.', '_')
+         bam = f"{output_directory}/{model_basename}_canonical_basecalls"
+     else:
+         bam_base = os.path.basename(input_data_path).split('.bam')[0]
+         bam = os.path.join(output_directory, bam_base)
+     aligned_BAM = f"{bam}_aligned"
+     aligned_sorted_BAM = f"{aligned_BAM}_sorted"
+
+     os.chdir(output_directory)
+
+     # 1) Convert FASTA file
+     fasta_basename = os.path.basename(fasta)
+     converted_FASTA_basename = fasta_basename.split('.fa')[0] + '_converted.fasta'
+     converted_FASTA = os.path.join(output_directory, converted_FASTA_basename)
+     if 'converted.fa' in fasta:
+         print(fasta + ' is already converted. Using existing converted FASTA.')
+         converted_FASTA = fasta
+     elif os.path.exists(converted_FASTA):
+         print(converted_FASTA + ' already exists. Using existing converted FASTA.')
+     else:
+         generate_converted_FASTA(fasta, conversion_types, strands, converted_FASTA)
+
+     # Make a FAI and .chrom.names file for the converted fasta
+     get_chromosome_lengths(converted_FASTA)
+
+     # 2) Basecall from the input POD5 to generate a singular output BAM
+     if basecall:
+         canoncall_output = bam + bam_suffix
+         if os.path.exists(canoncall_output):
+             print(canoncall_output + ' already exists. Using existing basecalled BAM.')
+         else:
+             canoncall(model_dir, model, input_data_path, barcode_kit, bam, bam_suffix, barcode_both_ends, trim, device)
+     else:
+         canoncall_output = input_data_path
+
+     # 3) Align the BAM to the converted reference FASTA and sort the BAM on positional coordinates. Also make an index and a bed file of mapped reads.
+     aligned_output = aligned_BAM + bam_suffix
+     sorted_output = aligned_sorted_BAM + bam_suffix
+     if os.path.exists(aligned_output) and os.path.exists(sorted_output):
+         print(sorted_output + ' already exists. Using existing aligned/sorted BAM.')
+     else:
+         align_and_sort_BAM(converted_FASTA, canoncall_output, bam_suffix, output_directory, make_bigwigs, threads)
+
+     # Make beds and provide basic histograms
+     bed_dir = os.path.join(output_directory, 'beds')
+     if os.path.isdir(bed_dir):
+         print(bed_dir + ' already exists. Skipping BAM -> BED conversion for ' + sorted_output)
+     else:
+         aligned_BAM_to_bed(aligned_output, output_directory, converted_FASTA, make_bigwigs, threads)
+
+     # 4) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory
+     if barcode_both_ends:
+         split_dir = split_dir + '_both_ends_barcoded'
+     else:
+         split_dir = split_dir + '_at_least_one_end_barcoded'
+
+     if os.path.isdir(split_dir):
+         print(split_dir + ' already exists. Using existing demultiplexed BAMs.')
+         bam_pattern = '*' + bam_suffix
+         bam_files = glob.glob(os.path.join(split_dir, bam_pattern))
+         bam_files = [bam for bam in bam_files if '.bai' not in bam and 'unclassified' not in bam]
+         bam_files.sort()
+     else:
+         make_dirs([split_dir])
+         if input_already_demuxed:
+             bam_files = split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory) # custom for non-nanopore
+         else:
+             bam_files = demux_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, barcode_kit, barcode_both_ends, trim, fasta, make_bigwigs, threads)
+
+     # Make beds and provide basic histograms
+     bed_dir = os.path.join(split_dir, 'beds')
+     if os.path.isdir(bed_dir):
+         print(bed_dir + ' already exists. Skipping BAM -> BED conversion for demultiplexed bams')
+     else:
+         for bam in bam_files:
+             aligned_BAM_to_bed(bam, split_dir, converted_FASTA, make_bigwigs, threads)
+
+     # 5) Samtools QC metrics on split BAM files
+     bam_qc_dir = f"{split_dir}/bam_qc"
+     if os.path.isdir(bam_qc_dir):
+         print(bam_qc_dir + ' already exists. Using existing BAM QC calculations.')
+     else:
+         make_dirs([bam_qc_dir])
+         bam_qc(bam_files, bam_qc_dir, threads, modality='conversion')
+
+     # multiqc
+     if os.path.isdir(f"{split_dir}/multiqc"):
+         print(f"{split_dir}/multiqc" + ' already exists, skipping multiqc')
+     else:
+         run_multiqc(split_dir, f"{split_dir}/multiqc")
+
+     # 6) Take the converted BAM and load it into an adata object.
+     final_adata, final_adata_path = converted_BAM_to_adata_II(converted_FASTA, split_dir, mapping_threshold, experiment_name, conversion_types, bam_suffix, device)
+
+     return final_adata, final_adata_path, sorted_output, bam_files
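
A hypothetical end-to-end call of conversion_smf; every path, kit name, model string, and conversion label below is a placeholder, not a value shipped with or validated by the package.

from smftools.informatics.conversion_smf import conversion_smf

final_adata, final_adata_path, sorted_bam, split_bams = conversion_smf(
    fasta="refs/target_locus.fa",
    output_directory="/abs/path/to/smf_out",
    conversion_types=["unconverted", "5mC"],        # placeholder labels
    strands=["top", "bottom"],
    model_dir="/abs/path/to/dorado_models",
    model="hac",                                    # placeholder dorado model name
    input_data_path="/abs/path/to/pod5s",
    split_dir="/abs/path/to/smf_out/split_BAMs",
    barcode_kit="SQK-NBD114-24",                    # placeholder barcoding kit
    mapping_threshold=0.05,
    experiment_name="conversion_pilot",
    bam_suffix=".bam",
    basecall=True,
    barcode_both_ends=False,
    trim=True,
    device="auto",
    make_bigwigs=False,
    threads=8,
    input_already_demuxed=False,
)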