smftools 0.1.3__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. {smftools-0.1.3.dist-info → smftools-0.1.6.dist-info}/METADATA +44 -11
  2. smftools-0.1.6.dist-info/RECORD +4 -0
  3. smftools/__init__.py +0 -25
  4. smftools/_settings.py +0 -20
  5. smftools/_version.py +0 -1
  6. smftools/datasets/F1_hybrid_NKG2A_enhander_promoter_GpC_conversion_SMF.h5ad.gz +0 -0
  7. smftools/datasets/F1_sample_sheet.csv +0 -5
  8. smftools/datasets/__init__.py +0 -9
  9. smftools/datasets/dCas9_m6A_invitro_kinetics.h5ad.gz +0 -0
  10. smftools/datasets/datasets.py +0 -28
  11. smftools/informatics/__init__.py +0 -14
  12. smftools/informatics/archived/bam_conversion.py +0 -59
  13. smftools/informatics/archived/bam_direct.py +0 -63
  14. smftools/informatics/archived/basecalls_to_adata.py +0 -71
  15. smftools/informatics/conversion_smf.py +0 -79
  16. smftools/informatics/direct_smf.py +0 -89
  17. smftools/informatics/fast5_to_pod5.py +0 -21
  18. smftools/informatics/helpers/LoadExperimentConfig.py +0 -74
  19. smftools/informatics/helpers/__init__.py +0 -60
  20. smftools/informatics/helpers/align_and_sort_BAM.py +0 -48
  21. smftools/informatics/helpers/aligned_BAM_to_bed.py +0 -73
  22. smftools/informatics/helpers/archived/informatics.py +0 -260
  23. smftools/informatics/helpers/archived/load_adata.py +0 -516
  24. smftools/informatics/helpers/bed_to_bigwig.py +0 -39
  25. smftools/informatics/helpers/binarize_converted_base_identities.py +0 -31
  26. smftools/informatics/helpers/canoncall.py +0 -25
  27. smftools/informatics/helpers/complement_base_list.py +0 -21
  28. smftools/informatics/helpers/concatenate_fastqs_to_bam.py +0 -54
  29. smftools/informatics/helpers/converted_BAM_to_adata.py +0 -233
  30. smftools/informatics/helpers/count_aligned_reads.py +0 -43
  31. smftools/informatics/helpers/extract_base_identities.py +0 -57
  32. smftools/informatics/helpers/extract_mods.py +0 -51
  33. smftools/informatics/helpers/extract_readnames_from_BAM.py +0 -22
  34. smftools/informatics/helpers/find_conversion_sites.py +0 -61
  35. smftools/informatics/helpers/generate_converted_FASTA.py +0 -98
  36. smftools/informatics/helpers/get_chromosome_lengths.py +0 -32
  37. smftools/informatics/helpers/get_native_references.py +0 -28
  38. smftools/informatics/helpers/index_fasta.py +0 -12
  39. smftools/informatics/helpers/make_dirs.py +0 -21
  40. smftools/informatics/helpers/make_modbed.py +0 -27
  41. smftools/informatics/helpers/modQC.py +0 -27
  42. smftools/informatics/helpers/modcall.py +0 -28
  43. smftools/informatics/helpers/modkit_extract_to_adata.py +0 -518
  44. smftools/informatics/helpers/ohe_batching.py +0 -52
  45. smftools/informatics/helpers/one_hot_encode.py +0 -21
  46. smftools/informatics/helpers/plot_read_length_and_coverage_histograms.py +0 -52
  47. smftools/informatics/helpers/separate_bam_by_bc.py +0 -43
  48. smftools/informatics/helpers/split_and_index_BAM.py +0 -41
  49. smftools/informatics/load_adata.py +0 -127
  50. smftools/informatics/readwrite.py +0 -106
  51. smftools/informatics/subsample_fasta_from_bed.py +0 -47
  52. smftools/informatics/subsample_pod5.py +0 -104
  53. smftools/plotting/__init__.py +0 -0
  54. smftools/preprocessing/__init__.py +0 -34
  55. smftools/preprocessing/append_C_context.py +0 -69
  56. smftools/preprocessing/archives/preprocessing.py +0 -614
  57. smftools/preprocessing/binarize_on_Youden.py +0 -42
  58. smftools/preprocessing/binary_layers_to_ohe.py +0 -30
  59. smftools/preprocessing/calculate_complexity.py +0 -71
  60. smftools/preprocessing/calculate_consensus.py +0 -47
  61. smftools/preprocessing/calculate_converted_read_methylation_stats.py +0 -96
  62. smftools/preprocessing/calculate_coverage.py +0 -41
  63. smftools/preprocessing/calculate_pairwise_hamming_distances.py +0 -27
  64. smftools/preprocessing/calculate_position_Youden.py +0 -104
  65. smftools/preprocessing/calculate_read_length_stats.py +0 -86
  66. smftools/preprocessing/clean_NaN.py +0 -38
  67. smftools/preprocessing/filter_converted_reads_on_methylation.py +0 -29
  68. smftools/preprocessing/filter_reads_on_length.py +0 -41
  69. smftools/preprocessing/invert_adata.py +0 -23
  70. smftools/preprocessing/load_sample_sheet.py +0 -24
  71. smftools/preprocessing/make_dirs.py +0 -21
  72. smftools/preprocessing/mark_duplicates.py +0 -134
  73. smftools/preprocessing/min_non_diagonal.py +0 -25
  74. smftools/preprocessing/recipes.py +0 -125
  75. smftools/preprocessing/remove_duplicates.py +0 -21
  76. smftools/readwrite.py +0 -106
  77. smftools/tools/__init__.py +0 -0
  78. smftools/tools/apply_HMM.py +0 -1
  79. smftools/tools/cluster.py +0 -0
  80. smftools/tools/read_HMM.py +0 -1
  81. smftools/tools/subset_adata.py +0 -32
  82. smftools/tools/train_HMM.py +0 -43
  83. smftools-0.1.3.dist-info/RECORD +0 -84
  84. {smftools-0.1.3.dist-info → smftools-0.1.6.dist-info}/WHEEL +0 -0
  85. {smftools-0.1.3.dist-info → smftools-0.1.6.dist-info}/licenses/LICENSE +0 -0
{smftools-0.1.3.dist-info → smftools-0.1.6.dist-info}/METADATA
@@ -1,12 +1,32 @@
  Metadata-Version: 2.3
  Name: smftools
- Version: 0.1.3
+ Version: 0.1.6
  Summary: Single Molecule Footprinting Analysis in Python.
  Project-URL: Source, https://github.com/jkmckenna/smftools
  Project-URL: Documentation, https://smftools.readthedocs.io/
  Author: Joseph McKenna
  Maintainer-email: Joseph McKenna <jkmckenna@berkeley.edu>
- License-Expression: MIT
+ License: MIT License
+
+ Copyright (c) 2024 jkmckenna
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
  License-File: LICENSE
  Keywords: anndata,chromatin-accessibility,machine-learning,nanopore,protein-dna-binding,single-locus,single-molecule-footprinting
  Classifier: Development Status :: 2 - Pre-Alpha
@@ -26,12 +46,18 @@ Classifier: Topic :: Scientific/Engineering :: Visualization
  Requires-Python: >=3.9
  Requires-Dist: anndata>=0.10.0
  Requires-Dist: biopython>=1.79
- Requires-Dist: cython>=0.29.28
+ Requires-Dist: fastcluster
+ Requires-Dist: hydra-core
+ Requires-Dist: igraph
+ Requires-Dist: leidenalg
+ Requires-Dist: lightning
+ Requires-Dist: multiqc
  Requires-Dist: networkx>=3.2
  Requires-Dist: numpy<2,>=1.22.0
+ Requires-Dist: omegaconf
  Requires-Dist: pandas>=1.4.2
  Requires-Dist: pod5>=0.1.21
- Requires-Dist: pomegranate>1.0.0
+ Requires-Dist: pomegranate>=1.0.0
  Requires-Dist: pyfaidx>=0.8.0
  Requires-Dist: pysam>=0.19.1
  Requires-Dist: scanpy>=1.9
@@ -40,6 +66,7 @@ Requires-Dist: scipy>=1.7.3
  Requires-Dist: seaborn>=0.11
  Requires-Dist: torch>=1.9.0
  Requires-Dist: tqdm
+ Requires-Dist: wandb
  Provides-Extra: docs
  Requires-Dist: ipython>=7.20; extra == 'docs'
  Requires-Dist: matplotlib!=3.6.1; extra == 'docs'
@@ -67,7 +94,7 @@ Description-Content-Type: text/markdown
  A Python tool for processing raw sequencing data derived from single molecule footprinting experiments into [anndata](https://anndata.readthedocs.io/en/latest/) objects. Additional functionality for preprocessing, analysis, and visualization.

  ## Philosophy
- While most genomic data structures handle low-coverage data (<100X) along large references, smftools prioritizes high-coverage data (scalable to at least 1 million X coverage) of a few genomic loci at a time. This enables efficient data storage, rapid data operations, hierarchical metadata handling, seamless integration with various machine-learning packages, and ease of visualization. Furthermore, functionality is modularized, enabling analysis sessions to be saved, reloaded, and easily shared with collaborators. Analyses are centered around the [anndata](https://anndata.readthedocs.io/en/latest/) object, and are heavily inspired by the work conducted within the single-cell genomics community.
+ While most genomic data structures handle low-coverage data (<100X) along large references, smftools prioritizes high-coverage data (scalable to >1,000,000X coverage) of a few genomic loci at a time. This enables efficient data storage, rapid data operations, hierarchical metadata handling, seamless integration with various machine-learning packages, and ease of visualization. Furthermore, functionality is modularized, enabling analysis sessions to be saved, reloaded, and easily shared with collaborators. Analyses are centered around the [anndata](https://anndata.readthedocs.io/en/latest/) object, and are heavily inspired by the work conducted within the single-cell genomics community.

  ## Dependencies
  The following CLI tools need to be installed and configured before using the informatics (smftools.inform) module of smftools:
@@ -81,14 +108,20 @@ The following CLI tools need to be installed and configured before using the inf
  ## Modules
  ### Informatics: Processes raw Nanopore/Illumina data from SMF experiments into an AnnData object.
  ![](docs/source/_static/smftools_informatics_diagram.png)
- ### Preprocessing: Appends QC metrics to the AnnData object and perfroms filtering.
+ ### Preprocessing: Appends QC metrics to the AnnData object and performs filtering.
  ![](docs/source/_static/smftools_preprocessing_diagram.png)
- - Tools: Appends various analyses to the AnnData object.
- - Plotting: Visualization of analyses stored within the AnnData object.
+ ### Tools: Appends analyses to the AnnData object.
+ - Currently Includes: Position X Position correlation matrices, Hidden Markov Model feature detection, clustering, dimensionality reduction, peak calling, train/test workflows for various ML classifiers.
+ - To do: Additional ML methods for learning predictive single molecule features on condition labels: Autoencoders, Variational Autoencoders, Transformers.
+ ### Plotting: Visualization of analyses stored within the AnnData object.
+ - Most analyses appended to the adata object by a tools method have, or will have, an accompanying plotting method.

  ## Announcements
- ### 09/09/24 - The pre-alpha phase package ([smftools-0.1.1](https://pypi.org/project/smftools/))
+
+ ### 10/01/24 - More recent versions are being updated through github and are not currently on pypi, please install from source. Thank you!
+
+ ### 09/09/24 - The version 0.1.1 package ([smftools-0.1.1](https://pypi.org/project/smftools/)) is installable through pypi!
  The informatics module has been bumped to alpha-phase status. This module can deal with POD5s and unaligned BAMS from nanopore conversion and direct SMF experiments, as well as FASTQs from Illumina conversion SMF experiments. Primary output from this module is an AnnData object containing all relevant SMF data, which is compatible with all downstream smftools modules. The other modules are still in pre-alpha phase. Preprocessing, Tools, and Plotting modules should be promoted to alpha-phase within the next month or so.

- ### 08/30/24 - The pre-alpha phase package ([smftools-0.1.0](https://pypi.org/project/smftools/)) is installable through pypi!
- Currently, this package (smftools-0.1.0) is going through rapid improvement (dependency handling accross Linux and Mac OS, testing, documentation, debugging) and is still too early in development for standard use. The underlying functionality was originally developed as a collection of scripts for single molecule footprinting (SMF) experiments in our lab, but is being packaged/developed to facilitate the expansion of SMF to any lab that is interested in performing these styles of experiments/analyses. The alpha-phase package is expected to be available within a couple months, so stay tuned!
+ ### 08/30/24 - The version 0.1.0 package ([smftools-0.1.0](https://pypi.org/project/smftools/)) is installable through pypi!
+ Currently, this package (smftools-0.1.0) is going through rapid improvement (dependency handling accross Linux and Mac OS, testing, documentation, debugging) and is still too early in development for widespread use. The underlying functionality was originally developed as a collection of scripts for single molecule footprinting (SMF) experiments in our lab, but is being packaged/developed to facilitate the expansion of SMF to any lab that is interested in performing these styles of experiments/analyses. The alpha-phase package is expected to be available within a couple months, so stay tuned!
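The README above centers every analysis on an AnnData object produced by the informatics module. As a rough orientation sketch (not taken from the package; the file name is a placeholder for whatever smftools.inform writes out), loading such an object for downstream preprocessing, tools, and plotting might look like:

```python
# Minimal sketch of the AnnData-centred workflow described in the README above.
# "experiment_name.h5ad" is a placeholder for the object written by smftools.inform.
import anndata as ad

adata = ad.read_h5ad("experiment_name.h5ad")  # single-molecule reads x reference positions
print(adata.shape)       # (n_reads, n_positions)
print(adata.obs.head())  # per-read metadata that preprocessing/tools methods append to
```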
smftools-0.1.6.dist-info/RECORD ADDED
@@ -0,0 +1,4 @@
+ smftools-0.1.6.dist-info/METADATA,sha256=03UV92H9JAHE0TlXfQ9Bmw9EU7EM9Gr5BNn9c5Td4Tw,8415
+ smftools-0.1.6.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
+ smftools-0.1.6.dist-info/licenses/LICENSE,sha256=F8LwmL6vMPddaCt1z1S83Kh_OZv50alTlY7BvVx1RXw,1066
+ smftools-0.1.6.dist-info/RECORD,,
smftools/__init__.py DELETED
@@ -1,25 +0,0 @@
- """smftools"""
-
- import logging
- import warnings
-
- from . import informatics as inform
- from . import preprocessing as pp
- from . import tools as tl
- from . import plotting as pl
- from . import readwrite, datasets
-
-
- from importlib.metadata import version
-
- package_name = "smftools"
- __version__ = version(package_name)
-
- __all__ = [
- "inform",
- "pp",
- "tl",
- "pl",
- "readwrite",
- "datasets"
- ]
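For orientation, the deleted `__init__.py` above is what exposed the module aliases used throughout the README (`inform`, `pp`, `tl`, `pl`). A hedged usage sketch, assuming a 0.1.3 install:

```python
# Sketch only: the namespace defined by the deleted 0.1.3 __init__.py above.
import smftools

print(smftools.__version__)   # resolved via importlib.metadata
smftools.inform               # informatics module (raw data -> AnnData)
smftools.pp                   # preprocessing
smftools.tl                   # tools
smftools.pl                   # plotting
smftools.datasets             # bundled example datasets (see datasets.py below)
```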
smftools/_settings.py DELETED
@@ -1,20 +0,0 @@
- from pathlib import Path
- from typing import Union
-
- class SMFConfig:
- """\
- Config for smftools.
- """
-
- def __init__(
- self,
- *,
- datasetdir: Union[Path, str] = "./datasets/"
- ):
- self._datasetdir = Path(datasetdir) if isinstance(datasetdir, str) else datasetdir
-
- @property
- def datasetdir(self) -> Path:
- return self._datasetdir
-
- settings = SMFConfig()
smftools/_version.py DELETED
@@ -1 +0,0 @@
- __version__ = "0.1.3"
smftools/datasets/F1_sample_sheet.csv DELETED
@@ -1,5 +0,0 @@
- Sample,Sample_names,MTase,Time (min),Notes
- barcode0001_sorted,Neither,M.CviPI,7.5,Cultured in IL2
- barcode0002_sorted,BALBC,M.CviPI,7.5,Cultured in IL2
- barcode0003_sorted,B6,M.CviPI,7.5,Cultured in IL2
- barcode0004_sorted,Both,M.CviPI,7.5,Cultured in IL2
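The deleted sample sheet above is a plain CSV keyed by barcode-split BAM names. As a hedged illustration only (the path is a placeholder; smftools has its own load_sample_sheet helper in preprocessing), it could be inspected directly with pandas:

```python
# Sketch: reading the bundled sample sheet shown above; column names come from the CSV itself.
import pandas as pd

sheet = pd.read_csv("F1_sample_sheet.csv")
print(sheet[["Sample", "Sample_names", "MTase"]])  # barcode -> sample annotations
```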
smftools/datasets/__init__.py DELETED
@@ -1,9 +0,0 @@
- from .datasets import (
- dCas9_kinetics,
- Kissiov_and_McKenna_2025
- )
-
- __all__ = [
- "dCas9_kinetics",
- "Kissiov_and_McKenna_2025"
- ]
smftools/datasets/datasets.py DELETED
@@ -1,28 +0,0 @@
- ## datasets
-
- def import_HERE():
- """
- Imports HERE for loading datasets
- """
- from pathlib import Path
- from .._settings import settings
- HERE = Path(__file__).parent
- return HERE
-
- def dCas9_kinetics():
- """
- in vitro Hia5 dCas9 kinetics SMF dataset. Nanopore HAC m6A modcalls.
- """
- import anndata as ad
- HERE = import_HERE()
- filepath = HERE / "dCas9_m6A_invitro_kinetics.h5ad.gz"
- return ad.read_h5ad(filepath)
-
- def Kissiov_and_McKenna_2025():
- """
- F1 Hybrid M.CviPI natural killer cell SMF. Nanopore canonical calls of NEB EMseq converted SMF gDNA.
- """
- import anndata as ad
- HERE = import_HERE()
- filepath = HERE / "F1_hybrid_NKG2A_enhander_promoter_GpC_conversion_SMF.h5ad.gz"
- return ad.read_h5ad(filepath)
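The two loaders above simply read bundled, gzip-compressed .h5ad files with anndata. A hedged usage sketch, assuming a 0.1.3 install with the bundled datasets present:

```python
# Illustrative only: calling the dataset loaders removed in this diff.
from smftools import datasets

kinetics = datasets.dCas9_kinetics()          # in vitro Hia5/dCas9 m6A kinetics
f1_smf = datasets.Kissiov_and_McKenna_2025()  # F1 hybrid NK-cell GpC-conversion SMF

print(kinetics)      # AnnData summary (reads x positions)
print(f1_smf.obs)    # per-read annotations
```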
smftools/informatics/__init__.py DELETED
@@ -1,14 +0,0 @@
- from . import helpers
- from .load_adata import load_adata
- from .subsample_fasta_from_bed import subsample_fasta_from_bed
- from .subsample_pod5 import subsample_pod5
- from .fast5_to_pod5 import fast5_to_pod5
-
-
- __all__ = [
- "load_adata",
- "subsample_fasta_from_bed",
- "subsample_pod5",
- "fast5_to_pod5",
- "helpers"
- ]
smftools/informatics/archived/bam_conversion.py DELETED
@@ -1,59 +0,0 @@
- ## bam_conversion
-
- def bam_conversion(fasta, output_directory, conversion_types, strands, basecalled_path, split_dir, mapping_threshold, experiment_name, bam_suffix):
- """
- Converts a BAM file from a nanopore conversion SMF experiment to an adata object.
-
- Parameters:
- fasta (str): File path to the reference genome to align to.
- output_directory (str): A file path to the directory to output all the analyses.
- conversion_type (list): A list of strings of the conversion types to use in the analysis.
- strands (list): A list of converstion strands to use in the experiment.
- basecalled_path (str): a string representing the file path to the experiment BAM or FASTQ file.
- split_dir (str): A string representing the file path to the directory to split the BAMs into.
- mapping_threshold (float): A value in between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
- experiment_name (str): A string to provide an experiment name to the output adata file.
- bam_suffix (str): A suffix to add to the bam file.
-
- Returns:
- None
- """
- from .helpers import align_and_sort_BAM, converted_BAM_to_adata, generate_converted_FASTA, split_and_index_BAM, make_dirs
- import os
- input_basecalled_basename = os.path.basename(basecalled_path)
- bam_basename = input_basecalled_basename.split(".")[0]
- output_bam=f"{output_directory}/{bam_basename}"
- aligned_BAM=f"{output_bam}_aligned"
- aligned_sorted_BAM=f"{aligned_BAM}_sorted"
-
- os.chdir(output_directory)
-
- # 1) Convert FASTA file
- fasta_basename = os.path.basename(fasta)
- converted_FASTA_basename = fasta_basename.split('.fa')[0]+'_converted.fasta'
- converted_FASTA = os.path.join(output_directory, converted_FASTA_basename)
- if 'converted.fa' in fasta:
- print(fasta + ' is already converted. Using existing converted FASTA.')
- converted_FASTA = fasta
- elif os.path.exists(converted_FASTA):
- print(converted_FASTA + ' already exists. Using existing converted FASTA.')
- else:
- generate_converted_FASTA(fasta, conversion_types, strands, converted_FASTA)
-
- # 2) Align the basecalled file to the converted reference FASTA and sort the bam on positional coordinates. Also make an index and a bed file of mapped reads
- aligned_output = aligned_BAM + bam_suffix
- sorted_output = aligned_sorted_BAM + bam_suffix
- if os.path.exists(aligned_output) and os.path.exists(sorted_output):
- print(sorted_output + ' already exists. Using existing aligned/sorted BAM.')
- else:
- align_and_sort_BAM(converted_FASTA, basecalled_path, bam_suffix, output_directory)
-
- ### 3) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory###
- if os.path.isdir(split_dir):
- print(split_dir + ' already exists. Using existing aligned/sorted/split BAMs.')
- else:
- make_dirs([split_dir])
- split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory)
-
- # 4) Take the converted BAM and load it into an adata object.
- converted_BAM_to_adata(converted_FASTA, split_dir, mapping_threshold, experiment_name, conversion_types, bam_suffix)
smftools/informatics/archived/bam_direct.py DELETED
@@ -1,63 +0,0 @@
- ## bam_direct
-
- def bam_direct(fasta, output_directory, mod_list, thresholds, bam_path, split_dir, mapping_threshold, experiment_name, bam_suffix, batch_size):
- """
- Converts a POD5 file from a nanopore native SMF experiment to an adata object.
-
- Parameters:
- fasta (str): File path to the reference genome to align to.
- output_directory (str): A file path to the directory to output all the analyses.
- mod_list (list): A list of strings of the modification types to use in the analysis.
- thresholds (list): A list of floats to pass for call thresholds.
- bam_path (str): a string representing the file path to the the BAM file.
- split_dir (str): A string representing the file path to the directory to split the BAMs into.
- mapping_threshold (float): A value in between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
- experiment_name (str): A string to provide an experiment name to the output adata file.
- bam_suffix (str): A suffix to add to the bam file.
- batch_size (int): An integer number of TSV files to analyze in memory at once while loading the final adata object.
-
- Returns:
- None
- """
- from .helpers import align_and_sort_BAM, extract_mods, make_modbed, modkit_extract_to_adata, modQC, split_and_index_BAM, make_dirs
- import os
- input_bam_base = os.path.basename(bam_path)
- bam_basename = input_bam_base.split(bam_suffix)[0]
- output_bam=f"{output_directory}/{bam_basename}"
- aligned_BAM=f"{output_bam}_aligned"
- aligned_sorted_BAM=f"{aligned_BAM}_sorted"
- mod_bed_dir=f"{output_directory}/split_mod_beds"
- mod_tsv_dir=f"{output_directory}/split_mod_tsvs"
-
- aligned_output = aligned_BAM + bam_suffix
- aligned_sorted_output = aligned_sorted_BAM + bam_suffix
- mod_map = {'6mA': '6mA', '5mC_5hmC': '5mC'}
- mods = [mod_map[mod] for mod in mod_list]
-
- os.chdir(output_directory)
-
- # 1) Align the BAM to the reference FASTA. Also make an index and a bed file of mapped reads
- if os.path.exists(aligned_output) and os.path.exists(aligned_sorted_output):
- print(aligned_sorted_output + ' already exists. Using existing aligned/sorted BAM.')
- else:
- align_and_sort_BAM(fasta, bam_path, bam_suffix, output_directory)
- # 2) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory
- if os.path.isdir(split_dir):
- print(split_dir + ' already exists. Using existing aligned/sorted/split BAMs.')
- else:
- make_dirs([split_dir])
- split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory)
- # 3) Using nanopore modkit to work with modified BAM files ###
- if os.path.isdir(mod_bed_dir):
- print(mod_bed_dir + ' already exists')
- else:
- make_dirs([mod_bed_dir])
- modQC(aligned_sorted_output, thresholds) # get QC metrics for mod calls
- make_modbed(aligned_sorted_output, thresholds, mod_bed_dir) # Generate bed files of position methylation summaries for every sample
- if os.path.isdir(mod_tsv_dir):
- print(mod_tsv_dir + ' already exists')
- else:
- make_dirs([mod_tsv_dir])
- extract_mods(thresholds, mod_tsv_dir, split_dir, bam_suffix) # Extract methylations calls for split BAM files into split TSV files
- #4 Load the modification data from TSVs into an adata object
- modkit_extract_to_adata(fasta, split_dir, mapping_threshold, experiment_name, mods, batch_size, mod_tsv_dir)
smftools/informatics/archived/basecalls_to_adata.py DELETED
@@ -1,71 +0,0 @@
- ## basecalls_to_adata
-
- def basecalls_to_adata(config_path):
- """
- High-level function to call for loading basecalled SMF data from a BAM file into an adata object. Also works with FASTQ for conversion SMF.
-
- Parameters:
- config_path (str): A string representing the file path to the experiment configuration csv file.
-
- Returns:
- None
- """
- from .helpers import LoadExperimentConfig, make_dirs
- from .subsample_fasta_from_bed import subsample_fasta_from_bed
- import os
- import numpy as np
- bam_suffix = '.bam' # If different, change from here.
- split_dir = 'split_BAMs' # If different, change from here.
- strands = ['bottom', 'top'] # If different, change from here. Having both listed generally doesn't slow things down too much.
- conversions = ['unconverted'] # The name to use for the unconverted files. If different, change from here.
-
- # Load experiment config parameters into global variables
- experiment_config = LoadExperimentConfig(config_path)
- var_dict = experiment_config.var_dict
-
- # These below variables will point to the value np.nan if they are either empty in the experiment_config.csv or if the variable is fully omitted from the csv.
- default_value = None
-
- conversion_types = var_dict.get('conversion_types', default_value)
- output_directory = var_dict.get('output_directory', default_value)
- smf_modality = var_dict.get('smf_modality', default_value)
- fasta = var_dict.get('fasta', default_value)
- fasta_regions_of_interest = var_dict.get("fasta_regions_of_interest", default_value)
- basecalled_path = var_dict.get('basecalled_path', default_value)
- mapping_threshold = var_dict.get('mapping_threshold', default_value)
- experiment_name = var_dict.get('experiment_name', default_value)
- filter_threshold = var_dict.get('filter_threshold', default_value)
- m6A_threshold = var_dict.get('m6A_threshold', default_value)
- m5C_threshold = var_dict.get('m5C_threshold', default_value)
- hm5C_threshold = var_dict.get('hm5C_threshold', default_value)
- mod_list = var_dict.get('mod_list', default_value)
- batch_size = var_dict.get('batch_size', default_value)
- thresholds = [filter_threshold, m6A_threshold, m5C_threshold, hm5C_threshold]
-
- split_path = os.path.join(output_directory, split_dir)
-
- make_dirs([output_directory])
- os.chdir(output_directory)
-
- conversions += conversion_types
-
- # If a bed file is passed, subsample the input FASTA on regions of interest and use the subsampled FASTA.
- if fasta_regions_of_interest != None:
- if '.bed' in fasta_regions_of_interest:
- fasta_basename = os.path.basename(fasta)
- bed_basename_minus_suffix = os.path.basename(fasta_regions_of_interest).split('.bed')[0]
- output_FASTA = bed_basename_minus_suffix + '_' + fasta_basename
- subsample_fasta_from_bed(fasta, fasta_regions_of_interest, output_directory, output_FASTA)
- fasta = output_FASTA
-
- if smf_modality == 'conversion':
- from .bam_conversion import bam_conversion
- bam_conversion(fasta, output_directory, conversions, strands, basecalled_path, split_path, mapping_threshold, experiment_name, bam_suffix)
- elif smf_modality == 'direct':
- if bam_suffix in basecalled_path:
- from .bam_direct import bam_direct
- bam_direct(fasta, output_directory, mod_list, thresholds, basecalled_path, split_path, mapping_threshold, experiment_name, bam_suffix, batch_size)
- else:
- print('basecalls_to_adata function only work with the direct modality when the input filetype is BAM and not FASTQ.')
- else:
- print("Error")
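The archived entry point above is driven entirely by one experiment configuration CSV. As a hedged sketch only: the config path is a placeholder, the exact CSV layout expected by LoadExperimentConfig is not shown in this diff, and the import assumes the archived module is importable (no archived/__init__.py appears in the file list):

```python
# Hedged sketch of driving the archived basecalls_to_adata() entry point shown above.
# "configs/experiment_config.csv" is a placeholder; the key names in the comment simply
# mirror the var_dict.get(...) calls in the deleted code.
from smftools.informatics.archived.basecalls_to_adata import basecalls_to_adata

# Config keys read: smf_modality, fasta, fasta_regions_of_interest, basecalled_path,
# output_directory, mapping_threshold, experiment_name, conversion_types, mod_list,
# batch_size, filter_threshold, m6A_threshold, m5C_threshold, hm5C_threshold.
basecalls_to_adata("configs/experiment_config.csv")
```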
smftools/informatics/conversion_smf.py DELETED
@@ -1,79 +0,0 @@
- ## conversion_smf
-
- def conversion_smf(fasta, output_directory, conversion_types, strands, model, input_data_path, split_dir, barcode_kit, mapping_threshold, experiment_name, bam_suffix, basecall):
- """
- Processes sequencing data from a conversion SMF experiment to an adata object.
-
- Parameters:
- fasta (str): File path to the reference genome to align to.
- output_directory (str): A file path to the directory to output all the analyses.
- conversion_type (list): A list of strings of the conversion types to use in the analysis.
- strands (list): A list of converstion strands to use in the experiment.
- model (str): a string representing the file path to the dorado basecalling model.
- input_data_path (str): a string representing the file path to the experiment directory/file containing sequencing data
- split_dir (str): A string representing the file path to the directory to split the BAMs into.
- barcode_kit (str): A string representing the barcoding kit used in the experiment.
- mapping_threshold (float): A value in between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
- experiment_name (str): A string to provide an experiment name to the output adata file.
- bam_suffix (str): A suffix to add to the bam file.
- basecall (bool): Whether to go through basecalling or not.
-
- Returns:
- None
- """
- from .helpers import align_and_sort_BAM, canoncall, converted_BAM_to_adata, generate_converted_FASTA, get_chromosome_lengths, split_and_index_BAM, make_dirs
- import os
- if basecall:
- model_basename = os.path.basename(model)
- model_basename = model_basename.replace('.', '_')
- bam=f"{output_directory}/{model_basename}_canonical_basecalls"
- else:
- bam_base=os.path.basename(input_data_path).split('.bam')[0]
- bam=os.path.join(output_directory, bam_base)
- aligned_BAM=f"{bam}_aligned"
- aligned_sorted_BAM=f"{aligned_BAM}_sorted"
-
- os.chdir(output_directory)
-
- # 1) Convert FASTA file
- fasta_basename = os.path.basename(fasta)
- converted_FASTA_basename = fasta_basename.split('.fa')[0]+'_converted.fasta'
- converted_FASTA = os.path.join(output_directory, converted_FASTA_basename)
- if 'converted.fa' in fasta:
- print(fasta + ' is already converted. Using existing converted FASTA.')
- converted_FASTA = fasta
- elif os.path.exists(converted_FASTA):
- print(converted_FASTA + ' already exists. Using existing converted FASTA.')
- else:
- generate_converted_FASTA(fasta, conversion_types, strands, converted_FASTA)
-
- # Make a FAI and .chrom.names file for the converted fasta
- get_chromosome_lengths(converted_FASTA)
-
- # 2) Basecall from the input POD5 to generate a singular output BAM
- if basecall:
- canoncall_output = bam + bam_suffix
- if os.path.exists(canoncall_output):
- print(canoncall_output + ' already exists. Using existing basecalled BAM.')
- else:
- canoncall(model, input_data_path, barcode_kit, bam, bam_suffix)
- else:
- canoncall_output = input_data_path
-
- # 3) Align the BAM to the converted reference FASTA and sort the bam on positional coordinates. Also make an index and a bed file of mapped reads
- aligned_output = aligned_BAM + bam_suffix
- sorted_output = aligned_sorted_BAM + bam_suffix
- if os.path.exists(aligned_output) and os.path.exists(sorted_output):
- print(sorted_output + ' already exists. Using existing aligned/sorted BAM.')
- else:
- align_and_sort_BAM(converted_FASTA, canoncall_output, bam_suffix, output_directory)
-
- ### 4) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory###
- if os.path.isdir(split_dir):
- print(split_dir + ' already exists. Using existing aligned/sorted/split BAMs.')
- else:
- make_dirs([split_dir])
- split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory, converted_FASTA)
-
- # 5) Take the converted BAM and load it into an adata object.
- converted_BAM_to_adata(converted_FASTA, split_dir, mapping_threshold, experiment_name, conversion_types, bam_suffix)
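For orientation only, a hedged example call to the deleted conversion_smf() pipeline above; every path, the conversion labels, the barcode kit name, and the model path are placeholders rather than values taken from the package:

```python
# Illustrative invocation of the conversion_smf() pipeline removed in this diff.
# All paths and labels below are placeholders for this sketch.
from smftools.informatics.conversion_smf import conversion_smf

conversion_smf(
    fasta="refs/locus.fasta",
    output_directory="analysis_out",
    conversion_types=["unconverted", "5mC"],      # placeholder conversion labels
    strands=["top", "bottom"],
    model="models/dna_r10.4.1_e8.2_400bps_hac",   # placeholder dorado model path
    input_data_path="pod5/run01/",                # POD5 input when basecall=True
    split_dir="analysis_out/split_BAMs",
    barcode_kit="SQK-NBD114-24",                  # placeholder barcoding kit name
    mapping_threshold=0.01,
    experiment_name="example_conversion_SMF",
    bam_suffix=".bam",
    basecall=True,
)
```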
smftools/informatics/direct_smf.py DELETED
@@ -1,89 +0,0 @@
- ## direct_smf
-
- def direct_smf(fasta, output_directory, mod_list, model, thresholds, input_data_path, split_dir, barcode_kit, mapping_threshold, experiment_name, bam_suffix, batch_size, basecall):
- """
- Processes sequencing data from a direct methylation detection Nanopore SMF experiment to an AnnData object.
-
- Parameters:
- fasta (str): File path to the reference genome to align to.
- output_directory (str): A file path to the directory to output all the analyses.
- mod_list (list): A list of strings of the modification types to use in the analysis.
- model (str): a string representing the file path to the dorado basecalling model.
- thresholds (list): A list of floats to pass for call thresholds.
- input_data_path (str): a string representing the file path to the experiment directory containing the input sequencing files.
- split_dir (str): A string representing the file path to the directory to split the BAMs into.
- barcode_kit (str): A string representing the barcoding kit used in the experiment.
- mapping_threshold (float): A value in between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
- experiment_name (str): A string to provide an experiment name to the output adata file.
- bam_suffix (str): A suffix to add to the bam file.
- batch_size (int): An integer number of TSV files to analyze in memory at once while loading the final adata object.
- basecall (bool): Whether to basecall
-
- Returns:
- None
- """
- from .helpers import align_and_sort_BAM, extract_mods, get_chromosome_lengths, make_modbed, modcall, modkit_extract_to_adata, modQC, split_and_index_BAM, make_dirs
- import os
-
- if basecall:
- model_basename = os.path.basename(model)
- model_basename = model_basename.replace('.', '_')
- mod_string = "_".join(mod_list)
- bam=f"{output_directory}/{model_basename}_{mod_string}_calls"
- else:
- bam_base=os.path.basename(input_data_path).split('.bam')[0]
- bam=os.path.join(output_directory, bam_base)
- aligned_BAM=f"{bam}_aligned"
- aligned_sorted_BAM=f"{aligned_BAM}_sorted"
- mod_bed_dir=f"{output_directory}/split_mod_beds"
- mod_tsv_dir=f"{output_directory}/split_mod_tsvs"
-
- aligned_sorted_output = aligned_sorted_BAM + bam_suffix
- mod_map = {'6mA': '6mA', '5mC_5hmC': '5mC'}
- mods = [mod_map[mod] for mod in mod_list]
-
- # Make a FAI and .chrom.names file for the fasta
- get_chromosome_lengths(fasta)
-
- os.chdir(output_directory)
-
- # 1) Basecall using dorado
- if basecall:
- modcall_output = bam + bam_suffix
- if os.path.exists(modcall_output):
- print(modcall_output + ' already exists. Using existing basecalled BAM.')
- else:
- modcall(model, input_data_path, barcode_kit, mod_list, bam, bam_suffix)
- else:
- modcall_output = input_data_path
-
- # 2) Align the BAM to the reference FASTA. Also make an index and a bed file of mapped reads
- aligned_output = aligned_BAM + bam_suffix
- sorted_output = aligned_sorted_BAM + bam_suffix
- if os.path.exists(aligned_output) and os.path.exists(sorted_output):
- print(sorted_output + ' already exists. Using existing aligned/sorted BAM.')
- else:
- align_and_sort_BAM(fasta, modcall_output, bam_suffix, output_directory)
-
- # 3) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory
- if os.path.isdir(split_dir):
- print(split_dir + ' already exists. Using existing aligned/sorted/split BAMs.')
- else:
- make_dirs([split_dir])
- split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, output_directory, fasta)
-
- # 4) Using nanopore modkit to work with modified BAM files ###
- if os.path.isdir(mod_bed_dir):
- print(mod_bed_dir + ' already exists')
- else:
- make_dirs([mod_bed_dir])
- modQC(aligned_sorted_output, thresholds) # get QC metrics for mod calls
- make_modbed(aligned_sorted_output, thresholds, mod_bed_dir) # Generate bed files of position methylation summaries for every sample
- if os.path.isdir(mod_tsv_dir):
- print(mod_tsv_dir + ' already exists')
- else:
- make_dirs([mod_tsv_dir])
- extract_mods(thresholds, mod_tsv_dir, split_dir, bam_suffix) # Extract methylations calls for split BAM files into split TSV files
-
- #5 Load the modification data from TSVs into an adata object
- modkit_extract_to_adata(fasta, split_dir, mapping_threshold, experiment_name, mods, batch_size, mod_tsv_dir)
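A short sketch clarifying the mod_list and thresholds inputs of the deleted direct_smf() above. The mod_list values come from the function's own mod_map; the thresholds ordering mirrors how basecalls_to_adata.py assembles the list; the numeric values are placeholders:

```python
# Sketch: inputs accepted by the deleted direct_smf() pipeline above.
mod_list = ["6mA", "5mC_5hmC"]                 # keys of the function's internal mod_map
mod_map = {"6mA": "6mA", "5mC_5hmC": "5mC"}
mods = [mod_map[m] for m in mod_list]          # -> ["6mA", "5mC"], passed to modkit_extract_to_adata

thresholds = [0.8, 0.9, 0.9, 0.9]              # [filter, m6A, 5mC, 5hmC] call thresholds (placeholder values)
```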
smftools/informatics/fast5_to_pod5.py DELETED
@@ -1,21 +0,0 @@
- # fast5_to_pod5
-
- def fast5_to_pod5(fast5_dir, output_pod5='FAST5s_to_POD5.pod5'):
- """
- Convert Nanopore FAST5 files to POD5 file
-
- Parameters:
- fast5_dir (str): String representing the file path to a directory containing all FAST5 files to convert into a single POD5 output.
- output_pod5 (str): The name of the output POD5.
-
- Returns:
- None
-
- """
- import subprocess
- from pathlib import Path
-
- if Path(fast5_dir).is_file():
- subprocess.run(["pod5", "convert", "fast5", fast5_dir, "--output", output_pod5])
- elif Path(fast5_dir).is_dir():
- subprocess.run(["pod5", "convert", "fast5", f".{fast5_dir}*.fast5", "--output", output_pod5])
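The deleted helper above simply shells out to the `pod5 convert fast5` CLI, so the pod5 tool must be on PATH. A hedged example call, with a placeholder run directory:

```python
# Illustrative call of the deleted fast5_to_pod5() helper; the input path is a placeholder.
# Internally this wraps the `pod5 convert fast5` command-line tool via subprocess.
from smftools.informatics import fast5_to_pod5

fast5_to_pod5("runs/run01/fast5/", output_pod5="run01.pod5")
```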