smftools 0.1.1__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66) hide show
  1. smftools-0.1.6.dist-info/METADATA +127 -0
  2. smftools-0.1.6.dist-info/RECORD +4 -0
  3. smftools/__init__.py +0 -25
  4. smftools/_settings.py +0 -19
  5. smftools/_version.py +0 -1
  6. smftools/datasets/F1_hybrid_NKG2A_enhander_promoter_GpC_conversion_SMF.h5ad.gz +0 -0
  7. smftools/datasets/__init__.py +0 -9
  8. smftools/datasets/dCas9_m6A_invitro_kinetics.h5ad.gz +0 -0
  9. smftools/datasets/datasets.py +0 -27
  10. smftools/informatics/__init__.py +0 -12
  11. smftools/informatics/bam_conversion.py +0 -47
  12. smftools/informatics/bam_direct.py +0 -49
  13. smftools/informatics/basecalls_to_adata.py +0 -42
  14. smftools/informatics/fast5_to_pod5.py +0 -19
  15. smftools/informatics/helpers/LoadExperimentConfig.py +0 -74
  16. smftools/informatics/helpers/__init__.py +0 -42
  17. smftools/informatics/helpers/align_and_sort_BAM.py +0 -52
  18. smftools/informatics/helpers/archived/informatics.py +0 -260
  19. smftools/informatics/helpers/archived/load_adata.py +0 -516
  20. smftools/informatics/helpers/binarize_converted_base_identities.py +0 -31
  21. smftools/informatics/helpers/canoncall.py +0 -23
  22. smftools/informatics/helpers/converted_BAM_to_adata.py +0 -164
  23. smftools/informatics/helpers/count_aligned_reads.py +0 -39
  24. smftools/informatics/helpers/extract_base_identities.py +0 -43
  25. smftools/informatics/helpers/extract_mods.py +0 -51
  26. smftools/informatics/helpers/find_conversion_sites.py +0 -59
  27. smftools/informatics/helpers/generate_converted_FASTA.py +0 -79
  28. smftools/informatics/helpers/get_native_references.py +0 -28
  29. smftools/informatics/helpers/make_dirs.py +0 -21
  30. smftools/informatics/helpers/make_modbed.py +0 -27
  31. smftools/informatics/helpers/modQC.py +0 -27
  32. smftools/informatics/helpers/modcall.py +0 -26
  33. smftools/informatics/helpers/modkit_extract_to_adata.py +0 -367
  34. smftools/informatics/helpers/one_hot_encode.py +0 -19
  35. smftools/informatics/helpers/separate_bam_by_bc.py +0 -41
  36. smftools/informatics/helpers/split_and_index_BAM.py +0 -29
  37. smftools/informatics/pod5_conversion.py +0 -53
  38. smftools/informatics/pod5_direct.py +0 -55
  39. smftools/informatics/pod5_to_adata.py +0 -40
  40. smftools/informatics/readwrite.py +0 -106
  41. smftools/informatics/subsample_pod5.py +0 -48
  42. smftools/plotting/__init__.py +0 -0
  43. smftools/preprocessing/__init__.py +0 -29
  44. smftools/preprocessing/append_C_context.py +0 -46
  45. smftools/preprocessing/archives/preprocessing.py +0 -614
  46. smftools/preprocessing/binarize_on_Youden.py +0 -42
  47. smftools/preprocessing/binary_layers_to_ohe.py +0 -30
  48. smftools/preprocessing/calculate_complexity.py +0 -71
  49. smftools/preprocessing/calculate_converted_read_methylation_stats.py +0 -45
  50. smftools/preprocessing/calculate_coverage.py +0 -41
  51. smftools/preprocessing/calculate_pairwise_hamming_distances.py +0 -27
  52. smftools/preprocessing/calculate_position_Youden.py +0 -104
  53. smftools/preprocessing/calculate_read_length_stats.py +0 -32
  54. smftools/preprocessing/clean_NaN.py +0 -38
  55. smftools/preprocessing/filter_converted_reads_on_methylation.py +0 -27
  56. smftools/preprocessing/filter_reads_on_length.py +0 -39
  57. smftools/preprocessing/invert_adata.py +0 -22
  58. smftools/preprocessing/mark_duplicates.py +0 -119
  59. smftools/preprocessing/min_non_diagonal.py +0 -25
  60. smftools/preprocessing/remove_duplicates.py +0 -18
  61. smftools/readwrite.py +0 -106
  62. smftools/tools/__init__.py +0 -0
  63. smftools-0.1.1.dist-info/METADATA +0 -88
  64. smftools-0.1.1.dist-info/RECORD +0 -64
  65. {smftools-0.1.1.dist-info → smftools-0.1.6.dist-info}/WHEEL +0 -0
  66. {smftools-0.1.1.dist-info → smftools-0.1.6.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,127 @@
1
+ Metadata-Version: 2.3
2
+ Name: smftools
3
+ Version: 0.1.6
4
+ Summary: Single Molecule Footprinting Analysis in Python.
5
+ Project-URL: Source, https://github.com/jkmckenna/smftools
6
+ Project-URL: Documentation, https://smftools.readthedocs.io/
7
+ Author: Joseph McKenna
8
+ Maintainer-email: Joseph McKenna <jkmckenna@berkeley.edu>
9
+ License: MIT License
10
+
11
+ Copyright (c) 2024 jkmckenna
12
+
13
+ Permission is hereby granted, free of charge, to any person obtaining a copy
14
+ of this software and associated documentation files (the "Software"), to deal
15
+ in the Software without restriction, including without limitation the rights
16
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
17
+ copies of the Software, and to permit persons to whom the Software is
18
+ furnished to do so, subject to the following conditions:
19
+
20
+ The above copyright notice and this permission notice shall be included in all
21
+ copies or substantial portions of the Software.
22
+
23
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29
+ SOFTWARE.
30
+ License-File: LICENSE
31
+ Keywords: anndata,chromatin-accessibility,machine-learning,nanopore,protein-dna-binding,single-locus,single-molecule-footprinting
32
+ Classifier: Development Status :: 2 - Pre-Alpha
33
+ Classifier: Environment :: Console
34
+ Classifier: Intended Audience :: Developers
35
+ Classifier: Intended Audience :: Science/Research
36
+ Classifier: License :: OSI Approved :: MIT License
37
+ Classifier: Natural Language :: English
38
+ Classifier: Operating System :: MacOS :: MacOS X
39
+ Classifier: Programming Language :: Python :: 3
40
+ Classifier: Programming Language :: Python :: 3.9
41
+ Classifier: Programming Language :: Python :: 3.10
42
+ Classifier: Programming Language :: Python :: 3.11
43
+ Classifier: Programming Language :: Python :: 3.12
44
+ Classifier: Topic :: Scientific/Engineering :: Bio-Informatics
45
+ Classifier: Topic :: Scientific/Engineering :: Visualization
46
+ Requires-Python: >=3.9
47
+ Requires-Dist: anndata>=0.10.0
48
+ Requires-Dist: biopython>=1.79
49
+ Requires-Dist: fastcluster
50
+ Requires-Dist: hydra-core
51
+ Requires-Dist: igraph
52
+ Requires-Dist: leidenalg
53
+ Requires-Dist: lightning
54
+ Requires-Dist: multiqc
55
+ Requires-Dist: networkx>=3.2
56
+ Requires-Dist: numpy<2,>=1.22.0
57
+ Requires-Dist: omegaconf
58
+ Requires-Dist: pandas>=1.4.2
59
+ Requires-Dist: pod5>=0.1.21
60
+ Requires-Dist: pomegranate>=1.0.0
61
+ Requires-Dist: pyfaidx>=0.8.0
62
+ Requires-Dist: pysam>=0.19.1
63
+ Requires-Dist: scanpy>=1.9
64
+ Requires-Dist: scikit-learn>=1.0.2
65
+ Requires-Dist: scipy>=1.7.3
66
+ Requires-Dist: seaborn>=0.11
67
+ Requires-Dist: torch>=1.9.0
68
+ Requires-Dist: tqdm
69
+ Requires-Dist: wandb
70
+ Provides-Extra: docs
71
+ Requires-Dist: ipython>=7.20; extra == 'docs'
72
+ Requires-Dist: matplotlib!=3.6.1; extra == 'docs'
73
+ Requires-Dist: myst-nb>=1; extra == 'docs'
74
+ Requires-Dist: myst-parser>=2; extra == 'docs'
75
+ Requires-Dist: nbsphinx>=0.9; extra == 'docs'
76
+ Requires-Dist: readthedocs-sphinx-search; extra == 'docs'
77
+ Requires-Dist: setuptools; extra == 'docs'
78
+ Requires-Dist: sphinx-autodoc-typehints>=1.25.2; extra == 'docs'
79
+ Requires-Dist: sphinx-book-theme>=1.1.0; extra == 'docs'
80
+ Requires-Dist: sphinx-copybutton; extra == 'docs'
81
+ Requires-Dist: sphinx-design; extra == 'docs'
82
+ Requires-Dist: sphinx>=7; extra == 'docs'
83
+ Requires-Dist: sphinxcontrib-bibtex; extra == 'docs'
84
+ Requires-Dist: sphinxext-opengraph; extra == 'docs'
85
+ Provides-Extra: tests
86
+ Requires-Dist: pytest; extra == 'tests'
87
+ Requires-Dist: pytest-cov; extra == 'tests'
88
+ Description-Content-Type: text/markdown
89
+
90
+ [![PyPI](https://img.shields.io/pypi/v/smftools.svg)](https://pypi.org/project/smftools)
91
+ [![Docs](https://readthedocs.org/projects/smftools/badge/?version=latest)](https://smftools.readthedocs.io/en/latest/?badge=latest)
92
+
93
+ # smftools
94
+ A Python tool for processing raw sequencing data derived from single molecule footprinting experiments into [anndata](https://anndata.readthedocs.io/en/latest/) objects. Additional functionality for preprocessing, analysis, and visualization.
95
+
96
+ ## Philosophy
97
+ While most genomic data structures handle low-coverage data (<100X) along large references, smftools prioritizes high-coverage data (scalable to >1,000,000X coverage) of a few genomic loci at a time. This enables efficient data storage, rapid data operations, hierarchical metadata handling, seamless integration with various machine-learning packages, and ease of visualization. Furthermore, functionality is modularized, enabling analysis sessions to be saved, reloaded, and easily shared with collaborators. Analyses are centered around the [anndata](https://anndata.readthedocs.io/en/latest/) object, and are heavily inspired by the work conducted within the single-cell genomics community.
98
+
99
+ ## Dependencies
100
+ The following CLI tools need to be installed and configured before using the informatics (smftools.inform) module of smftools:
101
+ 1) [Dorado](https://github.com/nanoporetech/dorado) -> For standard/modified basecalling and alignment. Can be attained by downloading and configuring nanopore MinKnow software.
102
+ 2) [Samtools](https://github.com/samtools/samtools) -> For working with SAM/BAM files
103
+ 3) [Minimap2](https://github.com/lh3/minimap2) -> The aligner used by Dorado
104
+ 4) [Modkit](https://github.com/nanoporetech/modkit) -> Extracting summary statistics and read level methylation calls from modified BAM files
105
+ 5) [Bedtools](https://github.com/arq5x/bedtools2) -> For generating Bedgraphs from BAM alignment files.
106
+ 6) [BedGraphToBigWig](https://genome.ucsc.edu/goldenPath/help/bigWig.html) -> For converting BedGraphs to BigWig files for IGV sessions.
107
+
108
+ ## Modules
109
+ ### Informatics: Processes raw Nanopore/Illumina data from SMF experiments into an AnnData object.
110
+ ![](docs/source/_static/smftools_informatics_diagram.png)
111
+ ### Preprocessing: Appends QC metrics to the AnnData object and performs filtering.
112
+ ![](docs/source/_static/smftools_preprocessing_diagram.png)
113
+ ### Tools: Appends analyses to the AnnData object.
114
+ - Currently Includes: Position X Position correlation matrices, Hidden Markov Model feature detection, clustering, dimensionality reduction, peak calling, train/test workflows for various ML classifiers.
115
+ - To do: Additional ML methods for learning predictive single molecule features on condition labels: Autoencoders, Variational Autoencoders, Transformers.
116
+ ### Plotting: Visualization of analyses stored within the AnnData object.
117
+ - Most analyses appended to the adata object by a tools method have, or will have, an accompanying plotting method.
118
+
119
+ ## Announcements
120
+
121
+ ### 10/01/24 - More recent versions are being updated through github and are not currently on pypi, please install from source. Thank you!
122
+
123
+ ### 09/09/24 - The version 0.1.1 package ([smftools-0.1.1](https://pypi.org/project/smftools/)) is installable through pypi!
124
+ The informatics module has been bumped to alpha-phase status. This module can deal with POD5s and unaligned BAMS from nanopore conversion and direct SMF experiments, as well as FASTQs from Illumina conversion SMF experiments. Primary output from this module is an AnnData object containing all relevant SMF data, which is compatible with all downstream smftools modules. The other modules are still in pre-alpha phase. Preprocessing, Tools, and Plotting modules should be promoted to alpha-phase within the next month or so.
125
+
126
+ ### 08/30/24 - The version 0.1.0 package ([smftools-0.1.0](https://pypi.org/project/smftools/)) is installable through pypi!
127
+ Currently, this package (smftools-0.1.0) is going through rapid improvement (dependency handling across Linux and Mac OS, testing, documentation, debugging) and is still too early in development for widespread use. The underlying functionality was originally developed as a collection of scripts for single molecule footprinting (SMF) experiments in our lab, but is being packaged/developed to facilitate the expansion of SMF to any lab that is interested in performing these styles of experiments/analyses. The alpha-phase package is expected to be available within a couple months, so stay tuned!
@@ -0,0 +1,4 @@
1
+ smftools-0.1.6.dist-info/METADATA,sha256=03UV92H9JAHE0TlXfQ9Bmw9EU7EM9Gr5BNn9c5Td4Tw,8415
2
+ smftools-0.1.6.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
3
+ smftools-0.1.6.dist-info/licenses/LICENSE,sha256=F8LwmL6vMPddaCt1z1S83Kh_OZv50alTlY7BvVx1RXw,1066
4
+ smftools-0.1.6.dist-info/RECORD,,
smftools/__init__.py DELETED
@@ -1,25 +0,0 @@
1
- """smftools"""
2
-
3
- import logging
4
- import warnings
5
-
6
- from . import informatics as inform
7
- from . import preprocessing as pp
8
- from . import tools as tl
9
- from . import plotting as pl
10
- from . import readwrite, datasets
11
-
12
-
13
- from importlib.metadata import version
14
-
15
- package_name = "smftools"
16
- __version__ = version(package_name)
17
-
18
- __all__ = [
19
- "inform",
20
- "pp",
21
- "tl",
22
- "pl",
23
- "readwrite",
24
- "datasets"
25
- ]
smftools/_settings.py DELETED
@@ -1,19 +0,0 @@
1
- from pathlib import Path
2
-
3
- class SMFConfig:
4
- """\
5
- Config for smftools.
6
- """
7
-
8
- def __init__(
9
- self,
10
- *,
11
- datasetdir: Path | str = "./datasets/"
12
- ):
13
- self._datasetdir = Path(datasetdir) if isinstance(datasetdir, str) else datasetdir
14
-
15
- @property
16
- def datasetdir(self) -> Path:
17
- return self._datasetdir
18
-
19
- settings = SMFConfig()
smftools/_version.py DELETED
@@ -1 +0,0 @@
1
- __version__ = "0.1.1"
@@ -1,9 +0,0 @@
1
- from .datasets import (
2
- dCas9_kinetics,
3
- Kissiov_and_McKenna_2025
4
- )
5
-
6
- __all__ = [
7
- "dCas9_kinetics",
8
- "Kissiov_and_McKenna_2025"
9
- ]
@@ -1,27 +0,0 @@
1
- ## datasets
2
-
3
- def import_deps():
4
- """
5
-
6
- """
7
- import anndata as ad
8
- from pathlib import Path
9
- from .._settings import settings
10
- HERE = Path(__file__).parent
11
- return HERE
12
-
13
- def dCas9_kinetics():
14
- """
15
-
16
- """
17
- HERE = import_deps()
18
- filepath = HERE / "dCas9_m6A_invitro_kinetics.h5ad.gz"
19
- return ad.read_h5ad(filepath)
20
-
21
- def Kissiov_and_McKenna_2025():
22
- """
23
-
24
- """
25
- HERE = import_deps()
26
- filepath = HERE / "F1_hybrid_NKG2A_enhander_promoter_GpC_conversion_SMF.h5ad.gz"
27
- return ad.read_h5ad(filepath)
@@ -1,12 +0,0 @@
1
- from .pod5_to_adata import pod5_to_adata
2
- from .basecalls_to_adata import basecalls_to_adata
3
- from .subsample_pod5 import subsample_pod5
4
- from .fast5_to_pod5 import fast5_to_pod5
5
-
6
-
7
- __all__ = [
8
- "pod5_to_adata",
9
- "basecalls_to_adata",
10
- "subsample_pod5",
11
- "fast5_to_pod5"
12
- ]
@@ -1,47 +0,0 @@
1
- ## bam_conversion
2
-
3
- def bam_conversion(fasta, output_directory, conversion_types, strands, basecalled_path, split_dir, mapping_threshold, experiment_name, bam_suffix):
4
- """
5
- Converts a BAM file from a nanopore conversion SMF experiment to an adata object.
6
-
7
- Parameters:
8
- fasta (str): File path to the reference genome to align to.
9
- output_directory (str): A file path to the directory to output all the analyses.
10
- conversion_type (list): A list of strings of the conversion types to use in the analysis.
11
- strands (list): A list of conversion strands to use in the experiment.
12
- basecalled_path (str): a string representing the file path to the experiment BAM or FASTQ file.
13
- split_dir (str): A string representing the file path to the directory to split the BAMs into.
14
- mapping_threshold (float): A value in between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
15
- experiment_name (str): A string to provide an experiment name to the output adata file.
16
- bam_suffix (str): A suffix to add to the bam file.
17
-
18
- Returns:
19
- None
20
- """
21
- from .helpers import align_and_sort_BAM, converted_BAM_to_adata, generate_converted_FASTA, split_and_index_BAM
22
- import os
23
- input_basecalled_basename = os.path.basename(basecalled_path)
24
- bam_basename = input_basecalled_basename.split(".")[0]
25
- output_bam=f"{output_directory}/{bam_basename}"
26
- aligned_BAM=f"{output_bam}_aligned"
27
- aligned_sorted_BAM=f"{aligned_BAM}_sorted"
28
-
29
- os.chdir(output_directory)
30
-
31
- # 1) Convert FASTA file
32
- fasta_basename = os.path.basename(fasta)
33
- converted_FASTA_basename = fasta_basename.split('.fa')[0]+'_converted.fasta'
34
- converted_FASTA = os.path.join(output_directory, converted_FASTA_basename)
35
- if os.path.exists(converted_FASTA):
36
- print(converted_FASTA + ' already exists. Using existing converted FASTA.')
37
- else:
38
- generate_converted_FASTA(fasta, conversion_types, strands, converted_FASTA)
39
-
40
- # 2) Align the basecalled file to the converted reference FASTA and sort the bam on positional coordinates. Also make an index and a bed file of mapped reads
41
- align_and_sort_BAM(converted_FASTA, basecalled_path, bam_suffix, output_directory)
42
-
43
- ### 3) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory###
44
- split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix)
45
-
46
- # 4) Take the converted BAM and load it into an adata object.
47
- converted_BAM_to_adata(converted_FASTA, split_dir, mapping_threshold, experiment_name, conversion_types, bam_suffix)
@@ -1,49 +0,0 @@
1
- ## bam_direct
2
-
3
- def bam_direct(fasta, output_directory, mod_list, thresholds, bam_path, split_dir, mapping_threshold, experiment_name, bam_suffix, batch_size):
4
- """
5
- Converts a POD5 file from a nanopore native SMF experiment to an adata object.
6
-
7
- Parameters:
8
- fasta (str): File path to the reference genome to align to.
9
- output_directory (str): A file path to the directory to output all the analyses.
10
- mod_list (list): A list of strings of the modification types to use in the analysis.
11
- thresholds (list): A list of floats to pass for call thresholds.
12
- bam_path (str): a string representing the file path to the BAM file.
13
- split_dir (str): A string representing the file path to the directory to split the BAMs into.
14
- mapping_threshold (float): A value in between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
15
- experiment_name (str): A string to provide an experiment name to the output adata file.
16
- bam_suffix (str): A suffix to add to the bam file.
17
- batch_size (int): An integer number of TSV files to analyze in memory at once while loading the final adata object.
18
-
19
- Returns:
20
- None
21
- """
22
- from .helpers import align_and_sort_BAM, extract_mods, make_modbed, modkit_extract_to_adata, modQC, split_and_index_BAM, make_dirs
23
- import os
24
- input_bam_base = os.path.basename(bam_path)
25
- bam_basename = input_bam_base.split(bam_suffix)[0]
26
- output_bam=f"{output_directory}/{bam_basename}"
27
- aligned_BAM=f"{output_bam}_aligned"
28
- aligned_sorted_BAM=f"{aligned_BAM}_sorted"
29
- mod_bed_dir=f"{output_directory}/split_mod_beds"
30
- mod_tsv_dir=f"{output_directory}/split_mod_tsvs"
31
-
32
- make_dirs([mod_bed_dir, mod_tsv_dir])
33
-
34
- aligned_sorted_output = aligned_sorted_BAM + bam_suffix
35
- mod_map = {'6mA': '6mA', '5mC_5hmC': '5mC'}
36
- mods = [mod_map[mod] for mod in mod_list]
37
-
38
- os.chdir(output_directory)
39
-
40
- # 1) Align the BAM to the reference FASTA. Also make an index and a bed file of mapped reads
41
- align_and_sort_BAM(fasta, bam_path, bam_suffix, output_directory)
42
- # 2) Split the aligned and sorted BAM files by barcode (BC Tag) into the split_BAM directory
43
- split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix)
44
- # 3) Using nanopore modkit to work with modified BAM files ###
45
- modQC(aligned_sorted_output, thresholds) # get QC metrics for mod calls
46
- make_modbed(aligned_sorted_output, thresholds, mod_bed_dir) # Generate bed files of position methylation summaries for every sample
47
- extract_mods(thresholds, mod_tsv_dir, split_dir, bam_suffix) # Extract methylations calls for split BAM files into split TSV files
48
- #4 Load the modification data from TSVs into an adata object
49
- modkit_extract_to_adata(fasta, aligned_sorted_output, mapping_threshold, experiment_name, mods, batch_size)
@@ -1,42 +0,0 @@
1
- ## basecalls_to_adata
2
-
3
- def basecalls_to_adata(config_path):
4
- """
5
- High-level function to call for loading basecalled SMF data from a BAM file into an adata object. Also works with FASTQ for conversion SMF.
6
-
7
- Parameters:
8
- config_path (str): A string representing the file path to the experiment configuration csv file.
9
-
10
- Returns:
11
- None
12
- """
13
- from .helpers import LoadExperimentConfig, make_dirs
14
- import os
15
- bam_suffix = '.bam' # If different, change from here.
16
- split_dir = 'split_BAMs' # If different, change from here.
17
- strands = ['bottom', 'top'] # If different, change from here. Having both listed generally doesn't slow things down too much.
18
- conversions = ['unconverted'] # The name to use for the unconverted files. If different, change from here.
19
-
20
- # Load experiment config parameters into global variables
21
- experiment_config = LoadExperimentConfig(config_path)
22
- var_dict = experiment_config.var_dict
23
- for key, value in var_dict.items():
24
- globals()[key] = value
25
-
26
- split_path = os.path.join(output_directory, split_dir)
27
- make_dirs([output_directory, split_path])
28
- os.chdir(output_directory)
29
-
30
- conversions += conversion_types
31
-
32
- if smf_modality == 'conversion':
33
- from .bam_conversion import bam_conversion
34
- bam_conversion(fasta, output_directory, conversions, strands, basecalled_path, split_path, mapping_threshold, experiment_name, bam_suffix)
35
- elif smf_modality == 'direct':
36
- if bam_suffix in basecalled_path:
37
- from .bam_direct import bam_direct
38
- bam_direct(fasta, output_directory, mod_list, thresholds, basecalled_path, split_path, mapping_threshold, experiment_name, bam_suffix, batch_size)
39
- else:
40
- print('basecalls_to_adata function only work with the direct modality when the input filetype is BAM and not FASTQ.')
41
- else:
42
- print("Error")
@@ -1,19 +0,0 @@
1
- # fast5_to_pod5
2
-
3
- def fast5_to_pod5(fast5_dir, output_dir='outputs/', output_pod5='FAST5s_to_POD5.pod5'):
4
- """
5
- Convert Nanopore FAST5 files to POD5 file
6
-
7
- Parameters:
8
- fast5_dir (str): String representing the file path to a directory containing all FAST5 files to convert into a single POD5 output.
9
- output_dir (str): String representing the file path to the output directory.
10
- output_pod5 (str): The name of the output POD5 to write out within the output directory.
11
-
12
- Returns:
13
- None
14
-
15
- """
16
- import subprocess
17
- import os
18
- pod5 = os.path.join(output_dir, output_pod5)
19
- subprocess.run(["pod5", "convert", "fast5", f".{fast5_dir}*.fast5", "--output", pod5])
@@ -1,74 +0,0 @@
1
- ## LoadExperimentConfig
2
-
3
- class LoadExperimentConfig:
4
- """
5
- Loads in the experiment configuration csv and saves global variables with experiment configuration parameters.
6
- Parameters:
7
- experiment_config (str): A string representing the file path to the experiment configuration csv file.
8
-
9
- Attributes:
10
- var_dict (dict): A dictionary containing experiment configuration parameters.
11
-
12
- Example:
13
- >>> import pandas as pd
14
- >>> from io import StringIO
15
- >>> csv_data = '''variable,value,type
16
- ... mapping_threshold,0.05,float
17
- ... batch_size,4,int
18
- ... testing_bool,True,bool
19
- ... strands,"[bottom, top]",list
20
- ... split_dir,split_bams,string
21
- ... pod5_dir,None,string
22
- ... pod5_dir,,string
23
- ... '''
24
- >>> csv_file = StringIO(csv_data)
25
- >>> df = pd.read_csv(csv_file)
26
- >>> df.to_csv('test_config.csv', index=False)
27
- >>> config_loader = LoadExperimentConfig('test_config.csv')
28
- >>> config_loader.var_dict['mapping_threshold']
29
- 0.05
30
- >>> config_loader.var_dict['batch_size']
31
- 4
32
- >>> config_loader.var_dict['testing_bool']
33
- True
34
- >>> config_loader.var_dict['strands']
35
- ['bottom', 'top']
36
- >>> config_loader.var_dict['split_dir']
37
- 'split_bams'
38
- >>> config_loader.var_dict['pod5_dir'] is None
39
- True
40
- >>> config_loader.var_dict['pod5_dir'] is None
41
- True
42
- """
43
- def __init__(self, experiment_config):
44
- import pandas as pd
45
- # Read the CSV into a pandas DataFrame
46
- df = pd.read_csv(experiment_config)
47
- # Initialize an empty dictionary to store variables
48
- var_dict = {}
49
- # Iterate through each row in the DataFrame
50
- for _, row in df.iterrows():
51
- var_name = str(row['variable'])
52
- value = row['value']
53
- dtype = row['type']
54
- # Handle empty and None values
55
- if pd.isna(value) or value in ['None', '']:
56
- value = None
57
- else:
58
- # Handle different data types
59
- if dtype == 'list':
60
- # Convert the string representation of a list to an actual list
61
- value = value.strip('()[]').replace(', ', ',').split(',')
62
- elif dtype == 'int':
63
- value = int(value)
64
- elif dtype == 'float':
65
- value = float(value)
66
- elif dtype == 'bool':
67
- value = value.lower() == 'true'
68
- elif dtype == 'string':
69
- value = str(value)
70
- # Store the variable in the dictionary
71
- var_dict[var_name] = value
72
- # Save the dictionary as an attribute of the class
73
- self.var_dict = var_dict
74
-
@@ -1,42 +0,0 @@
1
- from .align_and_sort_BAM import align_and_sort_BAM
2
- from .binarize_converted_base_identities import binarize_converted_base_identities
3
- from .canoncall import canoncall
4
- from .converted_BAM_to_adata import converted_BAM_to_adata
5
- from .count_aligned_reads import count_aligned_reads
6
- from .extract_base_identities import extract_base_identities
7
- from .extract_mods import extract_mods
8
- from .find_conversion_sites import find_conversion_sites
9
- from .generate_converted_FASTA import convert_FASTA_record, generate_converted_FASTA
10
- from .get_native_references import get_native_references
11
- from .LoadExperimentConfig import LoadExperimentConfig
12
- from .make_dirs import make_dirs
13
- from .make_modbed import make_modbed
14
- from .modcall import modcall
15
- from .modkit_extract_to_adata import modkit_extract_to_adata
16
- from .modQC import modQC
17
- from .one_hot_encode import one_hot_encode
18
- from .separate_bam_by_bc import separate_bam_by_bc
19
- from .split_and_index_BAM import split_and_index_BAM
20
-
21
- __all__ = [
22
- "align_and_sort_BAM",
23
- "binarize_converted_base_identities",
24
- "canoncall",
25
- "converted_BAM_to_adata",
26
- "count_aligned_reads",
27
- "extract_base_identities",
28
- "extract_mods",
29
- "find_conversion_sites",
30
- "convert_FASTA_record",
31
- "generate_converted_FASTA",
32
- "get_native_references",
33
- "LoadExperimentConfig",
34
- "make_dirs",
35
- "make_modbed",
36
- "modcall",
37
- "modkit_extract_to_adata",
38
- "modQC",
39
- "one_hot_encode",
40
- "separate_bam_by_bc",
41
- "split_and_index_BAM"
42
- ]
@@ -1,52 +0,0 @@
1
- ## align_and_sort_BAM
2
-
3
- def align_and_sort_BAM(fasta, input, bam_suffix, output_directory):
4
- """
5
- A wrapper for running dorado aligner and samtools functions
6
-
7
- Parameters:
8
- fasta (str): File path to the reference genome to align to.
9
- input (str): File path to the basecalled file to align. Works for .bam and .fastq files
10
- bam_suffix (str): The suffix to use for the BAM file.
11
- output_directory (str): A file path to the directory to output all the analyses.
12
-
13
- Returns:
14
- None
15
- The function writes out files for: 1) An aligned BAM, 2) an aligned_sorted BAM, 3) an index file for the aligned_sorted BAM, 4) A bed file for the aligned_sorted BAM, 5) A text file containing read names in the aligned_sorted BAM
16
- """
17
- import subprocess
18
- import os
19
- input_basename = os.path.basename(input)
20
- input_suffix = '.' + input_basename.split('.')[1]
21
-
22
- output_path_minus_suffix = os.path.join(output_directory, input_basename.split(input_suffix)[0])
23
-
24
- aligned_BAM=f"{output_path_minus_suffix}_aligned"
25
- aligned_sorted_BAM=f"{aligned_BAM}_sorted"
26
- aligned_output = aligned_BAM + bam_suffix
27
- aligned_sorted_output = aligned_sorted_BAM + bam_suffix
28
-
29
- # Run dorado aligner
30
- subprocess.run(["dorado", "aligner", "--secondary=no", fasta, input], stdout=open(aligned_output, "w"))
31
-
32
- # Sort the BAM on positional coordinates
33
- subprocess.run(["samtools", "sort", "-o", aligned_sorted_output, aligned_output])
34
-
35
- # Create a BAM index file
36
- subprocess.run(["samtools", "index", aligned_sorted_output])
37
-
38
- # Make a bed file of coordinates for the BAM
39
- samtools_view = subprocess.Popen(["samtools", "view", aligned_sorted_output], stdout=subprocess.PIPE)
40
- with open(f"{aligned_sorted_BAM}_bed.bed", "w") as output_file:
41
- awk_process = subprocess.Popen(["awk", '{print $3, $4, $4+length($10)-1}'], stdin=samtools_view.stdout, stdout=output_file)
42
- samtools_view.stdout.close()
43
- awk_process.wait()
44
- samtools_view.wait()
45
-
46
- # Make a text file of reads for the BAM
47
- samtools_view = subprocess.Popen(["samtools", "view", aligned_sorted_output], stdout=subprocess.PIPE)
48
- with open(f"{aligned_sorted_BAM}_read_names.txt", "w") as output_file:
49
- cut_process = subprocess.Popen(["cut", "-f1"], stdin=samtools_view.stdout, stdout=output_file)
50
- samtools_view.stdout.close()
51
- cut_process.wait()
52
- samtools_view.wait()