smftools 0.2.1__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. smftools/__init__.py +2 -6
  2. smftools/_version.py +1 -1
  3. smftools/cli/__init__.py +0 -0
  4. smftools/cli/archived/cli_flows.py +94 -0
  5. smftools/cli/helpers.py +48 -0
  6. smftools/cli/hmm_adata.py +361 -0
  7. smftools/cli/load_adata.py +637 -0
  8. smftools/cli/preprocess_adata.py +455 -0
  9. smftools/cli/spatial_adata.py +697 -0
  10. smftools/cli_entry.py +434 -0
  11. smftools/config/conversion.yaml +18 -6
  12. smftools/config/deaminase.yaml +18 -11
  13. smftools/config/default.yaml +151 -36
  14. smftools/config/direct.yaml +28 -1
  15. smftools/config/discover_input_files.py +115 -0
  16. smftools/config/experiment_config.py +225 -27
  17. smftools/hmm/HMM.py +12 -1
  18. smftools/hmm/__init__.py +0 -6
  19. smftools/hmm/archived/call_hmm_peaks.py +106 -0
  20. smftools/hmm/call_hmm_peaks.py +318 -90
  21. smftools/informatics/__init__.py +13 -7
  22. smftools/informatics/archived/fast5_to_pod5.py +43 -0
  23. smftools/informatics/archived/helpers/archived/__init__.py +71 -0
  24. smftools/informatics/archived/helpers/archived/align_and_sort_BAM.py +126 -0
  25. smftools/informatics/{helpers → archived/helpers/archived}/aligned_BAM_to_bed.py +6 -4
  26. smftools/informatics/archived/helpers/archived/bam_qc.py +213 -0
  27. smftools/informatics/archived/helpers/archived/bed_to_bigwig.py +90 -0
  28. smftools/informatics/archived/helpers/archived/concatenate_fastqs_to_bam.py +259 -0
  29. smftools/informatics/{helpers → archived/helpers/archived}/count_aligned_reads.py +2 -2
  30. smftools/informatics/{helpers → archived/helpers/archived}/demux_and_index_BAM.py +8 -10
  31. smftools/informatics/{helpers → archived/helpers/archived}/extract_base_identities.py +1 -1
  32. smftools/informatics/{helpers → archived/helpers/archived}/extract_mods.py +15 -13
  33. smftools/informatics/{helpers → archived/helpers/archived}/generate_converted_FASTA.py +2 -0
  34. smftools/informatics/{helpers → archived/helpers/archived}/get_chromosome_lengths.py +9 -8
  35. smftools/informatics/archived/helpers/archived/index_fasta.py +24 -0
  36. smftools/informatics/{helpers → archived/helpers/archived}/make_modbed.py +1 -2
  37. smftools/informatics/{helpers → archived/helpers/archived}/modQC.py +2 -2
  38. smftools/informatics/{helpers → archived/helpers/archived}/plot_bed_histograms.py +0 -19
  39. smftools/informatics/{helpers → archived/helpers/archived}/separate_bam_by_bc.py +6 -5
  40. smftools/informatics/{helpers → archived/helpers/archived}/split_and_index_BAM.py +7 -7
  41. smftools/informatics/archived/subsample_fasta_from_bed.py +49 -0
  42. smftools/informatics/bam_functions.py +811 -0
  43. smftools/informatics/basecalling.py +67 -0
  44. smftools/informatics/bed_functions.py +366 -0
  45. smftools/informatics/{helpers/converted_BAM_to_adata_II.py → converted_BAM_to_adata.py} +42 -30
  46. smftools/informatics/fasta_functions.py +255 -0
  47. smftools/informatics/h5ad_functions.py +197 -0
  48. smftools/informatics/{helpers/modkit_extract_to_adata.py → modkit_extract_to_adata.py} +142 -59
  49. smftools/informatics/modkit_functions.py +129 -0
  50. smftools/informatics/ohe.py +160 -0
  51. smftools/informatics/pod5_functions.py +224 -0
  52. smftools/informatics/{helpers/run_multiqc.py → run_multiqc.py} +5 -2
  53. smftools/plotting/autocorrelation_plotting.py +1 -3
  54. smftools/plotting/general_plotting.py +1084 -363
  55. smftools/plotting/position_stats.py +3 -3
  56. smftools/preprocessing/__init__.py +4 -4
  57. smftools/preprocessing/append_base_context.py +35 -26
  58. smftools/preprocessing/append_binary_layer_by_base_context.py +6 -6
  59. smftools/preprocessing/binarize.py +17 -0
  60. smftools/preprocessing/binarize_on_Youden.py +11 -9
  61. smftools/preprocessing/calculate_complexity_II.py +1 -1
  62. smftools/preprocessing/calculate_coverage.py +16 -13
  63. smftools/preprocessing/calculate_position_Youden.py +42 -26
  64. smftools/preprocessing/calculate_read_modification_stats.py +2 -2
  65. smftools/preprocessing/filter_reads_on_length_quality_mapping.py +1 -1
  66. smftools/preprocessing/filter_reads_on_modification_thresholds.py +20 -20
  67. smftools/preprocessing/flag_duplicate_reads.py +2 -2
  68. smftools/preprocessing/invert_adata.py +1 -1
  69. smftools/preprocessing/load_sample_sheet.py +1 -1
  70. smftools/preprocessing/reindex_references_adata.py +37 -0
  71. smftools/readwrite.py +360 -140
  72. {smftools-0.2.1.dist-info → smftools-0.2.4.dist-info}/METADATA +26 -19
  73. smftools-0.2.4.dist-info/RECORD +176 -0
  74. smftools-0.2.4.dist-info/entry_points.txt +2 -0
  75. smftools/cli.py +0 -184
  76. smftools/informatics/fast5_to_pod5.py +0 -24
  77. smftools/informatics/helpers/__init__.py +0 -73
  78. smftools/informatics/helpers/align_and_sort_BAM.py +0 -86
  79. smftools/informatics/helpers/bam_qc.py +0 -66
  80. smftools/informatics/helpers/bed_to_bigwig.py +0 -39
  81. smftools/informatics/helpers/concatenate_fastqs_to_bam.py +0 -378
  82. smftools/informatics/helpers/discover_input_files.py +0 -100
  83. smftools/informatics/helpers/index_fasta.py +0 -12
  84. smftools/informatics/helpers/make_dirs.py +0 -21
  85. smftools/informatics/readwrite.py +0 -106
  86. smftools/informatics/subsample_fasta_from_bed.py +0 -47
  87. smftools/load_adata.py +0 -1346
  88. smftools-0.2.1.dist-info/RECORD +0 -161
  89. smftools-0.2.1.dist-info/entry_points.txt +0 -2
  90. /smftools/hmm/{apply_hmm_batched.py → archived/apply_hmm_batched.py} +0 -0
  91. /smftools/hmm/{calculate_distances.py → archived/calculate_distances.py} +0 -0
  92. /smftools/hmm/{train_hmm.py → archived/train_hmm.py} +0 -0
  93. /smftools/informatics/{basecall_pod5s.py → archived/basecall_pod5s.py} +0 -0
  94. /smftools/informatics/{helpers → archived/helpers/archived}/canoncall.py +0 -0
  95. /smftools/informatics/{helpers → archived/helpers/archived}/converted_BAM_to_adata.py +0 -0
  96. /smftools/informatics/{helpers → archived/helpers/archived}/extract_read_features_from_bam.py +0 -0
  97. /smftools/informatics/{helpers → archived/helpers/archived}/extract_read_lengths_from_bed.py +0 -0
  98. /smftools/informatics/{helpers → archived/helpers/archived}/extract_readnames_from_BAM.py +0 -0
  99. /smftools/informatics/{helpers → archived/helpers/archived}/find_conversion_sites.py +0 -0
  100. /smftools/informatics/{helpers → archived/helpers/archived}/get_native_references.py +0 -0
  101. /smftools/informatics/{helpers → archived/helpers}/archived/informatics.py +0 -0
  102. /smftools/informatics/{helpers → archived/helpers}/archived/load_adata.py +0 -0
  103. /smftools/informatics/{helpers → archived/helpers/archived}/modcall.py +0 -0
  104. /smftools/informatics/{helpers → archived/helpers/archived}/ohe_batching.py +0 -0
  105. /smftools/informatics/{helpers → archived/helpers/archived}/ohe_layers_decode.py +0 -0
  106. /smftools/informatics/{helpers → archived/helpers/archived}/one_hot_decode.py +0 -0
  107. /smftools/informatics/{helpers → archived/helpers/archived}/one_hot_encode.py +0 -0
  108. /smftools/informatics/{subsample_pod5.py → archived/subsample_pod5.py} +0 -0
  109. /smftools/informatics/{helpers/binarize_converted_base_identities.py → binarize_converted_base_identities.py} +0 -0
  110. /smftools/informatics/{helpers/complement_base_list.py → complement_base_list.py} +0 -0
  111. /smftools/preprocessing/{add_read_length_and_mapping_qc.py → archives/add_read_length_and_mapping_qc.py} +0 -0
  112. /smftools/preprocessing/{calculate_complexity.py → archives/calculate_complexity.py} +0 -0
  113. {smftools-0.2.1.dist-info → smftools-0.2.4.dist-info}/WHEEL +0 -0
  114. {smftools-0.2.1.dist-info → smftools-0.2.4.dist-info}/licenses/LICENSE +0 -0
smftools/informatics/{helpers/modkit_extract_to_adata.py → modkit_extract_to_adata.py}
@@ -1,11 +1,12 @@
- ## modkit_extract_to_adata
-
  import concurrent.futures
  import gc
- from .count_aligned_reads import count_aligned_reads
+ from .bam_functions import count_aligned_reads
  import pandas as pd
  from tqdm import tqdm
  import numpy as np
+ from pathlib import Path
+ from typing import Union, Iterable, Optional
+ import shutil

  def filter_bam_records(bam, mapping_threshold):
      """Processes a single BAM file, counts reads, and determines records to analyze."""
@@ -336,29 +337,122 @@ def parallel_extract_stranded_methylation(dict_list, dict_to_skip, max_reference
                  dict_list[dict_index][record][sample] = processed_data
      return dict_list

- def modkit_extract_to_adata(fasta, bam_dir, mapping_threshold, experiment_name, mods, batch_size, mod_tsv_dir, delete_batch_hdfs=False, threads=None):
+ def delete_intermediate_h5ads_and_tmpdir(
+     h5_dir: Union[str, Path, Iterable[str], None],
+     tmp_dir: Optional[Union[str, Path]] = None,
+     *,
+     dry_run: bool = False,
+     verbose: bool = True,
+ ):
+     """
+     Delete intermediate .h5ad files and a temporary directory.
+
+     Parameters
+     ----------
+     h5_dir : str | Path | iterable[str] | None
+         If a directory path is given, all files directly inside it will be considered.
+         If an iterable of file paths is given, those files will be considered.
+         Only files ending with '.h5ad' (and not ending with '.gz') are removed.
+     tmp_dir : str | Path | None
+         Path to a directory to remove recursively (e.g. a temp dir created earlier).
+     dry_run : bool
+         If True, print what *would* be removed but do not actually delete.
+     verbose : bool
+         Print progress / warnings.
+     """
+     # Helper: remove a single file path (Path-like or string)
+     def _maybe_unlink(p: Path):
+         if not p.exists():
+             if verbose:
+                 print(f"[skip] not found: {p}")
+             return
+         if not p.is_file():
+             if verbose:
+                 print(f"[skip] not a file: {p}")
+             return
+         if dry_run:
+             print(f"[dry-run] would remove file: {p}")
+             return
+         try:
+             p.unlink()
+             if verbose:
+                 print(f"Removed file: {p}")
+         except Exception as e:
+             print(f"[error] failed to remove file {p}: {e}")
+
+     # Handle h5_dir input (directory OR iterable of file paths)
+     if h5_dir is not None:
+         # If it's a path to a directory, iterate its children
+         if isinstance(h5_dir, (str, Path)) and Path(h5_dir).is_dir():
+             dpath = Path(h5_dir)
+             for p in dpath.iterdir():
+                 # only target top-level files (not recursing); require '.h5ad' suffix and exclude gz
+                 name = p.name.lower()
+                 if "h5ad" in name:
+                     _maybe_unlink(p)
+                 else:
+                     if verbose:
+                         # optional: comment this out if too noisy
+                         print(f"[skip] not matching pattern: {p.name}")
+         else:
+             # treat as iterable of file paths
+             for f in h5_dir:
+                 p = Path(f)
+                 name = p.name.lower()
+                 if name.endswith(".h5ad") and not name.endswith(".gz"):
+                     _maybe_unlink(p)
+                 else:
+                     if verbose:
+                         print(f"[skip] not matching pattern or not a file: {p}")
+
+     # Remove tmp_dir recursively (if provided)
+     if tmp_dir is not None:
+         td = Path(tmp_dir)
+         if not td.exists():
+             if verbose:
+                 print(f"[skip] tmp_dir not found: {td}")
+         else:
+             if not td.is_dir():
+                 if verbose:
+                     print(f"[skip] tmp_dir is not a directory: {td}")
+             else:
+                 if dry_run:
+                     print(f"[dry-run] would remove directory tree: {td}")
+                 else:
+                     try:
+                         shutil.rmtree(td)
+                         if verbose:
+                             print(f"Removed directory tree: {td}")
+                     except Exception as e:
+                         print(f"[error] failed to remove tmp dir {td}: {e}")
+
+ def modkit_extract_to_adata(fasta, bam_dir, out_dir, input_already_demuxed, mapping_threshold, experiment_name, mods, batch_size, mod_tsv_dir, delete_batch_hdfs=False, threads=None, double_barcoded_path = None):
      """
      Takes modkit extract outputs and organizes it into an adata object

      Parameters:
-         fasta (str): File path to the reference genome to align to.
-         bam_dir (str): File path to the directory containing the aligned_sorted split modified BAM files
+         fasta (Path): File path to the reference genome to align to.
+         bam_dir (Path): File path to the directory containing the aligned_sorted split modified BAM files
+         out_dir (Path): File path to output directory
+         input_already_demuxed (bool): Whether input reads were originally demuxed
          mapping_threshold (float): A value in between 0 and 1 to threshold the minimal fraction of aligned reads which map to the reference region. References with values above the threshold are included in the output adata.
          experiment_name (str): A string to provide an experiment name to the output adata file.
          mods (list): A list of strings of the modification types to use in the analysis.
          batch_size (int): An integer number of TSV files to analyze in memory at once while loading the final adata object.
-         mod_tsv_dir (str): String representing the path to the mod TSV directory
+         mod_tsv_dir (Path): path to the mod TSV directory
          delete_batch_hdfs (bool): Whether to delete the batch hdfs after writing out the final concatenated hdf. Default is False
+         double_barcoded_path (Path): Path to dorado demux summary file of double ended barcodes

      Returns:
-         final_adata_path (str): Path to the final adata
+         final_adata_path (Path): Path to the final adata
      """
      ###################################################
      # Package imports
      from .. import readwrite
-     from .get_native_references import get_native_references
-     from .extract_base_identities import extract_base_identities
-     from .ohe_batching import ohe_batching
+     from ..readwrite import safe_write_h5ad, make_dirs
+     from .fasta_functions import get_native_references
+     from .bam_functions import extract_base_identities
+     from .ohe import ohe_batching
      import pandas as pd
      import anndata as ad
      import os
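
A minimal usage sketch of the new cleanup helper (the out_dir value is illustrative; the h5ads/tmp subdirectory names are the ones modkit_extract_to_adata creates below, and the import path assumes the module location shown in the file list):

    from pathlib import Path
    from smftools.informatics.modkit_extract_to_adata import delete_intermediate_h5ads_and_tmpdir

    out_dir = Path("results/experiment1")  # hypothetical output directory

    # Preview what would be removed without deleting anything
    delete_intermediate_h5ads_and_tmpdir(out_dir / "h5ads", out_dir / "tmp", dry_run=True)

    # Perform the deletion (dry_run and verbose are keyword-only)
    delete_intermediate_h5ads_and_tmpdir(out_dir / "h5ads", out_dir / "tmp", verbose=True)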
@@ -368,42 +462,34 @@ def modkit_extract_to_adata(fasta, bam_dir, mapping_threshold, experiment_name,
      from Bio.Seq import Seq
      from tqdm import tqdm
      import h5py
-     from .make_dirs import make_dirs
      ###################################################

      ################## Get input tsv and bam file names into a sorted list ################
-     # List all files in the directory
-     tsv_files = os.listdir(mod_tsv_dir)
-     bam_files = os.listdir(bam_dir)
-     # get current working directory
-     parent_dir = os.path.dirname(mod_tsv_dir)
-
      # Make output dirs
-     h5_dir = os.path.join(parent_dir, 'h5ads')
-     tmp_dir = os.path.join(parent_dir, 'tmp')
+     h5_dir = out_dir / 'h5ads'
+     tmp_dir = out_dir / 'tmp'
      make_dirs([h5_dir, tmp_dir])
-     existing_h5s = os.listdir(h5_dir)
-     existing_h5s = [h5 for h5 in existing_h5s if '.h5ad.gz' in h5]
-     final_hdf = f'{experiment_name}_final_experiment_hdf5.h5ad'
-     final_adata_path = os.path.join(h5_dir, final_hdf)
-     final_adata = None

-     if os.path.exists(f"{final_adata_path}.gz"):
-         print(f'{final_adata_path}.gz already exists. Using existing adata')
-         return final_adata, f"{final_adata_path}.gz"
+     existing_h5s = h5_dir.iterdir()
+     existing_h5s = [h5 for h5 in existing_h5s if '.h5ad.gz' in str(h5)]
+     final_hdf = f'{experiment_name}.h5ad.gz'
+     final_adata_path = h5_dir / final_hdf
+     final_adata = None

-     elif os.path.exists(f"{final_adata_path}"):
+     if final_adata_path.exists():
          print(f'{final_adata_path} already exists. Using existing adata')
          return final_adata, final_adata_path

-     # Filter file names that contain the search string in their filename and keep them in a list
-     tsvs = [tsv for tsv in tsv_files if 'extract.tsv' in tsv and 'unclassified' not in tsv]
-     bams = [bam for bam in bam_files if '.bam' in bam and '.bai' not in bam and 'unclassified' not in bam]
-     # Sort file list by names and print the list of file names
-     tsvs.sort()
-     tsv_path_list = [os.path.join(mod_tsv_dir, tsv) for tsv in tsvs]
-     bams.sort()
-     bam_path_list = [os.path.join(bam_dir, bam) for bam in bams]
+     # List all files in the directory
+     tsvs = sorted(
+         p for p in mod_tsv_dir.iterdir()
+         if p.is_file() and 'unclassified' not in p.name and 'extract.tsv' in p.name)
+     bams = sorted(
+         p for p in bam_dir.iterdir()
+         if p.is_file() and p.suffix == '.bam' and 'unclassified' not in p.name and '.bai' not in p.name)
+
+     tsv_path_list = [mod_tsv_dir / tsv for tsv in tsvs]
+     bam_path_list = [bam_dir / bam for bam in bams]
      print(f'{len(tsvs)} sample tsv files found: {tsvs}')
      print(f'{len(bams)} sample bams found: {bams}')
      ##########################################################################################
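
Caller note on the resume logic above: when {experiment_name}.h5ad.gz already exists, the function returns (None, final_adata_path) without reprocessing, so a caller that needs the object in memory must reload it. A minimal sketch; every argument value here is illustrative, and the mods strings are placeholders:

    from pathlib import Path
    import anndata as ad
    from smftools.informatics.modkit_extract_to_adata import modkit_extract_to_adata

    final_adata, final_adata_path = modkit_extract_to_adata(
        Path("ref.fa"), Path("out/split_bams"), Path("out"), input_already_demuxed=True,
        mapping_threshold=0.01, experiment_name="expt1", mods=["6mA", "5mC"],
        batch_size=4, mod_tsv_dir=Path("out/mod_tsvs"),
    )
    if final_adata is None:  # an existing final .h5ad was found and reused
        final_adata = ad.read_h5ad(final_adata_path)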
@@ -417,7 +503,7 @@ def modkit_extract_to_adata(fasta, bam_dir, mapping_threshold, experiment_name,
      ########### Determine the maximum record length to analyze in the dataset ################
      # Get all references within the FASTA and indicate the length and identity of the record sequence
      max_reference_length = 0
-     reference_dict = get_native_references(fasta) # returns a dict keyed by record name. Points to a tuple of (reference length, reference sequence)
+     reference_dict = get_native_references(str(fasta)) # returns a dict keyed by record name. Points to a tuple of (reference length, reference sequence)
      # Get the max record length in the dataset.
      for record in records_to_analyze:
          if reference_dict[record][0] > max_reference_length:
@@ -431,11 +517,11 @@ def modkit_extract_to_adata(fasta, bam_dir, mapping_threshold, experiment_name,
      # One hot encode read sequences and write them out into the tmp_dir as h5ad files.
      # Save the file paths in the bam_record_ohe_files dict.
      bam_record_ohe_files = {}
-     bam_record_save = os.path.join(tmp_dir, 'tmp_file_dict.h5ad')
+     bam_record_save = tmp_dir / 'tmp_file_dict.h5ad'
      fwd_mapped_reads = set()
      rev_mapped_reads = set()
      # If this step has already been performed, read in the tmp_file_dict
-     if os.path.exists(bam_record_save):
+     if bam_record_save.exists():
          bam_record_ohe_files = ad.read_h5ad(bam_record_save).uns
          print('Found existing OHE reads, using these')
      else:
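
The OHE step is likewise resumable: the per-read one-hot file paths are cached in the .uns of a small placeholder AnnData, so re-runs reuse them. A sketch of reading that cache by hand (path built exactly as above):

    import anndata as ad

    bam_record_ohe_files = ad.read_h5ad(tmp_dir / 'tmp_file_dict.h5ad').uns
    # mapping produced by the OHE step; the exact key layout comes from
    # the loop body, which is outside this hunk
    print(bam_record_ohe_files)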
@@ -489,7 +575,7 @@ def modkit_extract_to_adata(fasta, bam_dir, mapping_threshold, experiment_name,
          bam_path_list = bam_path_list[batch_size:]
          print('{0}: tsvs in batch {1} '.format(readwrite.time_string(), tsv_batch))

-         batch_already_processed = sum([1 for h5 in existing_h5s if f'_{batch}_' in h5])
+         batch_already_processed = sum([1 for h5 in existing_h5s if f'_{batch}_' in h5.name])
          ###################################################
          if batch_already_processed:
              print(f'Batch {batch} has already been processed into h5ads. Skipping batch and using existing files')
@@ -677,7 +763,6 @@ def modkit_extract_to_adata(fasta, bam_dir, mapping_threshold, experiment_name,


          # Save the sample files in the batch as gzipped hdf5 files
-         os.chdir(h5_dir)
          print('{0}: Converting batch {1} dictionaries to anndata objects'.format(readwrite.time_string(), batch))
          for dict_index, dict_type in enumerate(dict_list):
              if dict_index not in dict_to_skip:
@@ -807,7 +892,7 @@ def modkit_extract_to_adata(fasta, bam_dir, mapping_threshold, experiment_name,

                  try:
                      print('{0}: Writing {1} anndata out as a hdf5 file'.format(readwrite.time_string(), sample_types[dict_index]))
-                     adata.write_h5ad('{0}_{1}_{2}_SMF_binarized_sample_hdf5.h5ad.gz'.format(readwrite.date_string(), batch, sample_types[dict_index]), compression='gzip')
+                     adata.write_h5ad(h5_dir / '{0}_{1}_{2}_SMF_binarized_sample_hdf5.h5ad.gz'.format(readwrite.date_string(), batch, sample_types[dict_index]), compression='gzip')
                  except:
                      print(f"Skipping writing anndata for sample")

@@ -816,11 +901,10 @@ def modkit_extract_to_adata(fasta, bam_dir, mapping_threshold, experiment_name,
          gc.collect()

      # Iterate over all of the batched hdf5 files and concatenate them.
-     os.chdir(h5_dir)
-     files = os.listdir(h5_dir)
+     files = h5_dir.iterdir()
      # Filter file names that contain the search string in their filename and keep them in a list
-     hdfs = [hdf for hdf in files if 'hdf5.h5ad' in hdf and hdf != final_hdf]
-     combined_hdfs = [hdf for hdf in hdfs if "combined" in hdf]
+     hdfs = [hdf for hdf in files if 'hdf5.h5ad' in hdf.name and hdf != final_hdf]
+     combined_hdfs = [hdf for hdf in hdfs if "combined" in hdf.name]
      if len(combined_hdfs) > 0:
          hdfs = combined_hdfs
      else:
@@ -828,7 +912,7 @@ def modkit_extract_to_adata(fasta, bam_dir, mapping_threshold, experiment_name,
      # Sort file list by names and print the list of file names
      hdfs.sort()
      print('{0} sample files found: {1}'.format(len(hdfs), hdfs))
-     hdf_paths = [os.path.join(h5_dir, hd5) for hd5 in hdfs]
+     hdf_paths = [h5_dir / hd5 for hd5 in hdfs]
      final_adata = None
      for hdf_index, hdf in enumerate(hdf_paths):
          print('{0}: Reading in {1} hdf5 file'.format(readwrite.time_string(), hdfs[hdf_index]))
@@ -847,6 +931,7 @@ def modkit_extract_to_adata(fasta, bam_dir, mapping_threshold, experiment_name,

      ohe_bases = ['A', 'C', 'G', 'T'] # ignore N bases for consensus
      ohe_layers = [f"{ohe_base}_binary_encoding" for ohe_base in ohe_bases]
+     final_adata.uns['References'] = {}
      for record in records_to_analyze:
          # Add FASTA sequence to the object
          sequence = record_seq_dict[record][0]
@@ -854,6 +939,7 @@ def modkit_extract_to_adata(fasta, bam_dir, mapping_threshold, experiment_name,
          final_adata.var[f'{record}_top_strand_FASTA_base'] = list(sequence)
          final_adata.var[f'{record}_bottom_strand_FASTA_base'] = list(complement)
          final_adata.uns[f'{record}_FASTA_sequence'] = sequence
+         final_adata.uns['References'][f'{record}_FASTA_sequence'] = sequence
          # Add consensus sequence of samples mapped to the record to the object
          record_subset = final_adata[final_adata.obs['Reference'] == record]
          for strand in record_subset.obs['Strand'].cat.categories:
@@ -869,19 +955,16 @@ def modkit_extract_to_adata(fasta, bam_dir, mapping_threshold, experiment_name,
                  consensus_sequence_list = [layer_map[i] for i in nucleotide_indexes]
                  final_adata.var[f'{record}_{strand}_{mapping_dir}_consensus_sequence_from_all_samples'] = consensus_sequence_list

-     #final_adata.write_h5ad(final_adata_path)
+     if input_already_demuxed:
+         final_adata.obs["demux_type"] = ["already"] * final_adata.shape[0]
+         final_adata.obs["demux_type"] = final_adata.obs["demux_type"].astype("category")
+     else:
+         from .h5ad_functions import add_demux_type_annotation
+         double_barcoded_reads = double_barcoded_path / "barcoding_summary.txt"
+         add_demux_type_annotation(final_adata, double_barcoded_reads)

      # Delete the individual h5ad files and only keep the final concatenated file
      if delete_batch_hdfs:
-         files = os.listdir(h5_dir)
-         hdfs_to_delete = [hdf for hdf in files if 'hdf5.h5ad' in hdf and hdf != final_hdf]
-         hdf_paths_to_delete = [os.path.join(h5_dir, hdf) for hdf in hdfs_to_delete]
-         # Iterate over the files and delete them
-         for hdf in hdf_paths_to_delete:
-             try:
-                 os.remove(hdf)
-                 print(f"Deleted file: {hdf}")
-             except OSError as e:
-                 print(f"Error deleting file {hdf}: {e}")
+         delete_intermediate_h5ads_and_tmpdir(h5_dir, tmp_dir)

      return final_adata, final_adata_path
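
The final object now carries two new annotations added above: a per-read obs column demux_type and a consolidated uns['References'] mapping of reference sequences. A quick inspection sketch, assuming final_adata was returned (or reloaded) as in the earlier sketch:

    print(final_adata.obs["demux_type"].value_counts())  # all 'already' when input_already_demuxed=True
    print(list(final_adata.uns["References"]))           # '<record>_FASTA_sequence' keys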
smftools/informatics/modkit_functions.py (new file; matched to the +129-line entry in the list above)
@@ -0,0 +1,129 @@
+ import os
+ import subprocess
+ import glob
+ import zipfile
+ from pathlib import Path
+
+ def extract_mods(thresholds, mod_tsv_dir, split_dir, bam_suffix, skip_unclassified=True, modkit_summary=False, threads=None):
+     """
+     Takes all of the aligned, sorted, split modified BAM files and runs Nanopore Modkit Extract to load the modification data into zipped TSV files
+
+     Parameters:
+         thresholds (list): A list of thresholds to use for marking each basecalled base as passing or failing on canonical and modification call status.
+         mod_tsv_dir (Path): The file path to the directory to hold the modkit extract outputs.
+         split_dir (Path): The file path to the directory containing the converted aligned_sorted_split BAM files.
+         bam_suffix (str): The suffix to use for the BAM file.
+         skip_unclassified (bool): Whether to skip the unclassified bam file for the modkit extract command
+         modkit_summary (bool): Whether to run and display modkit summary
+         threads (int): Number of threads to use
+
+     Returns:
+         None
+             Runs modkit extract on input aligned_sorted_split modified BAM files to output zipped TSVs containing modification calls.
+     """
+     filter_threshold, m6A_threshold, m5C_threshold, hm5C_threshold = thresholds
+     bam_files = sorted(p for p in split_dir.iterdir() if bam_suffix in p.name and '.bai' not in p.name)
+     if skip_unclassified:
+         bam_files = [p for p in bam_files if "unclassified" not in p.name]
+     print(f"Running modkit extract for the following bam files: {bam_files}")
+
+     if threads:
+         threads = str(threads)
+     else:
+         pass
+
+     for input_file in bam_files:
+         print(input_file)
+         # Construct the output TSV file path
+         output_tsv = mod_tsv_dir / (input_file.stem + "_extract.tsv")
+         output_tsv_gz = output_tsv.parent / (output_tsv.name + '.gz')
+         if output_tsv_gz.exists():
+             print(f"{output_tsv_gz} already exists, skipping modkit extract")
+         else:
+             print(f"Extracting modification data from {input_file}")
+             if modkit_summary:
+                 # Run modkit summary
+                 subprocess.run(["modkit", "summary", str(input_file)])
+             else:
+                 pass
+             # Run modkit extract
+             if threads:
+                 extract_command = [
+                     "modkit", "extract",
+                     "calls", "--mapped-only",
+                     "--filter-threshold", f'{filter_threshold}',
+                     "--mod-thresholds", f"m:{m5C_threshold}",
+                     "--mod-thresholds", f"a:{m6A_threshold}",
+                     "--mod-thresholds", f"h:{hm5C_threshold}",
+                     "-t", threads,
+                     str(input_file), str(output_tsv)
+                 ]
+             else:
+                 extract_command = [
+                     "modkit", "extract",
+                     "calls", "--mapped-only",
+                     "--filter-threshold", f'{filter_threshold}',
+                     "--mod-thresholds", f"m:{m5C_threshold}",
+                     "--mod-thresholds", f"a:{m6A_threshold}",
+                     "--mod-thresholds", f"h:{hm5C_threshold}",
+                     str(input_file), str(output_tsv)
+                 ]
+             subprocess.run(extract_command)
+             # Zip the output TSV
+             print(f'zipping {output_tsv}')
+             if threads:
+                 zip_command = ["pigz", "-f", "-p", threads, str(output_tsv)]
+             else:
+                 zip_command = ["pigz", "-f", str(output_tsv)]
+             subprocess.run(zip_command, check=True)
+     return
+
+ def make_modbed(aligned_sorted_output, thresholds, mod_bed_dir):
+     """
+     Generates position methylation summaries for each barcoded sample, starting from the overall BAM file that was the direct output of the dorado aligner.
+
+     Parameters:
+         aligned_sorted_output (str): A string representing the file path to the aligned_sorted non-split BAM file.
+         thresholds (list): A list of thresholds to use for marking each basecalled base as passing or failing on canonical and modification call status.
+         mod_bed_dir (str): A string representing the file path to the directory to hold the modkit pileup outputs.
+
+     Returns:
+         None
+     """
+     import os
+     import subprocess
+
+     filter_threshold, m6A_threshold, m5C_threshold, hm5C_threshold = thresholds
+     command = [
+         "modkit", "pileup", str(aligned_sorted_output), str(mod_bed_dir),
+         "--partition-tag", "BC",
+         "--only-tabs",
+         "--filter-threshold", f'{filter_threshold}',
+         "--mod-thresholds", f"m:{m5C_threshold}",
+         "--mod-thresholds", f"a:{m6A_threshold}",
+         "--mod-thresholds", f"h:{hm5C_threshold}"
+     ]
+     subprocess.run(command)
+
+ def modQC(aligned_sorted_output, thresholds):
+     """
+     Output the percentile of bases falling at a call threshold (threshold is a probability between 0-1) for the overall BAM file.
+     It is generally good to look at these parameters on positive and negative controls.
+
+     Parameters:
+         aligned_sorted_output (str): A string representing the file path of the aligned_sorted non-split BAM file output by the dorado aligner.
+         thresholds (list): A list of floats to pass for call thresholds.
+
+     Returns:
+         None
+     """
+     import subprocess
+
+     filter_threshold, m6A_threshold, m5C_threshold, hm5C_threshold = thresholds
+     subprocess.run(["modkit", "sample-probs", str(aligned_sorted_output)])
+     command = [
+         "modkit", "summary", str(aligned_sorted_output),
+         "--filter-threshold", f"{filter_threshold}",
+         "--mod-thresholds", f"m:{m5C_threshold}",
+         "--mod-thresholds", f"a:{m6A_threshold}",
+         "--mod-thresholds", f"h:{hm5C_threshold}"
+     ]
+     subprocess.run(command)
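
All three functions unpack thresholds positionally as (filter_threshold, m6A_threshold, m5C_threshold, hm5C_threshold), and they shell out to the external modkit binary (extract_mods also needs pigz on PATH). A hedged usage sketch; the module path is inferred from the file list, and the paths and threshold values are illustrative only:

    from pathlib import Path
    from smftools.informatics.modkit_functions import extract_mods, modQC

    thresholds = [0.8, 0.9, 0.9, 0.9]  # (filter, m6A, m5C, hm5C) -- order matters

    # QC the overall aligned_sorted BAM before extracting
    modQC(Path("out/aligned_sorted.bam"), thresholds)

    # Run modkit extract on each split BAM, writing gzipped TSVs
    extract_mods(thresholds, Path("out/mod_tsvs"), Path("out/split_bams"), ".bam", threads=4)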
smftools/informatics/ohe.py (new file; matched to the +160-line entry in the list above)
@@ -0,0 +1,160 @@
+ import numpy as np
+ import anndata as ad
+
+ import os
+ import concurrent.futures
+
+ def one_hot_encode(sequence, device='auto'):
+     """
+     One-hot encodes a DNA sequence.
+
+     Parameters:
+         sequence (str or list): DNA sequence (e.g., "ACGTN" or ['A', 'C', 'G', 'T', 'N']).
+
+     Returns:
+         ndarray: Flattened one-hot encoded representation of the input sequence.
+     """
+     mapping = np.array(['A', 'C', 'G', 'T', 'N'])
+
+     # Ensure input is a list of characters
+     if not isinstance(sequence, list):
+         sequence = list(sequence)  # Convert string to list of characters
+
+     # Handle empty sequences
+     if len(sequence) == 0:
+         print("Warning: Empty sequence encountered in one_hot_encode()")
+         return np.zeros(len(mapping))  # Return empty encoding instead of failing
+
+     # Convert sequence to NumPy array
+     seq_array = np.array(sequence, dtype='<U1')
+
+     # Replace invalid bases with 'N'
+     seq_array = np.where(np.isin(seq_array, mapping), seq_array, 'N')
+
+     # Create one-hot encoding matrix
+     one_hot_matrix = (seq_array[:, None] == mapping).astype(int)
+
+     # Flatten and return
+     return one_hot_matrix.flatten()
+
+ def one_hot_decode(ohe_array):
+     """
+     Takes a flattened one hot encoded array and returns the sequence string from that array.
+
+     Parameters:
+         ohe_array (np.array): A one hot encoded array
+
+     Returns:
+         sequence (str): Sequence string of the one hot encoded array
+     """
+     # Define the mapping of one-hot encoded indices to DNA bases
+     mapping = ['A', 'C', 'G', 'T', 'N']
+
+     # Reshape the flattened array into a 2D matrix with 5 columns (one for each base)
+     one_hot_matrix = ohe_array.reshape(-1, 5)
+
+     # Get the index of the maximum value (which will be 1) in each row
+     decoded_indices = np.argmax(one_hot_matrix, axis=1)
+
+     # Map the indices back to the corresponding bases
+     sequence_list = [mapping[i] for i in decoded_indices]
+     sequence = ''.join(sequence_list)
+
+     return sequence
+
+ def ohe_layers_decode(adata, obs_names):
+     """
+     Takes an anndata object and a list of observation names. Returns a list of sequence strings for the reads of interest.
+
+     Parameters:
+         adata (AnnData): An anndata object.
+         obs_names (list): A list of observation name strings to retrieve sequences for.
+
+     Returns:
+         sequences (list of str): List of strings of the one hot encoded array
+     """
+     # Define the mapping of one-hot encoded indices to DNA bases
+     mapping = ['A', 'C', 'G', 'T', 'N']
+
+     ohe_layers = [f"{base}_binary_encoding" for base in mapping]
+     sequences = []
+
+     for obs_name in obs_names:
+         obs_subset = adata[obs_name]
+         ohe_list = []
+         for layer in ohe_layers:
+             ohe_list += list(obs_subset.layers[layer])
+         ohe_array = np.array(ohe_list)
+         sequence = one_hot_decode(ohe_array)
+         sequences.append(sequence)
+
+     return sequences
+
+ def _encode_sequence(args):
+     """Parallel helper function for one-hot encoding."""
+     read_name, seq, device = args
+     try:
+         one_hot_matrix = one_hot_encode(seq, device)
+         return read_name, one_hot_matrix
+     except Exception:
+         return None  # Skip invalid sequences
+
+ def _encode_and_save_batch(batch_data, tmp_dir, prefix, record, batch_number):
+     """Encodes a batch and writes to disk immediately."""
+     batch = {read_name: matrix for read_name, matrix in batch_data if matrix is not None}
+
+     if batch:
+         save_name = os.path.join(tmp_dir, f'tmp_{prefix}_{record}_{batch_number}.h5ad')
+         tmp_ad = ad.AnnData(X=np.zeros((1, 1)), uns=batch)  # Placeholder X
+         tmp_ad.write_h5ad(save_name)
+         return save_name
+     return None
+
+ def ohe_batching(base_identities, tmp_dir, record, prefix='', batch_size=100000, progress_bar=None, device='auto', threads=None):
+     """
+     Efficient version of ohe_batching: one-hot encodes sequences in parallel and writes batches immediately.
+
+     Parameters:
+         base_identities (dict): Dictionary mapping read names to sequences.
+         tmp_dir (str): Directory for storing temporary files.
+         record (str): Record name.
+         prefix (str): Prefix for file naming.
+         batch_size (int): Number of reads per batch.
+         progress_bar (tqdm instance, optional): Shared progress bar.
+         device (str): Device for encoding.
+         threads (int, optional): Number of parallel workers.
+
+     Returns:
+         list: List of valid H5AD file paths.
+     """
+     threads = threads or os.cpu_count()  # Default to max available CPU cores
+     batch_data = []
+     batch_number = 0
+     file_names = []
+
+     # Step 1: Prepare Data for Parallel Encoding
+     encoding_args = [(read_name, seq, device) for read_name, seq in base_identities.items() if seq is not None]
+
+     # Step 2: Parallel One-Hot Encoding using threads (to avoid nested processes)
+     with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor:
+         for result in executor.map(_encode_sequence, encoding_args):
+             if result:
+                 batch_data.append(result)
+
+             if len(batch_data) >= batch_size:
+                 # Step 3: Process and Write Batch Immediately
+                 file_name = _encode_and_save_batch(batch_data.copy(), tmp_dir, prefix, record, batch_number)
+                 if file_name:
+                     file_names.append(file_name)
+
+                 batch_data.clear()
+                 batch_number += 1
+
+             if progress_bar:
+                 progress_bar.update(1)
+
+     # Step 4: Process Remaining Batch
+     if batch_data:
+         file_name = _encode_and_save_batch(batch_data, tmp_dir, prefix, record, batch_number)
+         if file_name:
+             file_names.append(file_name)
+
+     return file_names
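
A round-trip sketch of the encoder pair above: one_hot_encode flattens a (length, 5) matrix over the A/C/G/T/N alphabet, coercing any other character to 'N', and one_hot_decode inverts it:

    ohe = one_hot_encode("ACGTX")   # 'X' is outside the alphabet, so it is coerced to 'N'
    print(ohe.reshape(-1, 5))       # one row per base, columns ordered ['A', 'C', 'G', 'T', 'N']
    print(one_hot_decode(ohe))      # -> 'ACGTN'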