smftools 0.2.1__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. smftools/__init__.py +2 -6
  2. smftools/_version.py +1 -1
  3. smftools/cli/__init__.py +0 -0
  4. smftools/cli/cli_flows.py +94 -0
  5. smftools/cli/hmm_adata.py +338 -0
  6. smftools/cli/load_adata.py +577 -0
  7. smftools/cli/preprocess_adata.py +363 -0
  8. smftools/cli/spatial_adata.py +564 -0
  9. smftools/cli_entry.py +435 -0
  10. smftools/config/conversion.yaml +11 -6
  11. smftools/config/deaminase.yaml +12 -7
  12. smftools/config/default.yaml +36 -25
  13. smftools/config/direct.yaml +25 -1
  14. smftools/config/discover_input_files.py +115 -0
  15. smftools/config/experiment_config.py +109 -12
  16. smftools/informatics/__init__.py +13 -7
  17. smftools/informatics/archived/fast5_to_pod5.py +43 -0
  18. smftools/informatics/archived/helpers/archived/__init__.py +71 -0
  19. smftools/informatics/archived/helpers/archived/align_and_sort_BAM.py +126 -0
  20. smftools/informatics/{helpers → archived/helpers/archived}/aligned_BAM_to_bed.py +6 -4
  21. smftools/informatics/archived/helpers/archived/bam_qc.py +213 -0
  22. smftools/informatics/archived/helpers/archived/bed_to_bigwig.py +90 -0
  23. smftools/informatics/archived/helpers/archived/concatenate_fastqs_to_bam.py +259 -0
  24. smftools/informatics/{helpers → archived/helpers/archived}/count_aligned_reads.py +2 -2
  25. smftools/informatics/{helpers → archived/helpers/archived}/demux_and_index_BAM.py +8 -10
  26. smftools/informatics/{helpers → archived/helpers/archived}/extract_base_identities.py +1 -1
  27. smftools/informatics/{helpers → archived/helpers/archived}/extract_mods.py +15 -13
  28. smftools/informatics/{helpers → archived/helpers/archived}/generate_converted_FASTA.py +2 -0
  29. smftools/informatics/{helpers → archived/helpers/archived}/get_chromosome_lengths.py +9 -8
  30. smftools/informatics/archived/helpers/archived/index_fasta.py +24 -0
  31. smftools/informatics/{helpers → archived/helpers/archived}/make_modbed.py +1 -2
  32. smftools/informatics/{helpers → archived/helpers/archived}/modQC.py +2 -2
  33. smftools/informatics/{helpers → archived/helpers/archived}/plot_bed_histograms.py +0 -19
  34. smftools/informatics/{helpers → archived/helpers/archived}/separate_bam_by_bc.py +6 -5
  35. smftools/informatics/{helpers → archived/helpers/archived}/split_and_index_BAM.py +7 -7
  36. smftools/informatics/archived/subsample_fasta_from_bed.py +49 -0
  37. smftools/informatics/bam_functions.py +812 -0
  38. smftools/informatics/basecalling.py +67 -0
  39. smftools/informatics/bed_functions.py +366 -0
  40. smftools/informatics/{helpers/converted_BAM_to_adata_II.py → converted_BAM_to_adata.py} +42 -30
  41. smftools/informatics/fasta_functions.py +255 -0
  42. smftools/informatics/h5ad_functions.py +197 -0
  43. smftools/informatics/{helpers/modkit_extract_to_adata.py → modkit_extract_to_adata.py} +142 -59
  44. smftools/informatics/modkit_functions.py +129 -0
  45. smftools/informatics/ohe.py +160 -0
  46. smftools/informatics/pod5_functions.py +224 -0
  47. smftools/informatics/{helpers/run_multiqc.py → run_multiqc.py} +5 -2
  48. smftools/plotting/autocorrelation_plotting.py +1 -3
  49. smftools/plotting/general_plotting.py +1037 -362
  50. smftools/preprocessing/__init__.py +2 -0
  51. smftools/preprocessing/append_base_context.py +3 -3
  52. smftools/preprocessing/append_binary_layer_by_base_context.py +4 -4
  53. smftools/preprocessing/binarize.py +17 -0
  54. smftools/preprocessing/binarize_on_Youden.py +2 -2
  55. smftools/preprocessing/calculate_position_Youden.py +1 -1
  56. smftools/preprocessing/calculate_read_modification_stats.py +1 -1
  57. smftools/preprocessing/filter_reads_on_modification_thresholds.py +19 -19
  58. smftools/preprocessing/flag_duplicate_reads.py +1 -1
  59. smftools/readwrite.py +266 -140
  60. {smftools-0.2.1.dist-info → smftools-0.2.3.dist-info}/METADATA +10 -9
  61. {smftools-0.2.1.dist-info → smftools-0.2.3.dist-info}/RECORD +82 -70
  62. smftools-0.2.3.dist-info/entry_points.txt +2 -0
  63. smftools/cli.py +0 -184
  64. smftools/informatics/fast5_to_pod5.py +0 -24
  65. smftools/informatics/helpers/__init__.py +0 -73
  66. smftools/informatics/helpers/align_and_sort_BAM.py +0 -86
  67. smftools/informatics/helpers/bam_qc.py +0 -66
  68. smftools/informatics/helpers/bed_to_bigwig.py +0 -39
  69. smftools/informatics/helpers/concatenate_fastqs_to_bam.py +0 -378
  70. smftools/informatics/helpers/discover_input_files.py +0 -100
  71. smftools/informatics/helpers/index_fasta.py +0 -12
  72. smftools/informatics/helpers/make_dirs.py +0 -21
  73. smftools/informatics/readwrite.py +0 -106
  74. smftools/informatics/subsample_fasta_from_bed.py +0 -47
  75. smftools/load_adata.py +0 -1346
  76. smftools-0.2.1.dist-info/entry_points.txt +0 -2
  77. /smftools/informatics/{basecall_pod5s.py → archived/basecall_pod5s.py} +0 -0
  78. /smftools/informatics/{helpers → archived/helpers/archived}/canoncall.py +0 -0
  79. /smftools/informatics/{helpers → archived/helpers/archived}/converted_BAM_to_adata.py +0 -0
  80. /smftools/informatics/{helpers → archived/helpers/archived}/extract_read_features_from_bam.py +0 -0
  81. /smftools/informatics/{helpers → archived/helpers/archived}/extract_read_lengths_from_bed.py +0 -0
  82. /smftools/informatics/{helpers → archived/helpers/archived}/extract_readnames_from_BAM.py +0 -0
  83. /smftools/informatics/{helpers → archived/helpers/archived}/find_conversion_sites.py +0 -0
  84. /smftools/informatics/{helpers → archived/helpers/archived}/get_native_references.py +0 -0
  85. /smftools/informatics/{helpers → archived/helpers}/archived/informatics.py +0 -0
  86. /smftools/informatics/{helpers → archived/helpers}/archived/load_adata.py +0 -0
  87. /smftools/informatics/{helpers → archived/helpers/archived}/modcall.py +0 -0
  88. /smftools/informatics/{helpers → archived/helpers/archived}/ohe_batching.py +0 -0
  89. /smftools/informatics/{helpers → archived/helpers/archived}/ohe_layers_decode.py +0 -0
  90. /smftools/informatics/{helpers → archived/helpers/archived}/one_hot_decode.py +0 -0
  91. /smftools/informatics/{helpers → archived/helpers/archived}/one_hot_encode.py +0 -0
  92. /smftools/informatics/{subsample_pod5.py → archived/subsample_pod5.py} +0 -0
  93. /smftools/informatics/{helpers/binarize_converted_base_identities.py → binarize_converted_base_identities.py} +0 -0
  94. /smftools/informatics/{helpers/complement_base_list.py → complement_base_list.py} +0 -0
  95. {smftools-0.2.1.dist-info → smftools-0.2.3.dist-info}/WHEEL +0 -0
  96. {smftools-0.2.1.dist-info → smftools-0.2.3.dist-info}/licenses/LICENSE +0 -0
smftools/informatics/bam_functions.py (new file)
@@ -0,0 +1,812 @@
+ from __future__ import annotations
+
+ from pathlib import Path
+ import os
+ import subprocess
+ import glob
+ import time
+ from typing import Dict, List, Any, Tuple, Union, Optional, Iterable
+ import re
+ from itertools import zip_longest
+ import pysam
+
+ import numpy as np
+ import concurrent.futures
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from concurrent.futures import ProcessPoolExecutor
+
+ from tqdm import tqdm
+ from collections import defaultdict, Counter
+
+ from ..readwrite import make_dirs, time_string, date_string
+
+ def _bam_to_fastq_with_pysam(bam_path: Union[str, Path], fastq_path: Union[str, Path]) -> None:
+     """
+     Minimal BAM->FASTQ using pysam. Writes unmapped or unaligned reads as-is.
+     """
+     bam_path = str(bam_path)
+     fastq_path = str(fastq_path)
+     with pysam.AlignmentFile(bam_path, "rb", check_sq=False) as bam, open(fastq_path, "w", encoding="utf-8") as fq:
+         for r in bam.fetch(until_eof=True):
+             # Optionally skip secondary/supplementary:
+             # if r.is_secondary or r.is_supplementary:
+             #     continue
+
+             name = r.query_name or ""
+             seq = r.query_sequence or ""
+
+             # Get numeric qualities; may be None
+             q = r.query_qualities
+
+             if q is None:
+                 # fallback: fill with low quality ("!")
+                 qual_str = "!" * len(seq)
+             else:
+                 # q is an array/list of ints (Phred scores).
+                 # Convert to FASTQ string with Phred+33 encoding,
+                 # clamping to sane range [0, 93] to stay in printable ASCII.
+                 qual_str = "".join(
+                     chr(min(max(int(qv), 0), 93) + 33)
+                     for qv in q
+                 )
+
+             fq.write(f"@{name}\n{seq}\n+\n{qual_str}\n")
+
+ def _sort_bam_with_pysam(in_bam: Union[str, Path], out_bam: Union[str, Path], threads: Optional[int] = None) -> None:
+     in_bam, out_bam = str(in_bam), str(out_bam)
+     args = []
+     if threads:
+         args += ["-@", str(threads)]
+     args += ["-o", out_bam, in_bam]
+     pysam.sort(*args)
+
+ def _index_bam_with_pysam(bam_path: Union[str, Path], threads: Optional[int] = None) -> None:
+     bam_path = str(bam_path)
+     # pysam.index supports samtools-style args
+     if threads:
+         pysam.index("-@", str(threads), bam_path)
+     else:
+         pysam.index(bam_path)
+
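For orientation, a minimal usage sketch of these module-internal pysam helpers (all paths below are hypothetical; the helpers are thin wrappers over samtools-style fastq/sort/index behaviour):

from pathlib import Path
from smftools.informatics.bam_functions import (
    _bam_to_fastq_with_pysam,
    _sort_bam_with_pysam,
    _index_bam_with_pysam,
)

unaligned = Path("basecalls/sample.bam")                       # hypothetical unaligned BAM
_bam_to_fastq_with_pysam(unaligned, unaligned.with_suffix(".fastq"))
_sort_bam_with_pysam("sample_aligned.bam", "sample_aligned_sorted.bam", threads=4)
_index_bam_with_pysam("sample_aligned_sorted.bam", threads=4)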
+ def align_and_sort_BAM(fasta,
+                        input,
+                        bam_suffix='.bam',
+                        output_directory='aligned_outputs',
+                        make_bigwigs=False,
+                        threads=None,
+                        aligner='minimap2',
+                        aligner_args=['-a', '-x', 'map-ont', '--MD', '-Y', '-y', '-N', '5', '--secondary=no']):
+     """
+     A wrapper for aligning reads with minimap2 or dorado, then sorting and indexing the output with pysam.
+
+     Parameters:
+         fasta (str): File path to the reference genome to align to.
+         input (Path): Path to the basecalled file to align. Works for .bam and .fastq files.
+         bam_suffix (str): The suffix to use for the BAM file.
+         output_directory (Path): Path to the directory to output all the analyses.
+         make_bigwigs (bool): Whether to make bigwigs.
+         threads (int): Number of additional threads to use.
+         aligner (str): Aligner to use. Options: 'minimap2' and 'dorado'.
+         aligner_args (list): List of optional parameters to use for the alignment.
+
+     Returns:
+         None
+             The function writes out: 1) an aligned BAM, 2) an aligned and sorted BAM, 3) an index file for the aligned and sorted BAM.
+     """
+     input_basename = input.name
+     input_suffix = input.suffix
+     input_as_fastq = input.with_name(input.stem + '.fastq')
+
+     output_path_minus_suffix = output_directory / input.stem
+
+     aligned_BAM = output_path_minus_suffix.with_name(output_path_minus_suffix.stem + "_aligned")
+     aligned_output = aligned_BAM.with_suffix(bam_suffix)
+     aligned_sorted_BAM = aligned_BAM.with_name(aligned_BAM.stem + "_sorted")
+     aligned_sorted_output = aligned_sorted_BAM.with_suffix(bam_suffix)
+
+     if threads:
+         threads = str(threads)
+     else:
+         pass
+
+     if aligner == 'minimap2':
+         print(f"Converting BAM to FASTQ: {input}")
+         _bam_to_fastq_with_pysam(input, input_as_fastq)
+         print(f"Aligning FASTQ to Reference: {input_as_fastq}")
+         if threads:
+             minimap_command = ['minimap2'] + aligner_args + ['-t', threads, str(fasta), str(input_as_fastq)]
+         else:
+             minimap_command = ['minimap2'] + aligner_args + [str(fasta), str(input_as_fastq)]
+         subprocess.run(minimap_command, stdout=open(aligned_output, "wb"))
+         os.remove(input_as_fastq)
+
+     elif aligner == 'dorado':
+         # Run dorado aligner
+         print(f"Aligning BAM to Reference: {input}")
+         if threads:
+             alignment_command = ["dorado", "aligner", "-t", threads] + aligner_args + [str(fasta), str(input)]
+         else:
+             alignment_command = ["dorado", "aligner"] + aligner_args + [str(fasta), str(input)]
+         subprocess.run(alignment_command, stdout=open(aligned_output, "wb"))
+
+     else:
+         print(f'Aligner not recognized: {aligner}. Choose from minimap2 and dorado')
+         return
+
+     # --- Sort & Index with pysam ---
+     print(f"[pysam] Sorting: {aligned_output} -> {aligned_sorted_output}")
+     _sort_bam_with_pysam(aligned_output, aligned_sorted_output, threads=threads)
+
+     print(f"[pysam] Indexing: {aligned_sorted_output}")
+     _index_bam_with_pysam(aligned_sorted_output, threads=threads)
+
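Assuming minimap2 is on PATH, a minimal invocation sketch (all paths are hypothetical; input and output_directory are passed as Path objects, and the output directory must exist before alignment):

from pathlib import Path
from smftools.informatics.bam_functions import align_and_sort_BAM

out_dir = Path("aligned_outputs")
out_dir.mkdir(parents=True, exist_ok=True)
align_and_sort_BAM(
    fasta=Path("reference.fasta"),          # hypothetical reference FASTA
    input=Path("basecalls/sample.bam"),     # hypothetical unaligned, basecalled BAM
    output_directory=out_dir,
    threads=4,
    aligner="minimap2",
)
# Expected outputs: aligned_outputs/sample_aligned.bam and
# aligned_outputs/sample_aligned_sorted.bam plus its .bai index.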
+ def bam_qc(
+     bam_files: Iterable[str | Path],
+     bam_qc_dir: str | Path,
+     threads: Optional[int],
+     modality: str,
+     stats: bool = True,
+     flagstats: bool = True,
+     idxstats: bool = True,
+ ) -> None:
+     """
+     QC for BAM/CRAMs: stats, flagstat, idxstats.
+     Prefers pysam; falls back to `samtools` if needed.
+     Runs BAMs in parallel (up to `threads`, default serial).
+     """
+     import subprocess
+     import shutil
+
+     # Try to import pysam once
+     try:
+         import pysam
+         HAVE_PYSAM = True
+     except Exception:
+         HAVE_PYSAM = False
+
+     bam_qc_dir = Path(bam_qc_dir)
+     bam_qc_dir.mkdir(parents=True, exist_ok=True)
+
+     bam_files = [Path(b) for b in bam_files]
+
+     def _has_index(p: Path) -> bool:
+         if p.suffix.lower() == ".bam":
+             bai = p.with_suffix(p.suffix + ".bai")
+             bai_alt = Path(str(p) + ".bai")
+             return bai.exists() or bai_alt.exists()
+         if p.suffix.lower() == ".cram":
+             crai = Path(str(p) + ".crai")
+             return crai.exists()
+         return False
+
+     def _ensure_index(p: Path) -> None:
+         if _has_index(p):
+             return
+         if HAVE_PYSAM:
+             # pysam.index supports both BAM & CRAM
+             pysam.index(str(p))
+         else:
+             cmd = ["samtools", "index", str(p)]
+             subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+     def _run_one(bam: Path) -> Tuple[Path, List[Tuple[str, int]]]:
+         # outputs + return (file, [(task_name, returncode)])
+         results: List[Tuple[str, int]] = []
+         base = bam.stem  # filename without .bam
+         out_stats = bam_qc_dir / f"{base}_stats.txt"
+         out_flag = bam_qc_dir / f"{base}_flagstat.txt"
+         out_idx = bam_qc_dir / f"{base}_idxstats.txt"
+
+         # Make sure an index exists (samtools stats/flagstat don’t require one, idxstats does)
+         try:
+             _ensure_index(bam)
+         except Exception as e:
+             # Still attempt stats/flagstat if requested
+             print(f"[warn] Indexing failed for {bam}: {e}")
+
+         # Choose runner per task
+         def run_stats():
+             if not stats:
+                 return
+             if HAVE_PYSAM and hasattr(pysam, "stats"):
+                 txt = pysam.stats(str(bam))
+                 out_stats.write_text(txt)
+                 results.append(("stats(pysam)", 0))
+             else:
+                 cmd = ["samtools", "stats", str(bam)]
+                 with open(out_stats, "w") as fh:
+                     cp = subprocess.run(cmd, stdout=fh, stderr=subprocess.PIPE)
+                 results.append(("stats(samtools)", cp.returncode))
+                 if cp.returncode != 0:
+                     raise RuntimeError(cp.stderr.decode(errors="replace"))
+
+         def run_flagstat():
+             if not flagstats:
+                 return
+             if HAVE_PYSAM and hasattr(pysam, "flagstat"):
+                 txt = pysam.flagstat(str(bam))
+                 out_flag.write_text(txt)
+                 results.append(("flagstat(pysam)", 0))
+             else:
+                 cmd = ["samtools", "flagstat", str(bam)]
+                 with open(out_flag, "w") as fh:
+                     cp = subprocess.run(cmd, stdout=fh, stderr=subprocess.PIPE)
+                 results.append(("flagstat(samtools)", cp.returncode))
+                 if cp.returncode != 0:
+                     raise RuntimeError(cp.stderr.decode(errors="replace"))
+
+         def run_idxstats():
+             if not idxstats:
+                 return
+             if HAVE_PYSAM and hasattr(pysam, "idxstats"):
+                 txt = pysam.idxstats(str(bam))
+                 out_idx.write_text(txt)
+                 results.append(("idxstats(pysam)", 0))
+             else:
+                 cmd = ["samtools", "idxstats", str(bam)]
+                 with open(out_idx, "w") as fh:
+                     cp = subprocess.run(cmd, stdout=fh, stderr=subprocess.PIPE)
+                 results.append(("idxstats(samtools)", cp.returncode))
+                 if cp.returncode != 0:
+                     raise RuntimeError(cp.stderr.decode(errors="replace"))
+
+         # Sanity: ensure samtools exists if pysam missing
+         if not HAVE_PYSAM:
+             if not shutil.which("samtools"):
+                 raise RuntimeError("Neither pysam nor samtools is available in PATH.")
+
+         # Execute tasks (serial per file; parallelized across files)
+         run_stats()
+         run_flagstat()
+         run_idxstats()
+         return bam, results
+
+     # Parallel across BAMs
+     max_workers = int(threads) if threads and int(threads) > 0 else 1
+     futures = []
+     with ThreadPoolExecutor(max_workers=max_workers) as ex:
+         for b in bam_files:
+             futures.append(ex.submit(_run_one, b))
+
+         for fut in as_completed(futures):
+             try:
+                 bam, res = fut.result()
+                 summary = ", ".join(f"{name}:{rc}" for name, rc in res) or "no-op"
+                 print(f"[qc] {bam.name}: {summary}")
+             except Exception as e:
+                 print(f"[error] QC failed: {e}")
+
+     # Placeholder check to keep the signature stable
+     if modality not in {"conversion", "direct"}:
+         print(f"[warn] Unknown modality '{modality}', continuing.")
+
+     print("QC processing completed.")
+
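A minimal usage sketch of bam_qc (file names hypothetical). It writes <name>_stats.txt, <name>_flagstat.txt and <name>_idxstats.txt into bam_qc_dir, using pysam when available and falling back to samtools otherwise:

from smftools.informatics.bam_functions import bam_qc

bam_qc(
    bam_files=["sample1_aligned_sorted.bam", "sample2_aligned_sorted.bam"],  # hypothetical inputs
    bam_qc_dir="qc_reports",
    threads=2,
    modality="conversion",
)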
+ def concatenate_fastqs_to_bam(
+     fastq_files: List[Union[str, Tuple[str, str], Path, Tuple[Path, Path]]],
+     output_bam: Union[str, Path],
+     barcode_tag: str = "BC",
+     barcode_map: Optional[Dict[Union[str, Path], str]] = None,
+     add_read_group: bool = True,
+     rg_sample_field: Optional[str] = None,
+     progress: bool = True,
+     auto_pair: bool = True,
+ ) -> Dict[str, Any]:
+     """
+     Concatenate FASTQ(s) into an **unaligned** BAM. Supports single-end and paired-end.
+
+     Parameters
+     ----------
+     fastq_files : list[Path|str] or list[(Path|str, Path|str)]
+         Either explicit pairs (R1,R2) or a flat list of FASTQs (auto-paired if auto_pair=True).
+     output_bam : Path|str
+         Output BAM path (parent directory will be created).
+     barcode_tag : str
+         SAM tag used to store barcode on each read (default 'BC').
+     barcode_map : dict or None
+         Optional mapping {path: barcode} to override automatic filename-based barcode extraction.
+     add_read_group : bool
+         If True, add @RG header lines (ID = barcode) and set each read's RG tag.
+     rg_sample_field : str or None
+         If set, include SM=<value> in @RG.
+     progress : bool
+         Show tqdm progress bars.
+     auto_pair : bool
+         Auto-pair R1/R2 based on filename patterns if given a flat list.
+
+     Returns
+     -------
+     dict
+         {'total_reads','per_file','paired_pairs_written','singletons_written','barcodes'}
+     """
+
+     # ---------- helpers (Pathlib-only) ----------
+     def _strip_fastq_ext(p: Path) -> str:
+         """
+         Remove common FASTQ multi-suffixes; return stem-like name.
+         """
+         name = p.name
+         lowers = name.lower()
+         for ext in (".fastq.gz", ".fq.gz", ".fastq.bz2", ".fq.bz2", ".fastq.xz", ".fq.xz", ".fastq", ".fq"):
+             if lowers.endswith(ext):
+                 return name[: -len(ext)]
+         return p.stem  # fallback: remove last suffix only
+
+     def _extract_barcode_from_filename(p: Path) -> str:
+         stem = _strip_fastq_ext(p)
+         if "_" in stem:
+             token = stem.split("_")[-1]
+             if token:
+                 return token
+         return stem
+
+     def _classify_read_token(stem: str) -> Tuple[Optional[str], Optional[int]]:
+         # return (prefix, readnum) if matches; else (None, None)
+         patterns = [
+             r"(?i)(.*?)[._-]r?([12])$",  # prefix_R1 / prefix.r2 / prefix-1
+             r"(?i)(.*?)[._-]read[_-]?([12])$",  # prefix_read1
+         ]
+         for pat in patterns:
+             m = re.match(pat, stem)
+             if m:
+                 return m.group(1), int(m.group(2))
+         return None, None
+
+     def _pair_by_filename(paths: List[Path]) -> Tuple[List[Tuple[Path, Path]], List[Path]]:
+         pref_map: Dict[str, Dict[int, Path]] = {}
+         unpaired: List[Path] = []
+         for pth in paths:
+             stem = _strip_fastq_ext(pth)
+             pref, num = _classify_read_token(stem)
+             if pref is None:
+                 unpaired.append(pth)
+             else:
+                 entry = pref_map.setdefault(pref, {})
+                 entry[num] = pth
+         pairs: List[Tuple[Path, Path]] = []
+         leftovers: List[Path] = []
+         for d in pref_map.values():
+             if 1 in d and 2 in d:
+                 pairs.append((d[1], d[2]))
+             else:
+                 leftovers.extend(d.values())
+         leftovers.extend(unpaired)
+         return pairs, leftovers
+
+     def _fastq_iter(p: Path):
+         # pysam.FastxFile handles compressed extensions transparently
+         with pysam.FastxFile(str(p)) as fx:
+             for rec in fx:
+                 yield rec  # rec.name, rec.sequence, rec.quality
+
+     def _make_unaligned_segment(
+         name: str,
+         seq: str,
+         qual: Optional[str],
+         bc: str,
+         read1: bool,
+         read2: bool,
+     ) -> pysam.AlignedSegment:
+         a = pysam.AlignedSegment()
+         a.query_name = name
+         a.query_sequence = seq
+         if qual is not None:
+             a.query_qualities = pysam.qualitystring_to_array(qual)
+         a.is_unmapped = True
+         a.is_paired = read1 or read2
+         a.is_read1 = read1
+         a.is_read2 = read2
+         a.mate_is_unmapped = a.is_paired
+         a.reference_id = -1
+         a.reference_start = -1
+         a.next_reference_id = -1
+         a.next_reference_start = -1
+         a.template_length = 0
+         a.set_tag(barcode_tag, str(bc), value_type="Z")
+         if add_read_group:
+             a.set_tag("RG", str(bc), value_type="Z")
+         return a
+
+     # ---------- normalize inputs to Path ----------
+     def _to_path_pair(x) -> Tuple[Path, Path]:
+         a, b = x
+         return Path(a), Path(b)
+
+     explicit_pairs: List[Tuple[Path, Path]] = []
+     singles: List[Path] = []
+
+     if not isinstance(fastq_files, (list, tuple)):
+         raise ValueError("fastq_files must be a list of paths or list of (R1,R2) tuples.")
+
+     if all(isinstance(x, (list, tuple)) and len(x) == 2 for x in fastq_files):
+         explicit_pairs = [_to_path_pair(x) for x in fastq_files]
+     else:
+         flat_paths = [Path(x) for x in fastq_files if x is not None]
+         if auto_pair:
+             explicit_pairs, leftovers = _pair_by_filename(flat_paths)
+             singles = leftovers
+         else:
+             singles = flat_paths
+
+     output_bam = Path(output_bam)
+     output_bam.parent.mkdir(parents=True, exist_ok=True)
+
+     # ---------- barcodes ----------
+     barcode_map = {Path(k): v for k, v in (barcode_map or {}).items()}
+     per_path_barcode: Dict[Path, str] = {}
+     barcodes_in_order: List[str] = []
+
+     for r1, r2 in explicit_pairs:
+         bc = barcode_map.get(r1) or barcode_map.get(r2) or _extract_barcode_from_filename(r1)
+         per_path_barcode[r1] = bc
+         per_path_barcode[r2] = bc
+         if bc not in barcodes_in_order:
+             barcodes_in_order.append(bc)
+     for pth in singles:
+         bc = barcode_map.get(pth) or _extract_barcode_from_filename(pth)
+         per_path_barcode[pth] = bc
+         if bc not in barcodes_in_order:
+             barcodes_in_order.append(bc)
+
+     # ---------- BAM header ----------
+     header = {"HD": {"VN": "1.6", "SO": "unknown"}, "SQ": []}
+     if add_read_group:
+         header["RG"] = [{"ID": bc, **({"SM": rg_sample_field} if rg_sample_field else {})} for bc in barcodes_in_order]
+     header.setdefault("PG", []).append(
+         {"ID": "concat-fastq", "PN": "concatenate_fastqs_to_bam", "VN": "1"}
+     )
+
+     # ---------- counters ----------
+     per_file_counts: Dict[Path, int] = {}
+     total_written = 0
+     paired_pairs_written = 0
+     singletons_written = 0
+
+     # ---------- write BAM ----------
+     with pysam.AlignmentFile(str(output_bam), "wb", header=header) as bam_out:
+         # Paired
+         it_pairs = explicit_pairs
+         if progress and it_pairs:
+             it_pairs = tqdm(it_pairs, desc="Paired FASTQ→BAM")
+         for r1_path, r2_path in it_pairs:
+             if not (r1_path.exists() and r2_path.exists()):
+                 raise FileNotFoundError(f"Paired file missing: {r1_path} or {r2_path}")
+             bc = per_path_barcode.get(r1_path) or per_path_barcode.get(r2_path) or "barcode"
+
+             it1 = _fastq_iter(r1_path)
+             it2 = _fastq_iter(r2_path)
+
+             for rec1, rec2 in zip_longest(it1, it2, fillvalue=None):
+                 def _clean(n: Optional[str]) -> Optional[str]:
+                     if n is None:
+                         return None
+                     return re.sub(r"(?:/1$|/2$|\s[12]$)", "", n)
+
+                 name = (
+                     _clean(getattr(rec1, "name", None))
+                     or _clean(getattr(rec2, "name", None))
+                     or getattr(rec1, "name", None)
+                     or getattr(rec2, "name", None)
+                 )
+
+                 if rec1 is not None:
+                     a1 = _make_unaligned_segment(name, rec1.sequence, rec1.quality, bc, read1=True, read2=False)
+                     bam_out.write(a1)
+                     per_file_counts[r1_path] = per_file_counts.get(r1_path, 0) + 1
+                     total_written += 1
+                 if rec2 is not None:
+                     a2 = _make_unaligned_segment(name, rec2.sequence, rec2.quality, bc, read1=False, read2=True)
+                     bam_out.write(a2)
+                     per_file_counts[r2_path] = per_file_counts.get(r2_path, 0) + 1
+                     total_written += 1
+
+                 if rec1 is not None and rec2 is not None:
+                     paired_pairs_written += 1
+                 else:
+                     if rec1 is not None:
+                         singletons_written += 1
+                     if rec2 is not None:
+                         singletons_written += 1
+
+         # Singles
+         it_singles = singles
+         if progress and it_singles:
+             it_singles = tqdm(it_singles, desc="Single FASTQ→BAM")
+         for pth in it_singles:
+             if not pth.exists():
+                 raise FileNotFoundError(pth)
+             bc = per_path_barcode.get(pth, "barcode")
+             for rec in _fastq_iter(pth):
+                 a = _make_unaligned_segment(rec.name, rec.sequence, rec.quality, bc, read1=False, read2=False)
+                 bam_out.write(a)
+                 per_file_counts[pth] = per_file_counts.get(pth, 0) + 1
+                 total_written += 1
+                 singletons_written += 1
+
+     return {
+         "total_reads": total_written,
+         "per_file": {str(k): v for k, v in per_file_counts.items()},
+         "paired_pairs_written": paired_pairs_written,
+         "singletons_written": singletons_written,
+         "barcodes": barcodes_in_order,
+     }
+
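A minimal usage sketch (FASTQ names hypothetical, following the <prefix>_<barcode> naming convention the filename-based barcode extraction assumes):

from smftools.informatics.bam_functions import concatenate_fastqs_to_bam

summary = concatenate_fastqs_to_bam(
    fastq_files=["sample_barcode01.fastq.gz", "sample_barcode02.fastq.gz"],
    output_bam="pooled_unaligned.bam",
)
print(summary["total_reads"], summary["barcodes"])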
+ def count_aligned_reads(bam_file):
+     """
+     Counts the number of aligned reads in a bam file that map to each reference record.
+
+     Parameters:
+         bam_file (str): A string representing the path to an aligned BAM file.
+
+     Returns:
+         aligned_reads_count (int): The total number of reads aligned in the BAM.
+         unaligned_reads_count (int): The total number of reads not aligned in the BAM.
+         record_counts (dict): A dictionary keyed by reference record name that points to a tuple containing the total reads mapped to the record and the fraction of mapped reads which map to the record.
+
+     """
+     print('{0}: Counting aligned reads in BAM > {1}'.format(time_string(), bam_file))
+     aligned_reads_count = 0
+     unaligned_reads_count = 0
+     # Make a dictionary, keyed by the reference_name of the reference chromosome, that points to the number of reads mapped to that chromosome (later extended with the proportion of mapped reads)
+     record_counts = defaultdict(int)
+
+     with pysam.AlignmentFile(str(bam_file), "rb") as bam:
+         total_reads = bam.mapped + bam.unmapped
+         # Iterate over reads to get the total mapped read counts and the reads that map to each reference
+         for read in tqdm(bam, desc='Counting aligned reads in BAM', total=total_reads):
+             if read.is_unmapped:
+                 unaligned_reads_count += 1
+             else:
+                 aligned_reads_count += 1
+                 record_counts[read.reference_name] += 1  # Automatically increments if key exists, adds if not
+
+     # Reformat the dictionary to contain the read counts mapped to each reference, as well as the proportion of mapped reads in that reference
+     for reference in record_counts:
+         proportion_mapped_reads_in_record = record_counts[reference] / aligned_reads_count
+         record_counts[reference] = (record_counts[reference], proportion_mapped_reads_in_record)
+
+     return aligned_reads_count, unaligned_reads_count, dict(record_counts)
+
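A usage sketch against a coordinate-sorted, indexed BAM (path hypothetical); bam.mapped/bam.unmapped require the index to be present:

from smftools.informatics.bam_functions import count_aligned_reads

aligned, unaligned, per_record = count_aligned_reads("sample_aligned_sorted.bam")
for record, (n_reads, fraction) in per_record.items():
    print(f"{record}\t{n_reads}\t{fraction:.3f}")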
+ def demux_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix, barcode_kit, barcode_both_ends, trim, threads):
+     """
+     A wrapper function for demultiplexing BAMs and indexing them.
+     Parameters:
+         aligned_sorted_BAM (Path): The file path of the aligned_sorted BAM file.
+         split_dir (Path): The file path to the directory to split the BAMs into.
+         bam_suffix (str): A suffix to add to the bam file.
+         barcode_kit (str): Name of barcoding kit.
+         barcode_both_ends (bool): Whether to require both ends to be barcoded.
+         trim (bool): Whether to trim off barcodes after demultiplexing.
+         threads (int): Number of threads to use.
+
+     Returns:
+         bam_files (list): List of split BAM file paths.
+         Splits an input BAM file on barcode value and makes a BAM index file.
+     """
+     input_bam = aligned_sorted_BAM.with_suffix(bam_suffix)
+     command = ["dorado", "demux", "--kit-name", barcode_kit]
+     if barcode_both_ends:
+         command.append("--barcode-both-ends")
+     if not trim:
+         command.append("--no-trim")
+     if threads:
+         command += ["-t", str(threads)]
+     else:
+         pass
+     command += ["--emit-summary", "--sort-bam", "--output-dir", str(split_dir)]
+     command.append(str(input_bam))
+     command_string = ' '.join(command)
+     print(f"Running: {command_string}")
+     subprocess.run(command)
+
+     bam_files = sorted(
+         p for p in split_dir.glob(f"*{bam_suffix}")
+         if p.is_file() and p.suffix == bam_suffix
+     )
+
+     if not bam_files:
+         raise FileNotFoundError(f"No BAM files found in {split_dir} with suffix {bam_suffix}")
+
+     # ---- Optional renaming with prefix ----
+     renamed_bams = []
+     prefix = "de" if barcode_both_ends else "se"
+
+     for bam in bam_files:
+         bam = Path(bam)
+         bai = bam.with_suffix(bam_suffix + ".bai")  # dorado’s sorting produces .bam.bai
+
+         if prefix:
+             new_name = f"{prefix}_{bam.name}"
+         else:
+             new_name = bam.name
+
+         new_bam = bam.with_name(new_name)
+         bam.rename(new_bam)
+
+         # rename index if exists
+         if bai.exists():
+             new_bai = new_bam.with_suffix(bam_suffix + ".bai")
+             bai.rename(new_bai)
+
+         renamed_bams.append(new_bam)
+
+     return renamed_bams
+
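Assuming dorado is on PATH, a usage sketch (paths and barcoding-kit name are hypothetical); the function returns the renamed, demultiplexed BAM paths:

from pathlib import Path
from smftools.informatics.bam_functions import demux_and_index_BAM

split_bams = demux_and_index_BAM(
    aligned_sorted_BAM=Path("aligned_outputs/sample_aligned_sorted.bam"),
    split_dir=Path("demux_outputs"),
    bam_suffix=".bam",
    barcode_kit="SQK-NBD114-24",   # hypothetical kit name
    barcode_both_ends=False,
    trim=True,
    threads=4,
)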
+ def extract_base_identities(bam_file, chromosome, positions, max_reference_length, sequence):
+     """
+     Efficiently extracts base identities from mapped reads with reference coordinates.
+
+     Parameters:
+         bam_file (str): Path to the BAM file.
+         chromosome (str): Name of the reference chromosome.
+         positions (list): Positions to extract (0-based).
+         max_reference_length (int): Maximum reference length for padding.
+         sequence (str): The reference sequence of the FASTA record.
+
+     Returns:
+         dict: Base identities from forward mapped reads.
+         dict: Base identities from reverse mapped reads.
+         dict: Per-read mismatch counts (read name -> reference base -> Counter of read bases).
+         dict: Per-read mismatch trend ('C->T', 'G->A', 'equal', or 'none').
+     """
+     timestamp = time.strftime("[%Y-%m-%d %H:%M:%S]")
+
+     positions = set(positions)
+     fwd_base_identities = defaultdict(lambda: np.full(max_reference_length, 'N', dtype='<U1'))
+     rev_base_identities = defaultdict(lambda: np.full(max_reference_length, 'N', dtype='<U1'))
+     mismatch_counts_per_read = defaultdict(lambda: defaultdict(Counter))
+
+     #print(f"{timestamp} Reading reads from {chromosome} BAM file: {bam_file}")
+     with pysam.AlignmentFile(str(bam_file), "rb") as bam:
+         total_reads = bam.mapped
+         ref_seq = sequence.upper()
+         for read in bam.fetch(chromosome):
+             if not read.is_mapped:
+                 continue  # Skip unmapped reads
+
+             read_name = read.query_name
+             query_sequence = read.query_sequence
+             base_dict = rev_base_identities if read.is_reverse else fwd_base_identities
+
+             # Use get_aligned_pairs directly with positions filtering
+             aligned_pairs = read.get_aligned_pairs(matches_only=True)
+
+             for read_position, reference_position in aligned_pairs:
+                 if reference_position in positions:
+                     read_base = query_sequence[read_position]
+                     ref_base = ref_seq[reference_position]
+
+                     base_dict[read_name][reference_position] = read_base
+
+                     # Track mismatches (excluding Ns)
+                     if read_base != ref_base and read_base != 'N' and ref_base != 'N':
+                         mismatch_counts_per_read[read_name][ref_base][read_base] += 1
+
+     # Determine C→T vs G→A dominance per read
+     mismatch_trend_per_read = {}
+     for read_name, ref_dict in mismatch_counts_per_read.items():
+         c_to_t = ref_dict.get("C", {}).get("T", 0)
+         g_to_a = ref_dict.get("G", {}).get("A", 0)
+
+         if abs(c_to_t - g_to_a) < 0.01 and c_to_t > 0:
+             mismatch_trend_per_read[read_name] = "equal"
+         elif c_to_t > g_to_a:
+             mismatch_trend_per_read[read_name] = "C->T"
+         elif g_to_a > c_to_t:
+             mismatch_trend_per_read[read_name] = "G->A"
+         else:
+             mismatch_trend_per_read[read_name] = "none"
+
+     return dict(fwd_base_identities), dict(rev_base_identities), dict(mismatch_counts_per_read), mismatch_trend_per_read
+
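A usage sketch for a single-record reference (reference.fasta and the BAM are hypothetical; both the FASTA .fai index and the BAM index must exist, since bam.fetch is used):

import pysam
from smftools.informatics.bam_functions import extract_base_identities

ref = pysam.FastaFile("reference.fasta")
record = ref.references[0]
seq = ref.fetch(record)

fwd, rev, mismatches, trends = extract_base_identities(
    bam_file="sample_aligned_sorted.bam",
    chromosome=record,
    positions=range(len(seq)),
    max_reference_length=len(seq),
    sequence=seq,
)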
+ def extract_read_features_from_bam(bam_file_path):
+     """
+     Build a dict mapping each mapped read in a BAM to a list of read metrics: read length, median read Q-score, reference length, mapped length, mapping quality.
+     Params:
+         bam_file_path (str): Path to the BAM file.
+     Returns:
+         read_metrics (dict)
+     """
+     # Open the BAM file
+     print(f'Extracting read features from BAM: {bam_file_path}')
+     with pysam.AlignmentFile(bam_file_path, "rb") as bam_file:
+         read_metrics = {}
+         reference_lengths = bam_file.lengths  # List of lengths for each reference (chromosome)
+         for read in bam_file:
+             # Skip unmapped reads
+             if read.is_unmapped:
+                 continue
+             # Extract the read metrics
+             read_quality = read.query_qualities
+             median_read_quality = np.median(read_quality)
+             # Extract the reference (chromosome) name and its length
+             reference_name = read.reference_name
+             reference_index = bam_file.references.index(reference_name)
+             reference_length = reference_lengths[reference_index]
+             mapped_length = sum(end - start for start, end in read.get_blocks())
+             mapping_quality = read.mapping_quality  # Phred-scaled MAPQ
+             read_metrics[read.query_name] = [read.query_length, median_read_quality, reference_length, mapped_length, mapping_quality]
+
+     return read_metrics
+
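A usage sketch (BAM path hypothetical); each value is [read length, median Q-score, reference length, mapped length, MAPQ]:

from smftools.informatics.bam_functions import extract_read_features_from_bam

metrics = extract_read_features_from_bam("sample_aligned_sorted.bam")
for read_name, (read_len, median_q, ref_len, mapped_len, mapq) in metrics.items():
    print(read_name, read_len, median_q, mapq)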
+ def extract_readnames_from_bam(aligned_BAM):
+     """
+     Takes a BAM and writes out a txt file containing read names from the BAM.
+
+     Parameters:
+         aligned_BAM (str): Path to an input aligned_BAM to extract read names from.
+
+     Returns:
+         None
+     """
+     import subprocess
+     # Make a text file of reads for the BAM
+     txt_output = aligned_BAM.split('.bam')[0] + '_read_names.txt'
+     samtools_view = subprocess.Popen(["samtools", "view", aligned_BAM], stdout=subprocess.PIPE)
+     with open(txt_output, "w") as output_file:
+         cut_process = subprocess.Popen(["cut", "-f1"], stdin=samtools_view.stdout, stdout=output_file)
+         samtools_view.stdout.close()
+         cut_process.wait()
+     samtools_view.wait()
+
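A usage sketch (BAM path hypothetical); samtools and cut must be on PATH, and the read-name text file is written next to the input:

from smftools.informatics.bam_functions import extract_readnames_from_bam

extract_readnames_from_bam("sample_aligned_sorted.bam")
# writes sample_aligned_sorted_read_names.txt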
+ def separate_bam_by_bc(input_bam, output_prefix, bam_suffix, split_dir):
+     """
+     Separates an input BAM file on the BC SAM tag values.
+
+     Parameters:
+         input_bam (str): File path to the BAM file to split.
+         output_prefix (str): A prefix to append to the output BAM.
+         bam_suffix (str): A suffix to add to the bam file.
+         split_dir (str): String indicating path to directory to split BAMs into
+
+     Returns:
+         None
+             Writes out split BAM files.
+     """
+     bam_base = input_bam.name
+     bam_base_minus_suffix = input_bam.stem
+
+     # Open the input BAM file for reading
+     with pysam.AlignmentFile(str(input_bam), "rb") as bam:
+         # Create a dictionary to store output BAM files
+         output_files = {}
+         # Iterate over each read in the BAM file
+         for read in bam:
+             try:
+                 # Get the barcode tag value
+                 bc_tag = read.get_tag("BC", with_value_type=True)[0]
+                 #bc_tag = read.get_tag("BC", with_value_type=True)[0].split('barcode')[1]
+                 # Open the output BAM file corresponding to the barcode
+                 if bc_tag not in output_files:
+                     output_path = split_dir / f"{output_prefix}_{bam_base_minus_suffix}_{bc_tag}{bam_suffix}"
+                     output_files[bc_tag] = pysam.AlignmentFile(str(output_path), "wb", header=bam.header)
+                 # Write the read to the corresponding output BAM file
+                 output_files[bc_tag].write(read)
+             except KeyError:
+                 print(f"BC tag not present for read: {read.query_name}")
+     # Close all output BAM files
+     for output_file in output_files.values():
+         output_file.close()
+
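A usage sketch (all names hypothetical); the input BAM is expected to carry BC tags, for example one produced by concatenate_fastqs_to_bam above, and split_dir must exist:

from pathlib import Path
from smftools.informatics.bam_functions import separate_bam_by_bc

split_dir = Path("split_bams")
split_dir.mkdir(parents=True, exist_ok=True)
separate_bam_by_bc(Path("pooled_unaligned.bam"), "pooled", ".bam", split_dir)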
+ def split_and_index_BAM(aligned_sorted_BAM, split_dir, bam_suffix):
+     """
+     A wrapper function for splitting BAMs and indexing them.
+     Parameters:
+         aligned_sorted_BAM (str): A string representing the file path of the aligned_sorted BAM file.
+         split_dir (str): A string representing the file path to the directory to split the BAMs into.
+         bam_suffix (str): A suffix to add to the bam file.
+
+     Returns:
+         bam_files (list): List of split BAM file paths.
+             Splits an input BAM file on barcode value and makes a BAM index file for each split BAM.
+     """
+     aligned_sorted_output = aligned_sorted_BAM + bam_suffix
+     file_prefix = date_string()
+     separate_bam_by_bc(aligned_sorted_output, file_prefix, bam_suffix, split_dir)
+     # Make a BAM index file for the BAMs in that directory
+     bam_pattern = '*' + bam_suffix
+     bam_files = glob.glob(split_dir / bam_pattern)
+     bam_files = [str(bam) for bam in bam_files if '.bai' not in str(bam)]
+     for input_file in bam_files:
+         pysam.index(input_file)
+
+     return bam_files